Compare commits
No commits in common. "c8-stream-1.4" and "c8s-stream-ds" have entirely different histories.
c8-stream-1.4 ... c8s-stream-ds
@@ -1,3 +1,3 @@
bd9aab32d9cbf9231058d585479813f3420dc872 SOURCES/389-ds-base-1.4.3.39.tar.bz2
1c8f2d0dfbf39fa8cd86363bf3314351ab21f8d4 SOURCES/jemalloc-5.3.0.tar.bz2
978b7c5e4a9e5784fddb23ba1abe4dc5a071589f SOURCES/vendor-1.4.3.39-1.tar.gz
c69c175a2f27053dffbfefac9c84ff16c7ff4cbf SOURCES/389-ds-base-1.4.3.23.tar.bz2
9e06b5cc57fd185379d007696da153893cf73e30 SOURCES/jemalloc-5.2.1.tar.bz2
22b1ef11852864027e184bb4bee56286b855b703 SOURCES/vendor-1.4.3.23-2.tar.gz
6 .gitignore vendored
@@ -1,3 +1,3 @@
SOURCES/389-ds-base-1.4.3.39.tar.bz2
SOURCES/jemalloc-5.3.0.tar.bz2
SOURCES/vendor-1.4.3.39-1.tar.gz
SOURCES/389-ds-base-1.4.3.23.tar.bz2
SOURCES/jemalloc-5.2.1.tar.bz2
SOURCES/vendor-1.4.3.23-2.tar.gz
File diff suppressed because it is too large
@@ -1,119 +0,0 @@
From dddb14210b402f317e566b6387c76a8e659bf7fa Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 14 Feb 2023 13:34:10 +0100
Subject: [PATCH 1/2] issue 5647 - covscan: memory leak in audit log when
 adding entries (#5650)

covscan reported an issue about "vals" variable in auditlog.c:231 and indeed a charray_free is missing.
Issue: 5647
Reviewed by: @mreynolds389, @droideck
---
 ldap/servers/slapd/auditlog.c | 71 +++++++++++++++++++----------------
 1 file changed, 38 insertions(+), 33 deletions(-)

diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 68cbc674d..3128e0497 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -177,6 +177,40 @@ write_auditfail_log_entry(Slapi_PBlock *pb)
     slapi_ch_free_string(&audit_config);
 }
 
+/*
+ * Write the attribute values to the audit log as "comments"
+ *
+ * Slapi_Attr *entry - the attribute begin logged.
+ * char *attrname - the attribute name.
+ * lenstr *l - the audit log buffer
+ *
+ * Resulting output in the log:
+ *
+ *     #ATTR: VALUE
+ *     #ATTR: VALUE
+ */
+static void
+log_entry_attr(Slapi_Attr *entry_attr, char *attrname, lenstr *l)
+{
+    Slapi_Value **vals = attr_get_present_values(entry_attr);
+    for(size_t i = 0; vals && vals[i]; i++) {
+        char log_val[256] = "";
+        const struct berval *bv = slapi_value_get_berval(vals[i]);
+        if (bv->bv_len >= 256) {
+            strncpy(log_val, bv->bv_val, 252);
+            strcpy(log_val+252, "...");
+        } else {
+            strncpy(log_val, bv->bv_val, bv->bv_len);
+            log_val[bv->bv_len] = 0;
+        }
+        addlenstr(l, "#");
+        addlenstr(l, attrname);
+        addlenstr(l, ": ");
+        addlenstr(l, log_val);
+        addlenstr(l, "\n");
+    }
+}
+
 /*
  * Write "requested" attributes from the entry to the audit log as "comments"
  *
@@ -212,21 +246,9 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
         for (req_attr = ldap_utf8strtok_r(display_attrs, ", ", &last); req_attr;
              req_attr = ldap_utf8strtok_r(NULL, ", ", &last))
         {
-            char **vals = slapi_entry_attr_get_charray(entry, req_attr);
-            for(size_t i = 0; vals && vals[i]; i++) {
-                char log_val[256] = {0};
-
-                if (strlen(vals[i]) > 256) {
-                    strncpy(log_val, vals[i], 252);
-                    strcat(log_val, "...");
-                } else {
-                    strcpy(log_val, vals[i]);
-                }
-                addlenstr(l, "#");
-                addlenstr(l, req_attr);
-                addlenstr(l, ": ");
-                addlenstr(l, log_val);
-                addlenstr(l, "\n");
+            slapi_entry_attr_find(entry, req_attr, &entry_attr);
+            if (entry_attr) {
+                log_entry_attr(entry_attr, req_attr, l);
             }
         }
     } else {
@@ -234,7 +256,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
         for (; entry_attr; entry_attr = entry_attr->a_next) {
             Slapi_Value **vals = attr_get_present_values(entry_attr);
             char *attr = NULL;
-            const char *val = NULL;
 
             slapi_attr_get_type(entry_attr, &attr);
             if (strcmp(attr, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) {
@@ -251,23 +272,7 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
                 addlenstr(l, ": ****************************\n");
                 continue;
             }
-
-            for(size_t i = 0; vals && vals[i]; i++) {
-                char log_val[256] = {0};
-
-                val = slapi_value_get_string(vals[i]);
-                if (strlen(val) > 256) {
-                    strncpy(log_val, val, 252);
-                    strcat(log_val, "...");
-                } else {
-                    strcpy(log_val, val);
-                }
-                addlenstr(l, "#");
-                addlenstr(l, attr);
-                addlenstr(l, ": ");
-                addlenstr(l, log_val);
-                addlenstr(l, "\n");
-            }
+            log_entry_attr(entry_attr, attr, l);
         }
     }
     slapi_ch_free_string(&display_attrs);
--
2.43.0

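The removed patch above replaces two hand-rolled truncation loops with a single log_entry_attr() helper that caps each logged value at 252 bytes plus a "..." marker before writing it as a "#attr: value" comment. As a rough illustration of that truncation rule only (this is not code from the patch; the constant and function names below are invented), a minimal Python sketch:

```python
# Illustrative sketch of the value-truncation rule log_entry_attr() applies;
# MAX_LOG_VAL / KEEP_BYTES / format_audit_comment are invented names.
MAX_LOG_VAL = 256   # size of the fixed C buffer in auditlog.c
KEEP_BYTES = 252    # bytes kept before the "..." marker

def format_audit_comment(attrname: str, value: bytes) -> str:
    """Build one '#attr: value' audit-log comment line, truncating long values."""
    if len(value) >= MAX_LOG_VAL:
        shown = value[:KEEP_BYTES].decode("utf-8", errors="replace") + "..."
    else:
        shown = value.decode("utf-8", errors="replace")
    return "#%s: %s\n" % (attrname, shown)

if __name__ == "__main__":
    print(format_audit_comment("cn", b"short value"), end="")
    print(format_audit_comment("description", b"x" * 300), end="")
```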
@@ -0,0 +1,322 @@
From 1e1c2b23c35282481628af7e971ac683da334502 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Tue, 27 Apr 2021 17:00:15 +0100
Subject: [PATCH 02/12] Issue 4701 - RFE - Exclude attributes from retro
 changelog (#4723)

Description: When the retro changelog plugin is enabled it writes the
             added/modified values to the "cn-changelog" suffix. In
             some cases an entries attribute values can be of a
             sensitive nature and should be excluded. This RFE adds
             functionality that will allow an admin exclude certain
             attributes from the retro changelog DB.

Relates: https://github.com/389ds/389-ds-base/issues/4701

Reviewed by: mreynolds389, droideck (Thanks folks)
---
 .../tests/suites/retrocl/basic_test.py | 292 ++++++++++++++++++
 1 file changed, 292 insertions(+)
 create mode 100644 dirsrvtests/tests/suites/retrocl/basic_test.py

diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
new file mode 100644
index 000000000..112c73cb9
--- /dev/null
+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
@@ -0,0 +1,292 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import logging
+import ldap
+import time
+import pytest
+from lib389.topologies import topology_st
+from lib389.plugins import RetroChangelogPlugin
+from lib389._constants import *
+from lib389.utils import *
+from lib389.tasks import *
+from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
+from lib389.cli_base.dsrc import dsrc_arg_concat
+from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add
+from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
+
+pytestmark = pytest.mark.tier1
+
+USER1_DN = 'uid=user1,ou=people,'+ DEFAULT_SUFFIX
+USER2_DN = 'uid=user2,ou=people,'+ DEFAULT_SUFFIX
+USER_PW = 'password'
+ATTR_HOMEPHONE = 'homePhone'
+ATTR_CARLICENSE = 'carLicense'
+
+log = logging.getLogger(__name__)
+
+def test_retrocl_exclude_attr_add(topology_st):
+    """ Test exclude attribute feature of the retrocl plugin for add operation
+
+    :id: 3481650f-2070-45ef-9600-2500cfc51559
+
+    :setup: Standalone instance
+
+    :steps:
+        1. Enable dynamic plugins
+        2. Confige retro changelog plugin
+        3. Add an entry
+        4. Ensure entry attrs are in the changelog
+        5. Exclude an attr
+        6. Add another entry
+        7. Ensure excluded attr is not in the changelog
+
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+        5. Success
+        6. Success
+        7. Success
+    """
+
+    st = topology_st.standalone
+
+    log.info('Enable dynamic plugins')
+    try:
+        st.config.set('nsslapd-dynamic-plugins', 'on')
+    except ldap.LDAPError as e:
+        ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc'])
+        assert False
+
+    log.info('Configure retrocl plugin')
+    rcl = RetroChangelogPlugin(st)
+    rcl.disable()
+    rcl.enable()
+    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
+
+    log.info('Restarting instance')
+    try:
+        st.restart()
+    except ldap.LDAPError as e:
+        ldap.error('Failed to restart instance ' + e.args[0]['desc'])
+        assert False
+
+    users = UserAccounts(st, DEFAULT_SUFFIX)
+
+    log.info('Adding user1')
+    try:
+        user1 = users.create(properties={
+            'sn': '1',
+            'cn': 'user 1',
+            'uid': 'user1',
+            'uidNumber': '11',
+            'gidNumber': '111',
+            'givenname': 'user1',
+            'homePhone': '0861234567',
+            'carLicense': '131D16674',
+            'mail': 'user1@whereever.com',
+            'homeDirectory': '/home/user1',
+            'userpassword': USER_PW})
+    except ldap.ALREADY_EXISTS:
+        pass
+    except ldap.LDAPError as e:
+        log.error("Failed to add user1")
+
+    log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
+    try:
+        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+    except ldap.LDAPError as e:
+        log.fatal("Changelog search failed, error: " +str(e))
+        assert False
+    assert len(cllist) > 0
+    if cllist[0].hasAttr('changes'):
+        clstr = (cllist[0].getValue('changes')).decode()
+        assert ATTR_HOMEPHONE in clstr
+        assert ATTR_CARLICENSE in clstr
+
+    log.info('Excluding attribute ' + ATTR_HOMEPHONE)
+    args = FakeArgs()
+    args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
+    args.instance = 'standalone1'
+    args.basedn = None
+    args.binddn = None
+    args.starttls = False
+    args.pwdfile = None
+    args.bindpw = None
+    args.prompt = False
+    args.exclude_attrs = ATTR_HOMEPHONE
+    args.func = retrochangelog_add
+    dsrc_inst = dsrc_arg_concat(args, None)
+    inst = connect_instance(dsrc_inst, False, args)
+    result = args.func(inst, None, log, args)
+    disconnect_instance(inst)
+    assert result is None
+
+    log.info("5s delay for retrocl plugin to restart")
+    time.sleep(5)
+
+    log.info('Adding user2')
+    try:
+        user2 = users.create(properties={
+            'sn': '2',
+            'cn': 'user 2',
+            'uid': 'user2',
+            'uidNumber': '22',
+            'gidNumber': '222',
+            'givenname': 'user2',
+            'homePhone': '0879088363',
+            'carLicense': '04WX11038',
+            'mail': 'user2@whereever.com',
+            'homeDirectory': '/home/user2',
+            'userpassword': USER_PW})
+    except ldap.ALREADY_EXISTS:
+        pass
+    except ldap.LDAPError as e:
+        log.error("Failed to add user2")
+
+    log.info('Verify homePhone attr is not in the changelog changestring')
+    try:
+        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN)
+        assert len(cllist) > 0
+        if cllist[0].hasAttr('changes'):
+            clstr = (cllist[0].getValue('changes')).decode()
+            assert ATTR_HOMEPHONE not in clstr
+            assert ATTR_CARLICENSE in clstr
+    except ldap.LDAPError as e:
+        log.fatal("Changelog search failed, error: " +str(e))
+        assert False
+
+def test_retrocl_exclude_attr_mod(topology_st):
+    """ Test exclude attribute feature of the retrocl plugin for mod operation
+
+    :id: f6bef689-685b-4f86-a98d-f7e6b1fcada3
+
+    :setup: Standalone instance
+
+    :steps:
+        1. Enable dynamic plugins
+        2. Confige retro changelog plugin
+        3. Add user1 entry
+        4. Ensure entry attrs are in the changelog
+        5. Exclude an attr
+        6. Modify user1 entry
+        7. Ensure excluded attr is not in the changelog
+
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+        5. Success
+        6. Success
+        7. Success
+    """
+
+    st = topology_st.standalone
+
+    log.info('Enable dynamic plugins')
+    try:
+        st.config.set('nsslapd-dynamic-plugins', 'on')
+    except ldap.LDAPError as e:
+        ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc'])
+        assert False
+
+    log.info('Configure retrocl plugin')
+    rcl = RetroChangelogPlugin(st)
+    rcl.disable()
+    rcl.enable()
+    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
+
+    log.info('Restarting instance')
+    try:
+        st.restart()
+    except ldap.LDAPError as e:
+        ldap.error('Failed to restart instance ' + e.args[0]['desc'])
+        assert False
+
+    users = UserAccounts(st, DEFAULT_SUFFIX)
+
+    log.info('Adding user1')
+    try:
+        user1 = users.create(properties={
+            'sn': '1',
+            'cn': 'user 1',
+            'uid': 'user1',
+            'uidNumber': '11',
+            'gidNumber': '111',
+            'givenname': 'user1',
+            'homePhone': '0861234567',
+            'carLicense': '131D16674',
+            'mail': 'user1@whereever.com',
+            'homeDirectory': '/home/user1',
+            'userpassword': USER_PW})
+    except ldap.ALREADY_EXISTS:
+        pass
+    except ldap.LDAPError as e:
+        log.error("Failed to add user1")
+
+    log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
+    try:
+        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+    except ldap.LDAPError as e:
+        log.fatal("Changelog search failed, error: " +str(e))
+        assert False
+    assert len(cllist) > 0
+    if cllist[0].hasAttr('changes'):
+        clstr = (cllist[0].getValue('changes')).decode()
+        assert ATTR_HOMEPHONE in clstr
+        assert ATTR_CARLICENSE in clstr
+
+    log.info('Excluding attribute ' + ATTR_CARLICENSE)
+    args = FakeArgs()
+    args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
+    args.instance = 'standalone1'
+    args.basedn = None
+    args.binddn = None
+    args.starttls = False
+    args.pwdfile = None
+    args.bindpw = None
+    args.prompt = False
+    args.exclude_attrs = ATTR_CARLICENSE
+    args.func = retrochangelog_add
+    dsrc_inst = dsrc_arg_concat(args, None)
+    inst = connect_instance(dsrc_inst, False, args)
+    result = args.func(inst, None, log, args)
+    disconnect_instance(inst)
+    assert result is None
+
+    log.info("5s delay for retrocl plugin to restart")
+    time.sleep(5)
+
+    log.info('Modify user1 carLicense attribute')
+    try:
+        st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")])
+    except ldap.LDAPError as e:
+        log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc'])
+        assert False
+
+    log.info('Verify carLicense attr is not in the changelog changestring')
+    try:
+        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+        assert len(cllist) > 0
+        # There will be 2 entries in the changelog for this user, we are only
+        #interested in the second one, the modify operation.
+        if cllist[1].hasAttr('changes'):
+            clstr = (cllist[1].getValue('changes')).decode()
+            assert ATTR_CARLICENSE not in clstr
+    except ldap.LDAPError as e:
+        log.fatal("Changelog search failed, error: " +str(e))
+        assert False
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
--
2.26.3

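The tests above verify exclusion with a plain substring check on the retro changelog "changes" value (for example `assert ATTR_HOMEPHONE not in clstr`). A slightly stricter check, sketched here purely for illustration (the helper name is made up and is not part of the patch), matches attribute names at the start of each LDIF line so a value that merely contains the attribute name cannot produce a false positive:

```python
# Illustrative helper, not part of the patch: check whether an attribute name
# appears as the name of a changed attribute in a retro changelog "changes" blob.
def attr_in_changes(changes: str, attr: str) -> bool:
    """True if any line of the LDIF-style changes value starts with '<attr>:'."""
    prefix = attr.lower() + ":"
    return any(line.lower().startswith(prefix) for line in changes.splitlines())

if __name__ == "__main__":
    sample = "homePhone: 0861234567\ncarLicense: 131D16674\n"
    assert attr_in_changes(sample, "homePhone")
    assert not attr_in_changes(sample, "telephoneNumber")
```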
@@ -1,27 +0,0 @@
From be7c2b82958e91ce08775bf6b5da3c311d3b00e5 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 20 Feb 2023 16:14:05 +0100
Subject: [PATCH 2/2] Issue 5647 - Fix unused variable warning from previous
 commit (#5670)

* issue 5647 - memory leak in audit log when adding entries
* Issue 5647 - Fix unused variable warning from previous commit
---
 ldap/servers/slapd/auditlog.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 3128e0497..0597ecc6f 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -254,7 +254,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
     } else {
         /* Return all attributes */
         for (; entry_attr; entry_attr = entry_attr->a_next) {
-            Slapi_Value **vals = attr_get_present_values(entry_attr);
             char *attr = NULL;
 
             slapi_attr_get_type(entry_attr, &attr);
--
2.43.0

@@ -1,147 +0,0 @@
From 692c4cec6cc5c0086cf58f83bcfa690c766c9887 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Fri, 2 Feb 2024 14:14:28 +0100
Subject: [PATCH] Issue 5407 - sync_repl crashes if enabled while dynamic
 plugin is enabled (#5411)

Bug description:
When dynamic plugin is enabled, if a MOD enables sync_repl plugin
then sync_repl init function registers the postop callback
that will be called for the MOD itself while the preop
has not been called.
postop expects preop to be called and so primary operation
to be set. When it is not set it crashes

Fix description:
If the primary operation is not set, just return

relates: #5407
---
 .../suites/syncrepl_plugin/basic_test.py | 68 +++++++++++++++++++
 ldap/servers/plugins/sync/sync_persist.c | 23 ++++++-
 2 files changed, 90 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
index eb3770b78..cdf35eeaa 100644
--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
@@ -592,6 +592,74 @@ def test_sync_repl_cenotaph(topo_m2, request):
 
     request.addfinalizer(fin)
 
+def test_sync_repl_dynamic_plugin(topology, request):
+    """Test sync_repl with dynamic plugin
+
+    :id: d4f84913-c18a-459f-8525-110f610ca9e6
+    :setup: install a standalone instance
+    :steps:
+        1. reset instance to standard (no retroCL, no sync_repl, no dynamic plugin)
+        2. Enable dynamic plugin
+        3. Enable retroCL/content_sync
+        4. Establish a sync_repl req
+    :expectedresults:
+        1. Should succeeds
+        2. Should succeeds
+        3. Should succeeds
+        4. Should succeeds
+    """
+
+    # Reset the instance in a default config
+    # Disable content sync plugin
+    topology.standalone.plugins.disable(name=PLUGIN_REPL_SYNC)
+
+    # Disable retro changelog
+    topology.standalone.plugins.disable(name=PLUGIN_RETRO_CHANGELOG)
+
+    # Disable dynamic plugins
+    topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'off')])
+    topology.standalone.restart()
+
+    # Now start the test
+    # Enable dynamic plugins
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')])
+    except ldap.LDAPError as e:
+        log.error('Failed to enable dynamic plugin! {}'.format(e.args[0]['desc']))
+        assert False
+
+    # Enable retro changelog
+    topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+
+    # Enbale content sync plugin
+    topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)
+
+    # create a sync repl client and wait 5 seconds to be sure it is running
+    sync_repl = Sync_persist(topology.standalone)
+    sync_repl.start()
+    time.sleep(5)
+
+    # create users
+    users = UserAccounts(topology.standalone, DEFAULT_SUFFIX)
+    users_set = []
+    for i in range(10001, 10004):
+        users_set.append(users.create_test_user(uid=i))
+
+    time.sleep(10)
+    # delete users, that automember/memberof will generate nested updates
+    for user in users_set:
+        user.delete()
+    # stop the server to get the sync_repl result set (exit from while loop).
+    # Only way I found to acheive that.
+    # and wait a bit to let sync_repl thread time to set its result before fetching it.
+    topology.standalone.stop()
+    sync_repl.get_result()
+    sync_repl.join()
+    log.info('test_sync_repl_dynamic_plugin: PASS\n')
+
+    # Success
+    log.info('Test complete')
+
 def test_sync_repl_invalid_cookie(topology, request):
     """Test sync_repl with invalid cookie
 
diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c
index d2210b64c..283607361 100644
--- a/ldap/servers/plugins/sync/sync_persist.c
+++ b/ldap/servers/plugins/sync/sync_persist.c
@@ -156,6 +156,17 @@ ignore_op_pl(Slapi_PBlock *pb)
      * This is the same for ident
      */
     prim_op = get_thread_primary_op();
+    if (prim_op == NULL) {
+        /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
+         * The only known case it happens is with dynamic plugin enabled and an
+         * update that enable the sync_repl plugin. In such case sync_repl registers
+         * the postop (sync_update_persist_op) that is called while the preop was not called
+         */
+        slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
+                      "ignore_op_pl - Operation without primary op set (0x%lx)\n",
+                      (ulong) op);
+        return;
+    }
     ident = sync_persist_get_operation_extension(pb);
 
     if (ident) {
@@ -232,8 +243,18 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber
 
 
     prim_op = get_thread_primary_op();
+    if (prim_op == NULL) {
+        /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
+         * The only known case it happens is with dynamic plugin enabled and an
+         * update that enable the sync_repl plugin. In such case sync_repl registers
+         * the postop (sync_update_persist_op) that is called while the preop was not called
+         */
+        slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
+                      "sync_update_persist_op - Operation without primary op set (0x%lx)\n",
+                      (ulong) pb_op);
+        return;
+    }
     ident = sync_persist_get_operation_extension(pb);
-    PR_ASSERT(prim_op);
 
     if ((ident == NULL) && operation_is_flag_set(pb_op, OP_FLAG_NOOP)) {
         /* This happens for URP (add cenotaph, fixup rename, tombstone resurrect)
--
2.43.0

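The sync_persist fix above guards two post-op callbacks against running when the thread's primary operation was never recorded (because the pre-op was skipped while the plugin was being enabled dynamically). The shape of the problem and of the guard can be sketched outside of slapd like this; every name below is invented purely for illustration and is not 389-ds code:

```python
# Illustrative sketch only: a pre-op records the thread's "primary operation",
# the post-op expects it, and the fix is simply to bail out when it is missing.
import threading

_state = threading.local()

def betxn_pre_op(op_id: int) -> None:
    """Normally runs first and records the primary operation for this thread."""
    _state.primary_op = op_id

def post_op(op_id: int) -> None:
    prim_op = getattr(_state, "primary_op", None)
    if prim_op is None:
        # Pre-op never ran (e.g. the plugin was enabled by this very operation):
        # log and return instead of using a missing primary op.
        print(f"post_op {op_id}: no primary op set, ignoring")
        return
    print(f"post_op {op_id}: primary op is {prim_op}")

if __name__ == "__main__":
    post_op(1)          # plugin enabled mid-operation: pre-op was skipped
    betxn_pre_op(2)     # normal flow
    post_op(2)
```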
5307 SOURCES/0003-Ticket-137-Implement-EntryUUID-plugin.patch Normal file
File diff suppressed because it is too large
@@ -1,840 +0,0 @@
From 8dc61a176323f0d41df730abd715ccff3034c2be Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Sun, 27 Nov 2022 09:37:19 -0500
Subject: [PATCH] Issue 5547 - automember plugin improvements

Description:

Rebuild task has the following improvements:

- Only one task allowed at a time
- Do not cleanup previous members by default. Add new CLI option to intentionally
  cleanup memberships before rebuilding from scratch.
- Add better task logging to show fixup progress

To prevent automember from being called in a nested be_txn loop thread storage is
used to check and skip these loops.

relates: https://github.com/389ds/389-ds-base/issues/5547

Reviewed by: spichugi(Thanks!)
---
 .../automember_plugin/automember_mod_test.py | 43 +++-
 ldap/servers/plugins/automember/automember.c | 232 ++++++++++++++----
 ldap/servers/slapd/back-ldbm/ldbm_add.c | 11 +-
 ldap/servers/slapd/back-ldbm/ldbm_delete.c | 10 +-
 ldap/servers/slapd/back-ldbm/ldbm_modify.c | 11 +-
 .../lib389/cli_conf/plugins/automember.py | 10 +-
 src/lib389/lib389/plugins.py | 7 +-
 src/lib389/lib389/tasks.py | 9 +-
 8 files changed, 250 insertions(+), 83 deletions(-)

diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
|
||||
index 8d25384bf..7a0ed3275 100644
|
||||
--- a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
|
||||
+++ b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
|
||||
@@ -5,12 +5,13 @@
|
||||
# License: GPL (version 3 or any later version).
|
||||
# See LICENSE for details.
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
-#
|
||||
+import ldap
|
||||
import logging
|
||||
import pytest
|
||||
import os
|
||||
+import time
|
||||
from lib389.utils import ds_is_older
|
||||
-from lib389._constants import *
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions
|
||||
from lib389.idm.user import UserAccounts
|
||||
from lib389.idm.group import Groups
|
||||
@@ -41,6 +42,11 @@ def automember_fixture(topo, request):
|
||||
user_accts = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
|
||||
user = user_accts.create_test_user()
|
||||
|
||||
+ # Create extra users
|
||||
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
|
||||
+ for i in range(0, 100):
|
||||
+ users.create_test_user(uid=i)
|
||||
+
|
||||
# Create automember definitions and regex rules
|
||||
automember_prop = {
|
||||
'cn': 'testgroup_definition',
|
||||
@@ -59,7 +65,7 @@ def automember_fixture(topo, request):
|
||||
automemberplugin.enable()
|
||||
topo.standalone.restart()
|
||||
|
||||
- return (user, groups)
|
||||
+ return user, groups
|
||||
|
||||
|
||||
def test_mods(automember_fixture, topo):
|
||||
@@ -72,19 +78,21 @@ def test_mods(automember_fixture, topo):
|
||||
2. Update user that should add it to group[1]
|
||||
3. Update user that should add it to group[2]
|
||||
4. Update user that should add it to group[0]
|
||||
- 5. Test rebuild task correctly moves user to group[1]
|
||||
+ 5. Test rebuild task adds user to group[1]
|
||||
+ 6. Test rebuild task cleanups groups and only adds it to group[1]
|
||||
:expectedresults:
|
||||
1. Success
|
||||
2. Success
|
||||
3. Success
|
||||
4. Success
|
||||
5. Success
|
||||
+ 6. Success
|
||||
"""
|
||||
(user, groups) = automember_fixture
|
||||
|
||||
# Update user which should go into group[0]
|
||||
user.replace('cn', 'whatever')
|
||||
- groups[0].is_member(user.dn)
|
||||
+ assert groups[0].is_member(user.dn)
|
||||
if groups[1].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -92,7 +100,7 @@ def test_mods(automember_fixture, topo):
|
||||
|
||||
# Update user0 which should go into group[1]
|
||||
user.replace('cn', 'mark')
|
||||
- groups[1].is_member(user.dn)
|
||||
+ assert groups[1].is_member(user.dn)
|
||||
if groups[0].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -100,7 +108,7 @@ def test_mods(automember_fixture, topo):
|
||||
|
||||
# Update user which should go into group[2]
|
||||
user.replace('cn', 'simon')
|
||||
- groups[2].is_member(user.dn)
|
||||
+ assert groups[2].is_member(user.dn)
|
||||
if groups[0].is_member(user.dn):
|
||||
assert False
|
||||
if groups[1].is_member(user.dn):
|
||||
@@ -108,7 +116,7 @@ def test_mods(automember_fixture, topo):
|
||||
|
||||
# Update user which should go back into group[0] (full circle)
|
||||
user.replace('cn', 'whatever')
|
||||
- groups[0].is_member(user.dn)
|
||||
+ assert groups[0].is_member(user.dn)
|
||||
if groups[1].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -128,12 +136,24 @@ def test_mods(automember_fixture, topo):
|
||||
automemberplugin.enable()
|
||||
topo.standalone.restart()
|
||||
|
||||
- # Run rebuild task
|
||||
+ # Run rebuild task (no cleanup)
|
||||
task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount")
|
||||
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
|
||||
+ # test only one fixup task is allowed at a time
|
||||
+ automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=top")
|
||||
task.wait()
|
||||
|
||||
- # Test membership
|
||||
- groups[1].is_member(user.dn)
|
||||
+ # Test membership (user should still be in groups[0])
|
||||
+ assert groups[1].is_member(user.dn)
|
||||
+ if not groups[0].is_member(user.dn):
|
||||
+ assert False
|
||||
+
|
||||
+ # Run rebuild task with cleanup
|
||||
+ task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount", cleanup=True)
|
||||
+ task.wait()
|
||||
+
|
||||
+ # Test membership (user should only be in groups[1])
|
||||
+ assert groups[1].is_member(user.dn)
|
||||
if groups[0].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -148,4 +168,3 @@ if __name__ == '__main__':
|
||||
# -s for DEBUG mode
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main(["-s", CURRENT_FILE])
|
||||
-
|
||||
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
|
||||
index 3494d0343..419adb052 100644
|
||||
--- a/ldap/servers/plugins/automember/automember.c
|
||||
+++ b/ldap/servers/plugins/automember/automember.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2011 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -14,7 +14,7 @@
|
||||
* Auto Membership Plug-in
|
||||
*/
|
||||
#include "automember.h"
|
||||
-
|
||||
+#include <pthread.h>
|
||||
|
||||
/*
|
||||
* Plug-in globals
|
||||
@@ -22,7 +22,9 @@
|
||||
static PRCList *g_automember_config = NULL;
|
||||
static Slapi_RWLock *g_automember_config_lock = NULL;
|
||||
static uint64_t abort_rebuild_task = 0;
|
||||
-
|
||||
+static pthread_key_t td_automem_block_nested;
|
||||
+static PRBool fixup_running = PR_FALSE;
|
||||
+static PRLock *fixup_lock = NULL;
|
||||
static void *_PluginID = NULL;
|
||||
static Slapi_DN *_PluginDN = NULL;
|
||||
static Slapi_DN *_ConfigAreaDN = NULL;
|
||||
@@ -93,9 +95,43 @@ static void automember_task_export_destructor(Slapi_Task *task);
|
||||
static void automember_task_map_destructor(Slapi_Task *task);
|
||||
|
||||
#define DEFAULT_FILE_MODE PR_IRUSR | PR_IWUSR
|
||||
+#define FIXUP_PROGRESS_LIMIT 1000
|
||||
static uint64_t plugin_do_modify = 0;
|
||||
static uint64_t plugin_is_betxn = 0;
|
||||
|
||||
+/* automember_plugin fixup task and add operations should block other be_txn
|
||||
+ * plugins from calling automember_post_op_mod() */
|
||||
+static int32_t
|
||||
+slapi_td_block_nested_post_op(void)
|
||||
+{
|
||||
+ int32_t val = 12345;
|
||||
+
|
||||
+ if (pthread_setspecific(td_automem_block_nested, (void *)&val) != 0) {
|
||||
+ return PR_FAILURE;
|
||||
+ }
|
||||
+ return PR_SUCCESS;
|
||||
+}
|
||||
+
|
||||
+static int32_t
|
||||
+slapi_td_unblock_nested_post_op(void)
|
||||
+{
|
||||
+ if (pthread_setspecific(td_automem_block_nested, NULL) != 0) {
|
||||
+ return PR_FAILURE;
|
||||
+ }
|
||||
+ return PR_SUCCESS;
|
||||
+}
|
||||
+
|
||||
+static int32_t
|
||||
+slapi_td_is_post_op_nested(void)
|
||||
+{
|
||||
+ int32_t *value = pthread_getspecific(td_automem_block_nested);
|
||||
+
|
||||
+ if (value == NULL) {
|
||||
+ return 0;
|
||||
+ }
|
||||
+ return 1;
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Config cache locking functions
|
||||
*/
|
||||
@@ -317,6 +353,14 @@ automember_start(Slapi_PBlock *pb)
|
||||
return -1;
|
||||
}
|
||||
|
||||
+ if (fixup_lock == NULL) {
|
||||
+ if ((fixup_lock = PR_NewLock()) == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_start - Failed to create fixup lock.\n");
|
||||
+ return -1;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
/*
|
||||
* Get the plug-in target dn from the system
|
||||
* and store it for future use. */
|
||||
@@ -360,6 +404,11 @@ automember_start(Slapi_PBlock *pb)
|
||||
}
|
||||
}
|
||||
|
||||
+ if (pthread_key_create(&td_automem_block_nested, NULL) != 0) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_start - pthread_key_create failed\n");
|
||||
+ }
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_start - ready for service\n");
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
@@ -394,6 +443,8 @@ automember_close(Slapi_PBlock *pb __attribute__((unused)))
|
||||
slapi_sdn_free(&_ConfigAreaDN);
|
||||
slapi_destroy_rwlock(g_automember_config_lock);
|
||||
g_automember_config_lock = NULL;
|
||||
+ PR_DestroyLock(fixup_lock);
|
||||
+ fixup_lock = NULL;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"<-- automember_close\n");
|
||||
@@ -1619,7 +1670,6 @@ out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
-
|
||||
/*
|
||||
* automember_update_member_value()
|
||||
*
|
||||
@@ -1634,7 +1684,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
LDAPMod *mods[2];
|
||||
char *vals[2];
|
||||
char *member_value = NULL;
|
||||
- int rc = 0;
|
||||
+ int rc = LDAP_SUCCESS;
|
||||
Slapi_DN *group_sdn;
|
||||
|
||||
/* First thing check that the group still exists */
|
||||
@@ -1653,7 +1703,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
"automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n",
|
||||
group_dn, rc);
|
||||
}
|
||||
- return rc;
|
||||
+ goto out;
|
||||
}
|
||||
|
||||
/* If grouping_value is dn, we need to fetch the dn instead. */
|
||||
@@ -1879,6 +1929,13 @@ automember_mod_post_op(Slapi_PBlock *pb)
|
||||
PRCList *list = NULL;
|
||||
int rc = SLAPI_PLUGIN_SUCCESS;
|
||||
|
||||
+ if (slapi_td_is_post_op_nested()) {
|
||||
+ /* don't process op twice in the same thread */
|
||||
+ return rc;
|
||||
+ } else {
|
||||
+ slapi_td_block_nested_post_op();
|
||||
+ }
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"--> automember_mod_post_op\n");
|
||||
|
||||
@@ -2005,6 +2062,7 @@ automember_mod_post_op(Slapi_PBlock *pb)
|
||||
}
|
||||
}
|
||||
}
|
||||
+ slapi_td_unblock_nested_post_op();
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"<-- automember_mod_post_op (%d)\n", rc);
|
||||
@@ -2024,6 +2082,13 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"--> automember_add_post_op\n");
|
||||
|
||||
+ if (slapi_td_is_post_op_nested()) {
|
||||
+ /* don't process op twice in the same thread */
|
||||
+ return rc;
|
||||
+ } else {
|
||||
+ slapi_td_block_nested_post_op();
|
||||
+ }
|
||||
+
|
||||
/* Reload config if a config entry was added. */
|
||||
if ((sdn = automember_get_sdn(pb))) {
|
||||
if (automember_dn_is_config(sdn)) {
|
||||
@@ -2039,7 +2104,7 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
|
||||
/* If replication, just bail. */
|
||||
if (automember_isrepl(pb)) {
|
||||
- return SLAPI_PLUGIN_SUCCESS;
|
||||
+ goto bail;
|
||||
}
|
||||
|
||||
/* Get the newly added entry. */
|
||||
@@ -2052,7 +2117,7 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
tombstone);
|
||||
slapi_value_free(&tombstone);
|
||||
if (is_tombstone) {
|
||||
- return SLAPI_PLUGIN_SUCCESS;
|
||||
+ goto bail;
|
||||
}
|
||||
|
||||
/* Check if a config entry applies
|
||||
@@ -2063,21 +2128,19 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
list = PR_LIST_HEAD(g_automember_config);
|
||||
while (list != g_automember_config) {
|
||||
config = (struct configEntry *)list;
|
||||
-
|
||||
/* Does the entry meet scope and filter requirements? */
|
||||
if (slapi_dn_issuffix(slapi_sdn_get_dn(sdn), config->scope) &&
|
||||
- (slapi_filter_test_simple(e, config->filter) == 0)) {
|
||||
+ (slapi_filter_test_simple(e, config->filter) == 0))
|
||||
+ {
|
||||
/* Find out what membership changes are needed and make them. */
|
||||
if (automember_update_membership(config, e, NULL) == SLAPI_PLUGIN_FAILURE) {
|
||||
rc = SLAPI_PLUGIN_FAILURE;
|
||||
break;
|
||||
}
|
||||
}
|
||||
-
|
||||
list = PR_NEXT_LINK(list);
|
||||
}
|
||||
}
|
||||
-
|
||||
automember_config_unlock();
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
@@ -2098,6 +2161,7 @@ bail:
|
||||
slapi_pblock_set(pb, SLAPI_RESULT_CODE, &result);
|
||||
slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, &errtxt);
|
||||
}
|
||||
+ slapi_td_unblock_nested_post_op();
|
||||
|
||||
return rc;
|
||||
}
|
||||
@@ -2138,6 +2202,7 @@ typedef struct _task_data
|
||||
Slapi_DN *base_dn;
|
||||
char *bind_dn;
|
||||
int scope;
|
||||
+ PRBool cleanup;
|
||||
} task_data;
|
||||
|
||||
static void
|
||||
@@ -2270,6 +2335,7 @@ automember_task_abort_thread(void *arg)
|
||||
* basedn: dc=example,dc=com
|
||||
* filter: (uid=*)
|
||||
* scope: sub
|
||||
+ * cleanup: yes/on (default is off)
|
||||
*
|
||||
* basedn and filter are required. If scope is omitted, the default is sub
|
||||
*/
|
||||
@@ -2284,9 +2350,22 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
const char *base_dn;
|
||||
const char *filter;
|
||||
const char *scope;
|
||||
+ const char *cleanup_str;
|
||||
+ PRBool cleanup = PR_FALSE;
|
||||
|
||||
*returncode = LDAP_SUCCESS;
|
||||
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ if (fixup_running) {
|
||||
+ PR_Unlock(fixup_lock);
|
||||
+ *returncode = LDAP_UNWILLING_TO_PERFORM;
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_task_add - there is already a fixup task running\n");
|
||||
+ rv = SLAPI_DSE_CALLBACK_ERROR;
|
||||
+ goto out;
|
||||
+ }
|
||||
+ PR_Unlock(fixup_lock);
|
||||
+
|
||||
/*
|
||||
* Grab the task params
|
||||
*/
|
||||
@@ -2300,6 +2379,12 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
rv = SLAPI_DSE_CALLBACK_ERROR;
|
||||
goto out;
|
||||
}
|
||||
+ if ((cleanup_str = slapi_entry_attr_get_ref(e, "cleanup"))) {
|
||||
+ if (strcasecmp(cleanup_str, "yes") == 0 || strcasecmp(cleanup_str, "on")) {
|
||||
+ cleanup = PR_TRUE;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
scope = slapi_fetch_attr(e, "scope", "sub");
|
||||
/*
|
||||
* setup our task data
|
||||
@@ -2315,6 +2400,7 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
mytaskdata->bind_dn = slapi_ch_strdup(bind_dn);
|
||||
mytaskdata->base_dn = slapi_sdn_new_dn_byval(base_dn);
|
||||
mytaskdata->filter_str = slapi_ch_strdup(filter);
|
||||
+ mytaskdata->cleanup = cleanup;
|
||||
|
||||
if (scope) {
|
||||
if (strcasecmp(scope, "sub") == 0) {
|
||||
@@ -2334,6 +2420,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
task = slapi_plugin_new_task(slapi_entry_get_ndn(e), arg);
|
||||
slapi_task_set_destructor_fn(task, automember_task_destructor);
|
||||
slapi_task_set_data(task, mytaskdata);
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ fixup_running = PR_TRUE;
|
||||
+ PR_Unlock(fixup_lock);
|
||||
/*
|
||||
* Start the task as a separate thread
|
||||
*/
|
||||
@@ -2345,6 +2434,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
"automember_task_add - Unable to create task thread!\n");
|
||||
*returncode = LDAP_OPERATIONS_ERROR;
|
||||
slapi_task_finish(task, *returncode);
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ fixup_running = PR_FALSE;
|
||||
+ PR_Unlock(fixup_lock);
|
||||
rv = SLAPI_DSE_CALLBACK_ERROR;
|
||||
} else {
|
||||
rv = SLAPI_DSE_CALLBACK_OK;
|
||||
@@ -2372,6 +2464,9 @@ automember_rebuild_task_thread(void *arg)
|
||||
PRCList *list = NULL;
|
||||
PRCList *include_list = NULL;
|
||||
int result = 0;
|
||||
+ int64_t fixup_progress_count = 0;
|
||||
+ int64_t fixup_progress_elapsed = 0;
|
||||
+ int64_t fixup_start_time = 0;
|
||||
size_t i = 0;
|
||||
|
||||
/* Reset abort flag */
|
||||
@@ -2380,6 +2475,7 @@ automember_rebuild_task_thread(void *arg)
|
||||
if (!task) {
|
||||
return; /* no task */
|
||||
}
|
||||
+
|
||||
slapi_task_inc_refcount(task);
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_rebuild_task_thread - Refcount incremented.\n");
|
||||
@@ -2393,9 +2489,11 @@ automember_rebuild_task_thread(void *arg)
|
||||
slapi_task_log_status(task, "Automember rebuild task starting (base dn: (%s) filter (%s)...",
|
||||
slapi_sdn_get_dn(td->base_dn), td->filter_str);
|
||||
/*
|
||||
- * Set the bind dn in the local thread data
|
||||
+ * Set the bind dn in the local thread data, and block post op mods
|
||||
*/
|
||||
slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));
|
||||
+ slapi_td_block_nested_post_op();
|
||||
+ fixup_start_time = slapi_current_rel_time_t();
|
||||
/*
|
||||
* Take the config lock now and search the database
|
||||
*/
|
||||
@@ -2426,6 +2524,21 @@ automember_rebuild_task_thread(void *arg)
|
||||
* Loop over the entries
|
||||
*/
|
||||
for (i = 0; entries && (entries[i] != NULL); i++) {
|
||||
+ fixup_progress_count++;
|
||||
+ if (fixup_progress_count % FIXUP_PROGRESS_LIMIT == 0 ) {
|
||||
+ slapi_task_log_notice(task,
|
||||
+ "Processed %ld entries in %ld seconds (+%ld seconds)",
|
||||
+ fixup_progress_count,
|
||||
+ slapi_current_rel_time_t() - fixup_start_time,
|
||||
+ slapi_current_rel_time_t() - fixup_progress_elapsed);
|
||||
+ slapi_task_log_status(task,
|
||||
+ "Processed %ld entries in %ld seconds (+%ld seconds)",
|
||||
+ fixup_progress_count,
|
||||
+ slapi_current_rel_time_t() - fixup_start_time,
|
||||
+ slapi_current_rel_time_t() - fixup_progress_elapsed);
|
||||
+ slapi_task_inc_progress(task);
|
||||
+ fixup_progress_elapsed = slapi_current_rel_time_t();
|
||||
+ }
|
||||
if (slapi_atomic_load_64(&abort_rebuild_task, __ATOMIC_ACQUIRE) == 1) {
|
||||
/* The task was aborted */
|
||||
slapi_task_log_notice(task, "Automember rebuild task was intentionally aborted");
|
||||
@@ -2443,48 +2556,66 @@ automember_rebuild_task_thread(void *arg)
|
||||
if (slapi_dn_issuffix(slapi_entry_get_dn(entries[i]), config->scope) &&
|
||||
(slapi_filter_test_simple(entries[i], config->filter) == 0))
|
||||
{
|
||||
- /* First clear out all the defaults groups */
|
||||
- for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
|
||||
- if ((result = automember_update_member_value(entries[i], config->default_groups[ii],
|
||||
- config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
|
||||
- {
|
||||
- slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from default group (%s) error (%d)",
|
||||
- config->default_groups[ii], result);
|
||||
- slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from default group (%s) error (%d)",
|
||||
- config->default_groups[ii], result);
|
||||
- slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
- "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
|
||||
- config->default_groups[ii], result);
|
||||
- goto out;
|
||||
- }
|
||||
- }
|
||||
-
|
||||
- /* Then clear out the non-default group */
|
||||
- if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
|
||||
- include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
|
||||
- while (include_list != (PRCList *)config->inclusive_rules) {
|
||||
- struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
|
||||
- if ((result = automember_update_member_value(entries[i], slapi_sdn_get_dn(curr_rule->target_group_dn),
|
||||
- config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
|
||||
+ if (td->cleanup) {
|
||||
+
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Cleaning up groups (config %s)\n",
|
||||
+ config->dn);
|
||||
+ /* First clear out all the defaults groups */
|
||||
+ for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
|
||||
+ if ((result = automember_update_member_value(entries[i],
|
||||
+ config->default_groups[ii],
|
||||
+ config->grouping_attr,
|
||||
+ config->grouping_value,
|
||||
+ NULL, DEL_MEMBER)))
|
||||
{
|
||||
slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from group (%s) error (%d)",
|
||||
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ "member from default group (%s) error (%d)",
|
||||
+ config->default_groups[ii], result);
|
||||
slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from group (%s) error (%d)",
|
||||
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ "member from default group (%s) error (%d)",
|
||||
+ config->default_groups[ii], result);
|
||||
slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
|
||||
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ config->default_groups[ii], result);
|
||||
goto out;
|
||||
}
|
||||
- include_list = PR_NEXT_LINK(include_list);
|
||||
}
|
||||
+
|
||||
+ /* Then clear out the non-default group */
|
||||
+ if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
|
||||
+ include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
|
||||
+ while (include_list != (PRCList *)config->inclusive_rules) {
|
||||
+ struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
|
||||
+ if ((result = automember_update_member_value(entries[i],
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn),
|
||||
+ config->grouping_attr,
|
||||
+ config->grouping_value,
|
||||
+ NULL, DEL_MEMBER)))
|
||||
+ {
|
||||
+ slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
|
||||
+ "member from group (%s) error (%d)",
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
|
||||
+ "member from group (%s) error (%d)",
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ goto out;
|
||||
+ }
|
||||
+ include_list = PR_NEXT_LINK(include_list);
|
||||
+ }
|
||||
+ }
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Finished cleaning up groups (config %s)\n",
|
||||
+ config->dn);
|
||||
}
|
||||
|
||||
/* Update the memberships for this entries */
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Updating membership (config %s)\n",
|
||||
+ config->dn);
|
||||
if (slapi_is_shutting_down() ||
|
||||
automember_update_membership(config, entries[i], NULL) == SLAPI_PLUGIN_FAILURE)
|
||||
{
|
||||
@@ -2508,15 +2639,22 @@ out:
|
||||
slapi_task_log_notice(task, "Automember rebuild task aborted. Error (%d)", result);
|
||||
slapi_task_log_status(task, "Automember rebuild task aborted. Error (%d)", result);
|
||||
} else {
|
||||
- slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
|
||||
- slapi_task_log_status(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
|
||||
+ slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
|
||||
+ (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
|
||||
+ slapi_task_log_status(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
|
||||
+ (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
|
||||
}
|
||||
slapi_task_inc_progress(task);
|
||||
slapi_task_finish(task, result);
|
||||
slapi_task_dec_refcount(task);
|
||||
slapi_atomic_store_64(&abort_rebuild_task, 0, __ATOMIC_RELEASE);
|
||||
+ slapi_td_unblock_nested_post_op();
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ fixup_running = PR_FALSE;
|
||||
+ PR_Unlock(fixup_lock);
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
- "automember_rebuild_task_thread - Refcount decremented.\n");
|
||||
+ "automember_rebuild_task_thread - task finished, refcount decremented.\n");
|
||||
}
|
||||
|
||||
/*
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
index ba2d73a84..ce4c314a1 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -1264,10 +1264,6 @@ ldbm_back_add(Slapi_PBlock *pb)
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
if (addingentry_id_assigned) {
|
||||
next_id_return(be, addingentry->ep_id);
|
||||
}
|
||||
@@ -1376,6 +1372,11 @@ diskfull_return:
|
||||
if (!not_an_error) {
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
}
|
||||
+
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
}
|
||||
|
||||
common_return:
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
|
||||
index de23190c3..27f0ac58a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
|
||||
@@ -1407,11 +1407,6 @@ commit_return:
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
-
|
||||
if (tombstone) {
|
||||
if (cache_is_in_cache(&inst->inst_cache, tombstone)) {
|
||||
tomb_ep_id = tombstone->ep_id; /* Otherwise, tombstone might have been freed. */
|
||||
@@ -1496,6 +1491,11 @@ error_return:
|
||||
conn_id, op_id, parent_modify_c.old_entry, parent_modify_c.new_entry, myrc);
|
||||
}
|
||||
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
+
|
||||
common_return:
|
||||
if (orig_entry) {
|
||||
/* NOTE: #define SLAPI_DELETE_BEPREOP_ENTRY SLAPI_ENTRY_PRE_OP */
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
||||
index 537369055..64b293001 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -1043,11 +1043,6 @@ ldbm_back_modify(Slapi_PBlock *pb)
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
-
|
||||
if (postentry != NULL) {
|
||||
slapi_entry_free(postentry);
|
||||
postentry = NULL;
|
||||
@@ -1103,6 +1098,10 @@ error_return:
|
||||
if (!not_an_error) {
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
}
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
}
|
||||
|
||||
/* if ec is in cache, remove it, then add back e if we still have it */
|
||||
diff --git a/src/lib389/lib389/cli_conf/plugins/automember.py b/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
index 15b00c633..568586ad8 100644
|
||||
--- a/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
+++ b/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
@@ -155,7 +155,7 @@ def fixup(inst, basedn, log, args):
|
||||
log.info('Attempting to add task entry... This will fail if Automembership plug-in is not enabled.')
|
||||
if not plugin.status():
|
||||
log.error("'%s' is disabled. Rebuild membership task can't be executed" % plugin.rdn)
|
||||
- fixup_task = plugin.fixup(args.DN, args.filter)
|
||||
+ fixup_task = plugin.fixup(args.DN, args.filter, args.cleanup)
|
||||
if args.wait:
|
||||
log.info(f'Waiting for fixup task "{fixup_task.dn}" to complete. You can safely exit by pressing Control C ...')
|
||||
fixup_task.wait(timeout=args.timeout)
|
||||
@@ -225,8 +225,8 @@ def create_parser(subparsers):
|
||||
subcommands = automember.add_subparsers(help='action')
|
||||
add_generic_plugin_parsers(subcommands, AutoMembershipPlugin)
|
||||
|
||||
- list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
|
||||
- subcommands_list = list.add_subparsers(help='action')
|
||||
+ automember_list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
|
||||
+ subcommands_list = automember_list.add_subparsers(help='action')
|
||||
list_definitions = subcommands_list.add_parser('definitions', help='Lists Automembership definitions.')
|
||||
list_definitions.set_defaults(func=definition_list)
|
||||
list_regexes = subcommands_list.add_parser('regexes', help='List Automembership regex rules.')
|
||||
@@ -269,6 +269,8 @@ def create_parser(subparsers):
|
||||
fixup_task.add_argument('-f', '--filter', required=True, help='Sets the LDAP filter for entries to fix up')
|
||||
fixup_task.add_argument('-s', '--scope', required=True, choices=['sub', 'base', 'one'], type=str.lower,
|
||||
help='Sets the LDAP search scope for entries to fix up')
|
||||
+ fixup_task.add_argument('--cleanup', action='store_true',
|
||||
+ help="Clean up previous group memberships before rebuilding")
|
||||
fixup_task.add_argument('--wait', action='store_true',
|
||||
help="Wait for the task to finish, this could take a long time")
|
||||
fixup_task.add_argument('--timeout', default=0, type=int,
|
||||
@@ -279,7 +281,7 @@ def create_parser(subparsers):
|
||||
fixup_status.add_argument('--dn', help="The task entry's DN")
|
||||
fixup_status.add_argument('--show-log', action='store_true', help="Display the task log")
|
||||
fixup_status.add_argument('--watch', action='store_true',
|
||||
- help="Watch the task's status and wait for it to finish")
|
||||
+ help="Watch the task's status and wait for it to finish")
|
||||
|
||||
abort_fixup = subcommands.add_parser('abort-fixup', help='Abort the rebuild membership task.')
|
||||
abort_fixup.set_defaults(func=abort)
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 52691a44c..a1ad0a45b 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -1141,13 +1141,15 @@ class AutoMembershipPlugin(Plugin):
|
||||
def __init__(self, instance, dn="cn=Auto Membership Plugin,cn=plugins,cn=config"):
|
||||
super(AutoMembershipPlugin, self).__init__(instance, dn)
|
||||
|
||||
- def fixup(self, basedn, _filter=None):
|
||||
+ def fixup(self, basedn, _filter=None, cleanup=False):
|
||||
"""Create an automember rebuild membership task
|
||||
|
||||
:param basedn: Basedn to fix up
|
||||
:type basedn: str
|
||||
:param _filter: a filter for entries to fix up
|
||||
:type _filter: str
|
||||
+ :param cleanup: cleanup old group memberships
|
||||
+ :type cleanup: boolean
|
||||
|
||||
:returns: an instance of Task(DSLdapObject)
|
||||
"""
|
||||
@@ -1156,6 +1158,9 @@ class AutoMembershipPlugin(Plugin):
|
||||
task_properties = {'basedn': basedn}
|
||||
if _filter is not None:
|
||||
task_properties['filter'] = _filter
|
||||
+ if cleanup:
|
||||
+ task_properties['cleanup'] = "yes"
|
||||
+
|
||||
task.create(properties=task_properties)
|
||||
|
||||
return task
|
||||
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
|
||||
index 1a16bbb83..193805780 100644
|
||||
--- a/src/lib389/lib389/tasks.py
|
||||
+++ b/src/lib389/lib389/tasks.py
|
||||
@@ -1006,12 +1006,13 @@ class Tasks(object):
|
||||
return exitCode
|
||||
|
||||
def automemberRebuild(self, suffix=DEFAULT_SUFFIX, scope='sub',
|
||||
- filterstr='objectclass=top', args=None):
|
||||
+ filterstr='objectclass=top', cleanup=False, args=None):
|
||||
'''
|
||||
- @param suffix - The suffix the task should examine - defualt is
|
||||
+ @param suffix - The suffix the task should examine - default is
|
||||
"dc=example,dc=com"
|
||||
@param scope - The scope of the search to find entries
|
||||
- @param fitlerstr - THe search filter to find entries
|
||||
+ @param fitlerstr - The search filter to find entries
|
||||
+ @param cleanup - reset/clear the old group memberships prior to rebuilding
|
||||
@param args - is a dictionary that contains modifier of the task
|
||||
wait: True/[False] - If True, waits for the completion of
|
||||
the task before to return
|
||||
@@ -1027,6 +1028,8 @@ class Tasks(object):
|
||||
entry.setValues('basedn', suffix)
|
||||
entry.setValues('filter', filterstr)
|
||||
entry.setValues('scope', scope)
|
||||
+ if cleanup:
|
||||
+ entry.setValues('cleanup', 'yes')
|
||||
|
||||
# start the task and possibly wait for task completion
|
||||
try:
|
||||
--
|
||||
2.43.0
|
||||
|
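The patch above threads a new cleanup switch from the CLI parser down to the lib389 task entry (cleanup: yes). A minimal sketch of driving the rebuild with that option, assuming a running, authenticated DirSrv handle named inst; the filter value and helper name are illustrative, only AutoMembershipPlugin.fixup() and its cleanup argument come from the patch:

    # Sketch only: assumes a connected DirSrv handle `inst`.
    from lib389._constants import DEFAULT_SUFFIX
    from lib389.plugins import AutoMembershipPlugin

    def rebuild_membership(inst, scope_filter='(objectClass=posixAccount)'):
        """Run the automember rebuild task, clearing old memberships first."""
        plugin = AutoMembershipPlugin(inst)
        if not plugin.status():
            plugin.enable()              # the task is refused while the plugin is disabled
        # cleanup=True makes lib389 add 'cleanup: yes' to the task entry
        task = plugin.fixup(DEFAULT_SUFFIX, _filter=scope_filter, cleanup=True)
        task.wait(timeout=0)             # block until the rebuild finishes
        return task.get_exit_code()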
@ -0,0 +1,373 @@
|
||||
From c167d6127db45d8426437c273060c8c8f7fbcb9b Mon Sep 17 00:00:00 2001
|
||||
From: Firstyear <william.brown@suse.com>
|
||||
Date: Wed, 23 Sep 2020 09:19:34 +1000
|
||||
Subject: [PATCH 04/12] Ticket 4326 - entryuuid fixup did not work correctly
|
||||
(#4328)
|
||||
|
||||
Bug Description: due to an oversight in how fixup tasks
|
||||
worked, the entryuuid fixup task did not work correctly and
|
||||
would not persist over restarts.
|
||||
|
||||
Fix Description: Correctly implement entryuuid fixup.
|
||||
|
||||
fixes: #4326
|
||||
|
||||
Author: William Brown <william@blackhats.net.au>
|
||||
|
||||
Review by: mreynolds (thanks!)
|
||||
---
|
||||
.../tests/suites/entryuuid/basic_test.py | 24 +++-
|
||||
src/plugins/entryuuid/src/lib.rs | 43 ++++++-
|
||||
src/slapi_r_plugin/src/constants.rs | 5 +
|
||||
src/slapi_r_plugin/src/entry.rs | 8 ++
|
||||
src/slapi_r_plugin/src/lib.rs | 2 +
|
||||
src/slapi_r_plugin/src/macros.rs | 2 +-
|
||||
src/slapi_r_plugin/src/modify.rs | 118 ++++++++++++++++++
|
||||
src/slapi_r_plugin/src/pblock.rs | 7 ++
|
||||
src/slapi_r_plugin/src/value.rs | 4 +
|
||||
9 files changed, 206 insertions(+), 7 deletions(-)
|
||||
create mode 100644 src/slapi_r_plugin/src/modify.rs
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/entryuuid/basic_test.py b/dirsrvtests/tests/suites/entryuuid/basic_test.py
|
||||
index beb73701d..4d8a40909 100644
|
||||
--- a/dirsrvtests/tests/suites/entryuuid/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/entryuuid/basic_test.py
|
||||
@@ -12,6 +12,7 @@ import time
|
||||
import shutil
|
||||
from lib389.idm.user import nsUserAccounts, UserAccounts
|
||||
from lib389.idm.account import Accounts
|
||||
+from lib389.idm.domain import Domain
|
||||
from lib389.topologies import topology_st as topology
|
||||
from lib389.backend import Backends
|
||||
from lib389.paths import Paths
|
||||
@@ -190,6 +191,7 @@ def test_entryuuid_fixup_task(topology):
|
||||
3. Enable the entryuuid plugin
|
||||
4. Run the fixup
|
||||
5. Assert the entryuuid now exists
|
||||
+ 6. Restart and check they persist
|
||||
|
||||
:expectedresults:
|
||||
1. Success
|
||||
@@ -197,6 +199,7 @@ def test_entryuuid_fixup_task(topology):
|
||||
3. Success
|
||||
4. Success
|
||||
5. Suddenly EntryUUID!
|
||||
+ 6. Still has EntryUUID!
|
||||
"""
|
||||
# 1. Disable the plugin
|
||||
plug = EntryUUIDPlugin(topology.standalone)
|
||||
@@ -220,7 +223,22 @@ def test_entryuuid_fixup_task(topology):
|
||||
assert(task.is_complete() and task.get_exit_code() == 0)
|
||||
topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,))
|
||||
|
||||
- # 5. Assert the uuid.
|
||||
- euuid = account.get_attr_val_utf8('entryUUID')
|
||||
- assert(euuid is not None)
|
||||
+ # 5.1 Assert the uuid on the user.
|
||||
+ euuid_user = account.get_attr_val_utf8('entryUUID')
|
||||
+ assert(euuid_user is not None)
|
||||
+
|
||||
+ # 5.2 Assert it on the domain entry.
|
||||
+ domain = Domain(topology.standalone, dn=DEFAULT_SUFFIX)
|
||||
+ euuid_domain = domain.get_attr_val_utf8('entryUUID')
|
||||
+ assert(euuid_domain is not None)
|
||||
+
|
||||
+ # Assert it persists after a restart.
|
||||
+ topology.standalone.restart()
|
||||
+ # 6.1 Assert the uuid on the user.
|
||||
+ euuid_user_2 = account.get_attr_val_utf8('entryUUID')
|
||||
+ assert(euuid_user_2 == euuid_user)
|
||||
+
|
||||
+ # 6.2 Assert it on the domain entry.
|
||||
+ euuid_domain_2 = domain.get_attr_val_utf8('entryUUID')
|
||||
+ assert(euuid_domain_2 == euuid_domain)
|
||||
|
||||
diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
|
||||
index 6b5e8d1bb..92977db05 100644
|
||||
--- a/src/plugins/entryuuid/src/lib.rs
|
||||
+++ b/src/plugins/entryuuid/src/lib.rs
|
||||
@@ -187,9 +187,46 @@ impl SlapiPlugin3 for EntryUuid {
|
||||
}
|
||||
}
|
||||
|
||||
-pub fn entryuuid_fixup_mapfn(mut e: EntryRef, _data: &()) -> Result<(), PluginError> {
|
||||
- assign_uuid(&mut e);
|
||||
- Ok(())
|
||||
+pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError> {
|
||||
+ /* Supply a modification to the entry. */
|
||||
+ let sdn = e.get_sdnref();
|
||||
+
|
||||
+ /* Sanity check that entryuuid doesn't already exist */
|
||||
+ if e.contains_attr("entryUUID") {
|
||||
+ log_error!(
|
||||
+ ErrorLevel::Trace,
|
||||
+ "skipping fixup for -> {}",
|
||||
+ sdn.to_dn_string()
|
||||
+ );
|
||||
+ return Ok(());
|
||||
+ }
|
||||
+
|
||||
+ // Setup the modifications
|
||||
+ let mut mods = SlapiMods::new();
|
||||
+
|
||||
+ let u: Uuid = Uuid::new_v4();
|
||||
+ let uuid_value = Value::from(&u);
|
||||
+ let values: ValueArray = std::iter::once(uuid_value).collect();
|
||||
+ mods.append(ModType::Replace, "entryUUID", values);
|
||||
+
|
||||
+ /* */
|
||||
+ let lmod = Modify::new(&sdn, mods, plugin_id())?;
|
||||
+
|
||||
+ match lmod.execute() {
|
||||
+ Ok(_) => {
|
||||
+ log_error!(ErrorLevel::Trace, "fixed-up -> {}", sdn.to_dn_string());
|
||||
+ Ok(())
|
||||
+ }
|
||||
+ Err(e) => {
|
||||
+ log_error!(
|
||||
+ ErrorLevel::Error,
|
||||
+ "entryuuid_fixup_mapfn -> fixup failed -> {} {:?}",
|
||||
+ sdn.to_dn_string(),
|
||||
+ e
|
||||
+ );
|
||||
+ Err(PluginError::GenericFailure)
|
||||
+ }
|
||||
+ }
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs
|
||||
index cf76ccbdb..34845c2f4 100644
|
||||
--- a/src/slapi_r_plugin/src/constants.rs
|
||||
+++ b/src/slapi_r_plugin/src/constants.rs
|
||||
@@ -5,6 +5,11 @@ use std::os::raw::c_char;
|
||||
pub const LDAP_SUCCESS: i32 = 0;
|
||||
pub const PLUGIN_DEFAULT_PRECEDENCE: i32 = 50;
|
||||
|
||||
+#[repr(i32)]
|
||||
+pub enum OpFlags {
|
||||
+ ByassReferrals = 0x0040_0000,
|
||||
+}
|
||||
+
|
||||
#[repr(i32)]
|
||||
/// The set of possible function handles we can register via the pblock. These
|
||||
/// values correspond to slapi-plugin.h.
|
||||
diff --git a/src/slapi_r_plugin/src/entry.rs b/src/slapi_r_plugin/src/entry.rs
|
||||
index 034efe692..22ae45189 100644
|
||||
--- a/src/slapi_r_plugin/src/entry.rs
|
||||
+++ b/src/slapi_r_plugin/src/entry.rs
|
||||
@@ -70,6 +70,14 @@ impl EntryRef {
|
||||
}
|
||||
}
|
||||
|
||||
+ pub fn contains_attr(&self, name: &str) -> bool {
|
||||
+ let cname = CString::new(name).expect("invalid attr name");
|
||||
+ let va = unsafe { slapi_entry_attr_get_valuearray(self.raw_e, cname.as_ptr()) };
|
||||
+
|
||||
+ // If it's null, it's not present, so flip the logic.
|
||||
+ !va.is_null()
|
||||
+ }
|
||||
+
|
||||
pub fn add_value(&mut self, a: &str, v: &ValueRef) {
|
||||
// turn the attr to a c string.
|
||||
// TODO FIX
|
||||
diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs
|
||||
index d7fc22e52..076907bae 100644
|
||||
--- a/src/slapi_r_plugin/src/lib.rs
|
||||
+++ b/src/slapi_r_plugin/src/lib.rs
|
||||
@@ -9,6 +9,7 @@ pub mod dn;
|
||||
pub mod entry;
|
||||
pub mod error;
|
||||
pub mod log;
|
||||
+pub mod modify;
|
||||
pub mod pblock;
|
||||
pub mod plugin;
|
||||
pub mod search;
|
||||
@@ -24,6 +25,7 @@ pub mod prelude {
|
||||
pub use crate::entry::EntryRef;
|
||||
pub use crate::error::{DseCallbackStatus, LDAPError, PluginError, RPluginError};
|
||||
pub use crate::log::{log_error, ErrorLevel};
|
||||
+ pub use crate::modify::{ModType, Modify, SlapiMods};
|
||||
pub use crate::pblock::{Pblock, PblockRef};
|
||||
pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3};
|
||||
pub use crate::search::{Search, SearchScope};
|
||||
diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs
|
||||
index 030449632..bc8dfa60f 100644
|
||||
--- a/src/slapi_r_plugin/src/macros.rs
|
||||
+++ b/src/slapi_r_plugin/src/macros.rs
|
||||
@@ -825,7 +825,7 @@ macro_rules! slapi_r_search_callback_mapfn {
|
||||
let e = EntryRef::new(raw_e);
|
||||
let data_ptr = raw_data as *const _;
|
||||
let data = unsafe { &(*data_ptr) };
|
||||
- match $cb_mod_ident(e, data) {
|
||||
+ match $cb_mod_ident(&e, data) {
|
||||
Ok(_) => LDAPError::Success as i32,
|
||||
Err(e) => e as i32,
|
||||
}
|
||||
diff --git a/src/slapi_r_plugin/src/modify.rs b/src/slapi_r_plugin/src/modify.rs
|
||||
new file mode 100644
|
||||
index 000000000..30864377a
|
||||
--- /dev/null
|
||||
+++ b/src/slapi_r_plugin/src/modify.rs
|
||||
@@ -0,0 +1,118 @@
|
||||
+use crate::constants::OpFlags;
|
||||
+use crate::dn::SdnRef;
|
||||
+use crate::error::{LDAPError, PluginError};
|
||||
+use crate::pblock::Pblock;
|
||||
+use crate::plugin::PluginIdRef;
|
||||
+use crate::value::{slapi_value, ValueArray};
|
||||
+
|
||||
+use std::ffi::CString;
|
||||
+use std::ops::{Deref, DerefMut};
|
||||
+use std::os::raw::c_char;
|
||||
+
|
||||
+extern "C" {
|
||||
+ fn slapi_modify_internal_set_pb_ext(
|
||||
+ pb: *const libc::c_void,
|
||||
+ dn: *const libc::c_void,
|
||||
+ mods: *const *const libc::c_void,
|
||||
+ controls: *const *const libc::c_void,
|
||||
+ uniqueid: *const c_char,
|
||||
+ plugin_ident: *const libc::c_void,
|
||||
+ op_flags: i32,
|
||||
+ );
|
||||
+ fn slapi_modify_internal_pb(pb: *const libc::c_void);
|
||||
+ fn slapi_mods_free(smods: *const *const libc::c_void);
|
||||
+ fn slapi_mods_get_ldapmods_byref(smods: *const libc::c_void) -> *const *const libc::c_void;
|
||||
+ fn slapi_mods_new() -> *const libc::c_void;
|
||||
+ fn slapi_mods_add_mod_values(
|
||||
+ smods: *const libc::c_void,
|
||||
+ mtype: i32,
|
||||
+ attrtype: *const c_char,
|
||||
+ value: *const *const slapi_value,
|
||||
+ );
|
||||
+}
|
||||
+
|
||||
+#[derive(Debug)]
|
||||
+#[repr(i32)]
|
||||
+pub enum ModType {
|
||||
+ Add = 0,
|
||||
+ Delete = 1,
|
||||
+ Replace = 2,
|
||||
+}
|
||||
+
|
||||
+pub struct SlapiMods {
|
||||
+ inner: *const libc::c_void,
|
||||
+ vas: Vec<ValueArray>,
|
||||
+}
|
||||
+
|
||||
+impl Drop for SlapiMods {
|
||||
+ fn drop(&mut self) {
|
||||
+ unsafe { slapi_mods_free(&self.inner as *const *const libc::c_void) }
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+impl SlapiMods {
|
||||
+ pub fn new() -> Self {
|
||||
+ SlapiMods {
|
||||
+ inner: unsafe { slapi_mods_new() },
|
||||
+ vas: Vec::new(),
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ pub fn append(&mut self, modtype: ModType, attrtype: &str, values: ValueArray) {
|
||||
+ // We can get the value array pointer here to push to the inner
|
||||
+ // because the internal pointers won't change even when we push them
|
||||
+ // to the list to preserve their lifetime.
|
||||
+ let vas = values.as_ptr();
|
||||
+ // We take ownership of this to ensure it lives at least as long as our
|
||||
+ // slapimods structure.
|
||||
+ self.vas.push(values);
|
||||
+ // now we can insert these into the mods.
|
||||
+ let c_attrtype = CString::new(attrtype).expect("failed to allocate attrtype");
|
||||
+ unsafe { slapi_mods_add_mod_values(self.inner, modtype as i32, c_attrtype.as_ptr(), vas) };
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+pub struct Modify {
|
||||
+ pb: Pblock,
|
||||
+ mods: SlapiMods,
|
||||
+}
|
||||
+
|
||||
+pub struct ModifyResult {
|
||||
+ pb: Pblock,
|
||||
+}
|
||||
+
|
||||
+impl Modify {
|
||||
+ pub fn new(dn: &SdnRef, mods: SlapiMods, plugin_id: PluginIdRef) -> Result<Self, PluginError> {
|
||||
+ let pb = Pblock::new();
|
||||
+ let lmods = unsafe { slapi_mods_get_ldapmods_byref(mods.inner) };
|
||||
+ // OP_FLAG_ACTION_LOG_ACCESS
|
||||
+
|
||||
+ unsafe {
|
||||
+ slapi_modify_internal_set_pb_ext(
|
||||
+ pb.deref().as_ptr(),
|
||||
+ dn.as_ptr(),
|
||||
+ lmods,
|
||||
+ std::ptr::null(),
|
||||
+ std::ptr::null(),
|
||||
+ plugin_id.raw_pid,
|
||||
+ OpFlags::ByassReferrals as i32,
|
||||
+ )
|
||||
+ };
|
||||
+
|
||||
+ Ok(Modify { pb, mods })
|
||||
+ }
|
||||
+
|
||||
+ pub fn execute(self) -> Result<ModifyResult, LDAPError> {
|
||||
+ let Modify {
|
||||
+ mut pb,
|
||||
+ mods: _mods,
|
||||
+ } = self;
|
||||
+ unsafe { slapi_modify_internal_pb(pb.deref().as_ptr()) };
|
||||
+ let result = pb.get_op_result();
|
||||
+
|
||||
+ match result {
|
||||
+ 0 => Ok(ModifyResult { pb }),
|
||||
+ _e => Err(LDAPError::from(result)),
|
||||
+ }
|
||||
+ }
|
||||
+}
|
||||
diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs
|
||||
index b69ce1680..0f83914f3 100644
|
||||
--- a/src/slapi_r_plugin/src/pblock.rs
|
||||
+++ b/src/slapi_r_plugin/src/pblock.rs
|
||||
@@ -11,6 +11,7 @@ pub use crate::log::{log_error, ErrorLevel};
|
||||
extern "C" {
|
||||
fn slapi_pblock_set(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
|
||||
fn slapi_pblock_get(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
|
||||
+ fn slapi_pblock_destroy(pb: *const libc::c_void);
|
||||
fn slapi_pblock_new() -> *const libc::c_void;
|
||||
}
|
||||
|
||||
@@ -41,6 +42,12 @@ impl DerefMut for Pblock {
|
||||
}
|
||||
}
|
||||
|
||||
+impl Drop for Pblock {
|
||||
+ fn drop(&mut self) {
|
||||
+ unsafe { slapi_pblock_destroy(self.value.raw_pb) }
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
pub struct PblockRef {
|
||||
raw_pb: *const libc::c_void,
|
||||
}
|
||||
diff --git a/src/slapi_r_plugin/src/value.rs b/src/slapi_r_plugin/src/value.rs
|
||||
index 5a40dd279..46246837a 100644
|
||||
--- a/src/slapi_r_plugin/src/value.rs
|
||||
+++ b/src/slapi_r_plugin/src/value.rs
|
||||
@@ -96,6 +96,10 @@ impl ValueArray {
|
||||
let bs = vs.into_boxed_slice();
|
||||
Box::leak(bs) as *const _ as *const *const slapi_value
|
||||
}
|
||||
+
|
||||
+ pub fn as_ptr(&self) -> *const *const slapi_value {
|
||||
+ self.data.as_ptr() as *const *const slapi_value
|
||||
+ }
|
||||
}
|
||||
|
||||
impl FromIterator<Value> for ValueArray {
|
||||
--
|
||||
2.26.3
|
||||
|
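The fix above makes the fixup callback apply a real internal MOD_REPLACE of entryUUID (via the new SlapiMods/Modify wrappers) instead of only touching the in-memory entry, which is why the generated values now survive a restart. A minimal lib389-style sketch of exercising the repaired task, assuming a running DirSrv handle inst and that EntryUUIDPlugin.fixup() takes a base DN and an optional filter as the test above implies; the filter string is illustrative:

    # Sketch only: assumes a connected DirSrv handle `inst` built with the Rust plugins.
    from lib389._constants import DEFAULT_SUFFIX
    from lib389.idm.domain import Domain
    from lib389.plugins import EntryUUIDPlugin

    def fixup_entryuuid_and_verify(inst):
        plug = EntryUUIDPlugin(inst)
        plug.enable()
        inst.restart()                                   # plugin changes need a restart
        task = plug.fixup(DEFAULT_SUFFIX, '(objectClass=*)')
        task.wait()
        assert task.get_exit_code() == 0
        domain = Domain(inst, dn=DEFAULT_SUFFIX)
        before = domain.get_attr_val_utf8('entryUUID')
        assert before is not None
        inst.restart()                                   # the value must persist now
        assert domain.get_attr_val_utf8('entryUUID') == before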
@ -1,83 +0,0 @@
|
||||
From 9319d5b022918f14cacb00e3faef85a6ab730a26 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Tue, 27 Feb 2024 16:30:47 -0800
|
||||
Subject: [PATCH] Issue 3527 - Support HAProxy and Instance on the same machine
|
||||
configuration (#6107)
|
||||
|
||||
Description: Improve how we handle HAProxy connections to work better when
|
||||
the DS and HAProxy are on the same machine.
|
||||
Ensure the client and header destination IPs are checked against the trusted IP list.
|
||||
|
||||
Additionally, this change will also allow a configuration where
|
||||
HAProxy listens on a different subnet than the one used to forward the request.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/3527
|
||||
|
||||
Reviewed by: @progier389, @jchapma (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/connection.c | 35 +++++++++++++++++++++++++--------
|
||||
1 file changed, 27 insertions(+), 8 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
|
||||
index d28a39bf7..10a8cc577 100644
|
||||
--- a/ldap/servers/slapd/connection.c
|
||||
+++ b/ldap/servers/slapd/connection.c
|
||||
@@ -1187,6 +1187,8 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
|
||||
char str_ip[INET6_ADDRSTRLEN + 1] = {0};
|
||||
char str_haproxy_ip[INET6_ADDRSTRLEN + 1] = {0};
|
||||
char str_haproxy_destip[INET6_ADDRSTRLEN + 1] = {0};
|
||||
+ int trusted_matches_ip_found = 0;
|
||||
+ int trusted_matches_destip_found = 0;
|
||||
struct berval **bvals = NULL;
|
||||
int proxy_connection = 0;
|
||||
|
||||
@@ -1245,21 +1247,38 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
|
||||
normalize_IPv4(conn->cin_addr, buf_ip, sizeof(buf_ip), str_ip, sizeof(str_ip));
|
||||
normalize_IPv4(&pr_netaddr_dest, buf_haproxy_destip, sizeof(buf_haproxy_destip),
|
||||
str_haproxy_destip, sizeof(str_haproxy_destip));
|
||||
+ size_t ip_len = strlen(buf_ip);
|
||||
+ size_t destip_len = strlen(buf_haproxy_destip);
|
||||
|
||||
/* Now, reset RC and set it to 0 only if a match is found */
|
||||
haproxy_rc = -1;
|
||||
|
||||
- /* Allow only:
|
||||
- * Trusted IP == Original Client IP == HAProxy Header Destination IP */
|
||||
+ /*
|
||||
+ * We need to allow a configuration where DS instance and HAProxy are on the same machine.
|
||||
+ * In this case, we need to check if
|
||||
+ * the HAProxy client IP (which will be a loopback address) matches one of the trusted IP addresses,
|
||||
+ * while still checking that
|
||||
+ * the HAProxy header destination IP address matches one of the trusted IP addresses.
|
||||
+ * Additionally, this change will also allow a configuration with
|
||||
+ * HAProxy listening on a different subnet than the one used to forward the request.
|
||||
+ */
|
||||
for (size_t i = 0; bvals[i] != NULL; ++i) {
|
||||
- if ((strlen(bvals[i]->bv_val) == strlen(buf_ip)) &&
|
||||
- (strlen(bvals[i]->bv_val) == strlen(buf_haproxy_destip)) &&
|
||||
- (strncasecmp(bvals[i]->bv_val, buf_ip, strlen(buf_ip)) == 0) &&
|
||||
- (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, strlen(buf_haproxy_destip)) == 0)) {
|
||||
- haproxy_rc = 0;
|
||||
- break;
|
||||
+ size_t bval_len = strlen(bvals[i]->bv_val);
|
||||
+
|
||||
+ /* Check if the Client IP (HAProxy's machine IP) address matches the trusted IP address */
|
||||
+ if (!trusted_matches_ip_found) {
|
||||
+ trusted_matches_ip_found = (bval_len == ip_len) && (strncasecmp(bvals[i]->bv_val, buf_ip, ip_len) == 0);
|
||||
+ }
|
||||
+ /* Check if the HAProxy header destination IP address matches the trusted IP address */
|
||||
+ if (!trusted_matches_destip_found) {
|
||||
+ trusted_matches_destip_found = (bval_len == destip_len) && (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, destip_len) == 0);
|
||||
}
|
||||
}
|
||||
+
|
||||
+ if (trusted_matches_ip_found && trusted_matches_destip_found) {
|
||||
+ haproxy_rc = 0;
|
||||
+ }
|
||||
+
|
||||
if (haproxy_rc == -1) {
|
||||
slapi_log_err(SLAPI_LOG_CONNS, "connection_read_operation", "HAProxy header received from unknown source.\n");
|
||||
disconnect_server_nomutex(conn, conn->c_connid, -1, SLAPD_DISCONNECT_PROXY_UNKNOWN, EPROTO);
|
||||
--
|
||||
2.45.0
|
||||
|
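The rewritten loop above accepts the PROXY header when the connecting client IP and the header's destination IP each match some entry in the trusted IP list, rather than requiring a single entry to equal both. A standalone Python sketch of that acceptance rule, illustrative only (the server compares normalized address strings in C):

    def haproxy_header_trusted(client_ip, header_dest_ip, trusted_ips):
        """Return True when both addresses appear in the trusted list.

        client_ip      - the TCP peer, i.e. HAProxy itself (may be a loopback address)
        header_dest_ip - the destination address carried in the PROXY header
        trusted_ips    - the configured trusted IP values
        """
        trusted = {ip.lower() for ip in trusted_ips}
        return client_ip.lower() in trusted and header_dest_ip.lower() in trusted

    # The old rule also required client_ip == header_dest_ip, which failed when
    # HAProxy runs on the same machine or forwards from a different subnet.
    assert haproxy_header_trusted("127.0.0.1", "192.0.2.10", ["127.0.0.1", "192.0.2.10"])
    assert not haproxy_header_trusted("198.51.100.7", "192.0.2.10", ["127.0.0.1", "192.0.2.10"])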
@ -0,0 +1,192 @@
|
||||
From b2e0a1d405d15383064e547fd15008bc136d3efe Mon Sep 17 00:00:00 2001
|
||||
From: Firstyear <william@blackhats.net.au>
|
||||
Date: Thu, 17 Dec 2020 08:22:23 +1000
|
||||
Subject: [PATCH 05/12] Issue 4498 - BUG - entryuuid replication may not work
|
||||
(#4503)
|
||||
|
||||
Bug Description: EntryUUID can be duplicated in replication,
|
||||
due to a missing check in assign_uuid
|
||||
|
||||
Fix Description: Add a test case to determine how this occurs,
|
||||
and add the correct check for existing entryUUID.
|
||||
|
||||
fixes: https://github.com/389ds/389-ds-base/issues/4498
|
||||
|
||||
Author: William Brown <william@blackhats.net.au>
|
||||
|
||||
Review by: @mreynolds389
|
||||
---
|
||||
.../tests/suites/entryuuid/replicated_test.py | 77 +++++++++++++++++++
|
||||
rpm.mk | 2 +-
|
||||
src/plugins/entryuuid/src/lib.rs | 20 ++++-
|
||||
src/slapi_r_plugin/src/constants.rs | 2 +
|
||||
src/slapi_r_plugin/src/pblock.rs | 7 ++
|
||||
5 files changed, 106 insertions(+), 2 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/entryuuid/replicated_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/entryuuid/replicated_test.py b/dirsrvtests/tests/suites/entryuuid/replicated_test.py
|
||||
new file mode 100644
|
||||
index 000000000..a2ebc8ff7
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/entryuuid/replicated_test.py
|
||||
@@ -0,0 +1,77 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2020 William Brown <william@blackhats.net.au>
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+
|
||||
+import ldap
|
||||
+import pytest
|
||||
+import logging
|
||||
+from lib389.topologies import topology_m2 as topo_m2
|
||||
+from lib389.idm.user import nsUserAccounts
|
||||
+from lib389.paths import Paths
|
||||
+from lib389.utils import ds_is_older
|
||||
+from lib389._constants import *
|
||||
+from lib389.replica import ReplicationManager
|
||||
+
|
||||
+default_paths = Paths()
|
||||
+
|
||||
+pytestmark = pytest.mark.tier1
|
||||
+
|
||||
+@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions")
|
||||
+
|
||||
+def test_entryuuid_with_replication(topo_m2):
|
||||
+ """ Check that entryuuid works with replication
|
||||
+
|
||||
+ :id: a5f15bf9-7f63-473a-840c-b9037b787024
|
||||
+
|
||||
+ :setup: two node mmr
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Create an entry on one server
|
||||
+ 2. Wait for replication
|
||||
+ 3. Assert it is on the second
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 1. Success
|
||||
+ 1. Success
|
||||
+ """
|
||||
+
|
||||
+ server_a = topo_m2.ms["supplier1"]
|
||||
+ server_b = topo_m2.ms["supplier2"]
|
||||
+ server_a.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE))
|
||||
+ server_b.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE))
|
||||
+
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+
|
||||
+ account_a = nsUserAccounts(server_a, DEFAULT_SUFFIX).create_test_user(uid=2000)
|
||||
+ euuid_a = account_a.get_attr_vals_utf8('entryUUID')
|
||||
+ print("🧩 %s" % euuid_a)
|
||||
+ assert(euuid_a is not None)
|
||||
+ assert(len(euuid_a) == 1)
|
||||
+
|
||||
+ repl.wait_for_replication(server_a, server_b)
|
||||
+
|
||||
+ account_b = nsUserAccounts(server_b, DEFAULT_SUFFIX).get("test_user_2000")
|
||||
+ euuid_b = account_b.get_attr_vals_utf8('entryUUID')
|
||||
+ print("🧩 %s" % euuid_b)
|
||||
+
|
||||
+ server_a.config.loglevel(vals=(ErrorLog.DEFAULT,))
|
||||
+ server_b.config.loglevel(vals=(ErrorLog.DEFAULT,))
|
||||
+
|
||||
+ assert(euuid_b is not None)
|
||||
+ assert(len(euuid_b) == 1)
|
||||
+ assert(euuid_b == euuid_a)
|
||||
+
|
||||
+ account_b.set("description", "update")
|
||||
+ repl.wait_for_replication(server_b, server_a)
|
||||
+
|
||||
+ euuid_c = account_a.get_attr_vals_utf8('entryUUID')
|
||||
+ print("🧩 %s" % euuid_c)
|
||||
+ assert(euuid_c is not None)
|
||||
+ assert(len(euuid_c) == 1)
|
||||
+ assert(euuid_c == euuid_a)
|
||||
+
|
||||
diff --git a/rpm.mk b/rpm.mk
|
||||
index 02f5bba37..d1cdff7df 100644
|
||||
--- a/rpm.mk
|
||||
+++ b/rpm.mk
|
||||
@@ -25,7 +25,7 @@ TSAN_ON = 0
|
||||
# Undefined Behaviour Sanitizer
|
||||
UBSAN_ON = 0
|
||||
|
||||
-RUST_ON = 0
|
||||
+RUST_ON = 1
|
||||
|
||||
# PERL_ON is deprecated and turns on the LEGACY_ON, this for not breaking people's workflows.
|
||||
PERL_ON = 1
|
||||
diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
|
||||
index 92977db05..0197c5e83 100644
|
||||
--- a/src/plugins/entryuuid/src/lib.rs
|
||||
+++ b/src/plugins/entryuuid/src/lib.rs
|
||||
@@ -30,6 +30,16 @@ slapi_r_search_callback_mapfn!(entryuuid, entryuuid_fixup_cb, entryuuid_fixup_ma
|
||||
fn assign_uuid(e: &mut EntryRef) {
|
||||
let sdn = e.get_sdnref();
|
||||
|
||||
+ // 🚧 safety barrier 🚧
|
||||
+ if e.contains_attr("entryUUID") {
|
||||
+ log_error!(
|
||||
+ ErrorLevel::Trace,
|
||||
+ "assign_uuid -> entryUUID exists, skipping dn {}",
|
||||
+ sdn.to_dn_string()
|
||||
+ );
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
// We could consider making these lazy static.
|
||||
let config_sdn = Sdn::try_from("cn=config").expect("Invalid static dn");
|
||||
let schema_sdn = Sdn::try_from("cn=schema").expect("Invalid static dn");
|
||||
@@ -66,7 +76,15 @@ impl SlapiPlugin3 for EntryUuid {
|
||||
}
|
||||
|
||||
fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> {
|
||||
- log_error!(ErrorLevel::Trace, "betxn_pre_add");
|
||||
+ if pb.get_is_replicated_operation() {
|
||||
+ log_error!(
|
||||
+ ErrorLevel::Trace,
|
||||
+ "betxn_pre_add -> replicated operation, will not change"
|
||||
+ );
|
||||
+ return Ok(());
|
||||
+ }
|
||||
+
|
||||
+ log_error!(ErrorLevel::Trace, "betxn_pre_add -> start");
|
||||
|
||||
let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?;
|
||||
assign_uuid(&mut e);
|
||||
diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs
|
||||
index 34845c2f4..aa0691acc 100644
|
||||
--- a/src/slapi_r_plugin/src/constants.rs
|
||||
+++ b/src/slapi_r_plugin/src/constants.rs
|
||||
@@ -164,6 +164,8 @@ pub(crate) enum PblockType {
|
||||
AddEntry = 60,
|
||||
/// SLAPI_BACKEND
|
||||
Backend = 130,
|
||||
+ /// SLAPI_IS_REPLICATED_OPERATION
|
||||
+ IsReplicationOperation = 142,
|
||||
/// SLAPI_PLUGIN_MR_NAMES
|
||||
MRNames = 624,
|
||||
/// SLAPI_PLUGIN_SYNTAX_NAMES
|
||||
diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs
|
||||
index 0f83914f3..718ff2ca7 100644
|
||||
--- a/src/slapi_r_plugin/src/pblock.rs
|
||||
+++ b/src/slapi_r_plugin/src/pblock.rs
|
||||
@@ -279,4 +279,11 @@ impl PblockRef {
|
||||
pub fn get_op_result(&mut self) -> i32 {
|
||||
self.get_value_i32(PblockType::OpResult).unwrap_or(-1)
|
||||
}
|
||||
+
|
||||
+ pub fn get_is_replicated_operation(&mut self) -> bool {
|
||||
+ let i = self.get_value_i32(PblockType::IsReplicationOperation).unwrap_or(0);
|
||||
+ // Because rust returns the result of the last evaluation, we can
|
||||
+ // just return if not equal 0.
|
||||
+ i != 0
|
||||
+ }
|
||||
}
|
||||
--
|
||||
2.26.3
|
||||
|
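The two guards added above are what keep UUIDs stable across a replication topology: a consumer skips replicated adds entirely, and assign_uuid() refuses to overwrite an entryUUID that is already present. A standalone Python sketch of that decision, purely illustrative (the real logic lives in the Rust plugin, and the UUID literal below is a made-up example value):

    import uuid

    def maybe_assign_entryuuid(entry, is_replicated_op):
        """Generate an entryUUID only for local adds that do not already carry one."""
        if is_replicated_op:
            return entry.get("entryUUID")        # keep whatever the supplier sent
        if entry.get("entryUUID") is not None:   # safety barrier, as in assign_uuid()
            return entry["entryUUID"]
        entry["entryUUID"] = str(uuid.uuid4())
        return entry["entryUUID"]

    # A replicated add must not mint a second, different UUID on the consumer.
    supplier_entry = {"uid": "test_user_2000", "entryUUID": "f6df8fe9-6b30-46aa-aa13-f0bf755371e8"}
    assert maybe_assign_entryuuid(dict(supplier_entry), True) == supplier_entry["entryUUID"]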
@ -1,108 +0,0 @@
|
||||
From 016a2b6bd3e27cbff36609824a75b020dfd24823 Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 1 May 2024 15:01:33 +0100
|
||||
Subject: [PATCH] CVE-2024-2199
|
||||
|
||||
---
|
||||
.../tests/suites/password/password_test.py | 56 +++++++++++++++++++
|
||||
ldap/servers/slapd/modify.c | 8 ++-
|
||||
2 files changed, 62 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py
|
||||
index 38079476a..b3ff08904 100644
|
||||
--- a/dirsrvtests/tests/suites/password/password_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/password_test.py
|
||||
@@ -65,6 +65,62 @@ def test_password_delete_specific_password(topology_st):
|
||||
log.info('test_password_delete_specific_password: PASSED')
|
||||
|
||||
|
||||
+def test_password_modify_non_utf8(topology_st):
|
||||
+ """Attempt a modify of the userPassword attribute with
|
||||
+ an invalid non utf8 value
|
||||
+
|
||||
+ :id: a31af9d5-d665-42b9-8d6e-fea3d0837d36
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Add a user if it doesn't exist and set its password
|
||||
+ 2. Verify password with a bind
|
||||
+ 3. Modify userPassword attr with invalid value
|
||||
+ 4. Attempt a bind with invalid password value
|
||||
+ 5. Verify original password with a bind
|
||||
+ :expectedresults:
|
||||
+ 1. The user with userPassword should be added successfully
|
||||
+ 2. Operation should be successful
|
||||
+ 3. Server returns ldap.UNWILLING_TO_PERFORM
|
||||
+ 4. Server returns ldap.INVALID_CREDENTIALS
|
||||
+ 5. Operation should be successful
|
||||
+ """
|
||||
+
|
||||
+ log.info('Running test_password_modify_non_utf8...')
|
||||
+
|
||||
+ # Create user and set password
|
||||
+ standalone = topology_st.standalone
|
||||
+ users = UserAccounts(standalone, DEFAULT_SUFFIX)
|
||||
+ if not users.exists(TEST_USER_PROPERTIES['uid'][0]):
|
||||
+ user = users.create(properties=TEST_USER_PROPERTIES)
|
||||
+ else:
|
||||
+ user = users.get(TEST_USER_PROPERTIES['uid'][0])
|
||||
+ user.set('userpassword', PASSWORD)
|
||||
+
|
||||
+ # Verify password
|
||||
+ try:
|
||||
+ user.bind(PASSWORD)
|
||||
+ except ldap.LDAPError as e:
|
||||
+ log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
|
||||
+ assert False
|
||||
+
|
||||
+ # Modify userPassword with an invalid value
|
||||
+ password = b'tes\x82t-password' # A non UTF-8 encoded password
|
||||
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
|
||||
+ user.replace('userpassword', password)
|
||||
+
|
||||
+ # Verify a bind fails with invalid password
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ user.bind(password)
|
||||
+
|
||||
+ # Verify we can still bind with original password
|
||||
+ try:
|
||||
+ user.bind(PASSWORD)
|
||||
+ except ldap.LDAPError as e:
|
||||
+ log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
|
||||
+ assert False
|
||||
+
|
||||
+ log.info('test_password_modify_non_utf8: PASSED')
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
|
||||
index 5ca78539c..669bb104c 100644
|
||||
--- a/ldap/servers/slapd/modify.c
|
||||
+++ b/ldap/servers/slapd/modify.c
|
||||
@@ -765,8 +765,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
|
||||
* flagged - leave mod attributes alone */
|
||||
if (!repl_op && !skip_modified_attrs && lastmod) {
|
||||
modify_update_last_modified_attr(pb, &smods);
|
||||
+ slapi_pblock_set(pb, SLAPI_MODIFY_MODS, slapi_mods_get_ldapmods_byref(&smods));
|
||||
}
|
||||
|
||||
+
|
||||
if (0 == slapi_mods_get_num_mods(&smods)) {
|
||||
/* nothing to do - no mods - this is not an error - just
|
||||
send back LDAP_SUCCESS */
|
||||
@@ -933,8 +935,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
|
||||
|
||||
/* encode password */
|
||||
if (pw_encodevals_ext(pb, sdn, va)) {
|
||||
- slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s.\n", slapi_entry_get_dn_const(e));
|
||||
- send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to store attribute \"userPassword\" correctly\n", 0, NULL);
|
||||
+ slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s, "
|
||||
+ "check value is utf8 string.\n", slapi_entry_get_dn_const(e));
|
||||
+ send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to hash \"userPassword\" attribute, "
|
||||
+ "check value is utf8 string.\n", 0, NULL);
|
||||
valuearray_free(&va);
|
||||
goto free_and_return;
|
||||
}
|
||||
--
|
||||
2.45.0
|
||||
|
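The modify.c change above makes the server refuse a userPassword value it cannot hash and point at the value not being a UTF-8 string; the new test feeds it a byte string containing 0x82, which is not valid UTF-8. A standalone Python check of that property, illustrative only (the failing hash attempt that triggers the error happens server side in pw_encodevals_ext()):

    password = b'tes\x82t-password'        # the value used in the new test above

    try:
        password.decode('utf-8')
    except UnicodeDecodeError as exc:
        print('rejected, not valid UTF-8:', exc)   # 0x82 is a bare continuation byte

    # A well-formed value decodes cleanly and is accepted and hashed as usual.
    b'test-password'.decode('utf-8')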
@ -0,0 +1,626 @@
|
||||
From 04c44e74503a842561b6c6e58001faf86d924b20 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 7 Dec 2020 11:00:45 -0500
|
||||
Subject: [PATCH 06/12] Issue 4421 - Unable to build with Rust enabled in
|
||||
closed environment
|
||||
|
||||
Description: Add Makefile flags and update rpm.mk that allow updating
|
||||
and downloading all the cargo/rust dependencies. This is
|
||||
needed for nightly tests and upstream/downstream releases.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/4421
|
||||
|
||||
Reviewed by: firstyear(Thanks!)
|
||||
---
|
||||
rpm.mk | 3 +-
|
||||
rpm/389-ds-base.spec.in | 2 +-
|
||||
src/Cargo.lock | 563 ----------------------------------------
|
||||
3 files changed, 3 insertions(+), 565 deletions(-)
|
||||
delete mode 100644 src/Cargo.lock
|
||||
|
||||
diff --git a/rpm.mk b/rpm.mk
|
||||
index d1cdff7df..ef810c63c 100644
|
||||
--- a/rpm.mk
|
||||
+++ b/rpm.mk
|
||||
@@ -44,6 +44,7 @@ update-cargo-dependencies:
|
||||
cargo update --manifest-path=./src/Cargo.toml
|
||||
|
||||
download-cargo-dependencies:
|
||||
+ cargo update --manifest-path=./src/Cargo.toml
|
||||
cargo vendor --manifest-path=./src/Cargo.toml
|
||||
cargo fetch --manifest-path=./src/Cargo.toml
|
||||
tar -czf vendor.tar.gz vendor
|
||||
@@ -114,7 +115,7 @@ rpmbuildprep:
|
||||
cp dist/sources/$(JEMALLOC_TARBALL) $(RPMBUILD)/SOURCES/ ; \
|
||||
fi
|
||||
|
||||
-srpms: rpmroot srpmdistdir tarballs rpmbuildprep
|
||||
+srpms: rpmroot srpmdistdir download-cargo-dependencies tarballs rpmbuildprep
|
||||
rpmbuild --define "_topdir $(RPMBUILD)" -bs $(RPMBUILD)/SPECS/$(PACKAGE).spec
|
||||
cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)*.src.rpm dist/srpms/
|
||||
rm -rf $(RPMBUILD)
|
||||
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
|
||||
index b9f85489b..d80de8422 100644
|
||||
--- a/rpm/389-ds-base.spec.in
|
||||
+++ b/rpm/389-ds-base.spec.in
|
||||
@@ -357,7 +357,7 @@ UBSAN_FLAGS="--enable-ubsan --enable-debug"
|
||||
%endif
|
||||
|
||||
%if %{use_rust}
|
||||
-RUST_FLAGS="--enable-rust"
|
||||
+RUST_FLAGS="--enable-rust --enable-rust-offline"
|
||||
%endif
|
||||
|
||||
%if %{use_legacy}
|
||||
diff --git a/src/Cargo.lock b/src/Cargo.lock
|
||||
deleted file mode 100644
|
||||
index 33d7b8f23..000000000
|
||||
--- a/src/Cargo.lock
|
||||
+++ /dev/null
|
||||
@@ -1,563 +0,0 @@
|
||||
-# This file is automatically @generated by Cargo.
|
||||
-# It is not intended for manual editing.
|
||||
-[[package]]
|
||||
-name = "ansi_term"
|
||||
-version = "0.11.0"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
|
||||
-dependencies = [
|
||||
- "winapi",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "atty"
|
||||
-version = "0.2.14"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
|
||||
-dependencies = [
|
||||
- "hermit-abi",
|
||||
- "libc",
|
||||
- "winapi",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "autocfg"
|
||||
-version = "1.0.1"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "base64"
|
||||
-version = "0.13.0"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "bitflags"
|
||||
-version = "1.2.1"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "byteorder"
|
||||
-version = "1.4.3"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "cbindgen"
|
||||
-version = "0.9.1"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
|
||||
-dependencies = [
|
||||
- "clap",
|
||||
- "log",
|
||||
- "proc-macro2",
|
||||
- "quote",
|
||||
- "serde",
|
||||
- "serde_json",
|
||||
- "syn",
|
||||
- "tempfile",
|
||||
- "toml",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "cc"
|
||||
-version = "1.0.67"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd"
|
||||
-dependencies = [
|
||||
- "jobserver",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "cfg-if"
|
||||
-version = "1.0.0"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "clap"
|
||||
-version = "2.33.3"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
|
||||
-dependencies = [
|
||||
- "ansi_term",
|
||||
- "atty",
|
||||
- "bitflags",
|
||||
- "strsim",
|
||||
- "textwrap",
|
||||
- "unicode-width",
|
||||
- "vec_map",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "entryuuid"
|
||||
-version = "0.1.0"
|
||||
-dependencies = [
|
||||
- "cc",
|
||||
- "libc",
|
||||
- "paste",
|
||||
- "slapi_r_plugin",
|
||||
- "uuid",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "entryuuid_syntax"
|
||||
-version = "0.1.0"
|
||||
-dependencies = [
|
||||
- "cc",
|
||||
- "libc",
|
||||
- "paste",
|
||||
- "slapi_r_plugin",
|
||||
- "uuid",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "fernet"
|
||||
-version = "0.1.4"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
|
||||
-dependencies = [
|
||||
- "base64",
|
||||
- "byteorder",
|
||||
- "getrandom",
|
||||
- "openssl",
|
||||
- "zeroize",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "foreign-types"
|
||||
-version = "0.3.2"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
|
||||
-dependencies = [
|
||||
- "foreign-types-shared",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "foreign-types-shared"
|
||||
-version = "0.1.1"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "getrandom"
|
||||
-version = "0.2.3"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
|
||||
-dependencies = [
|
||||
- "cfg-if",
|
||||
- "libc",
|
||||
- "wasi",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "hermit-abi"
|
||||
-version = "0.1.18"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
|
||||
-dependencies = [
|
||||
- "libc",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "itoa"
|
||||
-version = "0.4.7"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "jobserver"
|
||||
-version = "0.1.22"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd"
|
||||
-dependencies = [
|
||||
- "libc",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "lazy_static"
|
||||
-version = "1.4.0"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "libc"
|
||||
-version = "0.2.94"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "librnsslapd"
|
||||
-version = "0.1.0"
|
||||
-dependencies = [
|
||||
- "cbindgen",
|
||||
- "libc",
|
||||
- "slapd",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "librslapd"
|
||||
-version = "0.1.0"
|
||||
-dependencies = [
|
||||
- "cbindgen",
|
||||
- "libc",
|
||||
- "slapd",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "log"
|
||||
-version = "0.4.14"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
|
||||
-dependencies = [
|
||||
- "cfg-if",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "once_cell"
|
||||
-version = "1.7.2"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "openssl"
|
||||
-version = "0.10.34"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8"
|
||||
-dependencies = [
|
||||
- "bitflags",
|
||||
- "cfg-if",
|
||||
- "foreign-types",
|
||||
- "libc",
|
||||
- "once_cell",
|
||||
- "openssl-sys",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "openssl-sys"
|
||||
-version = "0.9.63"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98"
|
||||
-dependencies = [
|
||||
- "autocfg",
|
||||
- "cc",
|
||||
- "libc",
|
||||
- "pkg-config",
|
||||
- "vcpkg",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "paste"
|
||||
-version = "0.1.18"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
|
||||
-dependencies = [
|
||||
- "paste-impl",
|
||||
- "proc-macro-hack",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "paste-impl"
|
||||
-version = "0.1.18"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
|
||||
-dependencies = [
|
||||
- "proc-macro-hack",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "pkg-config"
|
||||
-version = "0.3.19"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "ppv-lite86"
|
||||
-version = "0.2.10"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "proc-macro-hack"
|
||||
-version = "0.5.19"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "proc-macro2"
|
||||
-version = "1.0.27"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
|
||||
-dependencies = [
|
||||
- "unicode-xid",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "quote"
|
||||
-version = "1.0.9"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
|
||||
-dependencies = [
|
||||
- "proc-macro2",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "rand"
|
||||
-version = "0.8.3"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
|
||||
-dependencies = [
|
||||
- "libc",
|
||||
- "rand_chacha",
|
||||
- "rand_core",
|
||||
- "rand_hc",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "rand_chacha"
|
||||
-version = "0.3.0"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
|
||||
-dependencies = [
|
||||
- "ppv-lite86",
|
||||
- "rand_core",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "rand_core"
|
||||
-version = "0.6.2"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"
|
||||
-dependencies = [
|
||||
- "getrandom",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "rand_hc"
|
||||
-version = "0.3.0"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
|
||||
-dependencies = [
|
||||
- "rand_core",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "redox_syscall"
|
||||
-version = "0.2.8"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc"
|
||||
-dependencies = [
|
||||
- "bitflags",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "remove_dir_all"
|
||||
-version = "0.5.3"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
|
||||
-dependencies = [
|
||||
- "winapi",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "rsds"
|
||||
-version = "0.1.0"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "ryu"
|
||||
-version = "1.0.5"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "serde"
|
||||
-version = "1.0.126"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03"
|
||||
-dependencies = [
|
||||
- "serde_derive",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "serde_derive"
|
||||
-version = "1.0.126"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43"
|
||||
-dependencies = [
|
||||
- "proc-macro2",
|
||||
- "quote",
|
||||
- "syn",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "serde_json"
|
||||
-version = "1.0.64"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79"
|
||||
-dependencies = [
|
||||
- "itoa",
|
||||
- "ryu",
|
||||
- "serde",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "slapd"
|
||||
-version = "0.1.0"
|
||||
-dependencies = [
|
||||
- "fernet",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "slapi_r_plugin"
|
||||
-version = "0.1.0"
|
||||
-dependencies = [
|
||||
- "lazy_static",
|
||||
- "libc",
|
||||
- "paste",
|
||||
- "uuid",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "strsim"
|
||||
-version = "0.8.0"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "syn"
|
||||
-version = "1.0.72"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82"
|
||||
-dependencies = [
|
||||
- "proc-macro2",
|
||||
- "quote",
|
||||
- "unicode-xid",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "synstructure"
|
||||
-version = "0.12.4"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
|
||||
-dependencies = [
|
||||
- "proc-macro2",
|
||||
- "quote",
|
||||
- "syn",
|
||||
- "unicode-xid",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "tempfile"
|
||||
-version = "3.2.0"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
|
||||
-dependencies = [
|
||||
- "cfg-if",
|
||||
- "libc",
|
||||
- "rand",
|
||||
- "redox_syscall",
|
||||
- "remove_dir_all",
|
||||
- "winapi",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "textwrap"
|
||||
-version = "0.11.0"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
|
||||
-dependencies = [
|
||||
- "unicode-width",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "toml"
|
||||
-version = "0.5.8"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa"
|
||||
-dependencies = [
|
||||
- "serde",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "unicode-width"
|
||||
-version = "0.1.8"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "unicode-xid"
|
||||
-version = "0.2.2"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "uuid"
|
||||
-version = "0.8.2"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
|
||||
-dependencies = [
|
||||
- "getrandom",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "vcpkg"
|
||||
-version = "0.2.12"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "cbdbff6266a24120518560b5dc983096efb98462e51d0d68169895b237be3e5d"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "vec_map"
|
||||
-version = "0.8.2"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "wasi"
|
||||
-version = "0.10.2+wasi-snapshot-preview1"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "winapi"
|
||||
-version = "0.3.9"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
|
||||
-dependencies = [
|
||||
- "winapi-i686-pc-windows-gnu",
|
||||
- "winapi-x86_64-pc-windows-gnu",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "winapi-i686-pc-windows-gnu"
|
||||
-version = "0.4.0"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "winapi-x86_64-pc-windows-gnu"
|
||||
-version = "0.4.0"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
-
|
||||
-[[package]]
|
||||
-name = "zeroize"
|
||||
-version = "1.3.0"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd"
|
||||
-dependencies = [
|
||||
- "zeroize_derive",
|
||||
-]
|
||||
-
|
||||
-[[package]]
|
||||
-name = "zeroize_derive"
|
||||
-version = "1.1.0"
|
||||
-source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
-checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1"
|
||||
-dependencies = [
|
||||
- "proc-macro2",
|
||||
- "quote",
|
||||
- "syn",
|
||||
- "synstructure",
|
||||
-]
|
||||
--
|
||||
2.26.3
|
||||
|
@ -1,213 +0,0 @@
|
||||
From d5bbe52fbe84a7d3b5938bf82d5c4af15061a8e2 Mon Sep 17 00:00:00 2001
|
||||
From: Pierre Rogier <progier@redhat.com>
|
||||
Date: Wed, 17 Apr 2024 18:18:04 +0200
|
||||
Subject: [PATCH] CVE-2024-3657
|
||||
|
||||
---
|
||||
.../tests/suites/filter/large_filter_test.py | 34 +++++-
|
||||
ldap/servers/slapd/back-ldbm/index.c | 111 ++++++++++--------
|
||||
2 files changed, 92 insertions(+), 53 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/filter/large_filter_test.py b/dirsrvtests/tests/suites/filter/large_filter_test.py
|
||||
index ecc7bf979..40526bb16 100644
|
||||
--- a/dirsrvtests/tests/suites/filter/large_filter_test.py
|
||||
+++ b/dirsrvtests/tests/suites/filter/large_filter_test.py
|
||||
@@ -13,19 +13,29 @@ verify and testing Filter from a search
|
||||
|
||||
import os
|
||||
import pytest
|
||||
+import ldap
|
||||
|
||||
-from lib389._constants import PW_DM
|
||||
+from lib389._constants import PW_DM, DEFAULT_SUFFIX, ErrorLog
|
||||
from lib389.topologies import topology_st as topo
|
||||
from lib389.idm.user import UserAccounts, UserAccount
|
||||
from lib389.idm.account import Accounts
|
||||
from lib389.backend import Backends
|
||||
from lib389.idm.domain import Domain
|
||||
+from lib389.utils import get_ldapurl_from_serverid
|
||||
|
||||
SUFFIX = 'dc=anuj,dc=com'
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
|
||||
+def open_new_ldapi_conn(dsinstance):
|
||||
+ ldapurl, certdir = get_ldapurl_from_serverid(dsinstance)
|
||||
+ assert 'ldapi://' in ldapurl
|
||||
+ conn = ldap.initialize(ldapurl)
|
||||
+ conn.sasl_interactive_bind_s("", ldap.sasl.external())
|
||||
+ return conn
|
||||
+
|
||||
+
|
||||
@pytest.fixture(scope="module")
|
||||
def _create_entries(request, topo):
|
||||
"""
|
||||
@@ -160,6 +170,28 @@ def test_large_filter(topo, _create_entries, real_value):
|
||||
assert len(Accounts(conn, SUFFIX).filter(real_value)) == 3
|
||||
|
||||
|
||||
+def test_long_filter_value(topo):
|
||||
+ """Exercise large eq filter with dn syntax attributes
|
||||
+
|
||||
+ :id: b069ef72-fcc3-11ee-981c-482ae39447e5
|
||||
+ :setup: Standalone
|
||||
+ :steps:
|
||||
+ 1. Try to pass filter rules as per the condition.
|
||||
+ :expectedresults:
|
||||
+ 1. Pass
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ conn = open_new_ldapi_conn(inst.serverid)
|
||||
+ inst.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE,ErrorLog.SEARCH_FILTER))
|
||||
+ filter_value = "a\x1Edmin" * 1025
|
||||
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
|
||||
+ filter_value = "aAdmin" * 1025
|
||||
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
|
||||
+ filter_value = "*"
|
||||
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
|
||||
+ inst.config.loglevel(vals=(ErrorLog.DEFAULT,))
|
||||
+
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main("-s -v %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c
|
||||
index 410db23d1..30fa09ebb 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/index.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/index.c
|
||||
@@ -71,6 +71,32 @@ typedef struct _index_buffer_handle index_buffer_handle;
|
||||
#define INDEX_BUFFER_FLAG_SERIALIZE 1
|
||||
#define INDEX_BUFFER_FLAG_STATS 2
|
||||
|
||||
+/*
|
||||
+ * space needed to encode a byte:
|
||||
+ * 0x00-0x1f and 0x7f-0xff requires 3 bytes: \xx
|
||||
+ * 0x22 and 0x5C requires 2 bytes: \" and \\
|
||||
+ * other requires 1 byte: c
|
||||
+ */
|
||||
+static char encode_size[] = {
|
||||
+ /* 0x00 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0x10 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0x20 */ 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1,
|
||||
+ /* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3,
|
||||
+ /* 0x80 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0x90 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xA0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xB0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xC0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xD0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xE0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xF0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+};
|
||||
+
|
||||
+
|
||||
/* Index buffering functions */
|
||||
|
||||
static int
|
||||
@@ -799,65 +825,46 @@ index_add_mods(
|
||||
|
||||
/*
|
||||
* Convert a 'struct berval' into a displayable ASCII string
|
||||
+ * returns the printable string
|
||||
*/
|
||||
-
|
||||
-#define SPECIAL(c) (c < 32 || c > 126 || c == '\\' || c == '"')
|
||||
-
|
||||
const char *
|
||||
encode(const struct berval *data, char buf[BUFSIZ])
|
||||
{
|
||||
- char *s;
|
||||
- char *last;
|
||||
- if (data == NULL || data->bv_len == 0)
|
||||
- return "";
|
||||
- last = data->bv_val + data->bv_len - 1;
|
||||
- for (s = data->bv_val; s < last; ++s) {
|
||||
- if (SPECIAL(*s)) {
|
||||
- char *first = data->bv_val;
|
||||
- char *bufNext = buf;
|
||||
- size_t bufSpace = BUFSIZ - 4;
|
||||
- while (1) {
|
||||
- /* printf ("%lu bytes ASCII\n", (unsigned long)(s - first)); */
|
||||
- if (bufSpace < (size_t)(s - first))
|
||||
- s = first + bufSpace - 1;
|
||||
- if (s != first) {
|
||||
- memcpy(bufNext, first, s - first);
|
||||
- bufNext += (s - first);
|
||||
- bufSpace -= (s - first);
|
||||
- }
|
||||
- do {
|
||||
- if (bufSpace) {
|
||||
- *bufNext++ = '\\';
|
||||
- --bufSpace;
|
||||
- }
|
||||
- if (bufSpace < 2) {
|
||||
- memcpy(bufNext, "..", 2);
|
||||
- bufNext += 2;
|
||||
- goto bail;
|
||||
- }
|
||||
- if (*s == '\\' || *s == '"') {
|
||||
- *bufNext++ = *s;
|
||||
- --bufSpace;
|
||||
- } else {
|
||||
- sprintf(bufNext, "%02x", (unsigned)*(unsigned char *)s);
|
||||
- bufNext += 2;
|
||||
- bufSpace -= 2;
|
||||
- }
|
||||
- } while (++s <= last && SPECIAL(*s));
|
||||
- if (s > last)
|
||||
- break;
|
||||
- first = s;
|
||||
- while (!SPECIAL(*s) && s <= last)
|
||||
- ++s;
|
||||
- }
|
||||
- bail:
|
||||
- *bufNext = '\0';
|
||||
- /* printf ("%lu chars in buffer\n", (unsigned long)(bufNext - buf)); */
|
||||
+ if (!data || !data->bv_val) {
|
||||
+ strcpy(buf, "<NULL>");
|
||||
+ return buf;
|
||||
+ }
|
||||
+ char *endbuff = &buf[BUFSIZ-4]; /* Reserve space to append "...\0" */
|
||||
+ char *ptout = buf;
|
||||
+ unsigned char *ptin = (unsigned char*) data->bv_val;
|
||||
+ unsigned char *endptin = ptin+data->bv_len;
|
||||
+
|
||||
+ while (ptin < endptin) {
|
||||
+ if (ptout >= endbuff) {
|
||||
+ /*
|
||||
+ * BUFSIZ(8K) > SLAPI_LOG_BUFSIZ(2K) so the error log message will be
|
||||
+ * truncated anyway. So there is no real interest in testing whether the original
|
||||
+ * data contains no special characters and returning it as is.
|
||||
+ */
|
||||
+ strcpy(endbuff, "...");
|
||||
return buf;
|
||||
}
|
||||
+ switch (encode_size[*ptin]) {
|
||||
+ case 1:
|
||||
+ *ptout++ = *ptin++;
|
||||
+ break;
|
||||
+ case 2:
|
||||
+ *ptout++ = '\\';
|
||||
+ *ptout++ = *ptin++;
|
||||
+ break;
|
||||
+ case 3:
|
||||
+ sprintf(ptout, "\\%02x", *ptin++);
|
||||
+ ptout += 3;
|
||||
+ break;
|
||||
+ }
|
||||
}
|
||||
- /* printf ("%lu bytes, all ASCII\n", (unsigned long)(s - data->bv_val)); */
|
||||
- return data->bv_val;
|
||||
+ *ptout = 0;
|
||||
+ return buf;
|
||||
}
|
||||
|
||||
static const char *
|
||||
--
|
||||
2.45.0
|
||||
|
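For readers who want the gist of the new escaping scheme without wading through the diff: a per-byte cost lookup decides whether a byte is copied, backslash-escaped, or hex-escaped, and output is truncated with "..." when the buffer runs out. The following is a minimal standalone sketch of that idea only; the names, buffer size, and sample input are invented for illustration and this is not the server's encode() function.

/* Illustrative sketch of the table-driven escaping above (not the server code). */
#include <stdio.h>
#include <string.h>

#define OUTSZ 64                      /* small on purpose so truncation is visible */

static int cost(unsigned char c)
{
    if (c == '"' || c == '\\')
        return 2;                     /* escaped as \" or \\ */
    if (c < 0x20 || c > 0x7e)
        return 3;                     /* escaped as \xx */
    return 1;                         /* printable: copied as is */
}

static const char *escape(const unsigned char *in, size_t len, char out[OUTSZ])
{
    char *p = out;
    char *end = out + OUTSZ - 4;      /* reserve room for "...\0" */

    for (size_t i = 0; i < len; i++) {
        if (p >= end) {               /* out of space: truncate visibly */
            strcpy(end, "...");
            return out;
        }
        switch (cost(in[i])) {
        case 1: *p++ = (char)in[i]; break;
        case 2: *p++ = '\\'; *p++ = (char)in[i]; break;
        case 3: p += sprintf(p, "\\%02x", (unsigned)in[i]); break;
        }
    }
    *p = '\0';
    return out;
}

int main(void)
{
    const unsigned char sample[] = "a\x1e" "dmin \"quoted\"";
    char buf[OUTSZ];

    puts(escape(sample, sizeof(sample) - 1, buf));
    return 0;
}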
412
SOURCES/0007-Ticket-51175-resolve-plugin-name-leaking.patch
Normal file
@ -0,0 +1,412 @@
|
||||
From 279bdb5148eb0b67ddab40c4dd9d08e9e1672f13 Mon Sep 17 00:00:00 2001
|
||||
From: William Brown <william@blackhats.net.au>
|
||||
Date: Fri, 26 Jun 2020 10:27:56 +1000
|
||||
Subject: [PATCH 07/12] Ticket 51175 - resolve plugin name leaking
|
||||
|
||||
Bug Description: Previously pblock.c assumed that all plugin
|
||||
names were static c strings. Rust can't create static C
|
||||
strings, so these were intentionally leaked.
|
||||
|
||||
Fix Description: Rather than leak these, we do a dup/free
|
||||
through the slapiplugin struct instead, meaning we can use
|
||||
ephemeral, and properly managed strings in rust. This does not
|
||||
affect any other existing code which will still handle the
|
||||
static strings correctly.
|
||||
|
||||
https://pagure.io/389-ds-base/issue/51175
|
||||
|
||||
Author: William Brown <william@blackhats.net.au>
|
||||
|
||||
Review by: mreynolds, tbordaz (Thanks!)
|
||||
---
|
||||
Makefile.am | 1 +
|
||||
configure.ac | 2 +-
|
||||
ldap/servers/slapd/pagedresults.c | 6 +--
|
||||
ldap/servers/slapd/pblock.c | 9 ++--
|
||||
ldap/servers/slapd/plugin.c | 7 +++
|
||||
ldap/servers/slapd/pw_verify.c | 1 +
|
||||
ldap/servers/slapd/tools/pwenc.c | 2 +-
|
||||
src/slapi_r_plugin/README.md | 6 +--
|
||||
src/slapi_r_plugin/src/charray.rs | 32 ++++++++++++++
|
||||
src/slapi_r_plugin/src/lib.rs | 8 ++--
|
||||
src/slapi_r_plugin/src/macros.rs | 17 +++++---
|
||||
src/slapi_r_plugin/src/syntax_plugin.rs | 57 +++++++------------------
|
||||
12 files changed, 85 insertions(+), 63 deletions(-)
|
||||
create mode 100644 src/slapi_r_plugin/src/charray.rs
|
||||
|
||||
diff --git a/Makefile.am b/Makefile.am
|
||||
index 627953850..36434cf17 100644
|
||||
--- a/Makefile.am
|
||||
+++ b/Makefile.am
|
||||
@@ -1312,6 +1312,7 @@ rust-nsslapd-private.h: @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a
|
||||
libslapi_r_plugin_SOURCES = \
|
||||
src/slapi_r_plugin/src/backend.rs \
|
||||
src/slapi_r_plugin/src/ber.rs \
|
||||
+ src/slapi_r_plugin/src/charray.rs \
|
||||
src/slapi_r_plugin/src/constants.rs \
|
||||
src/slapi_r_plugin/src/dn.rs \
|
||||
src/slapi_r_plugin/src/entry.rs \
|
||||
diff --git a/configure.ac b/configure.ac
|
||||
index b3cf77d08..61bf35e4a 100644
|
||||
--- a/configure.ac
|
||||
+++ b/configure.ac
|
||||
@@ -122,7 +122,7 @@ if test "$enable_debug" = yes ; then
|
||||
debug_defs="-DDEBUG -DMCC_DEBUG"
|
||||
debug_cflags="-g3 -O0 -rdynamic"
|
||||
debug_cxxflags="-g3 -O0 -rdynamic"
|
||||
- debug_rust_defs="-C debuginfo=2"
|
||||
+ debug_rust_defs="-C debuginfo=2 -Z macro-backtrace"
|
||||
cargo_defs=""
|
||||
rust_target_dir="debug"
|
||||
else
|
||||
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
|
||||
index d8b8798b6..e3444e944 100644
|
||||
--- a/ldap/servers/slapd/pagedresults.c
|
||||
+++ b/ldap/servers/slapd/pagedresults.c
|
||||
@@ -738,10 +738,10 @@ pagedresults_cleanup(Connection *conn, int needlock)
|
||||
int i;
|
||||
PagedResults *prp = NULL;
|
||||
|
||||
- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n");
|
||||
+ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n"); */
|
||||
|
||||
if (NULL == conn) {
|
||||
- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n");
|
||||
+ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n"); */
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -767,7 +767,7 @@ pagedresults_cleanup(Connection *conn, int needlock)
|
||||
if (needlock) {
|
||||
pthread_mutex_unlock(&(conn->c_mutex));
|
||||
}
|
||||
- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc);
|
||||
+ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc); */
|
||||
return rc;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
|
||||
index 1ad9d0399..f7d1f8885 100644
|
||||
--- a/ldap/servers/slapd/pblock.c
|
||||
+++ b/ldap/servers/slapd/pblock.c
|
||||
@@ -3351,13 +3351,15 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
|
||||
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
|
||||
return (-1);
|
||||
}
|
||||
- pblock->pb_plugin->plg_syntax_names = (char **)value;
|
||||
+ PR_ASSERT(pblock->pb_plugin->plg_syntax_names == NULL);
|
||||
+ pblock->pb_plugin->plg_syntax_names = slapi_ch_array_dup((char **)value);
|
||||
break;
|
||||
case SLAPI_PLUGIN_SYNTAX_OID:
|
||||
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
|
||||
return (-1);
|
||||
}
|
||||
- pblock->pb_plugin->plg_syntax_oid = (char *)value;
|
||||
+ PR_ASSERT(pblock->pb_plugin->plg_syntax_oid == NULL);
|
||||
+ pblock->pb_plugin->plg_syntax_oid = slapi_ch_strdup((char *)value);
|
||||
break;
|
||||
case SLAPI_PLUGIN_SYNTAX_FLAGS:
|
||||
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
|
||||
@@ -3806,7 +3808,8 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
|
||||
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
|
||||
return (-1);
|
||||
}
|
||||
- pblock->pb_plugin->plg_mr_names = (char **)value;
|
||||
+ PR_ASSERT(pblock->pb_plugin->plg_mr_names == NULL);
|
||||
+ pblock->pb_plugin->plg_mr_names = slapi_ch_array_dup((char **)value);
|
||||
break;
|
||||
case SLAPI_PLUGIN_MR_COMPARE:
|
||||
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
|
||||
diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c
|
||||
index 282b98738..e6b48de60 100644
|
||||
--- a/ldap/servers/slapd/plugin.c
|
||||
+++ b/ldap/servers/slapd/plugin.c
|
||||
@@ -2694,6 +2694,13 @@ plugin_free(struct slapdplugin *plugin)
|
||||
if (plugin->plg_type == SLAPI_PLUGIN_PWD_STORAGE_SCHEME || plugin->plg_type == SLAPI_PLUGIN_REVER_PWD_STORAGE_SCHEME) {
|
||||
slapi_ch_free_string(&plugin->plg_pwdstorageschemename);
|
||||
}
|
||||
+ if (plugin->plg_type == SLAPI_PLUGIN_SYNTAX) {
|
||||
+ slapi_ch_free_string(&plugin->plg_syntax_oid);
|
||||
+ slapi_ch_array_free(plugin->plg_syntax_names);
|
||||
+ }
|
||||
+ if (plugin->plg_type == SLAPI_PLUGIN_MATCHINGRULE) {
|
||||
+ slapi_ch_array_free(plugin->plg_mr_names);
|
||||
+ }
|
||||
release_componentid(plugin->plg_identity);
|
||||
slapi_counter_destroy(&plugin->plg_op_counter);
|
||||
if (!plugin->plg_group) {
|
||||
diff --git a/ldap/servers/slapd/pw_verify.c b/ldap/servers/slapd/pw_verify.c
|
||||
index 4f0944b73..4ff1fa2fd 100644
|
||||
--- a/ldap/servers/slapd/pw_verify.c
|
||||
+++ b/ldap/servers/slapd/pw_verify.c
|
||||
@@ -111,6 +111,7 @@ pw_verify_token_dn(Slapi_PBlock *pb) {
|
||||
if (fernet_verify_token(dn, cred->bv_val, key, tok_ttl) != 0) {
|
||||
rc = SLAPI_BIND_SUCCESS;
|
||||
}
|
||||
+ slapi_ch_free_string(&key);
|
||||
#endif
|
||||
return rc;
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/tools/pwenc.c b/ldap/servers/slapd/tools/pwenc.c
|
||||
index 1629c06cd..d89225e34 100644
|
||||
--- a/ldap/servers/slapd/tools/pwenc.c
|
||||
+++ b/ldap/servers/slapd/tools/pwenc.c
|
||||
@@ -34,7 +34,7 @@
|
||||
|
||||
int ldap_syslog;
|
||||
int ldap_syslog_level;
|
||||
-int slapd_ldap_debug = LDAP_DEBUG_ANY;
|
||||
+/* int slapd_ldap_debug = LDAP_DEBUG_ANY; */
|
||||
int detached;
|
||||
FILE *error_logfp;
|
||||
FILE *access_logfp;
|
||||
diff --git a/src/slapi_r_plugin/README.md b/src/slapi_r_plugin/README.md
|
||||
index af9743ec9..1c9bcbf17 100644
|
||||
--- a/src/slapi_r_plugin/README.md
|
||||
+++ b/src/slapi_r_plugin/README.md
|
||||
@@ -15,7 +15,7 @@ the [Rust Nomicon](https://doc.rust-lang.org/nomicon/index.html)
|
||||
> warning about danger.
|
||||
|
||||
This document will not detail the specifics of unsafe or the invariants you must adhere to for rust
|
||||
-to work with C.
|
||||
+to work with C. Failure to uphold these invariants will lead to less than optimal consequences.
|
||||
|
||||
If you still want to see more about the plugin bindings, go on ...
|
||||
|
||||
@@ -135,7 +135,7 @@ associated functions.
|
||||
Now, you may notice that not all members of the trait are implemented. This is due to a feature
|
||||
of rust known as default trait impls. This allows the trait origin (src/plugin.rs) to provide
|
||||
template versions of these functions. If you "overwrite" them, your implementation is used. Unlike
|
||||
-OO, you may not inherit or call the default function.
|
||||
+OO, you may not inherit or call the default function.
|
||||
|
||||
If a default is not provided you *must* implement that function to be considered valid. Today (20200422)
|
||||
this only applies to `start` and `close`.
|
||||
@@ -183,7 +183,7 @@ It's important to understand how Rust manages memory both on the stack and the h
|
||||
As a result, this means that we must express in code, assertions about the proper ownership of memory
|
||||
and who is responsible for it (unlike C, where it can be hard to determine who or what is responsible
|
||||
for freeing some value.) Failure to handle this correctly, can and will lead to crashes, leaks or
|
||||
-*hand waving* magical failures that are eXtReMeLy FuN to debug.
|
||||
+*hand waving* magical failures that are `eXtReMeLy FuN` to debug.
|
||||
|
||||
### Reference Types
|
||||
|
||||
diff --git a/src/slapi_r_plugin/src/charray.rs b/src/slapi_r_plugin/src/charray.rs
|
||||
new file mode 100644
|
||||
index 000000000..d2e44693c
|
||||
--- /dev/null
|
||||
+++ b/src/slapi_r_plugin/src/charray.rs
|
||||
@@ -0,0 +1,32 @@
|
||||
+use std::ffi::CString;
|
||||
+use std::iter::once;
|
||||
+use std::os::raw::c_char;
|
||||
+use std::ptr;
|
||||
+
|
||||
+pub struct Charray {
|
||||
+ pin: Vec<CString>,
|
||||
+ charray: Vec<*const c_char>,
|
||||
+}
|
||||
+
|
||||
+impl Charray {
|
||||
+ pub fn new(input: &[&str]) -> Result<Self, ()> {
|
||||
+ let pin: Result<Vec<_>, ()> = input
|
||||
+ .iter()
|
||||
+ .map(|s| CString::new(*s).map_err(|_e| ()))
|
||||
+ .collect();
|
||||
+
|
||||
+ let pin = pin?;
|
||||
+
|
||||
+ let charray: Vec<_> = pin
|
||||
+ .iter()
|
||||
+ .map(|s| s.as_ptr())
|
||||
+ .chain(once(ptr::null()))
|
||||
+ .collect();
|
||||
+
|
||||
+ Ok(Charray { pin, charray })
|
||||
+ }
|
||||
+
|
||||
+ pub fn as_ptr(&self) -> *const *const c_char {
|
||||
+ self.charray.as_ptr()
|
||||
+ }
|
||||
+}
|
||||
diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs
|
||||
index 076907bae..be28cac95 100644
|
||||
--- a/src/slapi_r_plugin/src/lib.rs
|
||||
+++ b/src/slapi_r_plugin/src/lib.rs
|
||||
@@ -1,9 +1,11 @@
|
||||
-// extern crate lazy_static;
|
||||
+#[macro_use]
|
||||
+extern crate lazy_static;
|
||||
|
||||
#[macro_use]
|
||||
pub mod macros;
|
||||
pub mod backend;
|
||||
pub mod ber;
|
||||
+pub mod charray;
|
||||
mod constants;
|
||||
pub mod dn;
|
||||
pub mod entry;
|
||||
@@ -20,6 +22,7 @@ pub mod value;
|
||||
pub mod prelude {
|
||||
pub use crate::backend::{BackendRef, BackendRefTxn};
|
||||
pub use crate::ber::BerValRef;
|
||||
+ pub use crate::charray::Charray;
|
||||
pub use crate::constants::{FilterType, PluginFnType, PluginType, PluginVersion, LDAP_SUCCESS};
|
||||
pub use crate::dn::{Sdn, SdnRef};
|
||||
pub use crate::entry::EntryRef;
|
||||
@@ -30,8 +33,7 @@ pub mod prelude {
|
||||
pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3};
|
||||
pub use crate::search::{Search, SearchScope};
|
||||
pub use crate::syntax_plugin::{
|
||||
- matchingrule_register, name_to_leaking_char, names_to_leaking_char_array, SlapiOrdMr,
|
||||
- SlapiSubMr, SlapiSyntaxPlugin1,
|
||||
+ matchingrule_register, SlapiOrdMr, SlapiSubMr, SlapiSyntaxPlugin1,
|
||||
};
|
||||
pub use crate::task::{task_register_handler_fn, task_unregister_handler_fn, Task, TaskRef};
|
||||
pub use crate::value::{Value, ValueArray, ValueArrayRef, ValueRef};
|
||||
diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs
|
||||
index bc8dfa60f..97fc5d7ef 100644
|
||||
--- a/src/slapi_r_plugin/src/macros.rs
|
||||
+++ b/src/slapi_r_plugin/src/macros.rs
|
||||
@@ -249,6 +249,7 @@ macro_rules! slapi_r_syntax_plugin_hooks {
|
||||
paste::item! {
|
||||
use libc;
|
||||
use std::convert::TryFrom;
|
||||
+ use std::ffi::CString;
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn [<$mod_ident _plugin_init>](raw_pb: *const libc::c_void) -> i32 {
|
||||
@@ -261,15 +262,15 @@ macro_rules! slapi_r_syntax_plugin_hooks {
|
||||
};
|
||||
|
||||
// Setup the names/oids that this plugin provides syntaxes for.
|
||||
-
|
||||
- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::attr_supported_names()) };
|
||||
- match pb.register_syntax_names(name_ptr) {
|
||||
+ // DS will clone these, so they can be ephemeral to this function.
|
||||
+ let name_vec = Charray::new($hooks_ident::attr_supported_names().as_slice()).expect("invalid supported names");
|
||||
+ match pb.register_syntax_names(name_vec.as_ptr()) {
|
||||
0 => {},
|
||||
e => return e,
|
||||
};
|
||||
|
||||
- let name_ptr = unsafe { name_to_leaking_char($hooks_ident::attr_oid()) };
|
||||
- match pb.register_syntax_oid(name_ptr) {
|
||||
+ let attr_oid = CString::new($hooks_ident::attr_oid()).expect("invalid attr oid");
|
||||
+ match pb.register_syntax_oid(attr_oid.as_ptr()) {
|
||||
0 => {},
|
||||
e => return e,
|
||||
};
|
||||
@@ -430,7 +431,8 @@ macro_rules! slapi_r_syntax_plugin_hooks {
|
||||
e => return e,
|
||||
};
|
||||
|
||||
- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::eq_mr_supported_names()) };
|
||||
+ let name_vec = Charray::new($hooks_ident::eq_mr_supported_names().as_slice()).expect("invalid mr supported names");
|
||||
+ let name_ptr = name_vec.as_ptr();
|
||||
// SLAPI_PLUGIN_MR_NAMES
|
||||
match pb.register_mr_names(name_ptr) {
|
||||
0 => {},
|
||||
@@ -672,7 +674,8 @@ macro_rules! slapi_r_syntax_plugin_hooks {
|
||||
e => return e,
|
||||
};
|
||||
|
||||
- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::ord_mr_supported_names()) };
|
||||
+ let name_vec = Charray::new($hooks_ident::ord_mr_supported_names().as_slice()).expect("invalid ord supported names");
|
||||
+ let name_ptr = name_vec.as_ptr();
|
||||
// SLAPI_PLUGIN_MR_NAMES
|
||||
match pb.register_mr_names(name_ptr) {
|
||||
0 => {},
|
||||
diff --git a/src/slapi_r_plugin/src/syntax_plugin.rs b/src/slapi_r_plugin/src/syntax_plugin.rs
|
||||
index e7d5c01bd..86f84bdd8 100644
|
||||
--- a/src/slapi_r_plugin/src/syntax_plugin.rs
|
||||
+++ b/src/slapi_r_plugin/src/syntax_plugin.rs
|
||||
@@ -1,11 +1,11 @@
|
||||
use crate::ber::BerValRef;
|
||||
// use crate::constants::FilterType;
|
||||
+use crate::charray::Charray;
|
||||
use crate::error::PluginError;
|
||||
use crate::pblock::PblockRef;
|
||||
use crate::value::{ValueArray, ValueArrayRef};
|
||||
use std::cmp::Ordering;
|
||||
use std::ffi::CString;
|
||||
-use std::iter::once;
|
||||
use std::os::raw::c_char;
|
||||
use std::ptr;
|
||||
|
||||
@@ -26,37 +26,6 @@ struct slapi_matchingRuleEntry {
|
||||
mr_compat_syntax: *const *const c_char,
|
||||
}
|
||||
|
||||
-pub unsafe fn name_to_leaking_char(name: &str) -> *const c_char {
|
||||
- let n = CString::new(name)
|
||||
- .expect("An invalid string has been hardcoded!")
|
||||
- .into_boxed_c_str();
|
||||
- let n_ptr = n.as_ptr();
|
||||
- // Now we intentionally leak the name here, and the pointer will remain valid.
|
||||
- Box::leak(n);
|
||||
- n_ptr
|
||||
-}
|
||||
-
|
||||
-pub unsafe fn names_to_leaking_char_array(names: &[&str]) -> *const *const c_char {
|
||||
- let n_arr: Vec<CString> = names
|
||||
- .iter()
|
||||
- .map(|s| CString::new(*s).expect("An invalid string has been hardcoded!"))
|
||||
- .collect();
|
||||
- let n_arr = n_arr.into_boxed_slice();
|
||||
- let n_ptr_arr: Vec<*const c_char> = n_arr
|
||||
- .iter()
|
||||
- .map(|v| v.as_ptr())
|
||||
- .chain(once(ptr::null()))
|
||||
- .collect();
|
||||
- let n_ptr_arr = n_ptr_arr.into_boxed_slice();
|
||||
-
|
||||
- // Now we intentionally leak these names here,
|
||||
- let _r_n_arr = Box::leak(n_arr);
|
||||
- let r_n_ptr_arr = Box::leak(n_ptr_arr);
|
||||
-
|
||||
- let name_ptr = r_n_ptr_arr as *const _ as *const *const c_char;
|
||||
- name_ptr
|
||||
-}
|
||||
-
|
||||
// oid - the oid of the matching rule
|
||||
// name - the name of the mr
|
||||
// desc - description
|
||||
@@ -69,20 +38,24 @@ pub unsafe fn matchingrule_register(
|
||||
syntax: &str,
|
||||
compat_syntax: &[&str],
|
||||
) -> i32 {
|
||||
- let oid_ptr = name_to_leaking_char(oid);
|
||||
- let name_ptr = name_to_leaking_char(name);
|
||||
- let desc_ptr = name_to_leaking_char(desc);
|
||||
- let syntax_ptr = name_to_leaking_char(syntax);
|
||||
- let compat_syntax_ptr = names_to_leaking_char_array(compat_syntax);
|
||||
+ // Make everything CStrings that live long enough.
|
||||
+
|
||||
+ let oid_cs = CString::new(oid).expect("invalid oid");
|
||||
+ let name_cs = CString::new(name).expect("invalid name");
|
||||
+ let desc_cs = CString::new(desc).expect("invalid desc");
|
||||
+ let syntax_cs = CString::new(syntax).expect("invalid syntax");
|
||||
+
|
||||
+ // We have to do this so the cstrings live long enough.
|
||||
+ let compat_syntax_ca = Charray::new(compat_syntax).expect("invalid compat_syntax");
|
||||
|
||||
let new_mr = slapi_matchingRuleEntry {
|
||||
- mr_oid: oid_ptr,
|
||||
+ mr_oid: oid_cs.as_ptr(),
|
||||
_mr_oidalias: ptr::null(),
|
||||
- mr_name: name_ptr,
|
||||
- mr_desc: desc_ptr,
|
||||
- mr_syntax: syntax_ptr,
|
||||
+ mr_name: name_cs.as_ptr(),
|
||||
+ mr_desc: desc_cs.as_ptr(),
|
||||
+ mr_syntax: syntax_cs.as_ptr(),
|
||||
_mr_obsolete: 0,
|
||||
- mr_compat_syntax: compat_syntax_ptr,
|
||||
+ mr_compat_syntax: compat_syntax_ca.as_ptr(),
|
||||
};
|
||||
|
||||
let new_mr_ptr = &new_mr as *const _;
|
||||
--
|
||||
2.26.3
|
||||
|
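The ownership rule this patch adopts can be stated simply: the registry duplicates whatever name strings it is handed and frees its own copy when the plugin is torn down, so callers may pass ephemeral strings. Below is a minimal sketch of that pattern using plain libc calls; the struct and function names are illustrative, not the slapi API, and error handling is omitted.

/* Illustrative sketch of the "dup on set, free on teardown" ownership pattern. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct plugin {
    char **syntax_names;                       /* owned, NULL-terminated copy */
} plugin;

static char **array_dup(char **src)
{
    size_t n = 0;
    while (src && src[n])
        n++;
    char **copy = calloc(n + 1, sizeof(char *));   /* copy[n] stays NULL */
    for (size_t i = 0; i < n; i++)
        copy[i] = strdup(src[i]);
    return copy;
}

static void array_free(char **arr)
{
    for (size_t i = 0; arr && arr[i]; i++)
        free(arr[i]);
    free(arr);
}

static void plugin_set_syntax_names(plugin *p, char **names)
{
    p->syntax_names = array_dup(names);        /* caller's strings may be ephemeral */
}

static void plugin_free(plugin *p)
{
    array_free(p->syntax_names);               /* registry frees its own copy */
}

int main(void)
{
    char *ephemeral[] = {"caseIgnoreMatch", "caseIgnoreOrderingMatch", NULL};
    plugin p = {0};

    plugin_set_syntax_names(&p, ephemeral);
    printf("registered: %s\n", p.syntax_names[0]);
    plugin_free(&p);
    return 0;
}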
@ -0,0 +1,37 @@
|
||||
From 40e9a4835a6e95f021a711a7c42ce0c1bddc5ba4 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Fri, 21 May 2021 13:09:12 -0400
|
||||
Subject: [PATCH 08/12] Issue 4773 - Enable interval feature of DNA plugin
|
||||
|
||||
Description: Enable the dormant interval feature in DNA plugin
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4773
|
||||
|
||||
Review by: mreynolds (one line commit rule)
|
||||
---
|
||||
ldap/servers/plugins/dna/dna.c | 2 --
|
||||
1 file changed, 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
|
||||
index bf6b74a99..928a3f54a 100644
|
||||
--- a/ldap/servers/plugins/dna/dna.c
|
||||
+++ b/ldap/servers/plugins/dna/dna.c
|
||||
@@ -1023,7 +1023,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
|
||||
/* Set the default interval to 1 */
|
||||
entry->interval = 1;
|
||||
|
||||
-#ifdef DNA_ENABLE_INTERVAL
|
||||
value = slapi_entry_attr_get_charptr(e, DNA_INTERVAL);
|
||||
if (value) {
|
||||
entry->interval = strtoull(value, 0, 0);
|
||||
@@ -1032,7 +1031,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
|
||||
|
||||
slapi_log_err(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM,
|
||||
"dna_parse_config_entry - %s [%" PRIu64 "]\n", DNA_INTERVAL, entry->interval);
|
||||
-#endif
|
||||
|
||||
value = slapi_entry_attr_get_charptr(e, DNA_GENERATE);
|
||||
if (value) {
|
||||
--
|
||||
2.26.3
|
||||
|
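The code this patch un-ifdefs follows a common configuration pattern: an optional attribute with a built-in default, parsed with strtoull when present. A tiny self-contained sketch of that pattern is below; get_attr() is a hypothetical stand-in for slapi_entry_attr_get_charptr(), and the attribute name is only an example.

/* Illustrative sketch: optional config attribute with a default value. */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *get_attr(const char *name)
{
    /* pretend the config entry carries dnaInterval: 10 */
    return strcmp(name, "dnaInterval") == 0 ? strdup("10") : NULL;
}

int main(void)
{
    uint64_t interval = 1;            /* default when the attribute is absent */
    char *value = get_attr("dnaInterval");

    if (value) {
        interval = strtoull(value, NULL, 0);
        free(value);
    }
    printf("dnaInterval=%" PRIu64 "\n", interval);
    return 0;
}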
@ -1,143 +0,0 @@
|
||||
From 6e5f03d5872129963106024f53765234a282406c Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Fri, 16 Feb 2024 11:13:16 +0000
|
||||
Subject: [PATCH] Issue 6096 - Improve connection timeout error logging (#6097)
|
||||
|
||||
Bug description: When a paged result search is run with a time limit,
|
||||
if the time limit is exceed the server closes the connection with
|
||||
closed IO timeout (nsslapd-ioblocktimeout) - T2. This error message
|
||||
is incorrect as the reason the connection has been closed was because
|
||||
the specified time limit on a paged result search has been exceeded.
|
||||
|
||||
Fix description: Correct error message
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6096
|
||||
|
||||
Reviewed by: @tbordaz (Thank you)
|
||||
---
|
||||
ldap/admin/src/logconv.pl | 24 ++++++++++++++++++-
|
||||
ldap/servers/slapd/daemon.c | 4 ++--
|
||||
ldap/servers/slapd/disconnect_error_strings.h | 1 +
|
||||
ldap/servers/slapd/disconnect_errors.h | 2 +-
|
||||
4 files changed, 27 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl
|
||||
index 7698c383a..2a933c4a3 100755
|
||||
--- a/ldap/admin/src/logconv.pl
|
||||
+++ b/ldap/admin/src/logconv.pl
|
||||
@@ -267,7 +267,7 @@ my $optimeAvg = 0;
|
||||
my %cipher = ();
|
||||
my @removefiles = ();
|
||||
|
||||
-my @conncodes = qw(A1 B1 B4 T1 T2 B2 B3 R1 P1 P2 U1);
|
||||
+my @conncodes = qw(A1 B1 B4 T1 T2 T3 B2 B3 R1 P1 P2 U1);
|
||||
my %conn = ();
|
||||
map {$conn{$_} = $_} @conncodes;
|
||||
|
||||
@@ -355,6 +355,7 @@ $connmsg{"B1"} = "Bad Ber Tag Encountered";
|
||||
$connmsg{"B4"} = "Server failed to flush data (response) back to Client";
|
||||
$connmsg{"T1"} = "Idle Timeout Exceeded";
|
||||
$connmsg{"T2"} = "IO Block Timeout Exceeded or NTSSL Timeout";
|
||||
+$connmsg{"T3"} = "Paged Search Time Limit Exceeded";
|
||||
$connmsg{"B2"} = "Ber Too Big";
|
||||
$connmsg{"B3"} = "Ber Peek";
|
||||
$connmsg{"R1"} = "Revents";
|
||||
@@ -1723,6 +1724,10 @@ if ($usage =~ /j/i || $verb eq "yes"){
|
||||
print "\n $recCount. You have some connections that are being closed by the ioblocktimeout setting. You may want to increase the ioblocktimeout.\n";
|
||||
$recCount++;
|
||||
}
|
||||
+ if (defined($conncount->{"T3"}) and $conncount->{"T3"} > 0){
|
||||
+ print "\n $recCount. You have some connections that are being closed because a paged result search limit has been exceeded. You may want to increase the search time limit.\n";
|
||||
+ $recCount++;
|
||||
+ }
|
||||
# compare binds to unbinds, if the difference is more than 30% of the binds, then report a issue
|
||||
if (($bindCount - $unbindCount) > ($bindCount*.3)){
|
||||
print "\n $recCount. You have a significant difference between binds and unbinds. You may want to investigate this difference.\n";
|
||||
@@ -2366,6 +2371,7 @@ sub parseLineNormal
|
||||
$brokenPipeCount++;
|
||||
if (m/- T1/){ $hashes->{rc}->{"T1"}++; }
|
||||
elsif (m/- T2/){ $hashes->{rc}->{"T2"}++; }
|
||||
+ elsif (m/- T3/){ $hashes->{rc}->{"T3"}++; }
|
||||
elsif (m/- A1/){ $hashes->{rc}->{"A1"}++; }
|
||||
elsif (m/- B1/){ $hashes->{rc}->{"B1"}++; }
|
||||
elsif (m/- B4/){ $hashes->{rc}->{"B4"}++; }
|
||||
@@ -2381,6 +2387,7 @@ sub parseLineNormal
|
||||
$connResetByPeerCount++;
|
||||
if (m/- T1/){ $hashes->{src}->{"T1"}++; }
|
||||
elsif (m/- T2/){ $hashes->{src}->{"T2"}++; }
|
||||
+ elsif (m/- T3/){ $hashes->{src}->{"T3"}++; }
|
||||
elsif (m/- A1/){ $hashes->{src}->{"A1"}++; }
|
||||
elsif (m/- B1/){ $hashes->{src}->{"B1"}++; }
|
||||
elsif (m/- B4/){ $hashes->{src}->{"B4"}++; }
|
||||
@@ -2396,6 +2403,7 @@ sub parseLineNormal
|
||||
$resourceUnavailCount++;
|
||||
if (m/- T1/){ $hashes->{rsrc}->{"T1"}++; }
|
||||
elsif (m/- T2/){ $hashes->{rsrc}->{"T2"}++; }
|
||||
+ elsif (m/- T3/){ $hashes->{rsrc}->{"T3"}++; }
|
||||
elsif (m/- A1/){ $hashes->{rsrc}->{"A1"}++; }
|
||||
elsif (m/- B1/){ $hashes->{rsrc}->{"B1"}++; }
|
||||
elsif (m/- B4/){ $hashes->{rsrc}->{"B4"}++; }
|
||||
@@ -2494,6 +2502,20 @@ sub parseLineNormal
|
||||
}
|
||||
}
|
||||
}
|
||||
+ if (m/- T3/){
|
||||
+ if ($_ =~ /conn= *([0-9A-Z]+)/i) {
|
||||
+ $exc = "no";
|
||||
+ $ip = getIPfromConn($1, $serverRestartCount);
|
||||
+ for (my $xxx = 0; $xxx < $#excludeIP; $xxx++){
|
||||
+ if ($ip eq $excludeIP[$xxx]){$exc = "yes";}
|
||||
+ }
|
||||
+ if ($exc ne "yes"){
|
||||
+ $hashes->{T3}->{$ip}++;
|
||||
+ $hashes->{conncount}->{"T3"}++;
|
||||
+ $connCodeCount++;
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
if (m/- B2/){
|
||||
if ($_ =~ /conn= *([0-9A-Z]+)/i) {
|
||||
$exc = "no";
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 5a48aa66f..bb80dae36 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1599,9 +1599,9 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
int add_fd = 1;
|
||||
/* check timeout for PAGED RESULTS */
|
||||
if (pagedresults_is_timedout_nolock(c)) {
|
||||
- /* Exceeded the timelimit; disconnect the client */
|
||||
+ /* Exceeded the paged search timelimit; disconnect the client */
|
||||
disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
- SLAPD_DISCONNECT_IO_TIMEOUT,
|
||||
+ SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
|
||||
0);
|
||||
connection_table_move_connection_out_of_active_list(ct,
|
||||
c);
|
||||
diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
index f7a31d728..c2d9e283b 100644
|
||||
--- a/ldap/servers/slapd/disconnect_error_strings.h
|
||||
+++ b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
@@ -27,6 +27,7 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
|
||||
ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
|
||||
ER2(SLAPD_DISCONNECT_REVENTS, "R1")
|
||||
ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
|
||||
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
|
||||
ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
|
||||
ER2(SLAPD_DISCONNECT_UNBIND, "U1")
|
||||
ER2(SLAPD_DISCONNECT_POLL, "P2")
|
||||
diff --git a/ldap/servers/slapd/disconnect_errors.h b/ldap/servers/slapd/disconnect_errors.h
|
||||
index a0484f1c2..e118f674c 100644
|
||||
--- a/ldap/servers/slapd/disconnect_errors.h
|
||||
+++ b/ldap/servers/slapd/disconnect_errors.h
|
||||
@@ -35,6 +35,6 @@
|
||||
#define SLAPD_DISCONNECT_SASL_FAIL SLAPD_DISCONNECT_ERROR_BASE + 12
|
||||
#define SLAPD_DISCONNECT_PROXY_INVALID_HEADER SLAPD_DISCONNECT_ERROR_BASE + 13
|
||||
#define SLAPD_DISCONNECT_PROXY_UNKNOWN SLAPD_DISCONNECT_ERROR_BASE + 14
|
||||
-
|
||||
+#define SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT SLAPD_DISCONNECT_ERROR_BASE + 15
|
||||
|
||||
#endif /* __DISCONNECT_ERRORS_H_ */
|
||||
--
|
||||
2.45.0
|
||||
|
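The new T3 code extends an existing code-to-reason mapping that both the server and logconv.pl rely on. The sketch below shows such a lookup in isolation; the table only repeats messages quoted in the patch and is not the real disconnect_error_strings.h.

/* Illustrative sketch of a disconnect-code lookup including the new T3 entry. */
#include <stdio.h>
#include <string.h>

struct disconnect_reason {
    const char *code;
    const char *message;
};

static const struct disconnect_reason reasons[] = {
    {"T1", "Idle Timeout Exceeded"},
    {"T2", "IO Block Timeout Exceeded or NTSSL Timeout"},
    {"T3", "Paged Search Time Limit Exceeded"},
    {"B2", "Ber Too Big"},
};

static const char *reason_for(const char *code)
{
    for (size_t i = 0; i < sizeof(reasons) / sizeof(reasons[0]); i++)
        if (strcmp(reasons[i].code, code) == 0)
            return reasons[i].message;
    return "Unknown";
}

int main(void)
{
    printf("T3 -> %s\n", reason_for("T3"));
    return 0;
}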
@ -0,0 +1,926 @@
|
||||
From 8df95679519364d0993572ecbea72ab89e5250a5 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Thu, 20 May 2021 14:24:25 +0200
|
||||
Subject: [PATCH 09/12] Issue 4623 - RFE - Monitor the current DB locks (#4762)
|
||||
|
||||
Description: DB lock gets exhausted because of unindexed internal searches
|
||||
(under a transaction). Indexing those searches is the way to prevent exhaustion.
|
||||
If db locks get exhausted during a txn, it leads to a db panic and the later recovery
|
||||
can possibly fail. That leads to a full reinit of the instance where the db locks
|
||||
got exhausted.
|
||||
|
||||
Add three attributes to global BDB config: "nsslapd-db-locks-monitoring-enabled",
|
||||
"nsslapd-db-locks-monitoring-threshold" and "nsslapd-db-locks-monitoring-pause".
|
||||
By default, nsslapd-db-locks-monitoring-enabled is turned on, nsslapd-db-locks-monitoring-threshold is set to 90% and nsslapd-db-locks-monitoring-pause is set to 500ms.
|
||||
|
||||
When the current number of locks gets close to the configured threshold (90% of the maximum by default), returning
|
||||
the next candidate entry will fail until the maximum number of locks is
|
||||
increased or current locks are released.
|
||||
The monitoring thread runs with the configurable interval of 500ms.
|
||||
|
||||
Add the setting to UI and CLI tools.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/4623
|
||||
|
||||
Reviewed by: @Firstyear, @tbordaz, @jchapma, @mreynolds389 (Thank you!!)
|
||||
---
|
||||
.../suites/monitor/db_locks_monitor_test.py | 251 ++++++++++++++++++
|
||||
ldap/servers/slapd/back-ldbm/back-ldbm.h | 13 +-
|
||||
.../slapd/back-ldbm/db-bdb/bdb_config.c | 99 +++++++
|
||||
.../slapd/back-ldbm/db-bdb/bdb_layer.c | 85 ++++++
|
||||
ldap/servers/slapd/back-ldbm/init.c | 3 +
|
||||
ldap/servers/slapd/back-ldbm/ldbm_config.c | 3 +
|
||||
ldap/servers/slapd/back-ldbm/ldbm_config.h | 3 +
|
||||
ldap/servers/slapd/back-ldbm/ldbm_search.c | 13 +
|
||||
ldap/servers/slapd/libglobs.c | 4 +-
|
||||
src/cockpit/389-console/src/css/ds.css | 4 +
|
||||
src/cockpit/389-console/src/database.jsx | 7 +
|
||||
src/cockpit/389-console/src/index.html | 2 +-
|
||||
.../src/lib/database/databaseConfig.jsx | 88 +++++-
|
||||
src/lib389/lib389/backend.py | 3 +
|
||||
src/lib389/lib389/cli_conf/backend.py | 10 +
|
||||
15 files changed, 576 insertions(+), 12 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
|
||||
new file mode 100644
|
||||
index 000000000..7f9938f30
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
|
||||
@@ -0,0 +1,251 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2021 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import logging
|
||||
+import pytest
|
||||
+import datetime
|
||||
+import subprocess
|
||||
+from multiprocessing import Process, Queue
|
||||
+from lib389 import pid_from_file
|
||||
+from lib389.utils import ldap, os
|
||||
+from lib389._constants import DEFAULT_SUFFIX, ReplicaRole
|
||||
+from lib389.cli_base import LogCapture
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
+from lib389.tasks import AccessLog
|
||||
+from lib389.backend import Backends
|
||||
+from lib389.ldclt import Ldclt
|
||||
+from lib389.dbgen import dbgen_users
|
||||
+from lib389.tasks import ImportTask
|
||||
+from lib389.index import Indexes
|
||||
+from lib389.plugins import AttributeUniquenessPlugin
|
||||
+from lib389.config import BDB_LDBMConfig
|
||||
+from lib389.monitor import MonitorLDBM
|
||||
+from lib389.topologies import create_topology, _remove_ssca_db
|
||||
+
|
||||
+pytestmark = pytest.mark.tier2
|
||||
+db_locks_monitoring_ack = pytest.mark.skipif(not os.environ.get('DB_LOCKS_MONITORING_ACK', False),
|
||||
+ reason="DB locks monitoring tests may take hours if the feature is not present or another failure exists. "
|
||||
+ "Also, the feature requires a big amount of space as we set nsslapd-db-locks to 1300000.")
|
||||
+
|
||||
+DEBUGGING = os.getenv('DEBUGGING', default=False)
|
||||
+if DEBUGGING:
|
||||
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+else:
|
||||
+ logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+
|
||||
+def _kill_ns_slapd(inst):
|
||||
+ pid = str(pid_from_file(inst.ds_paths.pid_file))
|
||||
+ cmd = ['kill', '-9', pid]
|
||||
+ subprocess.Popen(cmd, stdout=subprocess.PIPE)
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="function")
|
||||
+def topology_st_fn(request):
|
||||
+ """Create DS standalone instance for each test case"""
|
||||
+
|
||||
+ topology = create_topology({ReplicaRole.STANDALONE: 1})
|
||||
+
|
||||
+ def fin():
|
||||
+ # Kill the hanging process at the end of test to prevent failures in the following tests
|
||||
+ if DEBUGGING:
|
||||
+ [_kill_ns_slapd(inst) for inst in topology]
|
||||
+ else:
|
||||
+ [_kill_ns_slapd(inst) for inst in topology]
|
||||
+ assert _remove_ssca_db(topology)
|
||||
+ [inst.stop() for inst in topology if inst.exists()]
|
||||
+ [inst.delete() for inst in topology if inst.exists()]
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ topology.logcap = LogCapture()
|
||||
+ return topology
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="function")
|
||||
+def setup_attruniq_index_be_import(topology_st_fn):
|
||||
+ """Enable Attribute Uniqueness, disable indexes and
|
||||
+ import 120000 entries to the default backend
|
||||
+ """
|
||||
+ inst = topology_st_fn.standalone
|
||||
+
|
||||
+ inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access')
|
||||
+ inst.config.set('nsslapd-plugin-logging', 'on')
|
||||
+ inst.restart()
|
||||
+
|
||||
+ attruniq = AttributeUniquenessPlugin(inst, dn="cn=attruniq,cn=plugins,cn=config")
|
||||
+ attruniq.create(properties={'cn': 'attruniq'})
|
||||
+ for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']:
|
||||
+ attruniq.add_unique_attribute(cn)
|
||||
+ attruniq.add_unique_subtree(DEFAULT_SUFFIX)
|
||||
+ attruniq.enable_all_subtrees()
|
||||
+ attruniq.enable()
|
||||
+
|
||||
+ indexes = Indexes(inst)
|
||||
+ for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']:
|
||||
+ indexes.ensure_state(properties={
|
||||
+ 'cn': cn,
|
||||
+ 'nsSystemIndex': 'false',
|
||||
+ 'nsIndexType': 'none'})
|
||||
+
|
||||
+ bdb_config = BDB_LDBMConfig(inst)
|
||||
+ bdb_config.replace("nsslapd-db-locks", "130000")
|
||||
+ inst.restart()
|
||||
+
|
||||
+ ldif_dir = inst.get_ldif_dir()
|
||||
+ import_ldif = ldif_dir + '/perf_import.ldif'
|
||||
+
|
||||
+ # Valid online import
|
||||
+ import_task = ImportTask(inst)
|
||||
+ dbgen_users(inst, 120000, import_ldif, DEFAULT_SUFFIX, entry_name="userNew")
|
||||
+ import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
|
||||
+ import_task.wait()
|
||||
+ assert import_task.is_complete()
|
||||
+
|
||||
+
|
||||
+def create_user_wrapper(q, users):
|
||||
+ try:
|
||||
+ users.create_test_user()
|
||||
+ except Exception as ex:
|
||||
+ q.put(ex)
|
||||
+
|
||||
+
|
||||
+def spawn_worker_thread(function, users, log, timeout, info):
|
||||
+ log.info(f"Starting the thread - {info}")
|
||||
+ q = Queue()
|
||||
+ p = Process(target=function, args=(q,users,))
|
||||
+ p.start()
|
||||
+
|
||||
+ log.info(f"Waiting for {timeout} seconds for the thread to finish")
|
||||
+ p.join(timeout)
|
||||
+
|
||||
+ if p.is_alive():
|
||||
+ log.info("Killing the thread as it's still running")
|
||||
+ p.terminate()
|
||||
+ p.join()
|
||||
+ raise RuntimeError(f"Function call was aborted: {info}")
|
||||
+ result = q.get()
|
||||
+ if isinstance(result, Exception):
|
||||
+ raise result
|
||||
+ else:
|
||||
+ return result
|
||||
+
|
||||
+
|
||||
+@db_locks_monitoring_ack
|
||||
+@pytest.mark.parametrize("lock_threshold", [("70"), ("80"), ("95")])
|
||||
+def test_exhaust_db_locks_basic(topology_st_fn, setup_attruniq_index_be_import, lock_threshold):
|
||||
+ """Test that when all of the locks are exhausted the instance still working
|
||||
+ and database is not corrupted
|
||||
+
|
||||
+ :id: 299108cc-04d8-4ddc-b58e-99157fccd643
|
||||
+ :setup: Standalone instance with Attr Uniq plugin and user indexes disabled
|
||||
+ :steps: 1. Set nsslapd-db-locks to 11000
|
||||
+ 2. Check that we stop acquiring new locks when the threshold is reached
|
||||
+ 3. Check that we can regulate a pause interval for DB locks monitoring thread
|
||||
+ 4. Make sure the feature works for different backends on the same suffix
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st_fn.standalone
|
||||
+ ADDITIONAL_SUFFIX = 'ou=newpeople,dc=example,dc=com'
|
||||
+
|
||||
+ backends = Backends(inst)
|
||||
+ backends.create(properties={'nsslapd-suffix': ADDITIONAL_SUFFIX,
|
||||
+ 'name': ADDITIONAL_SUFFIX[-3:]})
|
||||
+ ous = OrganizationalUnits(inst, DEFAULT_SUFFIX)
|
||||
+ ous.create(properties={'ou': 'newpeople'})
|
||||
+
|
||||
+ bdb_config = BDB_LDBMConfig(inst)
|
||||
+ bdb_config.replace("nsslapd-db-locks", "11000")
|
||||
+
|
||||
+ # Restart server
|
||||
+ inst.restart()
|
||||
+
|
||||
+ for lock_enabled in ["on", "off"]:
|
||||
+ for lock_pause in ["100", "500", "1000"]:
|
||||
+ bdb_config.replace("nsslapd-db-locks-monitoring-enabled", lock_enabled)
|
||||
+ bdb_config.replace("nsslapd-db-locks-monitoring-threshold", lock_threshold)
|
||||
+ bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause)
|
||||
+ inst.restart()
|
||||
+
|
||||
+ if lock_enabled == "off":
|
||||
+ raised_exception = (RuntimeError, ldap.SERVER_DOWN)
|
||||
+ else:
|
||||
+ raised_exception = ldap.OPERATIONS_ERROR
|
||||
+
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ with pytest.raises(raised_exception):
|
||||
+ spawn_worker_thread(create_user_wrapper, users, log, 30,
|
||||
+ f"Adding user with monitoring enabled='{lock_enabled}'; "
|
||||
+ f"threshold='{lock_threshold}'; pause='{lock_pause}'.")
|
||||
+ # Restart because we already run out of locks and the next unindexed searches will fail eventually
|
||||
+ if lock_enabled == "off":
|
||||
+ _kill_ns_slapd(inst)
|
||||
+ inst.restart()
|
||||
+
|
||||
+ users = UserAccounts(inst, ADDITIONAL_SUFFIX, rdn=None)
|
||||
+ with pytest.raises(raised_exception):
|
||||
+ spawn_worker_thread(create_user_wrapper, users, log, 30,
|
||||
+ f"Adding user with monitoring enabled='{lock_enabled}'; "
|
||||
+ f"threshold='{lock_threshold}'; pause='{lock_pause}'.")
|
||||
+ # In case feature is disabled - restart for the clean up
|
||||
+ if lock_enabled == "off":
|
||||
+ _kill_ns_slapd(inst)
|
||||
+ inst.restart()
|
||||
+
|
||||
+
|
||||
+@db_locks_monitoring_ack
|
||||
+def test_exhaust_db_locks_big_pause(topology_st_fn, setup_attruniq_index_be_import):
|
||||
+ """Test that DB lock pause setting increases the wait interval value for the monitoring thread
|
||||
+
|
||||
+ :id: 7d5bf838-5d4e-4ad5-8c03-5716afb84ea6
|
||||
+ :setup: Standalone instance with Attr Uniq plugin and user indexes disabled
|
||||
+ :steps: 1. Set nsslapd-db-locks to 20000 while using the default threshold value (95%)
|
||||
+ 2. Set nsslapd-db-locks-monitoring-pause to 10000 (10 seconds)
|
||||
+ 3. Make sure that the pause is successfully increased a few times in a row
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st_fn.standalone
|
||||
+
|
||||
+ bdb_config = BDB_LDBMConfig(inst)
|
||||
+ bdb_config.replace("nsslapd-db-locks", "20000")
|
||||
+ lock_pause = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-pause")
|
||||
+ assert lock_pause == 500
|
||||
+ lock_pause = "10000"
|
||||
+ bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause)
|
||||
+
|
||||
+ # Restart server
|
||||
+ inst.restart()
|
||||
+
|
||||
+ lock_enabled = bdb_config.get_attr_val_utf8_l("nsslapd-db-locks-monitoring-enabled")
|
||||
+ lock_threshold = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-threshold")
|
||||
+ assert lock_enabled == "on"
|
||||
+ assert lock_threshold == 90
|
||||
+
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ start = datetime.datetime.now()
|
||||
+ with pytest.raises(ldap.OPERATIONS_ERROR):
|
||||
+ spawn_worker_thread(create_user_wrapper, users, log, 30,
|
||||
+ f"Adding user with monitoring enabled='{lock_enabled}'; "
|
||||
+ f"threshold='{lock_threshold}'; pause='{lock_pause}'. Expect it to 'Work'")
|
||||
+ end = datetime.datetime.now()
|
||||
+ time_delta = end - start
|
||||
+ if time_delta.seconds < 9:
|
||||
+ raise RuntimeError("nsslapd-db-locks-monitoring-pause attribute doesn't function correctly. "
|
||||
+ f"Finished the execution in {time_delta.seconds} seconds")
|
||||
+ # In case something has failed - restart for the clean up
|
||||
+ inst.restart()
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
|
||||
index 571b0a58b..afb831c32 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
|
||||
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
|
||||
@@ -155,6 +155,8 @@ typedef unsigned short u_int16_t;
|
||||
#define DEFAULT_DNCACHE_MAXCOUNT -1 /* no limit */
|
||||
#define DEFAULT_DBCACHE_SIZE 33554432
|
||||
#define DEFAULT_DBCACHE_SIZE_STR "33554432"
|
||||
+#define DEFAULT_DBLOCK_PAUSE 500
|
||||
+#define DEFAULT_DBLOCK_PAUSE_STR "500"
|
||||
#define DEFAULT_MODE 0600
|
||||
#define DEFAULT_ALLIDSTHRESHOLD 4000
|
||||
#define DEFAULT_IDL_TUNE 1
|
||||
@@ -575,12 +577,21 @@ struct ldbminfo
|
||||
char *li_backend_implement; /* low layer backend implementation */
|
||||
int li_noparentcheck; /* check if parent exists on add */
|
||||
|
||||
- /* the next 3 fields are for the params that don't get changed until
|
||||
+ /* db lock monitoring */
|
||||
+ /* if we decide to move the values to bdb_config, we can use slapi_back_get_info function to retrieve the values */
|
||||
+ int32_t li_dblock_monitoring; /* enables db locks monitoring thread - requires restart */
|
||||
+ uint32_t li_dblock_monitoring_pause; /* an interval for db locks monitoring thread */
|
||||
+ uint32_t li_dblock_threshold; /* when the percentage is reached, abort the search in ldbm_back_next_search_entry - requires restart*/
|
||||
+ uint32_t li_dblock_threshold_reached;
|
||||
+
|
||||
+ /* the next 4 fields are for the params that don't get changed until
|
||||
* the server is restarted (used by the admin console)
|
||||
*/
|
||||
char *li_new_directory;
|
||||
uint64_t li_new_dbcachesize;
|
||||
int li_new_dblock;
|
||||
+ int32_t li_new_dblock_monitoring;
|
||||
+ uint64_t li_new_dblock_threshold;
|
||||
|
||||
int li_new_dbncache;
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
|
||||
index 738b841aa..167644943 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
|
||||
@@ -190,6 +190,102 @@ bdb_config_db_lock_set(void *arg, void *value, char *errorbuf, int phase, int ap
|
||||
return retval;
|
||||
}
|
||||
|
||||
+static void *
|
||||
+bdb_config_db_lock_monitoring_get(void *arg)
|
||||
+{
|
||||
+ struct ldbminfo *li = (struct ldbminfo *)arg;
|
||||
+
|
||||
+ return (void *)((intptr_t)(li->li_new_dblock_monitoring));
|
||||
+}
|
||||
+
|
||||
+static int
|
||||
+bdb_config_db_lock_monitoring_set(void *arg, void *value, char *errorbuf __attribute__((unused)), int phase __attribute__((unused)), int apply)
|
||||
+{
|
||||
+ struct ldbminfo *li = (struct ldbminfo *)arg;
|
||||
+ int retval = LDAP_SUCCESS;
|
||||
+ int val = (int32_t)((intptr_t)value);
|
||||
+
|
||||
+ if (apply) {
|
||||
+ if (CONFIG_PHASE_RUNNING == phase) {
|
||||
+ li->li_new_dblock_monitoring = val;
|
||||
+ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_monitoring_set",
|
||||
+ "New nsslapd-db-lock-monitoring value will not take effect until the server is restarted\n");
|
||||
+ } else {
|
||||
+ li->li_new_dblock_monitoring = val;
|
||||
+ li->li_dblock_monitoring = val;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ return retval;
|
||||
+}
|
||||
+
|
||||
+static void *
|
||||
+bdb_config_db_lock_pause_get(void *arg)
|
||||
+{
|
||||
+ struct ldbminfo *li = (struct ldbminfo *)arg;
|
||||
+
|
||||
+ return (void *)((uintptr_t)(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED)));
|
||||
+}
|
||||
+
|
||||
+static int
|
||||
+bdb_config_db_lock_pause_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply)
|
||||
+{
|
||||
+ struct ldbminfo *li = (struct ldbminfo *)arg;
|
||||
+ int retval = LDAP_SUCCESS;
|
||||
+ u_int32_t val = (u_int32_t)((uintptr_t)value);
|
||||
+
|
||||
+ if (val == 0) {
|
||||
+ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_pause_set",
|
||||
+ "%s was set to '0'. The default value will be used (%s)",
|
||||
+ CONFIG_DB_LOCKS_PAUSE, DEFAULT_DBLOCK_PAUSE_STR);
|
||||
+ val = DEFAULT_DBLOCK_PAUSE;
|
||||
+ }
|
||||
+
|
||||
+ if (apply) {
|
||||
+ slapi_atomic_store_32((int32_t *)&(li->li_dblock_monitoring_pause), val, __ATOMIC_RELAXED);
|
||||
+ }
|
||||
+ return retval;
|
||||
+}
|
||||
+
|
||||
+static void *
|
||||
+bdb_config_db_lock_threshold_get(void *arg)
|
||||
+{
|
||||
+ struct ldbminfo *li = (struct ldbminfo *)arg;
|
||||
+
|
||||
+ return (void *)((uintptr_t)(li->li_new_dblock_threshold));
|
||||
+}
|
||||
+
|
||||
+static int
|
||||
+bdb_config_db_lock_threshold_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply)
|
||||
+{
|
||||
+ struct ldbminfo *li = (struct ldbminfo *)arg;
|
||||
+ int retval = LDAP_SUCCESS;
|
||||
+ u_int32_t val = (u_int32_t)((uintptr_t)value);
|
||||
+
|
||||
+ if (val < 70 || val > 95) {
|
||||
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
|
||||
+ "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95",
|
||||
+ CONFIG_DB_LOCKS_THRESHOLD, val);
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_lock_threshold_set",
|
||||
+ "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95",
|
||||
+ CONFIG_DB_LOCKS_THRESHOLD, val);
|
||||
+ retval = LDAP_OPERATIONS_ERROR;
|
||||
+ return retval;
|
||||
+ }
|
||||
+
|
||||
+ if (apply) {
|
||||
+ if (CONFIG_PHASE_RUNNING == phase) {
|
||||
+ li->li_new_dblock_threshold = val;
|
||||
+ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_threshold_set",
|
||||
+ "New nsslapd-db-lock-monitoring-threshold value will not take effect until the server is restarted\n");
|
||||
+ } else {
|
||||
+ li->li_new_dblock_threshold = val;
|
||||
+ li->li_dblock_threshold = val;
|
||||
+ }
|
||||
+ }
|
||||
+ return retval;
|
||||
+}
|
||||
+
|
||||
static void *
|
||||
bdb_config_dbcachesize_get(void *arg)
|
||||
{
|
||||
@@ -1409,6 +1505,9 @@ static config_info bdb_config_param[] = {
|
||||
{CONFIG_SERIAL_LOCK, CONFIG_TYPE_ONOFF, "on", &bdb_config_serial_lock_get, &bdb_config_serial_lock_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
|
||||
{CONFIG_USE_LEGACY_ERRORCODE, CONFIG_TYPE_ONOFF, "off", &bdb_config_legacy_errcode_get, &bdb_config_legacy_errcode_set, 0},
|
||||
{CONFIG_DB_DEADLOCK_POLICY, CONFIG_TYPE_INT, STRINGIFYDEFINE(DB_LOCK_YOUNGEST), &bdb_config_db_deadlock_policy_get, &bdb_config_db_deadlock_policy_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
|
||||
+ {CONFIG_DB_LOCKS_MONITORING, CONFIG_TYPE_ONOFF, "on", &bdb_config_db_lock_monitoring_get, &bdb_config_db_lock_monitoring_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
|
||||
+ {CONFIG_DB_LOCKS_THRESHOLD, CONFIG_TYPE_INT, "90", &bdb_config_db_lock_threshold_get, &bdb_config_db_lock_threshold_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
|
||||
+ {CONFIG_DB_LOCKS_PAUSE, CONFIG_TYPE_INT, DEFAULT_DBLOCK_PAUSE_STR, &bdb_config_db_lock_pause_get, &bdb_config_db_lock_pause_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
|
||||
{NULL, 0, NULL, NULL, NULL, 0}};
|
||||
|
||||
void
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
index 6cccad8e6..2f25f67a2 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
@@ -35,6 +35,8 @@
|
||||
(env)->txn_checkpoint((env), (kbyte), (min), (flags))
|
||||
#define MEMP_STAT(env, gsp, fsp, flags, malloc) \
|
||||
(env)->memp_stat((env), (gsp), (fsp), (flags))
|
||||
+#define LOCK_STAT(env, statp, flags, malloc) \
|
||||
+ (env)->lock_stat((env), (statp), (flags))
|
||||
#define MEMP_TRICKLE(env, pct, nwrotep) \
|
||||
(env)->memp_trickle((env), (pct), (nwrotep))
|
||||
#define LOG_ARCHIVE(env, listp, flags, malloc) \
|
||||
@@ -66,6 +68,7 @@
|
||||
#define NEWDIR_MODE 0755
|
||||
#define DB_REGION_PREFIX "__db."
|
||||
|
||||
+static int locks_monitoring_threadmain(void *param);
|
||||
static int perf_threadmain(void *param);
|
||||
static int checkpoint_threadmain(void *param);
|
||||
static int trickle_threadmain(void *param);
|
||||
@@ -84,6 +87,7 @@ static int bdb_start_checkpoint_thread(struct ldbminfo *li);
|
||||
static int bdb_start_trickle_thread(struct ldbminfo *li);
|
||||
static int bdb_start_perf_thread(struct ldbminfo *li);
|
||||
static int bdb_start_txn_test_thread(struct ldbminfo *li);
|
||||
+static int bdb_start_locks_monitoring_thread(struct ldbminfo *li);
|
||||
static int trans_batch_count = 0;
|
||||
static int trans_batch_limit = 0;
|
||||
static int trans_batch_txn_min_sleep = 50; /* ms */
|
||||
@@ -1299,6 +1303,10 @@ bdb_start(struct ldbminfo *li, int dbmode)
|
||||
return return_value;
|
||||
}
|
||||
|
||||
+ if (0 != (return_value = bdb_start_locks_monitoring_thread(li))) {
|
||||
+ return return_value;
|
||||
+ }
|
||||
+
|
||||
/* We need to free the memory to avoid a leak
|
||||
* Also, we have to evaluate if the performance counter
|
||||
* should be preserved or not for database restore.
|
||||
@@ -2885,6 +2893,7 @@ bdb_start_perf_thread(struct ldbminfo *li)
|
||||
return return_value;
|
||||
}
|
||||
|
||||
+
|
||||
/* Performance thread */
|
||||
static int
|
||||
perf_threadmain(void *param)
|
||||
@@ -2910,6 +2919,82 @@ perf_threadmain(void *param)
|
||||
return 0;
|
||||
}
|
||||
|
||||
+
|
||||
+/*
|
||||
+ * create a thread for locks_monitoring_threadmain
|
||||
+ */
|
||||
+static int
|
||||
+bdb_start_locks_monitoring_thread(struct ldbminfo *li)
|
||||
+{
|
||||
+ int return_value = 0;
|
||||
+ if (li->li_dblock_monitoring) {
|
||||
+ if (NULL == PR_CreateThread(PR_USER_THREAD,
|
||||
+ (VFP)(void *)locks_monitoring_threadmain, li,
|
||||
+ PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
|
||||
+ PR_UNJOINABLE_THREAD,
|
||||
+ SLAPD_DEFAULT_THREAD_STACKSIZE)) {
|
||||
+ PRErrorCode prerr = PR_GetError();
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "bdb_start_locks_monitoring_thread",
|
||||
+ "Failed to create database locks monitoring thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
|
||||
+ prerr, slapd_pr_strerror(prerr));
|
||||
+ return_value = -1;
|
||||
+ }
|
||||
+ }
|
||||
+ return return_value;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+/* DB Locks Monitoring thread */
|
||||
+static int
|
||||
+locks_monitoring_threadmain(void *param)
|
||||
+{
|
||||
+ int ret = 0;
|
||||
+ uint64_t current_locks = 0;
|
||||
+ uint64_t max_locks = 0;
|
||||
+ uint32_t lock_exhaustion = 0;
|
||||
+ PRIntervalTime interval;
|
||||
+ struct ldbminfo *li = NULL;
|
||||
+
|
||||
+ PR_ASSERT(NULL != param);
|
||||
+ li = (struct ldbminfo *)param;
|
||||
+
|
||||
+ dblayer_private *priv = li->li_dblayer_private;
|
||||
+ bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
|
||||
+ PR_ASSERT(NULL != priv);
|
||||
+
|
||||
+ INCR_THREAD_COUNT(pEnv);
|
||||
+
|
||||
+ while (!BDB_CONFIG(li)->bdb_stop_threads) {
|
||||
+ if (dblayer_db_uses_locking(pEnv->bdb_DB_ENV)) {
|
||||
+ DB_LOCK_STAT *lockstat = NULL;
|
||||
+ ret = LOCK_STAT(pEnv->bdb_DB_ENV, &lockstat, 0, (void *)slapi_ch_malloc);
|
||||
+ if (0 == ret) {
|
||||
+ current_locks = lockstat->st_nlocks;
|
||||
+ max_locks = lockstat->st_maxlocks;
|
||||
+ if (max_locks){
|
||||
+ lock_exhaustion = (uint32_t)((double)current_locks / (double)max_locks * 100.0);
|
||||
+ } else {
|
||||
+ lock_exhaustion = 0;
|
||||
+ }
|
||||
+ if ((li->li_dblock_threshold) &&
|
||||
+ (lock_exhaustion >= li->li_dblock_threshold)) {
|
||||
+ slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 1, __ATOMIC_RELAXED);
|
||||
+ } else {
|
||||
+ slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 0, __ATOMIC_RELAXED);
|
||||
+ }
|
||||
+ }
|
||||
+ slapi_ch_free((void **)&lockstat);
|
||||
+ }
|
||||
+ interval = PR_MillisecondsToInterval(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED));
|
||||
+ DS_Sleep(interval);
|
||||
+ }
|
||||
+
|
||||
+ DECR_THREAD_COUNT(pEnv);
|
||||
+ slapi_log_err(SLAPI_LOG_TRACE, "locks_monitoring_threadmain", "Leaving locks_monitoring_threadmain\n");
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+
|
||||
/*
|
||||
* create a thread for deadlock_threadmain
|
||||
*/
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c
|
||||
index 893776699..4165c8fad 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/init.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/init.c
|
||||
@@ -70,6 +70,9 @@ ldbm_back_init(Slapi_PBlock *pb)
|
||||
/* Initialize the set of instances. */
|
||||
li->li_instance_set = objset_new(&ldbm_back_instance_set_destructor);
|
||||
|
||||
+ /* Init lock threshold value */
|
||||
+ li->li_dblock_threshold_reached = 0;
|
||||
+
|
||||
/* ask the factory to give us space in the Connection object
|
||||
* (only bulk import uses this)
|
||||
*/
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
|
||||
index 10cef250f..60884cf33 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
|
||||
@@ -87,6 +87,9 @@ static char *ldbm_config_moved_attributes[] =
|
||||
CONFIG_SERIAL_LOCK,
|
||||
CONFIG_USE_LEGACY_ERRORCODE,
|
||||
CONFIG_DB_DEADLOCK_POLICY,
|
||||
+ CONFIG_DB_LOCKS_MONITORING,
|
||||
+ CONFIG_DB_LOCKS_THRESHOLD,
|
||||
+ CONFIG_DB_LOCKS_PAUSE,
|
||||
""};
|
||||
|
||||
/* Used to add an array of entries, like the one above and
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.h b/ldap/servers/slapd/back-ldbm/ldbm_config.h
|
||||
index 58e64799c..6fa8292eb 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.h
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.h
|
||||
@@ -104,6 +104,9 @@ struct config_info
|
||||
#define CONFIG_DB_VERBOSE "nsslapd-db-verbose"
|
||||
#define CONFIG_DB_DEBUG "nsslapd-db-debug"
|
||||
#define CONFIG_DB_LOCK "nsslapd-db-locks"
|
||||
+#define CONFIG_DB_LOCKS_MONITORING "nsslapd-db-locks-monitoring-enabled"
|
||||
+#define CONFIG_DB_LOCKS_THRESHOLD "nsslapd-db-locks-monitoring-threshold"
|
||||
+#define CONFIG_DB_LOCKS_PAUSE "nsslapd-db-locks-monitoring-pause"
|
||||
#define CONFIG_DB_NAMED_REGIONS "nsslapd-db-named-regions"
|
||||
#define CONFIG_DB_PRIVATE_MEM "nsslapd-db-private-mem"
|
||||
#define CONFIG_DB_PRIVATE_IMPORT_MEM "nsslapd-db-private-import-mem"
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
|
||||
index 1a7b510d4..6e22debde 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
|
||||
@@ -1472,6 +1472,7 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
|
||||
slapi_pblock_get(pb, SLAPI_CONNECTION, &conn);
|
||||
slapi_pblock_get(pb, SLAPI_OPERATION, &op);
|
||||
|
||||
+
|
||||
if ((reverse_list = operation_is_flag_set(op, OP_FLAG_REVERSE_CANDIDATE_ORDER))) {
|
||||
/*
|
||||
* Start at the end of the list and work our way forward. Since a single
|
||||
@@ -1538,6 +1539,18 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
|
||||
|
||||
/* Find the next candidate entry and return it. */
|
||||
while (1) {
|
||||
+ if (li->li_dblock_monitoring &&
|
||||
+ slapi_atomic_load_32((int32_t *)&(li->li_dblock_threshold_reached), __ATOMIC_RELAXED)) {
|
||||
+ slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_next_search_entry",
|
||||
+ "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold "
|
||||
+ "under cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config). "
|
||||
+ "Please, increase nsslapd-db-locks according to your needs.\n");
|
||||
+ slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_ENTRY, NULL);
|
||||
+ delete_search_result_set(pb, &sr);
|
||||
+ rc = SLAPI_FAIL_GENERAL;
|
||||
+ slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold)", 0, NULL);
|
||||
+ goto bail;
|
||||
+ }
|
||||
|
||||
/* check for abandon */
|
||||
if (slapi_op_abandoned(pb) || (NULL == sr)) {
|
||||
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
|
||||
index 388616b36..db7d01bbc 100644
|
||||
--- a/ldap/servers/slapd/libglobs.c
|
||||
+++ b/ldap/servers/slapd/libglobs.c
|
||||
@@ -8171,8 +8171,8 @@ config_set(const char *attr, struct berval **values, char *errorbuf, int apply)
|
||||
#if 0
|
||||
debugHashTable(attr);
|
||||
#endif
|
||||
- slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored", attr);
|
||||
- slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored", attr);
|
||||
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored\n", attr);
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored\n", attr);
|
||||
return LDAP_NO_SUCH_ATTRIBUTE;
|
||||
}
|
||||
|
||||
diff --git a/src/cockpit/389-console/src/css/ds.css b/src/cockpit/389-console/src/css/ds.css
|
||||
index 9248116e7..3cf50b593 100644
|
||||
--- a/src/cockpit/389-console/src/css/ds.css
|
||||
+++ b/src/cockpit/389-console/src/css/ds.css
|
||||
@@ -639,6 +639,10 @@ option {
|
||||
padding-right: 0 !important;
|
||||
}
|
||||
|
||||
+.ds-vertical-scroll-auto {
|
||||
+ overflow-y: auto !important;
|
||||
+}
|
||||
+
|
||||
.alert {
|
||||
max-width: 750px;
|
||||
}
|
||||
diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx
|
||||
index efa3ce6d5..11cae972c 100644
|
||||
--- a/src/cockpit/389-console/src/database.jsx
|
||||
+++ b/src/cockpit/389-console/src/database.jsx
|
||||
@@ -157,6 +157,7 @@ export class Database extends React.Component {
|
||||
const attrs = config.attrs;
|
||||
let db_cache_auto = false;
|
||||
let import_cache_auto = false;
|
||||
+ let dblocksMonitoring = false;
|
||||
let dbhome = "";
|
||||
|
||||
if ('nsslapd-db-home-directory' in attrs) {
|
||||
@@ -168,6 +169,9 @@ export class Database extends React.Component {
|
||||
if (attrs['nsslapd-import-cache-autosize'] != "0") {
|
||||
import_cache_auto = true;
|
||||
}
|
||||
+ if (attrs['nsslapd-db-locks-monitoring-enabled'][0] == "on") {
|
||||
+ dblocksMonitoring = true;
|
||||
+ }
|
||||
|
||||
this.setState(() => (
|
||||
{
|
||||
@@ -187,6 +191,9 @@ export class Database extends React.Component {
|
||||
txnlogdir: attrs['nsslapd-db-logdirectory'],
|
||||
dbhomedir: dbhome,
|
||||
dblocks: attrs['nsslapd-db-locks'],
|
||||
+ dblocksMonitoring: dblocksMonitoring,
|
||||
+ dblocksMonitoringThreshold: attrs['nsslapd-db-locks-monitoring-threshold'],
|
||||
+ dblocksMonitoringPause: attrs['nsslapd-db-locks-monitoring-pause'],
|
||||
chxpoint: attrs['nsslapd-db-checkpoint-interval'],
|
||||
compactinterval: attrs['nsslapd-db-compactdb-interval'],
|
||||
importcacheauto: attrs['nsslapd-import-cache-autosize'],
|
||||
diff --git a/src/cockpit/389-console/src/index.html b/src/cockpit/389-console/src/index.html
|
||||
index 1278844fc..fd0eeb669 100644
|
||||
--- a/src/cockpit/389-console/src/index.html
|
||||
+++ b/src/cockpit/389-console/src/index.html
|
||||
@@ -12,7 +12,7 @@
|
||||
</head>
|
||||
|
||||
|
||||
-<body>
|
||||
+<body class="ds-vertical-scroll-auto">
|
||||
<div id="dsinstance"></div>
|
||||
<script src="index.js"></script>
|
||||
</body>
|
||||
diff --git a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
|
||||
index f6e662bca..6a71c138d 100644
|
||||
--- a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
|
||||
@@ -31,6 +31,9 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
txnlogdir: this.props.data.txnlogdir,
|
||||
dbhomedir: this.props.data.dbhomedir,
|
||||
dblocks: this.props.data.dblocks,
|
||||
+ dblocksMonitoring: this.props.data.dblocksMonitoring,
|
||||
+ dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold,
|
||||
+ dblocksMonitoringPause: this.props.data.dblocksMonitoringPause,
|
||||
chxpoint: this.props.data.chxpoint,
|
||||
compactinterval: this.props.data.compactinterval,
|
||||
importcachesize: this.props.data.importcachesize,
|
||||
@@ -47,6 +50,9 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
_txnlogdir: this.props.data.txnlogdir,
|
||||
_dbhomedir: this.props.data.dbhomedir,
|
||||
_dblocks: this.props.data.dblocks,
|
||||
+ _dblocksMonitoring: this.props.data.dblocksMonitoring,
|
||||
+ _dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold,
|
||||
+ _dblocksMonitoringPause: this.props.data.dblocksMonitoringPause,
|
||||
_chxpoint: this.props.data.chxpoint,
|
||||
_compactinterval: this.props.data.compactinterval,
|
||||
_importcachesize: this.props.data.importcachesize,
|
||||
@@ -55,6 +61,7 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
_import_cache_auto: this.props.data.import_cache_auto,
|
||||
};
|
||||
this.handleChange = this.handleChange.bind(this);
|
||||
+ this.select_db_locks_monitoring = this.select_db_locks_monitoring.bind(this);
|
||||
this.select_auto_cache = this.select_auto_cache.bind(this);
|
||||
this.select_auto_import_cache = this.select_auto_import_cache.bind(this);
|
||||
this.save_db_config = this.save_db_config.bind(this);
|
||||
@@ -76,6 +83,12 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
}, this.handleChange(e));
|
||||
}
|
||||
|
||||
+ select_db_locks_monitoring (val, e) {
|
||||
+ this.setState({
|
||||
+ dblocksMonitoring: !this.state.dblocksMonitoring
|
||||
+ }, this.handleChange(val, e));
|
||||
+ }
|
||||
+
|
||||
handleChange(e) {
|
||||
// Generic
|
||||
const value = e.target.type === 'checkbox' ? e.target.checked : e.target.value;
|
||||
@@ -150,6 +163,21 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
cmd.push("--locks=" + this.state.dblocks);
|
||||
requireRestart = true;
|
||||
}
|
||||
+ if (this.state._dblocksMonitoring != this.state.dblocksMonitoring) {
|
||||
+ if (this.state.dblocksMonitoring) {
|
||||
+ cmd.push("--locks-monitoring-enabled=on");
|
||||
+ } else {
|
||||
+ cmd.push("--locks-monitoring-enabled=off");
|
||||
+ }
|
||||
+ requireRestart = true;
|
||||
+ }
|
||||
+ if (this.state._dblocksMonitoringThreshold != this.state.dblocksMonitoringThreshold) {
|
||||
+ cmd.push("--locks-monitoring-threshold=" + this.state.dblocksMonitoringThreshold);
|
||||
+ requireRestart = true;
|
||||
+ }
|
||||
+ if (this.state._dblocksMonitoringPause != this.state.dblocksMonitoringPause) {
|
||||
+ cmd.push("--locks-monitoring-pause=" + this.state.dblocksMonitoringPause);
|
||||
+ }
|
||||
if (this.state._chxpoint != this.state.chxpoint) {
|
||||
cmd.push("--checkpoint-interval=" + this.state.chxpoint);
|
||||
requireRestart = true;
|
||||
@@ -216,6 +244,28 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
let import_cache_form;
|
||||
let db_auto_checked = false;
|
||||
let import_auto_checked = false;
|
||||
+ let dblocksMonitor = "";
|
||||
+
|
||||
+ if (this.state.dblocksMonitoring) {
|
||||
+ dblocksMonitor = <div className="ds-margin-top">
|
||||
+ <Row className="ds-margin-top" title="Sets the DB lock exhaustion value in percentage (valid range is 70-95). If too many locks are acquired, the server will abort the searches while the number of locks are not decreased. It helps to avoid DB corruption and long recovery. (nsslapd-db-locks-monitoring-threshold)">
|
||||
+ <Col componentClass={ControlLabel} sm={4}>
|
||||
+ DB Locks Threshold Percentage
|
||||
+ </Col>
|
||||
+ <Col sm={8}>
|
||||
+ <input className="ds-input" type="number" id="dblocksMonitoringThreshold" size="10" onChange={this.handleChange} value={this.state.dblocksMonitoringThreshold} />
|
||||
+ </Col>
|
||||
+ </Row>
|
||||
+ <Row className="ds-margin-top" title="Sets the amount of time (milliseconds) that the monitoring thread spends waiting between checks. (nsslapd-db-locks-monitoring-pause)">
|
||||
+ <Col componentClass={ControlLabel} sm={4}>
|
||||
+ DB Locks Pause Milliseconds
|
||||
+ </Col>
|
||||
+ <Col sm={8}>
|
||||
+ <input className="ds-input" type="number" id="dblocksMonitoringPause" size="10" onChange={this.handleChange} value={this.state.dblocksMonitoringPause} />
|
||||
+ </Col>
|
||||
+ </Row>
|
||||
+ </div>;
|
||||
+ }
|
||||
|
||||
if (this.state.db_cache_auto) {
|
||||
db_cache_form = <div id="auto-cache-form" className="ds-margin-left">
|
||||
@@ -422,14 +472,6 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
<input id="dbhomedir" value={this.state.dbhomedir} onChange={this.handleChange} className="ds-input-auto" type="text" />
|
||||
</Col>
|
||||
</Row>
|
||||
- <Row className="ds-margin-top" title="The number of database locks (nsslapd-db-locks).">
|
||||
- <Col componentClass={ControlLabel} sm={4}>
|
||||
- Database Locks
|
||||
- </Col>
|
||||
- <Col sm={8}>
|
||||
- <input id="dblocks" value={this.state.dblocks} onChange={this.handleChange} className="ds-input-auto" type="text" />
|
||||
- </Col>
|
||||
- </Row>
|
||||
<Row className="ds-margin-top" title="Amount of time in seconds after which the Directory Server sends a checkpoint entry to the database transaction log (nsslapd-db-checkpoint-interval).">
|
||||
<Col componentClass={ControlLabel} sm={4}>
|
||||
Database Checkpoint Interval
|
||||
@@ -446,6 +488,36 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
<input id="compactinterval" value={this.state.compactinterval} onChange={this.handleChange} className="ds-input-auto" type="text" />
|
||||
</Col>
|
||||
</Row>
|
||||
+ <Row className="ds-margin-top" title="The number of database locks (nsslapd-db-locks).">
|
||||
+ <Col componentClass={ControlLabel} sm={4}>
|
||||
+ Database Locks
|
||||
+ </Col>
|
||||
+ <Col sm={8}>
|
||||
+ <input id="dblocks" value={this.state.dblocks} onChange={this.handleChange} className="ds-input-auto" type="text" />
|
||||
+ </Col>
|
||||
+ </Row>
|
||||
+ <Row>
|
||||
+ <Col sm={12}>
|
||||
+ <h5 className="ds-sub-header">DB Locks Monitoring</h5>
|
||||
+ <hr />
|
||||
+ </Col>
|
||||
+ </Row>
|
||||
+ <Row>
|
||||
+ <Col sm={12}>
|
||||
+ <Checkbox title="Set input to be set automatically"
|
||||
+ id="dblocksMonitoring"
|
||||
+ checked={this.state.dblocksMonitoring}
|
||||
+ onChange={this.select_db_locks_monitoring}
|
||||
+ >
|
||||
+ Enable Monitoring
|
||||
+ </Checkbox>
|
||||
+ </Col>
|
||||
+ </Row>
|
||||
+ <Row>
|
||||
+ <Col sm={12}>
|
||||
+ {dblocksMonitor}
|
||||
+ </Col>
|
||||
+ </Row>
|
||||
</Form>
|
||||
</div>
|
||||
</div>
|
||||
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
|
||||
index bcd7b383f..13bb27842 100644
|
||||
--- a/src/lib389/lib389/backend.py
|
||||
+++ b/src/lib389/lib389/backend.py
|
||||
@@ -1011,6 +1011,9 @@ class DatabaseConfig(DSLdapObject):
|
||||
'nsslapd-db-transaction-batch-max-wait',
|
||||
'nsslapd-db-logbuf-size',
|
||||
'nsslapd-db-locks',
|
||||
+ 'nsslapd-db-locks-monitoring-enabled',
|
||||
+ 'nsslapd-db-locks-monitoring-threshold',
|
||||
+ 'nsslapd-db-locks-monitoring-pause',
|
||||
'nsslapd-db-private-import-mem',
|
||||
'nsslapd-import-cache-autosize',
|
||||
'nsslapd-cache-autosize',
|
||||
diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py
|
||||
index 6bfbcb036..722764d10 100644
|
||||
--- a/src/lib389/lib389/cli_conf/backend.py
|
||||
+++ b/src/lib389/lib389/cli_conf/backend.py
|
||||
@@ -46,6 +46,9 @@ arg_to_attr = {
|
||||
'txn_batch_max': 'nsslapd-db-transaction-batch-max-wait',
|
||||
'logbufsize': 'nsslapd-db-logbuf-size',
|
||||
'locks': 'nsslapd-db-locks',
|
||||
+ 'locks_monitoring_enabled': 'nsslapd-db-locks-monitoring-enabled',
|
||||
+ 'locks_monitoring_threshold': 'nsslapd-db-locks-monitoring-threshold',
|
||||
+ 'locks_monitoring_pause': 'nsslapd-db-locks-monitoring-pause',
|
||||
'import_cache_autosize': 'nsslapd-import-cache-autosize',
|
||||
'cache_autosize': 'nsslapd-cache-autosize',
|
||||
'cache_autosize_split': 'nsslapd-cache-autosize-split',
|
||||
@@ -998,6 +1001,13 @@ def create_parser(subparsers):
|
||||
'the batch count (only works when txn-batch-val is set)')
|
||||
set_db_config_parser.add_argument('--logbufsize', help='Specifies the transaction log information buffer size')
|
||||
set_db_config_parser.add_argument('--locks', help='Sets the maximum number of database locks')
|
||||
+ set_db_config_parser.add_argument('--locks-monitoring-enabled', help='Set to "on" or "off" to monitor DB locks. When it crosses the percentage value '
|
||||
+ 'set with "--locks-monitoring-threshold" ("on" by default)')
|
||||
+ set_db_config_parser.add_argument('--locks-monitoring-threshold', help='Sets the DB lock exhaustion value in percentage (valid range is 70-95). If too many locks are '
|
||||
+ 'acquired, the server will abort the searches while the number of locks '
|
||||
+ 'are not decreased. It helps to avoid DB corruption and long recovery.')
|
||||
+ set_db_config_parser.add_argument('--locks-monitoring-pause', help='Sets the DB lock monitoring value in milliseconds for the amount of time '
|
||||
+ 'that the monitoring thread spends waiting between checks.')
|
||||
set_db_config_parser.add_argument('--import-cache-autosize', help='Set to "on" or "off" to automatically set the size of the import '
|
||||
'cache to be used during the the import process of LDIF files')
|
||||
set_db_config_parser.add_argument('--cache-autosize', help='Sets the percentage of free memory that is used in total for the database '
|
||||
--
|
||||
2.26.3
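
Note (not part of the patch): once the change above is applied, the three new attributes can be set through the same lib389 plumbing the patch extends. A minimal sketch, assuming a connected DirSrv handle `inst` and the DatabaseConfig.set() pair-list interface used by dsconf:

from lib389.backend import DatabaseConfig

def enable_db_lock_monitoring(inst, threshold='90', pause='500'):
    # Attribute names are taken from the diff above; the values are examples.
    db_cfg = DatabaseConfig(inst)
    db_cfg.set([
        ('nsslapd-db-locks-monitoring-enabled', 'on'),
        ('nsslapd-db-locks-monitoring-threshold', threshold),  # percent of nsslapd-db-locks, valid range 70-95
        ('nsslapd-db-locks-monitoring-pause', pause),          # milliseconds between monitoring checks
    ])

The equivalent dsconf flags added above are --locks-monitoring-enabled, --locks-monitoring-threshold and --locks-monitoring-pause; changing the enabled flag or the threshold requires a server restart (the console code sets requireRestart for those two, but not for the pause value).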
|
||||
|
@ -1,44 +0,0 @@
From a112394af3a20787755029804684d57a9c3ffa9a Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 21 Feb 2024 12:43:03 +0000
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap
(#6104)

Bug description: A recent addition to the connection disconnect error
messaging, conflicts with how errormap.c maps error codes/strings.

Fix description: errormap expects error codes/strings to be in ascending
order. Moved the new error code to the bottom of the list.

Relates: https://github.com/389ds/389-ds-base/issues/6103

Reviewed by: @droideck. @progier389 (Thank you)
---
ldap/servers/slapd/disconnect_error_strings.h | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
index c2d9e283b..f603a08ce 100644
--- a/ldap/servers/slapd/disconnect_error_strings.h
+++ b/ldap/servers/slapd/disconnect_error_strings.h
@@ -14,7 +14,8 @@
/* disconnect_error_strings.h
*
* Strings describing the errors used in logging the reason a connection
- * was closed.
+ * was closed. Ensure definitions are in the same order as the error codes
+ * defined in disconnect_errors.h
*/
#ifndef __DISCONNECT_ERROR_STRINGS_H_
#define __DISCONNECT_ERROR_STRINGS_H_
@@ -35,6 +36,6 @@ ER2(SLAPD_DISCONNECT_NTSSL_TIMEOUT, "T2")
ER2(SLAPD_DISCONNECT_SASL_FAIL, "S1")
ER2(SLAPD_DISCONNECT_PROXY_INVALID_HEADER, "P3")
ER2(SLAPD_DISCONNECT_PROXY_UNKNOWN, "P4")
-
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")

#endif /* __DISCONNECT_ERROR_STRINGS_H_ */
--
2.45.0

@ -0,0 +1,33 @@
From 7573c62a2e61293a4800e67919d79341fa1a1532 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Wed, 26 May 2021 16:07:43 +0200
Subject: [PATCH 10/12] Issue 4764 - replicated operation sometime checks ACI
(#4783)

(cherry picked from commit 0cfdea7abcacfca6686a6cf84dbf7ae1167f3022)
---
ldap/servers/slapd/connection.c | 8 ++++++++
1 file changed, 8 insertions(+)

diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index c7a15e775..e0c1a52d2 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1771,6 +1771,14 @@ connection_threadmain()
}
}

+ /*
+ * Fix bz 1931820 issue (the check to set OP_FLAG_REPLICATED may be done
+ * before replication session is properly set).
+ */
+ if (replication_connection) {
+ operation_set_flag(op, OP_FLAG_REPLICATED);
+ }
+
/*
* Call the do_<operation> function to process this request.
*/
--
2.26.3

@ -1,30 +0,0 @@
From edd9abc8901604dde1d739d87ca2906734d53dd3 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Thu, 13 Jun 2024 13:35:09 +0200
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap

Description:
Remove duplicate SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT error code.

Fixes: https://github.com/389ds/389-ds-base/issues/6103

Reviewed by: @tbordaz (Thanks!)
---
ldap/servers/slapd/disconnect_error_strings.h | 1 -
1 file changed, 1 deletion(-)

diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
index f603a08ce..d49cc79a2 100644
--- a/ldap/servers/slapd/disconnect_error_strings.h
+++ b/ldap/servers/slapd/disconnect_error_strings.h
@@ -28,7 +28,6 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
ER2(SLAPD_DISCONNECT_REVENTS, "R1")
ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
-ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
ER2(SLAPD_DISCONNECT_UNBIND, "U1")
ER2(SLAPD_DISCONNECT_POLL, "P2")
--
2.45.0

File diff suppressed because it is too large
@ -1,220 +0,0 @@
|
||||
From 8cf981c00ae18d3efaeb10819282cd991621e9a2 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Wed, 22 May 2024 11:29:05 +0200
|
||||
Subject: [PATCH] Issue 6172 - RFE: improve the performance of evaluation of
|
||||
filter component when tested against a large valueset (like group members)
|
||||
(#6173)
|
||||
|
||||
Bug description:
|
||||
Before returning an entry (to a SRCH) the server checks that the entry matches the SRCH filter.
|
||||
If a filter component (equality) is testing the value (ava) against a
|
||||
large valueset (like uniquemember values), it takes a long time because
|
||||
of the large number of values and required normalization of the values.
|
||||
This can be improved taking benefit of sorted valueset. Those sorted
|
||||
valueset were created to improve updates of large valueset (groups) but
|
||||
at that time not implemented in SRCH path.
|
||||
|
||||
Fix description:
|
||||
In case of LDAP_FILTER_EQUALITY component, the server can get
|
||||
benefit of the sorted valuearray.
|
||||
To limit the risk of regression, we use the sorted valuearray
|
||||
only for the DN syntax attribute. Indeed the sorted valuearray was
|
||||
designed for those type of attribute.
|
||||
With those two limitations, there is no need of a toggle and
|
||||
the call to plugin_call_syntax_filter_ava can be replaced by
|
||||
a call to slapi_valueset_find.
|
||||
In both cases, sorted valueset and plugin_call_syntax_filter_ava, ava and
|
||||
values are normalized.
|
||||
In sorted valueset, the values have been normalized to insert the index
|
||||
in the sorted array and then comparison is done on normalized values.
|
||||
In plugin_call_syntax_filter_ava, all values in valuearray (of valueset) are normalized
|
||||
before comparison.
|
||||
|
||||
relates: #6172
|
||||
|
||||
Reviewed by: Pierre Rogier, Simon Pichugin (Big Thanks !!!)
|
||||
---
|
||||
.../tests/suites/filter/filter_test.py | 125 ++++++++++++++++++
|
||||
ldap/servers/slapd/filterentry.c | 22 ++-
|
||||
2 files changed, 146 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py
|
||||
index d6bfa5a3b..4baaf04a7 100644
|
||||
--- a/dirsrvtests/tests/suites/filter/filter_test.py
|
||||
+++ b/dirsrvtests/tests/suites/filter/filter_test.py
|
||||
@@ -9,7 +9,11 @@
|
||||
import logging
|
||||
|
||||
import pytest
|
||||
+import time
|
||||
+from lib389.dirsrv_log import DirsrvAccessLog
|
||||
from lib389.tasks import *
|
||||
+from lib389.backend import Backends, Backend
|
||||
+from lib389.dbgen import dbgen_users, dbgen_groups
|
||||
from lib389.topologies import topology_st
|
||||
from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX
|
||||
from lib389.utils import *
|
||||
@@ -304,6 +308,127 @@ def test_extended_search(topology_st):
|
||||
ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
|
||||
assert len(ents) == 1
|
||||
|
||||
+def test_match_large_valueset(topology_st):
|
||||
+ """Test that when returning a big number of entries
|
||||
+ and that we need to match the filter from a large valueset
|
||||
+ we get benefit to use the sorted valueset
|
||||
+
|
||||
+ :id: 7db5aa88-50e0-4c31-85dd-1d2072cb674c
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Create a users and groups backends and tune them
|
||||
+ 2. Generate a test ldif (2k users and 1K groups with all users)
|
||||
+ 3. Import test ldif file using Offline import (ldif2db).
|
||||
+ 4. Prim the 'groups' entrycache with a "fast" search
|
||||
+ 5. Search the 'groups' with a difficult matching value
|
||||
+ 6. check that etime from step 5 is less than a second
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Create a users and groups backends should PASS
|
||||
+ 2. Generate LDIF should PASS.
|
||||
+ 3. Offline import should PASS.
|
||||
+ 4. Priming should PASS.
|
||||
+ 5. Performance search should PASS.
|
||||
+ 6. Etime of performance search should PASS.
|
||||
+ """
|
||||
+
|
||||
+ log.info('Running test_match_large_valueset...')
|
||||
+ #
|
||||
+ # Test online/offline LDIF imports
|
||||
+ #
|
||||
+ inst = topology_st.standalone
|
||||
+ inst.start()
|
||||
+ backends = Backends(inst)
|
||||
+ users_suffix = "ou=users,%s" % DEFAULT_SUFFIX
|
||||
+ users_backend = 'users'
|
||||
+ users_ldif = 'users_import.ldif'
|
||||
+ groups_suffix = "ou=groups,%s" % DEFAULT_SUFFIX
|
||||
+ groups_backend = 'groups'
|
||||
+ groups_ldif = 'groups_import.ldif'
|
||||
+ groups_entrycache = '200000000'
|
||||
+ users_number = 2000
|
||||
+ groups_number = 1000
|
||||
+
|
||||
+
|
||||
+ # For priming the cache we just want to be fast
|
||||
+ # taking the first value in the valueset is good
|
||||
+ # whether the valueset is sorted or not
|
||||
+ priming_user_rdn = "user0001"
|
||||
+
|
||||
+ # For performance testing, this is important to use
|
||||
+ # user1000 rather then user0001
|
||||
+ # Because user0001 is the first value in the valueset
|
||||
+ # whether we use the sorted valuearray or non sorted
|
||||
+ # valuearray the performance will be similar.
|
||||
+ # With middle value user1000, the performance boost of
|
||||
+ # the sorted valuearray will make the difference.
|
||||
+ perf_user_rdn = "user1000"
|
||||
+
|
||||
+ # Step 1. Prepare the backends and tune the groups entrycache
|
||||
+ try:
|
||||
+ be_users = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': users_suffix, 'name': users_backend})
|
||||
+ be_groups = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': groups_suffix, 'name': groups_backend})
|
||||
+
|
||||
+ # set the entry cache to 200Mb as the 1K groups of 2K users require at least 170Mb
|
||||
+ be_groups.replace('nsslapd-cachememsize', groups_entrycache)
|
||||
+ except:
|
||||
+ raise
|
||||
+
|
||||
+ # Step 2. Generate a test ldif (10k users entries)
|
||||
+ log.info("Generating users LDIF...")
|
||||
+ ldif_dir = inst.get_ldif_dir()
|
||||
+ users_import_ldif = "%s/%s" % (ldif_dir, users_ldif)
|
||||
+ groups_import_ldif = "%s/%s" % (ldif_dir, groups_ldif)
|
||||
+ dbgen_users(inst, users_number, users_import_ldif, suffix=users_suffix, generic=True, parent=users_suffix)
|
||||
+
|
||||
+ # Generate a test ldif (800 groups with 10k members) that fit in 700Mb entry cache
|
||||
+ props = {
|
||||
+ "name": "group",
|
||||
+ "suffix": groups_suffix,
|
||||
+ "parent": groups_suffix,
|
||||
+ "number": groups_number,
|
||||
+ "numMembers": users_number,
|
||||
+ "createMembers": False,
|
||||
+ "memberParent": users_suffix,
|
||||
+ "membershipAttr": "uniquemember",
|
||||
+ }
|
||||
+ dbgen_groups(inst, groups_import_ldif, props)
|
||||
+
|
||||
+ # Step 3. Do the both offline imports
|
||||
+ inst.stop()
|
||||
+ if not inst.ldif2db(users_backend, None, None, None, users_import_ldif):
|
||||
+ log.fatal('test_basic_import_export: Offline users import failed')
|
||||
+ assert False
|
||||
+ if not inst.ldif2db(groups_backend, None, None, None, groups_import_ldif):
|
||||
+ log.fatal('test_basic_import_export: Offline groups import failed')
|
||||
+ assert False
|
||||
+ inst.start()
|
||||
+
|
||||
+ # Step 4. first prime the cache
|
||||
+ # Just request the 'DN'. We are interested by the time of matching not by the time of transfert
|
||||
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (priming_user_rdn, users_suffix), ['dn'])
|
||||
+ assert len(entries) == groups_number
|
||||
+
|
||||
+ # Step 5. Now do the real performance checking it should take less than a second
|
||||
+ # Just request the 'DN'. We are interested by the time of matching not by the time of transfert
|
||||
+ search_start = time.time()
|
||||
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (perf_user_rdn, users_suffix), ['dn'])
|
||||
+ duration = time.time() - search_start
|
||||
+ log.info("Duration of the search was %f", duration)
|
||||
+
|
||||
+ # Step 6. Gather the etime from the access log
|
||||
+ inst.stop()
|
||||
+ access_log = DirsrvAccessLog(inst)
|
||||
+ search_result = access_log.match(".*RESULT err=0 tag=101 nentries=%s.*" % groups_number)
|
||||
+ log.info("Found patterns are %s", search_result[0])
|
||||
+ log.info("Found patterns are %s", search_result[1])
|
||||
+ etime = float(search_result[1].split('etime=')[1])
|
||||
+ log.info("Duration of the search from access log was %f", etime)
|
||||
+ assert len(entries) == groups_number
|
||||
+ assert (etime < 1)
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/filterentry.c b/ldap/servers/slapd/filterentry.c
|
||||
index fd8fdda9f..cae5c7edc 100644
|
||||
--- a/ldap/servers/slapd/filterentry.c
|
||||
+++ b/ldap/servers/slapd/filterentry.c
|
||||
@@ -296,7 +296,27 @@ test_ava_filter(
|
||||
rc = -1;
|
||||
for (; a != NULL; a = a->a_next) {
|
||||
if (slapi_attr_type_cmp(ava->ava_type, a->a_type, SLAPI_TYPE_CMP_SUBTYPE) == 0) {
|
||||
- rc = plugin_call_syntax_filter_ava(a, ftype, ava);
|
||||
+ if ((ftype == LDAP_FILTER_EQUALITY) &&
|
||||
+ (slapi_attr_is_dn_syntax_type(a->a_type))) {
|
||||
+ /* This path is for a performance improvement */
|
||||
+
|
||||
+ /* In case of equality filter we can get benefit of the
|
||||
+ * sorted valuearray (from valueset).
|
||||
+ * This improvement is limited to DN syntax attributes for
|
||||
+ * which the sorted valueset was designed.
|
||||
+ */
|
||||
+ Slapi_Value *sval = NULL;
|
||||
+ sval = slapi_value_new_berval(&ava->ava_value);
|
||||
+ if (slapi_valueset_find((const Slapi_Attr *)a, &a->a_present_values, sval)) {
|
||||
+ rc = 0;
|
||||
+ }
|
||||
+ slapi_value_free(&sval);
|
||||
+ } else {
|
||||
+ /* When sorted valuearray optimization cannot be used
|
||||
+ * lets filter the value according to its syntax
|
||||
+ */
|
||||
+ rc = plugin_call_syntax_filter_ava(a, ftype, ava);
|
||||
+ }
|
||||
if (rc == 0) {
|
||||
break;
|
||||
}
|
||||
--
|
||||
2.46.0
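
Aside (illustration only, not server code): the speed-up comes from replacing a linear normalize-and-compare over every value in the valueset with a binary search of the already-sorted, already-normalized value array. A rough Python analogue of the two strategies:

import bisect

def linear_match(values, target, normalize):
    # old path: normalize and compare each stored value in turn
    return any(normalize(v) == normalize(target) for v in values)

def sorted_match(sorted_norm_values, target, normalize):
    # new path: one binary search over the pre-normalized, sorted array
    key = normalize(target)
    i = bisect.bisect_left(sorted_norm_values, key)
    return i < len(sorted_norm_values) and sorted_norm_values[i] == key

With 2000 uniquemember values per group, as in the test above, that is roughly 2000 normalizations per candidate entry versus about log2(2000) ≈ 11 comparisons.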
|
||||
|
@ -0,0 +1,155 @@
|
||||
From 580880a598a8f9972994684c49593a4cf8b8969b Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Sat, 29 May 2021 13:19:53 -0400
|
||||
Subject: [PATCH 12/12] Issue 4778 - RFE - Add changelog compaction task in
|
||||
1.4.3
|
||||
|
||||
Description: In 1.4.3 the replication changelog is a separate database,
|
||||
so it needs a separate "nsds5task" compaction task (COMPACT_CL5)
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4778
|
||||
|
||||
ASAN tested and approved
|
||||
|
||||
Reviewed by: mreynolds
|
||||
---
|
||||
ldap/servers/plugins/replication/cl5_api.c | 21 +++++++++----------
|
||||
ldap/servers/plugins/replication/cl5_api.h | 1 +
|
||||
.../replication/repl5_replica_config.c | 9 +++++++-
|
||||
3 files changed, 19 insertions(+), 12 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
|
||||
index 75a2f46f5..4c5077b48 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_api.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_api.c
|
||||
@@ -266,7 +266,6 @@ static int _cl5TrimInit(void);
|
||||
static void _cl5TrimCleanup(void);
|
||||
static int _cl5TrimMain(void *param);
|
||||
static void _cl5DoTrimming(void);
|
||||
-static void _cl5CompactDBs(void);
|
||||
static void _cl5PurgeRID(Object *file_obj, ReplicaId cleaned_rid);
|
||||
static int _cl5PurgeGetFirstEntry(Object *file_obj, CL5Entry *entry, void **iterator, DB_TXN *txnid, int rid, DBT *key);
|
||||
static int _cl5PurgeGetNextEntry(CL5Entry *entry, void *iterator, DBT *key);
|
||||
@@ -3152,7 +3151,7 @@ _cl5TrimMain(void *param __attribute__((unused)))
|
||||
if (slapi_current_utc_time() > compactdb_time) {
|
||||
/* time to trim */
|
||||
timeCompactPrev = timeNow;
|
||||
- _cl5CompactDBs();
|
||||
+ cl5CompactDBs();
|
||||
compacting = PR_FALSE;
|
||||
}
|
||||
}
|
||||
@@ -3250,8 +3249,8 @@ _cl5DoPurging(cleanruv_purge_data *purge_data)
|
||||
}
|
||||
|
||||
/* clear free page files to reduce changelog */
|
||||
-static void
|
||||
-_cl5CompactDBs(void)
|
||||
+void
|
||||
+cl5CompactDBs(void)
|
||||
{
|
||||
int rc;
|
||||
Object *fileObj = NULL;
|
||||
@@ -3264,14 +3263,14 @@ _cl5CompactDBs(void)
|
||||
rc = TXN_BEGIN(s_cl5Desc.dbEnv, NULL, &txnid, 0);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - Failed to begin transaction; db error - %d %s\n",
|
||||
+ "cl5CompactDBs - Failed to begin transaction; db error - %d %s\n",
|
||||
rc, db_strerror(rc));
|
||||
goto bail;
|
||||
}
|
||||
|
||||
|
||||
slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - compacting replication changelogs...\n");
|
||||
+ "cl5CompactDBs - compacting replication changelogs...\n");
|
||||
for (fileObj = objset_first_obj(s_cl5Desc.dbFiles);
|
||||
fileObj;
|
||||
fileObj = objset_next_obj(s_cl5Desc.dbFiles, fileObj)) {
|
||||
@@ -3284,17 +3283,17 @@ _cl5CompactDBs(void)
|
||||
&c_data, DB_FREE_SPACE, NULL /*end*/);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - Failed to compact %s; db error - %d %s\n",
|
||||
+ "cl5CompactDBs - Failed to compact %s; db error - %d %s\n",
|
||||
dbFile->replName, rc, db_strerror(rc));
|
||||
goto bail;
|
||||
}
|
||||
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - %s - %d pages freed\n",
|
||||
+ "cl5CompactDBs - %s - %d pages freed\n",
|
||||
dbFile->replName, c_data.compact_pages_free);
|
||||
}
|
||||
|
||||
slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - compacting replication changelogs finished.\n");
|
||||
+ "cl5CompactDBs - compacting replication changelogs finished.\n");
|
||||
bail:
|
||||
if (fileObj) {
|
||||
object_release(fileObj);
|
||||
@@ -3303,14 +3302,14 @@ bail:
|
||||
rc = TXN_ABORT(txnid);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - Failed to abort transaction; db error - %d %s\n",
|
||||
+ "cl5CompactDBs - Failed to abort transaction; db error - %d %s\n",
|
||||
rc, db_strerror(rc));
|
||||
}
|
||||
} else {
|
||||
rc = TXN_COMMIT(txnid);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - Failed to commit transaction; db error - %d %s\n",
|
||||
+ "cl5CompactDBs - Failed to commit transaction; db error - %d %s\n",
|
||||
rc, db_strerror(rc));
|
||||
}
|
||||
}
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h
|
||||
index 4b0949fb3..11db771f2 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_api.h
|
||||
+++ b/ldap/servers/plugins/replication/cl5_api.h
|
||||
@@ -405,5 +405,6 @@ int cl5DeleteRUV(void);
|
||||
void cl5CleanRUV(ReplicaId rid);
|
||||
void cl5NotifyCleanup(int rid);
|
||||
void trigger_cl_purging(cleanruv_purge_data *purge_data);
|
||||
+void cl5CompactDBs(void);
|
||||
|
||||
#endif
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
|
||||
index a969ef82f..e708a1ccb 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
|
||||
@@ -29,6 +29,8 @@
|
||||
#define CLEANRUVLEN 8
|
||||
#define CLEANALLRUV "CLEANALLRUV"
|
||||
#define CLEANALLRUVLEN 11
|
||||
+#define COMPACT_CL5 "COMPACT_CL5"
|
||||
+#define COMPACT_CL5_LEN 11
|
||||
#define REPLICA_RDN "cn=replica"
|
||||
|
||||
#define CLEANALLRUV_MAX_WAIT 7200 /* 2 hours */
|
||||
@@ -1050,7 +1052,6 @@ replica_config_change_flags(Replica *r, const char *new_flags, char *returntext
|
||||
static int
|
||||
replica_execute_task(Replica *r, const char *task_name, char *returntext, int apply_mods)
|
||||
{
|
||||
-
|
||||
if (strcasecmp(task_name, CL2LDIF_TASK) == 0) {
|
||||
if (apply_mods) {
|
||||
return replica_execute_cl2ldif_task(r, returntext);
|
||||
@@ -1084,6 +1085,12 @@ replica_execute_task(Replica *r, const char *task_name, char *returntext, int ap
|
||||
return replica_execute_cleanall_ruv_task(r, (ReplicaId)temprid, empty_task, "no", PR_TRUE, returntext);
|
||||
} else
|
||||
return LDAP_SUCCESS;
|
||||
+ } else if (strncasecmp(task_name, COMPACT_CL5, COMPACT_CL5_LEN) == 0) {
|
||||
+ /* compact the replication changelogs */
|
||||
+ if (apply_mods) {
|
||||
+ cl5CompactDBs();
|
||||
+ }
|
||||
+ return LDAP_SUCCESS;
|
||||
} else {
|
||||
PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, "Unsupported replica task - %s", task_name);
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
|
||||
--
|
||||
2.26.3
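
Note (illustration, not part of the patch): the new task is triggered the same way as the other nsds5task values, by writing COMPACT_CL5 to the replica configuration entry. A minimal lib389 sketch, assuming a DirSrv handle `inst` with replication enabled and an example suffix:

from lib389.replica import Replicas

replicas = Replicas(inst)
replica = replicas.get('dc=example,dc=com')   # replica root is an example value
# Same write that the later lib389 patch wraps in begin_task_compact_cl5()
replica.replace('nsds5task', 'COMPACT_CL5')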
|
||||
|
@ -1,163 +0,0 @@
|
||||
From 57051154bafaf50b83fc27dadbd89a49fd1c8c36 Mon Sep 17 00:00:00 2001
|
||||
From: Pierre Rogier <progier@redhat.com>
|
||||
Date: Fri, 14 Jun 2024 13:27:10 +0200
|
||||
Subject: [PATCH] Security fix for CVE-2024-5953
|
||||
|
||||
Description:
|
||||
A denial of service vulnerability was found in the 389 Directory Server.
|
||||
This issue may allow an authenticated user to cause a server denial
|
||||
of service while attempting to log in with a user with a malformed hash
|
||||
in their password.
|
||||
|
||||
Fix Description:
|
||||
To prevent buffer overflow when a bind request is processed, the bind fails
|
||||
if the hash size is not coherent without even attempting to process further
|
||||
the hashed password.
|
||||
|
||||
References:
|
||||
- https://nvd.nist.gov/vuln/detail/CVE-2024-5953
|
||||
- https://access.redhat.com/security/cve/CVE-2024-5953
|
||||
- https://bugzilla.redhat.com/show_bug.cgi?id=2292104
|
||||
---
|
||||
.../tests/suites/password/regression_test.py | 54 ++++++++++++++++++-
|
||||
ldap/servers/plugins/pwdstorage/md5_pwd.c | 9 +++-
|
||||
ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c | 6 +++
|
||||
3 files changed, 66 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/password/regression_test.py b/dirsrvtests/tests/suites/password/regression_test.py
|
||||
index 8f1facb6d..1fa581643 100644
|
||||
--- a/dirsrvtests/tests/suites/password/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/regression_test.py
|
||||
@@ -7,12 +7,14 @@
|
||||
#
|
||||
import pytest
|
||||
import time
|
||||
+import glob
|
||||
+import base64
|
||||
from lib389._constants import PASSWORD, DN_DM, DEFAULT_SUFFIX
|
||||
from lib389._constants import SUFFIX, PASSWORD, DN_DM, DN_CONFIG, PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DEFAULT_CHANGELOG_DB
|
||||
from lib389 import Entry
|
||||
from lib389.topologies import topology_m1 as topo_supplier
|
||||
-from lib389.idm.user import UserAccounts
|
||||
-from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer
|
||||
+from lib389.idm.user import UserAccounts, UserAccount
|
||||
+from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer, ds_supports_new_changelog
|
||||
from lib389.topologies import topology_st as topo
|
||||
from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
|
||||
@@ -39,6 +41,13 @@ TEST_PASSWORDS += ['CNpwtest1ZZZZ', 'ZZZZZCNpwtest1',
|
||||
TEST_PASSWORDS2 = (
|
||||
'CN12pwtest31', 'SN3pwtest231', 'UID1pwtest123', 'MAIL2pwtest12@redhat.com', '2GN1pwtest123', 'People123')
|
||||
|
||||
+SUPPORTED_SCHEMES = (
|
||||
+ "{SHA}", "{SSHA}", "{SHA256}", "{SSHA256}",
|
||||
+ "{SHA384}", "{SSHA384}", "{SHA512}", "{SSHA512}",
|
||||
+ "{crypt}", "{NS-MTA-MD5}", "{clear}", "{MD5}",
|
||||
+ "{SMD5}", "{PBKDF2_SHA256}", "{PBKDF2_SHA512}",
|
||||
+ "{GOST_YESCRYPT}", "{PBKDF2-SHA256}", "{PBKDF2-SHA512}" )
|
||||
+
|
||||
def _check_unhashed_userpw(inst, user_dn, is_present=False):
|
||||
"""Check if unhashed#user#password attribute is present or not in the changelog"""
|
||||
unhashed_pwd_attribute = 'unhashed#user#password'
|
||||
@@ -319,6 +328,47 @@ def test_unhashed_pw_switch(topo_supplier):
|
||||
# Add debugging steps(if any)...
|
||||
pass
|
||||
|
||||
+@pytest.mark.parametrize("scheme", SUPPORTED_SCHEMES )
|
||||
+def test_long_hashed_password(topo, create_user, scheme):
|
||||
+ """Check that hashed password with very long value does not cause trouble
|
||||
+
|
||||
+ :id: 252a1f76-114b-11ef-8a7a-482ae39447e5
|
||||
+ :setup: standalone Instance
|
||||
+ :parametrized: yes
|
||||
+ :steps:
|
||||
+ 1. Add a test user user
|
||||
+ 2. Set a long password with requested scheme
|
||||
+ 3. Bind on that user using a wrong password
|
||||
+ 4. Check that instance is still alive
|
||||
+ 5. Remove the added user
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Should get ldap.INVALID_CREDENTIALS exception
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ inst.simple_bind_s(DN_DM, PASSWORD)
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ # Make sure that server is started as this test may crash it
|
||||
+ inst.start()
|
||||
+ # Adding Test user (It may already exists if previous test failed)
|
||||
+ user2 = UserAccount(inst, dn='uid=test_user_1002,ou=People,dc=example,dc=com')
|
||||
+ if not user2.exists():
|
||||
+ user2 = users.create_test_user(uid=1002, gid=2002)
|
||||
+ # Setting hashed password
|
||||
+ passwd = 'A'*4000
|
||||
+ hashed_passwd = scheme.encode('utf-8') + base64.b64encode(passwd.encode('utf-8'))
|
||||
+ user2.replace('userpassword', hashed_passwd)
|
||||
+ # Bind on that user using a wrong password
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ conn = user2.bind(PASSWORD)
|
||||
+ # Check that instance is still alive
|
||||
+ assert inst.status()
|
||||
+ # Remove the added user
|
||||
+ user2.delete()
|
||||
+
|
||||
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
diff --git a/ldap/servers/plugins/pwdstorage/md5_pwd.c b/ldap/servers/plugins/pwdstorage/md5_pwd.c
|
||||
index 1e2cf58e7..b9a48d5ca 100644
|
||||
--- a/ldap/servers/plugins/pwdstorage/md5_pwd.c
|
||||
+++ b/ldap/servers/plugins/pwdstorage/md5_pwd.c
|
||||
@@ -37,6 +37,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
unsigned char hash_out[MD5_HASH_LEN];
|
||||
unsigned char b2a_out[MD5_HASH_LEN * 2]; /* conservative */
|
||||
SECItem binary_item;
|
||||
+ size_t dbpwd_len = strlen(dbpwd);
|
||||
|
||||
ctx = PK11_CreateDigestContext(SEC_OID_MD5);
|
||||
if (ctx == NULL) {
|
||||
@@ -45,6 +46,12 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
goto loser;
|
||||
}
|
||||
|
||||
+ if (dbpwd_len >= sizeof b2a_out) {
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
|
||||
+ "The hashed password stored in the user entry is longer than any valid md5 hash");
|
||||
+ goto loser;
|
||||
+ }
|
||||
+
|
||||
/* create the hash */
|
||||
PK11_DigestBegin(ctx);
|
||||
PK11_DigestOp(ctx, (const unsigned char *)userpwd, strlen(userpwd));
|
||||
@@ -57,7 +64,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
bver = NSSBase64_EncodeItem(NULL, (char *)b2a_out, sizeof b2a_out, &binary_item);
|
||||
/* bver points to b2a_out upon success */
|
||||
if (bver) {
|
||||
- rc = slapi_ct_memcmp(bver, dbpwd, strlen(dbpwd));
|
||||
+ rc = slapi_ct_memcmp(bver, dbpwd, dbpwd_len);
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
|
||||
"Could not base64 encode hashed value for password compare");
|
||||
diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
index dcac4fcdd..82b8c9501 100644
|
||||
--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
+++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
@@ -255,6 +255,12 @@ pbkdf2_sha256_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
passItem.data = (unsigned char *)userpwd;
|
||||
passItem.len = strlen(userpwd);
|
||||
|
||||
+ if (pwdstorage_base64_decode_len(dbpwd, dbpwd_len) > sizeof dbhash) {
|
||||
+ /* Hashed value is too long and cannot match any value generated by pbkdf2_sha256_hash */
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value. (hashed value is too long)\n");
|
||||
+ return result;
|
||||
+ }
|
||||
+
|
||||
/* Decode the DBpwd to bytes from b64 */
|
||||
if (PL_Base64Decode(dbpwd, dbpwd_len, dbhash) == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value\n");
|
||||
--
|
||||
2.46.0
|
||||
|
@ -1,178 +0,0 @@
|
||||
From e8a5b1deef1b455aafecb71efc029d2407b1b06f Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Tue, 16 Jul 2024 08:32:21 -0700
|
||||
Subject: [PATCH] Issue 4778 - Add COMPACT_CL5 task to dsconf replication
|
||||
(#6260)
|
||||
|
||||
Description: In 1.4.3, the changelog is not part of a backend.
|
||||
It can be compacted with nsds5task: CAMPACT_CL5 as part of the replication entry.
|
||||
Add the task as a compact-changelog command under the dsconf replication tool.
|
||||
Add tests for the feature and fix old tests.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/4778
|
||||
|
||||
Reviewed by: @progier389 (Thanks!)
|
||||
---
|
||||
.../tests/suites/config/compact_test.py | 36 ++++++++++++++---
|
||||
src/lib389/lib389/cli_conf/replication.py | 10 +++++
|
||||
src/lib389/lib389/replica.py | 40 +++++++++++++++++++
|
||||
3 files changed, 81 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/config/compact_test.py b/dirsrvtests/tests/suites/config/compact_test.py
|
||||
index 317258d0e..31d98d10c 100644
|
||||
--- a/dirsrvtests/tests/suites/config/compact_test.py
|
||||
+++ b/dirsrvtests/tests/suites/config/compact_test.py
|
||||
@@ -13,14 +13,14 @@ import time
|
||||
import datetime
|
||||
from lib389.tasks import DBCompactTask
|
||||
from lib389.backend import DatabaseConfig
|
||||
-from lib389.replica import Changelog5
|
||||
+from lib389.replica import Changelog5, Replicas
|
||||
from lib389.topologies import topology_m1 as topo
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def test_compact_db_task(topo):
|
||||
- """Specify a test case purpose or name here
|
||||
+ """Test compaction of database
|
||||
|
||||
:id: 1b3222ef-a336-4259-be21-6a52f76e1859
|
||||
:setup: Standalone Instance
|
||||
@@ -48,7 +48,7 @@ def test_compact_db_task(topo):
|
||||
|
||||
|
||||
def test_compaction_interval_and_time(topo):
|
||||
- """Specify a test case purpose or name here
|
||||
+ """Test compaction interval and time for database and changelog
|
||||
|
||||
:id: f361bee9-d7e7-4569-9255-d7b60dd9d92e
|
||||
:setup: Supplier Instance
|
||||
@@ -95,10 +95,36 @@ def test_compaction_interval_and_time(topo):
|
||||
|
||||
# Check compaction occurred as expected
|
||||
time.sleep(45)
|
||||
- assert not inst.searchErrorsLog("Compacting databases")
|
||||
+ assert not inst.searchErrorsLog("compacting replication changelogs")
|
||||
|
||||
time.sleep(90)
|
||||
- assert inst.searchErrorsLog("Compacting databases")
|
||||
+ assert inst.searchErrorsLog("compacting replication changelogs")
|
||||
+ inst.deleteErrorLogs(restart=False)
|
||||
+
|
||||
+
|
||||
+def test_compact_cl5_task(topo):
|
||||
+ """Test compaction of changelog5 database
|
||||
+
|
||||
+ :id: aadfa9f7-73c0-463a-912c-0a29aa1f8167
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Run compaction task
|
||||
+ 2. Check errors log to show task was run
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ """
|
||||
+ inst = topo.ms["supplier1"]
|
||||
+
|
||||
+ replicas = Replicas(inst)
|
||||
+ replicas.compact_changelog(log=log)
|
||||
+
|
||||
+ # Check compaction occurred as expected. But instead of time.sleep(5) check 1 sec in loop
|
||||
+ for _ in range(5):
|
||||
+ time.sleep(1)
|
||||
+ if inst.searchErrorsLog("compacting replication changelogs"):
|
||||
+ break
|
||||
+ assert inst.searchErrorsLog("compacting replication changelogs")
|
||||
inst.deleteErrorLogs(restart=False)
|
||||
|
||||
|
||||
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
|
||||
index 352c0ee5b..ccc394255 100644
|
||||
--- a/src/lib389/lib389/cli_conf/replication.py
|
||||
+++ b/src/lib389/lib389/cli_conf/replication.py
|
||||
@@ -1199,6 +1199,11 @@ def restore_cl_dir(inst, basedn, log, args):
|
||||
replicas.restore_changelog(replica_roots=args.REPLICA_ROOTS, log=log)
|
||||
|
||||
|
||||
+def compact_cl5(inst, basedn, log, args):
|
||||
+ replicas = Replicas(inst)
|
||||
+ replicas.compact_changelog(replica_roots=args.REPLICA_ROOTS, log=log)
|
||||
+
|
||||
+
|
||||
def create_parser(subparsers):
|
||||
|
||||
############################################
|
||||
@@ -1326,6 +1331,11 @@ def create_parser(subparsers):
|
||||
help="Specify one replica root whose changelog you want to restore. "
|
||||
"The replica root will be consumed from the LDIF file name if the option is omitted.")
|
||||
|
||||
+ compact_cl = repl_subcommands.add_parser('compact-changelog', help='Compact the changelog database')
|
||||
+ compact_cl.set_defaults(func=compact_cl5)
|
||||
+ compact_cl.add_argument('REPLICA_ROOTS', nargs="+",
|
||||
+ help="Specify replica roots whose changelog you want to compact.")
|
||||
+
|
||||
restore_changelogdir = restore_subcommands.add_parser('from-changelogdir', help='Restore LDIF files from changelogdir.')
|
||||
restore_changelogdir.set_defaults(func=restore_cl_dir)
|
||||
restore_changelogdir.add_argument('REPLICA_ROOTS', nargs="+",
|
||||
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
|
||||
index 94e1fdad5..1f321972d 100644
|
||||
--- a/src/lib389/lib389/replica.py
|
||||
+++ b/src/lib389/lib389/replica.py
|
||||
@@ -1648,6 +1648,11 @@ class Replica(DSLdapObject):
|
||||
"""
|
||||
self.replace('nsds5task', 'ldif2cl')
|
||||
|
||||
+ def begin_task_compact_cl5(self):
|
||||
+ """Begin COMPACT_CL5 task
|
||||
+ """
|
||||
+ self.replace('nsds5task', 'COMPACT_CL5')
|
||||
+
|
||||
def get_suffix(self):
|
||||
"""Return the suffix
|
||||
"""
|
||||
@@ -1829,6 +1834,41 @@ class Replicas(DSLdapObjects):
|
||||
log.error(f"Changelog LDIF for '{repl_root}' was not found")
|
||||
continue
|
||||
|
||||
+ def compact_changelog(self, replica_roots=[], log=None):
|
||||
+ """Compact Directory Server replication changelog
|
||||
+
|
||||
+ :param replica_roots: Replica suffixes that need to be processed (and optional LDIF file path)
|
||||
+ :type replica_roots: list of str
|
||||
+ :param log: The logger object
|
||||
+ :type log: logger
|
||||
+ """
|
||||
+
|
||||
+ if log is None:
|
||||
+ log = self._log
|
||||
+
|
||||
+ # Check if the changelog entry exists
|
||||
+ try:
|
||||
+ cl = Changelog5(self._instance)
|
||||
+ cl.get_attr_val_utf8_l("nsslapd-changelogdir")
|
||||
+ except ldap.NO_SUCH_OBJECT:
|
||||
+ raise ValueError("Changelog entry was not found. Probably, the replication is not enabled on this instance")
|
||||
+
|
||||
+ # Get all the replicas on the server if --replica-roots option is not specified
|
||||
+ repl_roots = []
|
||||
+ if not replica_roots:
|
||||
+ for replica in self.list():
|
||||
+ repl_roots.append(replica.get_attr_val_utf8("nsDS5ReplicaRoot"))
|
||||
+ else:
|
||||
+ for repl_root in replica_roots:
|
||||
+ repl_roots.append(repl_root)
|
||||
+
|
||||
+ # Dump the changelog for the replica
|
||||
+
|
||||
+ # Dump the changelog for the replica
|
||||
+ for repl_root in repl_roots:
|
||||
+ replica = self.get(repl_root)
|
||||
+ replica.begin_task_compact_cl5()
|
||||
+
|
||||
|
||||
class BootstrapReplicationManager(DSLdapObject):
|
||||
"""A Replication Manager credential for bootstrapping the repl process.
|
||||
--
|
||||
2.47.0
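
Usage note (not part of the patch): with this change the compaction can be driven either from Python or from the new dsconf subcommand. A short sketch, assuming a supplier instance handle `inst` with replication enabled on an example suffix:

from lib389.replica import Replicas

Replicas(inst).compact_changelog(replica_roots=['dc=example,dc=com'], log=None)

On the command line this corresponds to the parser entry added above, roughly `dsconf <instance> replication compact-changelog dc=example,dc=com`; the errors log should then contain "compacting replication changelogs", which is exactly what test_compact_cl5_task polls for.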
|
||||
|
@ -0,0 +1,52 @@
|
||||
From bc41bbb89405b2059b80e344b2d4c59ae39aabe6 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Thu, 10 Jun 2021 15:03:27 +0200
|
||||
Subject: [PATCH 1/3] Issue 4797 - ACL IP ADDRESS evaluation may corrupt
|
||||
c_isreplication_session connection flags (#4799)
|
||||
|
||||
Bug description:
|
||||
The fix for ticket #3764 was broken with a missing break in a
|
||||
switch. The consequence is that while setting the client IP
|
||||
address in the pblock (SLAPI_CONN_CLIENTNETADDR_ACLIP), the
|
||||
connection is erroneously set as replication connection.
|
||||
This can lead to crash or failure of testcase
|
||||
test_access_from_certain_network_only_ip.
|
||||
This bug was quite hidden until the fix for #4764 is
|
||||
showing it more frequently
|
||||
|
||||
Fix description:
|
||||
Add the missing break
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4797
|
||||
|
||||
Reviewed by: Mark Reynolds
|
||||
|
||||
Platforms tested: F33
|
||||
---
|
||||
ldap/servers/slapd/pblock.c | 3 ++-
|
||||
1 file changed, 2 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
|
||||
index fcac53839..a64986aeb 100644
|
||||
--- a/ldap/servers/slapd/pblock.c
|
||||
+++ b/ldap/servers/slapd/pblock.c
|
||||
@@ -2595,7 +2595,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
|
||||
pblock->pb_conn->c_authtype = slapi_ch_strdup((char *)value);
|
||||
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
|
||||
break;
|
||||
- case SLAPI_CONN_CLIENTNETADDR_ACLIP:
|
||||
+ case SLAPI_CONN_CLIENTNETADDR_ACLIP:
|
||||
if (pblock->pb_conn == NULL) {
|
||||
break;
|
||||
}
|
||||
@@ -2603,6 +2603,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
|
||||
slapi_ch_free((void **)&pblock->pb_conn->cin_addr_aclip);
|
||||
pblock->pb_conn->cin_addr_aclip = (PRNetAddr *)value;
|
||||
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
|
||||
+ break;
|
||||
case SLAPI_CONN_IS_REPLICATION_SESSION:
|
||||
if (pblock->pb_conn == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR,
|
||||
--
|
||||
2.31.1
|
||||
|
@ -0,0 +1,79 @@
|
||||
From b3170e39519530c39d59202413b20e6bd466224d Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 27 Jan 2021 09:56:38 +0000
|
||||
Subject: [PATCH 2/3] Issue 4396 - Minor memory leak in backend (#4558) (#4572)
|
||||
|
||||
Bug Description: As multiple suffixes per backend were no longer used, this
|
||||
functionality has been replaced with a single suffix per backend. Legacy
|
||||
code remains that adds multiple suffixes to the dse internal backend,
|
||||
resulting in memory allocations that are lost.
|
||||
|
||||
Also a minor typo is corrected in backend.c
|
||||
|
||||
Fix Description: Calls to be_addsuffix on the DSE backend are removed
|
||||
as they are never used.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/4396
|
||||
|
||||
Reviewed by: mreynolds389, Firstyear, droideck (Thank you)
|
||||
---
|
||||
ldap/servers/slapd/backend.c | 2 +-
|
||||
ldap/servers/slapd/fedse.c | 12 +++---------
|
||||
2 files changed, 4 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
|
||||
index bc52b4643..5707504a9 100644
|
||||
--- a/ldap/servers/slapd/backend.c
|
||||
+++ b/ldap/servers/slapd/backend.c
|
||||
@@ -42,7 +42,7 @@ be_init(Slapi_Backend *be, const char *type, const char *name, int isprivate, in
|
||||
}
|
||||
be->be_monitordn = slapi_create_dn_string("cn=monitor,cn=%s,cn=%s,cn=plugins,cn=config",
|
||||
name, type);
|
||||
- if (NULL == be->be_configdn) {
|
||||
+ if (NULL == be->be_monitordn) {
|
||||
slapi_log_err(SLAPI_LOG_ERR,
|
||||
"be_init", "Failed create instance monitor dn for "
|
||||
"plugin %s, instance %s\n",
|
||||
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
|
||||
index 0d645f909..7b820b540 100644
|
||||
--- a/ldap/servers/slapd/fedse.c
|
||||
+++ b/ldap/servers/slapd/fedse.c
|
||||
@@ -2827,7 +2827,7 @@ search_snmp(Slapi_PBlock *pb __attribute__((unused)),
|
||||
}
|
||||
|
||||
/*
|
||||
- * Called from config.c to install the internal backends
|
||||
+ * Called from main.c to install the internal backends
|
||||
*/
|
||||
int
|
||||
setup_internal_backends(char *configdir)
|
||||
@@ -2846,7 +2846,6 @@ setup_internal_backends(char *configdir)
|
||||
Slapi_DN counters;
|
||||
Slapi_DN snmp;
|
||||
Slapi_DN root;
|
||||
- Slapi_Backend *be;
|
||||
Slapi_DN encryption;
|
||||
Slapi_DN saslmapping;
|
||||
Slapi_DN plugins;
|
||||
@@ -2895,16 +2894,11 @@ setup_internal_backends(char *configdir)
|
||||
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &saslmapping, LDAP_SCOPE_SUBTREE, "(objectclass=nsSaslMapping)", sasl_map_config_add, NULL, NULL);
|
||||
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &plugins, LDAP_SCOPE_SUBTREE, "(objectclass=nsSlapdPlugin)", check_plugin_path, NULL, NULL);
|
||||
|
||||
- be = be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
|
||||
- be_addsuffix(be, &root);
|
||||
- be_addsuffix(be, &monitor);
|
||||
- be_addsuffix(be, &config);
|
||||
+ be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
|
||||
|
||||
/*
|
||||
- * Now that the be's are in place, we can
|
||||
- * setup the mapping tree.
|
||||
+ * Now that the be's are in place, we can setup the mapping tree.
|
||||
*/
|
||||
-
|
||||
if (mapping_tree_init()) {
|
||||
slapi_log_err(SLAPI_LOG_EMERG, "setup_internal_backends", "Failed to init mapping tree\n");
|
||||
exit(1);
|
||||
--
|
||||
2.31.1
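
Besides dropping the unused be_addsuffix calls, the backend.c hunk above fixes a copy-paste slip: the NULL check after creating be_monitordn inspected be_configdn, so an allocation failure of the monitor DN went undetected. A small standalone sketch of that pattern, with hypothetical fields in place of the Slapi_Backend members:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct backend {
        char *configdn;
        char *monitordn;
    };

    /* Returns 0 on success, -1 if either DN could not be allocated. */
    static int init_dns(struct backend *be, const char *name)
    {
        be->configdn = strdup(name);
        if (be->configdn == NULL) {
            return -1;
        }
        be->monitordn = strdup(name);
        if (be->monitordn == NULL) {  /* checking be->configdn here would hide the failure */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct backend be = {0};
        printf("%d\n", init_dns(&be, "cn=monitor"));
        free(be.configdn);
        free(be.monitordn);
        return 0;
    }
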
@ -1,55 +0,0 @@
From d1cd9a5675e2953b7c8034ebb87a434cdd3ce0c3 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 2 Dec 2024 17:18:32 +0100
|
||||
Subject: [PATCH] Issue 6417 - If an entry RDN is identical to the suffix, then
|
||||
Entryrdn gets broken during a reindex (#6418)
|
||||
|
||||
Bug description:
|
||||
During a reindex, the entryrdn index is built at the end from
|
||||
each entry in the suffix.
|
||||
If one entry has a RDN that is identical to the suffix DN,
|
||||
then entryrdn_lookup_dn may erroneously return the suffix DN
|
||||
as the DN of the entry.
|
||||
|
||||
Fix description:
|
||||
When the lookup entry has no parent (because the index is being
rebuilt), the loop looks up the entry using the RDN.
If this RDN matches the suffix DN, it exits the loop
with the suffix DN.
Before exiting it checks that the original lookup entryID
is equal to the suffix entryID. If they do not match,
the function fails and the DN of the entry is then
built from id2entry.
|
||||
|
||||
fixes: #6417
|
||||
|
||||
Reviewed by: Pierre Rogier, Simon Pichugin (Thanks !!!)
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 11 ++++++++++-
|
||||
1 file changed, 10 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 5797dd779..83b041192 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1224,7 +1224,16 @@ entryrdn_lookup_dn(backend *be,
|
||||
}
|
||||
goto bail;
|
||||
}
|
||||
- maybesuffix = 1;
|
||||
+ if (workid == 1) {
|
||||
+ /* The loop (workid) iterates from the starting 'id'
|
||||
+ * up to the suffix ID (i.e. '1').
|
||||
+ * A corner case (#6417) is if an entry, on the path
|
||||
+ * 'id' -> suffix, has the same RDN than the suffix.
|
||||
+ * In order to erroneously believe the loop hits the suffix
|
||||
+ * we need to check that 'workid' is '1' (suffix)
|
||||
+ */
|
||||
+ maybesuffix = 1;
|
||||
+ }
|
||||
} else {
|
||||
_entryrdn_cursor_print_error("entryrdn_lookup_dn",
|
||||
key.data, data.size, data.ulen, rc);
|
||||
--
|
||||
2.48.0
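
The guard added above matters because entryrdn_lookup_dn rebuilds a DN by walking parent links from the starting entry ID toward the suffix, and a matching RDN is only conclusive once the walk has actually reached the suffix ID. A simplified standalone model of that loop (toy types and data, not the ldbm code):

    #include <stdio.h>
    #include <string.h>

    struct node {
        unsigned id;
        const char *rdn;
        unsigned parent;
    };

    /* Tiny stand-in for the entryrdn index: id 1 is the suffix "dc=example",
     * and id 3 deliberately carries the same RDN as the suffix (the #6417 corner case). */
    static const struct node idx[] = {
        {1, "dc=example", 0},
        {2, "ou=people", 1},
        {3, "dc=example", 2},
        {4, "uid=joe", 3},
    };

    static const struct node *lookup(unsigned id)
    {
        for (size_t i = 0; i < sizeof(idx) / sizeof(idx[0]); i++)
            if (idx[i].id == id)
                return &idx[i];
        return NULL;
    }

    int main(void)
    {
        const unsigned suffix_id = 1;
        const char *suffix_rdn = "dc=example";

        for (unsigned id = 4; ; ) {
            const struct node *n = lookup(id);
            /* Matching the suffix RDN only terminates the walk when id is the suffix ID. */
            if (strcmp(n->rdn, suffix_rdn) == 0 && id == suffix_id) {
                printf("reached the suffix at id %u\n", id);
                break;
            }
            printf("id %u (%s) is not the suffix, walking up\n", id, n->rdn);
            id = n->parent;
        }
        return 0;
    }

Without the id check, the walk would stop at id 3 and return the suffix DN for an ordinary child entry, which is exactly the corruption described above.
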
@ -0,0 +1,66 @@
From 8d06fdf44b0d337f1e321e61ee1b22972ddea917 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Fri, 2 Apr 2021 14:05:41 +0200
|
||||
Subject: [PATCH 3/3] Issue 4700 - Regression in winsync replication agreement
|
||||
(#4712)
|
||||
|
||||
Bug description:
|
||||
#4396 fixes a memory leak but did not set 'cn=config' as
|
||||
DSE backend.
|
||||
It had no significant impact except with the sidgen IPA plugin
|
||||
|
||||
Fix description:
|
||||
revert the portion of the #4364 patch that set be_suffix
in be_addsuffix, and free the previous suffix before setting the new one
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4700
|
||||
|
||||
Reviewed by: Pierre Rogier (thanks !)
|
||||
|
||||
Platforms tested: F33
|
||||
---
|
||||
ldap/servers/slapd/backend.c | 3 ++-
|
||||
ldap/servers/slapd/fedse.c | 6 +++++-
|
||||
2 files changed, 7 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
|
||||
index 5707504a9..5db706841 100644
|
||||
--- a/ldap/servers/slapd/backend.c
|
||||
+++ b/ldap/servers/slapd/backend.c
|
||||
@@ -173,7 +173,8 @@ void
|
||||
be_addsuffix(Slapi_Backend *be, const Slapi_DN *suffix)
|
||||
{
|
||||
if (be->be_state != BE_STATE_DELETED) {
|
||||
- be->be_suffix = slapi_sdn_dup(suffix);;
|
||||
+ slapi_sdn_free(&be->be_suffix);
|
||||
+ be->be_suffix = slapi_sdn_dup(suffix);
|
||||
}
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
|
||||
index 7b820b540..44159c991 100644
|
||||
--- a/ldap/servers/slapd/fedse.c
|
||||
+++ b/ldap/servers/slapd/fedse.c
|
||||
@@ -2846,6 +2846,7 @@ setup_internal_backends(char *configdir)
|
||||
Slapi_DN counters;
|
||||
Slapi_DN snmp;
|
||||
Slapi_DN root;
|
||||
+ Slapi_Backend *be;
|
||||
Slapi_DN encryption;
|
||||
Slapi_DN saslmapping;
|
||||
Slapi_DN plugins;
|
||||
@@ -2894,7 +2895,10 @@ setup_internal_backends(char *configdir)
|
||||
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &saslmapping, LDAP_SCOPE_SUBTREE, "(objectclass=nsSaslMapping)", sasl_map_config_add, NULL, NULL);
|
||||
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &plugins, LDAP_SCOPE_SUBTREE, "(objectclass=nsSlapdPlugin)", check_plugin_path, NULL, NULL);
|
||||
|
||||
- be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
|
||||
+ be = be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
|
||||
+ be_addsuffix(be, &root);
|
||||
+ be_addsuffix(be, &monitor);
|
||||
+ be_addsuffix(be, &config);
|
||||
|
||||
/*
|
||||
* Now that the be's are in place, we can setup the mapping tree.
|
||||
--
|
||||
2.31.1
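
The corrected be_addsuffix above keeps the single-suffix setter safe to call repeatedly: it releases whatever suffix was set before duplicating the new one, so restoring the three historical calls on the DSE backend no longer leaks. A minimal sketch of that free-before-duplicate pattern, using plain strdup in place of the Slapi_DN helpers:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct backend {
        char *suffix;
    };

    /* Setting the suffix again first frees the previous copy, so repeated
     * calls keep exactly one live allocation instead of leaking the old ones. */
    static void set_suffix(struct backend *be, const char *suffix)
    {
        free(be->suffix);
        be->suffix = strdup(suffix);
    }

    int main(void)
    {
        struct backend be = {NULL};
        set_suffix(&be, "cn=config");
        set_suffix(&be, "cn=monitor");
        set_suffix(&be, "");          /* root DSE */
        printf("final suffix: '%s'\n", be.suffix);
        free(be.suffix);
        return 0;
    }
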
@ -1,267 +0,0 @@
From 9b2fc77a36156ea987dcea6e2043f8e4c4a6b259 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Tue, 18 Jun 2024 14:21:07 +0200
|
||||
Subject: [PATCH] Issue 6224 - d2entry - Could not open id2entry err 0 - at
|
||||
startup when having sub-suffixes (#6225)
|
||||
|
||||
Problem: id2entry - Could not open id2entry err 0 is logged at startup when having sub-suffixes
|
||||
Reason: The slapi_exist_referral internal search accesses a backend that is not yet started.
|
||||
Solution: Limit the internal search to a single backend
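
The limiting is done by attaching one more server control to the internal referral search, so the mapping tree confines it to the backend being checked instead of touching sub-suffix backends that are still offline; the calloc(2 -> 3) change in the hunk below makes room for the extra control plus the NULL terminator. A standalone sketch of building such a NULL-terminated control list (minimal local types; the second OID is a placeholder, not the actual MTN control):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Minimal stand-in for LDAPControl: an OID, no value, non-critical. */
    struct control {
        char *oid;
        int   iscritical;
    };

    /* Builds a NULL-terminated array of n controls; the extra slot is the terminator. */
    static struct control **make_controls(const char **oids, size_t n)
    {
        struct control **ctrls = calloc(n + 1, sizeof(*ctrls));
        for (size_t i = 0; i < n; i++) {
            ctrls[i] = malloc(sizeof(**ctrls));
            ctrls[i]->oid = strdup(oids[i]);
            ctrls[i]->iscritical = 0;
        }
        return ctrls;
    }

    int main(void)
    {
        /* ManageDsaIT plus a hypothetical "search one backend only" control. */
        const char *oids[] = {"2.16.840.1.113730.3.4.2", "x-placeholder-one-backend-oid"};
        struct control **ctrls = make_controls(oids, 2);
        for (size_t i = 0; ctrls[i]; i++) {
            printf("control %zu: %s\n", i, ctrls[i]->oid);
            free(ctrls[i]->oid);
            free(ctrls[i]);
        }
        free(ctrls);
        return 0;
    }
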
|
||||
|
||||
Issue: #6224
|
||||
|
||||
Reviewed by: @droideck Thanks!
|
||||
|
||||
(cherry picked from commit 796f703021e961fdd8cbc53b4ad4e20258af0e96)
|
||||
---
|
||||
.../tests/suites/ds_logs/ds_logs_test.py | 1 +
|
||||
.../suites/mapping_tree/regression_test.py | 161 +++++++++++++++++-
|
||||
ldap/servers/slapd/backend.c | 7 +-
|
||||
3 files changed, 159 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 812936c62..84a9c6ec8 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,6 +1222,7 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+<<<<<<< HEAD
|
||||
def test_referral_subsuffix(topology_st, request):
|
||||
"""Test the results of an inverted parent suffix definition in the configuration.
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/mapping_tree/regression_test.py b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
index 99d4a1d5f..689ff9f59 100644
|
||||
--- a/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
@@ -11,10 +11,14 @@ import ldap
|
||||
import logging
|
||||
import os
|
||||
import pytest
|
||||
+import time
|
||||
from lib389.backend import Backends, Backend
|
||||
+from lib389._constants import HOST_STANDALONE, PORT_STANDALONE, DN_DM, PW_DM
|
||||
from lib389.dbgen import dbgen_users
|
||||
from lib389.mappingTree import MappingTrees
|
||||
from lib389.topologies import topology_st
|
||||
+from lib389.referral import Referrals, Referral
|
||||
+
|
||||
|
||||
try:
|
||||
from lib389.backend import BackendSuffixView
|
||||
@@ -31,14 +35,26 @@ else:
|
||||
logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
+PARENT_SUFFIX = "dc=parent"
|
||||
+CHILD1_SUFFIX = f"dc=child1,{PARENT_SUFFIX}"
|
||||
+CHILD2_SUFFIX = f"dc=child2,{PARENT_SUFFIX}"
|
||||
+
|
||||
+PARENT_REFERRAL_DN = f"cn=ref,ou=People,{PARENT_SUFFIX}"
|
||||
+CHILD1_REFERRAL_DN = f"cn=ref,ou=people,{CHILD1_SUFFIX}"
|
||||
+CHILD2_REFERRAL_DN = f"cn=ref,ou=people,{CHILD2_SUFFIX}"
|
||||
+
|
||||
+REFERRAL_CHECK_PEDIOD = 7
|
||||
+
|
||||
+
|
||||
+
|
||||
BESTRUCT = [
|
||||
- { "bename" : "parent", "suffix": "dc=parent" },
|
||||
- { "bename" : "child1", "suffix": "dc=child1,dc=parent" },
|
||||
- { "bename" : "child2", "suffix": "dc=child2,dc=parent" },
|
||||
+ { "bename" : "parent", "suffix": PARENT_SUFFIX },
|
||||
+ { "bename" : "child1", "suffix": CHILD1_SUFFIX },
|
||||
+ { "bename" : "child2", "suffix": CHILD2_SUFFIX },
|
||||
]
|
||||
|
||||
|
||||
-@pytest.fixture(scope="function")
|
||||
+@pytest.fixture(scope="module")
|
||||
def topo(topology_st, request):
|
||||
bes = []
|
||||
|
||||
@@ -50,6 +66,9 @@ def topo(topology_st, request):
|
||||
request.addfinalizer(fin)
|
||||
|
||||
inst = topology_st.standalone
|
||||
+ # Reduce nsslapd-referral-check-period to accelerate test
|
||||
+ topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK_PEDIOD))
|
||||
+
|
||||
ldif_files = {}
|
||||
for d in BESTRUCT:
|
||||
bename = d['bename']
|
||||
@@ -76,14 +95,13 @@ def topo(topology_st, request):
|
||||
inst.start()
|
||||
return topology_st
|
||||
|
||||
-# Parameters for test_change_repl_passwd
|
||||
-EXPECTED_ENTRIES = (("dc=parent", 39), ("dc=child1,dc=parent", 13), ("dc=child2,dc=parent", 13))
|
||||
+# Parameters for test_sub_suffixes
|
||||
@pytest.mark.parametrize(
|
||||
"orphan_param",
|
||||
[
|
||||
- pytest.param( ( True, { "dc=parent": 2, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-true" ),
|
||||
- pytest.param( ( False, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-false" ),
|
||||
- pytest.param( ( None, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="no-orphan" ),
|
||||
+ pytest.param( ( True, { PARENT_SUFFIX: 2, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="orphan-is-true" ),
|
||||
+ pytest.param( ( False, { PARENT_SUFFIX: 3, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="orphan-is-false" ),
|
||||
+ pytest.param( ( None, { PARENT_SUFFIX: 3, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="no-orphan" ),
|
||||
],
|
||||
)
|
||||
|
||||
@@ -128,3 +146,128 @@ def test_sub_suffixes(topo, orphan_param):
|
||||
log.info('Test PASSED')
|
||||
|
||||
|
||||
+def test_one_level_search_on_sub_suffixes(topo):
|
||||
+ """ Perform one level scoped search accross suffix and sub-suffix
|
||||
+
|
||||
+ :id: 92f3139e-280e-11ef-a989-482ae39447e5
|
||||
+ :feature: mapping-tree
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
+ :steps:
|
||||
+ 1. Perform a ONE LEVEL search on dc=parent
|
||||
+ 2. Check that all expected entries have been returned
|
||||
+ 3. Check that only the expected entries have been returned
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. each expected dn should be in the result set
|
||||
+ 3. Number of returned entries should be the same as the number of expected entries
|
||||
+ """
|
||||
+ expected_dns = ( 'dc=child1,dc=parent',
|
||||
+ 'dc=child2,dc=parent',
|
||||
+ 'ou=accounting,dc=parent',
|
||||
+ 'ou=product development,dc=parent',
|
||||
+ 'ou=product testing,dc=parent',
|
||||
+ 'ou=human resources,dc=parent',
|
||||
+ 'ou=payroll,dc=parent',
|
||||
+ 'ou=people,dc=parent',
|
||||
+ 'ou=groups,dc=parent', )
|
||||
+ entries = topo.standalone.search_s("dc=parent", ldap.SCOPE_ONELEVEL, "(objectClass=*)",
|
||||
+ attrlist=("dc","ou"), escapehatch='i am sure')
|
||||
+ log.info(f'one level search on dc=parent returned the following entries: {entries}')
|
||||
+ dns = [ entry.dn for entry in entries ]
|
||||
+ for dn in expected_dns:
|
||||
+ assert dn in dns
|
||||
+ assert len(entries) == len(expected_dns)
|
||||
+
|
||||
+
|
||||
+def test_sub_suffixes_errlog(topo):
|
||||
+ """ check the entries found on suffix/sub-suffix
|
||||
+ used int
|
||||
+
|
||||
+ :id: 1db9d52e-28de-11ef-b286-482ae39447e5
|
||||
+ :feature: mapping-tree
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
+ :steps:
|
||||
+ 1. Check that id2entry error message is not in the error log.
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ assert not inst.searchErrorsLog('id2entry - Could not open id2entry err 0')
|
||||
+
|
||||
+
|
||||
+# Parameters for test_referral_subsuffix:
|
||||
+# a tuple pair containing:
|
||||
+# - list of referral dn that must be created
|
||||
+# - dict of searches basedn: expected_number_of_referrals
|
||||
+@pytest.mark.parametrize(
|
||||
+ "parameters",
|
||||
+ [
|
||||
+ pytest.param( ((PARENT_REFERRAL_DN, CHILD1_REFERRAL_DN), {PARENT_SUFFIX: 2, CHILD1_SUFFIX:1, CHILD2_SUFFIX:0}), id="Both"),
|
||||
+ pytest.param( ((PARENT_REFERRAL_DN,), {PARENT_SUFFIX: 1, CHILD1_SUFFIX:0, CHILD2_SUFFIX:0}) , id="Parent"),
|
||||
+ pytest.param( ((CHILD1_REFERRAL_DN,), {PARENT_SUFFIX: 1, CHILD1_SUFFIX:1, CHILD2_SUFFIX:0}) , id="Child"),
|
||||
+ pytest.param( ((), {PARENT_SUFFIX: 0, CHILD1_SUFFIX:0, CHILD2_SUFFIX:0}), id="None"),
|
||||
+ ])
|
||||
+
|
||||
+def test_referral_subsuffix(topo, request, parameters):
|
||||
+ """Test the results of an inverted parent suffix definition in the configuration.
|
||||
+
|
||||
+ For more details see:
|
||||
+ https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html
|
||||
+
|
||||
+ :id: 4e111a22-2a5d-11ef-a890-482ae39447e5
|
||||
+ :feature: referrals
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+ :parametrized: yes
|
||||
+ :steps:
|
||||
+ refs,searches = referrals
|
||||
+
|
||||
+ 1. Create the referrals according to the current parameter
|
||||
+ 2. Wait enough time so they get detected
|
||||
+ 3. For each search base dn, in the current parameter, perform the two following steps
|
||||
+ 4. In 3. loop: Perform a search with provided base dn
|
||||
+ 5. In 3. loop: Check that the number of returned referrals is the expected one.
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ all steps succeeds
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info('Deleting all referrals')
|
||||
+ for ref in Referrals(inst, PARENT_SUFFIX).list():
|
||||
+ ref.delete()
|
||||
+
|
||||
+ # Set cleanup callback
|
||||
+ if DEBUGGING:
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Remove all referrals
|
||||
+ fin()
|
||||
+ # Add requested referrals
|
||||
+ for dn in parameters[0]:
|
||||
+ refs = Referral(inst, dn=dn)
|
||||
+ refs.create(basedn=dn, properties={ 'cn': 'ref', 'ref': f'ldap://remote/{dn}'})
|
||||
+ # Wait that the internal search detects the referrals
|
||||
+ time.sleep(REFERRAL_CHECK_PEDIOD + 1)
|
||||
+ # Open a test connection
|
||||
+ ldc = ldap.initialize(f"ldap://{HOST_STANDALONE}:{PORT_STANDALONE}")
|
||||
+ ldc.set_option(ldap.OPT_REFERRALS,0)
|
||||
+ ldc.simple_bind_s(DN_DM,PW_DM)
|
||||
+
|
||||
+ # For each search base dn:
|
||||
+ for basedn,nbref in parameters[1].items():
|
||||
+ log.info(f"Referrals are: {parameters[0]}")
|
||||
+ # Perform a search with provided base dn
|
||||
+ result = ldc.search_s(basedn, ldap.SCOPE_SUBTREE, filterstr="(ou=People)")
|
||||
+ found_dns = [ dn for dn,entry in result if dn is not None ]
|
||||
+ found_refs = [ entry for dn,entry in result if dn is None ]
|
||||
+ log.info(f"Search on {basedn} returned {found_dns} and {found_refs}")
|
||||
+ # Check that the number of returned referrals is the expected one.
|
||||
+ log.info(f"Search returned {len(found_refs)} referrals. {nbref} are expected.")
|
||||
+ assert len(found_refs) == nbref
|
||||
+ ldc.unbind()
|
||||
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
|
||||
index 498f683b1..f86b0b9b6 100644
|
||||
--- a/ldap/servers/slapd/backend.c
|
||||
+++ b/ldap/servers/slapd/backend.c
|
||||
@@ -230,12 +230,17 @@ slapi_exist_referral(Slapi_Backend *be)
|
||||
|
||||
/* search for ("smart") referral entries */
|
||||
search_pb = slapi_pblock_new();
|
||||
- server_ctrls = (LDAPControl **) slapi_ch_calloc(2, sizeof (LDAPControl *));
|
||||
+ server_ctrls = (LDAPControl **) slapi_ch_calloc(3, sizeof (LDAPControl *));
|
||||
server_ctrls[0] = (LDAPControl *) slapi_ch_malloc(sizeof (LDAPControl));
|
||||
server_ctrls[0]->ldctl_oid = slapi_ch_strdup(LDAP_CONTROL_MANAGEDSAIT);
|
||||
server_ctrls[0]->ldctl_value.bv_val = NULL;
|
||||
server_ctrls[0]->ldctl_value.bv_len = 0;
|
||||
server_ctrls[0]->ldctl_iscritical = '\0';
|
||||
+ server_ctrls[1] = (LDAPControl *) slapi_ch_malloc(sizeof (LDAPControl));
|
||||
+ server_ctrls[1]->ldctl_oid = slapi_ch_strdup(MTN_CONTROL_USE_ONE_BACKEND_EXT_OID);
|
||||
+ server_ctrls[1]->ldctl_value.bv_val = NULL;
|
||||
+ server_ctrls[1]->ldctl_value.bv_len = 0;
|
||||
+ server_ctrls[1]->ldctl_iscritical = '\0';
|
||||
slapi_search_internal_set_pb(search_pb, suffix, LDAP_SCOPE_SUBTREE,
|
||||
filter, NULL, 0, server_ctrls, NULL,
|
||||
(void *) plugin_get_default_component_id(), 0);
|
||||
--
|
||||
2.48.0
|
||||
|
88
SOURCES/0016-Issue-4725-Fix-compiler-warnings.patch
Normal file
@ -0,0 +1,88 @@
From 7345c51c68dfd90a704ccbb0e5b1e736af80f146 Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 17 May 2021 16:10:22 +0200
|
||||
Subject: [PATCH] Issue 4725 - Fix compiler warnings
|
||||
|
||||
---
|
||||
ldap/servers/slapd/proto-slap.h | 2 +-
|
||||
ldap/servers/slapd/pw.c | 9 ++++-----
|
||||
ldap/servers/slapd/pw_retry.c | 2 --
|
||||
3 files changed, 5 insertions(+), 8 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
|
||||
index 6ff178127..2768d5a1d 100644
|
||||
--- a/ldap/servers/slapd/proto-slap.h
|
||||
+++ b/ldap/servers/slapd/proto-slap.h
|
||||
@@ -1012,7 +1012,7 @@ int add_shadow_ext_password_attrs(Slapi_PBlock *pb, Slapi_Entry **e);
|
||||
* pw_retry.c
|
||||
*/
|
||||
int update_pw_retry(Slapi_PBlock *pb);
|
||||
-int update_trp_pw_usecount(Slapi_PBlock *pb, Slapi_Entry *e, int32_t use_count);
|
||||
+int update_tpr_pw_usecount(Slapi_PBlock *pb, Slapi_Entry *e, int32_t use_count);
|
||||
void pw_apply_mods(const Slapi_DN *sdn, Slapi_Mods *mods);
|
||||
void pw_set_componentID(struct slapi_componentid *cid);
|
||||
struct slapi_componentid *pw_get_componentID(void);
|
||||
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
|
||||
index d98422513..2a167c8f1 100644
|
||||
--- a/ldap/servers/slapd/pw.c
|
||||
+++ b/ldap/servers/slapd/pw.c
|
||||
@@ -2622,7 +2622,6 @@ int
|
||||
slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int send_result) {
|
||||
passwdPolicy *pwpolicy = NULL;
|
||||
char *dn = NULL;
|
||||
- int tpr_maxuse;
|
||||
char *value;
|
||||
time_t cur_time;
|
||||
char *cur_time_str = NULL;
|
||||
@@ -2638,7 +2637,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
|
||||
return 0;
|
||||
}
|
||||
|
||||
- if (slapi_entry_attr_hasvalue(bind_target_entry, "pwdTPRReset", "TRUE") == NULL) {
|
||||
+ if (!slapi_entry_attr_hasvalue(bind_target_entry, "pwdTPRReset", "TRUE")) {
|
||||
/* the password was not reset by an admin while a TRP pwp was set, just returned */
|
||||
return 0;
|
||||
}
|
||||
@@ -2646,7 +2645,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
|
||||
/* Check entry TPR max use */
|
||||
if (pwpolicy->pw_tpr_maxuse >= 0) {
|
||||
uint use_count;
|
||||
- value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRUseCount");
|
||||
+ value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRUseCount");
|
||||
if (value) {
|
||||
/* max Use is enforced */
|
||||
use_count = strtoull(value, 0, 0);
|
||||
@@ -2681,7 +2680,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
|
||||
|
||||
/* Check entry TPR expiration at a specific time */
|
||||
if (pwpolicy->pw_tpr_delay_expire_at >= 0) {
|
||||
- value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRExpireAt");
|
||||
+ value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRExpireAt");
|
||||
if (value) {
|
||||
/* max Use is enforced */
|
||||
if (difftime(parse_genTime(cur_time_str), parse_genTime(value)) >= 0) {
|
||||
@@ -2709,7 +2708,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
|
||||
|
||||
/* Check entry TPR valid after a specific time */
|
||||
if (pwpolicy->pw_tpr_delay_valid_from >= 0) {
|
||||
- value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRValidFrom");
|
||||
+ value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRValidFrom");
|
||||
if (value) {
|
||||
/* validity after a specific time is enforced */
|
||||
if (difftime(parse_genTime(value), parse_genTime(cur_time_str)) >= 0) {
|
||||
diff --git a/ldap/servers/slapd/pw_retry.c b/ldap/servers/slapd/pw_retry.c
|
||||
index 5d13eb636..af54aa19d 100644
|
||||
--- a/ldap/servers/slapd/pw_retry.c
|
||||
+++ b/ldap/servers/slapd/pw_retry.c
|
||||
@@ -163,8 +163,6 @@ set_retry_cnt_and_time(Slapi_PBlock *pb, int count, time_t cur_time)
|
||||
int
|
||||
set_tpr_usecount_mods(Slapi_PBlock *pb, Slapi_Mods *smods, int count)
|
||||
{
|
||||
- char *timestr;
|
||||
- time_t unlock_time;
|
||||
char retry_cnt[16] = {0}; /* 1-65535 */
|
||||
const char *dn = NULL;
|
||||
Slapi_DN *sdn = NULL;
|
||||
--
|
||||
2.31.1
|
||||
|
@ -1,32 +0,0 @@
|
||||
From ab06b3cebbe0287ef557c0307ca2ee86fe8cb761 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Thu, 21 Nov 2024 16:26:02 +0100
|
||||
Subject: [PATCH] Issue 6224 - Fix merge issue in 389-ds-base-2.1 for
|
||||
ds_log_test.py (#6414)
|
||||
|
||||
Fix a merge issue during cherry-pick over 389-ds-base-2.1 and 389-ds-base-1.4.3 branches
|
||||
|
||||
Issue: #6224
|
||||
|
||||
Reviewed by: @mreynolds389
|
||||
|
||||
(cherry picked from commit 2b541c64b8317209e4dafa4f82918d714039907c)
|
||||
---
|
||||
dirsrvtests/tests/suites/ds_logs/ds_logs_test.py | 1 -
|
||||
1 file changed, 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 84a9c6ec8..812936c62 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,7 +1222,6 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-<<<<<<< HEAD
|
||||
def test_referral_subsuffix(topology_st, request):
|
||||
"""Test the results of an inverted parent suffix definition in the configuration.
|
||||
|
||||
--
|
||||
2.48.0
|
||||
|
@ -0,0 +1,202 @@
|
||||
From 59266365eda8130abf6901263efae4c87586376a Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 28 Jun 2021 16:40:15 +0200
|
||||
Subject: [PATCH] Issue 4814 - _cl5_get_tod_expiration may crash at startup
|
||||
|
||||
Bug description:
|
||||
This bug exists only in the 1.4.3 branch.
In 1.4.3 the CL opens as a separate database, so
the compaction mechanism is started along with a CL
mechanism (CL trimming).
The problem is that the configuration of the CL
compaction is done after the compaction mechanism
is started. Depending on thread scheduling this
crashes.
|
||||
|
||||
Fix description:
|
||||
Make sure configuration of compaction thread is
|
||||
taken into account (cl5ConfigSetCompaction) before
|
||||
the compaction thread starts (cl5open)
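
The crash window is a plain ordering race: the compaction thread starts inside the changelog open call and could read the compaction settings before the configuration code had assigned them. Applying the settings before the thread exists removes the window. A small pthread sketch of the same ordering rule (hypothetical names, values hard-coded):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical compaction settings read by the worker thread. */
    static struct {
        int  compact_interval;
        char compact_time[16];
    } trim_cfg;

    static void *compaction_worker(void *arg)
    {
        (void)arg;
        /* Safe only because the configuration was fully set before pthread_create(). */
        printf("compaction every %d s at %s\n",
               trim_cfg.compact_interval, trim_cfg.compact_time);
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;

        /* 1. apply configuration first ... */
        trim_cfg.compact_interval = 2592000;
        strcpy(trim_cfg.compact_time, "23:59");

        /* 2. ... then start the thread that consumes it. */
        pthread_create(&tid, NULL, compaction_worker, NULL);
        pthread_join(tid, NULL);
        return 0;
    }
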
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4814
|
||||
|
||||
Reviewed by: Mark Reynolds, Simon Pichugin (thanks !)
|
||||
|
||||
Platforms tested: 8.5
|
||||
---
|
||||
ldap/servers/plugins/replication/cl5_api.c | 24 ++++++++++++-------
|
||||
ldap/servers/plugins/replication/cl5_api.h | 10 +++++++-
|
||||
ldap/servers/plugins/replication/cl5_config.c | 8 +++++--
|
||||
ldap/servers/plugins/replication/cl5_init.c | 4 +++-
|
||||
ldap/servers/plugins/replication/cl5_test.c | 2 +-
|
||||
.../servers/plugins/replication/repl_shared.h | 2 +-
|
||||
6 files changed, 35 insertions(+), 15 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
|
||||
index 4c5077b48..954b6b9e3 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_api.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_api.c
|
||||
@@ -1016,6 +1016,20 @@ cl5GetState()
|
||||
return s_cl5Desc.dbState;
|
||||
}
|
||||
|
||||
+void
|
||||
+cl5ConfigSetCompaction(int compactInterval, char *compactTime)
|
||||
+{
|
||||
+
|
||||
+ if (compactInterval != CL5_NUM_IGNORE) {
|
||||
+ s_cl5Desc.dbTrim.compactInterval = compactInterval;
|
||||
+ }
|
||||
+
|
||||
+ if (strcmp(compactTime, CL5_STR_IGNORE) != 0) {
|
||||
+ s_cl5Desc.dbTrim.compactTime = slapi_ch_strdup(compactTime);
|
||||
+ }
|
||||
+
|
||||
+}
|
||||
+
|
||||
/* Name: cl5ConfigTrimming
|
||||
Description: sets changelog trimming parameters; changelog must be open.
|
||||
Parameters: maxEntries - maximum number of entries in the chnagelog (in all files);
|
||||
@@ -1026,7 +1040,7 @@ cl5GetState()
|
||||
CL5_BAD_STATE if changelog is not open
|
||||
*/
|
||||
int
|
||||
-cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval)
|
||||
+cl5ConfigTrimming(int maxEntries, const char *maxAge, int trimInterval)
|
||||
{
|
||||
if (s_cl5Desc.dbState == CL5_STATE_NONE) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
@@ -1058,14 +1072,6 @@ cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char
|
||||
s_cl5Desc.dbTrim.maxEntries = maxEntries;
|
||||
}
|
||||
|
||||
- if (compactInterval != CL5_NUM_IGNORE) {
|
||||
- s_cl5Desc.dbTrim.compactInterval = compactInterval;
|
||||
- }
|
||||
-
|
||||
- if (strcmp(compactTime, CL5_STR_IGNORE) != 0) {
|
||||
- s_cl5Desc.dbTrim.compactTime = slapi_ch_strdup(compactTime);
|
||||
- }
|
||||
-
|
||||
if (trimInterval != CL5_NUM_IGNORE) {
|
||||
s_cl5Desc.dbTrim.trimInterval = trimInterval;
|
||||
}
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h
|
||||
index 11db771f2..6aa48aec4 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_api.h
|
||||
+++ b/ldap/servers/plugins/replication/cl5_api.h
|
||||
@@ -227,6 +227,14 @@ int cl5ImportLDIF(const char *clDir, const char *ldifFile, Replica **replicas);
|
||||
|
||||
int cl5GetState(void);
|
||||
|
||||
+/* Name: cl5ConfigSetCompaction
|
||||
+ * Description: sets the database compaction parameters
|
||||
+ * Parameters: compactInterval - Interval for compaction default is 30days
|
||||
+ * compactTime - Compact time default is 23:59
|
||||
+ * Return: void
|
||||
+ */
|
||||
+void cl5ConfigSetCompaction(int compactInterval, char *compactTime);
|
||||
+
|
||||
/* Name: cl5ConfigTrimming
|
||||
Description: sets changelog trimming parameters
|
||||
Parameters: maxEntries - maximum number of entries in the log;
|
||||
@@ -236,7 +244,7 @@ int cl5GetState(void);
|
||||
Return: CL5_SUCCESS if successful;
|
||||
CL5_BAD_STATE if changelog has not been open
|
||||
*/
|
||||
-int cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval);
|
||||
+int cl5ConfigTrimming(int maxEntries, const char *maxAge, int trimInterval);
|
||||
|
||||
void cl5DestroyIterator(void *iterator);
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_config.c b/ldap/servers/plugins/replication/cl5_config.c
|
||||
index b32686788..a43534c9b 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_config.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_config.c
|
||||
@@ -197,6 +197,8 @@ changelog5_config_add(Slapi_PBlock *pb __attribute__((unused)),
|
||||
|
||||
goto done;
|
||||
}
|
||||
+ /* Set compaction parameters */
|
||||
+ cl5ConfigSetCompaction(config.compactInterval, config.compactTime);
|
||||
|
||||
/* start the changelog */
|
||||
rc = cl5Open(config.dir, &config.dbconfig);
|
||||
@@ -212,7 +214,7 @@ changelog5_config_add(Slapi_PBlock *pb __attribute__((unused)),
|
||||
}
|
||||
|
||||
/* set trimming parameters */
|
||||
- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
|
||||
+ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval);
|
||||
if (rc != CL5_SUCCESS) {
|
||||
*returncode = 1;
|
||||
if (returntext) {
|
||||
@@ -548,6 +550,8 @@ changelog5_config_modify(Slapi_PBlock *pb,
|
||||
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
|
||||
"changelog5_config_modify - Deleted the changelog at %s\n", currentDir);
|
||||
}
|
||||
+ /* Set compaction parameters */
|
||||
+ cl5ConfigSetCompaction(config.compactInterval, config.compactTime);
|
||||
|
||||
rc = cl5Open(config.dir, &config.dbconfig);
|
||||
if (rc != CL5_SUCCESS) {
|
||||
@@ -575,7 +579,7 @@ changelog5_config_modify(Slapi_PBlock *pb,
|
||||
if (config.maxEntries != CL5_NUM_IGNORE ||
|
||||
config.trimInterval != CL5_NUM_IGNORE ||
|
||||
strcmp(config.maxAge, CL5_STR_IGNORE) != 0) {
|
||||
- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
|
||||
+ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval);
|
||||
if (rc != CL5_SUCCESS) {
|
||||
*returncode = 1;
|
||||
if (returntext) {
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_init.c b/ldap/servers/plugins/replication/cl5_init.c
|
||||
index 251859714..567e0274c 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_init.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_init.c
|
||||
@@ -45,6 +45,8 @@ changelog5_init()
|
||||
rc = 0; /* OK */
|
||||
goto done;
|
||||
}
|
||||
+ /* Set compaction parameters */
|
||||
+ cl5ConfigSetCompaction(config.compactInterval, config.compactTime);
|
||||
|
||||
/* start changelog */
|
||||
rc = cl5Open(config.dir, &config.dbconfig);
|
||||
@@ -57,7 +59,7 @@ changelog5_init()
|
||||
}
|
||||
|
||||
/* set trimming parameters */
|
||||
- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
|
||||
+ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval);
|
||||
if (rc != CL5_SUCCESS) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
"changelog5_init: failed to configure changelog trimming\n");
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_test.c b/ldap/servers/plugins/replication/cl5_test.c
|
||||
index d6656653c..efb8c543a 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_test.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_test.c
|
||||
@@ -281,7 +281,7 @@ testTrimming()
|
||||
rc = populateChangelog(300, NULL);
|
||||
|
||||
if (rc == 0)
|
||||
- rc = cl5ConfigTrimming(300, "1d", CHANGELOGDB_COMPACT_INTERVAL, CHANGELOGDB_TRIM_INTERVAL);
|
||||
+ rc = cl5ConfigTrimming(300, "1d", CHANGELOGDB_TRIM_INTERVAL);
|
||||
|
||||
interval = PR_SecondsToInterval(300); /* 5 min is default trimming interval */
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
diff --git a/ldap/servers/plugins/replication/repl_shared.h b/ldap/servers/plugins/replication/repl_shared.h
|
||||
index 6708e12f7..b59b2bd27 100644
|
||||
--- a/ldap/servers/plugins/replication/repl_shared.h
|
||||
+++ b/ldap/servers/plugins/replication/repl_shared.h
|
||||
@@ -26,7 +26,7 @@
|
||||
|
||||
#define CHANGELOGDB_TRIM_INTERVAL 300 /* 5 minutes */
|
||||
#define CHANGELOGDB_COMPACT_INTERVAL 2592000 /* 30 days */
|
||||
-#define CHANGELOGDB_COMPACT_TIME "23:55" /* 30 days */
|
||||
+#define CHANGELOGDB_COMPACT_TIME "23:59" /* around midnight */
|
||||
|
||||
#define CONFIG_CHANGELOG_DIR_ATTRIBUTE "nsslapd-changelogdir"
|
||||
#define CONFIG_CHANGELOG_MAXENTRIES_ATTRIBUTE "nsslapd-changelogmaxentries"
|
||||
--
|
||||
2.31.1
|
||||
|
@ -1,214 +0,0 @@
|
||||
From 3fe2cf7cdedcdf5cafb59867e52a1fbe4a643571 Mon Sep 17 00:00:00 2001
|
||||
From: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
Date: Fri, 20 Dec 2024 22:37:15 +0900
|
||||
Subject: [PATCH] Issue 6224 - Remove test_referral_subsuffix from
|
||||
ds_logs_test.py (#6456)
|
||||
|
||||
Bug Description:
|
||||
|
||||
test_referral_subsuffix test was removed from main branch and some other
|
||||
ones for higher versions. But, it was not removed from 389-ds-base-1.4.3
|
||||
and 389-ds-base-2.1. The test doesn't work anymore with the fix for
|
||||
Issue 6224, because the added new control limited one backend for internal
|
||||
search. The test should be removed.
|
||||
|
||||
Fix Description:
|
||||
|
||||
remove the test from ds_logs_test.py
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/6224
|
||||
---
|
||||
.../tests/suites/ds_logs/ds_logs_test.py | 177 ------------------
|
||||
1 file changed, 177 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 812936c62..84d721756 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,183 +1222,6 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_referral_subsuffix(topology_st, request):
|
||||
- """Test the results of an inverted parent suffix definition in the configuration.
|
||||
-
|
||||
- For more details see:
|
||||
- https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html
|
||||
-
|
||||
- :id: 4faf210a-4fde-4e4f-8834-865bdc8f4d37
|
||||
- :setup: Standalone instance
|
||||
- :steps:
|
||||
- 1. First create two Backends, without mapping trees.
|
||||
- 2. create the mapping trees for these backends
|
||||
- 3. reduce nsslapd-referral-check-period to accelerate test
|
||||
- 4. Remove error log file
|
||||
- 5. Create a referral entry on parent suffix
|
||||
- 6. Check that the server detected the referral
|
||||
- 7. Delete the referral entry
|
||||
- 8. Check that the server detected the deletion of the referral
|
||||
- 9. Remove error log file
|
||||
- 10. Create a referral entry on child suffix
|
||||
- 11. Check that the server detected the referral on both parent and child suffixes
|
||||
- 12. Delete the referral entry
|
||||
- 13. Check that the server detected the deletion of the referral on both parent and child suffixes
|
||||
- 14. Remove error log file
|
||||
- 15. Create a referral entry on parent suffix
|
||||
- 16. Check that the server detected the referral on both parent and child suffixes
|
||||
- 17. Delete the child referral entry
|
||||
- 18. Check that the server detected the deletion of the referral on child suffix but not on parent suffix
|
||||
- 19. Delete the parent referral entry
|
||||
- 20. Check that the server detected the deletion of the referral parent suffix
|
||||
-
|
||||
- :expectedresults:
|
||||
- all steps succeeds
|
||||
- """
|
||||
- inst = topology_st.standalone
|
||||
- # Step 1 First create two Backends, without mapping trees.
|
||||
- PARENT_SUFFIX='dc=parent,dc=com'
|
||||
- CHILD_SUFFIX='dc=child,%s' % PARENT_SUFFIX
|
||||
- be1 = create_backend(inst, 'Parent', PARENT_SUFFIX)
|
||||
- be2 = create_backend(inst, 'Child', CHILD_SUFFIX)
|
||||
- # Step 2 create the mapping trees for these backends
|
||||
- mts = MappingTrees(inst)
|
||||
- mt1 = mts.create(properties={
|
||||
- 'cn': PARENT_SUFFIX,
|
||||
- 'nsslapd-state': 'backend',
|
||||
- 'nsslapd-backend': 'Parent',
|
||||
- })
|
||||
- mt2 = mts.create(properties={
|
||||
- 'cn': CHILD_SUFFIX,
|
||||
- 'nsslapd-state': 'backend',
|
||||
- 'nsslapd-backend': 'Child',
|
||||
- 'nsslapd-parent-suffix': PARENT_SUFFIX,
|
||||
- })
|
||||
-
|
||||
- dc_ex = Domain(inst, dn=PARENT_SUFFIX)
|
||||
- assert dc_ex.exists()
|
||||
-
|
||||
- dc_st = Domain(inst, dn=CHILD_SUFFIX)
|
||||
- assert dc_st.exists()
|
||||
-
|
||||
- # Step 3 reduce nsslapd-referral-check-period to accelerate test
|
||||
- # requires a restart done on step 4
|
||||
- REFERRAL_CHECK=7
|
||||
- topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK))
|
||||
-
|
||||
- # Check that if we create a referral at parent level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is not detected at child backend
|
||||
-
|
||||
- # Step 3 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 4 Create a referral entry on parent suffix
|
||||
- rs_parent = Referrals(topology_st.standalone, PARENT_SUFFIX)
|
||||
-
|
||||
- referral_entry_parent = rs_parent.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 5 Check that the server detected the referral
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Step 6 Delete the referral entry
|
||||
- referral_entry_parent.delete()
|
||||
-
|
||||
- # Step 7 Check that the server detected the deletion of the referral
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Check that if we create a referral at child level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is detected at child backend
|
||||
-
|
||||
- # Step 8 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 9 Create a referral entry on child suffix
|
||||
- rs_child = Referrals(topology_st.standalone, CHILD_SUFFIX)
|
||||
- referral_entry_child = rs_child.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 10 Check that the server detected the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Step 11 Delete the referral entry
|
||||
- referral_entry_child.delete()
|
||||
-
|
||||
- # Step 12 Check that the server detected the deletion of the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Check that if we create a referral at child level and parent level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is detected at child backend
|
||||
-
|
||||
- # Step 13 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 14 Create a referral entry on parent suffix
|
||||
- # Create a referral entry on child suffix
|
||||
- referral_entry_parent = rs_parent.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
- referral_entry_child = rs_child.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 15 Check that the server detected the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Step 16 Delete the child referral entry
|
||||
- referral_entry_child.delete()
|
||||
-
|
||||
- # Step 17 Check that the server detected the deletion of the referral on child suffix but not on parent suffix
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Step 18 Delete the parent referral entry
|
||||
- referral_entry_parent.delete()
|
||||
-
|
||||
- # Step 19 Check that the server detected the deletion of the referral parent suffix
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- def fin():
|
||||
- log.info('Deleting referral')
|
||||
- try:
|
||||
- referral_entry_parent.delete()
|
||||
- referral.entry_child.delete()
|
||||
- except:
|
||||
- pass
|
||||
-
|
||||
- request.addfinalizer(fin)
|
||||
|
||||
def test_missing_backend_suffix(topology_st, request):
|
||||
"""Test that the server does not crash if a backend has no suffix
|
||||
--
|
||||
2.48.0
|
||||
|
@ -0,0 +1,51 @@
|
||||
From e7fdfe527a5f72674fe4b577a0555cabf8ec73a5 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 7 Jun 2021 11:23:35 +0200
|
||||
Subject: [PATCH] Issue 4789 - Temporary password rules are not enforce with
|
||||
local password policy (#4790)
|
||||
|
||||
Bug description:
|
||||
When allocating a password policy structure (new_passwdPolicy)
|
||||
it is initialized with the local policy definition or
|
||||
the global one. If a local policy entry exists, the TPR
|
||||
attributes (passwordTPRMaxUse, passwordTPRDelayValidFrom and
|
||||
passwordTPRDelayExpireAt) are not taken into account.
|
||||
|
||||
Fix description:
|
||||
Take into account TPR attributes to initialize the policy
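
Concretely, the policy constructor walks the local policy entry attribute by attribute and copies each recognized value into the policy structure; the bug was simply that the three passwordTPR* names had no branch in that dispatch. A standalone sketch of the same name-to-field dispatch, with a made-up struct and attribute list:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <strings.h>

    struct policy {
        int tpr_maxuse;
        int tpr_delay_expire_at;
        int tpr_delay_valid_from;
    };

    struct attr { const char *name; const char *value; };

    /* Copy recognized attributes into the policy; unknown names are ignored. */
    static void apply_attrs(struct policy *p, const struct attr *attrs, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            if (strcasecmp(attrs[i].name, "passwordTPRMaxUse") == 0) {
                p->tpr_maxuse = atoi(attrs[i].value);
            } else if (strcasecmp(attrs[i].name, "passwordTPRDelayExpireAt") == 0) {
                p->tpr_delay_expire_at = atoi(attrs[i].value);
            } else if (strcasecmp(attrs[i].name, "passwordTPRDelayValidFrom") == 0) {
                p->tpr_delay_valid_from = atoi(attrs[i].value);
            } /* a missing branch here is exactly what left the defaults in place */
        }
    }

    int main(void)
    {
        struct policy p = {-1, -1, -1};   /* -1 = "not enforced" defaults */
        struct attr entry[] = {
            {"passwordTPRMaxUse", "3"},
            {"passwordTPRDelayValidFrom", "60"},
        };
        apply_attrs(&p, entry, 2);
        printf("maxuse=%d expire_at=%d valid_from=%d\n",
               p.tpr_maxuse, p.tpr_delay_expire_at, p.tpr_delay_valid_from);
        return 0;
    }
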
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4789
|
||||
|
||||
Reviewed by: Simon Pichugin, William Brown
|
||||
|
||||
Platforms tested: F34
|
||||
---
|
||||
ldap/servers/slapd/pw.c | 12 ++++++++++++
|
||||
1 file changed, 12 insertions(+)
|
||||
|
||||
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
|
||||
index 2a167c8f1..7680df41d 100644
|
||||
--- a/ldap/servers/slapd/pw.c
|
||||
+++ b/ldap/servers/slapd/pw.c
|
||||
@@ -2356,6 +2356,18 @@ new_passwdPolicy(Slapi_PBlock *pb, const char *dn)
|
||||
if ((sval = attr_get_present_values(attr))) {
|
||||
pwdpolicy->pw_dict_path = (char *)slapi_value_get_string(*sval);
|
||||
}
|
||||
+ } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_MAXUSE)) {
|
||||
+ if ((sval = attr_get_present_values(attr))) {
|
||||
+ pwdpolicy->pw_tpr_maxuse = slapi_value_get_int(*sval);
|
||||
+ }
|
||||
+ } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_DELAY_EXPIRE_AT)) {
|
||||
+ if ((sval = attr_get_present_values(attr))) {
|
||||
+ pwdpolicy->pw_tpr_delay_expire_at = slapi_value_get_int(*sval);
|
||||
+ }
|
||||
+ } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_DELAY_VALID_FROM)) {
|
||||
+ if ((sval = attr_get_present_values(attr))) {
|
||||
+ pwdpolicy->pw_tpr_delay_valid_from = slapi_value_get_int(*sval);
|
||||
+ }
|
||||
}
|
||||
} /* end of for() loop */
|
||||
if (pw_entry) {
|
||||
--
|
||||
2.31.1
|
||||
|
@ -1,90 +0,0 @@
|
||||
From 4121ffe7a44fbacf513758661e71e483eb11ee3c Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 6 Jan 2025 14:00:39 +0100
|
||||
Subject: [PATCH] Issue 6417 - (2nd) If an entry RDN is identical to the
|
||||
suffix, then Entryrdn gets broken during a reindex (#6460)
|
||||
|
||||
Bug description:
|
||||
The primary fix has a flaw as it assumes that the
|
||||
suffix ID is '1'.
|
||||
If the RUV entry is the first entry of the database,
the server loops indefinitely.
|
||||
|
||||
Fix description:
|
||||
Read the suffix ID from the entryrdn index
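
In other words the loop's termination value stops being the constant 1: the suffix's own entryrdn record is fetched first and its stored ID becomes the target, and only if that lookup fails does the code keep 1 as a best-effort default and log a warning. A compact sketch of that lookup-with-fallback, over a toy table instead of the dbi cursor:

    #include <stdio.h>
    #include <string.h>

    struct rec { const char *key; unsigned id; };

    /* Toy index: the suffix record exists here and carries ID 5, not 1. */
    static const struct rec idx[] = {
        {"dc=example,dc=com", 5},
        {"ou=people,dc=example,dc=com", 8},
    };

    /* Returns 0 and fills *out when the key is found, -1 otherwise. */
    static int fetch_id(const char *key, unsigned *out)
    {
        for (size_t i = 0; i < sizeof(idx) / sizeof(idx[0]); i++) {
            if (strcmp(idx[i].key, key) == 0) {
                *out = idx[i].id;
                return 0;
            }
        }
        return -1;
    }

    int main(void)
    {
        unsigned suffix_id = 1;   /* historical default, kept only as a fallback */
        if (fetch_id("dc=example,dc=com", &suffix_id) != 0) {
            fprintf(stderr, "warning: suffix not found, keeping default id %u\n", suffix_id);
        }
        printf("walk the parent chain until reaching id %u\n", suffix_id);
        return 0;
    }
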
|
||||
|
||||
fixes: #6417
|
||||
|
||||
Reviewed by: Pierre Rogier (also reviewed the first fix)
|
||||
---
|
||||
.../suites/replication/regression_m2_test.py | 9 +++++++++
|
||||
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 19 ++++++++++++++++++-
|
||||
2 files changed, 27 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
index abac46ada..72d4b9f89 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
@@ -1010,6 +1010,15 @@ def test_online_reinit_may_hang(topo_with_sigkill):
|
||||
"""
|
||||
M1 = topo_with_sigkill.ms["supplier1"]
|
||||
M2 = topo_with_sigkill.ms["supplier2"]
|
||||
+
|
||||
+ # The RFE 5367 (when enabled) retrieves the DN
|
||||
+ # from the dncache. This hides an issue
|
||||
+ # with primary fix for 6417.
|
||||
+ # We need to disable the RFE to verify that the primary
|
||||
+ # fix is properly fixed.
|
||||
+ if ds_is_newer('2.3.1'):
|
||||
+ M1.config.replace('nsslapd-return-original-entrydn', 'off')
|
||||
+
|
||||
M1.stop()
|
||||
ldif_file = '%s/supplier1.ldif' % M1.get_ldif_dir()
|
||||
M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 83b041192..1bbb6252a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1115,6 +1115,7 @@ entryrdn_lookup_dn(backend *be,
|
||||
rdn_elem *elem = NULL;
|
||||
int maybesuffix = 0;
|
||||
int db_retry = 0;
|
||||
+ ID suffix_id = 1;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "entryrdn_lookup_dn",
|
||||
"--> entryrdn_lookup_dn\n");
|
||||
@@ -1175,6 +1176,22 @@ entryrdn_lookup_dn(backend *be,
|
||||
/* Setting the bulk fetch buffer */
|
||||
data.flags = DB_DBT_MALLOC;
|
||||
|
||||
+ /* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
+ keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
+ dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1);
|
||||
+ rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data);
|
||||
+ if (rc) {
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
+ "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
+ slapi_sdn_get_ndn(be->be_suffix),
|
||||
+ suffix_id);
|
||||
+ } else {
|
||||
+ elem = (rdn_elem *)data.data;
|
||||
+ suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ }
|
||||
+ dblayer_value_free(be, &data);
|
||||
+ dblayer_value_free(be, &key);
|
||||
+
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
slapi_ch_free_string(&keybuf);
|
||||
@@ -1224,7 +1241,7 @@ entryrdn_lookup_dn(backend *be,
|
||||
}
|
||||
goto bail;
|
||||
}
|
||||
- if (workid == 1) {
|
||||
+ if (workid == suffix_id) {
|
||||
/* The loop (workid) iterates from the starting 'id'
|
||||
* up to the suffix ID (i.e. '1').
|
||||
* A corner case (#6417) is if an entry, on the path
|
||||
--
|
||||
2.48.0
|
||||
|
@ -0,0 +1,350 @@
|
||||
From 6a741b3ef50babf2ac2479437a38829204ffd438 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Thu, 17 Jun 2021 16:22:09 +0200
|
||||
Subject: [PATCH] Issue 4788 - CLI should support Temporary Password Rules
|
||||
attributes (#4793)
|
||||
|
||||
Bug description:
|
||||
Since #4725, the password policy supports temporary password rules.
|
||||
CLI (dsconf) does not support this RFE and only direct ldap
|
||||
operation can configure global/local password policy
|
||||
|
||||
Fix description:
|
||||
Update dsconf to support this new RFE.
|
||||
To run the testcase successfully, it relies on #4788
|
||||
|
||||
relates: #4788
|
||||
|
||||
Reviewed by: Simon Pichugin (thanks !!)
|
||||
|
||||
Platforms tested: F34
|
||||
---
|
||||
.../password/pwdPolicy_attribute_test.py | 172 ++++++++++++++++--
|
||||
src/lib389/lib389/cli_conf/pwpolicy.py | 5 +-
|
||||
src/lib389/lib389/pwpolicy.py | 5 +-
|
||||
3 files changed, 165 insertions(+), 17 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
|
||||
index aee3a91ad..085d0a373 100644
|
||||
--- a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
|
||||
@@ -34,7 +34,7 @@ log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
-def create_user(topology_st, request):
|
||||
+def test_user(topology_st, request):
|
||||
"""User for binding operation"""
|
||||
topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on')
|
||||
log.info('Adding test user {}')
|
||||
@@ -56,10 +56,11 @@ def create_user(topology_st, request):
|
||||
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
|
||||
|
||||
request.addfinalizer(fin)
|
||||
+ return user
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
-def password_policy(topology_st, create_user):
|
||||
+def password_policy(topology_st, test_user):
|
||||
"""Set up password policy for subtree and user"""
|
||||
|
||||
pwp = PwPolicyManager(topology_st.standalone)
|
||||
@@ -71,7 +72,7 @@ def password_policy(topology_st, create_user):
|
||||
pwp.create_user_policy(TEST_USER_DN, policy_props)
|
||||
|
||||
@pytest.mark.skipif(ds_is_older('1.4.3.3'), reason="Not implemented")
|
||||
-def test_pwd_reset(topology_st, create_user):
|
||||
+def test_pwd_reset(topology_st, test_user):
|
||||
"""Test new password policy attribute "pwdReset"
|
||||
|
||||
:id: 03db357b-4800-411e-a36e-28a534293004
|
||||
@@ -124,7 +125,7 @@ def test_pwd_reset(topology_st, create_user):
|
||||
[('on', 'off', ldap.UNWILLING_TO_PERFORM),
|
||||
('off', 'off', ldap.UNWILLING_TO_PERFORM),
|
||||
('off', 'on', False), ('on', 'on', False)])
|
||||
-def test_change_pwd(topology_st, create_user, password_policy,
|
||||
+def test_change_pwd(topology_st, test_user, password_policy,
|
||||
subtree_pwchange, user_pwchange, exception):
|
||||
"""Verify that 'passwordChange' attr works as expected
|
||||
User should have a priority over a subtree.
|
||||
@@ -184,7 +185,7 @@ def test_change_pwd(topology_st, create_user, password_policy,
|
||||
user.reset_password(TEST_USER_PWD)
|
||||
|
||||
|
||||
-def test_pwd_min_age(topology_st, create_user, password_policy):
|
||||
+def test_pwd_min_age(topology_st, test_user, password_policy):
|
||||
"""If we set passwordMinAge to some value, for example to 10, then it
|
||||
should not allow the user to change the password within 10 seconds after
|
||||
his previous change.
|
||||
@@ -257,7 +258,7 @@ def test_pwd_min_age(topology_st, create_user, password_policy):
|
||||
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
|
||||
user.reset_password(TEST_USER_PWD)
|
||||
|
||||
-def test_global_tpr_maxuse_1(topology_st, create_user, request):
|
||||
+def test_global_tpr_maxuse_1(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRMaxUse
|
||||
Test that after passwordTPRMaxUse failures to bind
|
||||
additional bind with valid password are failing with CONSTRAINT_VIOLATION
|
||||
@@ -374,7 +375,7 @@ def test_global_tpr_maxuse_1(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_maxuse_2(topology_st, create_user, request):
|
||||
+def test_global_tpr_maxuse_2(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRMaxUse
|
||||
Test that after less than passwordTPRMaxUse failures to bind
|
||||
additional bind with valid password are successfull
|
||||
@@ -474,7 +475,7 @@ def test_global_tpr_maxuse_2(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_maxuse_3(topology_st, create_user, request):
|
||||
+def test_global_tpr_maxuse_3(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRMaxUse
|
||||
Test that after less than passwordTPRMaxUse failures to bind
|
||||
A bind with valid password is successfull but passwordMustChange
|
||||
@@ -587,7 +588,7 @@ def test_global_tpr_maxuse_3(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_maxuse_4(topology_st, create_user, request):
|
||||
+def test_global_tpr_maxuse_4(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRMaxUse
|
||||
Test that a TPR attribute passwordTPRMaxUse
|
||||
can be updated by DM but not the by user itself
|
||||
@@ -701,7 +702,148 @@ def test_global_tpr_maxuse_4(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_delayValidFrom_1(topology_st, create_user, request):
|
||||
+def test_local_tpr_maxuse_5(topology_st, test_user, request):
|
||||
+ """Test TPR local policy overpass global one: passwordTPRMaxUse
|
||||
+ Test that after passwordTPRMaxUse failures to bind
|
||||
+ additional bind with valid password are failing with CONSTRAINT_VIOLATION
|
||||
+
|
||||
+ :id: c3919707-d804-445a-8754-8385b1072c42
|
||||
+ :customerscenario: False
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Global password policy Enable passwordMustChange
|
||||
+ 2. Global password policy Set passwordTPRMaxUse=5
|
||||
+ 3. Global password policy Set passwordMaxFailure to a higher value to not disturb the test
|
||||
+ 4. Local password policy Enable passwordMustChange
|
||||
+ 5. Local password policy Set passwordTPRMaxUse=10 (higher than global)
|
||||
+ 6. Bind with a wrong password 10 times and check INVALID_CREDENTIALS
|
||||
+ 7. Check that passwordTPRUseCount got to the local limit (10)
+ 8. Bind with a wrong password (CONSTRAINT_VIOLATION)
+ and check passwordTPRUseCount overpasses the limit by 1 (11)
|
||||
+ 9. Bind with a valid password 10 times and check CONSTRAINT_VIOLATION
|
||||
+ and check passwordTPRUseCount increases
|
||||
+ 10. Reset password policy configuration and remove local password from user
|
||||
+ :expected results:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ 6. Success
|
||||
+ 7. Success
|
||||
+ 8. Success
|
||||
+ 9. Success
|
||||
+ 10. Success
|
||||
+ """
|
||||
+
|
||||
+ global_tpr_maxuse = 5
|
||||
+ # Set global password policy config, passwordMaxFailure being higher than
|
||||
+ # passwordTPRMaxUse so that TPR is enforced first
|
||||
+ topology_st.standalone.config.replace('passwordMustChange', 'on')
|
||||
+ topology_st.standalone.config.replace('passwordMaxFailure', str(global_tpr_maxuse + 20))
|
||||
+ topology_st.standalone.config.replace('passwordTPRMaxUse', str(global_tpr_maxuse))
|
||||
+ time.sleep(.5)
|
||||
+
|
||||
+ local_tpr_maxuse = global_tpr_maxuse + 5
|
||||
+ # Reset user's password with a local password policy
|
||||
+ # that has passwordTPRMaxUse higher than global
|
||||
+ #our_user = UserAccount(topology_st.standalone, TEST_USER_DN)
|
||||
+ subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(),
|
||||
+ 'slapd-standalone1',
|
||||
+ 'localpwp',
|
||||
+ 'adduser',
|
||||
+ test_user.dn])
|
||||
+ subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(),
|
||||
+ 'slapd-standalone1',
|
||||
+ 'localpwp',
|
||||
+ 'set',
|
||||
+ '--pwptprmaxuse',
|
||||
+ str(local_tpr_maxuse),
|
||||
+ '--pwdmustchange',
|
||||
+ 'on',
|
||||
+ test_user.dn])
|
||||
+ test_user.replace('userpassword', PASSWORD)
|
||||
+ time.sleep(.5)
|
||||
+
|
||||
+ # Loop up to the local passwordTPRMaxUse limit with failing
+ # binds to check that the TPR limits are enforced
|
||||
+ for i in range(local_tpr_maxuse):
|
||||
+ # Bind as user with a wrong password
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ test_user.rebind('wrong password')
|
||||
+ time.sleep(.5)
|
||||
+
|
||||
+ # Check that pwdReset is TRUE
|
||||
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
|
||||
+ #assert test_user.get_attr_val_utf8('pwdReset') == 'TRUE'
|
||||
+
|
||||
+ # Check that pwdTPRReset is TRUE
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1)
|
||||
+ log.info("%dth failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (i+1, i+1))
|
||||
+
|
||||
+
|
||||
+ # Now the #failures reached passwordTPRMaxUse
|
||||
+ # Check that pwdReset is TRUE
|
||||
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
|
||||
+
|
||||
+ # Check that pwdTPRReset is TRUE
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse)
|
||||
+ log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (local_tpr_maxuse))
|
||||
+
|
||||
+ # Bind as user with wrong password --> ldap.CONSTRAINT_VIOLATION
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ test_user.rebind("wrong password")
|
||||
+ time.sleep(.5)
|
||||
+
|
||||
+ # Check that pwdReset is TRUE
|
||||
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
|
||||
+
|
||||
+ # Check that pwdTPRReset is TRUE
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + 1)
|
||||
+ log.info("failing bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (local_tpr_maxuse + i))
|
||||
+
|
||||
+ # Now check that all next attempts with correct password are all in LDAP_CONSTRAINT_VIOLATION
|
||||
+ # and passwordTPRRetryCount remains unchanged
|
||||
+ # account is now similar to locked
|
||||
+ for i in range(10):
|
||||
+ # Bind as user with valid password
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ test_user.rebind(PASSWORD)
|
||||
+ time.sleep(.5)
|
||||
+
|
||||
+ # Check that pwdReset is TRUE
|
||||
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
|
||||
+
|
||||
+ # Check that pwdTPRReset is TRUE
|
||||
+ # pwdTPRUseCount keeps increasing
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + i + 2)
|
||||
+ log.info("Rejected bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (local_tpr_maxuse + i + 2))
|
||||
+
|
||||
+
|
||||
+ def fin():
|
||||
+ topology_st.standalone.restart()
|
||||
+ # Reset password policy config
|
||||
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
|
||||
+ topology_st.standalone.config.replace('passwordMustChange', 'off')
|
||||
+
|
||||
+ # Remove local password policy from that entry
|
||||
+ subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(),
|
||||
+ 'slapd-standalone1',
|
||||
+ 'localpwp',
|
||||
+ 'remove',
|
||||
+ test_user.dn])
|
||||
+
|
||||
+ # Reset user's password
|
||||
+ test_user.replace('userpassword', TEST_USER_PWD)
|
||||
+
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+def test_global_tpr_delayValidFrom_1(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRDelayValidFrom
|
||||
Test that a TPR password is not valid before reset time +
|
||||
passwordTPRDelayValidFrom
|
||||
@@ -766,7 +908,7 @@ def test_global_tpr_delayValidFrom_1(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_delayValidFrom_2(topology_st, create_user, request):
|
||||
+def test_global_tpr_delayValidFrom_2(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRDelayValidFrom
|
||||
Test that a TPR password is valid after reset time +
|
||||
passwordTPRDelayValidFrom
|
||||
@@ -838,7 +980,7 @@ def test_global_tpr_delayValidFrom_2(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_delayValidFrom_3(topology_st, create_user, request):
|
||||
+def test_global_tpr_delayValidFrom_3(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRDelayValidFrom
|
||||
Test that a TPR attribute passwordTPRDelayValidFrom
|
||||
can be updated by DM but not the by user itself
|
||||
@@ -940,7 +1082,7 @@ def test_global_tpr_delayValidFrom_3(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_delayExpireAt_1(topology_st, create_user, request):
|
||||
+def test_global_tpr_delayExpireAt_1(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRDelayExpireAt
|
||||
Test that a TPR password is not valid after reset time +
|
||||
passwordTPRDelayExpireAt
|
||||
@@ -1010,7 +1152,7 @@ def test_global_tpr_delayExpireAt_1(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_delayExpireAt_2(topology_st, create_user, request):
|
||||
+def test_global_tpr_delayExpireAt_2(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRDelayExpireAt
|
||||
Test that a TPR password is valid before reset time +
|
||||
passwordTPRDelayExpireAt
|
||||
@@ -1082,7 +1224,7 @@ def test_global_tpr_delayExpireAt_2(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_delayExpireAt_3(topology_st, create_user, request):
|
||||
+def test_global_tpr_delayExpireAt_3(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRDelayExpireAt
|
||||
Test that a TPR attribute passwordTPRDelayExpireAt
|
||||
can be updated by DM but not the by user itself
|
||||
diff --git a/src/lib389/lib389/cli_conf/pwpolicy.py b/src/lib389/lib389/cli_conf/pwpolicy.py
|
||||
index 2838afcb8..26af6e7ec 100644
|
||||
--- a/src/lib389/lib389/cli_conf/pwpolicy.py
|
||||
+++ b/src/lib389/lib389/cli_conf/pwpolicy.py
|
||||
@@ -255,6 +255,9 @@ def create_parser(subparsers):
|
||||
set_parser.add_argument('--pwpinheritglobal', help="Set to \"on\" to allow local policies to inherit the global policy")
|
||||
set_parser.add_argument('--pwddictcheck', help="Set to \"on\" to enforce CrackLib dictionary checking")
|
||||
set_parser.add_argument('--pwddictpath', help="Filesystem path to specific/custom CrackLib dictionary files")
|
||||
+ set_parser.add_argument('--pwptprmaxuse', help="Number of times a reset password can be used for authentication")
|
||||
+ set_parser.add_argument('--pwptprdelayexpireat', help="Number of seconds after which a reset password expires")
|
||||
+ set_parser.add_argument('--pwptprdelayvalidfrom', help="Number of seconds to wait before a reset password can be used to authenticate")
|
||||
# delete local password policy
|
||||
del_parser = local_subcommands.add_parser('remove', help='Remove a local password policy')
|
||||
del_parser.set_defaults(func=del_local_policy)
|
||||
@@ -291,4 +294,4 @@ def create_parser(subparsers):
|
||||
#############################################
|
||||
set_parser.add_argument('DN', nargs=1, help='Set the local policy for this entry DN')
|
||||
add_subtree_parser.add_argument('DN', nargs=1, help='Add/replace the subtree policy for this entry DN')
|
||||
- add_user_parser.add_argument('DN', nargs=1, help='Add/replace the local password policy for this entry DN')
|
||||
\ No newline at end of file
|
||||
+ add_user_parser.add_argument('DN', nargs=1, help='Add/replace the local password policy for this entry DN')
|
||||
diff --git a/src/lib389/lib389/pwpolicy.py b/src/lib389/lib389/pwpolicy.py
|
||||
index 8653cb195..d2427933b 100644
|
||||
--- a/src/lib389/lib389/pwpolicy.py
|
||||
+++ b/src/lib389/lib389/pwpolicy.py
|
||||
@@ -65,7 +65,10 @@ class PwPolicyManager(object):
|
||||
'pwddictcheck': 'passworddictcheck',
|
||||
'pwddictpath': 'passworddictpath',
|
||||
'pwdallowhash': 'nsslapd-allow-hashed-passwords',
|
||||
- 'pwpinheritglobal': 'nsslapd-pwpolicy-inherit-global'
|
||||
+ 'pwpinheritglobal': 'nsslapd-pwpolicy-inherit-global',
|
||||
+ 'pwptprmaxuse': 'passwordTPRMaxUse',
|
||||
+ 'pwptprdelayexpireat': 'passwordTPRDelayExpireAt',
|
||||
+ 'pwptprdelayvalidfrom': 'passwordTPRDelayValidFrom'
|
||||
}
|
||||
|
||||
def is_subtree_policy(self, dn):
|
||||
--
|
||||
2.31.1
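Note (not part of the patch): the hunks above add --pwptprmaxuse, --pwptprdelayexpireat and --pwptprdelayvalidfrom to `dsconf ... localpwp set` and map them to the passwordTPR* attributes. Below is a minimal Python sketch of how the new options could be driven, mirroring the subprocess-based dsconf calls in test_local_tpr_maxuse_5 above; the instance name 'slapd-standalone1' and the use of check_call are assumptions taken from that test, not requirements of the CLI.

```python
import subprocess

def set_local_tpr_policy(sbin_dir, instance, entry_dn, maxuse):
    """Attach a user-level local policy to entry_dn and set its TPR limit via dsconf.
    Sketch only: mirrors the dsconf 'localpwp' calls used in the test above."""
    dsconf = '%s/dsconf' % sbin_dir
    # Create (or reuse) a local password policy bound to this entry
    subprocess.check_call([dsconf, instance, 'localpwp', 'adduser', entry_dn])
    # Set the TPR limit and force a password change after reset
    subprocess.check_call([dsconf, instance, 'localpwp', 'set',
                           '--pwptprmaxuse', str(maxuse),
                           '--pwdmustchange', 'on',
                           entry_dn])

# Example call (names taken from the test above, assumed to exist in that context):
# set_local_tpr_policy(topology_st.standalone.get_sbin_dir(), 'slapd-standalone1',
#                      test_user.dn, 10)
```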
|
||||
|
@ -1,40 +0,0 @@
|
||||
From 1ffcc9aa9a397180fe35283ee61b164471d073fb Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Tue, 7 Jan 2025 10:01:51 +0100
|
||||
Subject: [PATCH] Issue 6417 - (2nd) fix typo
|
||||
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 10 ++++++----
|
||||
1 file changed, 6 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 1bbb6252a..e2b8273a2 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1178,8 +1178,10 @@ entryrdn_lookup_dn(backend *be,
|
||||
|
||||
/* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
- dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1);
|
||||
- rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data);
|
||||
+ key.data = keybuf;
|
||||
+ key.size = key.ulen = strlen(keybuf) + 1;
|
||||
+ key.flags = DB_DBT_USERMEM;
|
||||
+ rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
"Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
@@ -1189,8 +1191,8 @@ entryrdn_lookup_dn(backend *be,
|
||||
elem = (rdn_elem *)data.data;
|
||||
suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
}
|
||||
- dblayer_value_free(be, &data);
|
||||
- dblayer_value_free(be, &key);
|
||||
+ slapi_ch_free(&data.data);
|
||||
+ slapi_ch_free_string(&keybuf);
|
||||
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
--
|
||||
2.48.0
|
||||
|
@ -0,0 +1,179 @@
|
||||
From 7b7217538908ae58df864ef5cd82e1d3303c189f Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 7 Jun 2021 12:58:42 -0400
|
||||
Subject: [PATCH] Issue 4447 - Crash when the Referential Integrity log is
|
||||
manually edited
|
||||
|
||||
Bug Description: If the referint log is manually edited with a string
|
||||
that is not a DN the server will crash when processing
|
||||
the log.
|
||||
|
||||
Fix Description: Check for NULL pointers when strtoking the file line.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4447
|
||||
|
||||
Reviewed by: firstyear(Thanks!)
|
||||
---
|
||||
.../tests/suites/plugins/referint_test.py | 72 +++++++++++++++----
|
||||
ldap/servers/plugins/referint/referint.c | 7 ++
|
||||
src/lib389/lib389/plugins.py | 15 ++++
|
||||
3 files changed, 80 insertions(+), 14 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/plugins/referint_test.py b/dirsrvtests/tests/suites/plugins/referint_test.py
|
||||
index 02b985767..fda602545 100644
|
||||
--- a/dirsrvtests/tests/suites/plugins/referint_test.py
|
||||
+++ b/dirsrvtests/tests/suites/plugins/referint_test.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2016 Red Hat, Inc.
|
||||
+# Copyright (C) 2021 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -12,13 +12,11 @@ Created on Dec 12, 2019
|
||||
@author: tbordaz
|
||||
'''
|
||||
import logging
|
||||
-import subprocess
|
||||
import pytest
|
||||
from lib389 import Entry
|
||||
-from lib389.utils import *
|
||||
-from lib389.plugins import *
|
||||
-from lib389._constants import *
|
||||
-from lib389.idm.user import UserAccounts, UserAccount
|
||||
+from lib389.plugins import ReferentialIntegrityPlugin
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
+from lib389.idm.user import UserAccounts
|
||||
from lib389.idm.group import Groups
|
||||
from lib389.topologies import topology_st as topo
|
||||
|
||||
@@ -29,21 +27,27 @@ log = logging.getLogger(__name__)
|
||||
ESCAPED_RDN_BASE = "foo\\,oo"
|
||||
def _user_get_dn(no):
|
||||
uid = '%s%d' % (ESCAPED_RDN_BASE, no)
|
||||
- dn = 'uid=%s,%s' % (uid, SUFFIX)
|
||||
+ dn = 'uid=%s,%s' % (uid, DEFAULT_SUFFIX)
|
||||
return (uid, dn)
|
||||
|
||||
def add_escaped_user(server, no):
|
||||
(uid, dn) = _user_get_dn(no)
|
||||
log.fatal('Adding user (%s): ' % dn)
|
||||
- server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'],
|
||||
- 'uid': [uid],
|
||||
- 'sn' : [uid],
|
||||
- 'cn' : [uid]})))
|
||||
+ users = UserAccounts(server, DEFAULT_SUFFIX, None)
|
||||
+ user_properties = {
|
||||
+ 'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson', 'posixAccount'],
|
||||
+ 'uid': uid,
|
||||
+ 'cn' : uid,
|
||||
+ 'sn' : uid,
|
||||
+ 'uidNumber' : '1000',
|
||||
+ 'gidNumber' : '2000',
|
||||
+ 'homeDirectory' : '/home/testuser',
|
||||
+ }
|
||||
+ users.create(properties=user_properties)
|
||||
return dn
|
||||
|
||||
-@pytest.mark.ds50020
|
||||
def test_referential_false_failure(topo):
|
||||
- """On MODRDN referential integrity can erronously fail
|
||||
+ """On MODRDN referential integrity can erroneously fail
|
||||
|
||||
:id: f77aeb80-c4c4-471b-8c1b-4733b714778b
|
||||
:setup: Standalone Instance
|
||||
@@ -100,6 +104,46 @@ def test_referential_false_failure(topo):
|
||||
inst.restart()
|
||||
|
||||
# Here if the bug is fixed, referential is able to update the member value
|
||||
- inst.rename_s(user1.dn, 'uid=new_test_user_1001', newsuperior=SUFFIX, delold=0)
|
||||
+ user1.rename('uid=new_test_user_1001', newsuperior=DEFAULT_SUFFIX, deloldrdn=False)
|
||||
|
||||
|
||||
+def test_invalid_referint_log(topo):
|
||||
+ """If there is an invalid log line in the referint log, make sure the server
|
||||
+ does not crash at startup
|
||||
+
|
||||
+ :id: 34807b5a-ab17-4281-ae48-4e3513e19145
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Set the referint log delay
|
||||
+ 2. Create invalid log
|
||||
+ 3. Start the server (no crash)
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topo.standalone
|
||||
+
|
||||
+ # Set delay - required for log parsing at server startup
|
||||
+ plugin = ReferentialIntegrityPlugin(inst)
|
||||
+ plugin.enable()
|
||||
+ plugin.set_update_delay('2')
|
||||
+ logfile = plugin.get_log_file()
|
||||
+ inst.restart()
|
||||
+
|
||||
+ # Create invalid log
|
||||
+ inst.stop()
|
||||
+ with open(logfile, 'w') as log_fh:
|
||||
+ log_fh.write("CRASH\n")
|
||||
+
|
||||
+ # Start the instance
|
||||
+ inst.start()
|
||||
+ assert inst.status()
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main("-s %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
|
||||
index fd5356d72..28240c1f6 100644
|
||||
--- a/ldap/servers/plugins/referint/referint.c
|
||||
+++ b/ldap/servers/plugins/referint/referint.c
|
||||
@@ -1447,6 +1447,13 @@ referint_thread_func(void *arg __attribute__((unused)))
|
||||
sdn = slapi_sdn_new_normdn_byref(ptoken);
|
||||
ptoken = ldap_utf8strtok_r(NULL, delimiter, &iter);
|
||||
|
||||
+ if (ptoken == NULL) {
|
||||
+ /* Invalid line in referint log, skip it */
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, REFERINT_PLUGIN_SUBSYSTEM,
|
||||
+ "Skipping invalid referint log line: (%s)\n", thisline);
|
||||
+ slapi_sdn_free(&sdn);
|
||||
+ continue;
|
||||
+ }
|
||||
if (!strcasecmp(ptoken, "NULL")) {
|
||||
tmprdn = NULL;
|
||||
} else {
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 2d88e60bd..b07e80022 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -518,6 +518,21 @@ class ReferentialIntegrityPlugin(Plugin):
|
||||
|
||||
self.set('referint-update-delay', str(value))
|
||||
|
||||
+ def get_log_file(self):
|
||||
+ """Get referint log file"""
|
||||
+
|
||||
+ return self.get_attr_val_utf8('referint-logfile')
|
||||
+
|
||||
+ def get_log_file_formatted(self):
|
||||
+ """Get referint log file"""
|
||||
+
|
||||
+ return self.display_attr('referint-logfile')
|
||||
+
|
||||
+ def set_log_file(self, value):
|
||||
+ """Set referint log file"""
|
||||
+
|
||||
+ self.set('referint-logfile', value)
|
||||
+
|
||||
def get_membership_attr(self, formatted=False):
|
||||
"""Get referint-membership-attr attribute"""
|
||||
|
||||
--
|
||||
2.31.1
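Note (not part of the patch): the ReferentialIntegrityPlugin gains get_log_file()/set_log_file() helpers above, and test_invalid_referint_log uses them to corrupt the log before a restart. A minimal sketch of that flow, assuming `inst` is a lib389 DirSrv instance as in the test:

```python
from lib389.plugins import ReferentialIntegrityPlugin

def write_bogus_referint_log(inst, text="CRASH\n"):
    """Enable referint with an async update delay, then corrupt its log file.
    Sketch of the scenario exercised by test_invalid_referint_log above."""
    plugin = ReferentialIntegrityPlugin(inst)
    plugin.enable()
    plugin.set_update_delay('2')      # delay > 0 makes the log get parsed at startup
    logfile = plugin.get_log_file()   # helper added by this patch
    inst.restart()

    inst.stop()
    with open(logfile, 'w') as log_fh:
        log_fh.write(text)            # not a DN: previously crashed the server
    inst.start()
    return inst.status()              # True if the server survived startup
```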
|
||||
|
@ -1,75 +0,0 @@
|
||||
From 9e1284122a929fe14633a2aa6e2de4d72891f98f Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 13 Jan 2025 17:41:18 +0100
|
||||
Subject: [PATCH] Issue 6417 - (3rd) If an entry RDN is identical to the
|
||||
suffix, then Entryrdn gets broken during a reindex (#6480)
|
||||
|
||||
Bug description:
|
||||
The previous fix had a flaw.
|
||||
In case entryrdn_lookup_dn is called with an undefined suffix
|
||||
the lookup of the suffix trigger a crash.
|
||||
For example it can occur during an internal search of a
nonexistent map (view plugin).
|
||||
The issue exists in all releases but is hidden since 2.3.
|
||||
|
||||
Fix description:
|
||||
testing the suffix is defined
|
||||
|
||||
fixes: #6417
|
||||
|
||||
Reviewed by: Pierre Rogier (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 36 +++++++++++---------
|
||||
1 file changed, 20 insertions(+), 16 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index e2b8273a2..01c77156f 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1176,23 +1176,27 @@ entryrdn_lookup_dn(backend *be,
|
||||
/* Setting the bulk fetch buffer */
|
||||
data.flags = DB_DBT_MALLOC;
|
||||
|
||||
- /* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
- keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
- key.data = keybuf;
|
||||
- key.size = key.ulen = strlen(keybuf) + 1;
|
||||
- key.flags = DB_DBT_USERMEM;
|
||||
- rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
- if (rc) {
|
||||
- slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
- "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
- slapi_sdn_get_ndn(be->be_suffix),
|
||||
- suffix_id);
|
||||
- } else {
|
||||
- elem = (rdn_elem *)data.data;
|
||||
- suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ /* Just in case the suffix ID is not '1' retrieve it from the database
|
||||
+ * if the suffix is not defined suffix_id remains '1'
|
||||
+ */
|
||||
+ if (be->be_suffix) {
|
||||
+ keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
+ key.data = keybuf;
|
||||
+ key.size = key.ulen = strlen(keybuf) + 1;
|
||||
+ key.flags = DB_DBT_USERMEM;
|
||||
+ rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
+ if (rc) {
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
+ "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
+ slapi_sdn_get_ndn(be->be_suffix),
|
||||
+ suffix_id);
|
||||
+ } else {
|
||||
+ elem = (rdn_elem *) data.data;
|
||||
+ suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ }
|
||||
+ slapi_ch_free(&data.data);
|
||||
+ slapi_ch_free_string(&keybuf);
|
||||
}
|
||||
- slapi_ch_free(&data.data);
|
||||
- slapi_ch_free_string(&keybuf);
|
||||
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
--
|
||||
2.48.0
|
||||
|
114
SOURCES/0021-Issue-4791-Missing-dependency-for-RetroCL-RFE.patch
Normal file
@ -0,0 +1,114 @@
|
||||
From 964a153b420b26140e0bbddfbebb4a51aaa0e4ea Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Thu, 3 Jun 2021 15:16:22 +0000
|
||||
Subject: [PATCH 1/7] Issue 4791 - Missing dependency for RetroCL RFE
|
||||
|
||||
Description: The RetroCL exclude attribute RFE is dependent on functionality of the
|
||||
EntryUUID bug fix, that didn't make into the latest build. This breaks the
|
||||
RetroCL exclude attr feature so we need to provide a workaround.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/4791
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/pull/4723
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/4224
|
||||
|
||||
Reviewed by: tbordaz, droideck (Thank you)
|
||||
---
|
||||
.../tests/suites/retrocl/basic_test.py | 6 ++--
|
||||
.../lib389/cli_conf/plugins/retrochangelog.py | 35 +++++++++++++++++--
|
||||
2 files changed, 36 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
|
||||
index 112c73cb9..f3bc50f29 100644
|
||||
--- a/dirsrvtests/tests/suites/retrocl/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
|
||||
@@ -17,7 +17,7 @@ from lib389.utils import *
|
||||
from lib389.tasks import *
|
||||
from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
|
||||
from lib389.cli_base.dsrc import dsrc_arg_concat
|
||||
-from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add
|
||||
+from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add_attr
|
||||
from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
@@ -122,7 +122,7 @@ def test_retrocl_exclude_attr_add(topology_st):
|
||||
args.bindpw = None
|
||||
args.prompt = False
|
||||
args.exclude_attrs = ATTR_HOMEPHONE
|
||||
- args.func = retrochangelog_add
|
||||
+ args.func = retrochangelog_add_attr
|
||||
dsrc_inst = dsrc_arg_concat(args, None)
|
||||
inst = connect_instance(dsrc_inst, False, args)
|
||||
result = args.func(inst, None, log, args)
|
||||
@@ -255,7 +255,7 @@ def test_retrocl_exclude_attr_mod(topology_st):
|
||||
args.bindpw = None
|
||||
args.prompt = False
|
||||
args.exclude_attrs = ATTR_CARLICENSE
|
||||
- args.func = retrochangelog_add
|
||||
+ args.func = retrochangelog_add_attr
|
||||
dsrc_inst = dsrc_arg_concat(args, None)
|
||||
inst = connect_instance(dsrc_inst, False, args)
|
||||
result = args.func(inst, None, log, args)
|
||||
diff --git a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py
|
||||
index 9940c6532..160fbb82d 100644
|
||||
--- a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py
|
||||
+++ b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py
|
||||
@@ -6,8 +6,13 @@
|
||||
# See LICENSE for details.
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
|
||||
+# JC Work around for missing dependency on https://github.com/389ds/389-ds-base/pull/4344
|
||||
+import ldap
|
||||
+
|
||||
from lib389.plugins import RetroChangelogPlugin
|
||||
-from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit
|
||||
+# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344
|
||||
+# from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add_attr
|
||||
+from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, _args_to_attrs
|
||||
|
||||
arg_to_attr = {
|
||||
'is_replicated': 'isReplicated',
|
||||
@@ -18,12 +23,38 @@ arg_to_attr = {
|
||||
'exclude_attrs': 'nsslapd-exclude-attrs'
|
||||
}
|
||||
|
||||
-
|
||||
def retrochangelog_edit(inst, basedn, log, args):
|
||||
log = log.getChild('retrochangelog_edit')
|
||||
plugin = RetroChangelogPlugin(inst)
|
||||
generic_object_edit(plugin, log, args, arg_to_attr)
|
||||
|
||||
+# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344
|
||||
+def retrochangelog_add_attr(inst, basedn, log, args):
|
||||
+ log = log.getChild('retrochangelog_add_attr')
|
||||
+ plugin = RetroChangelogPlugin(inst)
|
||||
+ generic_object_add_attr(plugin, log, args, arg_to_attr)
|
||||
+
|
||||
+# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344
|
||||
+def generic_object_add_attr(dsldap_object, log, args, arg_to_attr):
|
||||
+ """Add an attribute to the entry. This differs to 'edit' as edit uses replace,
|
||||
+ and this allows multivalues to be added.
|
||||
+
|
||||
+ dsldap_object should be a single instance of DSLdapObject with a set dn
|
||||
+ """
|
||||
+ log = log.getChild('generic_object_add_attr')
|
||||
+ # Gather the attributes
|
||||
+ attrs = _args_to_attrs(args, arg_to_attr)
|
||||
+
|
||||
+ modlist = []
|
||||
+ for attr, value in attrs.items():
|
||||
+ if not isinstance(value, list):
|
||||
+ value = [value]
|
||||
+ modlist.append((ldap.MOD_ADD, attr, value))
|
||||
+ if len(modlist) > 0:
|
||||
+ dsldap_object.apply_mods(modlist)
|
||||
+ log.info("Successfully changed the %s", dsldap_object.dn)
|
||||
+ else:
|
||||
+ raise ValueError("There is nothing to set in the %s plugin entry" % dsldap_object.dn)
|
||||
|
||||
def _add_parser_args(parser):
|
||||
parser.add_argument('--is-replicated', choices=['TRUE', 'FALSE'], type=str.upper,
|
||||
--
|
||||
2.31.1
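Note (not part of the patch): retrochangelog_add_attr and the generic_object_add_attr workaround above are driven from the CLI tests through FakeArgs. A minimal sketch of that call path, mirroring test_retrocl_exclude_attr_add; the extra FakeArgs fields (instance, basedn, binddn) are assumptions, since only bindpw/prompt/exclude_attrs/func appear in the hunk shown:

```python
from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
from lib389.cli_base.dsrc import dsrc_arg_concat
from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add_attr

def add_retrocl_exclude_attr(instance_name, attr, log):
    """Add one value to nsslapd-exclude-attrs through the CLI workaround.
    Sketch only, following the FakeArgs pattern of the test above."""
    args = FakeArgs()
    args.instance = instance_name         # assumed field name
    args.basedn = None                    # assumed field name
    args.binddn = None                    # assumed field name
    args.bindpw = None
    args.prompt = False
    args.exclude_attrs = attr             # mapped to nsslapd-exclude-attrs via arg_to_attr
    args.func = retrochangelog_add_attr   # MOD_ADD based handler added by this patch

    dsrc_inst = dsrc_arg_concat(args, None)
    inst = connect_instance(dsrc_inst, False, args)
    try:
        return args.func(inst, None, log, args)
    finally:
        disconnect_instance(inst)
```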
|
||||
|
@ -1,297 +0,0 @@
|
||||
From d2f9dd82e3610ee9b73feea981c680c03bb21394 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 16 Jan 2025 08:42:53 -0500
Subject: [PATCH] Issue 6509 - Race condition with Paged Result searches

Description:

There is a race condition with Paged Result searches when a new operation comes
in while a paged search is finishing. This triggers an invalid timeout error
and closes the connection with a T3 code.

The problem is that we do not use the "PagedResult lock" when checking the
connection's paged result data for a timeout event. This causes the paged
result timeout value to change unexpectedly and trigger a false timeout when a
new operation arrives.

Now we check the timeout without the conn lock; if it has expired it could be a
race condition and a false positive. We then try the lock again and re-test the
timeout. This also keeps non-paged result searches from getting held up by the
lock when it's not necessary.

This also fixes some memory leaks that occur when an error happens.

Relates: https://github.com/389ds/389-ds-base/issues/6509

Reviewed by: tbordaz & proger (Thanks!!)
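Note (not part of the patch): the description above is a double-checked, non-blocking locking pattern: test the timeout cheaply without the paged-result lock, and only on a hit try the lock and re-test before disconnecting. A conceptual Python sketch of the same pattern (illustration only, not the server's C code):

```python
import threading

class PagedConn:
    """Toy stand-in for a connection with paged-result state (illustration only)."""
    def __init__(self):
        self.pr_lock = threading.Lock()
        self.deadline = None  # set by the paged-search code

    def timed_out(self, now):
        # First check without the lock: cheap, but may race with an
        # operation that is updating self.deadline right now.
        return self.deadline is not None and now > self.deadline

def maybe_disconnect(conn, now, disconnect):
    if not conn.timed_out(now):
        return                      # common case: no lock taken at all
    # Possible race / false positive: retry under the paged-result lock.
    if not conn.pr_lock.acquire(blocking=False):
        return                      # busy: skip this pass, try again later
    try:
        if conn.timed_out(now):     # re-check while holding the lock
            disconnect(conn)
    finally:
        conn.pr_lock.release()
```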
---
|
||||
ldap/servers/slapd/daemon.c | 61 ++++++++++++++++++-------------
|
||||
ldap/servers/slapd/opshared.c | 58 ++++++++++++++---------------
|
||||
ldap/servers/slapd/pagedresults.c | 9 +++++
|
||||
ldap/servers/slapd/slap.h | 2 +-
|
||||
4 files changed, 75 insertions(+), 55 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index bb80dae36..13dfe250d 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1578,7 +1578,29 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
if (c->c_state == CONN_STATE_FREE) {
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else {
|
||||
- /* we try to acquire the connection mutex, if it is already
|
||||
+ /* Check for a timeout for PAGED RESULTS */
|
||||
+ if (pagedresults_is_timedout_nolock(c)) {
|
||||
+ /*
|
||||
+ * There could be a race condition so lets try again with the
|
||||
+ * right lock
|
||||
+ */
|
||||
+ pthread_mutex_t *pr_mutex = pageresult_lock_get_addr(c);
|
||||
+ if (pthread_mutex_trylock(pr_mutex) == EBUSY) {
|
||||
+ c = next;
|
||||
+ continue;
|
||||
+ }
|
||||
+ if (pagedresults_is_timedout_nolock(c)) {
|
||||
+ pthread_mutex_unlock(pr_mutex);
|
||||
+ disconnect_server(c, c->c_connid, -1,
|
||||
+ SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
|
||||
+ 0);
|
||||
+ } else {
|
||||
+ pthread_mutex_unlock(pr_mutex);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ /*
|
||||
+ * we try to acquire the connection mutex, if it is already
|
||||
* acquired by another thread, don't wait
|
||||
*/
|
||||
if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
|
||||
@@ -1586,35 +1608,24 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
continue;
|
||||
}
|
||||
if (c->c_flags & CONN_FLAG_CLOSING) {
|
||||
- /* A worker thread has marked that this connection
|
||||
- * should be closed by calling disconnect_server.
|
||||
- * move this connection out of the active list
|
||||
- * the last thread to use the connection will close it
|
||||
+ /*
|
||||
+ * A worker thread, or paged result timeout, has marked that
|
||||
+ * this connection should be closed by calling
|
||||
+ * disconnect_server(). Move this connection out of the active
|
||||
+ * list then the last thread to use the connection will close
|
||||
+ * it.
|
||||
*/
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else if (c->c_sd == SLAPD_INVALID_SOCKET) {
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else if (c->c_prfd != NULL) {
|
||||
if ((!c->c_gettingber) && (c->c_threadnumber < c->c_max_threads_per_conn)) {
|
||||
- int add_fd = 1;
|
||||
- /* check timeout for PAGED RESULTS */
|
||||
- if (pagedresults_is_timedout_nolock(c)) {
|
||||
- /* Exceeded the paged search timelimit; disconnect the client */
|
||||
- disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
- SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
|
||||
- 0);
|
||||
- connection_table_move_connection_out_of_active_list(ct,
|
||||
- c);
|
||||
- add_fd = 0; /* do not poll on this fd */
|
||||
- }
|
||||
- if (add_fd) {
|
||||
- ct->fd[count].fd = c->c_prfd;
|
||||
- ct->fd[count].in_flags = SLAPD_POLL_FLAGS;
|
||||
- /* slot i of the connection table is mapped to slot
|
||||
- * count of the fds array */
|
||||
- c->c_fdi = count;
|
||||
- count++;
|
||||
- }
|
||||
+ ct->fd[listnum][count].fd = c->c_prfd;
|
||||
+ ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
|
||||
+ /* slot i of the connection table is mapped to slot
|
||||
+ * count of the fds array */
|
||||
+ c->c_fdi = count;
|
||||
+ count++;
|
||||
} else {
|
||||
if (c->c_threadnumber >= c->c_max_threads_per_conn) {
|
||||
c->c_maxthreadsblocked++;
|
||||
@@ -1675,7 +1686,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused
|
||||
continue;
|
||||
}
|
||||
|
||||
- /* Try to get connection mutex, if not available just skip the connection and
|
||||
+ /* Try to get connection mutex, if not available just skip the connection and
|
||||
* process other connections events. May generates cpu load for listening thread
|
||||
* if connection mutex is held for a long time
|
||||
*/
|
||||
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
|
||||
index 7ab4117cd..a29eed052 100644
|
||||
--- a/ldap/servers/slapd/opshared.c
|
||||
+++ b/ldap/servers/slapd/opshared.c
|
||||
@@ -250,7 +250,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
char *errtext = NULL;
|
||||
int nentries, pnentries;
|
||||
int flag_search_base_found = 0;
|
||||
- int flag_no_such_object = 0;
|
||||
+ bool flag_no_such_object = false;
|
||||
int flag_referral = 0;
|
||||
int flag_psearch = 0;
|
||||
int err_code = LDAP_SUCCESS;
|
||||
@@ -315,7 +315,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
rc = -1;
|
||||
goto free_and_return_nolock;
|
||||
}
|
||||
-
|
||||
+
|
||||
/* Set the time we actually started the operation */
|
||||
slapi_operation_set_time_started(operation);
|
||||
|
||||
@@ -798,11 +798,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
}
|
||||
|
||||
/* subtree searches :
|
||||
- * if the search was started above the backend suffix
|
||||
- * - temporarily set the SLAPI_SEARCH_TARGET_SDN to the
|
||||
- * base of the node so that we don't get a NO SUCH OBJECT error
|
||||
- * - do not change the scope
|
||||
- */
|
||||
+ * if the search was started above the backend suffix
|
||||
+ * - temporarily set the SLAPI_SEARCH_TARGET_SDN to the
|
||||
+ * base of the node so that we don't get a NO SUCH OBJECT error
|
||||
+ * - do not change the scope
|
||||
+ */
|
||||
if (scope == LDAP_SCOPE_SUBTREE) {
|
||||
if (slapi_sdn_issuffix(be_suffix, basesdn)) {
|
||||
if (free_sdn) {
|
||||
@@ -825,53 +825,53 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
switch (rc) {
|
||||
case 1:
|
||||
/* if the backend returned LDAP_NO_SUCH_OBJECT for a SEARCH request,
|
||||
- * it will not have sent back a result - otherwise, it will have
|
||||
- * sent a result */
|
||||
+ * it will not have sent back a result - otherwise, it will have
|
||||
+ * sent a result */
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
|
||||
if (err == LDAP_NO_SUCH_OBJECT) {
|
||||
/* may be the object exist somewhere else
|
||||
- * wait the end of the loop to send back this error
|
||||
- */
|
||||
- flag_no_such_object = 1;
|
||||
+ * wait the end of the loop to send back this error
|
||||
+ */
|
||||
+ flag_no_such_object = true;
|
||||
} else {
|
||||
/* err something other than LDAP_NO_SUCH_OBJECT, so the backend will
|
||||
- * have sent the result -
|
||||
- * Set a flag here so we don't return another result. */
|
||||
+ * have sent the result -
|
||||
+ * Set a flag here so we don't return another result. */
|
||||
sent_result = 1;
|
||||
}
|
||||
- /* fall through */
|
||||
+ /* fall through */
|
||||
|
||||
case -1: /* an error occurred */
|
||||
+ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
|
||||
/* PAGED RESULTS */
|
||||
if (op_is_pagedresults(operation)) {
|
||||
/* cleanup the slot */
|
||||
pthread_mutex_lock(pagedresults_mutex);
|
||||
+ if (err != LDAP_NO_SUCH_OBJECT && !flag_no_such_object) {
|
||||
+ /* Free the results if not "no_such_object" */
|
||||
+ void *sr = NULL;
|
||||
+ slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
|
||||
+ be->be_search_results_release(&sr);
|
||||
+ }
|
||||
pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx);
|
||||
rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1);
|
||||
pthread_mutex_unlock(pagedresults_mutex);
|
||||
}
|
||||
- if (1 == flag_no_such_object) {
|
||||
- break;
|
||||
- }
|
||||
- slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
|
||||
- if (err == LDAP_NO_SUCH_OBJECT) {
|
||||
- /* may be the object exist somewhere else
|
||||
- * wait the end of the loop to send back this error
|
||||
- */
|
||||
- flag_no_such_object = 1;
|
||||
+
|
||||
+ if (err == LDAP_NO_SUCH_OBJECT || flag_no_such_object) {
|
||||
+ /* Maybe the object exists somewhere else, wait to the end
|
||||
+ * of the loop to send back this error */
|
||||
+ flag_no_such_object = true;
|
||||
break;
|
||||
} else {
|
||||
- /* for error other than LDAP_NO_SUCH_OBJECT
|
||||
- * the error has already been sent
|
||||
- * stop the search here
|
||||
- */
|
||||
+ /* For error other than LDAP_NO_SUCH_OBJECT the error has
|
||||
+ * already been sent stop the search here */
|
||||
cache_return_target_entry(pb, be, operation);
|
||||
goto free_and_return;
|
||||
}
|
||||
|
||||
/* when rc == SLAPI_FAIL_DISKFULL this case is executed */
|
||||
-
|
||||
case SLAPI_FAIL_DISKFULL:
|
||||
operation_out_of_disk_space();
|
||||
cache_return_target_entry(pb, be, operation);
|
||||
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
|
||||
index db87e486e..4aa1fa3e5 100644
|
||||
--- a/ldap/servers/slapd/pagedresults.c
|
||||
+++ b/ldap/servers/slapd/pagedresults.c
|
||||
@@ -121,12 +121,15 @@ pagedresults_parse_control_value(Slapi_PBlock *pb,
|
||||
if (ber_scanf(ber, "{io}", pagesize, &cookie) == LBER_ERROR) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
|
||||
"<= corrupted control value\n");
|
||||
+ ber_free(ber, 1);
|
||||
return LDAP_PROTOCOL_ERROR;
|
||||
}
|
||||
if (!maxreqs) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
|
||||
"Simple paged results requests per conn exceeded the limit: %d\n",
|
||||
maxreqs);
|
||||
+ ber_free(ber, 1);
|
||||
+ slapi_ch_free_string(&cookie.bv_val);
|
||||
return LDAP_UNWILLING_TO_PERFORM;
|
||||
}
|
||||
|
||||
@@ -376,6 +379,10 @@ pagedresults_free_one_msgid(Connection *conn, ber_int_t msgid, pthread_mutex_t *
|
||||
}
|
||||
prp->pr_flags |= CONN_FLAG_PAGEDRESULTS_ABANDONED;
|
||||
prp->pr_flags &= ~CONN_FLAG_PAGEDRESULTS_PROCESSING;
|
||||
+ if (conn->c_pagedresults.prl_count > 0) {
|
||||
+ _pr_cleanup_one_slot(prp);
|
||||
+ conn->c_pagedresults.prl_count--;
|
||||
+ }
|
||||
rc = 0;
|
||||
break;
|
||||
}
|
||||
@@ -940,7 +947,9 @@ pagedresults_is_timedout_nolock(Connection *conn)
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "<-- pagedresults_is_timedout", "<= false 2\n");
|
||||
+
|
||||
return 0;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
|
||||
index 072f6f962..469874fd1 100644
|
||||
--- a/ldap/servers/slapd/slap.h
|
||||
+++ b/ldap/servers/slapd/slap.h
|
||||
@@ -74,7 +74,7 @@ static char ptokPBE[34] = "Internal (Software) Token ";
|
||||
#include <sys/stat.h>
|
||||
#include <sys/socket.h>
|
||||
#include <netinet/in.h>
|
||||
-
|
||||
+#include <stdbool.h>
|
||||
#include <time.h> /* For timespec definitions */
|
||||
|
||||
/* Provides our int types and platform specific requirements. */
|
||||
--
|
||||
2.48.0
|
||||
|
@ -0,0 +1,642 @@
|
||||
From d2ac7e98d53cfe6c74c99ddf3504b1072418f05a Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Thu, 11 Mar 2021 10:12:46 -0500
|
||||
Subject: [PATCH] Issue 4656 - remove problematic language from ds-replcheck
|
||||
|
||||
Description: remove master from ds-replcheck and replace it with supplier
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4656
|
||||
|
||||
Reviewed by: mreynolds
|
||||
|
||||
|
||||
---
|
||||
ldap/admin/src/scripts/ds-replcheck | 202 ++++++++++++++--------------
|
||||
1 file changed, 101 insertions(+), 101 deletions(-)
|
||||
|
||||
diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
|
||||
index 169496e8f..f411f357a 100755
|
||||
--- a/ldap/admin/src/scripts/ds-replcheck
|
||||
+++ b/ldap/admin/src/scripts/ds-replcheck
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2020 Red Hat, Inc.
|
||||
+# Copyright (C) 2021 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -63,7 +63,7 @@ def remove_entry(rentries, dn):
|
||||
def get_ruv_time(ruv, rid):
|
||||
"""Take a RUV element (nsds50ruv attribute) and extract the timestamp from maxcsn
|
||||
:param ruv - A lsit of RUV elements
|
||||
- :param rid - The rid of the master to extractthe maxcsn time from
|
||||
+ :param rid - The rid of the supplier to extract the maxcsn time from
|
||||
:return: The time in seconds of the maxcsn, or 0 if there is no maxcsn, or -1 if
|
||||
the rid was not found
|
||||
"""
|
||||
@@ -213,22 +213,22 @@ def get_ruv_state(opts):
|
||||
:param opts - all the script options
|
||||
:return - A text description of the replicaton state
|
||||
"""
|
||||
- mtime = get_ruv_time(opts['master_ruv'], opts['rid'])
|
||||
+ mtime = get_ruv_time(opts['supplier_ruv'], opts['rid'])
|
||||
rtime = get_ruv_time(opts['replica_ruv'], opts['rid'])
|
||||
if mtime == -1:
|
||||
- repl_state = "Replication State: Replica ID ({}) not found in Master's RUV".format(opts['rid'])
|
||||
+ repl_state = "Replication State: Replica ID ({}) not found in Supplier's RUV".format(opts['rid'])
|
||||
elif rtime == -1:
|
||||
repl_state = "Replication State: Replica ID ({}) not found in Replica's RUV (not initialized?)".format(opts['rid'])
|
||||
elif mtime == 0:
|
||||
- repl_state = "Replication State: Master has not seen any updates"
|
||||
+ repl_state = "Replication State: Supplier has not seen any updates"
|
||||
elif rtime == 0:
|
||||
- repl_state = "Replication State: Replica has not seen any changes from the Master"
|
||||
+ repl_state = "Replication State: Replica has not seen any changes from the Supplier"
|
||||
elif mtime > rtime:
|
||||
- repl_state = "Replication State: Replica is behind Master by: {} seconds".format(mtime - rtime)
|
||||
+ repl_state = "Replication State: Replica is behind Supplier by: {} seconds".format(mtime - rtime)
|
||||
elif mtime < rtime:
|
||||
- repl_state = "Replication State: Replica is ahead of Master by: {} seconds".format(rtime - mtime)
|
||||
+ repl_state = "Replication State: Replica is ahead of Supplier by: {} seconds".format(rtime - mtime)
|
||||
else:
|
||||
- repl_state = "Replication State: Master and Replica are in perfect synchronization"
|
||||
+ repl_state = "Replication State: Supplier and Replica are in perfect synchronization"
|
||||
|
||||
return repl_state
|
||||
|
||||
@@ -238,11 +238,11 @@ def get_ruv_report(opts):
|
||||
:param opts - all the script options
|
||||
:return - A text blob to display in the report
|
||||
"""
|
||||
- opts['master_ruv'].sort()
|
||||
+ opts['supplier_ruv'].sort()
|
||||
opts['replica_ruv'].sort()
|
||||
|
||||
- report = "Master RUV:\n"
|
||||
- for element in opts['master_ruv']:
|
||||
+ report = "Supplier RUV:\n"
|
||||
+ for element in opts['supplier_ruv']:
|
||||
report += " %s\n" % (element)
|
||||
report += "\nReplica RUV:\n"
|
||||
for element in opts['replica_ruv']:
|
||||
@@ -521,7 +521,7 @@ def get_ldif_ruv(LDIF, opts):
|
||||
|
||||
def cmp_entry(mentry, rentry, opts):
|
||||
"""Compare the two entries, and return a "diff map"
|
||||
- :param mentry - A Master entry
|
||||
+ :param mentry - A Supplier entry
|
||||
:param rentry - A Replica entry
|
||||
:param opts - A Dict of the scripts options
|
||||
:return - A Dict of the differences in the entry, or None
|
||||
@@ -536,7 +536,7 @@ def cmp_entry(mentry, rentry, opts):
|
||||
mlist = list(mentry.data.keys())
|
||||
|
||||
#
|
||||
- # Check master
|
||||
+ # Check Supplier
|
||||
#
|
||||
for mattr in mlist:
|
||||
if mattr in opts['ignore']:
|
||||
@@ -555,7 +555,7 @@ def cmp_entry(mentry, rentry, opts):
|
||||
if not found:
|
||||
diff['missing'].append("")
|
||||
found = True
|
||||
- diff['missing'].append(" - Master's State Info: %s" % (val))
|
||||
+ diff['missing'].append(" - Supplier's State Info: %s" % (val))
|
||||
diff['missing'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
|
||||
else:
|
||||
# No state info, just move on
|
||||
@@ -566,18 +566,18 @@ def cmp_entry(mentry, rentry, opts):
|
||||
if report_conflict(rentry, mattr, opts) and report_conflict(mentry, mattr, opts):
|
||||
diff['diff'].append(" - Attribute '%s' is different:" % mattr)
|
||||
if 'nscpentrywsi' in mentry.data:
|
||||
- # Process Master
|
||||
+ # Process Supplier
|
||||
found = False
|
||||
for val in mentry.data['nscpentrywsi']:
|
||||
if val.lower().startswith(mattr + ';'):
|
||||
if not found:
|
||||
- diff['diff'].append(" Master:")
|
||||
+ diff['diff'].append(" Supplier:")
|
||||
diff['diff'].append(" - Value: %s" % (val.split(':')[1].lstrip()))
|
||||
diff['diff'].append(" - State Info: %s" % (val))
|
||||
diff['diff'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
|
||||
found = True
|
||||
if not found:
|
||||
- diff['diff'].append(" Master: ")
|
||||
+ diff['diff'].append(" Supplier: ")
|
||||
for val in mentry.data[mattr]:
|
||||
# This is an "origin" value which means it's never been
|
||||
# updated since replication was set up. So its the
|
||||
@@ -605,7 +605,7 @@ def cmp_entry(mentry, rentry, opts):
|
||||
diff['diff'].append("")
|
||||
else:
|
||||
# no state info, report what we got
|
||||
- diff['diff'].append(" Master: ")
|
||||
+ diff['diff'].append(" Supplier: ")
|
||||
for val in mentry.data[mattr]:
|
||||
diff['diff'].append(" - %s: %s" % (mattr, val))
|
||||
diff['diff'].append(" Replica: ")
|
||||
@@ -622,9 +622,9 @@ def cmp_entry(mentry, rentry, opts):
|
||||
continue
|
||||
|
||||
if rattr not in mlist:
|
||||
- # Master is missing the attribute
|
||||
+ # Supplier is missing the attribute
|
||||
if report_conflict(rentry, rattr, opts):
|
||||
- diff['missing'].append(" - Master missing attribute: \"%s\"" % (rattr))
|
||||
+ diff['missing'].append(" - Supplier missing attribute: \"%s\"" % (rattr))
|
||||
diff_count += 1
|
||||
if 'nscpentrywsi' in rentry.data:
|
||||
found = False
|
||||
@@ -663,7 +663,7 @@ def do_offline_report(opts, output_file=None):
|
||||
try:
|
||||
MLDIF = open(opts['mldif'], "r")
|
||||
except Exception as e:
|
||||
- print('Failed to open Master LDIF: ' + str(e))
|
||||
+ print('Failed to open Supplier LDIF: ' + str(e))
|
||||
return
|
||||
|
||||
try:
|
||||
@@ -676,10 +676,10 @@ def do_offline_report(opts, output_file=None):
|
||||
# Verify LDIF Files
|
||||
try:
|
||||
if opts['verbose']:
|
||||
- print("Validating Master ldif file ({})...".format(opts['mldif']))
|
||||
+ print("Validating Supplier ldif file ({})...".format(opts['mldif']))
|
||||
LDIFRecordList(MLDIF).parse()
|
||||
except ValueError:
|
||||
- print('Master LDIF file in invalid, aborting...')
|
||||
+ print('Supplier LDIF file in invalid, aborting...')
|
||||
MLDIF.close()
|
||||
RLDIF.close()
|
||||
return
|
||||
@@ -696,34 +696,34 @@ def do_offline_report(opts, output_file=None):
|
||||
# Get all the dn's, and entry counts
|
||||
if opts['verbose']:
|
||||
print ("Gathering all the DN's...")
|
||||
- master_dns = get_dns(MLDIF, opts['mldif'], opts)
|
||||
+ supplier_dns = get_dns(MLDIF, opts['mldif'], opts)
|
||||
replica_dns = get_dns(RLDIF, opts['rldif'], opts)
|
||||
- if master_dns is None or replica_dns is None:
|
||||
+ if supplier_dns is None or replica_dns is None:
|
||||
print("Aborting scan...")
|
||||
MLDIF.close()
|
||||
RLDIF.close()
|
||||
sys.exit(1)
|
||||
- m_count = len(master_dns)
|
||||
+ m_count = len(supplier_dns)
|
||||
r_count = len(replica_dns)
|
||||
|
||||
# Get DB RUV
|
||||
if opts['verbose']:
|
||||
print ("Gathering the database RUV's...")
|
||||
- opts['master_ruv'] = get_ldif_ruv(MLDIF, opts)
|
||||
+ opts['supplier_ruv'] = get_ldif_ruv(MLDIF, opts)
|
||||
opts['replica_ruv'] = get_ldif_ruv(RLDIF, opts)
|
||||
|
||||
- """ Compare the master entries with the replica's. Take our list of dn's from
|
||||
- the master ldif and get that entry( dn) from the master and replica ldif. In
|
||||
+ """ Compare the Supplier entries with the replica's. Take our list of dn's from
|
||||
+ the Supplier ldif and get that entry( dn) from the Supplier and replica ldif. In
|
||||
this phase we keep keep track of conflict/tombstone counts, and we check for
|
||||
missing entries and entry differences. We only need to do the entry diff
|
||||
checking in this phase - we do not need to do it when process the replica dn's
|
||||
because if the entry exists in both LDIF's then we already checked or diffs
|
||||
- while processing the master dn's.
|
||||
+ while processing the Supplier dn's.
|
||||
"""
|
||||
if opts['verbose']:
|
||||
- print ("Comparing Master to Replica...")
|
||||
+ print ("Comparing Supplier to Replica...")
|
||||
missing = False
|
||||
- for dn in master_dns:
|
||||
+ for dn in supplier_dns:
|
||||
mresult = ldif_search(MLDIF, dn)
|
||||
if mresult['entry'] is None and mresult['conflict'] is None and not mresult['tombstone']:
|
||||
# Try from the beginning
|
||||
@@ -736,7 +736,7 @@ def do_offline_report(opts, output_file=None):
|
||||
rresult['conflict'] is not None or rresult['tombstone']):
|
||||
""" We can safely remove this DN from the replica dn list as it
|
||||
does not need to be checked again. This also speeds things up
|
||||
- when doing the replica vs master phase.
|
||||
+ when doing the replica vs Supplier phase.
|
||||
"""
|
||||
replica_dns.remove(dn)
|
||||
|
||||
@@ -766,7 +766,7 @@ def do_offline_report(opts, output_file=None):
|
||||
missing_report += (' Entries missing on Replica:\n')
|
||||
missing = True
|
||||
if mresult['entry'] and 'createtimestamp' in mresult['entry'].data:
|
||||
- missing_report += (' - %s (Created on Master at: %s)\n' %
|
||||
+ missing_report += (' - %s (Created on Supplier at: %s)\n' %
|
||||
(dn, convert_timestamp(mresult['entry'].data['createtimestamp'][0])))
|
||||
else:
|
||||
missing_report += (' - %s\n' % dn)
|
||||
@@ -791,7 +791,7 @@ def do_offline_report(opts, output_file=None):
|
||||
remaining conflict & tombstone entries as well.
|
||||
"""
|
||||
if opts['verbose']:
|
||||
- print ("Comparing Replica to Master...")
|
||||
+ print ("Comparing Replica to Supplier...")
|
||||
MLDIF.seek(0)
|
||||
RLDIF.seek(0)
|
||||
missing = False
|
||||
@@ -811,7 +811,7 @@ def do_offline_report(opts, output_file=None):
|
||||
if mresult['entry'] is None and mresult['glue'] is None:
|
||||
MLDIF.seek(rresult['idx']) # Set the LDIF cursor/index to the last good line
|
||||
if not missing:
|
||||
- missing_report += (' Entries missing on Master:\n')
|
||||
+ missing_report += (' Entries missing on Supplier:\n')
|
||||
missing = True
|
||||
if rresult['entry'] and 'createtimestamp' in rresult['entry'].data:
|
||||
missing_report += (' - %s (Created on Replica at: %s)\n' %
|
||||
@@ -837,12 +837,12 @@ def do_offline_report(opts, output_file=None):
|
||||
final_report += get_ruv_report(opts)
|
||||
final_report += ('Entry Counts\n')
|
||||
final_report += ('=====================================================\n\n')
|
||||
- final_report += ('Master: %d\n' % (m_count))
|
||||
+ final_report += ('Supplier: %d\n' % (m_count))
|
||||
final_report += ('Replica: %d\n\n' % (r_count))
|
||||
|
||||
final_report += ('\nTombstones\n')
|
||||
final_report += ('=====================================================\n\n')
|
||||
- final_report += ('Master: %d\n' % (mtombstones))
|
||||
+ final_report += ('Supplier: %d\n' % (mtombstones))
|
||||
final_report += ('Replica: %d\n' % (rtombstones))
|
||||
|
||||
final_report += get_conflict_report(mconflicts, rconflicts, opts['conflicts'])
|
||||
@@ -859,9 +859,9 @@ def do_offline_report(opts, output_file=None):
|
||||
final_report += ('\nResult\n')
|
||||
final_report += ('=====================================================\n\n')
|
||||
if missing_report == "" and len(diff_report) == 0:
|
||||
- final_report += ('No replication differences between Master and Replica\n')
|
||||
+ final_report += ('No replication differences between Supplier and Replica\n')
|
||||
else:
|
||||
- final_report += ('There are replication differences between Master and Replica\n')
|
||||
+ final_report += ('There are replication differences between Supplier and Replica\n')
|
||||
|
||||
if output_file:
|
||||
output_file.write(final_report)
|
||||
@@ -871,8 +871,8 @@ def do_offline_report(opts, output_file=None):
|
||||
|
||||
def check_for_diffs(mentries, mglue, rentries, rglue, report, opts):
|
||||
"""Online mode only - Check for diffs, return the updated report
|
||||
- :param mentries - Master entries
|
||||
- :param mglue - Master glue entries
|
||||
+ :param mentries - Supplier entries
|
||||
+ :param mglue - Supplier glue entries
|
||||
:param rentries - Replica entries
|
||||
:param rglue - Replica glue entries
|
||||
:param report - A Dict of the entire report
|
||||
@@ -947,8 +947,8 @@ def validate_suffix(ldapnode, suffix, hostname):
|
||||
# Check suffix is replicated
|
||||
try:
|
||||
replica_filter = "(&(objectclass=nsds5replica)(nsDS5ReplicaRoot=%s))" % suffix
|
||||
- master_replica = ldapnode.search_s("cn=config",ldap.SCOPE_SUBTREE,replica_filter)
|
||||
- if (len(master_replica) != 1):
|
||||
+ supplier_replica = ldapnode.search_s("cn=config",ldap.SCOPE_SUBTREE,replica_filter)
|
||||
+ if (len(supplier_replica) != 1):
|
||||
print("Error: Failed to validate suffix in {}. {} is not replicated.".format(hostname, suffix))
|
||||
return False
|
||||
except ldap.LDAPError as e:
|
||||
@@ -969,7 +969,7 @@ def connect_to_replicas(opts):
|
||||
muri = "%s://%s" % (opts['mprotocol'], opts['mhost'].replace("/", "%2f"))
|
||||
else:
|
||||
muri = "%s://%s:%s/" % (opts['mprotocol'], opts['mhost'], opts['mport'])
|
||||
- master = SimpleLDAPObject(muri)
|
||||
+ supplier = SimpleLDAPObject(muri)
|
||||
|
||||
if opts['rprotocol'].lower() == 'ldapi':
|
||||
ruri = "%s://%s" % (opts['rprotocol'], opts['rhost'].replace("/", "%2f"))
|
||||
@@ -978,23 +978,23 @@ def connect_to_replicas(opts):
|
||||
replica = SimpleLDAPObject(ruri)
|
||||
|
||||
# Set timeouts
|
||||
- master.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
|
||||
- master.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
|
||||
+ supplier.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
|
||||
+ supplier.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
|
||||
replica.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
|
||||
replica.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
|
||||
|
||||
# Setup Secure Connection
|
||||
if opts['certdir'] is not None:
|
||||
- # Setup Master
|
||||
+ # Setup Supplier
|
||||
if opts['mprotocol'] != LDAPI:
|
||||
- master.set_option(ldap.OPT_X_TLS_CACERTDIR, opts['certdir'])
|
||||
- master.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD)
|
||||
+ supplier.set_option(ldap.OPT_X_TLS_CACERTDIR, opts['certdir'])
|
||||
+ supplier.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD)
|
||||
if opts['mprotocol'] == LDAP:
|
||||
# Do StartTLS
|
||||
try:
|
||||
- master.start_tls_s()
|
||||
+ supplier.start_tls_s()
|
||||
except ldap.LDAPError as e:
|
||||
- print('TLS negotiation failed on Master: {}'.format(str(e)))
|
||||
+ print('TLS negotiation failed on Supplier: {}'.format(str(e)))
|
||||
exit(1)
|
||||
|
||||
# Setup Replica
|
||||
@@ -1006,17 +1006,17 @@ def connect_to_replicas(opts):
|
||||
try:
|
||||
replica.start_tls_s()
|
||||
except ldap.LDAPError as e:
|
||||
- print('TLS negotiation failed on Master: {}'.format(str(e)))
|
||||
+ print('TLS negotiation failed on Supplier: {}'.format(str(e)))
|
||||
exit(1)
|
||||
|
||||
- # Open connection to master
|
||||
+ # Open connection to Supplier
|
||||
try:
|
||||
- master.simple_bind_s(opts['binddn'], opts['bindpw'])
|
||||
+ supplier.simple_bind_s(opts['binddn'], opts['bindpw'])
|
||||
except ldap.SERVER_DOWN as e:
|
||||
print(f"Cannot connect to {muri} ({str(e)})")
|
||||
sys.exit(1)
|
||||
except ldap.LDAPError as e:
|
||||
- print("Error: Failed to authenticate to Master: ({}). "
|
||||
+ print("Error: Failed to authenticate to Supplier: ({}). "
|
||||
"Please check your credentials and LDAP urls are correct.".format(str(e)))
|
||||
sys.exit(1)
|
||||
|
||||
@@ -1034,7 +1034,7 @@ def connect_to_replicas(opts):
|
||||
# Validate suffix
|
||||
if opts['verbose']:
|
||||
print ("Validating suffix ...")
|
||||
- if not validate_suffix(master, opts['suffix'], opts['mhost']):
|
||||
+ if not validate_suffix(supplier, opts['suffix'], opts['mhost']):
|
||||
sys.exit(1)
|
||||
|
||||
if not validate_suffix(replica,opts['suffix'], opts['rhost']):
|
||||
@@ -1042,16 +1042,16 @@ def connect_to_replicas(opts):
|
||||
|
||||
# Get the RUVs
|
||||
if opts['verbose']:
|
||||
- print ("Gathering Master's RUV...")
|
||||
+ print ("Gathering Supplier's RUV...")
|
||||
try:
|
||||
- master_ruv = master.search_s(opts['suffix'], ldap.SCOPE_SUBTREE, RUV_FILTER, ['nsds50ruv'])
|
||||
- if len(master_ruv) > 0:
|
||||
- opts['master_ruv'] = ensure_list_str(master_ruv[0][1]['nsds50ruv'])
|
||||
+ supplier_ruv = supplier.search_s(opts['suffix'], ldap.SCOPE_SUBTREE, RUV_FILTER, ['nsds50ruv'])
|
||||
+ if len(supplier_ruv) > 0:
|
||||
+ opts['supplier_ruv'] = ensure_list_str(supplier_ruv[0][1]['nsds50ruv'])
|
||||
else:
|
||||
- print("Error: Master does not have an RUV entry")
|
||||
+ print("Error: Supplier does not have an RUV entry")
|
||||
sys.exit(1)
|
||||
except ldap.LDAPError as e:
|
||||
- print("Error: Failed to get Master RUV entry: {}".format(str(e)))
|
||||
+ print("Error: Failed to get Supplier RUV entry: {}".format(str(e)))
|
||||
sys.exit(1)
|
||||
|
||||
if opts['verbose']:
|
||||
@@ -1067,12 +1067,12 @@ def connect_to_replicas(opts):
|
||||
print("Error: Failed to get Replica RUV entry: {}".format(str(e)))
|
||||
sys.exit(1)
|
||||
|
||||
- # Get the master RID
|
||||
+ # Get the Supplier RID
|
||||
if opts['verbose']:
|
||||
- print("Getting Master's replica ID")
|
||||
+ print("Getting Supplier's replica ID")
|
||||
try:
|
||||
search_filter = "(&(objectclass=nsds5Replica)(nsDS5ReplicaRoot={})(nsDS5ReplicaId=*))".format(opts['suffix'])
|
||||
- replica_entry = master.search_s("cn=config", ldap.SCOPE_SUBTREE, search_filter)
|
||||
+ replica_entry = supplier.search_s("cn=config", ldap.SCOPE_SUBTREE, search_filter)
|
||||
if len(replica_entry) > 0:
|
||||
opts['rid'] = ensure_int(replica_entry[0][1]['nsDS5ReplicaId'][0])
|
||||
else:
|
||||
@@ -1081,7 +1081,7 @@ def connect_to_replicas(opts):
|
||||
print("Error: Failed to get Replica entry: {}".format(str(e)))
|
||||
sys.exit(1)
|
||||
|
||||
- return (master, replica, opts)
|
||||
+ return (supplier, replica, opts)
|
||||
|
||||
|
||||
def print_online_report(report, opts, output_file):
|
||||
@@ -1104,11 +1104,11 @@ def print_online_report(report, opts, output_file):
|
||||
final_report += get_ruv_report(opts)
|
||||
final_report += ('Entry Counts\n')
|
||||
final_report += ('=====================================================\n\n')
|
||||
- final_report += ('Master: %d\n' % (report['m_count']))
|
||||
+ final_report += ('Supplier: %d\n' % (report['m_count']))
|
||||
final_report += ('Replica: %d\n\n' % (report['r_count']))
|
||||
final_report += ('\nTombstones\n')
|
||||
final_report += ('=====================================================\n\n')
|
||||
- final_report += ('Master: %d\n' % (report['mtombstones']))
|
||||
+ final_report += ('Supplier: %d\n' % (report['mtombstones']))
|
||||
final_report += ('Replica: %d\n' % (report['rtombstones']))
|
||||
final_report += report['conflict']
|
||||
missing = False
|
||||
@@ -1121,7 +1121,7 @@ def print_online_report(report, opts, output_file):
|
||||
final_report += (' Entries missing on Replica:\n')
|
||||
for entry in report['r_missing']:
|
||||
if 'createtimestamp' in entry.data:
|
||||
- final_report += (' - %s (Created on Master at: %s)\n' %
|
||||
+ final_report += (' - %s (Created on Supplier at: %s)\n' %
|
||||
(entry.dn, convert_timestamp(entry.data['createtimestamp'][0])))
|
||||
else:
|
||||
final_report += (' - %s\n' % (entry.dn))
|
||||
@@ -1129,7 +1129,7 @@ def print_online_report(report, opts, output_file):
|
||||
if m_missing > 0:
|
||||
if r_missing > 0:
|
||||
final_report += ('\n')
|
||||
- final_report += (' Entries missing on Master:\n')
|
||||
+ final_report += (' Entries missing on Supplier:\n')
|
||||
for entry in report['m_missing']:
|
||||
if 'createtimestamp' in entry.data:
|
||||
final_report += (' - %s (Created on Replica at: %s)\n' %
|
||||
@@ -1146,9 +1146,9 @@ def print_online_report(report, opts, output_file):
|
||||
final_report += ('\nResult\n')
|
||||
final_report += ('=====================================================\n\n')
|
||||
if not missing and len(report['diff']) == 0:
|
||||
- final_report += ('No replication differences between Master and Replica\n')
|
||||
+ final_report += ('No replication differences between Supplier and Replica\n')
|
||||
else:
|
||||
- final_report += ('There are replication differences between Master and Replica\n')
|
||||
+ final_report += ('There are replication differences between Supplier and Replica\n')
|
||||
|
||||
if output_file:
|
||||
output_file.write(final_report)
|
||||
@@ -1170,7 +1170,7 @@ def remove_state_info(entry):
|
||||
|
||||
def get_conflict_report(mentries, rentries, verbose):
|
||||
"""Gather the conflict entry dn's for each replica
|
||||
- :param mentries - Master entries
|
||||
+ :param mentries - Supplier entries
|
||||
:param rentries - Replica entries
|
||||
:param verbose - verbose logging
|
||||
:return - A text blob to dispaly in the report
|
||||
@@ -1197,7 +1197,7 @@ def get_conflict_report(mentries, rentries, verbose):
|
||||
report = "\n\nConflict Entries\n"
|
||||
report += "=====================================================\n\n"
|
||||
if len(m_conflicts) > 0:
|
||||
- report += ('Master Conflict Entries: %d\n' % (len(m_conflicts)))
|
||||
+ report += ('Supplier Conflict Entries: %d\n' % (len(m_conflicts)))
|
||||
if verbose:
|
||||
for entry in m_conflicts:
|
||||
report += ('\n - %s\n' % (entry['dn']))
|
||||
@@ -1239,8 +1239,8 @@ def do_online_report(opts, output_file=None):
|
||||
rconflicts = []
|
||||
mconflicts = []
|
||||
|
||||
- # Fire off paged searches on Master and Replica
|
||||
- master, replica, opts = connect_to_replicas(opts)
|
||||
+ # Fire off paged searches on Supplier and Replica
|
||||
+ supplier, replica, opts = connect_to_replicas(opts)
|
||||
|
||||
if opts['verbose']:
|
||||
print('Start searching and comparing...')
|
||||
@@ -1248,12 +1248,12 @@ def do_online_report(opts, output_file=None):
|
||||
controls = [paged_ctrl]
|
||||
req_pr_ctrl = controls[0]
|
||||
try:
|
||||
- master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
|
||||
- "(|(objectclass=*)(objectclass=ldapsubentry)(objectclass=nstombstone))",
|
||||
- ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'],
|
||||
- serverctrls=controls)
|
||||
+ supplier_msgid = supplier.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
|
||||
+ "(|(objectclass=*)(objectclass=ldapsubentry)(objectclass=nstombstone))",
|
||||
+ ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'],
|
||||
+ serverctrls=controls)
|
||||
except ldap.LDAPError as e:
|
||||
- print("Error: Failed to get Master entries: %s", str(e))
|
||||
+ print("Error: Failed to get Supplier entries: %s", str(e))
|
||||
sys.exit(1)
|
||||
try:
|
||||
replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
|
||||
@@ -1268,11 +1268,11 @@ def do_online_report(opts, output_file=None):
|
||||
while not m_done or not r_done:
|
||||
try:
|
||||
if not m_done:
|
||||
- m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid)
|
||||
+ m_rtype, m_rdata, m_rmsgid, m_rctrls = supplier.result3(supplier_msgid)
|
||||
elif not r_done:
|
||||
m_rdata = []
|
||||
except ldap.LDAPError as e:
|
||||
- print("Error: Problem getting the results from the master: %s", str(e))
|
||||
+ print("Error: Problem getting the results from the Supplier: %s", str(e))
|
||||
sys.exit(1)
|
||||
try:
|
||||
if not r_done:
|
||||
@@ -1299,7 +1299,7 @@ def do_online_report(opts, output_file=None):
|
||||
report, opts)
|
||||
|
||||
if not m_done:
|
||||
- # Master
|
||||
+ # Supplier
|
||||
m_pctrls = [
|
||||
c
|
||||
for c in m_rctrls
|
||||
@@ -1310,11 +1310,11 @@ def do_online_report(opts, output_file=None):
|
||||
try:
|
||||
# Copy cookie from response control to request control
|
||||
req_pr_ctrl.cookie = m_pctrls[0].cookie
|
||||
- master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
|
||||
+ supplier_msgid = supplier.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
|
||||
"(|(objectclass=*)(objectclass=ldapsubentry))",
|
||||
['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
|
||||
except ldap.LDAPError as e:
|
||||
- print("Error: Problem searching the master: %s", str(e))
|
||||
+ print("Error: Problem searching the Supplier: %s", str(e))
|
||||
sys.exit(1)
|
||||
else:
|
||||
m_done = True # No more pages available
|
||||
@@ -1354,7 +1354,7 @@ def do_online_report(opts, output_file=None):
|
||||
print_online_report(report, opts, output_file)
|
||||
|
||||
# unbind
|
||||
- master.unbind_s()
|
||||
+ supplier.unbind_s()
|
||||
replica.unbind_s()
|
||||
|
||||
|
||||
@@ -1367,18 +1367,18 @@ def init_online_params(args):
|
||||
|
||||
# Make sure the URLs are different
|
||||
if args.murl == args.rurl:
|
||||
- print("Master and Replica LDAP URLs are the same, they must be different")
|
||||
+ print("Supplier and Replica LDAP URLs are the same, they must be different")
|
||||
sys.exit(1)
|
||||
|
||||
- # Parse Master url
|
||||
+ # Parse Supplier url
|
||||
if not ldapurl.isLDAPUrl(args.murl):
|
||||
- print("Master LDAP URL is invalid")
|
||||
+ print("Supplier LDAP URL is invalid")
|
||||
sys.exit(1)
|
||||
murl = ldapurl.LDAPUrl(args.murl)
|
||||
if murl.urlscheme in VALID_PROTOCOLS:
|
||||
opts['mprotocol'] = murl.urlscheme
|
||||
else:
|
||||
- print('Unsupported ldap url protocol (%s) for Master, please use "ldaps" or "ldap"' %
|
||||
+ print('Unsupported ldap url protocol (%s) for Supplier, please use "ldaps" or "ldap"' %
|
||||
murl.urlscheme)
|
||||
sys.exit(1)
|
||||
|
||||
@@ -1520,7 +1520,7 @@ def offline_report(args):
|
||||
print ("LDIF file ({}) is empty".format(ldif_dir))
|
||||
sys.exit(1)
|
||||
if opts['mldif'] == opts['rldif']:
|
||||
- print("The Master and Replica LDIF files must be different")
|
||||
+ print("The Supplier and Replica LDIF files must be different")
|
||||
sys.exit(1)
|
||||
|
||||
OUTPUT_FILE = None
|
||||
@@ -1547,7 +1547,7 @@ def get_state(args):
|
||||
"""Just do the RUV comparision
|
||||
"""
|
||||
opts = init_online_params(args)
|
||||
- master, replica, opts = connect_to_replicas(opts)
|
||||
+ supplier, replica, opts = connect_to_replicas(opts)
|
||||
print(get_ruv_state(opts))
|
||||
|
||||
|
||||
@@ -1569,10 +1569,10 @@ def main():
|
||||
# Get state
|
||||
state_parser = subparsers.add_parser('state', help="Get the current replicaton state between two replicas")
|
||||
state_parser.set_defaults(func=get_state)
|
||||
- state_parser.add_argument('-m', '--master-url', help='The LDAP URL for the Master server',
|
||||
- dest='murl', default=None, required=True)
|
||||
+ state_parser.add_argument('-m', '--supplier-url', help='The LDAP URL for the Supplier server',
|
||||
+ dest='murl', default=None, required=True)
|
||||
state_parser.add_argument('-r', '--replica-url', help='The LDAP URL for the Replica server',
|
||||
- dest='rurl', required=True, default=None)
|
||||
+ dest='rurl', required=True, default=None)
|
||||
state_parser.add_argument('-b', '--suffix', help='Replicated suffix', dest='suffix', required=True)
|
||||
state_parser.add_argument('-D', '--bind-dn', help='The Bind DN', required=True, dest='binddn', default=None)
|
||||
state_parser.add_argument('-w', '--bind-pw', help='The Bind password', dest='bindpw', default=None)
|
||||
@@ -1586,7 +1586,7 @@ def main():
|
||||
# Online mode
|
||||
online_parser = subparsers.add_parser('online', help="Compare two online replicas for differences")
|
||||
online_parser.set_defaults(func=online_report)
|
||||
- online_parser.add_argument('-m', '--master-url', help='The LDAP URL for the Master server (REQUIRED)',
|
||||
+ online_parser.add_argument('-m', '--supplier-url', help='The LDAP URL for the Supplier server (REQUIRED)',
|
||||
dest='murl', default=None, required=True)
|
||||
online_parser.add_argument('-r', '--replica-url', help='The LDAP URL for the Replica server (REQUIRED)',
|
||||
dest='rurl', required=True, default=None)
|
||||
@@ -1612,12 +1612,12 @@ def main():
|
||||
# Offline LDIF mode
|
||||
offline_parser = subparsers.add_parser('offline', help="Compare two replication LDIF files for differences (LDIF file generated by 'db2ldif -r')")
|
||||
offline_parser.set_defaults(func=offline_report)
|
||||
- offline_parser.add_argument('-m', '--master-ldif', help='Master LDIF file',
|
||||
+ offline_parser.add_argument('-m', '--supplier-ldif', help='Supplier LDIF file',
|
||||
dest='mldif', default=None, required=True)
|
||||
offline_parser.add_argument('-r', '--replica-ldif', help='Replica LDIF file',
|
||||
dest='rldif', default=None, required=True)
|
||||
offline_parser.add_argument('--rid', dest='rid', default=None, required=True,
|
||||
- help='The Replica Identifer (rid) for the "Master" server')
|
||||
+ help='The Replica Identifier (rid) for the "Supplier" server')
|
||||
offline_parser.add_argument('-b', '--suffix', help='Replicated suffix', dest='suffix', required=True)
|
||||
offline_parser.add_argument('-c', '--conflicts', help='Display verbose conflict information', action='store_true',
|
||||
dest='conflicts', default=False)
|
||||
--
|
||||
2.31.1
|
||||
|
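The hunks above mechanically rename every remaining "master" reference in the comparison tool to "supplier" (variables, report text, and the -m/--supplier-url and --supplier-ldif options). For orientation, a minimal Python sketch of the supplier-side connection flow those hunks touch, using python-ldap directly; the URL, credentials, suffix and RUV filter below are placeholders, not values taken from the patch:

    import ldap
    from ldap.ldapobject import SimpleLDAPObject

    RUV_FILTER = "(objectclass=nstombstone)"  # placeholder; the tool defines its own RUV_FILTER constant

    supplier = SimpleLDAPObject("ldap://supplier.example.com:389/")
    supplier.set_option(ldap.OPT_NETWORK_TIMEOUT, 5)
    supplier.set_option(ldap.OPT_TIMEOUT, 5)
    supplier.simple_bind_s("cn=Directory Manager", "password")

    # Same kind of RUV lookup the tool performs before paging through entries
    supplier_ruv = supplier.search_s("dc=example,dc=com", ldap.SCOPE_SUBTREE,
                                     RUV_FILTER, ["nsds50ruv"])
    supplier.unbind_s()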
@ -1,29 +0,0 @@
|
||||
From 27cd055197bc3cae458a1f86621aa5410c66dd2c Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 20 Jan 2025 15:51:24 -0500
|
||||
Subject: [PATCH] Issue 6509 - Fix cherry pick issue (race condition in Paged
|
||||
results)
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6509
|
||||
---
|
||||
ldap/servers/slapd/daemon.c | 4 ++--
|
||||
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 13dfe250d..57e07e5f5 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1620,8 +1620,8 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else if (c->c_prfd != NULL) {
|
||||
if ((!c->c_gettingber) && (c->c_threadnumber < c->c_max_threads_per_conn)) {
|
||||
- ct->fd[listnum][count].fd = c->c_prfd;
|
||||
- ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
|
||||
+ ct->fd[count].fd = c->c_prfd;
|
||||
+ ct->fd[count].in_flags = SLAPD_POLL_FLAGS;
|
||||
/* slot i of the connection table is mapped to slot
|
||||
* count of the fds array */
|
||||
c->c_fdi = count;
|
||||
--
|
||||
2.48.0
|
||||
|
@ -0,0 +1,373 @@
|
||||
From 55a47c1bfe1ce1c27e470384c4f1d50895db25f7 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Tue, 13 Jul 2021 14:18:03 -0400
|
||||
Subject: [PATCH] Issue 4443 - Internal unindexed searches in syncrepl/retro
|
||||
changelog
|
||||
|
||||
Bug Description:
|
||||
|
||||
When a non-system index is added to a backend it is
|
||||
disabled until the database is initialized or reindexed.
|
||||
So in the case of the retro changelog the changenumber index
|
||||
is always disabled by default since it is never initialized.
|
||||
This leads to unexpected unindexed searches of the retro
|
||||
changelog.
|
||||
|
||||
Fix Description:
|
||||
|
||||
If an index has "nsSystemIndex" set to "true" then enable it
|
||||
immediately.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4443
|
||||
|
||||
Reviewed by: spichugi & tbordaz(Thanks!!)
|
||||
---
|
||||
.../tests/suites/retrocl/basic_test.py | 53 ++++++++-------
|
||||
.../suites/retrocl/retrocl_indexing_test.py | 68 +++++++++++++++++++
|
||||
ldap/servers/plugins/retrocl/retrocl_create.c | 2 +-
|
||||
.../slapd/back-ldbm/ldbm_index_config.c | 25 +++++--
|
||||
src/lib389/lib389/_mapped_object.py | 13 ++++
|
||||
5 files changed, 130 insertions(+), 31 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
|
||||
index f3bc50f29..84d513829 100644
|
||||
--- a/dirsrvtests/tests/suites/retrocl/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
|
||||
@@ -8,7 +8,6 @@
|
||||
|
||||
import logging
|
||||
import ldap
|
||||
-import time
|
||||
import pytest
|
||||
from lib389.topologies import topology_st
|
||||
from lib389.plugins import RetroChangelogPlugin
|
||||
@@ -18,7 +17,8 @@ from lib389.tasks import *
|
||||
from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
|
||||
from lib389.cli_base.dsrc import dsrc_arg_concat
|
||||
from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add_attr
|
||||
-from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
|
||||
+from lib389.idm.user import UserAccount, UserAccounts
|
||||
+from lib389._mapped_object import DSLdapObjects
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
@@ -82,7 +82,7 @@ def test_retrocl_exclude_attr_add(topology_st):
|
||||
|
||||
log.info('Adding user1')
|
||||
try:
|
||||
- user1 = users.create(properties={
|
||||
+ users.create(properties={
|
||||
'sn': '1',
|
||||
'cn': 'user 1',
|
||||
'uid': 'user1',
|
||||
@@ -97,17 +97,18 @@ def test_retrocl_exclude_attr_add(topology_st):
|
||||
except ldap.ALREADY_EXISTS:
|
||||
pass
|
||||
except ldap.LDAPError as e:
|
||||
- log.error("Failed to add user1")
|
||||
+ log.error("Failed to add user1: " + str(e))
|
||||
|
||||
log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
|
||||
try:
|
||||
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
|
||||
+ retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX)
|
||||
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
|
||||
except ldap.LDAPError as e:
|
||||
- log.fatal("Changelog search failed, error: " +str(e))
|
||||
+ log.fatal("Changelog search failed, error: " + str(e))
|
||||
assert False
|
||||
assert len(cllist) > 0
|
||||
- if cllist[0].hasAttr('changes'):
|
||||
- clstr = (cllist[0].getValue('changes')).decode()
|
||||
+ if cllist[0].present('changes'):
|
||||
+ clstr = str(cllist[0].get_attr_vals_utf8('changes'))
|
||||
assert ATTR_HOMEPHONE in clstr
|
||||
assert ATTR_CARLICENSE in clstr
|
||||
|
||||
@@ -134,7 +135,7 @@ def test_retrocl_exclude_attr_add(topology_st):
|
||||
|
||||
log.info('Adding user2')
|
||||
try:
|
||||
- user2 = users.create(properties={
|
||||
+ users.create(properties={
|
||||
'sn': '2',
|
||||
'cn': 'user 2',
|
||||
'uid': 'user2',
|
||||
@@ -149,18 +150,18 @@ def test_retrocl_exclude_attr_add(topology_st):
|
||||
except ldap.ALREADY_EXISTS:
|
||||
pass
|
||||
except ldap.LDAPError as e:
|
||||
- log.error("Failed to add user2")
|
||||
+ log.error("Failed to add user2: " + str(e))
|
||||
|
||||
log.info('Verify homePhone attr is not in the changelog changestring')
|
||||
try:
|
||||
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN)
|
||||
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER2_DN})')
|
||||
assert len(cllist) > 0
|
||||
- if cllist[0].hasAttr('changes'):
|
||||
- clstr = (cllist[0].getValue('changes')).decode()
|
||||
+ if cllist[0].present('changes'):
|
||||
+ clstr = str(cllist[0].get_attr_vals_utf8('changes'))
|
||||
assert ATTR_HOMEPHONE not in clstr
|
||||
assert ATTR_CARLICENSE in clstr
|
||||
except ldap.LDAPError as e:
|
||||
- log.fatal("Changelog search failed, error: " +str(e))
|
||||
+ log.fatal("Changelog search failed, error: " + str(e))
|
||||
assert False
|
||||
|
||||
def test_retrocl_exclude_attr_mod(topology_st):
|
||||
@@ -228,19 +229,20 @@ def test_retrocl_exclude_attr_mod(topology_st):
|
||||
'homeDirectory': '/home/user1',
|
||||
'userpassword': USER_PW})
|
||||
except ldap.ALREADY_EXISTS:
|
||||
- pass
|
||||
+ user1 = UserAccount(st, dn=USER1_DN)
|
||||
except ldap.LDAPError as e:
|
||||
- log.error("Failed to add user1")
|
||||
+ log.error("Failed to add user1: " + str(e))
|
||||
|
||||
log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
|
||||
try:
|
||||
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
|
||||
+ retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX)
|
||||
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
|
||||
except ldap.LDAPError as e:
|
||||
- log.fatal("Changelog search failed, error: " +str(e))
|
||||
+ log.fatal("Changelog search failed, error: " + str(e))
|
||||
assert False
|
||||
assert len(cllist) > 0
|
||||
- if cllist[0].hasAttr('changes'):
|
||||
- clstr = (cllist[0].getValue('changes')).decode()
|
||||
+ if cllist[0].present('changes'):
|
||||
+ clstr = str(cllist[0].get_attr_vals_utf8('changes'))
|
||||
assert ATTR_HOMEPHONE in clstr
|
||||
assert ATTR_CARLICENSE in clstr
|
||||
|
||||
@@ -267,24 +269,25 @@ def test_retrocl_exclude_attr_mod(topology_st):
|
||||
|
||||
log.info('Modify user1 carLicense attribute')
|
||||
try:
|
||||
- st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")])
|
||||
+ user1.replace(ATTR_CARLICENSE, "123WX321")
|
||||
except ldap.LDAPError as e:
|
||||
log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc'])
|
||||
assert False
|
||||
|
||||
log.info('Verify carLicense attr is not in the changelog changestring')
|
||||
try:
|
||||
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
|
||||
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
|
||||
assert len(cllist) > 0
|
||||
# There will be 2 entries in the changelog for this user, we are only
|
||||
#interested in the second one, the modify operation.
|
||||
- if cllist[1].hasAttr('changes'):
|
||||
- clstr = (cllist[1].getValue('changes')).decode()
|
||||
+ if cllist[1].present('changes'):
|
||||
+ clstr = str(cllist[1].get_attr_vals_utf8('changes'))
|
||||
assert ATTR_CARLICENSE not in clstr
|
||||
except ldap.LDAPError as e:
|
||||
- log.fatal("Changelog search failed, error: " +str(e))
|
||||
+ log.fatal("Changelog search failed, error: " + str(e))
|
||||
assert False
|
||||
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py
|
||||
new file mode 100644
|
||||
index 000000000..b1dfe962c
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py
|
||||
@@ -0,0 +1,68 @@
|
||||
+import logging
|
||||
+import pytest
|
||||
+import os
|
||||
+from lib389._constants import RETROCL_SUFFIX, DEFAULT_SUFFIX
|
||||
+from lib389.topologies import topology_st as topo
|
||||
+from lib389.plugins import RetroChangelogPlugin
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389._mapped_object import DSLdapObjects
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+
|
||||
+def test_indexing_is_online(topo):
|
||||
+ """Test that the changenmumber index is online right after enabling the plugin
|
||||
+
|
||||
+ :id: 16f4c001-9e0c-4448-a2b3-08ac1e85d40f
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Enable retro cl
|
||||
+ 2. Perform some updates
|
||||
+ 3. Search for "(changenumber>=-1)", and it is not partially unindexed
|
||||
+ 4. Search for "(&(changenumber>=-1)(targetuniqueid=*))", and it is not partially unindexed
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ """
|
||||
+
|
||||
+ # Enable plugin
|
||||
+ topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off')
|
||||
+ plugin = RetroChangelogPlugin(topo.standalone)
|
||||
+ plugin.enable()
|
||||
+ topo.standalone.restart()
|
||||
+
|
||||
+ # Do a bunch of updates
|
||||
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
|
||||
+ user_entry = users.create(properties={
|
||||
+ 'sn': '1',
|
||||
+ 'cn': 'user 1',
|
||||
+ 'uid': 'user1',
|
||||
+ 'uidNumber': '11',
|
||||
+ 'gidNumber': '111',
|
||||
+ 'givenname': 'user1',
|
||||
+ 'homePhone': '0861234567',
|
||||
+ 'carLicense': '131D16674',
|
||||
+ 'mail': 'user1@whereever.com',
|
||||
+ 'homeDirectory': '/home'
|
||||
+ })
|
||||
+ for count in range(0, 10):
|
||||
+ user_entry.replace('mail', f'test{count}@test.com')
|
||||
+
|
||||
+ # Search the retro cl, and check for error messages
|
||||
+ filter_simple = '(changenumber>=-1)'
|
||||
+ filter_compound = '(&(changenumber>=-1)(targetuniqueid=*))'
|
||||
+ retro_changelog_suffix = DSLdapObjects(topo.standalone, basedn=RETROCL_SUFFIX)
|
||||
+ retro_changelog_suffix.filter(filter_simple)
|
||||
+ assert not topo.standalone.searchAccessLog('Partially Unindexed Filter')
|
||||
+
|
||||
+ # Search the retro cl again with compound filter
|
||||
+ retro_changelog_suffix.filter(filter_compound)
|
||||
+ assert not topo.standalone.searchAccessLog('Partially Unindexed Filter')
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main(["-s", CURRENT_FILE])
|
||||
diff --git a/ldap/servers/plugins/retrocl/retrocl_create.c b/ldap/servers/plugins/retrocl/retrocl_create.c
|
||||
index 571e6899f..5bfde7831 100644
|
||||
--- a/ldap/servers/plugins/retrocl/retrocl_create.c
|
||||
+++ b/ldap/servers/plugins/retrocl/retrocl_create.c
|
||||
@@ -133,7 +133,7 @@ retrocl_create_be(const char *bedir)
|
||||
val.bv_len = strlen(val.bv_val);
|
||||
slapi_entry_add_values(e, "cn", vals);
|
||||
|
||||
- val.bv_val = "false";
|
||||
+ val.bv_val = "true"; /* enables the index */
|
||||
val.bv_len = strlen(val.bv_val);
|
||||
slapi_entry_add_values(e, "nssystemindex", vals);
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
|
||||
index 9722d0ce7..38e7368e1 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
|
||||
@@ -25,7 +25,7 @@ int ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, Slapi_Entry *en
|
||||
#define INDEXTYPE_NONE 1
|
||||
|
||||
static int
|
||||
-ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, char *err_buf)
|
||||
+ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, PRBool *is_system_index, char *err_buf)
|
||||
{
|
||||
Slapi_Attr *attr;
|
||||
const struct berval *attrValue;
|
||||
@@ -78,6 +78,15 @@ ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_st
|
||||
}
|
||||
}
|
||||
|
||||
+ *is_system_index = PR_FALSE;
|
||||
+ if (0 == slapi_entry_attr_find(e, "nsSystemIndex", &attr)) {
|
||||
+ slapi_attr_first_value(attr, &sval);
|
||||
+ attrValue = slapi_value_get_berval(sval);
|
||||
+ if (strcasecmp(attrValue->bv_val, "true") == 0) {
|
||||
+ *is_system_index = PR_TRUE;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
/* ok the entry is good to process, pass it to attr_index_config */
|
||||
if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0, err_buf)) {
|
||||
slapi_ch_free_string(index_name);
|
||||
@@ -101,9 +110,10 @@ ldbm_index_init_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
|
||||
void *arg)
|
||||
{
|
||||
ldbm_instance *inst = (ldbm_instance *)arg;
|
||||
+ PRBool is_system_index = PR_FALSE;
|
||||
|
||||
returntext[0] = '\0';
|
||||
- *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, NULL);
|
||||
+ *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, &is_system_index /* not used */, NULL);
|
||||
if (*returncode == LDAP_SUCCESS) {
|
||||
return SLAPI_DSE_CALLBACK_OK;
|
||||
} else {
|
||||
@@ -126,17 +136,21 @@ ldbm_instance_index_config_add_callback(Slapi_PBlock *pb __attribute__((unused))
|
||||
{
|
||||
ldbm_instance *inst = (ldbm_instance *)arg;
|
||||
char *index_name = NULL;
|
||||
+ PRBool is_system_index = PR_FALSE;
|
||||
|
||||
returntext[0] = '\0';
|
||||
- *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, returntext);
|
||||
+ *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, &is_system_index, returntext);
|
||||
if (*returncode == LDAP_SUCCESS) {
|
||||
struct attrinfo *ai = NULL;
|
||||
/* if the index is a "system" index, we assume it's being added by
|
||||
* by the server, and it's okay for the index to go online immediately.
|
||||
* if not, we set the index "offline" so it won't actually be used
|
||||
* until someone runs db2index on it.
|
||||
+ * If caller wants to add an index that they want to be online
|
||||
+ * immediately they can also set "nsSystemIndex" to "true" in the
|
||||
+ * index config entry (e.g. is_system_index).
|
||||
*/
|
||||
- if (!ldbm_attribute_always_indexed(index_name)) {
|
||||
+ if (!is_system_index && !ldbm_attribute_always_indexed(index_name)) {
|
||||
ainfo_get(inst->inst_be, index_name, &ai);
|
||||
PR_ASSERT(ai != NULL);
|
||||
ai->ai_indexmask |= INDEX_OFFLINE;
|
||||
@@ -386,13 +400,14 @@ ldbm_instance_index_config_enable_index(ldbm_instance *inst, Slapi_Entry *e)
|
||||
char *index_name = NULL;
|
||||
int rc = LDAP_SUCCESS;
|
||||
struct attrinfo *ai = NULL;
|
||||
+ PRBool is_system_index = PR_FALSE;
|
||||
|
||||
index_name = slapi_entry_attr_get_charptr(e, "cn");
|
||||
if (index_name) {
|
||||
ainfo_get(inst->inst_be, index_name, &ai);
|
||||
}
|
||||
if (!ai) {
|
||||
- rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, NULL);
|
||||
+ rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, &is_system_index /* not used */, NULL);
|
||||
}
|
||||
if (rc == LDAP_SUCCESS) {
|
||||
/* Assume the caller knows if it is OK to go online immediately */
|
||||
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
|
||||
index b6d778b01..fe610d175 100644
|
||||
--- a/src/lib389/lib389/_mapped_object.py
|
||||
+++ b/src/lib389/lib389/_mapped_object.py
|
||||
@@ -148,6 +148,19 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
|
||||
return True
|
||||
|
||||
+ def search(self, scope="subtree", filter='objectclass=*'):
|
||||
+ search_scope = ldap.SCOPE_SUBTREE
|
||||
+ if scope == 'base':
|
||||
+ search_scope = ldap.SCOPE_BASE
|
||||
+ elif scope == 'one':
|
||||
+ search_scope = ldap.SCOPE_ONE
|
||||
+ elif scope == 'subtree':
|
||||
+ search_scope = ldap.SCOPE_SUBTREE
|
||||
+ return self._instance.search_ext_s(self._dn, search_scope, filter,
|
||||
+ serverctrls=self._server_controls,
|
||||
+ clientctrls=self._client_controls,
|
||||
+ escapehatch='i am sure')
|
||||
+
|
||||
def display(self, attrlist=['*']):
|
||||
"""Get an entry but represent it as a string LDIF
|
||||
|
||||
--
|
||||
2.31.1
|
||||
|
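Condensed, the new retrocl_indexing_test.py boils down to the following check; a sketch that assumes a running lib389 instance object (for example from the topology_st fixture) and reuses only the APIs shown in the patch:

    from lib389._constants import RETROCL_SUFFIX, DEFAULT_SUFFIX
    from lib389.plugins import RetroChangelogPlugin
    from lib389.idm.user import UserAccounts
    from lib389._mapped_object import DSLdapObjects

    def changenumber_index_is_online(inst):
        # Enable the plugin and disable access log buffering so any
        # unindexed-search notes show up immediately.
        inst.config.set('nsslapd-accesslog-logbuffering', 'off')
        RetroChangelogPlugin(inst).enable()
        inst.restart()

        # Generate a handful of changes for the retro changelog.
        user = UserAccounts(inst, DEFAULT_SUFFIX).create_test_user()
        for count in range(10):
            user.replace('mail', f'test{count}@test.com')

        # The changenumber search must not be flagged as partially unindexed.
        DSLdapObjects(inst, basedn=RETROCL_SUFFIX).filter('(changenumber>=-1)')
        return not inst.searchAccessLog('Partially Unindexed Filter')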
@ -0,0 +1,121 @@
|
||||
From 2f0218f91d35c83a2aaecb71849a54b2481390ab Mon Sep 17 00:00:00 2001
|
||||
From: Firstyear <william@blackhats.net.au>
|
||||
Date: Fri, 9 Jul 2021 11:53:35 +1000
|
||||
Subject: [PATCH] Issue 4817 - BUG - locked crypt accounts on import may allow
|
||||
all passwords (#4819)
|
||||
|
||||
Bug Description: Due to mishandling of short dbpwd hashes, the
|
||||
crypt_r algorithm was misused and was only comparing salts
|
||||
in some cases, rather than checking the actual content
|
||||
of the password.
|
||||
|
||||
Fix Description: Stricter checks on dbpwd lengths to ensure
|
||||
that content passed to crypt_r has at least 2 salt bytes and
|
||||
1 hash byte, as well as stricter checks on ct_memcmp to ensure
|
||||
that compared values are the same length, rather than potentially
|
||||
allowing overruns/short comparisons.
|
||||
|
||||
fixes: https://github.com/389ds/389-ds-base/issues/4817
|
||||
|
||||
Author: William Brown <william@blackhats.net.au>
|
||||
|
||||
Review by: @mreynolds389
|
||||
---
|
||||
.../password/pwd_crypt_asterisk_test.py | 50 +++++++++++++++++++
|
||||
ldap/servers/plugins/pwdstorage/crypt_pwd.c | 20 +++++---
|
||||
2 files changed, 64 insertions(+), 6 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py b/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py
|
||||
new file mode 100644
|
||||
index 000000000..d76614db1
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py
|
||||
@@ -0,0 +1,50 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2021 William Brown <william@blackhats.net.au>
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import ldap
|
||||
+import pytest
|
||||
+from lib389.topologies import topology_st
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389._constants import (DEFAULT_SUFFIX, PASSWORD)
|
||||
+
|
||||
+pytestmark = pytest.mark.tier1
|
||||
+
|
||||
+def test_password_crypt_asterisk_is_rejected(topology_st):
|
||||
+ """It was reported that {CRYPT}* was allowing all passwords to be
|
||||
+ valid in the bind process. This checks that we should be rejecting
|
||||
+ these as they should represent locked accounts. Similar, {CRYPT}!
|
||||
+
|
||||
+ :id: 0b8f1a6a-f3eb-4443-985e-da14d0939dc3
|
||||
+ :setup: Single instance
|
||||
+ :steps: 1. Set a password hash in with CRYPT and the content *
|
||||
+ 2. Test a bind
|
||||
+ 3. Set a password hash in with CRYPT and the content !
|
||||
+ 4. Test a bind
|
||||
+ :expectedresults:
|
||||
+ 1. Successfully set the values
|
||||
+ 2. The bind fails
|
||||
+ 3. Successfully set the values
|
||||
+ 4. The bind fails
|
||||
+ """
|
||||
+ topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on')
|
||||
+ topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'off')
|
||||
+
|
||||
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
+ user = users.create_test_user()
|
||||
+
|
||||
+ user.set('userPassword', "{CRYPT}*")
|
||||
+
|
||||
+ # Attempt to bind with incorrect password.
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ badconn = user.bind('badpassword')
|
||||
+
|
||||
+ user.set('userPassword', "{CRYPT}!")
|
||||
+ # Attempt to bind with incorrect password.
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ badconn = user.bind('badpassword')
|
||||
+
|
||||
diff --git a/ldap/servers/plugins/pwdstorage/crypt_pwd.c b/ldap/servers/plugins/pwdstorage/crypt_pwd.c
|
||||
index 9031b2199..1b37d41ed 100644
|
||||
--- a/ldap/servers/plugins/pwdstorage/crypt_pwd.c
|
||||
+++ b/ldap/servers/plugins/pwdstorage/crypt_pwd.c
|
||||
@@ -48,15 +48,23 @@ static unsigned char itoa64[] = /* 0 ... 63 => ascii - 64 */
|
||||
int
|
||||
crypt_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
{
|
||||
- int rc;
|
||||
- char *cp;
|
||||
+ int rc = -1;
|
||||
+ char *cp = NULL;
|
||||
+ size_t dbpwd_len = strlen(dbpwd);
|
||||
struct crypt_data data;
|
||||
data.initialized = 0;
|
||||
|
||||
- /* we use salt (first 2 chars) of encoded password in call to crypt_r() */
|
||||
- cp = crypt_r(userpwd, dbpwd, &data);
|
||||
- if (cp) {
|
||||
- rc = slapi_ct_memcmp(dbpwd, cp, strlen(dbpwd));
|
||||
+ /*
|
||||
+ * there MUST be at least 2 chars of salt and some pw bytes, else this is INVALID and will
|
||||
+ * allow any password to bind as we then only compare SALTS.
|
||||
+ */
|
||||
+ if (dbpwd_len >= 3) {
|
||||
+ /* we use salt (first 2 chars) of encoded password in call to crypt_r() */
|
||||
+ cp = crypt_r(userpwd, dbpwd, &data);
|
||||
+ }
|
||||
+ /* If these are not the same length, we can not proceed safely with memcmp. */
|
||||
+ if (cp && dbpwd_len == strlen(cp)) {
|
||||
+ rc = slapi_ct_memcmp(dbpwd, cp, dbpwd_len);
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
--
|
||||
2.31.1
|
||||
|
@ -0,0 +1,39 @@
|
||||
From 31d53e7da585723e66b838dcf34b77ea7c9968c6 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Wed, 21 Jul 2021 09:16:30 +0200
|
||||
Subject: [PATCH] Issue 4837 - persistent search returns entries even when an
|
||||
error is returned by content-sync-plugin (#4838)
|
||||
|
||||
Bug description:
|
||||
When an LDAP client sends a sync request control, the server response may contain a sync state control.
|
||||
If the server fails to create the control the search should fail.
|
||||
|
||||
Fix description:
|
||||
In case the server fails to create the response control,
|
||||
the failure is returned from the search callback so the search fails.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4837
|
||||
|
||||
Reviewed by: Simon Pichugin
|
||||
|
||||
Platforms tested: RH8.4
|
||||
---
|
||||
ldap/servers/plugins/sync/sync_refresh.c | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/sync/sync_refresh.c b/ldap/servers/plugins/sync/sync_refresh.c
|
||||
index 646ff760b..4cbb6a949 100644
|
||||
--- a/ldap/servers/plugins/sync/sync_refresh.c
|
||||
+++ b/ldap/servers/plugins/sync/sync_refresh.c
|
||||
@@ -213,7 +213,7 @@ sync_srch_refresh_pre_entry(Slapi_PBlock *pb)
|
||||
Slapi_Entry *e;
|
||||
slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_ENTRY, &e);
|
||||
LDAPControl **ctrl = (LDAPControl **)slapi_ch_calloc(2, sizeof(LDAPControl *));
|
||||
- sync_create_state_control(e, &ctrl[0], LDAP_SYNC_ADD, NULL);
|
||||
+ rc = sync_create_state_control(e, &ctrl[0], LDAP_SYNC_ADD, NULL);
|
||||
slapi_pblock_set(pb, SLAPI_SEARCH_CTRLS, ctrl);
|
||||
}
|
||||
return (rc);
|
||||
--
|
||||
2.31.1
|
||||
|
@ -0,0 +1,49 @@
|
||||
From 616dc9964a4675dea2ab2c2efb9bd31c3903e29d Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 26 Jul 2021 15:22:08 -0400
|
||||
Subject: [PATCH] Hardcode gost crypt password storage scheme
|
||||
|
||||
---
|
||||
.../plugins/pwdstorage/gost_yescrypt.c | 22 -------------------
|
||||
1 file changed, 22 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/pwdstorage/gost_yescrypt.c b/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
|
||||
index 67b39395e..7b0d1653c 100644
|
||||
--- a/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
|
||||
+++ b/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
|
||||
@@ -11,7 +11,6 @@
|
||||
|
||||
#include <crypt.h>
|
||||
|
||||
-#ifdef XCRYPT_VERSION_STR
|
||||
#include <errno.h>
|
||||
int
|
||||
gost_yescrypt_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
@@ -64,24 +63,3 @@ gost_yescrypt_pw_enc(const char *pwd)
|
||||
return enc;
|
||||
}
|
||||
|
||||
-#else
|
||||
-
|
||||
-/*
|
||||
- * We do not have xcrypt, so always fail all checks.
|
||||
- */
|
||||
-int
|
||||
-gost_yescrypt_pw_cmp(const char *userpwd __attribute__((unused)), const char *dbpwd __attribute__((unused)))
|
||||
-{
|
||||
- slapi_log_err(SLAPI_LOG_ERR, GOST_YESCRYPT_SCHEME_NAME,
|
||||
- "Unable to use gost_yescrypt_pw_cmp, xcrypt is not available.\n");
|
||||
- return 1;
|
||||
-}
|
||||
-
|
||||
-char *
|
||||
-gost_yescrypt_pw_enc(const char *pwd __attribute__((unused)))
|
||||
-{
|
||||
- slapi_log_err(SLAPI_LOG_ERR, GOST_YESCRYPT_SCHEME_NAME,
|
||||
- "Unable to use gost_yescrypt_pw_enc, xcrypt is not available.\n");
|
||||
- return NULL;
|
||||
-}
|
||||
-#endif
|
||||
--
|
||||
2.31.1
|
||||
|
@ -0,0 +1,39 @@
|
||||
From a2a51130b2f95316237b85da099a8be734969e54 Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Sat, 24 Apr 2021 21:37:54 +0100
|
||||
Subject: [PATCH] Issue 4734 - import of entry with no parent warning (#4735)
|
||||
|
||||
Description: Online import of ldif file that contains an entry with
|
||||
no parent doesn't generate a task warning.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/4734
|
||||
|
||||
Author: vashirov@redhat.com (Thanks)
|
||||
|
||||
Reviewed by: mreynolds, jchapma
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c | 6 ++++++
|
||||
1 file changed, 6 insertions(+)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
|
||||
index 905a84e74..35183ed59 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
|
||||
@@ -2767,8 +2767,14 @@ import_foreman(void *param)
|
||||
if (job->flags & FLAG_ABORT) {
|
||||
goto error;
|
||||
}
|
||||
+
|
||||
+ /* capture skipped entry warnings for this task */
|
||||
+ if((job) && (job->skipped)) {
|
||||
+ slapi_task_set_warning(job->task, WARN_SKIPPED_IMPORT_ENTRY);
|
||||
+ }
|
||||
}
|
||||
|
||||
+
|
||||
slapi_pblock_destroy(pb);
|
||||
info->state = FINISHED;
|
||||
return;
|
||||
--
|
||||
2.31.1
|
||||
|
@ -0,0 +1,37 @@
|
||||
From f9bc249b2baa11a8ac0eb54e4077eb706d137e38 Mon Sep 17 00:00:00 2001
|
||||
From: Firstyear <william@blackhats.net.au>
|
||||
Date: Thu, 19 Aug 2021 11:06:06 +1000
|
||||
Subject: [PATCH] Issue 4872 - BUG - entryuuid enabled by default causes
|
||||
replication issues (#4876)
|
||||
|
||||
Bug Description: Due to older servers missing the syntax
|
||||
plugin this breaks schema replication and causes cascading
|
||||
errors.
|
||||
|
||||
Fix Description: This changes the syntax to be a case
|
||||
insensitive string, while leaving the plugins in place
|
||||
for other usage.
|
||||
|
||||
fixes: https://github.com/389ds/389-ds-base/issues/4872
|
||||
|
||||
Author: William Brown <william@blackhats.net.au>
|
||||
|
||||
Review by: @mreynolds389 @progier389
|
||||
---
|
||||
ldap/schema/03entryuuid.ldif | 3 ++-
|
||||
1 file changed, 2 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/ldap/schema/03entryuuid.ldif b/ldap/schema/03entryuuid.ldif
|
||||
index cbde981fe..f7a7f40d5 100644
|
||||
--- a/ldap/schema/03entryuuid.ldif
|
||||
+++ b/ldap/schema/03entryuuid.ldif
|
||||
@@ -13,4 +13,5 @@ dn: cn=schema
|
||||
#
|
||||
# attributes
|
||||
#
|
||||
-attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
|
||||
+# attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
|
||||
+attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
|
||||
--
|
||||
2.31.1
|
||||
|
125
SOURCES/0029-Remove-GOST-YESCRYPT-password-sotrage-scheme.patch
Normal file
@ -0,0 +1,125 @@
|
||||
From 120511d35095a48d60abbb7cb2367d0c30fbc757 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 25 Aug 2021 13:20:56 -0400
|
||||
Subject: [PATCH] Remove GOST-YESCRYPT password sotrage scheme
|
||||
|
||||
---
|
||||
.../tests/suites/password/pwd_algo_test.py | 1 -
|
||||
ldap/ldif/template-dse-minimal.ldif.in | 9 ---------
|
||||
ldap/ldif/template-dse.ldif.in | 9 ---------
|
||||
ldap/servers/plugins/pwdstorage/pwd_init.c | 18 ------------------
|
||||
ldap/servers/slapd/fedse.c | 13 -------------
|
||||
5 files changed, 50 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/password/pwd_algo_test.py b/dirsrvtests/tests/suites/password/pwd_algo_test.py
|
||||
index 66bda420e..88f8e40b7 100644
|
||||
--- a/dirsrvtests/tests/suites/password/pwd_algo_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/pwd_algo_test.py
|
||||
@@ -124,7 +124,6 @@ def _test_algo_for_pbkdf2(inst, algo_name):
|
||||
('CLEAR', 'CRYPT', 'CRYPT-MD5', 'CRYPT-SHA256', 'CRYPT-SHA512',
|
||||
'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA',
|
||||
'SSHA256', 'SSHA384', 'SSHA512', 'PBKDF2_SHA256', 'DEFAULT',
|
||||
- 'GOST_YESCRYPT',
|
||||
))
|
||||
def test_pwd_algo_test(topology_st, algo):
|
||||
"""Assert that all of our password algorithms correctly PASS and FAIL varying
|
||||
diff --git a/ldap/ldif/template-dse-minimal.ldif.in b/ldap/ldif/template-dse-minimal.ldif.in
|
||||
index 2eccae9b2..1a05f4a67 100644
|
||||
--- a/ldap/ldif/template-dse-minimal.ldif.in
|
||||
+++ b/ldap/ldif/template-dse-minimal.ldif.in
|
||||
@@ -194,15 +194,6 @@ nsslapd-pluginarg1: nsds5ReplicaCredentials
|
||||
nsslapd-pluginid: aes-storage-scheme
|
||||
nsslapd-pluginprecedence: 1
|
||||
|
||||
-dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config
|
||||
-objectclass: top
|
||||
-objectclass: nsSlapdPlugin
|
||||
-cn: GOST_YESCRYPT
|
||||
-nsslapd-pluginpath: libpwdstorage-plugin
|
||||
-nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init
|
||||
-nsslapd-plugintype: pwdstoragescheme
|
||||
-nsslapd-pluginenabled: on
|
||||
-
|
||||
dn: cn=Syntax Validation Task,cn=plugins,cn=config
|
||||
objectclass: top
|
||||
objectclass: nsSlapdPlugin
|
||||
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
|
||||
index 7e7480cba..f30531bec 100644
|
||||
--- a/ldap/ldif/template-dse.ldif.in
|
||||
+++ b/ldap/ldif/template-dse.ldif.in
|
||||
@@ -242,15 +242,6 @@ nsslapd-pluginarg2: nsds5ReplicaBootstrapCredentials
|
||||
nsslapd-pluginid: aes-storage-scheme
|
||||
nsslapd-pluginprecedence: 1
|
||||
|
||||
-dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config
|
||||
-objectclass: top
|
||||
-objectclass: nsSlapdPlugin
|
||||
-cn: GOST_YESCRYPT
|
||||
-nsslapd-pluginpath: libpwdstorage-plugin
|
||||
-nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init
|
||||
-nsslapd-plugintype: pwdstoragescheme
|
||||
-nsslapd-pluginenabled: on
|
||||
-
|
||||
dn: cn=Syntax Validation Task,cn=plugins,cn=config
|
||||
objectclass: top
|
||||
objectclass: nsSlapdPlugin
|
||||
diff --git a/ldap/servers/plugins/pwdstorage/pwd_init.c b/ldap/servers/plugins/pwdstorage/pwd_init.c
|
||||
index 606e63404..59cfc4684 100644
|
||||
--- a/ldap/servers/plugins/pwdstorage/pwd_init.c
|
||||
+++ b/ldap/servers/plugins/pwdstorage/pwd_init.c
|
||||
@@ -52,8 +52,6 @@ static Slapi_PluginDesc smd5_pdesc = {"smd5-password-storage-scheme", VENDOR, DS
|
||||
|
||||
static Slapi_PluginDesc pbkdf2_sha256_pdesc = {"pbkdf2-sha256-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Salted PBKDF2 SHA256 hash algorithm (PBKDF2_SHA256)"};
|
||||
|
||||
-static Slapi_PluginDesc gost_yescrypt_pdesc = {"gost-yescrypt-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Yescrypt KDF algorithm (Streebog256)"};
|
||||
-
|
||||
static char *plugin_name = "NSPwdStoragePlugin";
|
||||
|
||||
int
|
||||
@@ -431,19 +429,3 @@ pbkdf2_sha256_pwd_storage_scheme_init(Slapi_PBlock *pb)
|
||||
return rc;
|
||||
}
|
||||
|
||||
-int
|
||||
-gost_yescrypt_pwd_storage_scheme_init(Slapi_PBlock *pb)
|
||||
-{
|
||||
- int rc;
|
||||
-
|
||||
- slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "=> gost_yescrypt_pwd_storage_scheme_init\n");
|
||||
-
|
||||
- rc = slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, (void *)SLAPI_PLUGIN_VERSION_01);
|
||||
- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&gost_yescrypt_pdesc);
|
||||
- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_ENC_FN, (void *)gost_yescrypt_pw_enc);
|
||||
- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *)gost_yescrypt_pw_cmp);
|
||||
- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, GOST_YESCRYPT_SCHEME_NAME);
|
||||
-
|
||||
- slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "<= gost_yescrypt_pwd_storage_scheme_init %d\n", rc);
|
||||
- return rc;
|
||||
-}
|
||||
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
|
||||
index 44159c991..24b7ed11c 100644
|
||||
--- a/ldap/servers/slapd/fedse.c
|
||||
+++ b/ldap/servers/slapd/fedse.c
|
||||
@@ -203,19 +203,6 @@ static const char *internal_entries[] =
|
||||
"nsslapd-pluginVersion: none\n"
|
||||
"nsslapd-pluginVendor: 389 Project\n"
|
||||
"nsslapd-pluginDescription: CRYPT-SHA512\n",
|
||||
-
|
||||
- "dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config\n"
|
||||
- "objectclass: top\n"
|
||||
- "objectclass: nsSlapdPlugin\n"
|
||||
- "cn: GOST_YESCRYPT\n"
|
||||
- "nsslapd-pluginpath: libpwdstorage-plugin\n"
|
||||
- "nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init\n"
|
||||
- "nsslapd-plugintype: pwdstoragescheme\n"
|
||||
- "nsslapd-pluginenabled: on\n"
|
||||
- "nsslapd-pluginId: GOST_YESCRYPT\n"
|
||||
- "nsslapd-pluginVersion: none\n"
|
||||
- "nsslapd-pluginVendor: 389 Project\n"
|
||||
- "nsslapd-pluginDescription: GOST_YESCRYPT\n",
|
||||
};
|
||||
|
||||
static int NUM_INTERNAL_ENTRIES = sizeof(internal_entries) / sizeof(internal_entries[0]);
|
||||
--
|
||||
2.31.1
|
||||
|
@ -0,0 +1,44 @@
|
||||
From df0ccce06259b9ef06d522e61da4e3ffcbbf5016 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 25 Aug 2021 16:54:57 -0400
|
||||
Subject: [PATCH] Issue 4884 - server crashes when dnaInterval attribute is set
|
||||
to zero
|
||||
|
||||
Bug Description:
|
||||
|
||||
A division by zero crash occurs if the dnaInterval is set to zero
|
||||
|
||||
Fix Description:
|
||||
|
||||
Validate the config value of dnaInterval and adjust it to the
|
||||
default/safe value of "1" if needed.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4884
|
||||
|
||||
Reviewed by: tbordaz(Thanks!)
|
||||
---
|
||||
ldap/servers/plugins/dna/dna.c | 7 +++++++
|
||||
1 file changed, 7 insertions(+)
|
||||
|
||||
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
|
||||
index 928a3f54a..c983ebdd0 100644
|
||||
--- a/ldap/servers/plugins/dna/dna.c
|
||||
+++ b/ldap/servers/plugins/dna/dna.c
|
||||
@@ -1025,7 +1025,14 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
|
||||
|
||||
value = slapi_entry_attr_get_charptr(e, DNA_INTERVAL);
|
||||
if (value) {
|
||||
+ errno = 0;
|
||||
entry->interval = strtoull(value, 0, 0);
|
||||
+ if (entry->interval == 0 || errno == ERANGE) {
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, DNA_PLUGIN_SUBSYSTEM,
|
||||
+ "dna_parse_config_entry - Invalid value for dnaInterval (%s), "
|
||||
+ "Using default value of 1\n", value);
|
||||
+ entry->interval = 1;
|
||||
+ }
|
||||
slapi_ch_free_string(&value);
|
||||
}
|
||||
|
||||
--
|
||||
2.31.1
|
||||
|
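The guard added above amounts to "treat a zero or unparsable dnaInterval as 1", so the configured interval can never reach the later arithmetic as a zero divisor. The same validation restated as a short Python sketch, purely for illustration (this helper is not part of the plugin):

    def safe_dna_interval(value, default=1):
        # Mirror of the C guard: fall back to the default of 1 when the
        # configured dnaInterval is missing, not a number, or below 1.
        try:
            interval = int(value)
        except (TypeError, ValueError):
            return default
        return interval if interval >= 1 else default

    assert safe_dna_interval("0") == 1
    assert safe_dna_interval("abc") == 1
    assert safe_dna_interval("100") == 100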
@ -1,933 +0,0 @@
|
||||
# This file is automatically @generated by Cargo.
|
||||
# It is not intended for manual editing.
|
||||
version = 3
|
||||
|
||||
[[package]]
|
||||
name = "addr2line"
|
||||
version = "0.21.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
|
||||
dependencies = [
|
||||
"gimli",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "adler"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
|
||||
|
||||
[[package]]
|
||||
name = "ahash"
|
||||
version = "0.7.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd"
|
||||
dependencies = [
|
||||
"getrandom",
|
||||
"once_cell",
|
||||
"version_check",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ansi_term"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "atty"
|
||||
version = "0.2.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
|
||||
dependencies = [
|
||||
"hermit-abi",
|
||||
"libc",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
|
||||
|
||||
[[package]]
|
||||
name = "backtrace"
|
||||
version = "0.3.69"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837"
|
||||
dependencies = [
|
||||
"addr2line",
|
||||
"cc",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"miniz_oxide",
|
||||
"object",
|
||||
"rustc-demangle",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "base64"
|
||||
version = "0.13.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
version = "1.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
version = "2.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07"
|
||||
|
||||
[[package]]
|
||||
name = "byteorder"
|
||||
version = "1.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
|
||||
|
||||
[[package]]
|
||||
name = "cbindgen"
|
||||
version = "0.9.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
|
||||
dependencies = [
|
||||
"clap",
|
||||
"log",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"syn 1.0.109",
|
||||
"tempfile",
|
||||
"toml",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.0.83"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
|
||||
dependencies = [
|
||||
"jobserver",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "2.34.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
|
||||
dependencies = [
|
||||
"ansi_term",
|
||||
"atty",
|
||||
"bitflags 1.3.2",
|
||||
"strsim",
|
||||
"textwrap",
|
||||
"unicode-width",
|
||||
"vec_map",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "concread"
|
||||
version = "0.2.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dcc9816f5ac93ebd51c37f7f9a6bf2b40dfcd42978ad2aea5d542016e9244cf6"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
"crossbeam",
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils",
|
||||
"lru",
|
||||
"parking_lot",
|
||||
"rand",
|
||||
"smallvec",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam"
|
||||
version = "0.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8"
|
||||
dependencies = [
|
||||
"crossbeam-channel",
|
||||
"crossbeam-deque",
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-queue",
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-channel"
|
||||
version = "0.5.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b"
|
||||
dependencies = [
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-deque"
|
||||
version = "0.8.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
|
||||
dependencies = [
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-epoch"
|
||||
version = "0.9.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
|
||||
dependencies = [
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-queue"
|
||||
version = "0.3.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35"
|
||||
dependencies = [
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-utils"
|
||||
version = "0.8.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"
|
||||
|
||||
[[package]]
|
||||
name = "entryuuid"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
"paste",
|
||||
"slapi_r_plugin",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
name = "entryuuid_syntax"
version = "0.1.0"
dependencies = [
 "cc",
 "libc",
 "paste",
 "slapi_r_plugin",
 "uuid",
]

[[package]]
name = "errno"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245"
dependencies = [
 "libc",
 "windows-sys",
]

[[package]]
name = "fastrand"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5"

[[package]]
name = "fernet"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
dependencies = [
 "base64",
 "byteorder",
 "getrandom",
 "openssl",
 "zeroize",
]

[[package]]
name = "foreign-types"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
dependencies = [
 "foreign-types-shared",
]

[[package]]
name = "foreign-types-shared"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"

[[package]]
name = "getrandom"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5"
dependencies = [
 "cfg-if",
 "libc",
 "wasi",
]

[[package]]
name = "gimli"
version = "0.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"

[[package]]
name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
dependencies = [
 "ahash",
]

[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
 "libc",
]

[[package]]
name = "instant"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
dependencies = [
 "cfg-if",
]

[[package]]
name = "itoa"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"

[[package]]
name = "jobserver"
version = "0.1.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d"
dependencies = [
 "libc",
]

[[package]]
name = "libc"
version = "0.2.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7"

[[package]]
name = "librnsslapd"
version = "0.1.0"
dependencies = [
 "cbindgen",
 "libc",
 "slapd",
]

[[package]]
name = "librslapd"
version = "0.1.0"
dependencies = [
 "cbindgen",
 "concread",
 "libc",
 "slapd",
]

[[package]]
name = "linux-raw-sys"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456"

[[package]]
name = "lock_api"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45"
dependencies = [
 "autocfg",
 "scopeguard",
]

[[package]]
name = "log"
version = "0.4.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"

[[package]]
name = "lru"
version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a"
dependencies = [
 "hashbrown",
]

[[package]]
name = "memchr"
version = "2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149"

[[package]]
name = "miniz_oxide"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
dependencies = [
 "adler",
]

[[package]]
name = "object"
version = "0.32.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441"
dependencies = [
 "memchr",
]

[[package]]
name = "once_cell"
version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"

[[package]]
name = "openssl"
version = "0.10.62"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671"
dependencies = [
 "bitflags 2.4.1",
 "cfg-if",
 "foreign-types",
 "libc",
 "once_cell",
 "openssl-macros",
 "openssl-sys",
]

[[package]]
name = "openssl-macros"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.48",
]

[[package]]
name = "openssl-sys"
version = "0.9.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7"
dependencies = [
 "cc",
 "libc",
 "pkg-config",
 "vcpkg",
]

[[package]]
name = "parking_lot"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
dependencies = [
 "instant",
 "lock_api",
 "parking_lot_core",
]

[[package]]
name = "parking_lot_core"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
dependencies = [
 "cfg-if",
 "instant",
 "libc",
 "redox_syscall 0.2.16",
 "smallvec",
 "winapi",
]

[[package]]
name = "paste"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
dependencies = [
 "paste-impl",
 "proc-macro-hack",
]

[[package]]
name = "paste-impl"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
dependencies = [
 "proc-macro-hack",
]

[[package]]
name = "pin-project-lite"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"

[[package]]
name = "pkg-config"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a"

[[package]]
name = "ppv-lite86"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"

[[package]]
name = "proc-macro-hack"
version = "0.5.20+deprecated"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"

[[package]]
name = "proc-macro2"
version = "1.0.76"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c"
dependencies = [
 "unicode-ident",
]

[[package]]
name = "pwdchan"
version = "0.1.0"
dependencies = [
 "base64",
 "cc",
 "libc",
 "openssl",
 "paste",
 "slapi_r_plugin",
 "uuid",
]

[[package]]
name = "quote"
version = "1.0.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
dependencies = [
 "proc-macro2",
]

[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
 "libc",
 "rand_chacha",
 "rand_core",
]

[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
 "ppv-lite86",
 "rand_core",
]

[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
 "getrandom",
]

[[package]]
name = "redox_syscall"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
 "bitflags 1.3.2",
]

[[package]]
name = "redox_syscall"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa"
dependencies = [
 "bitflags 1.3.2",
]

[[package]]
name = "rsds"
version = "0.1.0"

[[package]]
name = "rustc-demangle"
version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"

[[package]]
name = "rustix"
version = "0.38.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca"
dependencies = [
 "bitflags 2.4.1",
 "errno",
 "libc",
 "linux-raw-sys",
 "windows-sys",
]

[[package]]
name = "ryu"
version = "1.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c"

[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"

[[package]]
name = "serde"
version = "1.0.195"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.195"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.48",
]

[[package]]
name = "serde_json"
version = "1.0.111"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4"
dependencies = [
 "itoa",
 "ryu",
 "serde",
]

[[package]]
name = "slapd"
version = "0.1.0"
dependencies = [
 "fernet",
]

[[package]]
name = "slapi_r_plugin"
version = "0.1.0"
dependencies = [
 "libc",
 "paste",
 "uuid",
]

[[package]]
name = "smallvec"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2593d31f82ead8df961d8bd23a64c2ccf2eb5dd34b0a34bfb4dd54011c72009e"

[[package]]
name = "strsim"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"

[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
 "proc-macro2",
 "quote",
 "unicode-ident",
]

[[package]]
name = "syn"
version = "2.0.48"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f"
dependencies = [
 "proc-macro2",
 "quote",
 "unicode-ident",
]

[[package]]
name = "tempfile"
version = "3.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa"
dependencies = [
 "cfg-if",
 "fastrand",
 "redox_syscall 0.4.1",
 "rustix",
 "windows-sys",
]

[[package]]
name = "textwrap"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
dependencies = [
 "unicode-width",
]

[[package]]
name = "tokio"
version = "1.35.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104"
dependencies = [
 "backtrace",
 "pin-project-lite",
 "tokio-macros",
]

[[package]]
name = "tokio-macros"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.48",
]

[[package]]
name = "toml"
version = "0.5.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234"
dependencies = [
 "serde",
]

[[package]]
name = "unicode-ident"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"

[[package]]
name = "unicode-width"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85"

[[package]]
name = "uuid"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
dependencies = [
 "getrandom",
]

[[package]]
name = "vcpkg"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"

[[package]]
name = "vec_map"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"

[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"

[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"

[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
 "winapi-i686-pc-windows-gnu",
 "winapi-x86_64-pc-windows-gnu",
]

[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"

[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

[[package]]
name = "windows-sys"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
 "windows-targets",
]

[[package]]
name = "windows-targets"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
dependencies = [
 "windows_aarch64_gnullvm",
 "windows_aarch64_msvc",
 "windows_i686_gnu",
 "windows_i686_msvc",
 "windows_x86_64_gnu",
 "windows_x86_64_gnullvm",
 "windows_x86_64_msvc",
]

[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"

[[package]]
name = "windows_aarch64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"

[[package]]
name = "windows_i686_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"

[[package]]
name = "windows_i686_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"

[[package]]
name = "windows_x86_64_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"

[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"

[[package]]
name = "windows_x86_64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"

[[package]]
name = "zeroize"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d"
dependencies = [
 "zeroize_derive",
]

[[package]]
name = "zeroize_derive"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.48",
]
565 SOURCES/Cargo.lock generated Normal file
@ -0,0 +1,565 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3

[[package]]
name = "ansi_term"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
dependencies = [
 "winapi",
]

[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
 "hermit-abi",
 "libc",
 "winapi",
]

[[package]]
name = "autocfg"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"

[[package]]
name = "base64"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"

[[package]]
name = "bitflags"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"

[[package]]
name = "byteorder"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"

[[package]]
name = "cbindgen"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
dependencies = [
 "clap",
 "log",
 "proc-macro2",
 "quote",
 "serde",
 "serde_json",
 "syn",
 "tempfile",
 "toml",
]

[[package]]
name = "cc"
version = "1.0.68"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787"
dependencies = [
 "jobserver",
]

[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

[[package]]
name = "clap"
version = "2.33.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
dependencies = [
 "ansi_term",
 "atty",
 "bitflags",
 "strsim",
 "textwrap",
 "unicode-width",
 "vec_map",
]

[[package]]
name = "entryuuid"
version = "0.1.0"
dependencies = [
 "cc",
 "libc",
 "paste",
 "slapi_r_plugin",
 "uuid",
]

[[package]]
name = "entryuuid_syntax"
version = "0.1.0"
dependencies = [
 "cc",
 "libc",
 "paste",
 "slapi_r_plugin",
 "uuid",
]

[[package]]
name = "fernet"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
dependencies = [
 "base64",
 "byteorder",
 "getrandom",
 "openssl",
 "zeroize",
]

[[package]]
name = "foreign-types"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
dependencies = [
 "foreign-types-shared",
]

[[package]]
name = "foreign-types-shared"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"

[[package]]
name = "getrandom"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
dependencies = [
 "cfg-if",
 "libc",
 "wasi",
]

[[package]]
name = "hermit-abi"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
dependencies = [
 "libc",
]

[[package]]
name = "itoa"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"

[[package]]
name = "jobserver"
version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd"
dependencies = [
 "libc",
]

[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

[[package]]
name = "libc"
version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36"

[[package]]
name = "librnsslapd"
version = "0.1.0"
dependencies = [
 "cbindgen",
 "libc",
 "slapd",
]

[[package]]
name = "librslapd"
version = "0.1.0"
dependencies = [
 "cbindgen",
 "libc",
 "slapd",
]

[[package]]
name = "log"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
dependencies = [
 "cfg-if",
]

[[package]]
name = "once_cell"
version = "1.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3"

[[package]]
name = "openssl"
version = "0.10.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8"
dependencies = [
 "bitflags",
 "cfg-if",
 "foreign-types",
 "libc",
 "once_cell",
 "openssl-sys",
]

[[package]]
name = "openssl-sys"
version = "0.9.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98"
dependencies = [
 "autocfg",
 "cc",
 "libc",
 "pkg-config",
 "vcpkg",
]

[[package]]
name = "paste"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
dependencies = [
 "paste-impl",
 "proc-macro-hack",
]

[[package]]
name = "paste-impl"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
dependencies = [
 "proc-macro-hack",
]

[[package]]
name = "pkg-config"
version = "0.3.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"

[[package]]
name = "ppv-lite86"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"

[[package]]
name = "proc-macro-hack"
version = "0.5.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"

[[package]]
name = "proc-macro2"
version = "1.0.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
dependencies = [
 "unicode-xid",
]

[[package]]
name = "quote"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
dependencies = [
 "proc-macro2",
]

[[package]]
name = "rand"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
dependencies = [
 "libc",
 "rand_chacha",
 "rand_core",
 "rand_hc",
]

[[package]]
name = "rand_chacha"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
dependencies = [
 "ppv-lite86",
 "rand_core",
]

[[package]]
name = "rand_core"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"
dependencies = [
 "getrandom",
]

[[package]]
name = "rand_hc"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
dependencies = [
 "rand_core",
]

[[package]]
name = "redox_syscall"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc"
dependencies = [
 "bitflags",
]

[[package]]
name = "remove_dir_all"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
dependencies = [
 "winapi",
]

[[package]]
name = "rsds"
version = "0.1.0"

[[package]]
name = "ryu"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"

[[package]]
name = "serde"
version = "1.0.126"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.126"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "serde_json"
version = "1.0.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79"
dependencies = [
 "itoa",
 "ryu",
 "serde",
]

[[package]]
name = "slapd"
version = "0.1.0"
dependencies = [
 "fernet",
]

[[package]]
name = "slapi_r_plugin"
version = "0.1.0"
dependencies = [
 "lazy_static",
 "libc",
 "paste",
 "uuid",
]

[[package]]
name = "strsim"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"

[[package]]
name = "syn"
version = "1.0.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82"
dependencies = [
 "proc-macro2",
 "quote",
 "unicode-xid",
]

[[package]]
name = "synstructure"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
 "unicode-xid",
]

[[package]]
name = "tempfile"
version = "3.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
dependencies = [
 "cfg-if",
 "libc",
 "rand",
 "redox_syscall",
 "remove_dir_all",
 "winapi",
]

[[package]]
name = "textwrap"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
dependencies = [
 "unicode-width",
]

[[package]]
name = "toml"
version = "0.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa"
dependencies = [
 "serde",
]

[[package]]
name = "unicode-width"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"

[[package]]
name = "unicode-xid"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"

[[package]]
name = "uuid"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
dependencies = [
 "getrandom",
]

[[package]]
name = "vcpkg"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "025ce40a007e1907e58d5bc1a594def78e5573bb0b1160bc389634e8f12e4faa"

[[package]]
name = "vec_map"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"

[[package]]
name = "wasi"
version = "0.10.2+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"

[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
 "winapi-i686-pc-windows-gnu",
 "winapi-x86_64-pc-windows-gnu",
]

[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"

[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

[[package]]
name = "zeroize"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd"
dependencies = [
 "zeroize_derive",
]

[[package]]
name = "zeroize_derive"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
 "synstructure",
]
@ -25,7 +25,7 @@ ExcludeArch: i686

%if %{bundle_jemalloc}
%global jemalloc_name jemalloc
%global jemalloc_ver 5.3.0
%global jemalloc_ver 5.2.1
%global __provides_exclude ^libjemalloc\\.so.*$
%endif

@ -47,9 +47,9 @@ ExcludeArch: i686

Summary: 389 Directory Server (base)
Name: 389-ds-base
Version: 1.4.3.39
Release: %{?relprefix}11%{?prerel}%{?dist}
License: GPLv3+ and (ASL 2.0 or MIT)
Version: 1.4.3.23
Release: %{?relprefix}10%{?prerel}%{?dist}
License: GPLv3+
URL: https://www.port389.org
Group: System Environment/Daemons
Conflicts: selinux-policy-base < 3.9.8
@ -58,116 +58,63 @@ Obsoletes: %{name} <= 1.4.0.9
Provides: ldif2ldbm >= 0

##### Bundled cargo crates list - START #####
Provides: bundled(crate(addr2line)) = 0.21.0
Provides: bundled(crate(adler)) = 1.0.2
Provides: bundled(crate(ahash)) = 0.7.7
Provides: bundled(crate(ansi_term)) = 0.12.1
Provides: bundled(crate(ansi_term)) = 0.11.0
Provides: bundled(crate(atty)) = 0.2.14
Provides: bundled(crate(autocfg)) = 1.1.0
Provides: bundled(crate(backtrace)) = 0.3.69
Provides: bundled(crate(base64)) = 0.13.1
Provides: bundled(crate(bitflags)) = 1.3.2
Provides: bundled(crate(bitflags)) = 2.4.1
Provides: bundled(crate(byteorder)) = 1.5.0
Provides: bundled(crate(autocfg)) = 1.0.1
Provides: bundled(crate(base64)) = 0.10.1
Provides: bundled(crate(bitflags)) = 1.2.1
Provides: bundled(crate(byteorder)) = 1.4.2
Provides: bundled(crate(cbindgen)) = 0.9.1
Provides: bundled(crate(cc)) = 1.0.83
Provides: bundled(crate(cc)) = 1.0.66
Provides: bundled(crate(cfg-if)) = 0.1.10
Provides: bundled(crate(cfg-if)) = 1.0.0
Provides: bundled(crate(clap)) = 2.34.0
Provides: bundled(crate(concread)) = 0.2.21
Provides: bundled(crate(crossbeam)) = 0.8.4
Provides: bundled(crate(crossbeam-channel)) = 0.5.11
Provides: bundled(crate(crossbeam-deque)) = 0.8.5
Provides: bundled(crate(crossbeam-epoch)) = 0.9.18
Provides: bundled(crate(crossbeam-queue)) = 0.3.11
Provides: bundled(crate(crossbeam-utils)) = 0.8.19
Provides: bundled(crate(entryuuid)) = 0.1.0
Provides: bundled(crate(entryuuid_syntax)) = 0.1.0
Provides: bundled(crate(errno)) = 0.3.8
Provides: bundled(crate(fastrand)) = 2.0.1
Provides: bundled(crate(fernet)) = 0.1.4
Provides: bundled(crate(clap)) = 2.33.3
Provides: bundled(crate(fernet)) = 0.1.3
Provides: bundled(crate(foreign-types)) = 0.3.2
Provides: bundled(crate(foreign-types-shared)) = 0.1.1
Provides: bundled(crate(getrandom)) = 0.2.12
Provides: bundled(crate(gimli)) = 0.28.1
Provides: bundled(crate(hashbrown)) = 0.12.3
Provides: bundled(crate(hermit-abi)) = 0.1.19
Provides: bundled(crate(instant)) = 0.1.12
Provides: bundled(crate(itoa)) = 1.0.10
Provides: bundled(crate(jobserver)) = 0.1.27
Provides: bundled(crate(libc)) = 0.2.152
Provides: bundled(crate(getrandom)) = 0.1.16
Provides: bundled(crate(hermit-abi)) = 0.1.17
Provides: bundled(crate(itoa)) = 0.4.7
Provides: bundled(crate(lazy_static)) = 1.4.0
Provides: bundled(crate(libc)) = 0.2.82
Provides: bundled(crate(librnsslapd)) = 0.1.0
Provides: bundled(crate(librslapd)) = 0.1.0
Provides: bundled(crate(linux-raw-sys)) = 0.4.12
Provides: bundled(crate(lock_api)) = 0.4.11
Provides: bundled(crate(log)) = 0.4.20
Provides: bundled(crate(lru)) = 0.7.8
Provides: bundled(crate(memchr)) = 2.7.1
Provides: bundled(crate(miniz_oxide)) = 0.7.1
Provides: bundled(crate(object)) = 0.32.2
Provides: bundled(crate(once_cell)) = 1.19.0
Provides: bundled(crate(openssl)) = 0.10.62
Provides: bundled(crate(openssl-macros)) = 0.1.1
Provides: bundled(crate(openssl-sys)) = 0.9.98
Provides: bundled(crate(parking_lot)) = 0.11.2
Provides: bundled(crate(parking_lot_core)) = 0.8.6
Provides: bundled(crate(paste)) = 0.1.18
Provides: bundled(crate(paste-impl)) = 0.1.18
Provides: bundled(crate(pin-project-lite)) = 0.2.13
Provides: bundled(crate(pkg-config)) = 0.3.28
Provides: bundled(crate(ppv-lite86)) = 0.2.17
Provides: bundled(crate(proc-macro-hack)) = 0.5.20+deprecated
Provides: bundled(crate(proc-macro2)) = 1.0.76
Provides: bundled(crate(pwdchan)) = 0.1.0
Provides: bundled(crate(quote)) = 1.0.35
Provides: bundled(crate(rand)) = 0.8.5
Provides: bundled(crate(rand_chacha)) = 0.3.1
Provides: bundled(crate(rand_core)) = 0.6.4
Provides: bundled(crate(redox_syscall)) = 0.2.16
Provides: bundled(crate(redox_syscall)) = 0.4.1
Provides: bundled(crate(log)) = 0.4.11
Provides: bundled(crate(openssl)) = 0.10.32
Provides: bundled(crate(openssl-sys)) = 0.9.60
Provides: bundled(crate(pkg-config)) = 0.3.19
Provides: bundled(crate(ppv-lite86)) = 0.2.10
Provides: bundled(crate(proc-macro2)) = 1.0.24
Provides: bundled(crate(quote)) = 1.0.8
Provides: bundled(crate(rand)) = 0.7.3
Provides: bundled(crate(rand_chacha)) = 0.2.2
Provides: bundled(crate(rand_core)) = 0.5.1
Provides: bundled(crate(rand_hc)) = 0.2.0
Provides: bundled(crate(redox_syscall)) = 0.1.57
Provides: bundled(crate(remove_dir_all)) = 0.5.3
Provides: bundled(crate(rsds)) = 0.1.0
Provides: bundled(crate(rustc-demangle)) = 0.1.23
Provides: bundled(crate(rustix)) = 0.38.30
Provides: bundled(crate(ryu)) = 1.0.16
Provides: bundled(crate(scopeguard)) = 1.2.0
Provides: bundled(crate(serde)) = 1.0.195
Provides: bundled(crate(serde_derive)) = 1.0.195
Provides: bundled(crate(serde_json)) = 1.0.111
Provides: bundled(crate(ryu)) = 1.0.5
Provides: bundled(crate(serde)) = 1.0.118
Provides: bundled(crate(serde_derive)) = 1.0.118
Provides: bundled(crate(serde_json)) = 1.0.61
Provides: bundled(crate(slapd)) = 0.1.0
Provides: bundled(crate(slapi_r_plugin)) = 0.1.0
Provides: bundled(crate(smallvec)) = 1.12.0
Provides: bundled(crate(strsim)) = 0.8.0
Provides: bundled(crate(syn)) = 1.0.109
Provides: bundled(crate(syn)) = 2.0.48
Provides: bundled(crate(tempfile)) = 3.9.0
Provides: bundled(crate(syn)) = 1.0.58
Provides: bundled(crate(tempfile)) = 3.1.0
Provides: bundled(crate(textwrap)) = 0.11.0
Provides: bundled(crate(tokio)) = 1.35.1
Provides: bundled(crate(tokio-macros)) = 2.2.0
Provides: bundled(crate(toml)) = 0.5.11
Provides: bundled(crate(unicode-ident)) = 1.0.12
Provides: bundled(crate(unicode-width)) = 0.1.11
Provides: bundled(crate(uuid)) = 0.8.2
Provides: bundled(crate(vcpkg)) = 0.2.15
Provides: bundled(crate(toml)) = 0.5.8
Provides: bundled(crate(unicode-width)) = 0.1.8
Provides: bundled(crate(unicode-xid)) = 0.2.1
Provides: bundled(crate(vcpkg)) = 0.2.11
Provides: bundled(crate(vec_map)) = 0.8.2
Provides: bundled(crate(version_check)) = 0.9.4
Provides: bundled(crate(wasi)) = 0.11.0+wasi_snapshot_preview1
Provides: bundled(crate(wasi)) = 0.9.0+wasi_snapshot_preview1
Provides: bundled(crate(winapi)) = 0.3.9
Provides: bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0
Provides: bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0
Provides: bundled(crate(windows-sys)) = 0.52.0
Provides: bundled(crate(windows-targets)) = 0.52.0
Provides: bundled(crate(windows_aarch64_gnullvm)) = 0.52.0
Provides: bundled(crate(windows_aarch64_msvc)) = 0.52.0
Provides: bundled(crate(windows_i686_gnu)) = 0.52.0
Provides: bundled(crate(windows_i686_msvc)) = 0.52.0
Provides: bundled(crate(windows_x86_64_gnu)) = 0.52.0
Provides: bundled(crate(windows_x86_64_gnullvm)) = 0.52.0
Provides: bundled(crate(windows_x86_64_msvc)) = 0.52.0
Provides: bundled(crate(zeroize)) = 1.7.0
Provides: bundled(crate(zeroize_derive)) = 1.4.2
##### Bundled cargo crates list - END #####

BuildRequires: nspr-devel >= 4.32
BuildRequires: nss-devel >= 3.67.0-7
BuildRequires: nspr-devel
BuildRequires: nss-devel >= 3.34
BuildRequires: perl-generators
BuildRequires: openldap-devel
BuildRequires: libdb-devel
@ -227,7 +174,6 @@ BuildRequires: python%{python3_pkgversion}-argcomplete
BuildRequires: python%{python3_pkgversion}-argparse-manpage
BuildRequires: python%{python3_pkgversion}-policycoreutils
BuildRequires: python%{python3_pkgversion}-libselinux
BuildRequires: python%{python3_pkgversion}-cryptography

# For cockpit
BuildRequires: rsync
@ -250,8 +196,7 @@ Requires: python%{python3_pkgversion}-ldap
# this is needed to setup SSL if you are not using the
# administration server package
Requires: nss-tools
Requires: nspr >= 4.32
Requires: nss >= 3.67.0-7
Requires: nss >= 3.34

# these are not found by the auto-dependency method
# they are required to support the mandatory LDAP SASL mechs
@ -289,32 +234,40 @@ Source2: %{name}-devel.README
Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2
%endif
%if %{use_rust}
Source4: vendor-%{version}-1.tar.gz
Source5: Cargo-%{version}-1.lock
Source4: vendor-%{version}-2.tar.gz
Source5: Cargo.lock
%endif
Patch01: 0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch
Patch02: 0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch
Patch03: 0003-Ticket-137-Implement-EntryUUID-plugin.patch
Patch04: 0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch
Patch05: 0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch
Patch06: 0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch
Patch07: 0007-Ticket-51175-resolve-plugin-name-leaking.patch
Patch08: 0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch
Patch09: 0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch
Patch10: 0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch
Patch11: 0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch
Patch12: 0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch
Patch13: 0013-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch
Patch14: 0014-Issue-4396-Minor-memory-leak-in-backend-4558-4572.patch
Patch15: 0015-Issue-4700-Regression-in-winsync-replication-agreeme.patch
Patch16: 0016-Issue-4725-Fix-compiler-warnings.patch
Patch17: 0017-Issue-4814-_cl5_get_tod_expiration-may-crash-at-star.patch
Patch18: 0018-Issue-4789-Temporary-password-rules-are-not-enforce-.patch
Patch19: 0019-Issue-4788-CLI-should-support-Temporary-Password-Rul.patch
Patch20: 0020-Issue-4447-Crash-when-the-Referential-Integrity-log-.patch
Patch21: 0021-Issue-4791-Missing-dependency-for-RetroCL-RFE.patch
Patch22: 0022-Issue-4656-remove-problematic-language-from-ds-replc.patch
Patch23: 0023-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch
Patch24: 0024-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch
Patch25: 0025-Issue-4837-persistent-search-returns-entries-even-wh.patch
Patch26: 0026-Hardcode-gost-crypt-passsword-storage-scheme.patch
Patch27: 0027-Issue-4734-import-of-entry-with-no-parent-warning-47.patch
Patch28: 0028-Issue-4872-BUG-entryuuid-enabled-by-default-causes-r.patch
Patch29: 0029-Remove-GOST-YESCRYPT-password-sotrage-scheme.patch
Patch30: 0030-Issue-4884-server-crashes-when-dnaInterval-attribute.patch

Patch01: 0001-issue-5647-covscan-memory-leak-in-audit-log-when-add.patch
Patch02: 0002-Issue-5647-Fix-unused-variable-warning-from-previous.patch
Patch03: 0003-Issue-5407-sync_repl-crashes-if-enabled-while-dynami.patch
Patch04: 0004-Issue-5547-automember-plugin-improvements.patch
Patch05: 0005-Issue-3527-Support-HAProxy-and-Instance-on-the-same-.patch
Patch06: 0006-CVE-2024-2199.patch
Patch07: 0007-CVE-2024-3657.patch
Patch08: 0008-Issue-6096-Improve-connection-timeout-error-logging-.patch
Patch09: 0009-Issue-6103-New-connection-timeout-error-breaks-error.patch
Patch10: 0010-Issue-6103-New-connection-timeout-error-breaks-error.patch
Patch11: 0011-Issue-6172-RFE-improve-the-performance-of-evaluation.patch
Patch12: 0012-Security-fix-for-CVE-2024-5953.patch
Patch13: 0013-Issue-4778-Add-COMPACT_CL5-task-to-dsconf-replicatio.patch
Patch14: 0014-Issue-6417-If-an-entry-RDN-is-identical-to-the-suffi.patch
Patch15: 0015-Issue-6224-d2entry-Could-not-open-id2entry-err-0-at-.patch
Patch16: 0016-Issue-6224-Fix-merge-issue-in-389-ds-base-2.1-for-ds.patch
Patch17: 0017-Issue-6224-Remove-test_referral_subsuffix-from-ds_lo.patch
Patch18: 0018-Issue-6417-2nd-If-an-entry-RDN-is-identical-to-the-s.patch
Patch19: 0019-Issue-6417-2nd-fix-typo.patch
Patch20: 0020-Issue-6417-3rd-If-an-entry-RDN-is-identical-to-the-s.patch
Patch21: 0021-Issue-6509-Race-condition-with-Paged-Result-searches.patch
Patch22: 0022-Issue-6509-Fix-cherry-pick-issue-race-condition-in-P.patch

%description
389 Directory Server is an LDAPv3 compliant server. The base package includes
@ -328,8 +281,8 @@ Please see http://seclists.org/oss-sec/2016/q1/363 for more information.
%package libs
Summary: Core libraries for 389 Directory Server
Group: System Environment/Daemons
BuildRequires: nspr-devel >= 4.32
BuildRequires: nss-devel >= 3.67.0-7
BuildRequires: nspr-devel
BuildRequires: nss-devel >= 3.34
BuildRequires: openldap-devel
BuildRequires: libdb-devel
BuildRequires: cyrus-sasl-devel
@ -382,8 +335,8 @@ Summary: Development libraries for 389 Directory Server
Group: Development/Libraries
Requires: %{name}-libs = %{version}-%{release}
Requires: pkgconfig
Requires: nspr-devel >= 4.32
Requires: nss-devel >= 3.67.0-7
Requires: nspr-devel
Requires: nss-devel >= 3.34
Requires: openldap-devel
Requires: libtalloc
Requires: libevent
@ -410,7 +363,6 @@ SNMP Agent for the 389 Directory Server base package.
Summary: A library for accessing, testing, and configuring the 389 Directory Server
BuildArch: noarch
Group: Development/Libraries
Requires: 389-ds-base
Requires: openssl
Requires: iproute
Requires: platform-python
@ -424,7 +376,6 @@ Requires: python%{python3_pkgversion}-argcomplete
Requires: python%{python3_pkgversion}-libselinux
Requires: python%{python3_pkgversion}-setuptools
Requires: python%{python3_pkgversion}-distro
Requires: python%{python3_pkgversion}-cryptography
%{?python_provide:%python_provide python%{python3_pkgversion}-lib389}

%description -n python%{python3_pkgversion}-lib389
@ -442,10 +393,10 @@ Requires: python%{python3_pkgversion}-lib389
A cockpit UI Plugin for configuring and administering the 389 Directory Server

%prep
%autosetup -p1 -n %{name}-%{version}%{?prerel}
%autosetup -p1 -v -n %{name}-%{version}%{?prerel}
%if %{use_rust}
tar xzf %{SOURCE4}
cp %{SOURCE5} src/Cargo.lock
tar xvzf %{SOURCE4}
cp %{SOURCE5} src/
%endif
%if %{bundle_jemalloc}
%setup -q -n %{name}-%{version}%{?prerel} -T -D -b 3
@ -744,7 +695,6 @@ exit 0
%{_sbindir}/ns-slapd
%{_mandir}/man8/ns-slapd.8.gz
%{_libexecdir}/%{pkgname}/ds_systemd_ask_password_acl
%{_libexecdir}/%{pkgname}/ds_selinux_restorecon.sh
%{_mandir}/man5/99user.ldif.5.gz
%{_mandir}/man5/certmap.conf.5.gz
%{_mandir}/man5/slapd-collations.conf.5.gz
@ -936,87 +886,63 @@ exit 0
%doc README.md

%changelog
* Thu Jan 23 2025 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-11
- Resolves: RHEL-72487 - IPA LDAP error code T3 when no exceeded time limit from a paged search result [rhel-8.10.z]
* Thu Aug 26 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-10
- Bump version to 1.4.3.23-10
- Resolves: Bug 1997138 - LDAP server crashes when dnaInterval attribute is set to 0

* Fri Jan 17 2025 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-10
- Resolves: RHEL-69822 - "Duplicated DN detected" errors when creating indexes or importing entries. [rhel-8.10.z]
- Resolves: RHEL-71215 - Sub suffix causes "id2entry - Could not open id2entry err 0" error when the Directory Server starts [rhel-8.10.z]
* Wed Aug 25 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-9
- Bump version to 1.4.3.23-9
- Resolves: Bug 1947044 - remove unsupported GOST password storage scheme

* Fri Nov 22 2024 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-9
- Resolves: RHEL-64360 - Cannot compact the replication changelog using dsconf. [rhel-8.10.z]
* Thu Aug 19 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-8
- Bump version to 1.4.3.23-8
- Resolves: Bug 1947044 - add missing patch for import result code
- Resolves: Bug 1944494 - support for RFC 4530 entryUUID attribute

* Mon Sep 09 2024 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-8
- Bump version to 1.4.3.39-8
- Resolves: RHEL-40943 - CVE-2024-5953 389-ds:1.4/389-ds-base: Malformed userPassword hash may cause Denial of Service [rhel-8.10.z]
- Resolves: RHEL-58069 - perf search result investigation for many large static groups and members [rhel-8.10.0.z]
* Mon Jul 26 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-7
- Bump version to 1.4.3.23-7
- Resolves: Bug 1983921 - persistent search returns entries even when an error is returned by content-sync-plugin

* Thu Jun 13 2024 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-7
- Bump version to 1.4.3.39-7
- Resolves: RHEL-16277 - LDAP connections are closed with code T2 before the IO block timeout is reached. [rhel-8.10.0.z]
* Fri Jul 16 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-6
- Bump version to 1.4.3.23-6
- Resolves: Bug 1982787 - CRYPT password hash with asterisk allows any bind attempt to succeed

* Thu Jun 13 2024 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-6
- Bump version to 1.4.3.39-6
- Resolves: RHEL-16277 - LDAP connections are closed with code T2 before the IO block timeout is reached. [rhel-8.10.0.z]
* Thu Jul 15 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-5
- Bump version to 1.4.3.23-5
- Resolves: Bug 1951020 - Internal unindexed searches in syncrepl
- Resolves: Bug 1978279 - ds-replcheck state output message has 'Master' instead of 'Supplier'

* Tue Jun 11 2024 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-5
- Bump version to 1.4.3.39-5
- Resolves: RHEL-16277 - LDAP connections are closed with code T2 before the IO block timeout is reached. [rhel-8.10.0.z]
* Tue Jun 29 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-4
- Bump version to 1.4.3.23-4
- Resolves: Bug 1976906 - Instance crash at restart after changelog configuration
- Resolves: Bug 1480323 - ns-slapd crash at startup - Segmentation fault in strcmpi_fast() when the Referential Integrity log is manually edited
- Resolves: Bug 1967596 - Temporary password - add CLI and fix compiler errors

* Thu Jun 06 2024 James Chapman <jachapma@redhat.com> - 1.4.3.39-4
- Bump version to 1.4.3.39-4
- Resolves: RHEL-34818 - redhat-ds:11/389-ds-base: Malformed userPassword may cause crash at do_modify in slapd/modify.c
- Resolves: RHEL-34824 - redhat-ds:11/389-ds-base: potential denial of service via specially crafted kerberos AS-REQ request
* Thu Jun 17 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-3
- Bump version to 1.4.3.23-3
- Resolves: Bug 1944494 - support for RFC 4530 entryUUID attribute
- Resolves: Bug 1967839 - ACIs are being evaluated against the Replication Manager account in a replication context
- Resolves: Bug 1970259 - A connection can be erroneously flagged as replication conn during evaluation of an aci with ip bind rule
- Resolves: Bug 1972590 - Large updates can reset the CLcache to the beginning of the changelog
- Resolves: Bug 1903221 - Memory leak in 389ds backend (Minor)

* Thu Mar 14 2024 Simon Pichugin <spichugi@redhat.com> - 1.4.3.39-3
- Bump version to 1.4.3.39-3
- Resolves: RHEL-19240 - RFE Add PROXY protocol support to 389-ds-base via configuration item - similar to Postfix
* Sun May 30 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-2
- Bump version to 1.4.3.23-2
- Resolves: Bug 1812286 - RFE - Monitor the current DB locks ( nsslapd-db-current-locks )
- Resolves: Bug 1748441 - RFE - Schedule execution of "compactdb" at specific date/time
- Resolves: Bug 1938239 - RFE - Extend DNA plugin to support intervals sizes for subuids

* Mon Feb 05 2024 Thierry Bordaz <tbordaz@redhat.com> - 1.4.3.39-2
- Bump version to 1.4.3.39-2
- Resolves: RHEL-23209 - CVE-2024-1062 389-ds:1.4/389-ds-base: a heap overflow leading to denial-of-service while writing a value larger than 256 chars (in log_entry_attr)
- Resolves: RHEL-5390 - schema-compat-plugin expensive with automember rebuild
- Resolves: RHEL-5135 - crash in sync_update_persist_op() of content sync plugin

* Tue Jan 16 2024 Simon Pichugin <spichugi@redhat.com> - 1.4.3.39-1
- Bump version to 1.4.3.39-1
- Resolves: RHEL-19028 - Rebase 389-ds-base in RHEL 8.10 to 1.4.3.39
- Resolves: RHEL-19240 - [RFE] Add PROXY protocol support to 389-ds-base
- Resolves: RHEL-5143 - SELinux labeling for dirsrv files seen during ipa install/uninstall should be moved to DEBUG.
- Resolves: RHEL-5107 - bdb_start - Detected Disorderly Shutdown directory server is not starting
- Resolves: RHEL-16338 - ns-slapd crash in slapi_attr_basetype
- Resolves: RHEL-14025 - After an upgrade the LDAP server won't start if nsslapd-conntablesize is present in the dse.ldif file.


* Fri Dec 08 2023 James Chapman <jachapma@redhat.com> - 1.4.3.38-1
- Bump version to 1.4.3.38-1
- Resolves: RHEL-19028 - Rebase 389-ds-base in RHEL 8.10 to 1.4.3.38

* Wed Aug 16 2023 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.37-1
- Bump version to 1.4.3.37-1
- Resolves: rhbz#2224505 - Paged search impacts performance
- Resolves: rhbz#2220890 - healthcheck tool needs to be updated for new default password storage scheme
- Resolves: rhbz#2218235 - python3-lib389: Python tarfile extraction needs change to avoid a warning
- Resolves: rhbz#2210491 - dtablesize being set to soft maxfiledescriptor limit causing massive slowdown in large environments.
- Resolves: rhbz#2149967 - SELinux labeling for dirsrv files seen during ipa install/uninstall should be moved to DEBUG

* Tue Jul 11 2023 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.36-2
- Bump version to 1.4.3.36-2
- Resolves: rhbz#2220890 - healthcheck tool needs to be updated for new default password storage scheme

* Wed Jun 14 2023 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.36-1
- Bump version to 1.4.3.36-1
- Resolves: rhbz#2188628 - Rebase 389-ds-base in RHEL 8.9 to 1.4.3.36

* Mon May 22 2023 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.35-1
- Bump version to 1.4.3.35-1
- Resolves: rhbz#2188628 - Rebase 389-ds-base in RHEL 8.9 to 1.4.3.35

* Tue Nov 15 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.32-1
- Bump version to 1.4.3.32-1
- Resolves: Bug 2098138 - broken nsslapd-subtree-rename-switch option in rhds11
- Resolves: Bug 2119063 - entryuuid fixup tasks fails because entryUUID is not mutable
- Resolves: Bug 2136610 - [RFE] Add 'cn' attribute to IPA audit logs
- Resolves: Bug 2142638 - pam mutex lock causing high etimes, affecting red hat internal sso
- Resolves: Bug 2096795 - [RFE] Support ECDSA private keys for TLS
* Fri May 14 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-1
- Bump version to 1.4.3.23-1
- Resolves: Bug 1947044 - Rebase 389 DS with 389-ds-base-1.4.3.23 for RHEL 8.5
- Resolves: Bug 1850664 - RFE - Add an option for the Retro Changelog to ignore some attributes
- Resolves: Bug 1903221 - Memory leak in 389ds backend (Minor)
- Resolves: Bug 1898541 - Changelog cache can upload updates from a wrong starting point (CSN)
- Resolves: Bug 1889562 - client psearch with multiple threads hangs if nsslapd-maxthreadsperconn is under sized
- Resolves: Bug 1924848 - Negative wtime on ldapcompare
- Resolves: Bug 1895460 - RFE - Log an additional message if the server certificate nickname doesn't match nsSSLPersonalitySSL value
- Resolves: Bug 1897614 - Performance search rate: change entry cache monitor to recursive pthread mutex
- Resolves: Bug 1939607 - hang because of incorrect accounting of readers in vattr rwlock
- Resolves: Bug 1626633 - [RFE] DS - Update the password policy to support a Temporary Password with expiration
- Resolves: Bug 1952804 - CVE-2021-3514 389-ds:1.4/389-ds-base: sync_repl NULL pointer dereference in sync_create_state_control()