import 389-ds-base-1.4.3.23-10.module+el8.5.0+12398+47000435
This commit is contained in: parent 924c45b0af, commit 8a7f112eaf
@@ -1,2 +1,3 @@
-90cda7aea8d8644eea5a2af28c72350dd915db34 SOURCES/389-ds-base-1.4.3.16.tar.bz2
+c69c175a2f27053dffbfefac9c84ff16c7ff4cbf SOURCES/389-ds-base-1.4.3.23.tar.bz2
 9e06b5cc57fd185379d007696da153893cf73e30 SOURCES/jemalloc-5.2.1.tar.bz2
+22b1ef11852864027e184bb4bee56286b855b703 SOURCES/vendor-1.4.3.23-2.tar.gz
.gitignore (vendored, 3 changes)
@@ -1,2 +1,3 @@
-SOURCES/389-ds-base-1.4.3.16.tar.bz2
+SOURCES/389-ds-base-1.4.3.23.tar.bz2
 SOURCES/jemalloc-5.2.1.tar.bz2
+SOURCES/vendor-1.4.3.23-2.tar.gz
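The paired hunks above follow the usual dist-git convention: the metadata file maps a SHA-1 checksum to each tarball under SOURCES/, while .gitignore keeps those tarballs out of the git tree. A minimal, hypothetical sketch of verifying downloaded sources against those checksums (the file names and digests come from the hunks above; the verifier itself is not part of this commit):

    import hashlib

    # Checksum -> path pairs as recorded in the new metadata hunk above.
    EXPECTED = {
        "SOURCES/389-ds-base-1.4.3.23.tar.bz2": "c69c175a2f27053dffbfefac9c84ff16c7ff4cbf",
        "SOURCES/jemalloc-5.2.1.tar.bz2": "9e06b5cc57fd185379d007696da153893cf73e30",
        "SOURCES/vendor-1.4.3.23-2.tar.gz": "22b1ef11852864027e184bb4bee56286b855b703",
    }

    def sha1_matches(path: str, expected: str) -> bool:
        """Return True if the file's SHA-1 digest equals the recorded checksum."""
        h = hashlib.sha1()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(65536), b""):
                h.update(chunk)
        return h.hexdigest() == expected

    if __name__ == "__main__":
        for path, digest in EXPECTED.items():
            print(path, "OK" if sha1_matches(path, digest) else "MISMATCH")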
@@ -1,159 +0,0 @@
From 81dcaf1c37c2de24c46672df8d4f968c2fb40a6e Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 11 Nov 2020 08:59:18 -0500
Subject: [PATCH 1/3] Issue 4383 - Do not normalize escaped spaces in a DN

Bug Description:  Adding an entry with an escaped leading space leads to many
                  problems.  Mainly id2entry can get corrupted during an
                  import of such an entry, and the entryrdn index is not
                  updated correctly

Fix Description:  In slapi_dn_normalize_ext() leave an escaped space intact.

Relates: https://github.com/389ds/389-ds-base/issues/4383

Reviewed by: firstyear, progier, and tbordaz (Thanks!!!)
---
 .../tests/suites/syntax/acceptance_test.py | 75 ++++++++++++++++++-
 ldap/servers/slapd/dn.c                    |  8 +-
 2 files changed, 77 insertions(+), 6 deletions(-)

diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py
index 543718689..7939a99a7 100644
--- a/dirsrvtests/tests/suites/syntax/acceptance_test.py
+++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py
@@ -1,5 +1,5 @@
 # --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
 # All rights reserved.
 #
 # License: GPL (version 3 or any later version).
@@ -7,13 +7,12 @@
 # --- END COPYRIGHT BLOCK ---
 
 import ldap
-import logging
 import pytest
 import os
 from lib389.schema import Schema
 from lib389.config import Config
 from lib389.idm.user import UserAccounts
-from lib389.idm.group import Groups
+from lib389.idm.group import Group, Groups
 from lib389._constants import DEFAULT_SUFFIX
 from lib389.topologies import log, topology_st as topo
 
@@ -127,7 +126,7 @@ def test_invalid_dn_syntax_crash(topo):
         4. Success
     """
 
-    # Create group
+    # Create group
     groups = Groups(topo.standalone, DEFAULT_SUFFIX)
     group = groups.create(properties={'cn': ' test'})
 
@@ -145,6 +144,74 @@ def test_invalid_dn_syntax_crash(topo):
     groups.list()
 
 
+@pytest.mark.parametrize("props, rawdn", [
+                         ({'cn': ' leadingSpace'}, "cn=\\20leadingSpace,ou=Groups,dc=example,dc=com"),
+                         ({'cn': 'trailingSpace '}, "cn=trailingSpace\\20,ou=Groups,dc=example,dc=com")])
+def test_dn_syntax_spaces_delete(topo, props, rawdn):
+    """Test that an entry with a space as the first character in the DN can be
+    deleted without error.  We also want to make sure the indexes are properly
+    updated by repeatedly adding and deleting the entry, and that the entry cache
+    is properly maintained.
+
+    :id: b993f37c-c2b0-4312-992c-a9048ff98965
+    :parametrized: yes
+    :setup: Standalone Instance
+    :steps:
+        1. Create a group with a DN that has a space as the first/last
+           character.
+        2. Delete group
+        3. Add group
+        4. Modify group
+        5. Restart server and modify entry
+        6. Delete group
+        7. Add group back
+        8. Delete group using specific DN
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+        5. Success
+        6. Success
+        7. Success
+        8. Success
+    """
+
+    # Create group
+    groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+    group = groups.create(properties=props.copy())
+
+    # Delete group (verifies DN/RDN parsing works and cache is correct)
+    group.delete()
+
+    # Add group again (verifies entryrdn index was properly updated)
+    groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+    group = groups.create(properties=props.copy())
+
+    # Modify the group (verifies dn/rdn parsing is correct)
+    group.replace('description', 'escaped space group')
+
+    # Restart the server.  This will pull the entry from the database and
+    # convert it into a cache entry, which is different than how a client
+    # first adds an entry and is put into the cache before being written to
+    # disk.
+    topo.standalone.restart()
+
+    # Make sure we can modify the entry (verifies cache entry was created
+    # correctly)
+    group.replace('description', 'escaped space group after restart')
+
+    # Make sure it can still be deleted (verifies cache again).
+    group.delete()
+
+    # Add it back so we can delete it using a specific DN (sanity test to verify
+    # another DN/RDN parsing variation).
+    groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+    group = groups.create(properties=props.copy())
+    group = Group(topo.standalone, dn=rawdn)
+    group.delete()
+
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c
index 2af3f38fc..3980b897f 100644
--- a/ldap/servers/slapd/dn.c
+++ b/ldap/servers/slapd/dn.c
@@ -894,8 +894,7 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len)
                     s++;
                 }
             }
-        } else if (s + 2 < ends &&
-                   isxdigit(*(s + 1)) && isxdigit(*(s + 2))) {
+        } else if (s + 2 < ends && isxdigit(*(s + 1)) && isxdigit(*(s + 2))) {
             /* esc hexpair ==> real character */
             int n = slapi_hexchar2int(*(s + 1));
             int n2 = slapi_hexchar2int(*(s + 2));
@@ -903,6 +902,11 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len)
             if (n == 0) { /* don't change \00 */
                 *d++ = *++s;
                 *d++ = *++s;
+            } else if (n == 32) { /* leave \20 (space) intact */
+                *d++ = *s;
+                *d++ = *++s;
+                *d++ = *++s;
+                s++;
             } else {
                 *d++ = n;
                 s += 3;
-- 
2.26.2
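The dn.c hunk above special-cases two escape sequences during DN normalization: \00 was already left untouched, and the fix now also leaves \20 (an escaped space) intact instead of decoding it to a literal space. A rough Python model of just that decoding rule, as an illustration of the logic rather than the actual slapi_dn_normalize_ext() implementation:

    HEX = set("0123456789abcdefABCDEF")

    def decode_dn_hexpairs(value: str) -> str:
        """Decode \\XX escapes in a DN fragment, but keep \\00 and \\20
        escaped, mirroring the rule added in the hunk above."""
        out, i = [], 0
        while i < len(value):
            if (value[i] == "\\" and i + 2 < len(value)
                    and value[i + 1] in HEX and value[i + 2] in HEX):
                code = int(value[i + 1:i + 3], 16)
                if code in (0x00, 0x20):      # don't change \00; leave \20 intact
                    out.append(value[i:i + 3])
                else:                          # esc hexpair ==> real character
                    out.append(chr(code))
                i += 3
            else:
                out.append(value[i])
                i += 1
        return "".join(out)

    assert decode_dn_hexpairs("cn=\\20leadingSpace") == "cn=\\20leadingSpace"
    assert decode_dn_hexpairs("cn=\\41bc") == "cn=Abc"

With the fix, "cn=\20leadingSpace" keeps its escape through normalization, so id2entry and the entryrdn index see the same key on import, add, and delete.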
File diff suppressed because it is too large.
@@ -0,0 +1,322 @@
From 1e1c2b23c35282481628af7e971ac683da334502 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Tue, 27 Apr 2021 17:00:15 +0100
Subject: [PATCH 02/12] Issue 4701 - RFE - Exclude attributes from retro
 changelog (#4723)

Description: When the retro changelog plugin is enabled it writes the
             added/modified values to the "cn-changelog" suffix. In
             some cases an entries attribute values can be of a
             sensitive nature and should be excluded. This RFE adds
             functionality that will allow an admin exclude certain
             attributes from the retro changelog DB.

Relates: https://github.com/389ds/389-ds-base/issues/4701

Reviewed by: mreynolds389, droideck (Thanks folks)
---
 .../tests/suites/retrocl/basic_test.py | 292 ++++++++++++++++++
 1 file changed, 292 insertions(+)
 create mode 100644 dirsrvtests/tests/suites/retrocl/basic_test.py

diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
new file mode 100644
index 000000000..112c73cb9
--- /dev/null
+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
@@ -0,0 +1,292 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import logging
+import ldap
+import time
+import pytest
+from lib389.topologies import topology_st
+from lib389.plugins import RetroChangelogPlugin
+from lib389._constants import *
+from lib389.utils import *
+from lib389.tasks import *
+from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
+from lib389.cli_base.dsrc import dsrc_arg_concat
+from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add
+from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
+
+pytestmark = pytest.mark.tier1
+
+USER1_DN = 'uid=user1,ou=people,'+ DEFAULT_SUFFIX
+USER2_DN = 'uid=user2,ou=people,'+ DEFAULT_SUFFIX
+USER_PW = 'password'
+ATTR_HOMEPHONE = 'homePhone'
+ATTR_CARLICENSE = 'carLicense'
+
+log = logging.getLogger(__name__)
+
+def test_retrocl_exclude_attr_add(topology_st):
+    """ Test exclude attribute feature of the retrocl plugin for add operation
+
+    :id: 3481650f-2070-45ef-9600-2500cfc51559
+
+    :setup: Standalone instance
+
+    :steps:
+        1. Enable dynamic plugins
+        2. Confige retro changelog plugin
+        3. Add an entry
+        4. Ensure entry attrs are in the changelog
+        5. Exclude an attr
+        6. Add another entry
+        7. Ensure excluded attr is not in the changelog
+
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+        5. Success
+        6. Success
+        7. Success
+    """
+
+    st = topology_st.standalone
+
+    log.info('Enable dynamic plugins')
+    try:
+        st.config.set('nsslapd-dynamic-plugins', 'on')
+    except ldap.LDAPError as e:
+        ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc'])
+        assert False
+
+    log.info('Configure retrocl plugin')
+    rcl = RetroChangelogPlugin(st)
+    rcl.disable()
+    rcl.enable()
+    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
+
+    log.info('Restarting instance')
+    try:
+        st.restart()
+    except ldap.LDAPError as e:
+        ldap.error('Failed to restart instance ' + e.args[0]['desc'])
+        assert False
+
+    users = UserAccounts(st, DEFAULT_SUFFIX)
+
+    log.info('Adding user1')
+    try:
+        user1 = users.create(properties={
+            'sn': '1',
+            'cn': 'user 1',
+            'uid': 'user1',
+            'uidNumber': '11',
+            'gidNumber': '111',
+            'givenname': 'user1',
+            'homePhone': '0861234567',
+            'carLicense': '131D16674',
+            'mail': 'user1@whereever.com',
+            'homeDirectory': '/home/user1',
+            'userpassword': USER_PW})
+    except ldap.ALREADY_EXISTS:
+        pass
+    except ldap.LDAPError as e:
+        log.error("Failed to add user1")
+
+    log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
+    try:
+        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+    except ldap.LDAPError as e:
+        log.fatal("Changelog search failed, error: " +str(e))
+        assert False
+    assert len(cllist) > 0
+    if cllist[0].hasAttr('changes'):
+        clstr = (cllist[0].getValue('changes')).decode()
+        assert ATTR_HOMEPHONE in clstr
+        assert ATTR_CARLICENSE in clstr
+
+    log.info('Excluding attribute ' + ATTR_HOMEPHONE)
+    args = FakeArgs()
+    args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
+    args.instance = 'standalone1'
+    args.basedn = None
+    args.binddn = None
+    args.starttls = False
+    args.pwdfile = None
+    args.bindpw = None
+    args.prompt = False
+    args.exclude_attrs = ATTR_HOMEPHONE
+    args.func = retrochangelog_add
+    dsrc_inst = dsrc_arg_concat(args, None)
+    inst = connect_instance(dsrc_inst, False, args)
+    result = args.func(inst, None, log, args)
+    disconnect_instance(inst)
+    assert result is None
+
+    log.info("5s delay for retrocl plugin to restart")
+    time.sleep(5)
+
+    log.info('Adding user2')
+    try:
+        user2 = users.create(properties={
+            'sn': '2',
+            'cn': 'user 2',
+            'uid': 'user2',
+            'uidNumber': '22',
+            'gidNumber': '222',
+            'givenname': 'user2',
+            'homePhone': '0879088363',
+            'carLicense': '04WX11038',
+            'mail': 'user2@whereever.com',
+            'homeDirectory': '/home/user2',
+            'userpassword': USER_PW})
+    except ldap.ALREADY_EXISTS:
+        pass
+    except ldap.LDAPError as e:
+        log.error("Failed to add user2")
+
+    log.info('Verify homePhone attr is not in the changelog changestring')
+    try:
+        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN)
+        assert len(cllist) > 0
+        if cllist[0].hasAttr('changes'):
+            clstr = (cllist[0].getValue('changes')).decode()
+            assert ATTR_HOMEPHONE not in clstr
+            assert ATTR_CARLICENSE in clstr
+    except ldap.LDAPError as e:
+        log.fatal("Changelog search failed, error: " +str(e))
+        assert False
+
+def test_retrocl_exclude_attr_mod(topology_st):
+    """ Test exclude attribute feature of the retrocl plugin for mod operation
+
+    :id: f6bef689-685b-4f86-a98d-f7e6b1fcada3
+
+    :setup: Standalone instance
+
+    :steps:
+        1. Enable dynamic plugins
+        2. Confige retro changelog plugin
+        3. Add user1 entry
+        4. Ensure entry attrs are in the changelog
+        5. Exclude an attr
+        6. Modify user1 entry
+        7. Ensure excluded attr is not in the changelog
+
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+        5. Success
+        6. Success
+        7. Success
+    """
+
+    st = topology_st.standalone
+
+    log.info('Enable dynamic plugins')
+    try:
+        st.config.set('nsslapd-dynamic-plugins', 'on')
+    except ldap.LDAPError as e:
+        ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc'])
+        assert False
+
+    log.info('Configure retrocl plugin')
+    rcl = RetroChangelogPlugin(st)
+    rcl.disable()
+    rcl.enable()
+    rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
+
+    log.info('Restarting instance')
+    try:
+        st.restart()
+    except ldap.LDAPError as e:
+        ldap.error('Failed to restart instance ' + e.args[0]['desc'])
+        assert False
+
+    users = UserAccounts(st, DEFAULT_SUFFIX)
+
+    log.info('Adding user1')
+    try:
+        user1 = users.create(properties={
+            'sn': '1',
+            'cn': 'user 1',
+            'uid': 'user1',
+            'uidNumber': '11',
+            'gidNumber': '111',
+            'givenname': 'user1',
+            'homePhone': '0861234567',
+            'carLicense': '131D16674',
+            'mail': 'user1@whereever.com',
+            'homeDirectory': '/home/user1',
+            'userpassword': USER_PW})
+    except ldap.ALREADY_EXISTS:
+        pass
+    except ldap.LDAPError as e:
+        log.error("Failed to add user1")
+
+    log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
+    try:
+        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+    except ldap.LDAPError as e:
+        log.fatal("Changelog search failed, error: " +str(e))
+        assert False
+    assert len(cllist) > 0
+    if cllist[0].hasAttr('changes'):
+        clstr = (cllist[0].getValue('changes')).decode()
+        assert ATTR_HOMEPHONE in clstr
+        assert ATTR_CARLICENSE in clstr
+
+    log.info('Excluding attribute ' + ATTR_CARLICENSE)
+    args = FakeArgs()
+    args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
+    args.instance = 'standalone1'
+    args.basedn = None
+    args.binddn = None
+    args.starttls = False
+    args.pwdfile = None
+    args.bindpw = None
+    args.prompt = False
+    args.exclude_attrs = ATTR_CARLICENSE
+    args.func = retrochangelog_add
+    dsrc_inst = dsrc_arg_concat(args, None)
+    inst = connect_instance(dsrc_inst, False, args)
+    result = args.func(inst, None, log, args)
+    disconnect_instance(inst)
+    assert result is None
+
+    log.info("5s delay for retrocl plugin to restart")
+    time.sleep(5)
+
+    log.info('Modify user1 carLicense attribute')
+    try:
+        st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")])
+    except ldap.LDAPError as e:
+        log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc'])
+        assert False
+
+    log.info('Verify carLicense attr is not in the changelog changestring')
+    try:
+        cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+        assert len(cllist) > 0
+        # There will be 2 entries in the changelog for this user, we are only
+        #interested in the second one, the modify operation.
+        if cllist[1].hasAttr('changes'):
+            clstr = (cllist[1].getValue('changes')).decode()
+            assert ATTR_CARLICENSE not in clstr
+    except ldap.LDAPError as e:
+        log.fatal("Changelog search failed, error: " +str(e))
+        assert False
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
-- 
2.26.3
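The test above drives the exclusion through the dsconf plumbing (FakeArgs plus retrochangelog_add). The net effect is a configuration change on the Retro Changelog plugin entry under cn=plugins,cn=config. A hedged python-ldap sketch of the same idea; the attribute name nsslapd-exclude-attrs is my reading of this RFE and should be checked against the shipped plugin schema:

    import ldap

    RETROCL_PLUGIN_DN = "cn=Retro Changelog Plugin,cn=plugins,cn=config"

    def exclude_retrocl_attr(uri, bind_dn, bind_pw, attr):
        """Ask the retro changelog plugin to stop logging `attr`.
        Assumes the nsslapd-exclude-attrs attribute added by this RFE."""
        conn = ldap.initialize(uri)
        conn.simple_bind_s(bind_dn, bind_pw)
        try:
            conn.modify_s(RETROCL_PLUGIN_DN,
                          [(ldap.MOD_REPLACE, "nsslapd-exclude-attrs",
                            attr.encode("utf-8"))])
        finally:
            conn.unbind_s()

    # e.g. exclude_retrocl_attr("ldap://localhost:389",
    #                           "cn=Directory Manager", "password", "homePhone")

With dynamic plugins enabled, the change takes effect after the plugin restarts, which is why the test sleeps briefly before adding the second user.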
@@ -1,232 +0,0 @@
From 29c9e1c3c760f0941b022d45d14c248e9ceb9738 Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Tue, 3 Nov 2020 12:18:50 +0100
Subject: [PATCH 2/3] ticket 2058: Add keep alive entry after on-line
 initialization - second version (#4399)

Bug description:
Keep alive entry is not created on target master after on line initialization,
and its RUVelement stays empty until a direct update is issued on that master

Fix description:
The patch allows a consumer (configured as a master) to create (if it did not
exist before) the consumer's keep alive entry. It creates it at the end of a
replication session at a time we are sure the changelog exists and will not
be reset. It allows a consumer to have RUVelement with csn in the RUV at the
first incoming replication session.

That is basically lkrispen's proposal with an associated pytest testcase

Second version changes:
- moved the testcase to suites/replication/regression_test.py
- set up the topology from a 2 master topology then
  reinitialized the replicas from an ldif without replication metadata
  rather than using the cli.
- search for keepalive entries using search_s instead of getEntry
- add a comment about keep alive entries purpose

last commit:
- wait that ruv are in sync before checking keep alive entries

Reviewed by: droideck, Firstyear

Platforms tested: F32

relates: #2058
---
 .../suites/replication/regression_test.py    | 130 ++++++++++++++++++
 .../plugins/replication/repl5_replica.c      |  14 ++
 ldap/servers/plugins/replication/repl_extop.c |   4 +
 3 files changed, 148 insertions(+)

diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
index 844d762b9..14b9d6a44 100644
--- a/dirsrvtests/tests/suites/replication/regression_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_test.py
@@ -98,6 +98,30 @@ def _move_ruv(ldif_file):
         for dn, entry in ldif_list:
             ldif_writer.unparse(dn, entry)
 
+def _remove_replication_data(ldif_file):
+    """ Remove the replication data from ldif file:
+        db2lif without -r includes some of the replica data like
+         - nsUniqueId
+         - keepalive entries
+        This function filters the ldif fil to remove these data
+    """
+
+    with open(ldif_file) as f:
+        parser = ldif.LDIFRecordList(f)
+        parser.parse()
+
+        ldif_list = parser.all_records
+        # Iterate on a copy of the ldif entry list
+        for dn, entry in ldif_list[:]:
+            if dn.startswith('cn=repl keep alive'):
+                ldif_list.remove((dn,entry))
+            else:
+                entry.pop('nsUniqueId')
+    with open(ldif_file, 'w') as f:
+        ldif_writer = ldif.LDIFWriter(f)
+        for dn, entry in ldif_list:
+            ldif_writer.unparse(dn, entry)
+
 
 @pytest.fixture(scope="module")
 def topo_with_sigkill(request):
@@ -897,6 +921,112 @@ def test_moving_entry_make_online_init_fail(topology_m2):
     assert len(m1entries) == len(m2entries)
 
 
+def get_keepalive_entries(instance,replica):
+    # Returns the keep alive entries that exists with the suffix of the server instance
+    try:
+        entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL,
+                    "(&(objectclass=ldapsubentry)(cn=repl keep alive*))",
+                    ['cn', 'nsUniqueId', 'modifierTimestamp'])
+    except ldap.LDAPError as e:
+        log.fatal('Failed to retrieve keepalive entry (%s) on instance %s: error %s' % (dn, instance, str(e)))
+        assert False
+    # No error, so lets log the keepalive entries
+    if log.isEnabledFor(logging.DEBUG):
+        for ret in entries:
+            log.debug("Found keepalive entry:\n"+str(ret));
+    return entries
+
+def verify_keepalive_entries(topo, expected):
+    #Check that keep alive entries exists (or not exists) for every masters on every masters
+    #Note: The testing method is quite basic: counting that there is one keepalive entry per master.
+    # that is ok for simple test cases like test_online_init_should_create_keepalive_entries but
+    # not for the general case as keep alive associated with no more existing master may exists
+    # (for example after: db2ldif / demote a master / ldif2db / init other masters)
+    # ==> if the function is somehow pushed in lib389, a check better than simply counting the entries
+    # should be done.
+    for masterId in topo.ms:
+        master=topo.ms[masterId]
+        for replica in Replicas(master).list():
+            if (replica.get_role() != ReplicaRole.MASTER):
+                continue
+            replica_info = f'master: {masterId} RID: {replica.get_rid()} suffix: {replica.get_suffix()}'
+            log.debug(f'Checking keepAliveEntries on {replica_info}')
+            keepaliveEntries = get_keepalive_entries(master, replica);
+            expectedCount = len(topo.ms) if expected else 0
+            foundCount = len(keepaliveEntries)
+            if (foundCount == expectedCount):
+                log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.')
+            else:
+                log.error(f'{foundCount} Keepalive entries are found '
+                          f'while {expectedCount} were expected on {replica_info}.')
+                assert False
+
+
+def test_online_init_should_create_keepalive_entries(topo_m2):
+    """Check that keep alive entries are created when initializinf a master from another one
+
+    :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
+    :setup: Two masters replication setup
+    :steps:
+        1. Generate ldif without replication data
+        2  Init both masters from that ldif
+        3  Check that keep alive entries does not exists
+        4  Perform on line init of master2 from master1
+        5  Check that keep alive entries exists
+    :expectedresults:
+        1. No error while generating ldif
+        2. No error while importing the ldif file
+        3. No keepalive entrie should exists on any masters
+        4. No error while initializing master2
+        5. All keepalive entries should exist on every masters
+
+    """
+
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    m1 = topo_m2.ms["master1"]
+    m2 = topo_m2.ms["master2"]
+    # Step 1: Generate ldif without replication data
+    m1.stop()
+    m2.stop()
+    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+    m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+               excludeSuffixes=None, repl_data=False,
+               outputfile=ldif_file, encrypt=False)
+    # Remove replication metadata that are still in the ldif
+    _remove_replication_data(ldif_file)
+
+    # Step 2: Init both masters from that ldif
+    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+    m1.start()
+    m2.start()
+
+    """ Replica state is now as if CLI setup has been done using:
+        dsconf master1 replication enable --suffix "${SUFFIX}" --role master
+        dsconf master2 replication enable --suffix "${SUFFIX}" --role master
+        dsconf master1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+        dsconf master2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+        dsconf master1 repl-agmt create --suffix "${SUFFIX}"
+        dsconf master2 repl-agmt create --suffix "${SUFFIX}"
+    """
+
+    # Step 3: No keepalive entrie should exists on any masters
+    verify_keepalive_entries(topo_m2, False)
+
+    # Step 4: Perform on line init of master2 from master1
+    agmt = Agreements(m1).list()[0]
+    agmt.begin_reinit()
+    (done, error) = agmt.wait_reinit()
+    assert done is True
+    assert error is False
+
+    # Step 5: All keepalive entries should exists on every masters
+    # Verify the keep alive entry once replication is in sync
+    # (that is the step that fails when bug is not fixed)
+    repl.wait_for_ruv(m2,m1)
+    verify_keepalive_entries(topo_m2, True);
+
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index f01782330..f0ea0f8ef 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -373,6 +373,20 @@ replica_destroy(void **arg)
     slapi_ch_free((void **)arg);
 }
 
+/******************************************************************************
+ ******************** REPLICATION KEEP ALIVE ENTRIES **************************
+ ******************************************************************************
+ * They are subentries of the replicated suffix and there is one per master. *
+ * These entries exist only to trigger a change that get replicated over the *
+ * topology.                                                                 *
+ * Their main purpose is to generate records in the changelog and they are   *
+ * updated from time to time by fractional replication to insure that at     *
+ * least a change must be replicated by FR after a great number of not       *
+ * replicated changes are found in the changelog. The interest is that the   *
+ * fractional RUV get then updated so less changes need to be walked in the  *
+ * changelog when searching for the first change to send                     *
+ ******************************************************************************/
+
 #define KEEP_ALIVE_ATTR "keepalivetimestamp"
 #define KEEP_ALIVE_ENTRY "repl keep alive"
 #define KEEP_ALIVE_DN_FORMAT "cn=%s %d,%s"
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
index 14c8e0bcc..af486f730 100644
--- a/ldap/servers/plugins/replication/repl_extop.c
+++ b/ldap/servers/plugins/replication/repl_extop.c
@@ -1173,6 +1173,10 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
          */
         if (cl5GetState() == CL5_STATE_OPEN) {
             replica_log_ruv_elements(r);
+            /* now that the changelog is open and started, we can alos cretae the
+             * keep alive entry without risk that db and cl will not match
+             */
+            replica_subentry_check(replica_get_root(r), replica_get_rid(r));
         }
 
         /* ONREPL code that dealt with new RUV, etc was moved into the code
-- 
2.26.2
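The patch's verify_keepalive_entries() simply counts one "cn=repl keep alive <rid>" subentry per master under the replicated suffix. A standalone python-ldap sketch of the same check, usable against any instance (the filter matches the KEEP_ALIVE_ENTRY naming shown in repl5_replica.c above; suffix and credentials are parameters you supply):

    import ldap

    def count_keepalive_entries(uri, bind_dn, bind_pw, suffix):
        """Count 'cn=repl keep alive ...' subentries directly under the suffix."""
        conn = ldap.initialize(uri)
        conn.simple_bind_s(bind_dn, bind_pw)
        try:
            entries = conn.search_s(
                suffix, ldap.SCOPE_ONELEVEL,
                "(&(objectclass=ldapsubentry)(cn=repl keep alive*))",
                ["cn"])
            return len(entries)
        finally:
            conn.unbind_s()

    # After an online init in a two-master topology, each master
    # should eventually report 2 once replication is back in sync.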
SOURCES/0003-Ticket-137-Implement-EntryUUID-plugin.patch (new file, 5307 lines; file diff suppressed because it is too large.)
@@ -1,513 +0,0 @@
From e202c62c3b4c92163d2de9f3da9a9f3efc81e4b8 Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Thu, 12 Nov 2020 18:50:04 +0100
Subject: [PATCH 3/3] do not add referrals for masters with different data
 generation #2054 (#4427)

Bug description:
The problem is that some operation mandatory in the usual cases are
also performed when replication cannot take place because the
database set are differents (i.e: RUV generation ids are different)

One of the issue is that the csn generator state is updated when
starting a replication session (it is a problem when trying to
reset the time skew, as freshly reinstalled replicas get infected
by the old ones)

A second issue is that the RUV got updated when ending a replication session
(which may add replica that does not share the same data set,
then update operations on consumer retun referrals towards wrong masters

Fix description:
The fix checks the RUVs generation id before updating the csn generator
and before updating the RUV.

Reviewed by: mreynolds
             firstyear
             vashirov

Platforms tested: F32
---
 .../suites/replication/regression_test.py    | 290 ++++++++++++++++++
 ldap/servers/plugins/replication/repl5.h     |   1 +
 .../plugins/replication/repl5_inc_protocol.c |  20 +-
 .../plugins/replication/repl5_replica.c      |  39 ++-
 src/lib389/lib389/dseldif.py                 |  37 +++
 5 files changed, 368 insertions(+), 19 deletions(-)

diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
index 14b9d6a44..a72af6b30 100644
--- a/dirsrvtests/tests/suites/replication/regression_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_test.py
@@ -13,6 +13,7 @@ from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts
 from lib389.pwpolicy import PwPolicyManager
 from lib389.utils import *
 from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db, topology_i2 as topo_i2
+from lib389.topologies import topology_m2c2 as topo_m2c2
 from lib389._constants import *
 from lib389.idm.organizationalunit import OrganizationalUnits
 from lib389.idm.user import UserAccount
@@ -22,6 +23,7 @@ from lib389.idm.directorymanager import DirectoryManager
 from lib389.replica import Replicas, ReplicationManager, Changelog5, BootstrapReplicationManager
 from lib389.agreement import Agreements
 from lib389 import pid_from_file
+from lib389.dseldif import *
 
 
 pytestmark = pytest.mark.tier1
@@ -1027,6 +1029,294 @@ def test_online_init_should_create_keepalive_entries(topo_m2):
     verify_keepalive_entries(topo_m2, True);
 
 
+def get_agreement(agmts, consumer):
+    # Get agreement towards consumer among the agremment list
+    for agmt in agmts.list():
+        if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and
+            agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host):
+            return agmt
+    return None;
+
+
+def test_ruv_url_not_added_if_different_uuid(topo_m2c2):
+    """Check that RUV url is not updated if RUV generation uuid are different
+
+    :id: 7cc30a4e-0ffd-4758-8f00-e500279af344
+    :setup: Two masters + two consumers replication setup
+    :steps:
+        1. Generate ldif without replication data
+        2. Init both masters from that ldif
+           (to clear the ruvs and generates different generation uuid)
+        3. Perform on line init from master1 to consumer1
+           and from master2 to consumer2
+        4. Perform update on both masters
+        5. Check that c1 RUV does not contains URL towards m2
+        6. Check that c2 RUV does contains URL towards m2
+        7. Perform on line init from master1 to master2
+        8. Perform update on master2
+        9. Check that c1 RUV does contains URL towards m2
+    :expectedresults:
+        1. No error while generating ldif
+        2. No error while importing the ldif file
+        3. No error and Initialization done.
+        4. No error
+        5. master2 replicaid should not be in the consumer1 RUV
+        6. master2 replicaid should be in the consumer2 RUV
+        7. No error and Initialization done.
+        8. No error
+        9. master2 replicaid should be in the consumer1 RUV
+
+    """
+
+    # Variables initialization
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+
+    m1 = topo_m2c2.ms["master1"]
+    m2 = topo_m2c2.ms["master2"]
+    c1 = topo_m2c2.cs["consumer1"]
+    c2 = topo_m2c2.cs["consumer2"]
+
+    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
+    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
+    replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
+    replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)
+
+    replicid_m2 = replica_m2.get_rid()
+
+    agmts_m1 = Agreements(m1, replica_m1.dn)
+    agmts_m2 = Agreements(m2, replica_m2.dn)
+
+    m1_m2 = get_agreement(agmts_m1, m2)
+    m1_c1 = get_agreement(agmts_m1, c1)
+    m1_c2 = get_agreement(agmts_m1, c2)
+    m2_m1 = get_agreement(agmts_m2, m1)
+    m2_c1 = get_agreement(agmts_m2, c1)
+    m2_c2 = get_agreement(agmts_m2, c2)
+
+    # Step 1: Generate ldif without replication data
+    m1.stop()
+    m2.stop()
+    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+    m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+               excludeSuffixes=None, repl_data=False,
+               outputfile=ldif_file, encrypt=False)
+    # Remove replication metadata that are still in the ldif
+    # _remove_replication_data(ldif_file)
+
+    # Step 2: Init both masters from that ldif
+    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+    m1.start()
+    m2.start()
+
+    # Step 3: Perform on line init from master1 to consumer1
+    #          and from master2 to consumer2
+    m1_c1.begin_reinit()
+    m2_c2.begin_reinit()
+    (done, error) = m1_c1.wait_reinit()
+    assert done is True
+    assert error is False
+    (done, error) = m2_c2.wait_reinit()
+    assert done is True
+    assert error is False
+
+    # Step 4: Perform update on both masters
+    repl.test_replication(m1, c1)
+    repl.test_replication(m2, c2)
+
+    # Step 5: Check that c1 RUV does not contains URL towards m2
+    ruv = replica_c1.get_ruv()
+    log.debug(f"c1 RUV: {ruv}")
+    url=ruv._rid_url.get(replica_m2.get_rid())
+    if (url == None):
+        log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV");
+    else:
+        log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}");
+        log.error(f"URL for RID {replica_m2.get_rid()} found in RUV")
+        #Note: this assertion fails if issue 2054 is not fixed.
+        assert False
+
+    # Step 6: Check that c2 RUV does contains URL towards m2
+    ruv = replica_c2.get_ruv()
+    log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
+    url=ruv._rid_url.get(replica_m2.get_rid())
+    if (url == None):
+        log.error(f"No URL for RID {replica_m2.get_rid()} in RUV");
+        assert False
+    else:
+        log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}");
+
+
+    # Step 7: Perform on line init from master1 to master2
+    m1_m2.begin_reinit()
+    (done, error) = m1_m2.wait_reinit()
+    assert done is True
+    assert error is False
+
+    # Step 8: Perform update on master2
+    repl.test_replication(m2, c1)
+
+    # Step 9: Check that c1 RUV does contains URL towards m2
+    ruv = replica_c1.get_ruv()
+    log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
+    url=ruv._rid_url.get(replica_m2.get_rid())
+    if (url == None):
+        log.error(f"No URL for RID {replica_m2.get_rid()} in RUV");
+        assert False
+    else:
+        log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}");
+
+
+def test_csngen_state_not_updated_if_different_uuid(topo_m2c2):
+    """Check that csngen remote offset is not updated if RUV generation uuid are different
+
+    :id: 77694b8e-22ae-11eb-89b2-482ae39447e5
+    :setup: Two masters + two consumers replication setup
+    :steps:
+        1. Disable m1<->m2 agreement to avoid propagate timeSkew
+        2. Generate ldif without replication data
+        3. Increase time skew on master2
+        4. Init both masters from that ldif
+           (to clear the ruvs and generates different generation uuid)
+        5. Perform on line init from master1 to consumer1 and master2 to consumer2
+        6. Perform update on both masters
+        7: Check that c1 has no time skew
+        8: Check that c2 has time skew
+        9. Init master2 from master1
+        10. Perform update on master2
+        11. Check that c1 has time skew
+    :expectedresults:
+        1. No error
+        2. No error while generating ldif
+        3. No error
+        4. No error while importing the ldif file
+        5. No error and Initialization done.
+        6. No error
+        7. c1 time skew should be lesser than threshold
+        8. c2 time skew should be higher than threshold
+        9. No error and Initialization done.
+        10. No error
+        11. c1 time skew should be higher than threshold
+
+    """
+
+    # Variables initialization
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+
+    m1 = topo_m2c2.ms["master1"]
+    m2 = topo_m2c2.ms["master2"]
+    c1 = topo_m2c2.cs["consumer1"]
+    c2 = topo_m2c2.cs["consumer2"]
+
+    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
+    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
+    replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
+    replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)
+
+    replicid_m2 = replica_m2.get_rid()
+
+    agmts_m1 = Agreements(m1, replica_m1.dn)
+    agmts_m2 = Agreements(m2, replica_m2.dn)
+
+    m1_m2 = get_agreement(agmts_m1, m2)
+    m1_c1 = get_agreement(agmts_m1, c1)
+    m1_c2 = get_agreement(agmts_m1, c2)
+    m2_m1 = get_agreement(agmts_m2, m1)
+    m2_c1 = get_agreement(agmts_m2, c1)
+    m2_c2 = get_agreement(agmts_m2, c2)
+
+    # Step 1: Disable m1<->m2 agreement to avoid propagate timeSkew
+    m1_m2.pause()
+    m2_m1.pause()
+
+    # Step 2: Generate ldif without replication data
+    m1.stop()
+    m2.stop()
+    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+    m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+               excludeSuffixes=None, repl_data=False,
+               outputfile=ldif_file, encrypt=False)
+    # Remove replication metadata that are still in the ldif
+    # _remove_replication_data(ldif_file)
+
+    # Step 3: Increase time skew on master2
+    timeSkew=6*3600
+    # We can modify master2 time skew
+    # But the time skew on the consumer may be smaller
+    # depending on when the cnsgen generation time is updated
+    # and when first csn get replicated.
+    # Since we use timeSkew has threshold value to detect
+    # whether there are time skew or not,
+    # lets add a significative margin (longer than the test duration)
+    # to avoid any risk of erroneous failure
+    timeSkewMargin = 300
+    DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew+timeSkewMargin)
+
+    # Step 4: Init both masters from that ldif
+    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+    m1.start()
+    m2.start()
+
+    # Step 5: Perform on line init from master1 to consumer1
+    #          and from master2 to consumer2
+    m1_c1.begin_reinit()
+    m2_c2.begin_reinit()
+    (done, error) = m1_c1.wait_reinit()
+    assert done is True
+    assert error is False
+    (done, error) = m2_c2.wait_reinit()
+    assert done is True
+    assert error is False
+
+    # Step 6: Perform update on both masters
+    repl.test_replication(m1, c1)
+    repl.test_replication(m2, c2)
+
+    # Step 7: Check that c1 has no time skew
+    # Stop server to insure that dse.ldif is uptodate
+    c1.stop()
+    c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
+    c1_timeSkew = int(c1_nsState['time_skew'])
+    log.debug(f"c1 time skew: {c1_timeSkew}")
+    if (c1_timeSkew >= timeSkew):
+        log.error(f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}")
+        assert False
+    c1.start()
+
+    # Step 8: Check that c2 has time skew
+    # Stop server to insure that dse.ldif is uptodate
+    c2.stop()
+    c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0]
+    c2_timeSkew = int(c2_nsState['time_skew'])
+    log.debug(f"c2 time skew: {c2_timeSkew}")
+    if (c2_timeSkew < timeSkew):
+        log.error(f"c2 csngen state has not been synchronized with m2: time skew {c2_timeSkew}")
+        assert False
+    c2.start()
+
+    # Step 9: Perform on line init from master1 to master2
+    m1_c1.pause()
+    m1_m2.resume()
+    m1_m2.begin_reinit()
+    (done, error) = m1_m2.wait_reinit()
+    assert done is True
+    assert error is False
+
+    # Step 10: Perform update on master2
+    repl.test_replication(m2, c1)
+
+    # Step 11: Check that c1 has time skew
+    # Stop server to insure that dse.ldif is uptodate
+    c1.stop()
+    c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
+    c1_timeSkew = int(c1_nsState['time_skew'])
+    log.debug(f"c1 time skew: {c1_timeSkew}")
+    if (c1_timeSkew < timeSkew):
+        log.error(f"c1 csngen state has not been synchronized with m2: time skew {c1_timeSkew}")
+        assert False
+
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index b35f724c2..f1c596a3f 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -708,6 +708,7 @@ void replica_dump(Replica *r);
 void replica_set_enabled(Replica *r, PRBool enable);
 Replica *replica_get_replica_from_dn(const Slapi_DN *dn);
 Replica *replica_get_replica_from_root(const char *repl_root);
+int replica_check_generation(Replica *r, const RUV *remote_ruv);
 int replica_update_ruv(Replica *replica, const CSN *csn, const char *replica_purl);
 Replica *replica_get_replica_for_op(Slapi_PBlock *pb);
 /* the functions below manipulate replica hash */
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index 29b1fb073..af5e5897c 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -2161,26 +2161,12 @@ examine_update_vector(Private_Repl_Protocol *prp, RUV *remote_ruv)
     } else if (NULL == remote_ruv) {
         return_value = EXAMINE_RUV_PRISTINE_REPLICA;
     } else {
-        char *local_gen = NULL;
-        char *remote_gen = ruv_get_replica_generation(remote_ruv);
-        Object *local_ruv_obj;
-        RUV *local_ruv;
-
         PR_ASSERT(NULL != prp->replica);
-        local_ruv_obj = replica_get_ruv(prp->replica);
-        if (NULL != local_ruv_obj) {
-            local_ruv = (RUV *)object_get_data(local_ruv_obj);
-            PR_ASSERT(local_ruv);
-            local_gen = ruv_get_replica_generation(local_ruv);
-            object_release(local_ruv_obj);
-        }
-        if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) {
-            return_value = EXAMINE_RUV_GENERATION_MISMATCH;
-        } else {
+        if (replica_check_generation(prp->replica, remote_ruv)) {
             return_value = EXAMINE_RUV_OK;
+        } else {
+            return_value = EXAMINE_RUV_GENERATION_MISMATCH;
         }
-        slapi_ch_free((void **)&remote_gen);
-        slapi_ch_free((void **)&local_gen);
     }
     return return_value;
 }
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index f0ea0f8ef..7e56d6557 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -812,6 +812,36 @@ replica_set_ruv(Replica *r, RUV *ruv)
     replica_unlock(r->repl_lock);
 }
 
+/*
+ * Check if replica generation is the same than the remote ruv one
+ */
+int
+replica_check_generation(Replica *r, const RUV *remote_ruv)
+{
+    int return_value;
+    char *local_gen = NULL;
+    char *remote_gen = ruv_get_replica_generation(remote_ruv);
+    Object *local_ruv_obj;
+    RUV *local_ruv;
+
+    PR_ASSERT(NULL != r);
+    local_ruv_obj = replica_get_ruv(r);
+    if (NULL != local_ruv_obj) {
+        local_ruv = (RUV *)object_get_data(local_ruv_obj);
+        PR_ASSERT(local_ruv);
+        local_gen = ruv_get_replica_generation(local_ruv);
+        object_release(local_ruv_obj);
+    }
+    if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) {
+        return_value = PR_FALSE;
+    } else {
+        return_value = PR_TRUE;
+    }
+    slapi_ch_free_string(&remote_gen);
+    slapi_ch_free_string(&local_gen);
+    return return_value;
+}
+
 /*
  * Update one particular CSN in an RUV. This is meant to be called
  * whenever (a) the server has processed a client operation and
@@ -1298,6 +1328,11 @@ replica_update_csngen_state_ext(Replica *r, const RUV *ruv, const CSN *extracsn)
 
     PR_ASSERT(r && ruv);
 
+    if (!replica_check_generation(r, ruv)) /* ruv has wrong generation - we are done */
+    {
+        return 0;
+    }
+
     rc = ruv_get_max_csn(ruv, &csn);
     if (rc != RUV_SUCCESS) {
         return -1;
@@ -3713,8 +3748,8 @@ replica_update_ruv_consumer(Replica *r, RUV *supplier_ruv)
     replica_lock(r->repl_lock);
 
     local_ruv = (RUV *)object_get_data(r->repl_ruv);
-
-    if (is_cleaned_rid(supplier_id) || local_ruv == NULL) {
+    if (is_cleaned_rid(supplier_id) || local_ruv == NULL ||
+        !replica_check_generation(r, supplier_ruv)) {
         replica_unlock(r->repl_lock);
         return;
     }
diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py
index 10baba4d7..6850c9a8a 100644
--- a/src/lib389/lib389/dseldif.py
+++ b/src/lib389/lib389/dseldif.py
@@ -317,6 +317,43 @@ class DSEldif(DSLint):
 
         return states
 
+    def _increaseTimeSkew(self, suffix, timeSkew):
+        # Increase csngen state local_offset by timeSkew
+        # Warning: instance must be stopped before calling this function
+        assert (timeSkew >= 0)
+        nsState = self.readNsState(suffix)[0]
+        self._instance.log.debug(f'_increaseTimeSkew nsState is {nsState}')
+        oldNsState = self.get(nsState['dn'], 'nsState', True)
+        self._instance.log.debug(f'oldNsState is {oldNsState}')
+
+        # Lets reencode the new nsState
+        from lib389.utils import print_nice_time
+        if pack('<h', 1) == pack('=h',1):
+            end = '<'
+        elif pack('>h', 1) == pack('=h',1):
+            end = '>'
+        else:
+            raise ValueError("Unknown endian, unable to proceed")
+
+        thelen = len(oldNsState)
+        if thelen <= 20:
+            pad = 2  # padding for short H values
+            timefmt = 'I'  # timevals are unsigned 32-bit int
+        else:
+            pad = 6  # padding for short H values
+            timefmt = 'Q'  # timevals are unsigned 64-bit int
+        fmtstr = "%sH%dx3%sH%dx" % (end, pad, timefmt, pad)
+        newNsState = base64.b64encode(pack(fmtstr, int(nsState['rid']),
+                                           int(nsState['gen_time']), int(nsState['local_offset'])+timeSkew,
+                                           int(nsState['remote_offset']), int(nsState['seq_num'])))
+        newNsState = newNsState.decode('utf-8')
+        self._instance.log.debug(f'newNsState is {newNsState}')
+        # Lets replace the value.
+        (entry_dn_i, attr_data) = self._find_attr(nsState['dn'], 'nsState')
+        attr_i = next(iter(attr_data))
+        self._contents[entry_dn_i + attr_i] = f"nsState:: {newNsState}"
+        self._update()
+
 
 class FSChecks(DSLint):
     """This is for the healthcheck feature, check commonly used system config files the
-- 
2.26.2
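The fix keys everything on the RUV's replica generation: two replicas only exchange updates (or accept each other's URLs and csngen state) when their data generations match. A hedged sketch comparing generations over LDAP; it reads the nsds50ruv values from the RUV tombstone entry, which is where 389-ds normally exposes them, but the lookup should be verified against your deployment:

    import ldap

    RUV_FILTER = ("(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)"
                  "(objectclass=nstombstone))")

    def replica_generation(uri, bind_dn, bind_pw, suffix):
        """Return the '{replicageneration} ...' value from the suffix RUV entry."""
        conn = ldap.initialize(uri)
        conn.simple_bind_s(bind_dn, bind_pw)
        try:
            res = conn.search_s(suffix, ldap.SCOPE_SUBTREE, RUV_FILTER, ["nsds50ruv"])
            for _dn, attrs in res:
                for val in attrs.get("nsds50ruv", []):
                    text = val.decode("utf-8")
                    if text.startswith("{replicageneration}"):
                        return text
        finally:
            conn.unbind_s()
        return None

    def same_generation(gen_a, gen_b):
        """Mirrors replica_check_generation(): both present and equal."""
        return gen_a is not None and gen_b is not None and gen_a == gen_b

When same_generation() is false, the fixed server skips both the csngen update at session start and the RUV update at session end, so consumers never return referrals toward masters holding a different data set.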
@ -0,0 +1,373 @@
|
||||
From c167d6127db45d8426437c273060c8c8f7fbcb9b Mon Sep 17 00:00:00 2001
|
||||
From: Firstyear <william.brown@suse.com>
|
||||
Date: Wed, 23 Sep 2020 09:19:34 +1000
|
||||
Subject: [PATCH 04/12] Ticket 4326 - entryuuid fixup did not work correctly
|
||||
(#4328)
|
||||
|
||||
Bug Description: due to an oversight in how fixup tasks
|
||||
worked, the entryuuid fixup task did not work correctly and
|
||||
would not persist over restarts.
|
||||
|
||||
Fix Description: Correctly implement entryuuid fixup.
|
||||
|
||||
fixes: #4326
|
||||
|
||||
Author: William Brown <william@blackhats.net.au>
|
||||
|
||||
Review by: mreynolds (thanks!)
|
||||
---
|
||||
.../tests/suites/entryuuid/basic_test.py | 24 +++-
|
||||
src/plugins/entryuuid/src/lib.rs | 43 ++++++-
|
||||
src/slapi_r_plugin/src/constants.rs | 5 +
|
||||
src/slapi_r_plugin/src/entry.rs | 8 ++
|
||||
src/slapi_r_plugin/src/lib.rs | 2 +
|
||||
src/slapi_r_plugin/src/macros.rs | 2 +-
|
||||
src/slapi_r_plugin/src/modify.rs | 118 ++++++++++++++++++
|
||||
src/slapi_r_plugin/src/pblock.rs | 7 ++
|
||||
src/slapi_r_plugin/src/value.rs | 4 +
|
||||
9 files changed, 206 insertions(+), 7 deletions(-)
|
||||
create mode 100644 src/slapi_r_plugin/src/modify.rs
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/entryuuid/basic_test.py b/dirsrvtests/tests/suites/entryuuid/basic_test.py
|
||||
index beb73701d..4d8a40909 100644
|
||||
--- a/dirsrvtests/tests/suites/entryuuid/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/entryuuid/basic_test.py
|
||||
@@ -12,6 +12,7 @@ import time
|
||||
import shutil
|
||||
from lib389.idm.user import nsUserAccounts, UserAccounts
|
||||
from lib389.idm.account import Accounts
|
||||
+from lib389.idm.domain import Domain
|
||||
from lib389.topologies import topology_st as topology
|
||||
from lib389.backend import Backends
|
||||
from lib389.paths import Paths
|
||||
@@ -190,6 +191,7 @@ def test_entryuuid_fixup_task(topology):
|
||||
3. Enable the entryuuid plugin
|
||||
4. Run the fixup
|
||||
5. Assert the entryuuid now exists
|
||||
+ 6. Restart and check they persist
|
||||
|
||||
:expectedresults:
|
||||
1. Success
|
||||
@@ -197,6 +199,7 @@ def test_entryuuid_fixup_task(topology):
|
||||
3. Success
|
||||
4. Success
|
||||
5. Suddenly EntryUUID!
|
||||
+ 6. Still has EntryUUID!
|
||||
"""
|
||||
# 1. Disable the plugin
|
||||
plug = EntryUUIDPlugin(topology.standalone)
|
||||
@@ -220,7 +223,22 @@ def test_entryuuid_fixup_task(topology):
|
||||
assert(task.is_complete() and task.get_exit_code() == 0)
|
||||
topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,))
|
||||
|
||||
- # 5. Assert the uuid.
|
||||
- euuid = account.get_attr_val_utf8('entryUUID')
|
||||
- assert(euuid is not None)
|
||||
+ # 5.1 Assert the uuid on the user.
|
||||
+ euuid_user = account.get_attr_val_utf8('entryUUID')
|
||||
+ assert(euuid_user is not None)
|
||||
+
|
||||
+ # 5.2 Assert it on the domain entry.
|
||||
+ domain = Domain(topology.standalone, dn=DEFAULT_SUFFIX)
|
||||
+ euuid_domain = domain.get_attr_val_utf8('entryUUID')
|
||||
+ assert(euuid_domain is not None)
|
||||
+
|
||||
+ # Assert it persists after a restart.
|
||||
+ topology.standalone.restart()
|
||||
+ # 6.1 Assert the uuid on the use.
|
||||
+ euuid_user_2 = account.get_attr_val_utf8('entryUUID')
+ assert(euuid_user_2 == euuid_user)
+
+ # 6.2 Assert it on the domain entry.
+ euuid_domain_2 = domain.get_attr_val_utf8('entryUUID')
+ assert(euuid_domain_2 == euuid_domain)

diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
index 6b5e8d1bb..92977db05 100644
--- a/src/plugins/entryuuid/src/lib.rs
+++ b/src/plugins/entryuuid/src/lib.rs
@@ -187,9 +187,46 @@ impl SlapiPlugin3 for EntryUuid {
}
}

-pub fn entryuuid_fixup_mapfn(mut e: EntryRef, _data: &()) -> Result<(), PluginError> {
- assign_uuid(&mut e);
- Ok(())
+pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError> {
+ /* Supply a modification to the entry. */
+ let sdn = e.get_sdnref();
+
+ /* Sanity check that entryuuid doesn't already exist */
+ if e.contains_attr("entryUUID") {
+ log_error!(
+ ErrorLevel::Trace,
+ "skipping fixup for -> {}",
+ sdn.to_dn_string()
+ );
+ return Ok(());
+ }
+
+ // Setup the modifications
+ let mut mods = SlapiMods::new();
+
+ let u: Uuid = Uuid::new_v4();
+ let uuid_value = Value::from(&u);
+ let values: ValueArray = std::iter::once(uuid_value).collect();
+ mods.append(ModType::Replace, "entryUUID", values);
+
+ /* */
+ let lmod = Modify::new(&sdn, mods, plugin_id())?;
+
+ match lmod.execute() {
+ Ok(_) => {
+ log_error!(ErrorLevel::Trace, "fixed-up -> {}", sdn.to_dn_string());
+ Ok(())
+ }
+ Err(e) => {
+ log_error!(
+ ErrorLevel::Error,
+ "entryuuid_fixup_mapfn -> fixup failed -> {} {:?}",
+ sdn.to_dn_string(),
+ e
+ );
+ Err(PluginError::GenericFailure)
+ }
+ }
}

#[cfg(test)]
diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs
index cf76ccbdb..34845c2f4 100644
--- a/src/slapi_r_plugin/src/constants.rs
+++ b/src/slapi_r_plugin/src/constants.rs
@@ -5,6 +5,11 @@ use std::os::raw::c_char;
pub const LDAP_SUCCESS: i32 = 0;
pub const PLUGIN_DEFAULT_PRECEDENCE: i32 = 50;

+#[repr(i32)]
+pub enum OpFlags {
+ ByassReferrals = 0x0040_0000,
+}
+
#[repr(i32)]
/// The set of possible function handles we can register via the pblock. These
/// values correspond to slapi-plugin.h.
diff --git a/src/slapi_r_plugin/src/entry.rs b/src/slapi_r_plugin/src/entry.rs
index 034efe692..22ae45189 100644
--- a/src/slapi_r_plugin/src/entry.rs
+++ b/src/slapi_r_plugin/src/entry.rs
@@ -70,6 +70,14 @@ impl EntryRef {
}
}

+ pub fn contains_attr(&self, name: &str) -> bool {
+ let cname = CString::new(name).expect("invalid attr name");
+ let va = unsafe { slapi_entry_attr_get_valuearray(self.raw_e, cname.as_ptr()) };
+
+ // If it's null, it's not present, so flip the logic.
+ !va.is_null()
+ }
+
pub fn add_value(&mut self, a: &str, v: &ValueRef) {
// turn the attr to a c string.
// TODO FIX
diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs
index d7fc22e52..076907bae 100644
--- a/src/slapi_r_plugin/src/lib.rs
+++ b/src/slapi_r_plugin/src/lib.rs
@@ -9,6 +9,7 @@ pub mod dn;
pub mod entry;
pub mod error;
pub mod log;
+pub mod modify;
pub mod pblock;
pub mod plugin;
pub mod search;
@@ -24,6 +25,7 @@ pub mod prelude {
pub use crate::entry::EntryRef;
pub use crate::error::{DseCallbackStatus, LDAPError, PluginError, RPluginError};
pub use crate::log::{log_error, ErrorLevel};
+ pub use crate::modify::{ModType, Modify, SlapiMods};
pub use crate::pblock::{Pblock, PblockRef};
pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3};
pub use crate::search::{Search, SearchScope};
diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs
index 030449632..bc8dfa60f 100644
--- a/src/slapi_r_plugin/src/macros.rs
+++ b/src/slapi_r_plugin/src/macros.rs
@@ -825,7 +825,7 @@ macro_rules! slapi_r_search_callback_mapfn {
let e = EntryRef::new(raw_e);
let data_ptr = raw_data as *const _;
let data = unsafe { &(*data_ptr) };
- match $cb_mod_ident(e, data) {
+ match $cb_mod_ident(&e, data) {
Ok(_) => LDAPError::Success as i32,
Err(e) => e as i32,
}
diff --git a/src/slapi_r_plugin/src/modify.rs b/src/slapi_r_plugin/src/modify.rs
new file mode 100644
index 000000000..30864377a
--- /dev/null
+++ b/src/slapi_r_plugin/src/modify.rs
@@ -0,0 +1,118 @@
+use crate::constants::OpFlags;
+use crate::dn::SdnRef;
+use crate::error::{LDAPError, PluginError};
+use crate::pblock::Pblock;
+use crate::plugin::PluginIdRef;
+use crate::value::{slapi_value, ValueArray};
+
+use std::ffi::CString;
+use std::ops::{Deref, DerefMut};
+use std::os::raw::c_char;
+
+extern "C" {
+ fn slapi_modify_internal_set_pb_ext(
+ pb: *const libc::c_void,
+ dn: *const libc::c_void,
+ mods: *const *const libc::c_void,
+ controls: *const *const libc::c_void,
+ uniqueid: *const c_char,
+ plugin_ident: *const libc::c_void,
+ op_flags: i32,
+ );
+ fn slapi_modify_internal_pb(pb: *const libc::c_void);
+ fn slapi_mods_free(smods: *const *const libc::c_void);
+ fn slapi_mods_get_ldapmods_byref(smods: *const libc::c_void) -> *const *const libc::c_void;
+ fn slapi_mods_new() -> *const libc::c_void;
+ fn slapi_mods_add_mod_values(
+ smods: *const libc::c_void,
+ mtype: i32,
+ attrtype: *const c_char,
+ value: *const *const slapi_value,
+ );
+}
+
+#[derive(Debug)]
+#[repr(i32)]
+pub enum ModType {
+ Add = 0,
+ Delete = 1,
+ Replace = 2,
+}
+
+pub struct SlapiMods {
+ inner: *const libc::c_void,
+ vas: Vec<ValueArray>,
+}
+
+impl Drop for SlapiMods {
+ fn drop(&mut self) {
+ unsafe { slapi_mods_free(&self.inner as *const *const libc::c_void) }
+ }
+}
+
+impl SlapiMods {
+ pub fn new() -> Self {
+ SlapiMods {
+ inner: unsafe { slapi_mods_new() },
+ vas: Vec::new(),
+ }
+ }
+
+ pub fn append(&mut self, modtype: ModType, attrtype: &str, values: ValueArray) {
+ // We can get the value array pointer here to push to the inner
+ // because the internal pointers won't change even when we push them
+ // to the list to preserve their lifetime.
+ let vas = values.as_ptr();
+ // We take ownership of this to ensure it lives at least as long as our
+ // slapimods structure.
+ self.vas.push(values);
+ // now we can insert these into the mods.
+ let c_attrtype = CString::new(attrtype).expect("failed to allocate attrtype");
+ unsafe { slapi_mods_add_mod_values(self.inner, modtype as i32, c_attrtype.as_ptr(), vas) };
+ }
+}
+
+pub struct Modify {
+ pb: Pblock,
+ mods: SlapiMods,
+}
+
+pub struct ModifyResult {
+ pb: Pblock,
+}
+
+impl Modify {
+ pub fn new(dn: &SdnRef, mods: SlapiMods, plugin_id: PluginIdRef) -> Result<Self, PluginError> {
+ let pb = Pblock::new();
+ let lmods = unsafe { slapi_mods_get_ldapmods_byref(mods.inner) };
+ // OP_FLAG_ACTION_LOG_ACCESS
+
+ unsafe {
+ slapi_modify_internal_set_pb_ext(
+ pb.deref().as_ptr(),
+ dn.as_ptr(),
+ lmods,
+ std::ptr::null(),
+ std::ptr::null(),
+ plugin_id.raw_pid,
+ OpFlags::ByassReferrals as i32,
+ )
+ };
+
+ Ok(Modify { pb, mods })
+ }
+
+ pub fn execute(self) -> Result<ModifyResult, LDAPError> {
+ let Modify {
+ mut pb,
+ mods: _mods,
+ } = self;
+ unsafe { slapi_modify_internal_pb(pb.deref().as_ptr()) };
+ let result = pb.get_op_result();
+
+ match result {
+ 0 => Ok(ModifyResult { pb }),
+ _e => Err(LDAPError::from(result)),
+ }
+ }
+}
diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs
index b69ce1680..0f83914f3 100644
--- a/src/slapi_r_plugin/src/pblock.rs
+++ b/src/slapi_r_plugin/src/pblock.rs
@@ -11,6 +11,7 @@ pub use crate::log::{log_error, ErrorLevel};
extern "C" {
fn slapi_pblock_set(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
fn slapi_pblock_get(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
+ fn slapi_pblock_destroy(pb: *const libc::c_void);
fn slapi_pblock_new() -> *const libc::c_void;
}

@@ -41,6 +42,12 @@ impl DerefMut for Pblock {
}
}

+impl Drop for Pblock {
+ fn drop(&mut self) {
+ unsafe { slapi_pblock_destroy(self.value.raw_pb) }
+ }
+}
+
pub struct PblockRef {
raw_pb: *const libc::c_void,
}
diff --git a/src/slapi_r_plugin/src/value.rs b/src/slapi_r_plugin/src/value.rs
index 5a40dd279..46246837a 100644
--- a/src/slapi_r_plugin/src/value.rs
+++ b/src/slapi_r_plugin/src/value.rs
@@ -96,6 +96,10 @@ impl ValueArray {
let bs = vs.into_boxed_slice();
Box::leak(bs) as *const _ as *const *const slapi_value
}
+
+ pub fn as_ptr(&self) -> *const *const slapi_value {
+ self.data.as_ptr() as *const *const slapi_value
+ }
}

impl FromIterator<Value> for ValueArray {
--
2.26.3

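The modify.rs file added above is the first internal-write API exposed to the
Rust plugins: SlapiMods owns the underlying Slapi_Mods (plus the ValueArrays
backing it), and Modify wires up a pblock for slapi_modify_internal_pb. A
minimal sketch of how a plugin callback can drive it — this condenses
entryuuid_fixup_mapfn above and is not an additional patch; it assumes the
prelude re-exports these types (Value/ValueArray live in value.rs) and a
plugin_id() accessor like the one the entryuuid plugin uses:

    use slapi_r_plugin::prelude::*;
    use uuid::Uuid;

    // Sketch only: replace an attribute on one entry through the internal
    // modify API. plugin_id() is assumed to return this plugin's PluginIdRef.
    fn replace_entryuuid(sdn: &SdnRef) -> Result<(), PluginError> {
        // SlapiMods wraps slapi_mods_new(); ModType::Replace maps to
        // LDAP_MOD_REPLACE (2). The ValueArray is kept alive inside
        // SlapiMods for as long as the C side may reference it.
        let mut mods = SlapiMods::new();
        let u = Uuid::new_v4();
        let values: ValueArray = std::iter::once(Value::from(&u)).collect();
        mods.append(ModType::Replace, "entryUUID", values);

        // Modify::new sets up the internal-operation pblock; execute()
        // consumes it, runs the modify, and surfaces the op result.
        let lmod = Modify::new(sdn, mods, plugin_id())?;
        lmod.execute().map_err(|_| PluginError::GenericFailure)?;
        Ok(())
    }

Because Pblock now implements Drop (calling slapi_pblock_destroy), the pblock
held by Modify/ModifyResult is released automatically whichever way this
function returns.
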
@ -1,179 +0,0 @@
From 826a1bb4ea88915ac492828d1cc4a901623f7866 Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Thu, 14 May 2020 14:31:47 +1000
Subject: [PATCH 1/2] Ticket 50933 - Update 2307compat.ldif

Bug Description: This resolves a potential conflict between 60nis.ldif
in freeipa and others with 2307compat, by removing the conflicting
definitions from 2307bis that were included.

Fix Description: By not including these in 2307compat, this means that
sites that rely on the values provided by 2307bis may ALSO need
60nis.ldif to be present. However, these nis values seem like they are
likely very rare in reality, and this also will avoid potential
issues with freeipa. It also is the least disruptive as we don't need
to change an already defined file, and we don't have values where the name
to oid relationship changes.

Fixes: #50933
https://pagure.io/389-ds-base/issue/50933

Author: William Brown <william@blackhats.net.au>

Review by: tbordaz (Thanks!)
---
ldap/schema/10rfc2307compat.ldif | 66 --------------------------------
ldap/schema/60autofs.ldif | 39 ++++++++++++-------
2 files changed, 26 insertions(+), 79 deletions(-)

diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif
index 8810231ac..78c588d08 100644
--- a/ldap/schema/10rfc2307compat.ldif
+++ b/ldap/schema/10rfc2307compat.ldif
@@ -176,50 +176,6 @@ attributeTypes: (
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
SINGLE-VALUE
)
-attributeTypes: (
- 1.3.6.1.1.1.1.28 NAME 'nisPublicKey'
- DESC 'NIS public key'
- EQUALITY octetStringMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.40
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.29 NAME 'nisSecretKey'
- DESC 'NIS secret key'
- EQUALITY octetStringMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.40
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.30 NAME 'nisDomain'
- DESC 'NIS domain'
- EQUALITY caseIgnoreIA5Match
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.31 NAME 'automountMapName'
- DESC 'automount Map Name'
- EQUALITY caseExactIA5Match
- SUBSTR caseExactIA5SubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.32 NAME 'automountKey'
- DESC 'Automount Key value'
- EQUALITY caseExactIA5Match
- SUBSTR caseExactIA5SubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.33 NAME 'automountInformation'
- DESC 'Automount information'
- EQUALITY caseExactIA5Match
- SUBSTR caseExactIA5SubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- SINGLE-VALUE
- )
# end of attribute types - beginning of objectclasses
objectClasses: (
1.3.6.1.1.1.2.0 NAME 'posixAccount' SUP top AUXILIARY
@@ -324,28 +280,6 @@ objectClasses: (
seeAlso $ serialNumber'
MAY ( bootFile $ bootParameter $ cn $ description $ l $ o $ ou $ owner $ seeAlso $ serialNumber )
)
-objectClasses: (
- 1.3.6.1.1.1.2.14 NAME 'nisKeyObject' SUP top AUXILIARY
- DESC 'An object with a public and secret key'
- MUST ( cn $ nisPublicKey $ nisSecretKey )
- MAY ( uidNumber $ description )
- )
-objectClasses: (
- 1.3.6.1.1.1.2.15 NAME 'nisDomainObject' SUP top AUXILIARY
- DESC 'Associates a NIS domain with a naming context'
- MUST nisDomain
- )
-objectClasses: (
- 1.3.6.1.1.1.2.16 NAME 'automountMap' SUP top STRUCTURAL
- MUST ( automountMapName )
- MAY description
- )
-objectClasses: (
- 1.3.6.1.1.1.2.17 NAME 'automount' SUP top STRUCTURAL
- DESC 'Automount information'
- MUST ( automountKey $ automountInformation )
- MAY description
- )
## namedObject is needed for groups without members
objectClasses: (
1.3.6.1.4.1.5322.13.1.1 NAME 'namedObject' SUP top STRUCTURAL
diff --git a/ldap/schema/60autofs.ldif b/ldap/schema/60autofs.ldif
index 084e9ec30..de3922aa2 100644
--- a/ldap/schema/60autofs.ldif
+++ b/ldap/schema/60autofs.ldif
@@ -6,7 +6,23 @@ dn: cn=schema
################################################################################
#
attributeTypes: (
- 1.3.6.1.1.1.1.33
+ 1.3.6.1.1.1.1.31 NAME 'automountMapName'
+ DESC 'automount Map Name'
+ EQUALITY caseExactIA5Match
+ SUBSTR caseExactIA5SubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.32 NAME 'automountKey'
+ DESC 'Automount Key value'
+ EQUALITY caseExactIA5Match
+ SUBSTR caseExactIA5SubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.33
NAME 'automountInformation'
DESC 'Information used by the autofs automounter'
EQUALITY caseExactIA5Match
@@ -18,25 +34,22 @@ attributeTypes: (
################################################################################
#
objectClasses: (
- 1.3.6.1.1.1.2.17
- NAME 'automount'
- DESC 'An entry in an automounter map'
+ 1.3.6.1.1.1.2.16
+ NAME 'automountMap'
+ DESC 'An group of related automount objects'
SUP top
STRUCTURAL
- MUST ( cn $ automountInformation )
- MAY ( description )
+ MAY ( ou $ automountMapName $ description )
X-ORIGIN 'draft-howard-rfc2307bis'
)
-#
-################################################################################
-#
objectClasses: (
- 1.3.6.1.1.1.2.16
- NAME 'automountMap'
- DESC 'An group of related automount objects'
+ 1.3.6.1.1.1.2.17
+ NAME 'automount'
+ DESC 'An entry in an automounter map'
SUP top
STRUCTURAL
- MUST ( ou )
+ MUST ( automountInformation )
+ MAY ( cn $ description $ automountKey )
X-ORIGIN 'draft-howard-rfc2307bis'
)
#
--
2.26.2

@ -0,0 +1,192 @@
From b2e0a1d405d15383064e547fd15008bc136d3efe Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Thu, 17 Dec 2020 08:22:23 +1000
Subject: [PATCH 05/12] Issue 4498 - BUG - entryuuid replication may not work
(#4503)

Bug Description: EntryUUID can be duplicated in replication,
due to a missing check in assign_uuid

Fix Description: Add a test case to determine how this occurs,
and add the correct check for existing entryUUID.

fixes: https://github.com/389ds/389-ds-base/issues/4498

Author: William Brown <william@blackhats.net.au>

Review by: @mreynolds389
---
.../tests/suites/entryuuid/replicated_test.py | 77 +++++++++++++++++++
rpm.mk | 2 +-
src/plugins/entryuuid/src/lib.rs | 20 ++++-
src/slapi_r_plugin/src/constants.rs | 2 +
src/slapi_r_plugin/src/pblock.rs | 7 ++
5 files changed, 106 insertions(+), 2 deletions(-)
create mode 100644 dirsrvtests/tests/suites/entryuuid/replicated_test.py

diff --git a/dirsrvtests/tests/suites/entryuuid/replicated_test.py b/dirsrvtests/tests/suites/entryuuid/replicated_test.py
new file mode 100644
index 000000000..a2ebc8ff7
--- /dev/null
+++ b/dirsrvtests/tests/suites/entryuuid/replicated_test.py
@@ -0,0 +1,77 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 William Brown <william@blackhats.net.au>
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import ldap
+import pytest
+import logging
+from lib389.topologies import topology_m2 as topo_m2
+from lib389.idm.user import nsUserAccounts
+from lib389.paths import Paths
+from lib389.utils import ds_is_older
+from lib389._constants import *
+from lib389.replica import ReplicationManager
+
+default_paths = Paths()
+
+pytestmark = pytest.mark.tier1
+
+@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions")
+
+def test_entryuuid_with_replication(topo_m2):
+ """ Check that entryuuid works with replication
+
+ :id: a5f15bf9-7f63-473a-840c-b9037b787024
+
+ :setup: two node mmr
+
+ :steps:
+ 1. Create an entry on one server
+ 2. Wait for replication
+ 3. Assert it is on the second
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
|
||||
+
|
||||
+ server_a = topo_m2.ms["supplier1"]
|
||||
+ server_b = topo_m2.ms["supplier2"]
|
||||
+ server_a.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE))
|
||||
+ server_b.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE))
|
||||
+
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+
|
||||
+ account_a = nsUserAccounts(server_a, DEFAULT_SUFFIX).create_test_user(uid=2000)
|
||||
+ euuid_a = account_a.get_attr_vals_utf8('entryUUID')
|
||||
+ print("🧩 %s" % euuid_a)
|
||||
+ assert(euuid_a is not None)
|
||||
+ assert(len(euuid_a) == 1)
|
||||
+
|
||||
+ repl.wait_for_replication(server_a, server_b)
|
||||
+
|
||||
+ account_b = nsUserAccounts(server_b, DEFAULT_SUFFIX).get("test_user_2000")
|
||||
+ euuid_b = account_b.get_attr_vals_utf8('entryUUID')
|
||||
+ print("🧩 %s" % euuid_b)
|
||||
+
|
||||
+ server_a.config.loglevel(vals=(ErrorLog.DEFAULT,))
|
||||
+ server_b.config.loglevel(vals=(ErrorLog.DEFAULT,))
|
||||
+
|
||||
+ assert(euuid_b is not None)
|
||||
+ assert(len(euuid_b) == 1)
|
||||
+ assert(euuid_b == euuid_a)
|
||||
+
|
||||
+ account_b.set("description", "update")
|
||||
+ repl.wait_for_replication(server_b, server_a)
|
||||
+
|
||||
+ euuid_c = account_a.get_attr_vals_utf8('entryUUID')
|
||||
+ print("🧩 %s" % euuid_c)
|
||||
+ assert(euuid_c is not None)
|
||||
+ assert(len(euuid_c) == 1)
|
||||
+ assert(euuid_c == euuid_a)
|
||||
+
|
||||
diff --git a/rpm.mk b/rpm.mk
|
||||
index 02f5bba37..d1cdff7df 100644
|
||||
--- a/rpm.mk
|
||||
+++ b/rpm.mk
|
||||
@@ -25,7 +25,7 @@ TSAN_ON = 0
|
||||
# Undefined Behaviour Sanitizer
|
||||
UBSAN_ON = 0
|
||||
|
||||
-RUST_ON = 0
|
||||
+RUST_ON = 1
|
||||
|
||||
# PERL_ON is deprecated and turns on the LEGACY_ON, this for not breaking people's workflows.
|
||||
PERL_ON = 1
|
||||
diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
|
||||
index 92977db05..0197c5e83 100644
|
||||
--- a/src/plugins/entryuuid/src/lib.rs
|
||||
+++ b/src/plugins/entryuuid/src/lib.rs
|
||||
@@ -30,6 +30,16 @@ slapi_r_search_callback_mapfn!(entryuuid, entryuuid_fixup_cb, entryuuid_fixup_ma
|
||||
fn assign_uuid(e: &mut EntryRef) {
|
||||
let sdn = e.get_sdnref();
|
||||
|
||||
+ // 🚧 safety barrier 🚧
|
||||
+ if e.contains_attr("entryUUID") {
|
||||
+ log_error!(
|
||||
+ ErrorLevel::Trace,
|
||||
+ "assign_uuid -> entryUUID exists, skipping dn {}",
|
||||
+ sdn.to_dn_string()
|
||||
+ );
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
// We could consider making these lazy static.
|
||||
let config_sdn = Sdn::try_from("cn=config").expect("Invalid static dn");
|
||||
let schema_sdn = Sdn::try_from("cn=schema").expect("Invalid static dn");
|
||||
@@ -66,7 +76,15 @@ impl SlapiPlugin3 for EntryUuid {
|
||||
}
|
||||
|
||||
fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> {
|
||||
- log_error!(ErrorLevel::Trace, "betxn_pre_add");
|
||||
+ if pb.get_is_replicated_operation() {
|
||||
+ log_error!(
|
||||
+ ErrorLevel::Trace,
|
||||
+ "betxn_pre_add -> replicated operation, will not change"
|
||||
+ );
|
||||
+ return Ok(());
|
||||
+ }
|
||||
+
|
||||
+ log_error!(ErrorLevel::Trace, "betxn_pre_add -> start");
|
||||
|
||||
let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?;
|
||||
assign_uuid(&mut e);
|
||||
diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs
|
||||
index 34845c2f4..aa0691acc 100644
|
||||
--- a/src/slapi_r_plugin/src/constants.rs
|
||||
+++ b/src/slapi_r_plugin/src/constants.rs
|
||||
@@ -164,6 +164,8 @@ pub(crate) enum PblockType {
|
||||
AddEntry = 60,
|
||||
/// SLAPI_BACKEND
|
||||
Backend = 130,
|
||||
+ /// SLAPI_IS_REPLICATED_OPERATION
|
||||
+ IsReplicationOperation = 142,
|
||||
/// SLAPI_PLUGIN_MR_NAMES
|
||||
MRNames = 624,
|
||||
/// SLAPI_PLUGIN_SYNTAX_NAMES
|
||||
diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs
|
||||
index 0f83914f3..718ff2ca7 100644
|
||||
--- a/src/slapi_r_plugin/src/pblock.rs
|
||||
+++ b/src/slapi_r_plugin/src/pblock.rs
|
||||
@@ -279,4 +279,11 @@ impl PblockRef {
|
||||
pub fn get_op_result(&mut self) -> i32 {
|
||||
self.get_value_i32(PblockType::OpResult).unwrap_or(-1)
|
||||
}
|
||||
+
|
||||
+ pub fn get_is_replicated_operation(&mut self) -> bool {
|
||||
+ let i = self.get_value_i32(PblockType::IsReplicationOperation).unwrap_or(0);
|
||||
+ // Because rust returns the result of the last evaluation, we can
|
||||
+ // just return if not equal 0.
|
||||
+ i != 0
|
||||
+ }
|
||||
}
|
||||
--
|
||||
2.26.3
|
||||
|
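The two guards this patch adds work as a pair to make UUID assignment
idempotent: betxn_pre_add skips replicated operations entirely (the
supplier's value must arrive intact), and assign_uuid refuses to overwrite
an entryUUID that already exists. A sketch of the combined pattern, using
only the accessors introduced above (illustrative, not extra plugin code):

    // Both checks must pass before a new entryUUID may be generated.
    fn should_assign_uuid(pb: &mut PblockRef, e: &EntryRef) -> bool {
        // SLAPI_IS_REPLICATED_OPERATION (pblock type 142) is non-zero for
        // replicated operations; get_is_replicated_operation() wraps it.
        if pb.get_is_replicated_operation() {
            return false;
        }
        // contains_attr() is the entry-level check added in entry.rs.
        !e.contains_attr("entryUUID")
    }

The replicated_test.py case above then checks the property end to end: the
same single entryUUID value must be observed on both suppliers before and
after a write on each side.
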
@ -1,36 +0,0 @@
From 3d9ced9e340678cc02b1a36c2139492c95ef15a6 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 12 Aug 2020 12:46:42 -0400
Subject: [PATCH 2/2] Issue 50933 - Fix OID change between 10rfc2307 and
10rfc2307compat

Bug Description: 10rfc2307compat changed the OID for nisMap objectclass to
match the standard OID, but this breaks replication with
older versions of DS.

Fix Description: Continue to use the old(invalid?) oid for nisMap so that
replication does not break in a mixed version environment.

Fixes: https://pagure.io/389-ds-base/issue/50933

Reviewed by: firstyear & tbordaz(Thanks!!)
---
ldap/schema/10rfc2307compat.ldif | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif
index 78c588d08..8ba72e1e3 100644
--- a/ldap/schema/10rfc2307compat.ldif
+++ b/ldap/schema/10rfc2307compat.ldif
@@ -253,7 +253,7 @@ objectClasses: (
MAY ( nisNetgroupTriple $ memberNisNetgroup $ description )
)
objectClasses: (
- 1.3.6.1.1.1.2.9 NAME 'nisMap' SUP top STRUCTURAL
+ 1.3.6.1.1.1.2.13 NAME 'nisMap' SUP top STRUCTURAL
DESC 'A generic abstraction of a NIS map'
MUST nisMapName
MAY description
--
2.26.2

@ -0,0 +1,626 @@
From 04c44e74503a842561b6c6e58001faf86d924b20 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 7 Dec 2020 11:00:45 -0500
Subject: [PATCH 06/12] Issue 4421 - Unable to build with Rust enabled in
closed environment

Description: Add Makefile flags and update rpm.mk that allow updating
and downloading all the cargo/rust dependencies. This is
needed for nightly tests and upstream/downstream releases.

Fixes: https://github.com/389ds/389-ds-base/issues/4421

Reviewed by: firstyear(Thanks!)
---
rpm.mk | 3 +-
rpm/389-ds-base.spec.in | 2 +-
src/Cargo.lock | 563 ----------------------------------------
3 files changed, 3 insertions(+), 565 deletions(-)
delete mode 100644 src/Cargo.lock

diff --git a/rpm.mk b/rpm.mk
index d1cdff7df..ef810c63c 100644
--- a/rpm.mk
+++ b/rpm.mk
@@ -44,6 +44,7 @@ update-cargo-dependencies:
cargo update --manifest-path=./src/Cargo.toml

download-cargo-dependencies:
+ cargo update --manifest-path=./src/Cargo.toml
cargo vendor --manifest-path=./src/Cargo.toml
cargo fetch --manifest-path=./src/Cargo.toml
tar -czf vendor.tar.gz vendor
@@ -114,7 +115,7 @@ rpmbuildprep:
cp dist/sources/$(JEMALLOC_TARBALL) $(RPMBUILD)/SOURCES/ ; \
fi

-srpms: rpmroot srpmdistdir tarballs rpmbuildprep
+srpms: rpmroot srpmdistdir download-cargo-dependencies tarballs rpmbuildprep
rpmbuild --define "_topdir $(RPMBUILD)" -bs $(RPMBUILD)/SPECS/$(PACKAGE).spec
cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)*.src.rpm dist/srpms/
rm -rf $(RPMBUILD)
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index b9f85489b..d80de8422 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -357,7 +357,7 @@ UBSAN_FLAGS="--enable-ubsan --enable-debug"
%endif

%if %{use_rust}
-RUST_FLAGS="--enable-rust"
+RUST_FLAGS="--enable-rust --enable-rust-offline"
%endif

%if %{use_legacy}
diff --git a/src/Cargo.lock b/src/Cargo.lock
deleted file mode 100644
index 33d7b8f23..000000000
--- a/src/Cargo.lock
+++ /dev/null
@@ -1,563 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-[[package]]
-name = "ansi_term"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
-dependencies = [
- "winapi",
-]
-
-[[package]]
-name = "atty"
-version = "0.2.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
-dependencies = [
- "hermit-abi",
- "libc",
- "winapi",
-]
-
-[[package]]
-name = "autocfg"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
-
-[[package]]
-name = "base64"
-version = "0.13.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
-
-[[package]]
-name = "bitflags"
-version = "1.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
-
-[[package]]
-name = "byteorder"
-version = "1.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
-
-[[package]]
-name = "cbindgen"
-version = "0.9.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
-dependencies = [
- "clap",
- "log",
- "proc-macro2",
- "quote",
- "serde",
- "serde_json",
- "syn",
- "tempfile",
- "toml",
-]
-
-[[package]]
-name = "cc"
-version = "1.0.67"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd"
-dependencies = [
- "jobserver",
-]
-
-[[package]]
-name = "cfg-if"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
-
-[[package]]
-name = "clap"
-version = "2.33.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
-dependencies = [
- "ansi_term",
- "atty",
- "bitflags",
- "strsim",
- "textwrap",
- "unicode-width",
- "vec_map",
-]
-
-[[package]]
-name = "entryuuid"
-version = "0.1.0"
-dependencies = [
- "cc",
- "libc",
- "paste",
- "slapi_r_plugin",
- "uuid",
-]
-
-[[package]]
-name = "entryuuid_syntax"
-version = "0.1.0"
-dependencies = [
- "cc",
- "libc",
- "paste",
- "slapi_r_plugin",
- "uuid",
-]
-
-[[package]]
-name = "fernet"
-version = "0.1.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
-dependencies = [
- "base64",
- "byteorder",
- "getrandom",
- "openssl",
- "zeroize",
-]
-
-[[package]]
-name = "foreign-types"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
-dependencies = [
- "foreign-types-shared",
-]
-
-[[package]]
-name = "foreign-types-shared"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
-
-[[package]]
-name = "getrandom"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
-dependencies = [
- "cfg-if",
- "libc",
- "wasi",
-]
-
-[[package]]
-name = "hermit-abi"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "itoa"
-version = "0.4.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"
-
-[[package]]
-name = "jobserver"
-version = "0.1.22"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "lazy_static"
-version = "1.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-
-[[package]]
-name = "libc"
-version = "0.2.94"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e"
-
-[[package]]
-name = "librnsslapd"
-version = "0.1.0"
-dependencies = [
- "cbindgen",
- "libc",
- "slapd",
-]
-
-[[package]]
-name = "librslapd"
-version = "0.1.0"
-dependencies = [
- "cbindgen",
- "libc",
- "slapd",
-]
-
-[[package]]
-name = "log"
-version = "0.4.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
-dependencies = [
- "cfg-if",
-]
-
-[[package]]
-name = "once_cell"
-version = "1.7.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3"
-
-[[package]]
-name = "openssl"
-version = "0.10.34"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8"
-dependencies = [
- "bitflags",
- "cfg-if",
- "foreign-types",
- "libc",
- "once_cell",
- "openssl-sys",
-]
-
-[[package]]
-name = "openssl-sys"
-version = "0.9.63"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98"
-dependencies = [
- "autocfg",
- "cc",
- "libc",
- "pkg-config",
- "vcpkg",
-]
-
-[[package]]
-name = "paste"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
-dependencies = [
- "paste-impl",
- "proc-macro-hack",
-]
-
-[[package]]
-name = "paste-impl"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
-dependencies = [
- "proc-macro-hack",
-]
-
-[[package]]
-name = "pkg-config"
-version = "0.3.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
-
-[[package]]
-name = "ppv-lite86"
-version = "0.2.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
-
-[[package]]
-name = "proc-macro-hack"
-version = "0.5.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
-
-[[package]]
-name = "proc-macro2"
-version = "1.0.27"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
-dependencies = [
- "unicode-xid",
-]
-
-[[package]]
-name = "quote"
-version = "1.0.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
-dependencies = [
- "proc-macro2",
-]
-
-[[package]]
-name = "rand"
-version = "0.8.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
-dependencies = [
- "libc",
- "rand_chacha",
- "rand_core",
- "rand_hc",
-]
-
-[[package]]
-name = "rand_chacha"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
-dependencies = [
- "ppv-lite86",
- "rand_core",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.6.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"
-dependencies = [
- "getrandom",
-]
-
-[[package]]
-name = "rand_hc"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
-dependencies = [
- "rand_core",
-]
-
-[[package]]
-name = "redox_syscall"
-version = "0.2.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc"
-dependencies = [
- "bitflags",
-]
-
-[[package]]
-name = "remove_dir_all"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
-dependencies = [
- "winapi",
-]
-
-[[package]]
-name = "rsds"
-version = "0.1.0"
-
-[[package]]
-name = "ryu"
-version = "1.0.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
-
-[[package]]
-name = "serde"
-version = "1.0.126"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03"
-dependencies = [
- "serde_derive",
-]
-
-[[package]]
-name = "serde_derive"
-version = "1.0.126"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "serde_json"
-version = "1.0.64"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79"
-dependencies = [
- "itoa",
- "ryu",
- "serde",
-]
-
-[[package]]
-name = "slapd"
-version = "0.1.0"
-dependencies = [
- "fernet",
-]
-
-[[package]]
-name = "slapi_r_plugin"
-version = "0.1.0"
-dependencies = [
- "lazy_static",
- "libc",
- "paste",
- "uuid",
-]
-
-[[package]]
-name = "strsim"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
-
-[[package]]
-name = "syn"
-version = "1.0.72"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-xid",
-]
-
-[[package]]
-name = "synstructure"
-version = "0.12.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
- "unicode-xid",
-]
-
-[[package]]
-name = "tempfile"
-version = "3.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
-dependencies = [
- "cfg-if",
- "libc",
- "rand",
- "redox_syscall",
- "remove_dir_all",
- "winapi",
-]
-
-[[package]]
-name = "textwrap"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
-dependencies = [
- "unicode-width",
-]
-
-[[package]]
-name = "toml"
-version = "0.5.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "unicode-width"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
-
-[[package]]
-name = "unicode-xid"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
-
-[[package]]
-name = "uuid"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
-dependencies = [
- "getrandom",
-]
-
-[[package]]
-name = "vcpkg"
-version = "0.2.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbdbff6266a24120518560b5dc983096efb98462e51d0d68169895b237be3e5d"
-
-[[package]]
-name = "vec_map"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
-
-[[package]]
-name = "wasi"
-version = "0.10.2+wasi-snapshot-preview1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
-
-[[package]]
-name = "winapi"
-version = "0.3.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
-dependencies = [
- "winapi-i686-pc-windows-gnu",
- "winapi-x86_64-pc-windows-gnu",
-]
-
-[[package]]
-name = "winapi-i686-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-
-[[package]]
-name = "winapi-x86_64-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
-
-[[package]]
-name = "zeroize"
-version = "1.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd"
-dependencies = [
- "zeroize_derive",
-]
-
-[[package]]
-name = "zeroize_derive"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
- "synstructure",
-]
--
2.26.3

@ -1,147 +0,0 @@
From 1085823bf5586d55103cfba249fdf212e9afcb7c Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Thu, 4 Jun 2020 11:51:53 +1000
Subject: [PATCH] Ticket 51131 - improve mutex alloc in conntable

Bug Description: We previously did delayed allocation
of mutexes, which @tbordaz noted can lead to high usage
of the pthread mutex init routines. This was done under
the conntable lock, as well as cleaning the connection

Fix Description: rather than delayed allocation, we
initialise everything at start up instead, which means
that while startup may have a delay, at run time we have
a smaller and lighter connection allocation routine,
that is able to release the CT lock sooner.

https://pagure.io/389-ds-base/issue/51131

Author: William Brown <william@blackhats.net.au>

Review by: ???
---
ldap/servers/slapd/conntable.c | 86 +++++++++++++++++++---------------
1 file changed, 47 insertions(+), 39 deletions(-)

diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c
index b23dc3435..feb9c0d75 100644
--- a/ldap/servers/slapd/conntable.c
+++ b/ldap/servers/slapd/conntable.c
@@ -138,10 +138,21 @@ connection_table_new(int table_size)
ct->conn_next_offset = 1;
ct->conn_free_offset = 1;

+ pthread_mutexattr_t monitor_attr = {0};
+ pthread_mutexattr_init(&monitor_attr);
+ pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE);
+
/* We rely on the fact that we called calloc, which zeros the block, so we don't
* init any structure element unless a zero value is troublesome later
*/
for (i = 0; i < table_size; i++) {
+ /*
+ * Technically this is a no-op due to calloc, but we should always be
+ * careful with things like this ....
+ */
+ ct->c[i].c_state = CONN_STATE_FREE;
+ /* Start the conn setup. */
+
LBER_SOCKET invalid_socket;
/* DBDB---move this out of here once everything works */
ct->c[i].c_sb = ber_sockbuf_alloc();
@@ -161,11 +172,20 @@ connection_table_new(int table_size)
ct->c[i].c_prev = NULL;
ct->c[i].c_ci = i;
ct->c[i].c_fdi = SLAPD_INVALID_SOCKET_INDEX;
- /*
- * Technically this is a no-op due to calloc, but we should always be
- * careful with things like this ....
- */
- ct->c[i].c_state = CONN_STATE_FREE;
+
+ if (pthread_mutex_init(&(ct->c[i].c_mutex), &monitor_attr) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "pthread_mutex_init failed\n");
+ exit(1);
+ }
+
+ ct->c[i].c_pdumutex = PR_NewLock();
+ if (ct->c[i].c_pdumutex == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "PR_NewLock failed\n");
+ exit(1);
+ }
+
+ /* Ready to rock, mark as such. */
+ ct->c[i].c_state = CONN_STATE_INIT;
/* Prepare the connection into the freelist. */
ct->c_freelist[i] = &(ct->c[i]);
}
@@ -241,44 +261,32 @@ connection_table_get_connection(Connection_Table *ct, int sd)
/* Never use slot 0 */
ct->conn_next_offset += 1;
}
- /* Now prep the slot for usage. */
- PR_ASSERT(c->c_next == NULL);
- PR_ASSERT(c->c_prev == NULL);
- PR_ASSERT(c->c_extension == NULL);
-
- if (c->c_state == CONN_STATE_FREE) {
-
- c->c_state = CONN_STATE_INIT;
-
- pthread_mutexattr_t monitor_attr = {0};
- pthread_mutexattr_init(&monitor_attr);
- pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE);
- if (pthread_mutex_init(&(c->c_mutex), &monitor_attr) != 0) {
- slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "pthread_mutex_init failed\n");
- exit(1);
- }
-
- c->c_pdumutex = PR_NewLock();
- if (c->c_pdumutex == NULL) {
- c->c_pdumutex = NULL;
- slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "PR_NewLock failed\n");
- exit(1);
- }
- }
- /* Let's make sure there's no cruft left on there from the last time this connection was used. */
- /* Note: no need to lock c->c_mutex because this function is only
- * called by one thread (the slapd_daemon thread), and if we got this
- * far then `c' is not being used by any operation threads, etc.
- */
- connection_cleanup(c);
- c->c_ct = ct; /* pointer to connection table that owns this connection */
+ PR_Unlock(ct->table_mutex);
} else {
- /* couldn't find a Connection */
+ /* couldn't find a Connection, table must be full */
slapi_log_err(SLAPI_LOG_CONNS, "connection_table_get_connection", "Max open connections reached\n");
+ PR_Unlock(ct->table_mutex);
+ return NULL;
}

- /* We could move this to before the c alloc as there is no point to remain here. */
- PR_Unlock(ct->table_mutex);
+ /* Now prep the slot for usage. */
+ PR_ASSERT(c != NULL);
+ PR_ASSERT(c->c_next == NULL);
+ PR_ASSERT(c->c_prev == NULL);
+ PR_ASSERT(c->c_extension == NULL);
+ PR_ASSERT(c->c_state == CONN_STATE_INIT);
+ /* Let's make sure there's no cruft left on there from the last time this connection was used. */
+
+ /*
+ * Note: no need to lock c->c_mutex because this function is only
+ * called by one thread (the slapd_daemon thread), and if we got this
+ * far then `c' is not being used by any operation threads, etc. The
+ * memory ordering will be provided by the work queue sending c to a
+ * thread.
+ */
+ connection_cleanup(c);
+ /* pointer to connection table that owns this connection */
+ c->c_ct = ct;

return c;
}
--
2.26.2

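The design choice in this patch is to pay the whole mutex set-up cost once
in connection_table_new(), so connection_table_get_connection() only
asserts state and runs cleanup while holding the table lock. A rough Rust
analogy of the same trade-off (illustrative only; the server code above is
C with pthreads and NSPR locks):

    use std::sync::Mutex;

    // Illustrative analogy: initialise every slot's lock up front, so
    // handing out a slot never allocates or initialises a mutex.
    struct ConnSlot {
        lock: Mutex<()>, // built at startup, like pthread_mutex_init above
        in_use: bool,    // CONN_STATE_FREE / CONN_STATE_INIT analogue
    }

    struct ConnTable {
        slots: Vec<ConnSlot>,
    }

    impl ConnTable {
        fn new(size: usize) -> Self {
            // Startup is slower, but acquisition becomes state-only.
            let slots = (0..size)
                .map(|_| ConnSlot { lock: Mutex::new(()), in_use: false })
                .collect();
            ConnTable { slots }
        }

        fn acquire(&mut self) -> Option<&mut ConnSlot> {
            // No lazy init on the hot path: find a free slot and mark it.
            let slot = self.slots.iter_mut().find(|s| !s.in_use)?;
            slot.in_use = true;
            Some(slot)
        }
    }
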
@ -1,66 +0,0 @@
From a9f53e9958861e6a7a827bd852d72d51a6512396 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 25 Nov 2020 18:07:34 +0100
Subject: [PATCH] Issue 4297 - 2nd fix for on ADD replication URP issue
internal searches with filter containing unescaped chars (#4439)

Bug description:
Previous fix is buggy because slapi_filter_escape_filter_value returns
an escaped filter component, not an escaped assertion value.

Fix description:
use the escaped filter component

relates: https://github.com/389ds/389-ds-base/issues/4297

Reviewed by: William Brown

Platforms tested: F31
---
ldap/servers/plugins/replication/urp.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
index f41dbc72d..ed340c9d8 100644
--- a/ldap/servers/plugins/replication/urp.c
+++ b/ldap/servers/plugins/replication/urp.c
@@ -1411,12 +1411,12 @@ urp_add_check_tombstone (Slapi_PBlock *pb, char *sessionid, Slapi_Entry *entry,
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
char *basedn = slapi_entry_get_ndn(entry);
- char *escaped_basedn;
+ char *escaped_filter;
const Slapi_DN *suffix = slapi_get_suffix_by_dn(slapi_entry_get_sdn (entry));
- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", basedn);

- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
- slapi_ch_free((void **)&escaped_basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
+ slapi_ch_free((void **)&escaped_filter);
newpb = slapi_pblock_new();
slapi_search_internal_set_pb(newpb,
slapi_sdn_get_dn(suffix), /* Base DN */
@@ -1605,15 +1605,15 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
const char *basedn = slapi_sdn_get_dn(parentdn);
- char *escaped_basedn;
- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn);
+ char *escaped_filter;
+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn);

char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn");
CSN *conflict_csn = csn_new_by_string(conflict_csnstr);
CSN *tombstone_csn = NULL;

- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
- slapi_ch_free((void **)&escaped_basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
+ slapi_ch_free((void **)&escaped_filter);
newpb = slapi_pblock_new();
char *parent_dn = slapi_dn_parent (basedn);
slapi_search_internal_set_pb(newpb,
--
2.26.2

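The subtlety behind this fix: slapi_filter_escape_filter_value returns a
complete filter component such as "(nscpentrydn=<escaped value>)", so the
caller must splice it into the surrounding filter as-is; embedding it where
a bare value belongs double-wraps the component. An illustrative sketch of
the two compositions (escape_filter_component below is a hypothetical
stand-in written for this example, not the slapd function):

    // Hypothetical stand-in: escape RFC 4515 specials in the value and
    // return the whole "(attr=value)" component, mirroring the behaviour
    // described in the commit message above.
    fn escape_filter_component(attr: &str, value: &str) -> String {
        let escaped: String = value
            .chars()
            .flat_map(|c| match c {
                '*' => "\\2a".chars().collect::<Vec<_>>(),
                '(' => "\\28".chars().collect(),
                ')' => "\\29".chars().collect(),
                '\\' => "\\5c".chars().collect(),
                _ => vec![c],
            })
            .collect();
        format!("({}={})", attr, escaped)
    }

    fn main() {
        let component = escape_filter_component("nscpentrydn", "cn=x(y),o=org");
        // Old, broken composition: a component where a value belongs,
        // yielding "(nscpentrydn=(nscpentrydn=...))".
        let broken = format!("(&(objectclass=nstombstone)(nscpentrydn={}))", component);
        // Fixed composition: splice the component in whole.
        let fixed = format!("(&(objectclass=nstombstone){})", component);
        assert_ne!(broken, fixed);
        println!("{}", fixed);
    }
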
412
SOURCES/0007-Ticket-51175-resolve-plugin-name-leaking.patch
Normal file
412
SOURCES/0007-Ticket-51175-resolve-plugin-name-leaking.patch
Normal file
@ -0,0 +1,412 @@
|
||||
From 279bdb5148eb0b67ddab40c4dd9d08e9e1672f13 Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Fri, 26 Jun 2020 10:27:56 +1000
Subject: [PATCH 07/12] Ticket 51175 - resolve plugin name leaking

Bug Description: Previously pblock.c assumed that all plugin
names were static c strings. Rust can't create static C
strings, so these were intentionally leaked.

Fix Description: Rather than leak these, we do a dup/free
through the slapiplugin struct instead, meaning we can use
ephemeral, and properly managed strings in rust. This does not
affect any other existing code which will still handle the
static strings correctly.

https://pagure.io/389-ds-base/issue/51175

Author: William Brown <william@blackhats.net.au>

Review by: mreynolds, tbordaz (Thanks!)
---
 Makefile.am                             |  1 +
 configure.ac                            |  2 +-
 ldap/servers/slapd/pagedresults.c       |  6 +--
 ldap/servers/slapd/pblock.c             |  9 ++--
 ldap/servers/slapd/plugin.c             |  7 +++
 ldap/servers/slapd/pw_verify.c          |  1 +
 ldap/servers/slapd/tools/pwenc.c        |  2 +-
 src/slapi_r_plugin/README.md            |  6 +--
 src/slapi_r_plugin/src/charray.rs       | 32 ++++++++++++++
 src/slapi_r_plugin/src/lib.rs           |  8 ++--
 src/slapi_r_plugin/src/macros.rs        | 17 +++++---
 src/slapi_r_plugin/src/syntax_plugin.rs | 57 +++++++------------------
 12 files changed, 85 insertions(+), 63 deletions(-)
 create mode 100644 src/slapi_r_plugin/src/charray.rs

diff --git a/Makefile.am b/Makefile.am
index 627953850..36434cf17 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1312,6 +1312,7 @@ rust-nsslapd-private.h: @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a
 libslapi_r_plugin_SOURCES = \
 	src/slapi_r_plugin/src/backend.rs \
 	src/slapi_r_plugin/src/ber.rs \
+	src/slapi_r_plugin/src/charray.rs \
 	src/slapi_r_plugin/src/constants.rs \
 	src/slapi_r_plugin/src/dn.rs \
 	src/slapi_r_plugin/src/entry.rs \
diff --git a/configure.ac b/configure.ac
index b3cf77d08..61bf35e4a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -122,7 +122,7 @@ if test "$enable_debug" = yes ; then
     debug_defs="-DDEBUG -DMCC_DEBUG"
     debug_cflags="-g3 -O0 -rdynamic"
     debug_cxxflags="-g3 -O0 -rdynamic"
-    debug_rust_defs="-C debuginfo=2"
+    debug_rust_defs="-C debuginfo=2 -Z macro-backtrace"
     cargo_defs=""
     rust_target_dir="debug"
 else
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
index d8b8798b6..e3444e944 100644
--- a/ldap/servers/slapd/pagedresults.c
+++ b/ldap/servers/slapd/pagedresults.c
@@ -738,10 +738,10 @@ pagedresults_cleanup(Connection *conn, int needlock)
     int i;
     PagedResults *prp = NULL;

-    slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n");
+    /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n"); */

     if (NULL == conn) {
-        slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n");
+        /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n"); */
         return 0;
     }

@@ -767,7 +767,7 @@ pagedresults_cleanup(Connection *conn, int needlock)
     if (needlock) {
         pthread_mutex_unlock(&(conn->c_mutex));
     }
-    slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc);
+    /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc); */
     return rc;
 }

diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index 1ad9d0399..f7d1f8885 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -3351,13 +3351,15 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
         if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
             return (-1);
         }
-        pblock->pb_plugin->plg_syntax_names = (char **)value;
+        PR_ASSERT(pblock->pb_plugin->plg_syntax_names == NULL);
+        pblock->pb_plugin->plg_syntax_names = slapi_ch_array_dup((char **)value);
         break;
     case SLAPI_PLUGIN_SYNTAX_OID:
         if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
             return (-1);
         }
-        pblock->pb_plugin->plg_syntax_oid = (char *)value;
+        PR_ASSERT(pblock->pb_plugin->plg_syntax_oid == NULL);
+        pblock->pb_plugin->plg_syntax_oid = slapi_ch_strdup((char *)value);
         break;
     case SLAPI_PLUGIN_SYNTAX_FLAGS:
         if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
@@ -3806,7 +3808,8 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
         if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
             return (-1);
         }
-        pblock->pb_plugin->plg_mr_names = (char **)value;
+        PR_ASSERT(pblock->pb_plugin->plg_mr_names == NULL);
+        pblock->pb_plugin->plg_mr_names = slapi_ch_array_dup((char **)value);
         break;
     case SLAPI_PLUGIN_MR_COMPARE:
         if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c
index 282b98738..e6b48de60 100644
--- a/ldap/servers/slapd/plugin.c
+++ b/ldap/servers/slapd/plugin.c
@@ -2694,6 +2694,13 @@ plugin_free(struct slapdplugin *plugin)
     if (plugin->plg_type == SLAPI_PLUGIN_PWD_STORAGE_SCHEME || plugin->plg_type == SLAPI_PLUGIN_REVER_PWD_STORAGE_SCHEME) {
         slapi_ch_free_string(&plugin->plg_pwdstorageschemename);
     }
+    if (plugin->plg_type == SLAPI_PLUGIN_SYNTAX) {
+        slapi_ch_free_string(&plugin->plg_syntax_oid);
+        slapi_ch_array_free(plugin->plg_syntax_names);
+    }
+    if (plugin->plg_type == SLAPI_PLUGIN_MATCHINGRULE) {
+        slapi_ch_array_free(plugin->plg_mr_names);
+    }
     release_componentid(plugin->plg_identity);
     slapi_counter_destroy(&plugin->plg_op_counter);
     if (!plugin->plg_group) {
diff --git a/ldap/servers/slapd/pw_verify.c b/ldap/servers/slapd/pw_verify.c
index 4f0944b73..4ff1fa2fd 100644
--- a/ldap/servers/slapd/pw_verify.c
+++ b/ldap/servers/slapd/pw_verify.c
@@ -111,6 +111,7 @@ pw_verify_token_dn(Slapi_PBlock *pb) {
     if (fernet_verify_token(dn, cred->bv_val, key, tok_ttl) != 0) {
         rc = SLAPI_BIND_SUCCESS;
     }
+    slapi_ch_free_string(&key);
 #endif
     return rc;
 }
diff --git a/ldap/servers/slapd/tools/pwenc.c b/ldap/servers/slapd/tools/pwenc.c
index 1629c06cd..d89225e34 100644
--- a/ldap/servers/slapd/tools/pwenc.c
+++ b/ldap/servers/slapd/tools/pwenc.c
@@ -34,7 +34,7 @@

 int ldap_syslog;
 int ldap_syslog_level;
-int slapd_ldap_debug = LDAP_DEBUG_ANY;
+/* int slapd_ldap_debug = LDAP_DEBUG_ANY; */
 int detached;
 FILE *error_logfp;
 FILE *access_logfp;
diff --git a/src/slapi_r_plugin/README.md b/src/slapi_r_plugin/README.md
index af9743ec9..1c9bcbf17 100644
--- a/src/slapi_r_plugin/README.md
+++ b/src/slapi_r_plugin/README.md
@@ -15,7 +15,7 @@ the [Rust Nomicon](https://doc.rust-lang.org/nomicon/index.html)
 > warning about danger.

 This document will not detail the specifics of unsafe or the invariants you must adhere to for rust
-to work with C.
+to work with C. Failure to uphold these invariants will lead to less than optimal consequences.

 If you still want to see more about the plugin bindings, go on ...

@@ -135,7 +135,7 @@ associated functions.
 Now, you may notice that not all members of the trait are implemented. This is due to a feature
 of rust known as default trait impls. This allows the trait origin (src/plugin.rs) to provide
 template versions of these functions. If you "overwrite" them, your implementation is used. Unlike
-OO, you may not inherit or call the default function.
+OO, you may not inherit or call the default function.

 If a default is not provided you *must* implement that function to be considered valid. Today (20200422)
 this only applies to `start` and `close`.
@@ -183,7 +183,7 @@ It's important to understand how Rust manages memory both on the stack and the h
 As a result, this means that we must express in code, assertions about the proper ownership of memory
 and who is responsible for it (unlike C, where it can be hard to determine who or what is responsible
 for freeing some value.) Failure to handle this correctly, can and will lead to crashes, leaks or
-*hand waving* magical failures that are eXtReMeLy FuN to debug.
+*hand waving* magical failures that are `eXtReMeLy FuN` to debug.

 ### Reference Types

diff --git a/src/slapi_r_plugin/src/charray.rs b/src/slapi_r_plugin/src/charray.rs
new file mode 100644
index 000000000..d2e44693c
--- /dev/null
+++ b/src/slapi_r_plugin/src/charray.rs
@@ -0,0 +1,32 @@
+use std::ffi::CString;
+use std::iter::once;
+use std::os::raw::c_char;
+use std::ptr;
+
+pub struct Charray {
+    pin: Vec<CString>,
+    charray: Vec<*const c_char>,
+}
+
+impl Charray {
+    pub fn new(input: &[&str]) -> Result<Self, ()> {
+        let pin: Result<Vec<_>, ()> = input
+            .iter()
+            .map(|s| CString::new(*s).map_err(|_e| ()))
+            .collect();
+
+        let pin = pin?;
+
+        let charray: Vec<_> = pin
+            .iter()
+            .map(|s| s.as_ptr())
+            .chain(once(ptr::null()))
+            .collect();
+
+        Ok(Charray { pin, charray })
+    }
+
+    pub fn as_ptr(&self) -> *const *const c_char {
+        self.charray.as_ptr()
+    }
+}
diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs
index 076907bae..be28cac95 100644
--- a/src/slapi_r_plugin/src/lib.rs
+++ b/src/slapi_r_plugin/src/lib.rs
@@ -1,9 +1,11 @@
-// extern crate lazy_static;
+#[macro_use]
+extern crate lazy_static;

 #[macro_use]
 pub mod macros;
 pub mod backend;
 pub mod ber;
+pub mod charray;
 mod constants;
 pub mod dn;
 pub mod entry;
@@ -20,6 +22,7 @@ pub mod value;
 pub mod prelude {
     pub use crate::backend::{BackendRef, BackendRefTxn};
     pub use crate::ber::BerValRef;
+    pub use crate::charray::Charray;
     pub use crate::constants::{FilterType, PluginFnType, PluginType, PluginVersion, LDAP_SUCCESS};
     pub use crate::dn::{Sdn, SdnRef};
     pub use crate::entry::EntryRef;
@@ -30,8 +33,7 @@ pub mod prelude {
     pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3};
     pub use crate::search::{Search, SearchScope};
     pub use crate::syntax_plugin::{
-        matchingrule_register, name_to_leaking_char, names_to_leaking_char_array, SlapiOrdMr,
-        SlapiSubMr, SlapiSyntaxPlugin1,
+        matchingrule_register, SlapiOrdMr, SlapiSubMr, SlapiSyntaxPlugin1,
     };
     pub use crate::task::{task_register_handler_fn, task_unregister_handler_fn, Task, TaskRef};
     pub use crate::value::{Value, ValueArray, ValueArrayRef, ValueRef};
diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs
index bc8dfa60f..97fc5d7ef 100644
--- a/src/slapi_r_plugin/src/macros.rs
+++ b/src/slapi_r_plugin/src/macros.rs
@@ -249,6 +249,7 @@ macro_rules! slapi_r_syntax_plugin_hooks {
     paste::item! {
         use libc;
         use std::convert::TryFrom;
+        use std::ffi::CString;

         #[no_mangle]
         pub extern "C" fn [<$mod_ident _plugin_init>](raw_pb: *const libc::c_void) -> i32 {
@@ -261,15 +262,15 @@ macro_rules! slapi_r_syntax_plugin_hooks {
             };

             // Setup the names/oids that this plugin provides syntaxes for.
-
-            let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::attr_supported_names()) };
-            match pb.register_syntax_names(name_ptr) {
+            // DS will clone these, so they can be ephemeral to this function.
+            let name_vec = Charray::new($hooks_ident::attr_supported_names().as_slice()).expect("invalid supported names");
+            match pb.register_syntax_names(name_vec.as_ptr()) {
                 0 => {},
                 e => return e,
             };

-            let name_ptr = unsafe { name_to_leaking_char($hooks_ident::attr_oid()) };
-            match pb.register_syntax_oid(name_ptr) {
+            let attr_oid = CString::new($hooks_ident::attr_oid()).expect("invalid attr oid");
+            match pb.register_syntax_oid(attr_oid.as_ptr()) {
                 0 => {},
                 e => return e,
             };
@@ -430,7 +431,8 @@ macro_rules! slapi_r_syntax_plugin_hooks {
                 e => return e,
             };

-            let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::eq_mr_supported_names()) };
+            let name_vec = Charray::new($hooks_ident::eq_mr_supported_names().as_slice()).expect("invalid mr supported names");
+            let name_ptr = name_vec.as_ptr();
             // SLAPI_PLUGIN_MR_NAMES
             match pb.register_mr_names(name_ptr) {
                 0 => {},
@@ -672,7 +674,8 @@ macro_rules! slapi_r_syntax_plugin_hooks {
                 e => return e,
             };

-            let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::ord_mr_supported_names()) };
+            let name_vec = Charray::new($hooks_ident::ord_mr_supported_names().as_slice()).expect("invalid ord supported names");
+            let name_ptr = name_vec.as_ptr();
             // SLAPI_PLUGIN_MR_NAMES
             match pb.register_mr_names(name_ptr) {
                 0 => {},
diff --git a/src/slapi_r_plugin/src/syntax_plugin.rs b/src/slapi_r_plugin/src/syntax_plugin.rs
index e7d5c01bd..86f84bdd8 100644
--- a/src/slapi_r_plugin/src/syntax_plugin.rs
+++ b/src/slapi_r_plugin/src/syntax_plugin.rs
@@ -1,11 +1,11 @@
 use crate::ber::BerValRef;
 // use crate::constants::FilterType;
+use crate::charray::Charray;
 use crate::error::PluginError;
 use crate::pblock::PblockRef;
 use crate::value::{ValueArray, ValueArrayRef};
 use std::cmp::Ordering;
 use std::ffi::CString;
-use std::iter::once;
 use std::os::raw::c_char;
 use std::ptr;

@@ -26,37 +26,6 @@ struct slapi_matchingRuleEntry {
     mr_compat_syntax: *const *const c_char,
 }

-pub unsafe fn name_to_leaking_char(name: &str) -> *const c_char {
-    let n = CString::new(name)
-        .expect("An invalid string has been hardcoded!")
-        .into_boxed_c_str();
-    let n_ptr = n.as_ptr();
-    // Now we intentionally leak the name here, and the pointer will remain valid.
-    Box::leak(n);
-    n_ptr
-}
-
-pub unsafe fn names_to_leaking_char_array(names: &[&str]) -> *const *const c_char {
-    let n_arr: Vec<CString> = names
-        .iter()
-        .map(|s| CString::new(*s).expect("An invalid string has been hardcoded!"))
-        .collect();
-    let n_arr = n_arr.into_boxed_slice();
-    let n_ptr_arr: Vec<*const c_char> = n_arr
-        .iter()
-        .map(|v| v.as_ptr())
-        .chain(once(ptr::null()))
-        .collect();
-    let n_ptr_arr = n_ptr_arr.into_boxed_slice();
-
-    // Now we intentionally leak these names here,
-    let _r_n_arr = Box::leak(n_arr);
-    let r_n_ptr_arr = Box::leak(n_ptr_arr);
-
-    let name_ptr = r_n_ptr_arr as *const _ as *const *const c_char;
-    name_ptr
-}
-
 // oid - the oid of the matching rule
 // name - the name of the mr
 // desc - description
@@ -69,20 +38,24 @@ pub unsafe fn matchingrule_register(
     syntax: &str,
     compat_syntax: &[&str],
 ) -> i32 {
-    let oid_ptr = name_to_leaking_char(oid);
-    let name_ptr = name_to_leaking_char(name);
-    let desc_ptr = name_to_leaking_char(desc);
-    let syntax_ptr = name_to_leaking_char(syntax);
-    let compat_syntax_ptr = names_to_leaking_char_array(compat_syntax);
+    // Make everything CStrings that live long enough.
+
+    let oid_cs = CString::new(oid).expect("invalid oid");
+    let name_cs = CString::new(name).expect("invalid name");
+    let desc_cs = CString::new(desc).expect("invalid desc");
+    let syntax_cs = CString::new(syntax).expect("invalid syntax");
+
+    // We have to do this so the cstrings live long enough.
+    let compat_syntax_ca = Charray::new(compat_syntax).expect("invalid compat_syntax");

     let new_mr = slapi_matchingRuleEntry {
-        mr_oid: oid_ptr,
+        mr_oid: oid_cs.as_ptr(),
         _mr_oidalias: ptr::null(),
-        mr_name: name_ptr,
-        mr_desc: desc_ptr,
-        mr_syntax: syntax_ptr,
+        mr_name: name_cs.as_ptr(),
+        mr_desc: desc_cs.as_ptr(),
+        mr_syntax: syntax_cs.as_ptr(),
         _mr_obsolete: 0,
-        mr_compat_syntax: compat_syntax_ptr,
+        mr_compat_syntax: compat_syntax_ca.as_ptr(),
     };

     let new_mr_ptr = &new_mr as *const _;
--
2.26.3

@ -1,502 +0,0 @@

From 4faec52810e12070ef72da347bb590c57d8761e4 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 20 Nov 2020 17:47:18 -0500
Subject: [PATCH 1/2] Issue 3657 - Add options to dsctl for dsrc file

Description: Add options to create, modify, delete, and display
             the .dsrc CLI tool shortcut file.

Relates: https://github.com/389ds/389-ds-base/issues/3657

Reviewed by: firstyear (Thanks!)
---
 dirsrvtests/tests/suites/clu/dsrc_test.py | 136 ++++++++++
 src/lib389/cli/dsctl                      |   2 +
 src/lib389/lib389/cli_ctl/dsrc.py         | 312 ++++++++++++++++++++++
 3 files changed, 450 insertions(+)
 create mode 100644 dirsrvtests/tests/suites/clu/dsrc_test.py
 create mode 100644 src/lib389/lib389/cli_ctl/dsrc.py

diff --git a/dirsrvtests/tests/suites/clu/dsrc_test.py b/dirsrvtests/tests/suites/clu/dsrc_test.py
new file mode 100644
index 000000000..1b27700ec
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/dsrc_test.py
@@ -0,0 +1,136 @@
+import logging
+import pytest
+import os
+from os.path import expanduser
+from lib389.cli_base import FakeArgs
+from lib389.cli_ctl.dsrc import create_dsrc, modify_dsrc, delete_dsrc, display_dsrc
+from lib389._constants import DEFAULT_SUFFIX, DN_DM
+from lib389.topologies import topology_st as topo
+
+log = logging.getLogger(__name__)
+
+
+@pytest.fixture(scope="function")
+def setup(topo, request):
+    """Preserve any existing .dsrc file"""
+
+    dsrc_file = f'{expanduser("~")}/.dsrc'
+    backup_file = dsrc_file + ".original"
+    if os.path.exists(dsrc_file):
+        os.rename(dsrc_file, backup_file)
+
+    def fin():
+        if os.path.exists(backup_file):
+            os.rename(backup_file, dsrc_file)
+
+    request.addfinalizer(fin)
+
+
+def test_dsrc(topo, setup):
+    """Test "dsctl dsrc" command
+
+    :id: 0610de6c-e167-4761-bdab-3e677b2d44bb
+    :setup: Standalone Instance
+    :steps:
+        1. Test creation works
+        2. Test creating duplicate section
+        3. Test adding an additional inst config works
+        4. Test removing an instance works
+        5. Test modify works
+        6. Test delete works
+        7. Test display fails when no file is present
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+        5. Success
+        6. Success
+        7. Success
+    """
+
+    inst = topo.standalone
+    serverid = inst.serverid
+    second_inst_name = "Second"
+    second_inst_basedn = "o=second"
+    different_suffix = "o=different"
+
+    # Setup our args
+    args = FakeArgs()
+    args.basedn = DEFAULT_SUFFIX
+    args.binddn = DN_DM
+    args.json = None
+    args.uri = None
+    args.saslmech = None
+    args.tls_cacertdir = None
+    args.tls_cert = None
+    args.tls_key = None
+    args.tls_reqcert = None
+    args.starttls = None
+    args.cancel_starttls = None
+    args.pwdfile = None
+    args.do_it = True
+
+    # Create a dsrc configuration entry
+    create_dsrc(inst, log, args)
+    display_dsrc(inst, topo.logcap.log, args)
+    assert topo.logcap.contains("basedn = " + args.basedn)
+    assert topo.logcap.contains("binddn = " + args.binddn)
+    assert topo.logcap.contains("[" + serverid + "]")
+    topo.logcap.flush()
+
+    # Attempt to add duplicate instance section
+    with pytest.raises(ValueError):
+        create_dsrc(inst, log, args)
+
+    # Test adding a second instance works correctly
+    inst.serverid = second_inst_name
+    args.basedn = second_inst_basedn
+    create_dsrc(inst, log, args)
+    display_dsrc(inst, topo.logcap.log, args)
+    assert topo.logcap.contains("basedn = " + args.basedn)
+    assert topo.logcap.contains("[" + second_inst_name + "]")
+    topo.logcap.flush()
+
+    # Delete second instance
+    delete_dsrc(inst, log, args)
+    inst.serverid = serverid  # Restore original instance name
+    display_dsrc(inst, topo.logcap.log, args)
+    assert not topo.logcap.contains("[" + second_inst_name + "]")
+    assert not topo.logcap.contains("basedn = " + args.basedn)
+    # Make sure first instance config is still present
+    assert topo.logcap.contains("[" + serverid + "]")
+    assert topo.logcap.contains("binddn = " + args.binddn)
+    topo.logcap.flush()
+
+    # Modify the config
+    args.basedn = different_suffix
+    modify_dsrc(inst, log, args)
+    display_dsrc(inst, topo.logcap.log, args)
+    assert topo.logcap.contains(different_suffix)
+    topo.logcap.flush()
+
+    # Remove an arg from the config
+    args.basedn = ""
+    modify_dsrc(inst, log, args)
+    display_dsrc(inst, topo.logcap.log, args)
+    assert not topo.logcap.contains(different_suffix)
+    topo.logcap.flush()
+
+    # Remove the last entry, which should delete the file
+    delete_dsrc(inst, log, args)
+    dsrc_file = f'{expanduser("~")}/.dsrc'
+    assert not os.path.exists(dsrc_file)
+
+    # Make sure display fails
+    with pytest.raises(ValueError):
+        display_dsrc(inst, log, args)
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main(["-s", CURRENT_FILE])
+
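A side note on the test above: FakeArgs (from lib389.cli_base) stands in for an argparse Namespace, which is why the test can assign options as plain attributes and set the unused ones to None (the CLI functions only act on values that are not None). A hedged sketch of the same idea, with a hypothetical stand-in class rather than the real lib389 implementation:

# Hypothetical sketch of the FakeArgs pattern, not the lib389 class itself.
class FakeArgsSketch:
    pass  # attributes are attached ad hoc, like argparse.Namespace

args = FakeArgsSketch()
args.basedn = "dc=example,dc=com"   # option the caller wants to set
args.pwdfile = None                 # None means "option not given"
args.do_it = True                   # skip interactive confirmation
print(vars(args))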
diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl
index fe9bc10e9..69f069297 100755
--- a/src/lib389/cli/dsctl
+++ b/src/lib389/cli/dsctl
@@ -23,6 +23,7 @@ from lib389.cli_ctl import tls as cli_tls
 from lib389.cli_ctl import health as cli_health
 from lib389.cli_ctl import nsstate as cli_nsstate
 from lib389.cli_ctl import dbgen as cli_dbgen
+from lib389.cli_ctl import dsrc as cli_dsrc
 from lib389.cli_ctl.instance import instance_remove_all
 from lib389.cli_base import (
     disconnect_instance,
@@ -61,6 +62,7 @@ cli_tls.create_parser(subparsers)
 cli_health.create_parser(subparsers)
 cli_nsstate.create_parser(subparsers)
 cli_dbgen.create_parser(subparsers)
+cli_dsrc.create_parser(subparsers)

 argcomplete.autocomplete(parser)

diff --git a/src/lib389/lib389/cli_ctl/dsrc.py b/src/lib389/lib389/cli_ctl/dsrc.py
new file mode 100644
index 000000000..e49c7f819
--- /dev/null
+++ b/src/lib389/lib389/cli_ctl/dsrc.py
@@ -0,0 +1,312 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import json
+from os.path import expanduser
+from os import path, remove
+from ldapurl import isLDAPUrl
+from ldap.dn import is_dn
+import configparser
+
+
+def create_dsrc(inst, log, args):
+    """Create the .dsrc file
+
+    [instance]
+    uri = ldaps://hostname:port
+    basedn = dc=example,dc=com
+    binddn = uid=user,....
+    saslmech = [EXTERNAL|PLAIN]
+    tls_cacertdir = /path/to/cacertdir
+    tls_cert = /path/to/user.crt
+    tls_key = /path/to/user.key
+    tls_reqcert = [never, hard, allow]
+    starttls = [true, false]
+    pwdfile = /path/to/file
+    """
+
+    dsrc_file = f'{expanduser("~")}/.dsrc'
+    config = configparser.ConfigParser()
+    config.read(dsrc_file)
+
+    # Verify this section does not already exist
+    instances = config.sections()
+    if inst.serverid in instances:
+        raise ValueError("There is already a configuration section for this instance!")
+
+    # Process and validate the args
+    config[inst.serverid] = {}
+
+    if args.uri is not None:
+        if not isLDAPUrl(args.uri):
+            raise ValueError("The uri is not a valid LDAP URL!")
+        if args.uri.startswith("ldapi"):
+            # We must use EXTERNAL saslmech for LDAPI
+            args.saslmech = "EXTERNAL"
+        config[inst.serverid]['uri'] = args.uri
+    if args.basedn is not None:
+        if not is_dn(args.basedn):
+            raise ValueError("The basedn is not a valid DN!")
+        config[inst.serverid]['basedn'] = args.basedn
+    if args.binddn is not None:
+        if not is_dn(args.binddn):
+            raise ValueError("The binddn is not a valid DN!")
+        config[inst.serverid]['binddn'] = args.binddn
+    if args.saslmech is not None:
+        if args.saslmech not in ['EXTERNAL', 'PLAIN']:
+            raise ValueError("The saslmech must be EXTERNAL or PLAIN!")
+        config[inst.serverid]['saslmech'] = args.saslmech
+    if args.tls_cacertdir is not None:
+        if not path.exists(args.tls_cacertdir):
+            raise ValueError('--tls-cacertdir directory does not exist!')
+        config[inst.serverid]['tls_cacertdir'] = args.tls_cacertdir
+    if args.tls_cert is not None:
+        if not path.exists(args.tls_cert):
+            raise ValueError('--tls-cert does not point to an existing file!')
+        config[inst.serverid]['tls_cert'] = args.tls_cert
+    if args.tls_key is not None:
+        if not path.exists(args.tls_key):
+            raise ValueError('--tls-key does not point to an existing file!')
+        config[inst.serverid]['tls_key'] = args.tls_key
+    if args.tls_reqcert is not None:
+        if args.tls_reqcert not in ['never', 'hard', 'allow']:
+            raise ValueError('--tls-reqcert value is invalid (must be either "never", "allow", or "hard")!')
+        config[inst.serverid]['tls_reqcert'] = args.tls_reqcert
+    if args.starttls:
+        config[inst.serverid]['starttls'] = 'true'
+    if args.pwdfile is not None:
+        if not path.exists(args.pwdfile):
+            raise ValueError('--pwdfile does not exist!')
+        config[inst.serverid]['pwdfile'] = args.pwdfile
+
+    if len(config[inst.serverid]) == 0:
+        # No args set
+        raise ValueError("You must set at least one argument for the new dsrc file!")
+
+    # Print a preview of the config
+    log.info(f'Updating "{dsrc_file}" with:\n')
+    log.info(f'  [{inst.serverid}]')
+    for k, v in config[inst.serverid].items():
+        log.info(f'  {k} = {v}')
+
+    # Perform confirmation?
+    if not args.do_it:
+        while 1:
+            val = input(f'\nUpdate "{dsrc_file}" ? [yes]: ').rstrip().lower()
+            if val == '' or val == 'y' or val == 'yes':
+                break
+            if val == 'n' or val == 'no':
+                return
+
+    # Now write the file
+    with open(dsrc_file, 'w') as configfile:
+        config.write(configfile)
+
+    log.info(f'Successfully updated: {dsrc_file}')
+
+
+def modify_dsrc(inst, log, args):
+    """Modify the instance config
+    """
+    dsrc_file = f'{expanduser("~")}/.dsrc'
+
+    if path.exists(dsrc_file):
+        config = configparser.ConfigParser()
+        config.read(dsrc_file)
+
+        # Verify we have a section to modify
+        instances = config.sections()
+        if inst.serverid not in instances:
+            raise ValueError("There is no configuration section for this instance to modify!")
+
+        # Process and validate the args
+        if args.uri is not None:
+            if not isLDAPUrl(args.uri):
+                raise ValueError("The uri is not a valid LDAP URL!")
+            if args.uri.startswith("ldapi"):
+                # We must use EXTERNAL saslmech for LDAPI
+                args.saslmech = "EXTERNAL"
+            if args.uri == '':
+                del config[inst.serverid]['uri']
+            else:
+                config[inst.serverid]['uri'] = args.uri
+        if args.basedn is not None:
+            if not is_dn(args.basedn):
+                raise ValueError("The basedn is not a valid DN!")
+            if args.basedn == '':
+                del config[inst.serverid]['basedn']
+            else:
+                config[inst.serverid]['basedn'] = args.basedn
+        if args.binddn is not None:
+            if not is_dn(args.binddn):
+                raise ValueError("The binddn is not a valid DN!")
+            if args.binddn == '':
+                del config[inst.serverid]['binddn']
+            else:
+                config[inst.serverid]['binddn'] = args.binddn
+        if args.saslmech is not None:
+            if args.saslmech not in ['EXTERNAL', 'PLAIN']:
+                raise ValueError("The saslmech must be EXTERNAL or PLAIN!")
+            if args.saslmech == '':
+                del config[inst.serverid]['saslmech']
+            else:
+                config[inst.serverid]['saslmech'] = args.saslmech
+        if args.tls_cacertdir is not None:
+            if not path.exists(args.tls_cacertdir):
+                raise ValueError('--tls-cacertdir directory does not exist!')
+            if args.tls_cacertdir == '':
+                del config[inst.serverid]['tls_cacertdir']
+            else:
+                config[inst.serverid]['tls_cacertdir'] = args.tls_cacertdir
+        if args.tls_cert is not None:
+            if not path.exists(args.tls_cert):
+                raise ValueError('--tls-cert does not point to an existing file!')
+            if args.tls_cert == '':
+                del config[inst.serverid]['tls_cert']
+            else:
+                config[inst.serverid]['tls_cert'] = args.tls_cert
+        if args.tls_key is not None:
+            if not path.exists(args.tls_key):
+                raise ValueError('--tls-key does not point to an existing file!')
+            if args.tls_key == '':
+                del config[inst.serverid]['tls_key']
+            else:
+                config[inst.serverid]['tls_key'] = args.tls_key
+        if args.tls_reqcert is not None:
+            if args.tls_reqcert not in ['never', 'hard', 'allow']:
+                raise ValueError('--tls-reqcert value is invalid (must be either "never", "allow", or "hard")!')
+            if args.tls_reqcert == '':
+                del config[inst.serverid]['tls_reqcert']
+            else:
+                config[inst.serverid]['tls_reqcert'] = args.tls_reqcert
+        if args.starttls:
+            config[inst.serverid]['starttls'] = 'true'
+        if args.cancel_starttls:
+            config[inst.serverid]['starttls'] = 'false'
+        if args.pwdfile is not None:
+            if not path.exists(args.pwdfile):
+                raise ValueError('--pwdfile does not exist!')
+            if args.pwdfile == '':
+                del config[inst.serverid]['pwdfile']
+            else:
+                config[inst.serverid]['pwdfile'] = args.pwdfile
+
+        # Okay now rewrite the file
+        with open(dsrc_file, 'w') as configfile:
+            config.write(configfile)
+
+        log.info(f'Successfully updated: {dsrc_file}')
+    else:
+        raise ValueError(f'There is no .dsrc file "{dsrc_file}" to update!')
+
+
+def delete_dsrc(inst, log, args):
+    """Delete the .dsrc file
+    """
+    dsrc_file = f'{expanduser("~")}/.dsrc'
+    if path.exists(dsrc_file):
+        if not args.do_it:
+            # Get confirmation
+            while 1:
+                val = input("\nAre you sure you want to remove this instance's configuration? [no]: ").rstrip().lower()
+                if val == 'y' or val == 'yes':
+                    break
+                if val == '' or val == 'n' or val == 'no':
+                    return
+
+        config = configparser.ConfigParser()
+        config.read(dsrc_file)
+        instances = config.sections()
+        if inst.serverid not in instances:
+            raise ValueError("There is no configuration for this instance")
+
+        # Update the config object
+        del config[inst.serverid]
+
+        if len(config.sections()) == 0:
+            # The file would be empty so just delete it
+            try:
+                remove(dsrc_file)
+                log.info(f'Successfully removed: {dsrc_file}')
+                return
+            except OSError as e:
+                raise ValueError(f'Failed to delete "{dsrc_file}", error: {str(e)}')
+        else:
+            # write the updated config
+            with open(dsrc_file, 'w') as configfile:
+                config.write(configfile)
+    else:
+        raise ValueError(f'There is no .dsrc file "{dsrc_file}" to update!')
+
+    log.info(f'Successfully updated: {dsrc_file}')
+
+
+def display_dsrc(inst, log, args):
+    """Display the contents of the ~/.dsrc file
+    """
+    dsrc_file = f'{expanduser("~")}/.dsrc'
+
+    if not path.exists(dsrc_file):
+        raise ValueError(f'There is no dsrc file "{dsrc_file}" to display!')
+
+    config = configparser.ConfigParser()
+    config.read(dsrc_file)
+    instances = config.sections()
+
+    for inst_section in instances:
+        if args.json:
+            log.info(json.dumps({inst_section: dict(config[inst_section])}, indent=4))
+        else:
+            log.info(f'[{inst_section}]')
+            for k, v in config[inst_section].items():
+                log.info(f'{k} = {v}')
+            log.info("")
+
+
+def create_parser(subparsers):
+    dsrc_parser = subparsers.add_parser('dsrc', help="Manage the .dsrc file")
+    subcommands = dsrc_parser.add_subparsers(help="action")
+
+    # Create .dsrc file
+    dsrc_create_parser = subcommands.add_parser('create', help='Generate the .dsrc file')
+    dsrc_create_parser.set_defaults(func=create_dsrc)
+    dsrc_create_parser.add_argument('--uri', help="The URI (LDAP URL) for the Directory Server instance.")
+    dsrc_create_parser.add_argument('--basedn', help="The default database suffix.")
+    dsrc_create_parser.add_argument('--binddn', help="The default Bind DN used for authentication.")
+    dsrc_create_parser.add_argument('--saslmech', help="The SASL mechanism to use: PLAIN or EXTERNAL.")
+    dsrc_create_parser.add_argument('--tls-cacertdir', help="The directory containing the Trusted Certificate Authority certificate.")
+    dsrc_create_parser.add_argument('--tls-cert', help="The absolute file name to the server certificate.")
+    dsrc_create_parser.add_argument('--tls-key', help="The absolute file name to the server certificate key.")
+    dsrc_create_parser.add_argument('--tls-reqcert', help="Request certificate strength: 'never', 'allow', 'hard'")
+    dsrc_create_parser.add_argument('--starttls', action='store_true', help="Use startTLS for connection to the server.")
+    dsrc_create_parser.add_argument('--pwdfile', help="The absolute path to a file containing the Bind DN's password.")
+    dsrc_create_parser.add_argument('--do-it', action='store_true', help="Create the file without any confirmation.")
+
+    dsrc_modify_parser = subcommands.add_parser('modify', help='Modify the .dsrc file')
+    dsrc_modify_parser.set_defaults(func=modify_dsrc)
+    dsrc_modify_parser.add_argument('--uri', nargs='?', const='', help="The URI (LDAP URL) for the Directory Server instance.")
+    dsrc_modify_parser.add_argument('--basedn', nargs='?', const='', help="The default database suffix.")
+    dsrc_modify_parser.add_argument('--binddn', nargs='?', const='', help="The default Bind DN used for authentication.")
+    dsrc_modify_parser.add_argument('--saslmech', nargs='?', const='', help="The SASL mechanism to use: PLAIN or EXTERNAL.")
+    dsrc_modify_parser.add_argument('--tls-cacertdir', nargs='?', const='', help="The directory containing the Trusted Certificate Authority certificate.")
+    dsrc_modify_parser.add_argument('--tls-cert', nargs='?', const='', help="The absolute file name to the server certificate.")
+    dsrc_modify_parser.add_argument('--tls-key', nargs='?', const='', help="The absolute file name to the server certificate key.")
+    dsrc_modify_parser.add_argument('--tls-reqcert', nargs='?', const='', help="Request certificate strength: 'never', 'allow', 'hard'")
+    dsrc_modify_parser.add_argument('--starttls', action='store_true', help="Use startTLS for connection to the server.")
+    dsrc_modify_parser.add_argument('--cancel-starttls', action='store_true', help="Do not use startTLS for connection to the server.")
+    dsrc_modify_parser.add_argument('--pwdfile', nargs='?', const='', help="The absolute path to a file containing the Bind DN's password.")
+    dsrc_modify_parser.add_argument('--do-it', action='store_true', help="Update the file without any confirmation.")
+
+    # Delete the instance from the .dsrc file
+    dsrc_delete_parser = subcommands.add_parser('delete', help='Delete instance configuration from the .dsrc file.')
+    dsrc_delete_parser.set_defaults(func=delete_dsrc)
+    dsrc_delete_parser.add_argument('--do-it', action='store_true',
+                                    help="Delete this instance's configuration from the .dsrc file.")
+
+    # Display .dsrc file
+    dsrc_display_parser = subcommands.add_parser('display', help='Display the contents of the .dsrc file.')
+    dsrc_display_parser.set_defaults(func=display_dsrc)
--
2.26.2
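To make the file layout concrete, here is a small hedged sketch of the kind of INI content create_dsrc() above produces and how display_dsrc() reads it back with configparser. The section name and values are hypothetical, not output captured from a real instance:

# Sketch only: the [localhost] section and its values are made up for illustration.
import configparser

SAMPLE_DSRC = """
[localhost]
uri = ldapi://%%2fvar%%2frun%%2fslapd-localhost.socket
basedn = dc=example,dc=com
binddn = cn=Directory Manager
saslmech = EXTERNAL
"""

config = configparser.ConfigParser()
config.read_string(SAMPLE_DSRC)

for section in config.sections():        # one section per instance, as in display_dsrc()
    print(f"[{section}]")
    for key, value in config[section].items():
        print(f"{key} = {value}")

Note the doubled %% in the ldapi URL: configparser's default interpolation treats a single % as special, so literal percent signs in stored values must be escaped.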
@ -0,0 +1,37 @@

From 40e9a4835a6e95f021a711a7c42ce0c1bddc5ba4 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 21 May 2021 13:09:12 -0400
Subject: [PATCH 08/12] Issue 4773 - Enable interval feature of DNA plugin

Description: Enable the dormant interval feature in DNA plugin

relates: https://github.com/389ds/389-ds-base/issues/4773

Review by: mreynolds (one line commit rule)
---
 ldap/servers/plugins/dna/dna.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index bf6b74a99..928a3f54a 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -1023,7 +1023,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
     /* Set the default interval to 1 */
     entry->interval = 1;

-#ifdef DNA_ENABLE_INTERVAL
     value = slapi_entry_attr_get_charptr(e, DNA_INTERVAL);
     if (value) {
         entry->interval = strtoull(value, 0, 0);
@@ -1032,7 +1031,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)

     slapi_log_err(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM,
                   "dna_parse_config_entry - %s [%" PRIu64 "]\n", DNA_INTERVAL, entry->interval);
-#endif

     value = slapi_entry_attr_get_charptr(e, DNA_GENERATE);
     if (value) {
--
2.26.3

@ -1,902 +0,0 @@

From 201cb1147c0a34bddbd3e5c03aecd804c47a9905 Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Thu, 19 Nov 2020 10:21:10 +0100
Subject: [PATCH 2/2] Issue 4440 - BUG - ldifgen with --start-idx option fails
 with unsupported operand (#4444)

Bug description:
Got a TypeError exception when using:
    dsctl -v slapd-localhost ldifgen users --suffix
    dc=example,dc=com --parent ou=people,dc=example,dc=com
    --number 100000 --generic --start-idx=50
The reason is that by default the python parser provides the
value for numeric options:
    as an integer if specified by "--option value" or
    as a string if specified by "--option=value"

Fix description:
Convert the numeric parameters to integers when using them.
The options impacted are:
    - in the users subcommand: --number, --start-idx
    - in the mod-load subcommand: --num-users, --add-users,
      --del-users, --modrdn-users, --mod-users

FYI: An alternative solution would have been to tell the
parser that these values are integers. But two reasons
led me to implement the first solution:
    - the first solution fixes the problem for all users, while the
      second one fixes only the dsctl command.
    - the first solution is easier to test:
      I just added a new test file generated by a script
      that duplicated the existing ldifgen test, renamed the
      test cases and replaced the numeric arguments by
      strings.
      The second solution would need a redesign of the test framework
      to be able to test the parser.

relates: https://github.com/389ds/389-ds-base/issues/4440

Reviewed by:

Platforms tested: F32

(cherry picked from commit 3c3e1f30cdb046a1aabb93aacebcf261a76a0892)
---
 .../tests/suites/clu/dbgen_test_usan.py | 806 ++++++++++++++++++
 src/lib389/lib389/cli_ctl/dbgen.py      |  10 +-
 src/lib389/lib389/dbgen.py              |   3 +
 3 files changed, 814 insertions(+), 5 deletions(-)
 create mode 100644 dirsrvtests/tests/suites/clu/dbgen_test_usan.py

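Before the diff itself, a hedged sketch of the coercion strategy the fix description outlines: normalize numeric options with int() at the point of use, so both spellings of the option behave identically. The helper name below is hypothetical and only illustrates the idea; it is not the actual dbgen.py code:

# Sketch: coerce numeric CLI options defensively, whether they arrive as
# 50 (int) or "50" (str), before doing arithmetic with them.
def normalize_start_idx(start_idx, number):
    start_idx = int(start_idx)   # "50" and 50 both become 50
    number = int(number)
    return range(start_idx, start_idx + number)

assert list(normalize_start_idx("50", "3")) == [50, 51, 52]
assert list(normalize_start_idx(50, 3)) == [50, 51, 52]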
diff --git a/dirsrvtests/tests/suites/clu/dbgen_test_usan.py b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py
new file mode 100644
index 000000000..80ff63417
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py
@@ -0,0 +1,806 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import time
+
+"""
+    This file contains tests similar to dbgen_test.py,
+    except that parameters that are numbers are expressed as strings
+    (to mimic the parameter parser's default behavior, which returns an
+    int when parsing "option value" and a string when parsing "option=value").
+    This file has been generated by using:
+sed '
+9r z1
+s/ test_/ test_usan/
+/args.*= [0-9]/s,[0-9]*$,"&",
+/:id:/s/.$/1/
+' dbgen_test.py > dbgen_test_usan.py
+    ( with z1 file containing this comment )
+"""
+
+
+
+import subprocess
+import pytest
+
+from lib389.cli_ctl.dbgen import *
+from lib389.cos import CosClassicDefinitions, CosPointerDefinitions, CosIndirectDefinitions, CosTemplates
+from lib389.idm.account import Accounts
+from lib389.idm.group import Groups
+from lib389.idm.role import ManagedRoles, FilteredRoles, NestedRoles
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_st
+from lib389.cli_base import FakeArgs
+
+pytestmark = pytest.mark.tier0
+
+LOG_FILE = '/tmp/dbgen.log'
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+
+@pytest.fixture(scope="function")
+def set_log_file_and_ldif(topology_st, request):
+    global ldif_file
+    ldif_file = get_ldif_dir(topology_st.standalone) + '/created.ldif'
+
+    fh = logging.FileHandler(LOG_FILE)
+    fh.setLevel(logging.DEBUG)
+    log.addHandler(fh)
+
+    def fin():
+        log.info('Delete files')
+        os.remove(LOG_FILE)
+        os.remove(ldif_file)
+
+    request.addfinalizer(fin)
+
+
+def run_offline_import(instance, ldif_file):
+    log.info('Stopping the server and running offline import...')
+    instance.stop()
+    assert instance.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None,
+                            import_file=ldif_file)
+    instance.start()
+
+
+def run_ldapmodify_from_file(instance, ldif_file, output_to_check=None):
+    LDAP_MOD = '/usr/bin/ldapmodify'
+    log.info('Add entries from ldif file with ldapmodify')
+    result = subprocess.check_output([LDAP_MOD, '-cx', '-D', DN_DM, '-w', PASSWORD,
+                                      '-h', instance.host, '-p', str(instance.port), '-af', ldif_file])
+    if output_to_check is not None:
+        assert output_to_check in ensure_str(result)
+
+
+def check_value_in_log_and_reset(content_list):
+    with open(LOG_FILE, 'r+') as f:
+        file_content = f.read()
+        log.info('Check if content is present in output')
+        for item in content_list:
+            assert item in file_content
+
+        log.info('Reset log file for next test')
+        f.truncate(0)
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_users(topology_st, set_log_file_and_ldif):
+    """Test ldifgen (formerly dbgen) tool to create ldif with users
+
+    :id: 426b5b94-9923-454d-a736-7e71ca985e91
+    :setup: Standalone instance
+    :steps:
+        1. Create DS instance
+        2. Run ldifgen to generate ldif with users
+        3. Import generated ldif to database
+        4. Check it was properly imported
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+    """
+
+    standalone = topology_st.standalone
+
+    args = FakeArgs()
+    args.suffix = DEFAULT_SUFFIX
+    args.parent = 'ou=people,dc=example,dc=com'
+    args.number = "1000"
+    args.rdn_cn = False
+    args.generic = True
+    args.start_idx = "50"
+    args.localize = False
+    args.ldif_file = ldif_file
+
+    content_list = ['Generating LDIF with the following options:',
+                    'suffix={}'.format(args.suffix),
+                    'parent={}'.format(args.parent),
+                    'number={}'.format(args.number),
+                    'rdn-cn={}'.format(args.rdn_cn),
+                    'generic={}'.format(args.generic),
+                    'start-idx={}'.format(args.start_idx),
+                    'localize={}'.format(args.localize),
+                    'ldif-file={}'.format(args.ldif_file),
+                    'Writing LDIF',
+                    'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+    log.info('Run ldifgen to create users ldif')
+    dbgen_create_users(standalone, log, args)
+
+    log.info('Check if file exists')
+    assert os.path.exists(ldif_file)
+
+    check_value_in_log_and_reset(content_list)
+
+    log.info('Get number of accounts before import')
+    accounts = Accounts(standalone, DEFAULT_SUFFIX)
+    count_account = len(accounts.filter('(uid=*)'))
+
+    run_offline_import(standalone, ldif_file)
+
+    log.info('Check that accounts are imported')
+    assert len(accounts.filter('(uid=*)')) > count_account
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_groups(topology_st, set_log_file_and_ldif):
+    """Test ldifgen (formerly dbgen) tool to create ldif with group
+
+    :id: 97207413-9a93-4065-a5ec-63aa93801a31
+    :setup: Standalone instance
+    :steps:
+        1. Create DS instance
+        2. Run ldifgen to generate ldif with group
+        3. Import generated ldif to database
+        4. Check it was properly imported
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+    """
+    LDAP_RESULT = 'adding new entry "cn=myGroup-1,ou=groups,dc=example,dc=com"'
+
+    standalone = topology_st.standalone
+
+    args = FakeArgs()
+    args.NAME = 'myGroup'
+    args.parent = 'ou=groups,dc=example,dc=com'
+    args.suffix = DEFAULT_SUFFIX
+    args.number = "1"
+    args.num_members = "1000"
+    args.create_members = True
+    args.member_attr = 'uniquemember'
+    args.member_parent = 'ou=people,dc=example,dc=com'
+    args.ldif_file = ldif_file
+
+    content_list = ['Generating LDIF with the following options:',
+                    'NAME={}'.format(args.NAME),
+                    'number={}'.format(args.number),
+                    'suffix={}'.format(args.suffix),
+                    'num-members={}'.format(args.num_members),
+                    'create-members={}'.format(args.create_members),
+                    'member-parent={}'.format(args.member_parent),
+                    'member-attr={}'.format(args.member_attr),
+                    'ldif-file={}'.format(args.ldif_file),
+                    'Writing LDIF',
+                    'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+    log.info('Run ldifgen to create group ldif')
+    dbgen_create_groups(standalone, log, args)
+
+    log.info('Check if file exists')
+    assert os.path.exists(ldif_file)
+
+    check_value_in_log_and_reset(content_list)
+
+    log.info('Get number of accounts before import')
+    accounts = Accounts(standalone, DEFAULT_SUFFIX)
+    count_account = len(accounts.filter('(uid=*)'))
+
+    # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+    # ldapmodify will complain about already existing parent which causes subprocess to return exit code != 0
+    with pytest.raises(subprocess.CalledProcessError):
+        run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+    log.info('Check that accounts are imported')
+    assert len(accounts.filter('(uid=*)')) > count_account
+
+    log.info('Check that group is imported')
+    groups = Groups(standalone, DEFAULT_SUFFIX)
+    assert groups.exists(args.NAME + '-1')
+    new_group = groups.get(args.NAME + '-1')
+    assert new_group.present('uniquemember', 'uid=group_entry1-0152,ou=people,dc=example,dc=com')
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_classic(topology_st, set_log_file_and_ldif):
+    """Test ldifgen (formerly dbgen) tool to create a COS definition
+
+    :id: 8557f994-8a91-4f8a-86f6-9cb826a0b8f1
+    :setup: Standalone instance
+    :steps:
+        1. Create DS instance
+        2. Run ldifgen to generate ldif with classic COS definition
+        3. Import generated ldif to database
+        4. Check it was properly imported
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+    """
+
+    LDAP_RESULT = 'adding new entry "cn=My_Postal_Def,ou=cos definitions,dc=example,dc=com"'
+
+    standalone = topology_st.standalone
+
+    args = FakeArgs()
+    args.type = 'classic'
+    args.NAME = 'My_Postal_Def'
+    args.parent = 'ou=cos definitions,dc=example,dc=com'
+    args.create_parent = True
+    args.cos_specifier = 'businessCategory'
+    args.cos_attr = ['postalcode', 'telephonenumber']
+    args.cos_template = 'cn=sales,cn=classicCoS,dc=example,dc=com'
+    args.ldif_file = ldif_file
+
+    content_list = ['Generating LDIF with the following options:',
+                    'NAME={}'.format(args.NAME),
+                    'type={}'.format(args.type),
+                    'parent={}'.format(args.parent),
+                    'create-parent={}'.format(args.create_parent),
+                    'cos-specifier={}'.format(args.cos_specifier),
+                    'cos-template={}'.format(args.cos_template),
+                    'cos-attr={}'.format(args.cos_attr),
+                    'ldif-file={}'.format(args.ldif_file),
+                    'Writing LDIF',
+                    'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+    log.info('Run ldifgen to create COS definition ldif')
+    dbgen_create_cos_def(standalone, log, args)
+
+    log.info('Check if file exists')
+    assert os.path.exists(ldif_file)
+
+    check_value_in_log_and_reset(content_list)
+
+    # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+    run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+    log.info('Check that COS definition is imported')
+    cos_def = CosClassicDefinitions(standalone, args.parent)
+    assert cos_def.exists(args.NAME)
+    new_cos = cos_def.get(args.NAME)
+    assert new_cos.present('cosTemplateDN', args.cos_template)
+    assert new_cos.present('cosSpecifier', args.cos_specifier)
+    assert new_cos.present('cosAttribute', args.cos_attr[0])
+    assert new_cos.present('cosAttribute', args.cos_attr[1])
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_pointer(topology_st, set_log_file_and_ldif):
+    """Test ldifgen (formerly dbgen) tool to create a COS definition
+
+    :id: 6b26ca6d-226a-4f93-925e-faf95cc20211
+    :setup: Standalone instance
+    :steps:
+        1. Create DS instance
+        2. Run ldifgen to generate ldif with pointer COS definition
+        3. Import generated ldif to database
+        4. Check it was properly imported
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+    """
+
+    LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_pointer,ou=cos pointer definitions,dc=example,dc=com"'
+
+    standalone = topology_st.standalone
+
+    args = FakeArgs()
+    args.type = 'pointer'
+    args.NAME = 'My_Postal_Def_pointer'
+    args.parent = 'ou=cos pointer definitions,dc=example,dc=com'
+    args.create_parent = True
+    args.cos_specifier = None
+    args.cos_attr = ['postalcode', 'telephonenumber']
+    args.cos_template = 'cn=sales,cn=pointerCoS,dc=example,dc=com'
+    args.ldif_file = ldif_file
+
+    content_list = ['Generating LDIF with the following options:',
+                    'NAME={}'.format(args.NAME),
+                    'type={}'.format(args.type),
+                    'parent={}'.format(args.parent),
+                    'create-parent={}'.format(args.create_parent),
+                    'cos-template={}'.format(args.cos_template),
+                    'cos-attr={}'.format(args.cos_attr),
+                    'ldif-file={}'.format(args.ldif_file),
+                    'Writing LDIF',
+                    'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+    log.info('Run ldifgen to create COS definition ldif')
+    dbgen_create_cos_def(standalone, log, args)
+
+    log.info('Check if file exists')
+    assert os.path.exists(ldif_file)
+
+    check_value_in_log_and_reset(content_list)
+
+    # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+    run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+    log.info('Check that COS definition is imported')
+    cos_def = CosPointerDefinitions(standalone, args.parent)
+    assert cos_def.exists(args.NAME)
+    new_cos = cos_def.get(args.NAME)
+    assert new_cos.present('cosTemplateDN', args.cos_template)
+    assert new_cos.present('cosAttribute', args.cos_attr[0])
+    assert new_cos.present('cosAttribute', args.cos_attr[1])
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_indirect(topology_st, set_log_file_and_ldif):
+    """Test ldifgen (formerly dbgen) tool to create a COS definition
+
+    :id: ab4b799e-e801-432a-a61d-badad2628201
+    :setup: Standalone instance
+    :steps:
+        1. Create DS instance
+        2. Run ldifgen to generate ldif with indirect COS definition
+        3. Import generated ldif to database
+        4. Check it was properly imported
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+    """
+
+    LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_indirect,ou=cos indirect definitions,dc=example,dc=com"'
+
+    standalone = topology_st.standalone
+
+    args = FakeArgs()
+    args.type = 'indirect'
+    args.NAME = 'My_Postal_Def_indirect'
+    args.parent = 'ou=cos indirect definitions,dc=example,dc=com'
+    args.create_parent = True
+    args.cos_specifier = 'businessCategory'
+    args.cos_attr = ['postalcode', 'telephonenumber']
+    args.cos_template = None
+    args.ldif_file = ldif_file
+
+    content_list = ['Generating LDIF with the following options:',
+                    'NAME={}'.format(args.NAME),
+                    'type={}'.format(args.type),
+                    'parent={}'.format(args.parent),
+                    'create-parent={}'.format(args.create_parent),
+                    'cos-specifier={}'.format(args.cos_specifier),
+                    'cos-attr={}'.format(args.cos_attr),
+                    'ldif-file={}'.format(args.ldif_file),
+                    'Writing LDIF',
+                    'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+    log.info('Run ldifgen to create COS definition ldif')
+    dbgen_create_cos_def(standalone, log, args)
+
+    log.info('Check if file exists')
+    assert os.path.exists(ldif_file)
+
+    check_value_in_log_and_reset(content_list)
+
+    # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+    run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+    log.info('Check that COS definition is imported')
+    cos_def = CosIndirectDefinitions(standalone, args.parent)
+    assert cos_def.exists(args.NAME)
+    new_cos = cos_def.get(args.NAME)
+    assert new_cos.present('cosIndirectSpecifier', args.cos_specifier)
+    assert new_cos.present('cosAttribute', args.cos_attr[0])
+    assert new_cos.present('cosAttribute', args.cos_attr[1])
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_template(topology_st, set_log_file_and_ldif):
+    """Test ldifgen (formerly dbgen) tool to create a COS template
+
+    :id: 544017c7-4a82-4e7d-a047-00b68a28e071
+    :setup: Standalone instance
+    :steps:
+        1. Create DS instance
+        2. Run ldifgen to generate ldif with COS template
+        3. Import generated ldif to database
+        4. Check it was properly imported
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+    """
+
+    LDAP_RESULT = 'adding new entry "cn=My_Template,ou=cos templates,dc=example,dc=com"'
+
+    standalone = topology_st.standalone
+
+    args = FakeArgs()
+    args.NAME = 'My_Template'
+    args.parent = 'ou=cos templates,dc=example,dc=com'
+    args.create_parent = True
+    args.cos_priority = "1"
+    args.cos_attr_val = 'postalcode:12345'
+    args.ldif_file = ldif_file
+
+    content_list = ['Generating LDIF with the following options:',
+                    'NAME={}'.format(args.NAME),
+                    'parent={}'.format(args.parent),
+                    'create-parent={}'.format(args.create_parent),
+                    'cos-priority={}'.format(args.cos_priority),
+                    'cos-attr-val={}'.format(args.cos_attr_val),
+                    'ldif-file={}'.format(args.ldif_file),
+                    'Writing LDIF',
+                    'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+    log.info('Run ldifgen to create COS template ldif')
+    dbgen_create_cos_tmp(standalone, log, args)
+
+    log.info('Check if file exists')
+    assert os.path.exists(ldif_file)
+
+    check_value_in_log_and_reset(content_list)
+
+    # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+    run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+    log.info('Check that COS template is imported')
+    cos_temp = CosTemplates(standalone, args.parent)
+    assert cos_temp.exists(args.NAME)
+    new_cos = cos_temp.get(args.NAME)
+    assert new_cos.present('cosPriority', str(args.cos_priority))
+    assert new_cos.present('postalcode', '12345')
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_managed_role(topology_st, set_log_file_and_ldif):
+    """Test ldifgen (formerly dbgen) tool to create a managed role
+
+    :id: 10e77b41-0bc1-4ad5-a144-2c5107455b91
+    :setup: Standalone instance
+    :steps:
+        1. Create DS instance
+        2. Run ldifgen to generate ldif with managed role
+        3. Import generated ldif to database
+        4. Check it was properly imported
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+    """
+
+    LDAP_RESULT = 'adding new entry "cn=My_Managed_Role,ou=managed roles,dc=example,dc=com"'
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+
|
||||
+ args = FakeArgs()
|
||||
+
|
||||
+ args.NAME = 'My_Managed_Role'
|
||||
+ args.parent = 'ou=managed roles,dc=example,dc=com'
|
||||
+ args.create_parent = True
|
||||
+ args.type = 'managed'
|
||||
+ args.filter = None
|
||||
+ args.role_dn = None
|
||||
+ args.ldif_file = ldif_file
|
||||
+
|
||||
+ content_list = ['Generating LDIF with the following options:',
|
||||
+ 'NAME={}'.format(args.NAME),
|
||||
+ 'parent={}'.format(args.parent),
|
||||
+ 'create-parent={}'.format(args.create_parent),
|
||||
+ 'type={}'.format(args.type),
|
||||
+ 'ldif-file={}'.format(args.ldif_file),
|
||||
+ 'Writing LDIF',
|
||||
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
|
||||
+
|
||||
+ log.info('Run ldifgen to create managed role ldif')
|
||||
+ dbgen_create_role(standalone, log, args)
|
||||
+
|
||||
+ log.info('Check if file exists')
|
||||
+ assert os.path.exists(ldif_file)
|
||||
+
|
||||
+ check_value_in_log_and_reset(content_list)
|
||||
+
|
||||
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
|
||||
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
|
||||
+
|
||||
+ log.info('Check that managed role is imported')
|
||||
+ roles = ManagedRoles(standalone, DEFAULT_SUFFIX)
|
||||
+ assert roles.exists(args.NAME)
|
||||
+
|
||||
+
|
||||
+@pytest.mark.ds50545
|
||||
+@pytest.mark.bz1798394
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
|
||||
+def test_usandsconf_dbgen_filtered_role(topology_st, set_log_file_and_ldif):
|
||||
+ """Test ldifgen (formerly dbgen) tool to create a filtered role
|
||||
+
|
||||
+ :id: cb3c8ea8-4234-40e2-8810-fb6a25973921
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Create DS instance
|
||||
+ 2. Run ldifgen to generate ldif with filtered role
|
||||
+ 3. Import generated ldif to database
|
||||
+ 4. Check it was properly imported
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ """
|
||||
+
|
||||
+ LDAP_RESULT = 'adding new entry "cn=My_Filtered_Role,ou=filtered roles,dc=example,dc=com"'
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+
|
||||
+ args = FakeArgs()
|
||||
+
|
||||
+ args.NAME = 'My_Filtered_Role'
|
||||
+ args.parent = 'ou=filtered roles,dc=example,dc=com'
|
||||
+ args.create_parent = True
|
||||
+ args.type = 'filtered'
|
||||
+ args.filter = '"objectclass=posixAccount"'
|
||||
+ args.role_dn = None
|
||||
+ args.ldif_file = ldif_file
|
||||
+
|
||||
+ content_list = ['Generating LDIF with the following options:',
|
||||
+ 'NAME={}'.format(args.NAME),
|
||||
+ 'parent={}'.format(args.parent),
|
||||
+ 'create-parent={}'.format(args.create_parent),
|
||||
+ 'type={}'.format(args.type),
|
||||
+ 'filter={}'.format(args.filter),
|
||||
+ 'ldif-file={}'.format(args.ldif_file),
|
||||
+ 'Writing LDIF',
|
||||
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
|
||||
+
|
||||
+ log.info('Run ldifgen to create filtered role ldif')
|
||||
+ dbgen_create_role(standalone, log, args)
|
||||
+
|
||||
+ log.info('Check if file exists')
|
||||
+ assert os.path.exists(ldif_file)
|
||||
+
|
||||
+ check_value_in_log_and_reset(content_list)
|
||||
+
|
||||
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
|
||||
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
|
||||
+
|
||||
+ log.info('Check that filtered role is imported')
|
||||
+ roles = FilteredRoles(standalone, DEFAULT_SUFFIX)
|
||||
+ assert roles.exists(args.NAME)
|
||||
+ new_role = roles.get(args.NAME)
|
||||
+ assert new_role.present('nsRoleFilter', args.filter)
|
||||
+
|
||||
+
|
||||
+@pytest.mark.ds50545
|
||||
+@pytest.mark.bz1798394
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
|
||||
+def test_usandsconf_dbgen_nested_role(topology_st, set_log_file_and_ldif):
|
||||
+ """Test ldifgen (formerly dbgen) tool to create a nested role
|
||||
+
|
||||
+ :id: 97fff0a8-3103-4adb-be04-2799ff58d8f1
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Create DS instance
|
||||
+ 2. Run ldifgen to generate ldif with nested role
|
||||
+ 3. Import generated ldif to database
|
||||
+ 4. Check it was properly imported
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ """
|
||||
+
|
||||
+ LDAP_RESULT = 'adding new entry "cn=My_Nested_Role,ou=nested roles,dc=example,dc=com"'
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+
|
||||
+ args = FakeArgs()
|
||||
+ args.NAME = 'My_Nested_Role'
|
||||
+ args.parent = 'ou=nested roles,dc=example,dc=com'
|
||||
+ args.create_parent = True
|
||||
+ args.type = 'nested'
|
||||
+ args.filter = None
|
||||
+ args.role_dn = ['cn=some_role,ou=roles,dc=example,dc=com']
|
||||
+ args.ldif_file = ldif_file
|
||||
+
|
||||
+ content_list = ['Generating LDIF with the following options:',
|
||||
+ 'NAME={}'.format(args.NAME),
|
||||
+ 'parent={}'.format(args.parent),
|
||||
+ 'create-parent={}'.format(args.create_parent),
|
||||
+ 'type={}'.format(args.type),
|
||||
+ 'role-dn={}'.format(args.role_dn),
|
||||
+ 'ldif-file={}'.format(args.ldif_file),
|
||||
+ 'Writing LDIF',
|
||||
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
|
||||
+
|
||||
+ log.info('Run ldifgen to create nested role ldif')
|
||||
+ dbgen_create_role(standalone, log, args)
|
||||
+
|
||||
+ log.info('Check if file exists')
|
||||
+ assert os.path.exists(ldif_file)
|
||||
+
|
||||
+ check_value_in_log_and_reset(content_list)
|
||||
+
|
||||
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
|
||||
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
|
||||
+
|
||||
+ log.info('Check that nested role is imported')
|
||||
+ roles = NestedRoles(standalone, DEFAULT_SUFFIX)
|
||||
+ assert roles.exists(args.NAME)
|
||||
+ new_role = roles.get(args.NAME)
|
||||
+ assert new_role.present('nsRoleDN', args.role_dn[0])
|
||||
+
|
||||
+
|
||||
+@pytest.mark.ds50545
|
||||
+@pytest.mark.bz1798394
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
|
||||
+def test_usandsconf_dbgen_mod_ldif_mixed(topology_st, set_log_file_and_ldif):
|
||||
+ """Test ldifgen (formerly dbgen) tool to create mixed modification ldif
|
||||
+
|
||||
+ :id: 4a2e0901-2b48-452e-a4a0-507735132c81
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Create DS instance
|
||||
+ 2. Run ldifgen to generate modification ldif
|
||||
+ 3. Import generated ldif to database
|
||||
+ 4. Check it was properly imported
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ """
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+
|
||||
+ args = FakeArgs()
|
||||
+ args.parent = DEFAULT_SUFFIX
|
||||
+ args.create_users = True
|
||||
+ args.delete_users = True
|
||||
+ args.create_parent = False
|
||||
+ args.num_users = "1000"
|
||||
+ args.add_users = "100"
|
||||
+ args.del_users = "999"
|
||||
+ args.modrdn_users = "100"
|
||||
+ args.mod_users = "10"
|
||||
+ args.mod_attrs = ['cn', 'uid', 'sn']
|
||||
+ args.randomize = False
|
||||
+ args.ldif_file = ldif_file
|
||||
+
|
||||
+ content_list = ['Generating LDIF with the following options:',
|
||||
+ 'create-users={}'.format(args.create_users),
|
||||
+ 'parent={}'.format(args.parent),
|
||||
+ 'create-parent={}'.format(args.create_parent),
|
||||
+ 'delete-users={}'.format(args.delete_users),
|
||||
+ 'num-users={}'.format(args.num_users),
|
||||
+ 'add-users={}'.format(args.add_users),
|
||||
+ 'del-users={}'.format(args.del_users),
|
||||
+ 'modrdn-users={}'.format(args.modrdn_users),
|
||||
+ 'mod-users={}'.format(args.mod_users),
|
||||
+ 'mod-attrs={}'.format(args.mod_attrs),
|
||||
+ 'randomize={}'.format(args.randomize),
|
||||
+ 'ldif-file={}'.format(args.ldif_file),
|
||||
+ 'Writing LDIF',
|
||||
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
|
||||
+
|
||||
+ log.info('Run ldifgen to create modification ldif')
|
||||
+ dbgen_create_mods(standalone, log, args)
|
||||
+
|
||||
+ log.info('Check if file exists')
|
||||
+ assert os.path.exists(ldif_file)
|
||||
+
|
||||
+ check_value_in_log_and_reset(content_list)
|
||||
+
|
||||
+ log.info('Get number of accounts before import')
|
||||
+ accounts = Accounts(standalone, DEFAULT_SUFFIX)
|
||||
+ count_account = len(accounts.filter('(uid=*)'))
|
||||
+
|
||||
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
|
||||
+ # ldapmodify will complain about a lot of changes done which causes subprocess to return exit code != 0
|
||||
+ with pytest.raises(subprocess.CalledProcessError):
|
||||
+ run_ldapmodify_from_file(standalone, ldif_file)
|
||||
+
|
||||
+ log.info('Check that some accounts are imported')
|
||||
+ assert len(accounts.filter('(uid=*)')) > count_account
|
||||
+
|
||||
+
|
||||
+@pytest.mark.ds50545
|
||||
+@pytest.mark.bz1798394
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
|
||||
+def test_usandsconf_dbgen_nested_ldif(topology_st, set_log_file_and_ldif):
|
||||
+ """Test ldifgen (formerly dbgen) tool to create nested ldif
|
||||
+
|
||||
+ :id: 9c281c28-4169-45e0-8c07-c5502d9a7581
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Create DS instance
|
||||
+ 2. Run ldifgen to generate nested ldif
|
||||
+ 3. Import generated ldif to database
|
||||
+ 4. Check it was properly imported
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ """
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+
|
||||
+ args = FakeArgs()
|
||||
+ args.suffix = DEFAULT_SUFFIX
|
||||
+ args.node_limit = "100"
|
||||
+ args.num_users = "600"
|
||||
+ args.ldif_file = ldif_file
|
||||
+
|
||||
+ content_list = ['Generating LDIF with the following options:',
|
||||
+ 'suffix={}'.format(args.suffix),
|
||||
+ 'node-limit={}'.format(args.node_limit),
|
||||
+ 'num-users={}'.format(args.num_users),
|
||||
+ 'ldif-file={}'.format(args.ldif_file),
|
||||
+ 'Writing LDIF',
|
||||
+ 'Successfully created nested LDIF file ({}) containing 6 nodes/subtrees'.format(args.ldif_file)]
|
||||
+
|
||||
+ log.info('Run ldifgen to create nested ldif')
|
||||
+ dbgen_create_nested(standalone, log, args)
|
||||
+
|
||||
+ log.info('Check if file exists')
|
||||
+ assert os.path.exists(ldif_file)
|
||||
+
|
||||
+ check_value_in_log_and_reset(content_list)
|
||||
+
|
||||
+ log.info('Get number of accounts before import')
|
||||
+ accounts = Accounts(standalone, DEFAULT_SUFFIX)
|
||||
+ count_account = len(accounts.filter('(uid=*)'))
|
||||
+ count_ou = len(accounts.filter('(ou=*)'))
|
||||
+
|
||||
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
|
||||
+ # ldapmodify will complain about already existing suffix which causes subprocess to return exit code != 0
|
||||
+ with pytest.raises(subprocess.CalledProcessError):
|
||||
+ run_ldapmodify_from_file(standalone, ldif_file)
|
||||
+
|
||||
+ standalone.restart()
|
||||
+
|
||||
+ log.info('Check that accounts are imported')
|
||||
+ assert len(accounts.filter('(uid=*)')) > count_account
|
||||
+ assert len(accounts.filter('(ou=*)')) > count_ou
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main("-s %s" % CURRENT_FILE)
|
||||
diff --git a/src/lib389/lib389/cli_ctl/dbgen.py b/src/lib389/lib389/cli_ctl/dbgen.py
|
||||
index 7bc3892ba..058342fb1 100644
|
||||
--- a/src/lib389/lib389/cli_ctl/dbgen.py
|
||||
+++ b/src/lib389/lib389/cli_ctl/dbgen.py
|
||||
@@ -451,13 +451,13 @@ def dbgen_create_mods(inst, log, args):
|
||||
props = {
|
||||
"createUsers": args.create_users,
|
||||
"deleteUsers": args.delete_users,
|
||||
- "numUsers": args.num_users,
|
||||
+ "numUsers": int(args.num_users),
|
||||
"parent": args.parent,
|
||||
"createParent": args.create_parent,
|
||||
- "addUsers": args.add_users,
|
||||
- "delUsers": args.del_users,
|
||||
- "modrdnUsers": args.modrdn_users,
|
||||
- "modUsers": args.mod_users,
|
||||
+ "addUsers": int(args.add_users),
|
||||
+ "delUsers": int(args.del_users),
|
||||
+ "modrdnUsers": int(args.modrdn_users),
|
||||
+ "modUsers": int(args.mod_users),
|
||||
"random": args.randomize,
|
||||
"modAttrs": args.mod_attrs
|
||||
}
|
||||
diff --git a/src/lib389/lib389/dbgen.py b/src/lib389/lib389/dbgen.py
|
||||
index 6273781a2..10fb200f7 100644
|
||||
--- a/src/lib389/lib389/dbgen.py
|
||||
+++ b/src/lib389/lib389/dbgen.py
|
||||
@@ -220,6 +220,9 @@ def dbgen_users(instance, number, ldif_file, suffix, generic=False, entry_name="
|
||||
"""
|
||||
Generate an LDIF of randomly named entries
|
||||
"""
+    # Let's ensure that integer parameters are not strings
+    number = int(number)
+    startIdx = int(startIdx)
     familyname_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-FamilyNames')
     givename_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-GivenNames')
     familynames = []
-- 
2.26.2

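A usage sketch (not part of the patch): the int() coercion above exists because the CLI layer hands dbgen every numeric option as a string. The following minimal Python sketch drives the same code path the tests in this patch exercise, using only helpers that appear in the patch (FakeArgs, dbgen_create_mods); `standalone` is assumed to be an already connected lib389 instance and `log` a standard logger.

    from lib389.cli_base import FakeArgs
    from lib389.cli_ctl.dbgen import dbgen_create_mods

    args = FakeArgs()
    args.parent = 'dc=example,dc=com'
    args.create_users = True
    args.delete_users = True
    args.create_parent = False
    args.num_users = "1000"   # strings on purpose: dbgen_create_mods now int()s them
    args.add_users = "100"
    args.del_users = "999"
    args.modrdn_users = "100"
    args.mod_users = "10"
    args.mod_attrs = ['cn', 'uid', 'sn']
    args.randomize = False
    args.ldif_file = '/tmp/mods.ldif'
    # Writes a modification LDIF; apply it with ldapmodify, not ldif2db,
    # as the test comments above point out.
    dbgen_create_mods(standalone, log, args)
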
@ -0,0 +1,926 @@
From 8df95679519364d0993572ecbea72ab89e5250a5 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Thu, 20 May 2021 14:24:25 +0200
Subject: [PATCH 09/12] Issue 4623 - RFE - Monitor the current DB locks (#4762)

Description: DB locks get exhausted because of unindexed internal searches
(under a transaction). Indexing those searches is the way to prevent exhaustion.
If db locks get exhausted during a txn, it leads to a db panic, and the later
recovery can possibly fail. That leads to a full reinit of the instance where
the db locks got exhausted.

Add three attributes to the global BDB config: "nsslapd-db-locks-monitoring-enabled",
"nsslapd-db-locks-monitoring-threshold" and "nsslapd-db-locks-monitoring-pause".
By default, nsslapd-db-locks-monitoring-enabled is turned on,
nsslapd-db-locks-monitoring-threshold is set to 90%, and
nsslapd-db-locks-monitoring-pause is set to 500ms.

When the current lock usage crosses the threshold (90% of the maximum number
of locks by default), returning the next search candidate fails until the
maximum number of locks is increased or the current locks are released.
The monitoring thread runs with a configurable interval of 500ms.

Add the setting to the UI and CLI tools.

Fixes: https://github.com/389ds/389-ds-base/issues/4623

Reviewed by: @Firstyear, @tbordaz, @jchapma, @mreynolds389 (Thank you!!)
---
 .../suites/monitor/db_locks_monitor_test.py   | 251 ++++++++++++++++++
 ldap/servers/slapd/back-ldbm/back-ldbm.h      |  13 +-
 .../slapd/back-ldbm/db-bdb/bdb_config.c       |  99 +++++++
 .../slapd/back-ldbm/db-bdb/bdb_layer.c        |  85 ++++++
 ldap/servers/slapd/back-ldbm/init.c           |   3 +
 ldap/servers/slapd/back-ldbm/ldbm_config.c    |   3 +
 ldap/servers/slapd/back-ldbm/ldbm_config.h    |   3 +
 ldap/servers/slapd/back-ldbm/ldbm_search.c    |  13 +
 ldap/servers/slapd/libglobs.c                 |   4 +-
 src/cockpit/389-console/src/css/ds.css        |   4 +
 src/cockpit/389-console/src/database.jsx      |   7 +
 src/cockpit/389-console/src/index.html        |   2 +-
 .../src/lib/database/databaseConfig.jsx       |  88 +++++-
 src/lib389/lib389/backend.py                  |   3 +
 src/lib389/lib389/cli_conf/backend.py         |  10 +
 15 files changed, 576 insertions(+), 12 deletions(-)
 create mode 100644 dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py

diff --git a/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
new file mode 100644
index 000000000..7f9938f30
--- /dev/null
+++ b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
@@ -0,0 +1,251 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import logging
+import pytest
+import datetime
+import subprocess
+from multiprocessing import Process, Queue
+from lib389 import pid_from_file
+from lib389.utils import ldap, os
+from lib389._constants import DEFAULT_SUFFIX, ReplicaRole
+from lib389.cli_base import LogCapture
+from lib389.idm.user import UserAccounts
+from lib389.idm.organizationalunit import OrganizationalUnits
+from lib389.tasks import AccessLog
+from lib389.backend import Backends
+from lib389.ldclt import Ldclt
+from lib389.dbgen import dbgen_users
+from lib389.tasks import ImportTask
+from lib389.index import Indexes
+from lib389.plugins import AttributeUniquenessPlugin
+from lib389.config import BDB_LDBMConfig
+from lib389.monitor import MonitorLDBM
+from lib389.topologies import create_topology, _remove_ssca_db
+
+pytestmark = pytest.mark.tier2
+db_locks_monitoring_ack = pytest.mark.skipif(not os.environ.get('DB_LOCKS_MONITORING_ACK', False),
+                                             reason="DB locks monitoring tests may take hours if the feature is not present or another failure exists. "
+                                                    "Also, the feature requires a big amount of space as we set nsslapd-db-locks to 1300000.")
+
+DEBUGGING = os.getenv('DEBUGGING', default=False)
+if DEBUGGING:
+    logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+    logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+
+def _kill_ns_slapd(inst):
+    pid = str(pid_from_file(inst.ds_paths.pid_file))
+    cmd = ['kill', '-9', pid]
+    subprocess.Popen(cmd, stdout=subprocess.PIPE)
+
+
+@pytest.fixture(scope="function")
+def topology_st_fn(request):
+    """Create DS standalone instance for each test case"""
+
+    topology = create_topology({ReplicaRole.STANDALONE: 1})
+
+    def fin():
+        # Kill the hanging process at the end of test to prevent failures in the following tests
+        if DEBUGGING:
+            [_kill_ns_slapd(inst) for inst in topology]
+        else:
+            [_kill_ns_slapd(inst) for inst in topology]
+            assert _remove_ssca_db(topology)
+            [inst.stop() for inst in topology if inst.exists()]
+            [inst.delete() for inst in topology if inst.exists()]
+    request.addfinalizer(fin)
+
+    topology.logcap = LogCapture()
+    return topology
+
+
+@pytest.fixture(scope="function")
+def setup_attruniq_index_be_import(topology_st_fn):
+    """Enable Attribute Uniqueness, disable indexes and
+    import 120000 entries to the default backend
+    """
+    inst = topology_st_fn.standalone
+
+    inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access')
+    inst.config.set('nsslapd-plugin-logging', 'on')
+    inst.restart()
+
+    attruniq = AttributeUniquenessPlugin(inst, dn="cn=attruniq,cn=plugins,cn=config")
+    attruniq.create(properties={'cn': 'attruniq'})
+    for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']:
+        attruniq.add_unique_attribute(cn)
+    attruniq.add_unique_subtree(DEFAULT_SUFFIX)
+    attruniq.enable_all_subtrees()
+    attruniq.enable()
+
+    indexes = Indexes(inst)
+    for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']:
+        indexes.ensure_state(properties={
+            'cn': cn,
+            'nsSystemIndex': 'false',
+            'nsIndexType': 'none'})
+
+    bdb_config = BDB_LDBMConfig(inst)
+    bdb_config.replace("nsslapd-db-locks", "130000")
+    inst.restart()
+
+    ldif_dir = inst.get_ldif_dir()
+    import_ldif = ldif_dir + '/perf_import.ldif'
+
+    # Valid online import
+    import_task = ImportTask(inst)
+    dbgen_users(inst, 120000, import_ldif, DEFAULT_SUFFIX, entry_name="userNew")
+    import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
+    import_task.wait()
+    assert import_task.is_complete()
+
+
+def create_user_wrapper(q, users):
+    try:
+        users.create_test_user()
+    except Exception as ex:
+        q.put(ex)
+
+
+def spawn_worker_thread(function, users, log, timeout, info):
+    log.info(f"Starting the thread - {info}")
+    q = Queue()
+    p = Process(target=function, args=(q, users,))
+    p.start()
+
+    log.info(f"Waiting for {timeout} seconds for the thread to finish")
+    p.join(timeout)
+
+    if p.is_alive():
+        log.info("Killing the thread as it's still running")
+        p.terminate()
+        p.join()
+        raise RuntimeError(f"Function call was aborted: {info}")
+    result = q.get()
+    if isinstance(result, Exception):
+        raise result
+    else:
+        return result
+
+
+@db_locks_monitoring_ack
+@pytest.mark.parametrize("lock_threshold", [("70"), ("80"), ("95")])
+def test_exhaust_db_locks_basic(topology_st_fn, setup_attruniq_index_be_import, lock_threshold):
+ """Test that when all of the locks are exhausted the instance still working
|
||||
+ and database is not corrupted
|
||||
+
+    :id: 299108cc-04d8-4ddc-b58e-99157fccd643
+    :setup: Standalone instance with Attr Uniq plugin and user indexes disabled
+    :steps: 1. Set nsslapd-db-locks to 11000
+            2. Check that we stop acquiring new locks when the threshold is reached
+            3. Check that we can regulate a pause interval for DB locks monitoring thread
+            4. Make sure the feature works for different backends on the same suffix
+    :expectedresults:
+            1. Success
+            2. Success
+            3. Success
+            4. Success
+    """
+
+    inst = topology_st_fn.standalone
+    ADDITIONAL_SUFFIX = 'ou=newpeople,dc=example,dc=com'
+
+    backends = Backends(inst)
+    backends.create(properties={'nsslapd-suffix': ADDITIONAL_SUFFIX,
+                                'name': ADDITIONAL_SUFFIX[-3:]})
+    ous = OrganizationalUnits(inst, DEFAULT_SUFFIX)
+    ous.create(properties={'ou': 'newpeople'})
+
+    bdb_config = BDB_LDBMConfig(inst)
+    bdb_config.replace("nsslapd-db-locks", "11000")
+
+    # Restart server
+    inst.restart()
+
+    for lock_enabled in ["on", "off"]:
+        for lock_pause in ["100", "500", "1000"]:
+            bdb_config.replace("nsslapd-db-locks-monitoring-enabled", lock_enabled)
+            bdb_config.replace("nsslapd-db-locks-monitoring-threshold", lock_threshold)
+            bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause)
+            inst.restart()
+
+            if lock_enabled == "off":
+                raised_exception = (RuntimeError, ldap.SERVER_DOWN)
+            else:
+                raised_exception = ldap.OPERATIONS_ERROR
+
+            users = UserAccounts(inst, DEFAULT_SUFFIX)
+            with pytest.raises(raised_exception):
+                spawn_worker_thread(create_user_wrapper, users, log, 30,
+                                    f"Adding user with monitoring enabled='{lock_enabled}'; "
+                                    f"threshold='{lock_threshold}'; pause='{lock_pause}'.")
+            # Restart because we already run out of locks and the next unindexed searches will fail eventually
+            if lock_enabled == "off":
+                _kill_ns_slapd(inst)
+            inst.restart()
+
+            users = UserAccounts(inst, ADDITIONAL_SUFFIX, rdn=None)
+            with pytest.raises(raised_exception):
+                spawn_worker_thread(create_user_wrapper, users, log, 30,
+                                    f"Adding user with monitoring enabled='{lock_enabled}'; "
+                                    f"threshold='{lock_threshold}'; pause='{lock_pause}'.")
+            # In case feature is disabled - restart for the clean up
+            if lock_enabled == "off":
+                _kill_ns_slapd(inst)
+            inst.restart()
+
+
+@db_locks_monitoring_ack
+def test_exhaust_db_locks_big_pause(topology_st_fn, setup_attruniq_index_be_import):
+    """Test that DB lock pause setting increases the wait interval value for the monitoring thread
+
+    :id: 7d5bf838-5d4e-4ad5-8c03-5716afb84ea6
+    :setup: Standalone instance with Attr Uniq plugin and user indexes disabled
+    :steps: 1. Set nsslapd-db-locks to 20000 while using the default threshold value (95%)
+            2. Set nsslapd-db-locks-monitoring-pause to 10000 (10 seconds)
+            3. Make sure that the pause is successfully increased a few times in a row
+    :expectedresults:
+            1. Success
+            2. Success
+            3. Success
+    """
+
+    inst = topology_st_fn.standalone
+
+    bdb_config = BDB_LDBMConfig(inst)
+    bdb_config.replace("nsslapd-db-locks", "20000")
+    lock_pause = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-pause")
+    assert lock_pause == 500
+    lock_pause = "10000"
+    bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause)
+
+    # Restart server
+    inst.restart()
+
+    lock_enabled = bdb_config.get_attr_val_utf8_l("nsslapd-db-locks-monitoring-enabled")
+    lock_threshold = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-threshold")
+    assert lock_enabled == "on"
+    assert lock_threshold == 90
+
+    users = UserAccounts(inst, DEFAULT_SUFFIX)
+    start = datetime.datetime.now()
+    with pytest.raises(ldap.OPERATIONS_ERROR):
+        spawn_worker_thread(create_user_wrapper, users, log, 30,
+                            f"Adding user with monitoring enabled='{lock_enabled}'; "
+                            f"threshold='{lock_threshold}'; pause='{lock_pause}'. Expect it to 'Work'")
+    end = datetime.datetime.now()
+    time_delta = end - start
+    if time_delta.seconds < 9:
+        raise RuntimeError("nsslapd-db-locks-monitoring-pause attribute doesn't function correctly. "
+                           f"Finished the execution in {time_delta.seconds} seconds")
+    # In case something has failed - restart for the clean up
+    inst.restart()
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index 571b0a58b..afb831c32 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -155,6 +155,8 @@ typedef unsigned short u_int16_t;
 #define DEFAULT_DNCACHE_MAXCOUNT -1 /* no limit */
 #define DEFAULT_DBCACHE_SIZE 33554432
 #define DEFAULT_DBCACHE_SIZE_STR "33554432"
+#define DEFAULT_DBLOCK_PAUSE 500
+#define DEFAULT_DBLOCK_PAUSE_STR "500"
 #define DEFAULT_MODE 0600
 #define DEFAULT_ALLIDSTHRESHOLD 4000
 #define DEFAULT_IDL_TUNE 1
@@ -575,12 +577,21 @@ struct ldbminfo
     char *li_backend_implement; /* low layer backend implementation */
     int li_noparentcheck;       /* check if parent exists on add */

-    /* the next 3 fields are for the params that don't get changed until
+    /* db lock monitoring */
+    /* if we decide to move the values to bdb_config, we can use slapi_back_get_info function to retrieve the values */
+    int32_t li_dblock_monitoring;        /* enables db locks monitoring thread - requires restart */
+    uint32_t li_dblock_monitoring_pause; /* an interval for db locks monitoring thread */
+    uint32_t li_dblock_threshold;        /* when the percentage is reached, abort the search in ldbm_back_next_search_entry - requires restart*/
+    uint32_t li_dblock_threshold_reached;
+
+    /* the next 4 fields are for the params that don't get changed until
      * the server is restarted (used by the admin console)
      */
     char *li_new_directory;
     uint64_t li_new_dbcachesize;
     int li_new_dblock;
+    int32_t li_new_dblock_monitoring;
+    uint64_t li_new_dblock_threshold;

     int li_new_dbncache;

diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
index 738b841aa..167644943 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
@@ -190,6 +190,102 @@ bdb_config_db_lock_set(void *arg, void *value, char *errorbuf, int phase, int ap
     return retval;
 }

+static void *
+bdb_config_db_lock_monitoring_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((intptr_t)(li->li_new_dblock_monitoring));
+}
+
+static int
+bdb_config_db_lock_monitoring_set(void *arg, void *value, char *errorbuf __attribute__((unused)), int phase __attribute__((unused)), int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int32_t)((intptr_t)value);
+
+    if (apply) {
+        if (CONFIG_PHASE_RUNNING == phase) {
+            li->li_new_dblock_monitoring = val;
+            slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_monitoring_set",
+ "New nsslapd-db-lock-monitoring value will not take affect until the server is restarted\n");
|
||||
+        } else {
+            li->li_new_dblock_monitoring = val;
+            li->li_dblock_monitoring = val;
+        }
+    }
+
+    return retval;
+}
+
+static void *
+bdb_config_db_lock_pause_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED)));
+}
+
+static int
+bdb_config_db_lock_pause_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    u_int32_t val = (u_int32_t)((uintptr_t)value);
+
+    if (val == 0) {
+        slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_pause_set",
+                      "%s was set to '0'. The default value will be used (%s)",
+                      CONFIG_DB_LOCKS_PAUSE, DEFAULT_DBLOCK_PAUSE_STR);
+        val = DEFAULT_DBLOCK_PAUSE;
+    }
+
+    if (apply) {
+        slapi_atomic_store_32((int32_t *)&(li->li_dblock_monitoring_pause), val, __ATOMIC_RELAXED);
+    }
+    return retval;
+}
+
+static void *
+bdb_config_db_lock_threshold_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(li->li_new_dblock_threshold));
+}
+
+static int
+bdb_config_db_lock_threshold_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    u_int32_t val = (u_int32_t)((uintptr_t)value);
+
+    if (val < 70 || val > 95) {
+        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
+                              "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95",
+                              CONFIG_DB_LOCKS_THRESHOLD, val);
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_lock_threshold_set",
+                      "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95",
+                      CONFIG_DB_LOCKS_THRESHOLD, val);
+        retval = LDAP_OPERATIONS_ERROR;
+        return retval;
+    }
+
+    if (apply) {
+        if (CONFIG_PHASE_RUNNING == phase) {
+            li->li_new_dblock_threshold = val;
+            slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_threshold_set",
+ "New nsslapd-db-lock-monitoring-threshold value will not take affect until the server is restarted\n");
|
||||
+        } else {
+            li->li_new_dblock_threshold = val;
+            li->li_dblock_threshold = val;
+        }
+    }
+    return retval;
+}
+
 static void *
 bdb_config_dbcachesize_get(void *arg)
 {
@@ -1409,6 +1505,9 @@ static config_info bdb_config_param[] = {
     {CONFIG_SERIAL_LOCK, CONFIG_TYPE_ONOFF, "on", &bdb_config_serial_lock_get, &bdb_config_serial_lock_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
     {CONFIG_USE_LEGACY_ERRORCODE, CONFIG_TYPE_ONOFF, "off", &bdb_config_legacy_errcode_get, &bdb_config_legacy_errcode_set, 0},
     {CONFIG_DB_DEADLOCK_POLICY, CONFIG_TYPE_INT, STRINGIFYDEFINE(DB_LOCK_YOUNGEST), &bdb_config_db_deadlock_policy_get, &bdb_config_db_deadlock_policy_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_DB_LOCKS_MONITORING, CONFIG_TYPE_ONOFF, "on", &bdb_config_db_lock_monitoring_get, &bdb_config_db_lock_monitoring_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_DB_LOCKS_THRESHOLD, CONFIG_TYPE_INT, "90", &bdb_config_db_lock_threshold_get, &bdb_config_db_lock_threshold_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_DB_LOCKS_PAUSE, CONFIG_TYPE_INT, DEFAULT_DBLOCK_PAUSE_STR, &bdb_config_db_lock_pause_get, &bdb_config_db_lock_pause_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
     {NULL, 0, NULL, NULL, NULL, 0}};

 void
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
index 6cccad8e6..2f25f67a2 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
@@ -35,6 +35,8 @@
     (env)->txn_checkpoint((env), (kbyte), (min), (flags))
 #define MEMP_STAT(env, gsp, fsp, flags, malloc) \
     (env)->memp_stat((env), (gsp), (fsp), (flags))
+#define LOCK_STAT(env, statp, flags, malloc) \
+    (env)->lock_stat((env), (statp), (flags))
 #define MEMP_TRICKLE(env, pct, nwrotep) \
     (env)->memp_trickle((env), (pct), (nwrotep))
 #define LOG_ARCHIVE(env, listp, flags, malloc) \
@@ -66,6 +68,7 @@
 #define NEWDIR_MODE 0755
 #define DB_REGION_PREFIX "__db."

+static int locks_monitoring_threadmain(void *param);
 static int perf_threadmain(void *param);
 static int checkpoint_threadmain(void *param);
 static int trickle_threadmain(void *param);
@@ -84,6 +87,7 @@ static int bdb_start_checkpoint_thread(struct ldbminfo *li);
 static int bdb_start_trickle_thread(struct ldbminfo *li);
 static int bdb_start_perf_thread(struct ldbminfo *li);
 static int bdb_start_txn_test_thread(struct ldbminfo *li);
+static int bdb_start_locks_monitoring_thread(struct ldbminfo *li);
 static int trans_batch_count = 0;
 static int trans_batch_limit = 0;
 static int trans_batch_txn_min_sleep = 50; /* ms */
@@ -1299,6 +1303,10 @@ bdb_start(struct ldbminfo *li, int dbmode)
         return return_value;
     }

+    if (0 != (return_value = bdb_start_locks_monitoring_thread(li))) {
+        return return_value;
+    }
+
     /* We need to free the memory to avoid a leak
      * Also, we have to evaluate if the performance counter
      * should be preserved or not for database restore.
@@ -2885,6 +2893,7 @@ bdb_start_perf_thread(struct ldbminfo *li)
     return return_value;
 }

+
 /* Performance thread */
 static int
 perf_threadmain(void *param)
@@ -2910,6 +2919,82 @@ perf_threadmain(void *param)
     return 0;
 }

+
+/*
+ * create a thread for locks_monitoring_threadmain
+ */
+static int
+bdb_start_locks_monitoring_thread(struct ldbminfo *li)
+{
+    int return_value = 0;
+    if (li->li_dblock_monitoring) {
+        if (NULL == PR_CreateThread(PR_USER_THREAD,
+                                    (VFP)(void *)locks_monitoring_threadmain, li,
+                                    PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+                                    PR_UNJOINABLE_THREAD,
+                                    SLAPD_DEFAULT_THREAD_STACKSIZE)) {
+            PRErrorCode prerr = PR_GetError();
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_start_locks_monitoring_thread",
+                          "Failed to create database locks monitoring thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+                          prerr, slapd_pr_strerror(prerr));
+            return_value = -1;
+        }
+    }
+    return return_value;
+}
+
+
+/* DB Locks Monitoring thread */
+static int
+locks_monitoring_threadmain(void *param)
+{
+    int ret = 0;
+    uint64_t current_locks = 0;
+    uint64_t max_locks = 0;
+    uint32_t lock_exhaustion = 0;
+    PRIntervalTime interval;
+    struct ldbminfo *li = NULL;
+
+    PR_ASSERT(NULL != param);
+    li = (struct ldbminfo *)param;
+
+    dblayer_private *priv = li->li_dblayer_private;
+    bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
+    PR_ASSERT(NULL != priv);
+
+    INCR_THREAD_COUNT(pEnv);
+
+    while (!BDB_CONFIG(li)->bdb_stop_threads) {
+        if (dblayer_db_uses_locking(pEnv->bdb_DB_ENV)) {
+            DB_LOCK_STAT *lockstat = NULL;
+            ret = LOCK_STAT(pEnv->bdb_DB_ENV, &lockstat, 0, (void *)slapi_ch_malloc);
+            if (0 == ret) {
+                current_locks = lockstat->st_nlocks;
+                max_locks = lockstat->st_maxlocks;
+                if (max_locks) {
+                    lock_exhaustion = (uint32_t)((double)current_locks / (double)max_locks * 100.0);
+                } else {
+                    lock_exhaustion = 0;
+                }
+                if ((li->li_dblock_threshold) &&
+                    (lock_exhaustion >= li->li_dblock_threshold)) {
+                    slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 1, __ATOMIC_RELAXED);
+                } else {
+                    slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 0, __ATOMIC_RELAXED);
+                }
+            }
+            slapi_ch_free((void **)&lockstat);
+        }
+        interval = PR_MillisecondsToInterval(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED));
+        DS_Sleep(interval);
+    }
+
+    DECR_THREAD_COUNT(pEnv);
+    slapi_log_err(SLAPI_LOG_TRACE, "locks_monitoring_threadmain", "Leaving locks_monitoring_threadmain\n");
+    return 0;
+}
+
+
 /*
  * create a thread for deadlock_threadmain
  */
diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c
index 893776699..4165c8fad 100644
--- a/ldap/servers/slapd/back-ldbm/init.c
+++ b/ldap/servers/slapd/back-ldbm/init.c
@@ -70,6 +70,9 @@ ldbm_back_init(Slapi_PBlock *pb)
     /* Initialize the set of instances. */
     li->li_instance_set = objset_new(&ldbm_back_instance_set_destructor);

+    /* Init lock threshold value */
+    li->li_dblock_threshold_reached = 0;
+
     /* ask the factory to give us space in the Connection object
      * (only bulk import uses this)
      */
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
index 10cef250f..60884cf33 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
@@ -87,6 +87,9 @@ static char *ldbm_config_moved_attributes[] =
         CONFIG_SERIAL_LOCK,
         CONFIG_USE_LEGACY_ERRORCODE,
        CONFIG_DB_DEADLOCK_POLICY,
+        CONFIG_DB_LOCKS_MONITORING,
+        CONFIG_DB_LOCKS_THRESHOLD,
+        CONFIG_DB_LOCKS_PAUSE,
         ""};

 /* Used to add an array of entries, like the one above and
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.h b/ldap/servers/slapd/back-ldbm/ldbm_config.h
index 58e64799c..6fa8292eb 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.h
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.h
@@ -104,6 +104,9 @@ struct config_info
 #define CONFIG_DB_VERBOSE "nsslapd-db-verbose"
 #define CONFIG_DB_DEBUG "nsslapd-db-debug"
 #define CONFIG_DB_LOCK "nsslapd-db-locks"
+#define CONFIG_DB_LOCKS_MONITORING "nsslapd-db-locks-monitoring-enabled"
+#define CONFIG_DB_LOCKS_THRESHOLD "nsslapd-db-locks-monitoring-threshold"
+#define CONFIG_DB_LOCKS_PAUSE "nsslapd-db-locks-monitoring-pause"
 #define CONFIG_DB_NAMED_REGIONS "nsslapd-db-named-regions"
 #define CONFIG_DB_PRIVATE_MEM "nsslapd-db-private-mem"
 #define CONFIG_DB_PRIVATE_IMPORT_MEM "nsslapd-db-private-import-mem"
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
index 1a7b510d4..6e22debde 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
@@ -1472,6 +1472,7 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
     slapi_pblock_get(pb, SLAPI_CONNECTION, &conn);
     slapi_pblock_get(pb, SLAPI_OPERATION, &op);

+
     if ((reverse_list = operation_is_flag_set(op, OP_FLAG_REVERSE_CANDIDATE_ORDER))) {
         /*
          * Start at the end of the list and work our way forward. Since a single
@@ -1538,6 +1539,18 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)

     /* Find the next candidate entry and return it. */
     while (1) {
+        if (li->li_dblock_monitoring &&
+            slapi_atomic_load_32((int32_t *)&(li->li_dblock_threshold_reached), __ATOMIC_RELAXED)) {
+            slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_next_search_entry",
+                          "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold "
+                          "under cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config). "
+                          "Please, increase nsslapd-db-locks according to your needs.\n");
+            slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_ENTRY, NULL);
+            delete_search_result_set(pb, &sr);
+            rc = SLAPI_FAIL_GENERAL;
+            slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold)", 0, NULL);
+            goto bail;
+        }

         /* check for abandon */
         if (slapi_op_abandoned(pb) || (NULL == sr)) {
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 388616b36..db7d01bbc 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -8171,8 +8171,8 @@ config_set(const char *attr, struct berval **values, char *errorbuf, int apply)
 #if 0
         debugHashTable(attr);
 #endif
-        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored", attr);
-        slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored", attr);
+        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored\n", attr);
+        slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored\n", attr);
         return LDAP_NO_SUCH_ATTRIBUTE;
     }

diff --git a/src/cockpit/389-console/src/css/ds.css b/src/cockpit/389-console/src/css/ds.css
index 9248116e7..3cf50b593 100644
--- a/src/cockpit/389-console/src/css/ds.css
+++ b/src/cockpit/389-console/src/css/ds.css
@@ -639,6 +639,10 @@ option {
     padding-right: 0 !important;
 }

+.ds-vertical-scroll-auto {
+    overflow-y: auto !important;
+}
+
 .alert {
     max-width: 750px;
 }
diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx
index efa3ce6d5..11cae972c 100644
--- a/src/cockpit/389-console/src/database.jsx
+++ b/src/cockpit/389-console/src/database.jsx
@@ -157,6 +157,7 @@ export class Database extends React.Component {
             const attrs = config.attrs;
             let db_cache_auto = false;
             let import_cache_auto = false;
+            let dblocksMonitoring = false;
             let dbhome = "";

             if ('nsslapd-db-home-directory' in attrs) {
@@ -168,6 +169,9 @@ export class Database extends React.Component {
             if (attrs['nsslapd-import-cache-autosize'] != "0") {
                 import_cache_auto = true;
             }
+            if (attrs['nsslapd-db-locks-monitoring-enabled'][0] == "on") {
+                dblocksMonitoring = true;
+            }

             this.setState(() => (
                 {
@@ -187,6 +191,9 @@ export class Database extends React.Component {
                     txnlogdir: attrs['nsslapd-db-logdirectory'],
                     dbhomedir: dbhome,
                     dblocks: attrs['nsslapd-db-locks'],
+                    dblocksMonitoring: dblocksMonitoring,
+                    dblocksMonitoringThreshold: attrs['nsslapd-db-locks-monitoring-threshold'],
+                    dblocksMonitoringPause: attrs['nsslapd-db-locks-monitoring-pause'],
                     chxpoint: attrs['nsslapd-db-checkpoint-interval'],
                     compactinterval: attrs['nsslapd-db-compactdb-interval'],
                     importcacheauto: attrs['nsslapd-import-cache-autosize'],
diff --git a/src/cockpit/389-console/src/index.html b/src/cockpit/389-console/src/index.html
index 1278844fc..fd0eeb669 100644
--- a/src/cockpit/389-console/src/index.html
+++ b/src/cockpit/389-console/src/index.html
@@ -12,7 +12,7 @@
 </head>


-<body>
+<body class="ds-vertical-scroll-auto">
     <div id="dsinstance"></div>
     <script src="index.js"></script>
 </body>
diff --git a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
index f6e662bca..6a71c138d 100644
--- a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
+++ b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
@@ -31,6 +31,9 @@ export class GlobalDatabaseConfig extends React.Component {
             txnlogdir: this.props.data.txnlogdir,
             dbhomedir: this.props.data.dbhomedir,
             dblocks: this.props.data.dblocks,
+            dblocksMonitoring: this.props.data.dblocksMonitoring,
+            dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold,
+            dblocksMonitoringPause: this.props.data.dblocksMonitoringPause,
             chxpoint: this.props.data.chxpoint,
             compactinterval: this.props.data.compactinterval,
             importcachesize: this.props.data.importcachesize,
@@ -47,6 +50,9 @@ export class GlobalDatabaseConfig extends React.Component {
             _txnlogdir: this.props.data.txnlogdir,
             _dbhomedir: this.props.data.dbhomedir,
             _dblocks: this.props.data.dblocks,
+            _dblocksMonitoring: this.props.data.dblocksMonitoring,
+            _dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold,
+            _dblocksMonitoringPause: this.props.data.dblocksMonitoringPause,
             _chxpoint: this.props.data.chxpoint,
             _compactinterval: this.props.data.compactinterval,
             _importcachesize: this.props.data.importcachesize,
@@ -55,6 +61,7 @@ export class GlobalDatabaseConfig extends React.Component {
             _import_cache_auto: this.props.data.import_cache_auto,
         };
         this.handleChange = this.handleChange.bind(this);
+        this.select_db_locks_monitoring = this.select_db_locks_monitoring.bind(this);
         this.select_auto_cache = this.select_auto_cache.bind(this);
         this.select_auto_import_cache = this.select_auto_import_cache.bind(this);
         this.save_db_config = this.save_db_config.bind(this);
@@ -76,6 +83,12 @@ export class GlobalDatabaseConfig extends React.Component {
         }, this.handleChange(e));
     }

+    select_db_locks_monitoring (val, e) {
+        this.setState({
+            dblocksMonitoring: !this.state.dblocksMonitoring
+        }, this.handleChange(val, e));
+    }
+
     handleChange(e) {
         // Generic
         const value = e.target.type === 'checkbox' ? e.target.checked : e.target.value;
@@ -150,6 +163,21 @@ export class GlobalDatabaseConfig extends React.Component {
             cmd.push("--locks=" + this.state.dblocks);
             requireRestart = true;
         }
+        if (this.state._dblocksMonitoring != this.state.dblocksMonitoring) {
+            if (this.state.dblocksMonitoring) {
+                cmd.push("--locks-monitoring-enabled=on");
+            } else {
+                cmd.push("--locks-monitoring-enabled=off");
+            }
+            requireRestart = true;
+        }
+        if (this.state._dblocksMonitoringThreshold != this.state.dblocksMonitoringThreshold) {
+            cmd.push("--locks-monitoring-threshold=" + this.state.dblocksMonitoringThreshold);
+            requireRestart = true;
+        }
+        if (this.state._dblocksMonitoringPause != this.state.dblocksMonitoringPause) {
+            cmd.push("--locks-monitoring-pause=" + this.state.dblocksMonitoringPause);
+        }
         if (this.state._chxpoint != this.state.chxpoint) {
             cmd.push("--checkpoint-interval=" + this.state.chxpoint);
             requireRestart = true;
@@ -216,6 +244,28 @@ export class GlobalDatabaseConfig extends React.Component {
         let import_cache_form;
         let db_auto_checked = false;
         let import_auto_checked = false;
+        let dblocksMonitor = "";
+
+        if (this.state.dblocksMonitoring) {
+            dblocksMonitor = <div className="ds-margin-top">
+                <Row className="ds-margin-top" title="Sets the DB lock exhaustion value in percentage (valid range is 70-95). If too many locks are acquired, the server will abort the searches while the number of locks are not decreased. It helps to avoid DB corruption and long recovery. (nsslapd-db-locks-monitoring-threshold)">
+                    <Col componentClass={ControlLabel} sm={4}>
+                        DB Locks Threshold Percentage
+                    </Col>
+                    <Col sm={8}>
+                        <input className="ds-input" type="number" id="dblocksMonitoringThreshold" size="10" onChange={this.handleChange} value={this.state.dblocksMonitoringThreshold} />
+                    </Col>
+                </Row>
+                <Row className="ds-margin-top" title="Sets the amount of time (milliseconds) that the monitoring thread spends waiting between checks. (nsslapd-db-locks-monitoring-pause)">
+                    <Col componentClass={ControlLabel} sm={4}>
+                        DB Locks Pause Milliseconds
+                    </Col>
+                    <Col sm={8}>
+                        <input className="ds-input" type="number" id="dblocksMonitoringPause" size="10" onChange={this.handleChange} value={this.state.dblocksMonitoringPause} />
+                    </Col>
+                </Row>
+            </div>;
+        }

         if (this.state.db_cache_auto) {
             db_cache_form = <div id="auto-cache-form" className="ds-margin-left">
@@ -422,14 +472,6 @@ export class GlobalDatabaseConfig extends React.Component {
                                     <input id="dbhomedir" value={this.state.dbhomedir} onChange={this.handleChange} className="ds-input-auto" type="text" />
                                 </Col>
                             </Row>
-                            <Row className="ds-margin-top" title="The number of database locks (nsslapd-db-locks).">
-                                <Col componentClass={ControlLabel} sm={4}>
-                                    Database Locks
-                                </Col>
-                                <Col sm={8}>
-                                    <input id="dblocks" value={this.state.dblocks} onChange={this.handleChange} className="ds-input-auto" type="text" />
-                                </Col>
-                            </Row>
                             <Row className="ds-margin-top" title="Amount of time in seconds after which the Directory Server sends a checkpoint entry to the database transaction log (nsslapd-db-checkpoint-interval).">
                                 <Col componentClass={ControlLabel} sm={4}>
                                     Database Checkpoint Interval
@@ -446,6 +488,36 @@ export class GlobalDatabaseConfig extends React.Component {
                                     <input id="compactinterval" value={this.state.compactinterval} onChange={this.handleChange} className="ds-input-auto" type="text" />
                                 </Col>
                             </Row>
+                            <Row className="ds-margin-top" title="The number of database locks (nsslapd-db-locks).">
+                                <Col componentClass={ControlLabel} sm={4}>
+                                    Database Locks
+                                </Col>
+                                <Col sm={8}>
+                                    <input id="dblocks" value={this.state.dblocks} onChange={this.handleChange} className="ds-input-auto" type="text" />
+                                </Col>
+                            </Row>
+                            <Row>
+                                <Col sm={12}>
+                                    <h5 className="ds-sub-header">DB Locks Monitoring</h5>
+                                    <hr />
+                                </Col>
+                            </Row>
+                            <Row>
+                                <Col sm={12}>
+ <Checkbox title="Set input to be set automatically"
|
||||
+ id="dblocksMonitoring"
|
||||
+ checked={this.state.dblocksMonitoring}
|
||||
+ onChange={this.select_db_locks_monitoring}
|
||||
+ >
|
||||
+ Enable Monitoring
|
||||
+ </Checkbox>
|
||||
+ </Col>
|
||||
+ </Row>
|
||||
+ <Row>
|
||||
+ <Col sm={12}>
|
||||
+ {dblocksMonitor}
|
||||
+ </Col>
|
||||
+ </Row>
|
||||
</Form>
|
||||
</div>
|
||||
</div>
|
||||
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
|
||||
index bcd7b383f..13bb27842 100644
|
||||
--- a/src/lib389/lib389/backend.py
|
||||
+++ b/src/lib389/lib389/backend.py
|
||||
@@ -1011,6 +1011,9 @@ class DatabaseConfig(DSLdapObject):
|
||||
'nsslapd-db-transaction-batch-max-wait',
|
||||
'nsslapd-db-logbuf-size',
|
||||
'nsslapd-db-locks',
|
||||
+ 'nsslapd-db-locks-monitoring-enabled',
|
||||
+ 'nsslapd-db-locks-monitoring-threshold',
|
||||
+ 'nsslapd-db-locks-monitoring-pause',
|
||||
'nsslapd-db-private-import-mem',
|
||||
'nsslapd-import-cache-autosize',
|
||||
'nsslapd-cache-autosize',
|
||||
diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py
|
||||
index 6bfbcb036..722764d10 100644
|
||||
--- a/src/lib389/lib389/cli_conf/backend.py
|
||||
+++ b/src/lib389/lib389/cli_conf/backend.py
|
||||
@@ -46,6 +46,9 @@ arg_to_attr = {
|
||||
'txn_batch_max': 'nsslapd-db-transaction-batch-max-wait',
|
||||
'logbufsize': 'nsslapd-db-logbuf-size',
|
||||
'locks': 'nsslapd-db-locks',
|
||||
+ 'locks_monitoring_enabled': 'nsslapd-db-locks-monitoring-enabled',
|
||||
+ 'locks_monitoring_threshold': 'nsslapd-db-locks-monitoring-threshold',
|
||||
+ 'locks_monitoring_pause': 'nsslapd-db-locks-monitoring-pause',
|
||||
'import_cache_autosize': 'nsslapd-import-cache-autosize',
|
||||
'cache_autosize': 'nsslapd-cache-autosize',
|
||||
'cache_autosize_split': 'nsslapd-cache-autosize-split',
|
||||
@@ -998,6 +1001,13 @@ def create_parser(subparsers):
|
||||
'the batch count (only works when txn-batch-val is set)')
|
||||
set_db_config_parser.add_argument('--logbufsize', help='Specifies the transaction log information buffer size')
|
||||
set_db_config_parser.add_argument('--locks', help='Sets the maximum number of database locks')
|
||||
+ set_db_config_parser.add_argument('--locks-monitoring-enabled', help='Set to "on" or "off" to monitor DB locks. When it crosses the percentage value '
|
||||
+ 'set with "--locks-monitoring-threshold" ("on" by default)')
|
||||
+ set_db_config_parser.add_argument('--locks-monitoring-threshold', help='Sets the DB lock exhaustion value in percentage (valid range is 70-95). If too many locks are '
|
||||
+ 'acquired, the server will abort the searches while the number of locks '
|
||||
+ 'are not decreased. It helps to avoid DB corruption and long recovery.')
|
||||
+ set_db_config_parser.add_argument('--locks-monitoring-pause', help='Sets the DB lock monitoring value in milliseconds for the amount of time '
|
||||
+ 'that the monitoring thread spends waiting between checks.')
|
||||
set_db_config_parser.add_argument('--import-cache-autosize', help='Set to "on" or "off" to automatically set the size of the import '
|
||||
'cache to be used during the the import process of LDIF files')
|
||||
set_db_config_parser.add_argument('--cache-autosize', help='Sets the percentage of free memory that is used in total for the database '
|
||||
--
|
||||
2.26.3
|
||||
|
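For context, the three monitoring attributes wired up above live on the global database config entry, so they can be set through lib389 as well as through dsconf. A minimal sketch, assuming a local instance named 'standalone1' (the serverid is a placeholder) and the DatabaseConfig.set() list-of-pairs form used by the class this patch extends:

from lib389 import DirSrv
from lib389.backend import DatabaseConfig

inst = DirSrv()
inst.local_simple_allocate(serverid='standalone1')  # placeholder serverid
inst.open()

# Enable the lock monitoring thread and tune the new knobs.
db_cfg = DatabaseConfig(inst)
db_cfg.set([('nsslapd-db-locks-monitoring-enabled', 'on'),
            ('nsslapd-db-locks-monitoring-threshold', '90'),  # percent, 70-95
            ('nsslapd-db-locks-monitoring-pause', '500')])    # milliseconds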
@ -1,127 +0,0 @@
|
||||
From 2a2773d4bf8553ba64b396d567fe05506b22c94c Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <72748589+progier389@users.noreply.github.com>
|
||||
Date: Tue, 24 Nov 2020 19:22:49 +0100
|
||||
Subject: [PATCH] Issue 4449 - dsconf replication monitor fails to retrieve
|
||||
database RUV - consumer (Unavailable) (#4451)
|
||||
|
||||
Bug Description:
|
||||
|
||||
"dsconf replication monitor" fails to retrieve database RUV entry from consumer and this
|
||||
appears in the Cockpit web UI too.
|
||||
The problem is that the bind credentials are not correctly propagated when trying to get
|
||||
the consumers' agreement status. The supplier credentials are then used instead, and the RUV
|
||||
is searched anonymously because there is no bind DN in the ldapi case.
|
||||
|
||||
Fix Description:
|
||||
|
||||
- Propagates the bind credentials when computing agreement status
|
||||
- Add a credentials cache because a replica password could now be asked for several times:
|
||||
when discovering the topology and
|
||||
when getting the agreement maxcsn
|
||||
- No testcase in the 1.4.3 branch as the file modified in master does not exist
|
||||
|
||||
- Add a comment about nonlocal keyword
|
||||
|
||||
Relates: #4449
|
||||
|
||||
Reviewers:
|
||||
firstyear
|
||||
droideck
|
||||
mreynolds
|
||||
|
||||
Issue 4449: Add a comment about nonlocal keyword
|
||||
|
||||
(cherry picked from commit 73ee04fa12cd1de3a5e47c109e79e31c1aaaa2ab)
|
||||
---
|
||||
src/lib389/lib389/cli_conf/replication.py | 13 +++++++++++--
|
||||
src/lib389/lib389/replica.py | 16 ++++++++++++----
|
||||
2 files changed, 23 insertions(+), 6 deletions(-)
|
||||
|
||||
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
|
||||
index 9dbaa320a..248972cba 100644
|
||||
--- a/src/lib389/lib389/cli_conf/replication.py
|
||||
+++ b/src/lib389/lib389/cli_conf/replication.py
|
||||
@@ -369,9 +369,16 @@ def set_repl_config(inst, basedn, log, args):
|
||||
|
||||
def get_repl_monitor_info(inst, basedn, log, args):
|
||||
connection_data = dsrc_to_repl_monitor(DSRC_HOME, log)
|
||||
+ credentials_cache = {}
|
||||
|
||||
# Additional details for the connections to the topology
|
||||
def get_credentials(host, port):
|
||||
+ # credentials_cache is nonlocal to refer to the instance
|
||||
+ # from enclosing function (get_repl_monitor_info)
|
||||
+ nonlocal credentials_cache
|
||||
+ key = f'{host}:{port}'
|
||||
+ if key in credentials_cache:
|
||||
+ return credentials_cache[key]
|
||||
found = False
|
||||
if args.connections:
|
||||
connections = args.connections
|
||||
@@ -406,8 +413,10 @@ def get_repl_monitor_info(inst, basedn, log, args):
|
||||
binddn = input(f'\nEnter a bind DN for {host}:{port}: ').rstrip()
|
||||
bindpw = getpass(f"Enter a password for {binddn} on {host}:{port}: ").rstrip()
|
||||
|
||||
- return {"binddn": binddn,
|
||||
- "bindpw": bindpw}
|
||||
+ credentials = {"binddn": binddn,
|
||||
+ "bindpw": bindpw}
|
||||
+ credentials_cache[key] = credentials
|
||||
+ return credentials
|
||||
|
||||
repl_monitor = ReplicationMonitor(inst)
|
||||
report_dict = repl_monitor.generate_report(get_credentials, args.json)
|
||||
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
|
||||
index c2ad2104d..3d89e61fb 100644
|
||||
--- a/src/lib389/lib389/replica.py
|
||||
+++ b/src/lib389/lib389/replica.py
|
||||
@@ -2487,9 +2487,10 @@ class ReplicationMonitor(object):
|
||||
else:
|
||||
self._log = logging.getLogger(__name__)
|
||||
|
||||
- def _get_replica_status(self, instance, report_data, use_json):
|
||||
+ def _get_replica_status(self, instance, report_data, use_json, get_credentials=None):
|
||||
"""Load all of the status data to report
|
||||
and add new hostname:port pairs for future processing
|
||||
+ :type get_credentials: function
|
||||
"""
|
||||
|
||||
replicas_status = []
|
||||
@@ -2503,6 +2504,13 @@ class ReplicationMonitor(object):
|
||||
for agmt in agmts.list():
|
||||
host = agmt.get_attr_val_utf8_l("nsds5replicahost")
|
||||
port = agmt.get_attr_val_utf8_l("nsds5replicaport")
|
||||
+ if get_credentials is not None:
|
||||
+ credentials = get_credentials(host, port)
|
||||
+ binddn = credentials["binddn"]
|
||||
+ bindpw = credentials["bindpw"]
|
||||
+ else:
|
||||
+ binddn = instance.binddn
|
||||
+ bindpw = instance.bindpw
|
||||
protocol = agmt.get_attr_val_utf8_l('nsds5replicatransportinfo')
|
||||
# Supply protocol here because we need it only for connection
|
||||
# and agreement status is already preformatted for the user output
|
||||
@@ -2510,9 +2518,9 @@ class ReplicationMonitor(object):
|
||||
if consumer not in report_data:
|
||||
report_data[f"{consumer}:{protocol}"] = None
|
||||
if use_json:
|
||||
- agmts_status.append(json.loads(agmt.status(use_json=True)))
|
||||
+ agmts_status.append(json.loads(agmt.status(use_json=True, binddn=binddn, bindpw=bindpw)))
|
||||
else:
|
||||
- agmts_status.append(agmt.status())
|
||||
+ agmts_status.append(agmt.status(binddn=binddn, bindpw=bindpw))
|
||||
replicas_status.append({"replica_id": replica_id,
|
||||
"replica_root": replica_root,
|
||||
"replica_status": "Available",
|
||||
@@ -2535,7 +2543,7 @@ class ReplicationMonitor(object):
|
||||
initial_inst_key = f"{self._instance.config.get_attr_val_utf8_l('nsslapd-localhost')}:{self._instance.config.get_attr_val_utf8_l('nsslapd-port')}"
|
||||
# Do this on an initial instance to get the agreements to other instances
|
||||
try:
|
||||
- report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json)
|
||||
+ report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json, get_credentials)
|
||||
except ldap.LDAPError as e:
|
||||
self._log.debug(f"Connection to consumer ({supplier_hostname}:{supplier_port}) failed, error: {e}")
|
||||
report_data[initial_inst_key] = [{"replica_status": f"Unavailable - {e.args[0]['desc']}"}]
|
||||
--
|
||||
2.26.2
|
||||
|
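The cache added above is a plain dict in the enclosing function, keyed by host:port; nonlocal only documents that the nested helper reaches into that scope. A standalone sketch of the same pattern, with the interactive prompt reduced to a stub:

def make_credentials_getter(prompt):
    credentials_cache = {}

    def get_credentials(host, port):
        # Refers to the dict in the enclosing scope, as in the patch.
        nonlocal credentials_cache
        key = f'{host}:{port}'
        if key in credentials_cache:
            return credentials_cache[key]
        credentials = prompt(host, port)  # ask for binddn/bindpw only once
        credentials_cache[key] = credentials
        return credentials

    return get_credentials

# Each host:port pair is prompted for at most once.
get_creds = make_credentials_getter(lambda h, p: {'binddn': 'cn=Directory Manager', 'bindpw': 'secret'})
assert get_creds('m1.example.com', 389) is get_creds('m1.example.com', 389)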
@ -1,18 +1,19 @@
|
||||
From 76d1b4ff8efdff1dbe6139b51da656880d7a8ec6 Mon Sep 17 00:00:00 2001
|
||||
From 7573c62a2e61293a4800e67919d79341fa1a1532 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Wed, 26 May 2021 16:07:43 +0200
|
||||
Subject: [PATCH 2/2] Issue 4764 - replicated operation sometime checks ACI
|
||||
Subject: [PATCH 10/12] Issue 4764 - replicated operation sometime checks ACI
|
||||
(#4783)
|
||||
|
||||
(cherry picked from commit 0cfdea7abcacfca6686a6cf84dbf7ae1167f3022)
|
||||
---
|
||||
ldap/servers/slapd/connection.c | 8 ++++++++
|
||||
1 file changed, 8 insertions(+)
|
||||
|
||||
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
|
||||
index 1883fe711..02c02ffb6 100644
|
||||
index c7a15e775..e0c1a52d2 100644
|
||||
--- a/ldap/servers/slapd/connection.c
|
||||
+++ b/ldap/servers/slapd/connection.c
|
||||
@@ -1764,6 +1764,14 @@ connection_threadmain()
|
||||
@@ -1771,6 +1771,14 @@ connection_threadmain()
|
||||
}
|
||||
}
|
||||
|
||||
@ -28,5 +29,5 @@ index 1883fe711..02c02ffb6 100644
|
||||
* Call the do_<operation> function to process this request.
|
||||
*/
|
||||
--
|
||||
2.31.1
|
||||
2.26.3
|
||||
|
@ -1,63 +0,0 @@
|
||||
From e540effa692976c2eef766f1f735702ba5dc0950 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 30 Nov 2020 09:03:33 +0100
|
||||
Subject: [PATCH] Issue 4243 - Fix test: SyncRepl plugin provides a wrong
|
||||
cookie (#4467)
|
||||
|
||||
Bug description:
|
||||
This test case was incorrect.
|
||||
During a refreshPersistent search, a cookie is sent
|
||||
with the intermediate message that indicates the end of the refresh phase.
|
||||
Then a second cookie is sent on the updated entry (group10)
|
||||
I believed this test was successful some time ago but neither python-ldap
|
||||
nor sync_repl changed (intermediate sent in post refresh).
|
||||
So the testcase was never successful :(
|
||||
|
||||
Fix description:
|
||||
The fix is just to take into account the two expected cookies
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4243
|
||||
|
||||
Reviewed by: Mark Reynolds
|
||||
|
||||
Platforms tested: F31
|
||||
---
|
||||
.../tests/suites/syncrepl_plugin/basic_test.py | 12 +++++++-----
|
||||
1 file changed, 7 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
|
||||
index 79ec374bc..7b35537d5 100644
|
||||
--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
|
||||
@@ -589,7 +589,7 @@ def test_sync_repl_cookie_with_failure(topology, request):
|
||||
sync_repl.start()
|
||||
time.sleep(5)
|
||||
|
||||
- # Add a test group just to check that sync_repl receives only one update
|
||||
+ # Add a test group just to check that sync_repl receives that SyncControlInfo cookie
|
||||
group.append(groups.create(properties={'cn': 'group%d' % 10}))
|
||||
|
||||
# create users, that automember/memberof will generate nested updates
|
||||
@@ -610,13 +610,15 @@ def test_sync_repl_cookie_with_failure(topology, request):
|
||||
time.sleep(10)
|
||||
cookies = sync_repl.get_result()
|
||||
|
||||
- # checking that the cookie list contains only one entry
|
||||
- assert len(cookies) == 1
|
||||
- prev = 0
|
||||
+ # checking that the cookie list contains only two entries
|
||||
+ # the one from the SyncInfo/RefreshDelete that indicates the end of the refresh
|
||||
+ # the one from the SyncStateControl related to the only updated entry (group10)
|
||||
+ assert len(cookies) == 2
|
||||
+ prev = -1
|
||||
for cookie in cookies:
|
||||
log.info('Check cookie %s' % cookie)
|
||||
|
||||
- assert int(cookie) > 0
|
||||
+ assert int(cookie) >= 0
|
||||
assert int(cookie) < 1000
|
||||
assert int(cookie) > prev
|
||||
prev = int(cookie)
|
||||
--
|
||||
2.26.2
|
||||
|
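The corrected assertions above boil down to: exactly two cookies arrive (one from the SyncInfo/refreshDelete message closing the refresh phase, one from the SyncStateControl on the single updated entry), and their integer values must increase strictly. A pure-Python restatement of that check:

def check_cookies(cookies):
    assert len(cookies) == 2      # refresh-done cookie + group10 update cookie
    prev = -1
    for cookie in cookies:
        value = int(cookie)
        assert 0 <= value < 1000  # same bounds as the test
        assert value > prev       # strictly increasing
        prev = value

check_cookies(['0', '42'])        # passes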
File diff suppressed because it is too large
@ -1,254 +0,0 @@
|
||||
From 8b0ba11c3dfb577d1696f4b71a6f4e9f8d42349f Mon Sep 17 00:00:00 2001
|
||||
From: Pierre Rogier <progier@redhat.com>
|
||||
Date: Mon, 30 Nov 2020 12:42:17 +0100
|
||||
Subject: [PATCH] Add dsconf replication monitor test case (gitHub issue 4449)
|
||||
in 1.4.3 branch
|
||||
|
||||
---
|
||||
.../tests/suites/clu/repl_monitor_test.py | 234 ++++++++++++++++++
|
||||
1 file changed, 234 insertions(+)
|
||||
create mode 100644 dirsrvtests/tests/suites/clu/repl_monitor_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
|
||||
new file mode 100644
|
||||
index 000000000..b03d170c8
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
|
||||
@@ -0,0 +1,234 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2020 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import time
|
||||
+import subprocess
|
||||
+import pytest
|
||||
+
|
||||
+from lib389.cli_conf.replication import get_repl_monitor_info
|
||||
+from lib389.tasks import *
|
||||
+from lib389.utils import *
|
||||
+from lib389.topologies import topology_m2
|
||||
+from lib389.cli_base import FakeArgs
|
||||
+from lib389.cli_base.dsrc import dsrc_arg_concat
|
||||
+from lib389.cli_base import connect_instance
|
||||
+
|
||||
+pytestmark = pytest.mark.tier0
|
||||
+
|
||||
+LOG_FILE = '/tmp/monitor.log'
|
||||
+logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="function")
|
||||
+def set_log_file(request):
|
||||
+ fh = logging.FileHandler(LOG_FILE)
|
||||
+ fh.setLevel(logging.DEBUG)
|
||||
+ log.addHandler(fh)
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info('Delete files')
|
||||
+ os.remove(LOG_FILE)
|
||||
+
|
||||
+ config = os.path.expanduser(DSRC_HOME)
|
||||
+ if os.path.exists(config):
|
||||
+ os.remove(config)
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+
|
||||
+def check_value_in_log_and_reset(content_list, second_list=None, single_value=None, error_list=None):
|
||||
+ with open(LOG_FILE, 'r+') as f:
|
||||
+ file_content = f.read()
|
||||
+
|
||||
+ for item in content_list:
|
||||
+ log.info('Check that "{}" is present'.format(item))
|
||||
+ assert item in file_content
|
||||
+
|
||||
+ if second_list is not None:
|
||||
+ log.info('Check for "{}"'.format(second_list))
|
||||
+ for item in second_list:
|
||||
+ assert item in file_content
|
||||
+
|
||||
+ if single_value is not None:
|
||||
+ log.info('Check for "{}"'.format(single_value))
|
||||
+ assert single_value in file_content
|
||||
+
|
||||
+ if error_list is not None:
|
||||
+ log.info('Check that "{}" is not present'.format(error_list))
|
||||
+ for item in error_list:
|
||||
+ assert item not in file_content
|
||||
+
|
||||
+ log.info('Reset log file')
|
||||
+ f.truncate(0)
|
||||
+
|
||||
+
|
||||
+@pytest.mark.ds50545
|
||||
+@pytest.mark.bz1739718
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.0"), reason="Not implemented")
|
||||
+def test_dsconf_replication_monitor(topology_m2, set_log_file):
|
||||
+ """Test replication monitor that was ported from legacy tools
|
||||
+
|
||||
+ :id: ce48020d-7c30-41b7-8f68-144c9cd757f6
|
||||
+ :setup: 2 MM topology
|
||||
+ :steps:
|
||||
+ 1. Create DS instance
|
||||
+ 2. Run replication monitor with connections option
|
||||
+ 3. Run replication monitor with aliases option
|
||||
+ 4. Run replication monitor with --json option
|
||||
+ 5. Run replication monitor with .dsrc file created
|
||||
+ 6. Run replication monitor with connections option as if using dsconf CLI
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ 6. Success
|
||||
+ """
|
||||
+
|
||||
+ m1 = topology_m2.ms["master1"]
|
||||
+ m2 = topology_m2.ms["master2"]
|
||||
+
|
||||
+ alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')',
|
||||
+ 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')']
|
||||
+
|
||||
+ connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port)
|
||||
+ content_list = ['Replica Root: dc=example,dc=com',
|
||||
+ 'Replica ID: 1',
|
||||
+ 'Replica Status: Available',
|
||||
+ 'Max CSN',
|
||||
+ 'Status For Agreement: "002" ('+ m2.host + ':' + str(m2.port) + ')',
|
||||
+ 'Replica Enabled: on',
|
||||
+ 'Update In Progress: FALSE',
|
||||
+ 'Last Update Start:',
|
||||
+ 'Last Update End:',
|
||||
+ 'Number Of Changes Sent:',
|
||||
+ 'Number Of Changes Skipped: None',
|
||||
+ 'Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded',
|
||||
+ 'Last Init Start:',
|
||||
+ 'Last Init End:',
|
||||
+ 'Last Init Status:',
|
||||
+ 'Reap Active: 0',
|
||||
+ 'Replication Status: In Synchronization',
|
||||
+ 'Replication Lag Time:',
|
||||
+ 'Supplier: ',
|
||||
+ m2.host + ':' + str(m2.port),
|
||||
+ 'Replica Root: dc=example,dc=com',
|
||||
+ 'Replica ID: 2',
|
||||
+ 'Status For Agreement: "001" (' + m1.host + ':' + str(m1.port)+')']
|
||||
+
|
||||
+ error_list = ['consumer (Unavailable)',
|
||||
+ 'Failed to retrieve database RUV entry from consumer']
|
||||
+
|
||||
+ json_list = ['type',
|
||||
+ 'list',
|
||||
+ 'items',
|
||||
+ 'name',
|
||||
+ m1.host + ':' + str(m1.port),
|
||||
+ 'data',
|
||||
+ '"replica_id": "1"',
|
||||
+ '"replica_root": "dc=example,dc=com"',
|
||||
+ '"replica_status": "Available"',
|
||||
+ 'maxcsn',
|
||||
+ 'agmts_status',
|
||||
+ 'agmt-name',
|
||||
+ '002',
|
||||
+ 'replica',
|
||||
+ m2.host + ':' + str(m2.port),
|
||||
+ 'replica-enabled',
|
||||
+ 'update-in-progress',
|
||||
+ 'last-update-start',
|
||||
+ 'last-update-end',
|
||||
+ 'number-changes-sent',
|
||||
+ 'number-changes-skipped',
|
||||
+ 'last-update-status',
|
||||
+ 'Error (0) Replica acquired successfully: Incremental update succeeded',
|
||||
+ 'last-init-start',
|
||||
+ 'last-init-end',
|
||||
+ 'last-init-status',
|
||||
+ 'reap-active',
|
||||
+ 'replication-status',
|
||||
+ 'In Synchronization',
|
||||
+ 'replication-lag-time',
|
||||
+ '"replica_id": "2"',
|
||||
+ '001',
|
||||
+ m1.host + ':' + str(m1.port)]
|
||||
+
|
||||
+ dsrc_content = '[repl-monitor-connections]\n' \
|
||||
+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
|
||||
+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
|
||||
+ '\n' \
|
||||
+ '[repl-monitor-aliases]\n' \
|
||||
+ 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
|
||||
+ 'M2 = ' + m2.host + ':' + str(m2.port)
|
||||
+
|
||||
+ connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
|
||||
+ m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM]
|
||||
+
|
||||
+ aliases = ['M1=' + m1.host + ':' + str(m1.port),
|
||||
+ 'M2=' + m2.host + ':' + str(m2.port)]
|
||||
+
|
||||
+ args = FakeArgs()
|
||||
+ args.connections = connections
|
||||
+ args.aliases = None
|
||||
+ args.json = False
|
||||
+
|
||||
+ log.info('Run replication monitor with connections option')
|
||||
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
|
||||
+ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
|
||||
+
|
||||
+ log.info('Run replication monitor with aliases option')
|
||||
+ args.aliases = aliases
|
||||
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
|
||||
+ check_value_in_log_and_reset(content_list, alias_content)
|
||||
+
|
||||
+ log.info('Run replication monitor with --json option')
|
||||
+ args.aliases = None
|
||||
+ args.json = True
|
||||
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
|
||||
+ check_value_in_log_and_reset(json_list)
|
||||
+
|
||||
+ with open(os.path.expanduser(DSRC_HOME), 'w+') as f:
|
||||
+ f.write(dsrc_content)
|
||||
+
|
||||
+ args.connections = None
|
||||
+ args.aliases = None
|
||||
+ args.json = False
|
||||
+
|
||||
+ log.info('Run replication monitor when .dsrc file is present with content')
|
||||
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
|
||||
+ check_value_in_log_and_reset(content_list, alias_content)
|
||||
+ os.remove(os.path.expanduser(DSRC_HOME))
|
||||
+
|
||||
+ log.info('Run replication monitor with connections option as if using dsconf CLI')
|
||||
+ # Perform the same test as step 2 but without directly using the topology instance,
|
||||
+ # but with an instance similar to the one that the dsconf cli generates:
|
||||
+ # step 2 args
|
||||
+ args.connections = connections
|
||||
+ args.aliases = None
|
||||
+ args.json = False
|
||||
+ # args needed to generate an instance with dsrc_arg_concat
|
||||
+ args.instance = 'master1'
|
||||
+ args.basedn = None
|
||||
+ args.binddn = None
|
||||
+ args.bindpw = None
|
||||
+ args.pwdfile = None
|
||||
+ args.prompt = False
|
||||
+ args.starttls = False
|
||||
+ dsrc_inst = dsrc_arg_concat(args, None)
|
||||
+ inst = connect_instance(dsrc_inst, True, args)
|
||||
+ get_repl_monitor_info(inst, DEFAULT_SUFFIX, log, args)
|
||||
+ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main("-s %s" % CURRENT_FILE)
|
||||
--
|
||||
2.26.2
|
||||
|
@ -0,0 +1,155 @@
|
||||
From 580880a598a8f9972994684c49593a4cf8b8969b Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Sat, 29 May 2021 13:19:53 -0400
|
||||
Subject: [PATCH 12/12] Issue 4778 - RFE - Add changelog compaction task in
|
||||
1.4.3
|
||||
|
||||
Description: In 1.4.3 the replication changelog is a separate database,
|
||||
so it needs a separate "nsds5task" compaction task (COMPACT_CL5)
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4778
|
||||
|
||||
ASAN tested and approved
|
||||
|
||||
Reviewed by: mreynolds
|
||||
---
|
||||
ldap/servers/plugins/replication/cl5_api.c | 21 +++++++++----------
|
||||
ldap/servers/plugins/replication/cl5_api.h | 1 +
|
||||
.../replication/repl5_replica_config.c | 9 +++++++-
|
||||
3 files changed, 19 insertions(+), 12 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
|
||||
index 75a2f46f5..4c5077b48 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_api.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_api.c
|
||||
@@ -266,7 +266,6 @@ static int _cl5TrimInit(void);
|
||||
static void _cl5TrimCleanup(void);
|
||||
static int _cl5TrimMain(void *param);
|
||||
static void _cl5DoTrimming(void);
|
||||
-static void _cl5CompactDBs(void);
|
||||
static void _cl5PurgeRID(Object *file_obj, ReplicaId cleaned_rid);
|
||||
static int _cl5PurgeGetFirstEntry(Object *file_obj, CL5Entry *entry, void **iterator, DB_TXN *txnid, int rid, DBT *key);
|
||||
static int _cl5PurgeGetNextEntry(CL5Entry *entry, void *iterator, DBT *key);
|
||||
@@ -3152,7 +3151,7 @@ _cl5TrimMain(void *param __attribute__((unused)))
|
||||
if (slapi_current_utc_time() > compactdb_time) {
|
||||
/* time to trim */
|
||||
timeCompactPrev = timeNow;
|
||||
- _cl5CompactDBs();
|
||||
+ cl5CompactDBs();
|
||||
compacting = PR_FALSE;
|
||||
}
|
||||
}
|
||||
@@ -3250,8 +3249,8 @@ _cl5DoPurging(cleanruv_purge_data *purge_data)
|
||||
}
|
||||
|
||||
/* clear free page files to reduce changelog */
|
||||
-static void
|
||||
-_cl5CompactDBs(void)
|
||||
+void
|
||||
+cl5CompactDBs(void)
|
||||
{
|
||||
int rc;
|
||||
Object *fileObj = NULL;
|
||||
@@ -3264,14 +3263,14 @@ _cl5CompactDBs(void)
|
||||
rc = TXN_BEGIN(s_cl5Desc.dbEnv, NULL, &txnid, 0);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - Failed to begin transaction; db error - %d %s\n",
|
||||
+ "cl5CompactDBs - Failed to begin transaction; db error - %d %s\n",
|
||||
rc, db_strerror(rc));
|
||||
goto bail;
|
||||
}
|
||||
|
||||
|
||||
slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - compacting replication changelogs...\n");
|
||||
+ "cl5CompactDBs - compacting replication changelogs...\n");
|
||||
for (fileObj = objset_first_obj(s_cl5Desc.dbFiles);
|
||||
fileObj;
|
||||
fileObj = objset_next_obj(s_cl5Desc.dbFiles, fileObj)) {
|
||||
@@ -3284,17 +3283,17 @@ _cl5CompactDBs(void)
|
||||
&c_data, DB_FREE_SPACE, NULL /*end*/);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - Failed to compact %s; db error - %d %s\n",
|
||||
+ "cl5CompactDBs - Failed to compact %s; db error - %d %s\n",
|
||||
dbFile->replName, rc, db_strerror(rc));
|
||||
goto bail;
|
||||
}
|
||||
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - %s - %d pages freed\n",
|
||||
+ "cl5CompactDBs - %s - %d pages freed\n",
|
||||
dbFile->replName, c_data.compact_pages_free);
|
||||
}
|
||||
|
||||
slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - compacting replication changelogs finished.\n");
|
||||
+ "cl5CompactDBs - compacting replication changelogs finished.\n");
|
||||
bail:
|
||||
if (fileObj) {
|
||||
object_release(fileObj);
|
||||
@@ -3303,14 +3302,14 @@ bail:
|
||||
rc = TXN_ABORT(txnid);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - Failed to abort transaction; db error - %d %s\n",
|
||||
+ "cl5CompactDBs - Failed to abort transaction; db error - %d %s\n",
|
||||
rc, db_strerror(rc));
|
||||
}
|
||||
} else {
|
||||
rc = TXN_COMMIT(txnid);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - Failed to commit transaction; db error - %d %s\n",
|
||||
+ "cl5CompactDBs - Failed to commit transaction; db error - %d %s\n",
|
||||
rc, db_strerror(rc));
|
||||
}
|
||||
}
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h
|
||||
index 4b0949fb3..11db771f2 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_api.h
|
||||
+++ b/ldap/servers/plugins/replication/cl5_api.h
|
||||
@@ -405,5 +405,6 @@ int cl5DeleteRUV(void);
|
||||
void cl5CleanRUV(ReplicaId rid);
|
||||
void cl5NotifyCleanup(int rid);
|
||||
void trigger_cl_purging(cleanruv_purge_data *purge_data);
|
||||
+void cl5CompactDBs(void);
|
||||
|
||||
#endif
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
|
||||
index a969ef82f..e708a1ccb 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
|
||||
@@ -29,6 +29,8 @@
|
||||
#define CLEANRUVLEN 8
|
||||
#define CLEANALLRUV "CLEANALLRUV"
|
||||
#define CLEANALLRUVLEN 11
|
||||
+#define COMPACT_CL5 "COMPACT_CL5"
|
||||
+#define COMPACT_CL5_LEN 11
|
||||
#define REPLICA_RDN "cn=replica"
|
||||
|
||||
#define CLEANALLRUV_MAX_WAIT 7200 /* 2 hours */
|
||||
@@ -1050,7 +1052,6 @@ replica_config_change_flags(Replica *r, const char *new_flags, char *returntext
|
||||
static int
|
||||
replica_execute_task(Replica *r, const char *task_name, char *returntext, int apply_mods)
|
||||
{
|
||||
-
|
||||
if (strcasecmp(task_name, CL2LDIF_TASK) == 0) {
|
||||
if (apply_mods) {
|
||||
return replica_execute_cl2ldif_task(r, returntext);
|
||||
@@ -1084,6 +1085,12 @@ replica_execute_task(Replica *r, const char *task_name, char *returntext, int ap
|
||||
return replica_execute_cleanall_ruv_task(r, (ReplicaId)temprid, empty_task, "no", PR_TRUE, returntext);
|
||||
} else
|
||||
return LDAP_SUCCESS;
|
||||
+ } else if (strncasecmp(task_name, COMPACT_CL5, COMPACT_CL5_LEN) == 0) {
|
||||
+ /* compact the replication changelogs */
|
||||
+ if (apply_mods) {
|
||||
+ cl5CompactDBs();
|
||||
+ }
|
||||
+ return LDAP_SUCCESS;
|
||||
} else {
|
||||
PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, "Unsupported replica task - %s", task_name);
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
|
||||
--
|
||||
2.26.3
|
||||
|
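Like the other replica tasks handled by replica_execute_task(), COMPACT_CL5 is triggered by writing the keyword to the nsds5task attribute of the cn=replica entry. A hedged lib389 sketch (inst is assumed to be an already-open DirSrv handle, and the suffix is a placeholder):

from lib389.replica import Replicas

replicas = Replicas(inst)                     # inst: open DirSrv handle
replica = replicas.get('dc=example,dc=com')   # placeholder suffix

# Writing the task keyword kicks off the changelog compaction added above.
replica.replace('nsds5task', 'COMPACT_CL5')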
@ -1,100 +0,0 @@
|
||||
From 389b2c825742392365262a719be7c8f594e7e522 Mon Sep 17 00:00:00 2001
|
||||
From: William Brown <william@blackhats.net.au>
|
||||
Date: Thu, 26 Nov 2020 09:08:13 +1000
|
||||
Subject: [PATCH] Issue 4460 - BUG - lib389 should use system tls policy
|
||||
|
||||
Bug Description: Due to some changes in dsrc for tlsreqcert
|
||||
and how def open was structured in lib389, the system ldap.conf
|
||||
policy was ignored.
|
||||
|
||||
Fix Description: Default to using the system ldap.conf policy
|
||||
if undefined in lib389 or the tls_reqcert param in dsrc.
|
||||
|
||||
fixes: #4460
|
||||
|
||||
Author: William Brown <william@blackhats.net.au>
|
||||
|
||||
Review by: ???
|
||||
---
|
||||
src/lib389/lib389/__init__.py | 11 +++++++----
|
||||
src/lib389/lib389/cli_base/dsrc.py | 16 +++++++++-------
|
||||
2 files changed, 16 insertions(+), 11 deletions(-)
|
||||
|
||||
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
|
||||
index 99ea9cc6a..4e6a1905a 100644
|
||||
--- a/src/lib389/lib389/__init__.py
|
||||
+++ b/src/lib389/lib389/__init__.py
|
||||
@@ -962,7 +962,7 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
# Now, we are still an allocated ds object so we can be re-installed
|
||||
self.state = DIRSRV_STATE_ALLOCATED
|
||||
|
||||
- def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=ldap.OPT_X_TLS_HARD,
|
||||
+ def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=None,
|
||||
usercert=None, userkey=None):
|
||||
'''
|
||||
It opens a ldap bound connection to dirsrv so that online
|
||||
@@ -1025,9 +1025,12 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
try:
|
||||
# Note this sets LDAP.OPT not SELF. Because once self has opened
|
||||
# it can NOT change opts on reused (ie restart)
|
||||
- self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert)
|
||||
- self.log.debug("Using certificate policy %s", reqcert)
|
||||
- self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", reqcert)
|
||||
+ if reqcert is not None:
|
||||
+ self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert)
|
||||
+ self.log.debug("Using lib389 certificate policy %s", reqcert)
|
||||
+ else:
|
||||
+ self.log.debug("Using /etc/openldap/ldap.conf certificate policy")
|
||||
+ self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", self.get_option(ldap.OPT_X_TLS_REQUIRE_CERT))
|
||||
except ldap.LDAPError as e:
|
||||
self.log.fatal('TLS negotiation failed: %s', e)
|
||||
raise e
|
||||
diff --git a/src/lib389/lib389/cli_base/dsrc.py b/src/lib389/lib389/cli_base/dsrc.py
|
||||
index fec18a5f9..9b09ea568 100644
|
||||
--- a/src/lib389/lib389/cli_base/dsrc.py
|
||||
+++ b/src/lib389/lib389/cli_base/dsrc.py
|
||||
@@ -45,7 +45,7 @@ def dsrc_arg_concat(args, dsrc_inst):
|
||||
'tls_cacertdir': None,
|
||||
'tls_cert': None,
|
||||
'tls_key': None,
|
||||
- 'tls_reqcert': ldap.OPT_X_TLS_HARD,
|
||||
+ 'tls_reqcert': None,
|
||||
'starttls': args.starttls,
|
||||
'prompt': False,
|
||||
'pwdfile': None,
|
||||
@@ -134,7 +134,7 @@ def dsrc_to_ldap(path, instance_name, log):
|
||||
dsrc_inst['binddn'] = config.get(instance_name, 'binddn', fallback=None)
|
||||
dsrc_inst['saslmech'] = config.get(instance_name, 'saslmech', fallback=None)
|
||||
if dsrc_inst['saslmech'] is not None and dsrc_inst['saslmech'] not in ['EXTERNAL', 'PLAIN']:
|
||||
- raise Exception("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name))
|
||||
+ raise ValueError("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name))
|
||||
|
||||
dsrc_inst['tls_cacertdir'] = config.get(instance_name, 'tls_cacertdir', fallback=None)
|
||||
# At this point, we should check if the provided cacertdir is indeed, a dir. This can be a cause
|
||||
@@ -145,16 +145,18 @@ def dsrc_to_ldap(path, instance_name, log):
|
||||
|
||||
dsrc_inst['tls_cert'] = config.get(instance_name, 'tls_cert', fallback=None)
|
||||
dsrc_inst['tls_key'] = config.get(instance_name, 'tls_key', fallback=None)
|
||||
- dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback='hard')
|
||||
- if dsrc_inst['tls_reqcert'] not in ['never', 'allow', 'hard']:
|
||||
- raise Exception("dsrc tls_reqcert value invalid. %s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name,
|
||||
- path))
|
||||
+ dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback=None)
|
||||
if dsrc_inst['tls_reqcert'] == 'never':
|
||||
dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_NEVER
|
||||
elif dsrc_inst['tls_reqcert'] == 'allow':
|
||||
dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_ALLOW
|
||||
- else:
|
||||
+ elif dsrc_inst['tls_reqcert'] == 'hard':
|
||||
dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_HARD
|
||||
+ elif dsrc_inst['tls_reqcert'] is None:
|
||||
+ # Use system value
|
||||
+ pass
|
||||
+ else:
|
||||
+ raise ValueError("dsrc tls_reqcert value invalid. %s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name, path))
|
||||
dsrc_inst['starttls'] = config.getboolean(instance_name, 'starttls', fallback=False)
|
||||
dsrc_inst['pwdfile'] = None
|
||||
dsrc_inst['prompt'] = False
|
||||
--
|
||||
2.26.2
|
||||
|
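The dsrc change above is a straight string-to-constant mapping in which an absent value now means "leave the system /etc/openldap/ldap.conf policy alone" rather than forcing hard. An equivalent sketch of that mapping:

import ldap

_REQCERT_MAP = {
    'never': ldap.OPT_X_TLS_NEVER,
    'allow': ldap.OPT_X_TLS_ALLOW,
    'hard': ldap.OPT_X_TLS_HARD,
}

def resolve_tls_reqcert(value):
    # None: the setting was absent from .dsrc, so the system-wide
    # ldap.conf policy is left untouched (the new default behaviour).
    if value is None:
        return None
    try:
        return _REQCERT_MAP[value]
    except KeyError:
        raise ValueError("tls_reqcert should be one of never, allow or hard")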
@ -1,7 +1,7 @@
|
||||
From a789f89dbf84dd5f6395198bf5cc4db88453ec4b Mon Sep 17 00:00:00 2001
|
||||
From bc41bbb89405b2059b80e344b2d4c59ae39aabe6 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Thu, 10 Jun 2021 15:03:27 +0200
|
||||
Subject: [PATCH] Issue 4797 - ACL IP ADDRESS evaluation may corrupt
|
||||
Subject: [PATCH 1/3] Issue 4797 - ACL IP ADDRESS evaluation may corrupt
|
||||
c_isreplication_session connection flags (#4799)
|
||||
|
||||
Bug description:
|
||||
@ -27,10 +27,10 @@ Platforms tested: F33
|
||||
1 file changed, 2 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
|
||||
index 1ad9d0399..9fd599bcb 100644
|
||||
index fcac53839..a64986aeb 100644
|
||||
--- a/ldap/servers/slapd/pblock.c
|
||||
+++ b/ldap/servers/slapd/pblock.c
|
||||
@@ -2589,7 +2589,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
|
||||
@@ -2595,7 +2595,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
|
||||
pblock->pb_conn->c_authtype = slapi_ch_strdup((char *)value);
|
||||
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
|
||||
break;
|
||||
@ -39,7 +39,7 @@ index 1ad9d0399..9fd599bcb 100644
|
||||
if (pblock->pb_conn == NULL) {
|
||||
break;
|
||||
}
|
||||
@@ -2597,6 +2597,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
|
||||
@@ -2603,6 +2603,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
|
||||
slapi_ch_free((void **)&pblock->pb_conn->cin_addr_aclip);
|
||||
pblock->pb_conn->cin_addr_aclip = (PRNetAddr *)value;
|
||||
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
|
@ -0,0 +1,79 @@
|
||||
From b3170e39519530c39d59202413b20e6bd466224d Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 27 Jan 2021 09:56:38 +0000
|
||||
Subject: [PATCH 2/3] Issue 4396 - Minor memory leak in backend (#4558) (#4572)
|
||||
|
||||
Bug Description: As multiple suffixes per backend were no longer used, this
|
||||
functionality has been replaced with a single suffix per backend. Legacy
|
||||
code remains that adds multiple suffixes to the dse internal backend,
|
||||
resulting in memory allocations that are lost.
|
||||
|
||||
Also a minor typo is corrected in backend.c
|
||||
|
||||
Fix Description: Calls to be_addsuffix on the DSE backend are removed
|
||||
as they are never used.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/4396
|
||||
|
||||
Reviewed by: mreynolds389, Firstyear, droideck (Thank you)
|
||||
---
|
||||
ldap/servers/slapd/backend.c | 2 +-
|
||||
ldap/servers/slapd/fedse.c | 12 +++---------
|
||||
2 files changed, 4 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
|
||||
index bc52b4643..5707504a9 100644
|
||||
--- a/ldap/servers/slapd/backend.c
|
||||
+++ b/ldap/servers/slapd/backend.c
|
||||
@@ -42,7 +42,7 @@ be_init(Slapi_Backend *be, const char *type, const char *name, int isprivate, in
|
||||
}
|
||||
be->be_monitordn = slapi_create_dn_string("cn=monitor,cn=%s,cn=%s,cn=plugins,cn=config",
|
||||
name, type);
|
||||
- if (NULL == be->be_configdn) {
|
||||
+ if (NULL == be->be_monitordn) {
|
||||
slapi_log_err(SLAPI_LOG_ERR,
|
||||
"be_init", "Failed create instance monitor dn for "
|
||||
"plugin %s, instance %s\n",
|
||||
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
|
||||
index 0d645f909..7b820b540 100644
|
||||
--- a/ldap/servers/slapd/fedse.c
|
||||
+++ b/ldap/servers/slapd/fedse.c
|
||||
@@ -2827,7 +2827,7 @@ search_snmp(Slapi_PBlock *pb __attribute__((unused)),
|
||||
}
|
||||
|
||||
/*
|
||||
- * Called from config.c to install the internal backends
|
||||
+ * Called from main.c to install the internal backends
|
||||
*/
|
||||
int
|
||||
setup_internal_backends(char *configdir)
|
||||
@@ -2846,7 +2846,6 @@ setup_internal_backends(char *configdir)
|
||||
Slapi_DN counters;
|
||||
Slapi_DN snmp;
|
||||
Slapi_DN root;
|
||||
- Slapi_Backend *be;
|
||||
Slapi_DN encryption;
|
||||
Slapi_DN saslmapping;
|
||||
Slapi_DN plugins;
|
||||
@@ -2895,16 +2894,11 @@ setup_internal_backends(char *configdir)
|
||||
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &saslmapping, LDAP_SCOPE_SUBTREE, "(objectclass=nsSaslMapping)", sasl_map_config_add, NULL, NULL);
|
||||
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &plugins, LDAP_SCOPE_SUBTREE, "(objectclass=nsSlapdPlugin)", check_plugin_path, NULL, NULL);
|
||||
|
||||
- be = be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
|
||||
- be_addsuffix(be, &root);
|
||||
- be_addsuffix(be, &monitor);
|
||||
- be_addsuffix(be, &config);
|
||||
+ be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
|
||||
|
||||
/*
|
||||
- * Now that the be's are in place, we can
|
||||
- * setup the mapping tree.
|
||||
+ * Now that the be's are in place, we can setup the mapping tree.
|
||||
*/
|
||||
-
|
||||
if (mapping_tree_init()) {
|
||||
slapi_log_err(SLAPI_LOG_EMERG, "setup_internal_backends", "Failed to init mapping tree\n");
|
||||
exit(1);
|
||||
--
|
||||
2.31.1
|
||||
|
@ -1,60 +0,0 @@
|
||||
From 05b66529117d1cd85a636ab7d8fc84abdec814de Mon Sep 17 00:00:00 2001
|
||||
From: William Brown <william@blackhats.net.au>
|
||||
Date: Thu, 12 Nov 2020 13:04:21 +1000
|
||||
Subject: [PATCH] Issue 4428 - BUG Paged Results with critical false causes
|
||||
sigsegv in chaining
|
||||
|
||||
Bug Description: When a paged search through chaining backend is
|
||||
received with a false criticality (such as SSSD), chaining backend
|
||||
will sigsegv due to a null context.
|
||||
|
||||
Fix Description: When a NULL ctx is received to be freed, this means
|
||||
the paged results have finished being sent, so we check for the NULL
|
||||
ctx and move on.
|
||||
|
||||
fixes: #4428
|
||||
|
||||
Author: William Brown <william@blackhats.net.au>
|
||||
|
||||
Review by: @droideck, @mreynolds389
|
||||
---
|
||||
ldap/servers/plugins/chainingdb/cb_search.c | 6 ++++++
|
||||
ldap/servers/plugins/chainingdb/cb_utils.c | 4 ++++
|
||||
2 files changed, 10 insertions(+)
|
||||
|
||||
diff --git a/ldap/servers/plugins/chainingdb/cb_search.c b/ldap/servers/plugins/chainingdb/cb_search.c
|
||||
index 69d23a6b5..d47cbc8e4 100644
|
||||
--- a/ldap/servers/plugins/chainingdb/cb_search.c
|
||||
+++ b/ldap/servers/plugins/chainingdb/cb_search.c
|
||||
@@ -740,6 +740,12 @@ chaining_back_search_results_release(void **sr)
|
||||
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM,
|
||||
"chaining_back_search_results_release\n");
|
||||
+ if (ctx == NULL) {
|
||||
+ /* The paged search is already complete, just return */
|
||||
+ /* Could we have a ctx state flag instead? */
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
if (ctx->readahead != ctx->tobefreed) {
|
||||
slapi_entry_free(ctx->readahead);
|
||||
}
|
||||
diff --git a/ldap/servers/plugins/chainingdb/cb_utils.c b/ldap/servers/plugins/chainingdb/cb_utils.c
|
||||
index dfd5dd92c..d52fd25a6 100644
|
||||
--- a/ldap/servers/plugins/chainingdb/cb_utils.c
|
||||
+++ b/ldap/servers/plugins/chainingdb/cb_utils.c
|
||||
@@ -279,7 +279,11 @@ cb_add_suffix(cb_backend_instance *inst, struct berval **bvals, int apply_mod, c
|
||||
return LDAP_SUCCESS;
|
||||
}
|
||||
|
||||
+#ifdef DEBUG
|
||||
+static int debug_on = 1;
|
||||
+#else
|
||||
static int debug_on = 0;
|
||||
+#endif
|
||||
|
||||
int
|
||||
cb_debug_on()
|
||||
--
|
||||
2.26.2
|
||||
|
@ -1,50 +0,0 @@
|
||||
From 4c133d448f451b7c3b2ff1b42806c7516d623f09 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 7 Dec 2020 00:41:27 +0100
|
||||
Subject: [PATCH] Issue 4315: performance search rate: nagle triggers high rate
|
||||
of setsocketopt (#4437)
|
||||
|
||||
Bug description:
|
||||
When a socket is set with NO_DELAY=0 (Nagle), written PDUs are buffered
|
||||
until the buffer is full or tcp_cork is set. This reduces network traffic when
|
||||
the application writes partial PDUs.
|
||||
DS writes complete PDUs (results/entries/...), so Nagle gives little benefit for DS.
|
||||
In addition, with Nagle 'on' by default, DS sets/unsets the socket's tcp_cork to send
|
||||
results/entries immediately at each operation. This adds syscall overhead.
|
||||
|
||||
Fix description:
|
||||
Disable Nagle by default
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4315
|
||||
|
||||
Reviewed by: @mreynolds389, @Firstyear
|
||||
|
||||
Platforms tested: F33
|
||||
---
|
||||
ldap/servers/slapd/libglobs.c | 9 ++++-----
|
||||
1 file changed, 4 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
|
||||
index 7d5374c90..f8cf162e6 100644
|
||||
--- a/ldap/servers/slapd/libglobs.c
|
||||
+++ b/ldap/servers/slapd/libglobs.c
|
||||
@@ -1635,12 +1635,11 @@ FrontendConfig_init(void)
|
||||
#endif /* USE_SYSCONF */
|
||||
|
||||
init_accesscontrol = cfg->accesscontrol = LDAP_ON;
|
||||
-#if defined(LINUX)
|
||||
- /* On Linux, by default, we use TCP_CORK so we must enable nagle */
|
||||
- init_nagle = cfg->nagle = LDAP_ON;
|
||||
-#else
|
||||
+
|
||||
+ /* nagle triggers set/unset TCP_CORK setsockopt per operation
|
||||
+ * as DS only sends complete PDU there is no benefit of nagle/tcp_cork
|
||||
+ */
|
||||
init_nagle = cfg->nagle = LDAP_OFF;
|
||||
-#endif
|
||||
init_security = cfg->security = LDAP_OFF;
|
||||
init_ssl_check_hostname = cfg->ssl_check_hostname = LDAP_ON;
|
||||
cfg->tls_check_crl = TLS_CHECK_NONE;
|
||||
--
|
||||
2.26.2
|
||||
|
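Since the server only writes complete PDUs there is nothing for Nagle to coalesce, and toggling TCP_CORK per operation is pure syscall overhead. The default the patch picks (nagle off) corresponds to the standard socket-option call, sketched here in Python:

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# nsslapd-nagle off == TCP_NODELAY on: each complete PDU is pushed to
# the wire immediately instead of being buffered by Nagle's algorithm.
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)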
@ -0,0 +1,66 @@
|
||||
From 8d06fdf44b0d337f1e321e61ee1b22972ddea917 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Fri, 2 Apr 2021 14:05:41 +0200
|
||||
Subject: [PATCH 3/3] Issue 4700 - Regression in winsync replication agreement
|
||||
(#4712)
|
||||
|
||||
Bug description:
|
||||
#4396 fixes a memory leak but did not set 'cn=config' as
|
||||
the DSE backend.
|
||||
It had no significant impact except with the sidgen IPA plugin.
|
||||
|
||||
Fix description:
|
||||
Revert the portion of the #4364 patch that set be_suffix
|
||||
in be_addsuffix; free the suffix before setting it.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4700
|
||||
|
||||
Reviewed by: Pierre Rogier (thanks !)
|
||||
|
||||
Platforms tested: F33
|
||||
---
|
||||
ldap/servers/slapd/backend.c | 3 ++-
|
||||
ldap/servers/slapd/fedse.c | 6 +++++-
|
||||
2 files changed, 7 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
|
||||
index 5707504a9..5db706841 100644
|
||||
--- a/ldap/servers/slapd/backend.c
|
||||
+++ b/ldap/servers/slapd/backend.c
|
||||
@@ -173,7 +173,8 @@ void
|
||||
be_addsuffix(Slapi_Backend *be, const Slapi_DN *suffix)
|
||||
{
|
||||
if (be->be_state != BE_STATE_DELETED) {
|
||||
- be->be_suffix = slapi_sdn_dup(suffix);;
|
||||
+ slapi_sdn_free(&be->be_suffix);
|
||||
+ be->be_suffix = slapi_sdn_dup(suffix);
|
||||
}
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
|
||||
index 7b820b540..44159c991 100644
|
||||
--- a/ldap/servers/slapd/fedse.c
|
||||
+++ b/ldap/servers/slapd/fedse.c
|
||||
@@ -2846,6 +2846,7 @@ setup_internal_backends(char *configdir)
|
||||
Slapi_DN counters;
|
||||
Slapi_DN snmp;
|
||||
Slapi_DN root;
|
||||
+ Slapi_Backend *be;
|
||||
Slapi_DN encryption;
|
||||
Slapi_DN saslmapping;
|
||||
Slapi_DN plugins;
|
||||
@@ -2894,7 +2895,10 @@ setup_internal_backends(char *configdir)
|
||||
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &saslmapping, LDAP_SCOPE_SUBTREE, "(objectclass=nsSaslMapping)", sasl_map_config_add, NULL, NULL);
|
||||
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &plugins, LDAP_SCOPE_SUBTREE, "(objectclass=nsSlapdPlugin)", check_plugin_path, NULL, NULL);
|
||||
|
||||
- be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
|
||||
+ be = be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
|
||||
+ be_addsuffix(be, &root);
|
||||
+ be_addsuffix(be, &monitor);
|
||||
+ be_addsuffix(be, &config);
|
||||
|
||||
/*
|
||||
* Now that the be's are in place, we can setup the mapping tree.
|
||||
--
|
||||
2.31.1
|
||||
|
@ -1,39 +0,0 @@
|
||||
From 3007700a659ede03085f5390153cce483ce987a1 Mon Sep 17 00:00:00 2001
|
||||
From: Firstyear <william@blackhats.net.au>
|
||||
Date: Fri, 4 Dec 2020 10:14:33 +1000
|
||||
Subject: [PATCH] Issue 4460 - BUG - add machine name to subject alt names in
|
||||
SSCA (#4472)
|
||||
|
||||
Bug Description: During SSCA creation, the server cert did not have
|
||||
the machine name, which meant that the cert would not work without
|
||||
reqcert = never.
|
||||
|
||||
Fix Description: Add the machine name as an alt name during SSCA
|
||||
creation. It is not guaranteed this value is correct, but it
|
||||
is better than nothing.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4460
|
||||
|
||||
Author: William Brown <william@blackhats.net.au>
|
||||
|
||||
Review by: mreynolds389, droideck
|
||||
---
|
||||
src/lib389/lib389/instance/setup.py | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
|
||||
index 7d42ba292..e46f2d1e5 100644
|
||||
--- a/src/lib389/lib389/instance/setup.py
|
||||
+++ b/src/lib389/lib389/instance/setup.py
|
||||
@@ -887,7 +887,7 @@ class SetupDs(object):
|
||||
tlsdb_inst = NssSsl(dbpath=os.path.join(etc_dirsrv_path, dir))
|
||||
tlsdb_inst.import_rsa_crt(ca)
|
||||
|
||||
- csr = tlsdb.create_rsa_key_and_csr()
|
||||
+ csr = tlsdb.create_rsa_key_and_csr(alt_names=[general['full_machine_name']])
|
||||
(ca, crt) = ssca.rsa_ca_sign_csr(csr)
|
||||
tlsdb.import_rsa_crt(ca, crt)
|
||||
if general['selinux']:
|
||||
--
|
||||
2.26.2
|
||||
|
88
SOURCES/0016-Issue-4725-Fix-compiler-warnings.patch
Normal file
@ -0,0 +1,88 @@
|
||||
From 7345c51c68dfd90a704ccbb0e5b1e736af80f146 Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 17 May 2021 16:10:22 +0200
|
||||
Subject: [PATCH] Issue 4725 - Fix compiler warnings
|
||||
|
||||
---
|
||||
ldap/servers/slapd/proto-slap.h | 2 +-
|
||||
ldap/servers/slapd/pw.c | 9 ++++-----
|
||||
ldap/servers/slapd/pw_retry.c | 2 --
|
||||
3 files changed, 5 insertions(+), 8 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
|
||||
index 6ff178127..2768d5a1d 100644
|
||||
--- a/ldap/servers/slapd/proto-slap.h
|
||||
+++ b/ldap/servers/slapd/proto-slap.h
|
||||
@@ -1012,7 +1012,7 @@ int add_shadow_ext_password_attrs(Slapi_PBlock *pb, Slapi_Entry **e);
|
||||
* pw_retry.c
|
||||
*/
|
||||
int update_pw_retry(Slapi_PBlock *pb);
|
||||
-int update_trp_pw_usecount(Slapi_PBlock *pb, Slapi_Entry *e, int32_t use_count);
|
||||
+int update_tpr_pw_usecount(Slapi_PBlock *pb, Slapi_Entry *e, int32_t use_count);
|
||||
void pw_apply_mods(const Slapi_DN *sdn, Slapi_Mods *mods);
|
||||
void pw_set_componentID(struct slapi_componentid *cid);
|
||||
struct slapi_componentid *pw_get_componentID(void);
|
||||
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
|
||||
index d98422513..2a167c8f1 100644
|
||||
--- a/ldap/servers/slapd/pw.c
|
||||
+++ b/ldap/servers/slapd/pw.c
|
||||
@@ -2622,7 +2622,6 @@ int
|
||||
slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int send_result) {
|
||||
passwdPolicy *pwpolicy = NULL;
|
||||
char *dn = NULL;
|
||||
- int tpr_maxuse;
|
||||
char *value;
|
||||
time_t cur_time;
|
||||
char *cur_time_str = NULL;
|
||||
@@ -2638,7 +2637,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
|
||||
return 0;
|
||||
}
|
||||
|
||||
- if (slapi_entry_attr_hasvalue(bind_target_entry, "pwdTPRReset", "TRUE") == NULL) {
|
||||
+ if (!slapi_entry_attr_hasvalue(bind_target_entry, "pwdTPRReset", "TRUE")) {
|
||||
/* the password was not reset by an admin while a TRP pwp was set, just returned */
|
||||
return 0;
|
||||
}
|
||||
@@ -2646,7 +2645,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
|
||||
/* Check entry TPR max use */
|
||||
if (pwpolicy->pw_tpr_maxuse >= 0) {
|
||||
uint use_count;
|
||||
- value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRUseCount");
|
||||
+ value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRUseCount");
|
||||
if (value) {
|
||||
/* max Use is enforced */
|
||||
use_count = strtoull(value, 0, 0);
|
||||
@@ -2681,7 +2680,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
|
||||
|
||||
/* Check entry TPR expiration at a specific time */
|
||||
if (pwpolicy->pw_tpr_delay_expire_at >= 0) {
|
||||
- value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRExpireAt");
|
||||
+ value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRExpireAt");
|
||||
if (value) {
|
||||
/* max Use is enforced */
|
||||
if (difftime(parse_genTime(cur_time_str), parse_genTime(value)) >= 0) {
|
||||
@@ -2709,7 +2708,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
|
||||
|
||||
/* Check entry TPR valid after a specific time */
|
||||
if (pwpolicy->pw_tpr_delay_valid_from >= 0) {
|
||||
- value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRValidFrom");
|
||||
+ value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRValidFrom");
|
||||
if (value) {
|
||||
/* validity after a specific time is enforced */
|
||||
if (difftime(parse_genTime(value), parse_genTime(cur_time_str)) >= 0) {
|
||||
diff --git a/ldap/servers/slapd/pw_retry.c b/ldap/servers/slapd/pw_retry.c
|
||||
index 5d13eb636..af54aa19d 100644
|
||||
--- a/ldap/servers/slapd/pw_retry.c
|
||||
+++ b/ldap/servers/slapd/pw_retry.c
|
||||
@@ -163,8 +163,6 @@ set_retry_cnt_and_time(Slapi_PBlock *pb, int count, time_t cur_time)
|
||||
int
|
||||
set_tpr_usecount_mods(Slapi_PBlock *pb, Slapi_Mods *smods, int count)
|
||||
{
|
||||
- char *timestr;
|
||||
- time_t unlock_time;
|
||||
char retry_cnt[16] = {0}; /* 1-65535 */
|
||||
const char *dn = NULL;
|
||||
Slapi_DN *sdn = NULL;
|
||||
--
|
||||
2.31.1
|
||||
|
@ -1,50 +0,0 @@
|
||||
From 1386b140d8cc81d37fdea6593487fe542587ccac Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 9 Dec 2020 09:52:08 -0500
|
||||
Subject: [PATCH] Issue 4483 - heap-use-after-free in slapi_be_getsuffix
|
||||
|
||||
Description: heap-use-after-free in slapi_be_getsuffix after disk
|
||||
monitoring runs. This feature frees a list of
|
||||
backends, which it does not need to do.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/4483
|
||||
|
||||
Reviewed by: firstyear & tbordaz(Thanks!!)
|
||||
---
|
||||
ldap/servers/slapd/daemon.c | 13 +------------
|
||||
1 file changed, 1 insertion(+), 12 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 49199e4df..691f77570 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -606,12 +606,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
|
||||
now = start;
|
||||
while ((now - start) < grace_period) {
|
||||
if (g_get_shutdown()) {
|
||||
- be_index = 0;
|
||||
- if (be_list[be_index] != NULL) {
|
||||
- while ((be = be_list[be_index++])) {
|
||||
- slapi_be_free(&be);
|
||||
- }
|
||||
- }
|
||||
slapi_ch_array_free(dirs);
|
||||
dirs = NULL;
|
||||
return;
|
||||
@@ -706,12 +700,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
|
||||
}
|
||||
}
|
||||
}
|
||||
- be_index = 0;
|
||||
- if (be_list[be_index] != NULL) {
|
||||
- while ((be = be_list[be_index++])) {
|
||||
- slapi_be_free(&be);
|
||||
- }
|
||||
- }
|
||||
+
|
||||
slapi_ch_array_free(dirs);
|
||||
dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */
|
||||
g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL);
|
||||
--
|
||||
2.26.2
|
||||
|
@ -0,0 +1,202 @@
|
||||
From 59266365eda8130abf6901263efae4c87586376a Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 28 Jun 2021 16:40:15 +0200
|
||||
Subject: [PATCH] Issue 4814 - _cl5_get_tod_expiration may crash at startup
|
||||
|
||||
Bug description:
|
||||
This bug exists only in the 1.4.3 branch.
|
||||
In 1.4.3, the CL is opened as a separate database, so the
|
||||
compaction mechanism is started along with the CL
|
||||
trimming mechanism.
|
||||
The problem is that the configuration of the CL
|
||||
compaction is applied after the compaction mechanism
|
||||
has started. Depending on thread scheduling, it
|
||||
crashes.
|
||||
|
||||
Fix description:
|
||||
Make sure configuration of compaction thread is
|
||||
taken into account (cl5ConfigSetCompaction) before
|
||||
the compaction thread starts (cl5Open).
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4814
|
||||
|
||||
Reviewed by: Mark Reynolds, Simon Pichugin (thanks !)
|
||||
|
||||
Platforms tested: 8.5
|
||||
---
|
||||
ldap/servers/plugins/replication/cl5_api.c | 24 ++++++++++++-------
|
||||
ldap/servers/plugins/replication/cl5_api.h | 10 +++++++-
|
||||
ldap/servers/plugins/replication/cl5_config.c | 8 +++++--
|
||||
ldap/servers/plugins/replication/cl5_init.c | 4 +++-
|
||||
ldap/servers/plugins/replication/cl5_test.c | 2 +-
|
||||
.../servers/plugins/replication/repl_shared.h | 2 +-
|
||||
6 files changed, 35 insertions(+), 15 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
|
||||
index 4c5077b48..954b6b9e3 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_api.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_api.c
|
||||
@@ -1016,6 +1016,20 @@ cl5GetState()
|
||||
return s_cl5Desc.dbState;
|
||||
}
|
||||
|
||||
+void
|
||||
+cl5ConfigSetCompaction(int compactInterval, char *compactTime)
|
||||
+{
|
||||
+
|
||||
+ if (compactInterval != CL5_NUM_IGNORE) {
|
||||
+ s_cl5Desc.dbTrim.compactInterval = compactInterval;
|
||||
+ }
|
||||
+
|
||||
+ if (strcmp(compactTime, CL5_STR_IGNORE) != 0) {
|
||||
+ s_cl5Desc.dbTrim.compactTime = slapi_ch_strdup(compactTime);
|
||||
+ }
|
||||
+
|
||||
+}
|
||||
+
|
||||
/* Name: cl5ConfigTrimming
|
||||
Description: sets changelog trimming parameters; changelog must be open.
|
||||
Parameters: maxEntries - maximum number of entries in the chnagelog (in all files);
|
||||
@@ -1026,7 +1040,7 @@ cl5GetState()
|
||||
CL5_BAD_STATE if changelog is not open
|
||||
*/
|
||||
int
|
||||
-cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval)
|
||||
+cl5ConfigTrimming(int maxEntries, const char *maxAge, int trimInterval)
|
||||
{
|
||||
if (s_cl5Desc.dbState == CL5_STATE_NONE) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
@@ -1058,14 +1072,6 @@ cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char
|
||||
s_cl5Desc.dbTrim.maxEntries = maxEntries;
|
||||
}
|
||||
|
||||
- if (compactInterval != CL5_NUM_IGNORE) {
|
||||
- s_cl5Desc.dbTrim.compactInterval = compactInterval;
|
||||
- }
|
||||
-
|
||||
- if (strcmp(compactTime, CL5_STR_IGNORE) != 0) {
|
||||
- s_cl5Desc.dbTrim.compactTime = slapi_ch_strdup(compactTime);
|
||||
- }
|
||||
-
|
||||
if (trimInterval != CL5_NUM_IGNORE) {
|
||||
s_cl5Desc.dbTrim.trimInterval = trimInterval;
|
||||
}
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h
|
||||
index 11db771f2..6aa48aec4 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_api.h
|
||||
+++ b/ldap/servers/plugins/replication/cl5_api.h
|
||||
@@ -227,6 +227,14 @@ int cl5ImportLDIF(const char *clDir, const char *ldifFile, Replica **replicas);
|
||||
|
||||
int cl5GetState(void);
|
||||
|
||||
+/* Name: cl5ConfigSetCompaction
|
||||
+ * Description: sets the database compaction parameters
|
||||
+ * Parameters: compactInterval - Interval for compaction default is 30days
|
||||
+ * compactTime - Compact time default is 23:59
|
||||
+ * Return: void
|
||||
+ */
|
||||
+void cl5ConfigSetCompaction(int compactInterval, char *compactTime);
|
||||
+
|
||||
/* Name: cl5ConfigTrimming
|
||||
Description: sets changelog trimming parameters
|
||||
Parameters: maxEntries - maximum number of entries in the log;
|
||||
@@ -236,7 +244,7 @@ int cl5GetState(void);
|
||||
Return: CL5_SUCCESS if successful;
|
||||
CL5_BAD_STATE if changelog has not been open
|
||||
*/
|
||||
-int cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval);
|
||||
+int cl5ConfigTrimming(int maxEntries, const char *maxAge, int trimInterval);
|
||||
|
||||
void cl5DestroyIterator(void *iterator);
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_config.c b/ldap/servers/plugins/replication/cl5_config.c
|
||||
index b32686788..a43534c9b 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_config.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_config.c
|
||||
@@ -197,6 +197,8 @@ changelog5_config_add(Slapi_PBlock *pb __attribute__((unused)),
|
||||
|
||||
goto done;
|
||||
}
|
||||
+ /* Set compaction parameters */
|
||||
+ cl5ConfigSetCompaction(config.compactInterval, config.compactTime);
|
||||
|
||||
/* start the changelog */
|
||||
rc = cl5Open(config.dir, &config.dbconfig);
|
||||
@@ -212,7 +214,7 @@ changelog5_config_add(Slapi_PBlock *pb __attribute__((unused)),
|
||||
}
|
||||
|
||||
/* set trimming parameters */
|
||||
- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
|
||||
+ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval);
|
||||
if (rc != CL5_SUCCESS) {
|
||||
*returncode = 1;
|
||||
if (returntext) {
|
||||
@@ -548,6 +550,8 @@ changelog5_config_modify(Slapi_PBlock *pb,
|
||||
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
|
||||
"changelog5_config_modify - Deleted the changelog at %s\n", currentDir);
|
||||
}
|
||||
+ /* Set compaction parameters */
|
||||
+ cl5ConfigSetCompaction(config.compactInterval, config.compactTime);
|
||||
|
||||
rc = cl5Open(config.dir, &config.dbconfig);
|
||||
if (rc != CL5_SUCCESS) {
|
||||
@@ -575,7 +579,7 @@ changelog5_config_modify(Slapi_PBlock *pb,
|
||||
if (config.maxEntries != CL5_NUM_IGNORE ||
|
||||
config.trimInterval != CL5_NUM_IGNORE ||
|
||||
strcmp(config.maxAge, CL5_STR_IGNORE) != 0) {
|
||||
- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
|
||||
+ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval);
|
||||
if (rc != CL5_SUCCESS) {
|
||||
*returncode = 1;
|
||||
if (returntext) {
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_init.c b/ldap/servers/plugins/replication/cl5_init.c
|
||||
index 251859714..567e0274c 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_init.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_init.c
|
||||
@@ -45,6 +45,8 @@ changelog5_init()
|
||||
rc = 0; /* OK */
|
||||
goto done;
|
||||
}
|
||||
+ /* Set compaction parameters */
|
||||
+ cl5ConfigSetCompaction(config.compactInterval, config.compactTime);
|
||||
|
||||
/* start changelog */
|
||||
rc = cl5Open(config.dir, &config.dbconfig);
|
||||
@@ -57,7 +59,7 @@ changelog5_init()
|
||||
}
|
||||
|
||||
/* set trimming parameters */
|
||||
- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
|
||||
+ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval);
|
||||
if (rc != CL5_SUCCESS) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
"changelog5_init: failed to configure changelog trimming\n");
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_test.c b/ldap/servers/plugins/replication/cl5_test.c
|
||||
index d6656653c..efb8c543a 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_test.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_test.c
|
||||
@@ -281,7 +281,7 @@ testTrimming()
|
||||
rc = populateChangelog(300, NULL);
|
||||
|
||||
if (rc == 0)
|
||||
- rc = cl5ConfigTrimming(300, "1d", CHANGELOGDB_COMPACT_INTERVAL, CHANGELOGDB_TRIM_INTERVAL);
|
||||
+ rc = cl5ConfigTrimming(300, "1d", CHANGELOGDB_TRIM_INTERVAL);
|
||||
|
||||
interval = PR_SecondsToInterval(300); /* 5 min is default trimming interval */
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
diff --git a/ldap/servers/plugins/replication/repl_shared.h b/ldap/servers/plugins/replication/repl_shared.h
|
||||
index 6708e12f7..b59b2bd27 100644
|
||||
--- a/ldap/servers/plugins/replication/repl_shared.h
|
||||
+++ b/ldap/servers/plugins/replication/repl_shared.h
|
||||
@@ -26,7 +26,7 @@
|
||||
|
||||
#define CHANGELOGDB_TRIM_INTERVAL 300 /* 5 minutes */
|
||||
#define CHANGELOGDB_COMPACT_INTERVAL 2592000 /* 30 days */
|
||||
-#define CHANGELOGDB_COMPACT_TIME "23:55" /* 30 days */
|
||||
+#define CHANGELOGDB_COMPACT_TIME "23:59" /* around midnight */
|
||||
|
||||
#define CONFIG_CHANGELOG_DIR_ATTRIBUTE "nsslapd-changelogdir"
|
||||
#define CONFIG_CHANGELOG_MAXENTRIES_ATTRIBUTE "nsslapd-changelogmaxentries"
|
||||
--
|
||||
2.31.1
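
The fix above enforces a simple ordering rule: shared configuration must be fully written before the thread that reads it is started. A small, self-contained C sketch of the same pattern follows; the names are invented (in the real code the ordering is cl5ConfigSetCompaction() before cl5Open()).

#include <pthread.h>
#include <stdio.h>

/* Shared, thread-visible configuration, written once before thread start. */
static struct {
    int compact_interval;
    const char *compact_time;
} trim_cfg;

static void *compaction_thread(void *arg)
{
    (void)arg;
    /* Safe only because trim_cfg was fully populated before pthread_create();
     * reading it from a thread started earlier would be the race fixed above. */
    printf("compacting every %d s at %s\n",
           trim_cfg.compact_interval, trim_cfg.compact_time);
    return NULL;
}

int main(void)
{
    /* 1. configure first (cl5ConfigSetCompaction in the patch) */
    trim_cfg.compact_interval = 2592000;
    trim_cfg.compact_time = "23:59";

    /* 2. only then start the reader (cl5Open spawns the compaction thread) */
    pthread_t tid;
    pthread_create(&tid, NULL, compaction_thread, NULL);
    pthread_join(tid, NULL);
    return 0;
}
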
@ -1,65 +0,0 @@
From 6e827f6d5e64e0be316f4e17111b2884899d302c Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 16 Dec 2020 16:30:28 +0100
Subject: [PATCH] Issue 4480 - Unexpected info returned to ldap request (#4491)

Bug description:
	If the bind entry does not exist, the bind result info
	reports 'No such entry'. It should not give any
	information about whether the target entry exists.

Fix description:
	Do not return any additional information during a bind.

relates: https://github.com/389ds/389-ds-base/issues/4480

Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all)

Platforms tested: F31
---
 dirsrvtests/tests/suites/basic/basic_test.py | 1 -
 ldap/servers/slapd/back-ldbm/ldbm_config.c   | 2 +-
 ldap/servers/slapd/result.c                  | 2 +-
 3 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 120207321..1ae82dcdd 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -1400,7 +1400,6 @@ def test_dscreate_multiple_dashes_name(dscreate_long_instance):
     assert not dscreate_long_instance.exists()
 
 
-
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
index 3fe86d567..10cef250f 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
@@ -1234,7 +1234,7 @@ ldbm_config_search_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
     if (attrs) {
         for (size_t i = 0; attrs[i]; i++) {
             if (ldbm_config_moved_attr(attrs[i])) {
-                slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry");
+                slapi_pblock_set(pb, SLAPI_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry");
                 break;
             }
         }
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index 9daf3b151..ab0d79454 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -355,7 +355,7 @@ send_ldap_result_ext(
     if (text) {
         pbtext = text;
     } else {
-        slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &pbtext);
+        slapi_pblock_get(pb, SLAPI_RESULT_TEXT, &pbtext);
    }
 
     if (operation == NULL) {
--
2.26.2
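
The principle behind this fix is that a failed bind must look identical whether the target entry is missing or the password is wrong; otherwise an attacker can enumerate valid DNs. A hedged, minimal C sketch of that rule (hypothetical helpers, not the server's actual bind path):

#include <stdio.h>
#include <string.h>

#define LDAP_INVALID_CREDENTIALS 49

static int entry_exists(const char *dn)
{
    return strcmp(dn, "uid=known,dc=example,dc=com") == 0;
}

static int bind_result(const char *dn, int password_ok)
{
    if (!entry_exists(dn) || !password_ok) {
        /* No "No such entry" detail: both failure paths return the same
         * generic code, so nothing leaks about the entry's existence. */
        return LDAP_INVALID_CREDENTIALS;
    }
    return 0; /* LDAP_SUCCESS */
}

int main(void)
{
    printf("%d %d\n",
           bind_result("uid=missing,dc=example,dc=com", 1),
           bind_result("uid=known,dc=example,dc=com", 0)); /* both print 49 */
    return 0;
}
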
@ -0,0 +1,51 @@
From e7fdfe527a5f72674fe4b577a0555cabf8ec73a5 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 7 Jun 2021 11:23:35 +0200
Subject: [PATCH] Issue 4789 - Temporary password rules are not enforce with
 local password policy (#4790)

Bug description:
	When allocating a password policy structure (new_passwdPolicy),
	it is initialized with the local policy definition or
	the global one. If a local policy entry exists, the TPR
	attributes (passwordTPRMaxUse, passwordTPRDelayValidFrom and
	passwordTPRDelayExpireAt) are not taken into account.

Fix description:
	Take the TPR attributes into account when initializing the policy.

relates: https://github.com/389ds/389-ds-base/issues/4789

Reviewed by: Simon Pichugin, William Brown

Platforms tested: F34
---
 ldap/servers/slapd/pw.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
index 2a167c8f1..7680df41d 100644
--- a/ldap/servers/slapd/pw.c
+++ b/ldap/servers/slapd/pw.c
@@ -2356,6 +2356,18 @@ new_passwdPolicy(Slapi_PBlock *pb, const char *dn)
             if ((sval = attr_get_present_values(attr))) {
                 pwdpolicy->pw_dict_path = (char *)slapi_value_get_string(*sval);
             }
+        } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_MAXUSE)) {
+            if ((sval = attr_get_present_values(attr))) {
+                pwdpolicy->pw_tpr_maxuse = slapi_value_get_int(*sval);
+            }
+        } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_DELAY_EXPIRE_AT)) {
+            if ((sval = attr_get_present_values(attr))) {
+                pwdpolicy->pw_tpr_delay_expire_at = slapi_value_get_int(*sval);
+            }
+        } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_DELAY_VALID_FROM)) {
+            if ((sval = attr_get_present_values(attr))) {
+                pwdpolicy->pw_tpr_delay_valid_from = slapi_value_get_int(*sval);
+            }
         }
     } /* end of for() loop */
     if (pw_entry) {
--
2.31.1
@ -1,108 +0,0 @@
From 1fef5649ce05a17a741789cafb65269c099b396b Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Wed, 16 Dec 2020 16:21:35 +0100
Subject: [PATCH 2/3] Issue #4504 - Fix pytest test_dsconf_replication_monitor
 (#4505)

(cherry picked from commit 0b08e6f35b000d1383580be59f902ac813e940f2)
---
 .../tests/suites/clu/repl_monitor_test.py | 50 +++++++++++++------
 1 file changed, 36 insertions(+), 14 deletions(-)

diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
index b03d170c8..eb18d2da2 100644
--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
@@ -9,6 +9,7 @@
 import time
 import subprocess
 import pytest
+import re
 
 from lib389.cli_conf.replication import get_repl_monitor_info
 from lib389.tasks import *
@@ -67,6 +68,25 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No
         log.info('Reset log file')
         f.truncate(0)
 
+def get_hostnames_from_log(port1, port2):
+    # Get the supplier host names as displayed in replication monitor output
+    with open(LOG_FILE, 'r') as logfile:
+        logtext = logfile.read()
+    # search for Supplier :hostname:port
+    # and use \D to insure there is no more number is after
+    # the matched port (i.e that 10 is not matching 101)
+    regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)'
+    match=re.search(regexp, logtext)
+    host_m1 = 'localhost.localdomain'
+    if (match is not None):
+        host_m1 = match.group(2)
+    # Same for master 2
+    regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)'
+    match=re.search(regexp, logtext)
+    host_m2 = 'localhost.localdomain'
+    if (match is not None):
+        host_m2 = match.group(2)
+    return (host_m1, host_m2)
 
 @pytest.mark.ds50545
 @pytest.mark.bz1739718
@@ -95,9 +115,6 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
     m1 = topology_m2.ms["master1"]
     m2 = topology_m2.ms["master2"]
 
-    alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')',
-                     'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')']
-
     connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port)
     content_list = ['Replica Root: dc=example,dc=com',
                     'Replica ID: 1',
@@ -160,20 +177,9 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
                   '001',
                   m1.host + ':' + str(m1.port)]
 
-    dsrc_content = '[repl-monitor-connections]\n' \
-                   'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
-                   'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
-                   '\n' \
-                   '[repl-monitor-aliases]\n' \
-                   'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
-                   'M2 = ' + m2.host + ':' + str(m2.port)
-
     connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
                    m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM]
 
-    aliases = ['M1=' + m1.host + ':' + str(m1.port),
-               'M2=' + m2.host + ':' + str(m2.port)]
-
     args = FakeArgs()
     args.connections = connections
     args.aliases = None
@@ -181,8 +187,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
 
     log.info('Run replication monitor with connections option')
     get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+    (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port)
     check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
 
+    # Prepare the data for next tests
+    aliases = ['M1=' + host_m1 + ':' + str(m1.port),
+               'M2=' + host_m2 + ':' + str(m2.port)]
+
+    alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')',
+                     'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')']
+
+    dsrc_content = '[repl-monitor-connections]\n' \
+                   'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+                   'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+                   '\n' \
+                   '[repl-monitor-aliases]\n' \
+                   'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \
+                   'M2 = ' + host_m2 + ':' + str(m2.port)
+
     log.info('Run replication monitor with aliases option')
     args.aliases = aliases
     get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
--
2.26.2
@ -0,0 +1,350 @@
From 6a741b3ef50babf2ac2479437a38829204ffd438 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 17 Jun 2021 16:22:09 +0200
Subject: [PATCH] Issue 4788 - CLI should support Temporary Password Rules
 attributes (#4793)

Bug description:
	Since #4725, password policy supports temporary password rules.
	CLI (dsconf) does not support this RFE, so only a direct ldap
	operation can configure the global/local password policy.

Fix description:
	Update dsconf to support this new RFE.
	To run the testcase successfully, it relies on #4788.

relates: #4788

Reviewed by: Simon Pichugin (thanks !!)

Platforms tested: F34
---
 .../password/pwdPolicy_attribute_test.py | 172 ++++++++++++++++--
 src/lib389/lib389/cli_conf/pwpolicy.py   |   5 +-
 src/lib389/lib389/pwpolicy.py            |   5 +-
 3 files changed, 165 insertions(+), 17 deletions(-)

diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
index aee3a91ad..085d0a373 100644
--- a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
@@ -34,7 +34,7 @@ log = logging.getLogger(__name__)
 
 
 @pytest.fixture(scope="module")
-def create_user(topology_st, request):
+def test_user(topology_st, request):
     """User for binding operation"""
     topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on')
     log.info('Adding test user {}')
@@ -56,10 +56,11 @@ def create_user(topology_st, request):
         topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
     request.addfinalizer(fin)
+    return user
 
 
 @pytest.fixture(scope="module")
-def password_policy(topology_st, create_user):
+def password_policy(topology_st, test_user):
     """Set up password policy for subtree and user"""
 
     pwp = PwPolicyManager(topology_st.standalone)
@@ -71,7 +72,7 @@ def password_policy(topology_st, create_user):
     pwp.create_user_policy(TEST_USER_DN, policy_props)
 
 @pytest.mark.skipif(ds_is_older('1.4.3.3'), reason="Not implemented")
-def test_pwd_reset(topology_st, create_user):
+def test_pwd_reset(topology_st, test_user):
     """Test new password policy attribute "pwdReset"
 
     :id: 03db357b-4800-411e-a36e-28a534293004
@@ -124,7 +125,7 @@ def test_pwd_reset(topology_st, create_user):
     [('on', 'off', ldap.UNWILLING_TO_PERFORM),
      ('off', 'off', ldap.UNWILLING_TO_PERFORM),
      ('off', 'on', False), ('on', 'on', False)])
-def test_change_pwd(topology_st, create_user, password_policy,
+def test_change_pwd(topology_st, test_user, password_policy,
                     subtree_pwchange, user_pwchange, exception):
     """Verify that 'passwordChange' attr works as expected
     User should have a priority over a subtree.
@@ -184,7 +185,7 @@ def test_change_pwd(topology_st, create_user, password_policy,
         user.reset_password(TEST_USER_PWD)
 
 
-def test_pwd_min_age(topology_st, create_user, password_policy):
+def test_pwd_min_age(topology_st, test_user, password_policy):
     """If we set passwordMinAge to some value, for example to 10, then it
     should not allow the user to change the password within 10 seconds after
     his previous change.
@@ -257,7 +258,7 @@ def test_pwd_min_age(topology_st, create_user, password_policy):
     topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
     user.reset_password(TEST_USER_PWD)
 
-def test_global_tpr_maxuse_1(topology_st, create_user, request):
+def test_global_tpr_maxuse_1(topology_st, test_user, request):
     """Test global TPR policy : passwordTPRMaxUse
     Test that after passwordTPRMaxUse failures to bind
     additional bind with valid password are failing with CONSTRAINT_VIOLATION
@@ -374,7 +375,7 @@ def test_global_tpr_maxuse_1(topology_st, create_user, request):
 
     request.addfinalizer(fin)
 
-def test_global_tpr_maxuse_2(topology_st, create_user, request):
+def test_global_tpr_maxuse_2(topology_st, test_user, request):
     """Test global TPR policy : passwordTPRMaxUse
     Test that after less than passwordTPRMaxUse failures to bind
     additional bind with valid password are successfull
@@ -474,7 +475,7 @@ def test_global_tpr_maxuse_2(topology_st, create_user, request):
 
     request.addfinalizer(fin)
 
-def test_global_tpr_maxuse_3(topology_st, create_user, request):
+def test_global_tpr_maxuse_3(topology_st, test_user, request):
     """Test global TPR policy : passwordTPRMaxUse
     Test that after less than passwordTPRMaxUse failures to bind
     A bind with valid password is successfull but passwordMustChange
@@ -587,7 +588,7 @@ def test_global_tpr_maxuse_3(topology_st, create_user, request):
 
     request.addfinalizer(fin)
 
-def test_global_tpr_maxuse_4(topology_st, create_user, request):
+def test_global_tpr_maxuse_4(topology_st, test_user, request):
     """Test global TPR policy : passwordTPRMaxUse
     Test that a TPR attribute passwordTPRMaxUse
     can be updated by DM but not the by user itself
@@ -701,7 +702,148 @@ def test_global_tpr_maxuse_4(topology_st, create_user, request):
 
     request.addfinalizer(fin)
 
-def test_global_tpr_delayValidFrom_1(topology_st, create_user, request):
+def test_local_tpr_maxuse_5(topology_st, test_user, request):
+    """Test TPR local policy overpass global one: passwordTPRMaxUse
+    Test that after passwordTPRMaxUse failures to bind
+    additional bind with valid password are failing with CONSTRAINT_VIOLATION
+
+    :id: c3919707-d804-445a-8754-8385b1072c42
+    :customerscenario: False
+    :setup: Standalone instance
+    :steps:
+        1. Global password policy Enable passwordMustChange
+        2. Global password policy Set passwordTPRMaxUse=5
+        3. Global password policy Set passwordMaxFailure to a higher value to not disturb the test
+        4. Local password policy Enable passwordMustChange
+        5. Local password policy Set passwordTPRMaxUse=10 (higher than global)
+        6. Bind with a wrong password 10 times and check INVALID_CREDENTIALS
+        7. Check that passwordTPRUseCount got to the limit (5)
+        8. Bind with a wrong password (CONSTRAINT_VIOLATION)
+           and check passwordTPRUseCount overpass the limit by 1 (11)
+        9. Bind with a valid password 10 times and check CONSTRAINT_VIOLATION
+           and check passwordTPRUseCount increases
+        10. Reset password policy configuration and remove local password from user
+    :expected results:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+        5. Success
+        6. Success
+        7. Success
+        8. Success
+        9. Success
+        10. Success
+    """
+
+    global_tpr_maxuse = 5
+    # Set global password policy config, passwordMaxFailure being higher than
+    # passwordTPRMaxUse so that TPR is enforced first
+    topology_st.standalone.config.replace('passwordMustChange', 'on')
+    topology_st.standalone.config.replace('passwordMaxFailure', str(global_tpr_maxuse + 20))
+    topology_st.standalone.config.replace('passwordTPRMaxUse', str(global_tpr_maxuse))
+    time.sleep(.5)
+
+    local_tpr_maxuse = global_tpr_maxuse + 5
+    # Reset user's password with a local password policy
+    # that has passwordTPRMaxUse higher than global
+    #our_user = UserAccount(topology_st.standalone, TEST_USER_DN)
+    subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(),
+                     'slapd-standalone1',
+                     'localpwp',
+                     'adduser',
+                     test_user.dn])
+    subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(),
+                     'slapd-standalone1',
+                     'localpwp',
+                     'set',
+                     '--pwptprmaxuse',
+                     str(local_tpr_maxuse),
+                     '--pwdmustchange',
+                     'on',
+                     test_user.dn])
+    test_user.replace('userpassword', PASSWORD)
+    time.sleep(.5)
+
+    # look up to passwordTPRMaxUse with failing
+    # bind to check that the limits of TPR are enforced
+    for i in range(local_tpr_maxuse):
+        # Bind as user with a wrong password
+        with pytest.raises(ldap.INVALID_CREDENTIALS):
+            test_user.rebind('wrong password')
+        time.sleep(.5)
+
+        # Check that pwdReset is TRUE
+        topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+        #assert test_user.get_attr_val_utf8('pwdReset') == 'TRUE'
+
+        # Check that pwdTPRReset is TRUE
+        assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
+        assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1)
+        log.info("%dth failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (i+1, i+1))
+
+
+    # Now the #failures reached passwordTPRMaxUse
+    # Check that pwdReset is TRUE
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+
+    # Check that pwdTPRReset is TRUE
+    assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
+    assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse)
+    log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (local_tpr_maxuse))
+
+    # Bind as user with wrong password --> ldap.CONSTRAINT_VIOLATION
+    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+        test_user.rebind("wrong password")
+    time.sleep(.5)
+
+    # Check that pwdReset is TRUE
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+
+    # Check that pwdTPRReset is TRUE
+    assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
+    assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + 1)
+    log.info("failing bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (local_tpr_maxuse + i))
+
+    # Now check that all next attempts with correct password are all in LDAP_CONSTRAINT_VIOLATION
+    # and passwordTPRRetryCount remains unchanged
+    # account is now similar to locked
+    for i in range(10):
+        # Bind as user with valid password
+        with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+            test_user.rebind(PASSWORD)
+        time.sleep(.5)
+
+        # Check that pwdReset is TRUE
+        topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+
+        # Check that pwdTPRReset is TRUE
+        # pwdTPRUseCount keeps increasing
+        assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
+        assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + i + 2)
+        log.info("Rejected bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (local_tpr_maxuse + i + 2))
+
+
+    def fin():
+        topology_st.standalone.restart()
+        # Reset password policy config
+        topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+        topology_st.standalone.config.replace('passwordMustChange', 'off')
+
+        # Remove local password policy from that entry
+        subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(),
+                         'slapd-standalone1',
+                         'localpwp',
+                         'remove',
+                         test_user.dn])
+
+        # Reset user's password
+        test_user.replace('userpassword', TEST_USER_PWD)
+
+
+    request.addfinalizer(fin)
+
+def test_global_tpr_delayValidFrom_1(topology_st, test_user, request):
     """Test global TPR policy : passwordTPRDelayValidFrom
     Test that a TPR password is not valid before reset time +
     passwordTPRDelayValidFrom
@@ -766,7 +908,7 @@ def test_global_tpr_delayValidFrom_1(topology_st, create_user, request):
 
     request.addfinalizer(fin)
 
-def test_global_tpr_delayValidFrom_2(topology_st, create_user, request):
+def test_global_tpr_delayValidFrom_2(topology_st, test_user, request):
     """Test global TPR policy : passwordTPRDelayValidFrom
     Test that a TPR password is valid after reset time +
     passwordTPRDelayValidFrom
@@ -838,7 +980,7 @@ def test_global_tpr_delayValidFrom_2(topology_st, create_user, request):
 
     request.addfinalizer(fin)
 
-def test_global_tpr_delayValidFrom_3(topology_st, create_user, request):
+def test_global_tpr_delayValidFrom_3(topology_st, test_user, request):
    """Test global TPR policy : passwordTPRDelayValidFrom
     Test that a TPR attribute passwordTPRDelayValidFrom
     can be updated by DM but not the by user itself
@@ -940,7 +1082,7 @@ def test_global_tpr_delayValidFrom_3(topology_st, create_user, request):
 
     request.addfinalizer(fin)
 
-def test_global_tpr_delayExpireAt_1(topology_st, create_user, request):
+def test_global_tpr_delayExpireAt_1(topology_st, test_user, request):
     """Test global TPR policy : passwordTPRDelayExpireAt
     Test that a TPR password is not valid after reset time +
     passwordTPRDelayExpireAt
@@ -1010,7 +1152,7 @@ def test_global_tpr_delayExpireAt_1(topology_st, create_user, request):
 
     request.addfinalizer(fin)
 
-def test_global_tpr_delayExpireAt_2(topology_st, create_user, request):
+def test_global_tpr_delayExpireAt_2(topology_st, test_user, request):
     """Test global TPR policy : passwordTPRDelayExpireAt
     Test that a TPR password is valid before reset time +
     passwordTPRDelayExpireAt
@@ -1082,7 +1224,7 @@ def test_global_tpr_delayExpireAt_2(topology_st, create_user, request):
 
     request.addfinalizer(fin)
 
-def test_global_tpr_delayExpireAt_3(topology_st, create_user, request):
+def test_global_tpr_delayExpireAt_3(topology_st, test_user, request):
     """Test global TPR policy : passwordTPRDelayExpireAt
     Test that a TPR attribute passwordTPRDelayExpireAt
     can be updated by DM but not the by user itself
diff --git a/src/lib389/lib389/cli_conf/pwpolicy.py b/src/lib389/lib389/cli_conf/pwpolicy.py
index 2838afcb8..26af6e7ec 100644
--- a/src/lib389/lib389/cli_conf/pwpolicy.py
+++ b/src/lib389/lib389/cli_conf/pwpolicy.py
@@ -255,6 +255,9 @@ def create_parser(subparsers):
     set_parser.add_argument('--pwpinheritglobal', help="Set to \"on\" to allow local policies to inherit the global policy")
     set_parser.add_argument('--pwddictcheck', help="Set to \"on\" to enforce CrackLib dictionary checking")
     set_parser.add_argument('--pwddictpath', help="Filesystem path to specific/custom CrackLib dictionary files")
+    set_parser.add_argument('--pwptprmaxuse', help="Number of times a reset password can be used for authentication")
+    set_parser.add_argument('--pwptprdelayexpireat', help="Number of seconds after which a reset password expires")
+    set_parser.add_argument('--pwptprdelayvalidfrom', help="Number of seconds to wait before using a reset password to authenticated")
     # delete local password policy
     del_parser = local_subcommands.add_parser('remove', help='Remove a local password policy')
     del_parser.set_defaults(func=del_local_policy)
@@ -291,4 +294,4 @@ def create_parser(subparsers):
     #############################################
     set_parser.add_argument('DN', nargs=1, help='Set the local policy for this entry DN')
     add_subtree_parser.add_argument('DN', nargs=1, help='Add/replace the subtree policy for this entry DN')
-    add_user_parser.add_argument('DN', nargs=1, help='Add/replace the local password policy for this entry DN')
\ No newline at end of file
+    add_user_parser.add_argument('DN', nargs=1, help='Add/replace the local password policy for this entry DN')
diff --git a/src/lib389/lib389/pwpolicy.py b/src/lib389/lib389/pwpolicy.py
index 8653cb195..d2427933b 100644
--- a/src/lib389/lib389/pwpolicy.py
+++ b/src/lib389/lib389/pwpolicy.py
@@ -65,7 +65,10 @@ class PwPolicyManager(object):
             'pwddictcheck': 'passworddictcheck',
             'pwddictpath': 'passworddictpath',
             'pwdallowhash': 'nsslapd-allow-hashed-passwords',
-            'pwpinheritglobal': 'nsslapd-pwpolicy-inherit-global'
+            'pwpinheritglobal': 'nsslapd-pwpolicy-inherit-global',
+            'pwptprmaxuse': 'passwordTPRMaxUse',
+            'pwptprdelayexpireat': 'passwordTPRDelayExpireAt',
+            'pwptprdelayvalidfrom': 'passwordTPRDelayValidFrom'
         }
 
     def is_subtree_policy(self, dn):
--
2.31.1
@ -1,374 +0,0 @@
From d7b49259ff2f9e0295bbfeaf128369ed33421974 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Mon, 30 Nov 2020 15:28:05 +0000
Subject: [PATCH 1/6] Issue 4418 - ldif2db - offline. Warn the user of skipped
 entries

Bug Description: During an ldif2db import, entries that do not
conform to various constraints will be skipped and not imported.
On completion of an import with skipped entries, the server
returns a success exit code and logs the skipped entry detail to
the error logs. The success exit code could lead the user to
believe that all entries were successfully imported.

Fix Description: If a skipped entry occurs during import, the
import will continue and a warning will be returned to the user.

CLI tools for offline import updated to handle the warning code.

Test added to generate an incorrect ldif entry and perform an
import.

Fixes: #4418

Reviewed by: Firstyear, droideck (Thanks)

(cherry picked from commit a98fe54292e9b183a2163efbc7bdfe208d4abfb0)
---
 .../tests/suites/import/import_test.py        | 54 ++++++++++++++++++-
 .../slapd/back-ldbm/db-bdb/bdb_import.c       | 22 ++++++--
 ldap/servers/slapd/main.c                     |  8 +++
 ldap/servers/slapd/pblock.c                   | 24 +++++++++
 ldap/servers/slapd/pblock_v3.h                |  1 +
 ldap/servers/slapd/slapi-private.h            | 14 +++++
 src/lib389/lib389/__init__.py                 | 18 +++----
 src/lib389/lib389/_constants.py               |  7 +++
 src/lib389/lib389/cli_ctl/dbtasks.py          |  8 ++-
 9 files changed, 140 insertions(+), 16 deletions(-)

diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py
index 3803ecf43..b47db96ed 100644
--- a/dirsrvtests/tests/suites/import/import_test.py
+++ b/dirsrvtests/tests/suites/import/import_test.py
@@ -15,7 +15,7 @@ import pytest
 import time
 import glob
 from lib389.topologies import topology_st as topo
-from lib389._constants import DEFAULT_SUFFIX
+from lib389._constants import DEFAULT_SUFFIX, TaskWarning
 from lib389.dbgen import dbgen_users
 from lib389.tasks import ImportTask
 from lib389.index import Indexes
@@ -139,6 +139,38 @@ def _create_bogus_ldif(topo):
     return import_ldif1
 
 
+def _create_syntax_err_ldif(topo):
+    """
+    Create an incorrect ldif entry that violates syntax check
+    """
+    ldif_dir = topo.standalone.get_ldif_dir()
+    line1 = """dn: dc=example,dc=com
+objectClass: top
+objectClass: domain
+dc: example
+dn: ou=groups,dc=example,dc=com
+objectClass: top
+objectClass: organizationalUnit
+ou: groups
+dn: uid=JHunt,ou=groups,dc=example,dc=com
+objectClass: top
+objectClass: person
+objectClass: organizationalPerson
+objectClass: inetOrgPerson
+objectclass: inetUser
+cn: James Hunt
+sn: Hunt
+uid: JHunt
+givenName:
+"""
+    with open(f'{ldif_dir}/syntax_err.ldif', 'w') as out:
+        out.write(f'{line1}')
+        os.chmod(out.name, 0o777)
+    out.close()
+    import_ldif1 = ldif_dir + '/syntax_err.ldif'
+    return import_ldif1
+
+
 def test_import_with_index(topo, _import_clean):
     """
     Add an index, then import via cn=tasks
@@ -214,6 +246,26 @@ def test_ldif2db_allows_entries_without_a_parent_to_be_imported(topo, _import_cl
     topo.standalone.start()
 
 
+def test_ldif2db_syntax_check(topo):
+    """ldif2db should return a warning when a skipped entry has occured.
+    :id: 85e75670-42c5-4062-9edc-7f117c97a06f
+    :setup:
+        1. Standalone Instance
+        2. Ldif entry that violates syntax check rule (empty givenname)
+    :steps:
+        1. Create an ldif file which violates the syntax checking rule
+        2. Stop the server and import ldif file with ldif2db
+    :expected results:
+        1. ldif2db import returns a warning to signify skipped entries
+    """
+    import_ldif1 = _create_syntax_err_ldif(topo)
+    # Import the offending LDIF data - offline
+    topo.standalone.stop()
+    ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1)
+    assert ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY
+    topo.standalone.start()
+
+
 def test_issue_a_warning_if_the_cache_size_is_smaller(topo, _import_clean):
     """Report during startup if nsslapd-cachememsize is too small
 
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
index e7da0517f..1e4830e99 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
@@ -2563,7 +2563,7 @@ error:
             slapi_task_dec_refcount(job->task);
         }
         import_all_done(job, ret);
-        ret = 1;
+        ret |= WARN_UPGARDE_DN_FORMAT_ALL;
     } else if (NEED_DN_NORM == ret) {
         import_log_notice(job, SLAPI_LOG_NOTICE, "bdb_import_main",
                           "%s complete. %s needs upgradednformat.",
@@ -2572,7 +2572,7 @@ error:
             slapi_task_dec_refcount(job->task);
         }
         import_all_done(job, ret);
-        ret = 2;
+        ret |= WARN_UPGRADE_DN_FORMAT;
     } else if (NEED_DN_NORM_SP == ret) {
         import_log_notice(job, SLAPI_LOG_NOTICE, "bdb_import_main",
                           "%s complete. %s needs upgradednformat spaces.",
@@ -2581,7 +2581,7 @@ error:
             slapi_task_dec_refcount(job->task);
         }
         import_all_done(job, ret);
-        ret = 3;
+        ret |= WARN_UPGRADE_DN_FORMAT_SPACE;
     } else {
         ret = -1;
         if (job->task != NULL) {
@@ -2600,6 +2600,11 @@ error:
         import_all_done(job, ret);
     }
 
+    /* set task warning if there are no errors */
+    if((!ret) && (job->skipped)) {
+        ret |= WARN_SKIPPED_IMPORT_ENTRY;
+    }
+
     /* This instance isn't busy anymore */
     instance_set_not_busy(job->inst);
 
@@ -2637,6 +2642,7 @@ bdb_back_ldif2db(Slapi_PBlock *pb)
     int total_files, i;
     int up_flags = 0;
     PRThread *thread = NULL;
+    int ret = 0;
 
     slapi_pblock_get(pb, SLAPI_BACKEND, &be);
     if (be == NULL) {
@@ -2764,7 +2770,15 @@ bdb_back_ldif2db(Slapi_PBlock *pb)
     }
 
     /* old style -- do it all synchronously (THIS IS GOING AWAY SOON) */
-    return import_main_offline((void *)job);
+    ret = import_main_offline((void *)job);
+
+    /* no error just warning, reset ret */
+    if(ret &= WARN_SKIPPED_IMPORT_ENTRY) {
+        slapi_pblock_set_task_warning(pb, WARN_SKIPPED_IMPORT_ENTRY);
+        ret = 0;
+    }
+
+    return ret;
 }
 
 struct _import_merge_thang
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 694375b22..104f6826c 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -2069,6 +2069,14 @@ slapd_exemode_ldif2db(struct main_config *mcfg)
                       plugin->plg_name);
         return_value = -1;
     }
+
+    /* check for task warnings */
+    if(!return_value) {
+        if((return_value = slapi_pblock_get_task_warning(pb))) {
+            slapi_log_err(SLAPI_LOG_INFO, "slapd_exemode_ldif2db","returning task warning: %d\n", return_value);
+        }
+    }
+
     slapi_pblock_destroy(pb);
     charray_free(instances);
     charray_free(mcfg->cmd_line_instance_names);
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index 454ea9cc3..1ad9d0399 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -28,12 +28,14 @@
 #define SLAPI_LDIF_DUMP_REPLICA 2003
 #define SLAPI_PWDPOLICY 2004
 #define SLAPI_PW_ENTRY 2005
+#define SLAPI_TASK_WARNING 2006
 
 /* Used for checking assertions about pblocks in some cases. */
 #define SLAPI_HINT 9999
 
 static PRLock *pblock_analytics_lock = NULL;
 
+
 static PLHashNumber
 hash_int_func(const void *key)
 {
@@ -4315,6 +4317,28 @@ slapi_pblock_set_ldif_dump_replica(Slapi_PBlock *pb, int32_t dump_replica)
     pb->pb_task->ldif_dump_replica = dump_replica;
 }
 
+int32_t
+slapi_pblock_get_task_warning(Slapi_PBlock *pb)
+{
+#ifdef PBLOCK_ANALYTICS
+    pblock_analytics_record(pb, SLAPI_TASK_WARNING);
+#endif
+    if (pb->pb_task != NULL) {
+        return pb->pb_task->task_warning;
+    }
+    return 0;
+}
+
+void
+slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warning)
+{
+#ifdef PBLOCK_ANALYTICS
+    pblock_analytics_record(pb, SLAPI_TASK_WARNING);
+#endif
+    _pblock_assert_pb_task(pb);
+    pb->pb_task->task_warning = warning;
+}
+
 void *
 slapi_pblock_get_vattr_context(Slapi_PBlock *pb)
 {
diff --git a/ldap/servers/slapd/pblock_v3.h b/ldap/servers/slapd/pblock_v3.h
index 90498c0b0..b35d78565 100644
--- a/ldap/servers/slapd/pblock_v3.h
+++ b/ldap/servers/slapd/pblock_v3.h
@@ -67,6 +67,7 @@ typedef struct _slapi_pblock_task
     int ldif2db_noattrindexes;
     int ldif_printkey;
     int task_flags;
+    int32_t task_warning;
     int import_state;
 
     int server_running; /* indicate that server is running */
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index c98c1947c..31cb33472 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1465,6 +1465,20 @@ void slapi_pblock_set_operation_notes(Slapi_PBlock *pb, uint32_t opnotes);
 void slapi_pblock_set_flag_operation_notes(Slapi_PBlock *pb, uint32_t opflag);
 void slapi_pblock_set_result_text_if_empty(Slapi_PBlock *pb, char *text);
 
+/* task warnings */
+typedef enum task_warning_t{
+    WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0),
+    WARN_UPGRADE_DN_FORMAT = (1 << 1),
+    WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
+    WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
+} task_warning;
+
+int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
+void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);
+
+
+int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 4e6a1905a..5b36a79e1 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -2683,7 +2683,7 @@ class DirSrv(SimpleLDAPObject, object):
     # server is stopped)
     #
     def ldif2db(self, bename, suffixes, excludeSuffixes, encrypt,
-                import_file):
+                import_file, import_cl):
         """
         @param bename - The backend name of the database to import
         @param suffixes - List/tuple of suffixes to import
@@ -2731,14 +2731,14 @@ class DirSrv(SimpleLDAPObject, object):
         try:
             result = subprocess.check_output(cmd, encoding='utf-8')
         except subprocess.CalledProcessError as e:
-            self.log.debug("Command: %s failed with the return code %s and the error %s",
-                           format_cmd_list(cmd), e.returncode, e.output)
-            return False
-
-        self.log.debug("ldif2db output: BEGIN")
-        for line in result.split("\n"):
-            self.log.debug(line)
-        self.log.debug("ldif2db output: END")
+            if e.returncode == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY:
+                self.log.debug("Command: %s skipped import entry warning %s",
+                               format_cmd_list(cmd), e.returncode)
+                return e.returncode
+            else:
+                self.log.debug("Command: %s failed with the return code %s and the error %s",
+                               format_cmd_list(cmd), e.returncode, e.output)
+                return False
 
         return True
 
diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py
index e28c602a3..38ba04565 100644
--- a/src/lib389/lib389/_constants.py
+++ b/src/lib389/lib389/_constants.py
@@ -162,6 +162,13 @@ DB2BAK = 'db2bak'
 DB2INDEX = 'db2index'
 DBSCAN = 'dbscan'
 
+# Task warnings
+class TaskWarning(IntEnum):
+    WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0)
+    WARN_UPGRADE_DN_FORMAT = (1 << 1)
+    WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2)
+    WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
+
 RDN_REPLICA = "cn=replica"
 
 RETROCL_SUFFIX = "cn=changelog"
diff --git a/src/lib389/lib389/cli_ctl/dbtasks.py b/src/lib389/lib389/cli_ctl/dbtasks.py
index 590a1ea0e..02830239c 100644
--- a/src/lib389/lib389/cli_ctl/dbtasks.py
+++ b/src/lib389/lib389/cli_ctl/dbtasks.py
@@ -7,6 +7,7 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 
+from lib389._constants import TaskWarning
 
 def dbtasks_db2index(inst, log, args):
     if not inst.db2index(bename=args.backend):
@@ -44,10 +45,13 @@ def dbtasks_db2ldif(inst, log, args):
 
 
 def dbtasks_ldif2db(inst, log, args):
-    if not inst.ldif2db(bename=args.backend, encrypt=args.encrypted, import_file=args.ldif,
-                        suffixes=None, excludeSuffixes=None):
+    ret = inst.ldif2db(bename=args.backend, encrypt=args.encrypted, import_file=args.ldif,
+                       suffixes=None, excludeSuffixes=None, import_cl=False)
+    if not ret:
         log.fatal("ldif2db failed")
         return False
+    elif ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY:
+        log.warn("ldif2db successful with skipped entries")
     else:
         log.info("ldif2db successful")
 
--
2.26.2
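
The warning codes introduced above are deliberately power-of-two bits, so one import run can accumulate several warnings in a single return value and callers can test each one independently; the old exclusive codes 1/2/3 could not be combined. A minimal, standalone C illustration of the same flag scheme (enum spelling normalized for the sketch):

#include <stdio.h>

typedef enum {
    WARN_UPGRADE_DN_FORMAT_ALL   = (1 << 0),
    WARN_UPGRADE_DN_FORMAT       = (1 << 1),
    WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
    WARN_SKIPPED_IMPORT_ENTRY    = (1 << 3)
} task_warning;

int main(void)
{
    int ret = 0;
    ret |= WARN_UPGRADE_DN_FORMAT;      /* one condition observed ...          */
    ret |= WARN_SKIPPED_IMPORT_ENTRY;   /* ... and another, in the same run    */

    /* Each bit can be tested on its own, unlike the old 1/2/3 return codes. */
    if (ret & WARN_SKIPPED_IMPORT_ENTRY) {
        printf("import finished with skipped entries (ret=%d)\n", ret);
    }
    return 0;
}
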
@ -0,0 +1,179 @@
From 7b7217538908ae58df864ef5cd82e1d3303c189f Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 7 Jun 2021 12:58:42 -0400
Subject: [PATCH] Issue 4447 - Crash when the Referential Integrity log is
 manually edited

Bug Description: If the referint log is manually edited with a string
                 that is not a DN the server will crash when processing
                 the log.

Fix Description: Check for NULL pointers when strtoking the file line.

relates: https://github.com/389ds/389-ds-base/issues/4447

Reviewed by: firstyear(Thanks!)
---
 .../tests/suites/plugins/referint_test.py | 72 +++++++++++++++----
 ldap/servers/plugins/referint/referint.c  |  7 ++
 src/lib389/lib389/plugins.py              | 15 ++++
 3 files changed, 80 insertions(+), 14 deletions(-)

diff --git a/dirsrvtests/tests/suites/plugins/referint_test.py b/dirsrvtests/tests/suites/plugins/referint_test.py
index 02b985767..fda602545 100644
--- a/dirsrvtests/tests/suites/plugins/referint_test.py
+++ b/dirsrvtests/tests/suites/plugins/referint_test.py
@@ -1,5 +1,5 @@
 # --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2016 Red Hat, Inc.
+# Copyright (C) 2021 Red Hat, Inc.
 # All rights reserved.
 #
 # License: GPL (version 3 or any later version).
@@ -12,13 +12,11 @@ Created on Dec 12, 2019
 @author: tbordaz
 '''
 import logging
-import subprocess
 import pytest
 from lib389 import Entry
-from lib389.utils import *
-from lib389.plugins import *
-from lib389._constants import *
-from lib389.idm.user import UserAccounts, UserAccount
+from lib389.plugins import ReferentialIntegrityPlugin
+from lib389._constants import DEFAULT_SUFFIX
+from lib389.idm.user import UserAccounts
 from lib389.idm.group import Groups
 from lib389.topologies import topology_st as topo
 
@@ -29,21 +27,27 @@ log = logging.getLogger(__name__)
 ESCAPED_RDN_BASE = "foo\\,oo"
 def _user_get_dn(no):
     uid = '%s%d' % (ESCAPED_RDN_BASE, no)
-    dn = 'uid=%s,%s' % (uid, SUFFIX)
+    dn = 'uid=%s,%s' % (uid, DEFAULT_SUFFIX)
     return (uid, dn)
 
 def add_escaped_user(server, no):
     (uid, dn) = _user_get_dn(no)
     log.fatal('Adding user (%s): ' % dn)
-    server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'],
-                             'uid': [uid],
-                             'sn' : [uid],
-                             'cn' : [uid]})))
+    users = UserAccounts(server, DEFAULT_SUFFIX, None)
+    user_properties = {
+        'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson', 'posixAccount'],
+        'uid': uid,
+        'cn' : uid,
+        'sn' : uid,
+        'uidNumber' : '1000',
+        'gidNumber' : '2000',
+        'homeDirectory' : '/home/testuser',
+    }
+    users.create(properties=user_properties)
     return dn
 
-@pytest.mark.ds50020
 def test_referential_false_failure(topo):
-    """On MODRDN referential integrity can erronously fail
+    """On MODRDN referential integrity can erroneously fail
 
     :id: f77aeb80-c4c4-471b-8c1b-4733b714778b
     :setup: Standalone Instance
@@ -100,6 +104,46 @@ def test_referential_false_failure(topo):
     inst.restart()
 
     # Here if the bug is fixed, referential is able to update the member value
-    inst.rename_s(user1.dn, 'uid=new_test_user_1001', newsuperior=SUFFIX, delold=0)
+    user1.rename('uid=new_test_user_1001', newsuperior=DEFAULT_SUFFIX, deloldrdn=False)
 
 
+def test_invalid_referint_log(topo):
+    """If there is an invalid log line in the referint log, make sure the server
+    does not crash at startup
+
+    :id: 34807b5a-ab17-4281-ae48-4e3513e19145
+    :setup: Standalone Instance
+    :steps:
+        1. Set the referint log delay
+        2. Create invalid log
+        3. Start the server (no crash)
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+    """
+
+    inst = topo.standalone
+
+    # Set delay - required for log parsing at server startup
+    plugin = ReferentialIntegrityPlugin(inst)
+    plugin.enable()
+    plugin.set_update_delay('2')
+    logfile = plugin.get_log_file()
+    inst.restart()
+
+    # Create invalid log
+    inst.stop()
+    with open(logfile, 'w') as log_fh:
+        log_fh.write("CRASH\n")
+
+    # Start the instance
+    inst.start()
+    assert inst.status()
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
index fd5356d72..28240c1f6 100644
--- a/ldap/servers/plugins/referint/referint.c
+++ b/ldap/servers/plugins/referint/referint.c
@@ -1447,6 +1447,13 @@ referint_thread_func(void *arg __attribute__((unused)))
             sdn = slapi_sdn_new_normdn_byref(ptoken);
             ptoken = ldap_utf8strtok_r(NULL, delimiter, &iter);
 
+            if (ptoken == NULL) {
+                /* Invalid line in referint log, skip it */
+                slapi_log_err(SLAPI_LOG_ERR, REFERINT_PLUGIN_SUBSYSTEM,
+                              "Skipping invalid referint log line: (%s)\n", thisline);
+                slapi_sdn_free(&sdn);
+                continue;
+            }
             if (!strcasecmp(ptoken, "NULL")) {
                 tmprdn = NULL;
             } else {
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
index 2d88e60bd..b07e80022 100644
--- a/src/lib389/lib389/plugins.py
+++ b/src/lib389/lib389/plugins.py
@@ -518,6 +518,21 @@ class ReferentialIntegrityPlugin(Plugin):
 
         self.set('referint-update-delay', str(value))
 
+    def get_log_file(self):
+        """Get referint log file"""
+
+        return self.get_attr_val_utf8('referint-logfile')
+
+    def get_log_file_formatted(self):
+        """Get referint log file"""
+
+        return self.display_attr('referint-logfile')
+
+    def set_log_file(self, value):
+        """Set referint log file"""
+
+        self.set('referint-logfile', value)
+
     def get_membership_attr(self, formatted=False):
         """Get referint-membership-attr attribute"""
 
--
2.31.1
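
The defensive pattern in the referint.c hunk generalizes: every token returned by a tokenizer must be NULL-checked before use, and a malformed line should be skipped after releasing anything already allocated for it. A small standalone C sketch of that pattern using POSIX strtok_r (the plugin itself uses ldap_utf8strtok_r and a Slapi_DN; this sketch only mirrors the control flow):

#include <stdio.h>
#include <string.h>

static void process_line(char *line)
{
    char *saveptr = NULL;
    char *dn  = strtok_r(line, "\t", &saveptr);
    char *rdn = strtok_r(NULL, "\t", &saveptr);

    if (dn == NULL || rdn == NULL) {
        /* Same idea as the patch: log, release partial state, move on
         * (the patch frees the half-built sdn and does `continue`). */
        fprintf(stderr, "Skipping invalid referint log line: (%s)\n",
                dn ? dn : "");
        return;
    }
    printf("dn=%s rdn=%s\n", dn, rdn);
}

int main(void)
{
    char good[] = "uid=a,dc=example,dc=com\tNULL";
    char bad[]  = "CRASH";   /* the malformed line from the new test */
    process_line(good);
    process_line(bad);       /* skipped instead of dereferencing NULL */
    return 0;
}
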
|
||||

@ -1,52 +0,0 @@
From 97bdef2d562e447d521202beb485c3948b0e7214 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Mon, 30 Nov 2020 15:28:05 +0000
Subject: [PATCH 2/6] Issue 4418 - ldif2db - offline. Warn the user of skipped
entries

Bug Description: During an ldif2db import entries that do not
conform to various constraints will be skipped and not imported.
On completion of an import with skipped entries, the server
returns a success exit code and logs the skipped entry detail to
the error logs. The success exit code could lead the user to
believe that all entries were successfully imported.

Fix Description: If a skipped entry occurs during import, the
import will continue and a warning will be returned to the user.

CLI tools for offline import updated to handle warning code.

Test added to generate an incorrect ldif entry and perform an
import.

Fixes: #4418

Reviewed by: Firstyear, droideck (Thanks)
---
ldap/servers/slapd/slapi-private.h | 10 ++++++++++
1 file changed, 10 insertions(+)

diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index 31cb33472..e0092d571 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1476,6 +1476,16 @@ typedef enum task_warning_t{
int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);

+/* task warnings */
+typedef enum task_warning_t{
+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0),
+ WARN_UPGRADE_DN_FORMAT = (1 << 1),
+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
+} task_warning;
+
+int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
+void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);

int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name);

--
2.26.2
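Because the warnings declared above are distinct bit flags, several of them can be packed into the single integer that slapi_pblock_get_task_warning() returns, and a caller tests each flag independently. A hedged Python illustration of the decoding idea (the constant values simply mirror the C enum; this is not server code):

    # Bit-flag decoding sketch; values copied from the task_warning enum above.
    WARN_UPGRADE_DN_FORMAT = 1 << 1
    WARN_SKIPPED_IMPORT_ENTRY = 1 << 3

    warn = WARN_UPGRADE_DN_FORMAT | WARN_SKIPPED_IMPORT_ENTRY  # two warnings at once
    if warn & WARN_SKIPPED_IMPORT_ENTRY:
        print("import finished, but some entries were skipped")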

114
SOURCES/0021-Issue-4791-Missing-dependency-for-RetroCL-RFE.patch
Normal file
@ -0,0 +1,114 @@
From 964a153b420b26140e0bbddfbebb4a51aaa0e4ea Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Thu, 3 Jun 2021 15:16:22 +0000
Subject: [PATCH 1/7] Issue 4791 - Missing dependency for RetroCL RFE

Description: The RetroCL exclude attribute RFE is dependent on functionality of the
EntryUUID bug fix, that didn't make it into the latest build. This breaks the
RetroCL exclude attr feature so we need to provide a workaround.

Fixes: https://github.com/389ds/389-ds-base/issues/4791

Relates: https://github.com/389ds/389-ds-base/pull/4723

Relates: https://github.com/389ds/389-ds-base/issues/4224

Reviewed by: tbordaz, droideck (Thank you)
---
.../tests/suites/retrocl/basic_test.py | 6 ++--
.../lib389/cli_conf/plugins/retrochangelog.py | 35 +++++++++++++++++--
2 files changed, 36 insertions(+), 5 deletions(-)

diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
index 112c73cb9..f3bc50f29 100644
--- a/dirsrvtests/tests/suites/retrocl/basic_test.py
+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
@@ -17,7 +17,7 @@ from lib389.utils import *
from lib389.tasks import *
from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
from lib389.cli_base.dsrc import dsrc_arg_concat
-from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add
+from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add_attr
from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts

pytestmark = pytest.mark.tier1
@@ -122,7 +122,7 @@ def test_retrocl_exclude_attr_add(topology_st):
args.bindpw = None
args.prompt = False
args.exclude_attrs = ATTR_HOMEPHONE
- args.func = retrochangelog_add
+ args.func = retrochangelog_add_attr
dsrc_inst = dsrc_arg_concat(args, None)
inst = connect_instance(dsrc_inst, False, args)
result = args.func(inst, None, log, args)
@@ -255,7 +255,7 @@ def test_retrocl_exclude_attr_mod(topology_st):
args.bindpw = None
args.prompt = False
args.exclude_attrs = ATTR_CARLICENSE
- args.func = retrochangelog_add
+ args.func = retrochangelog_add_attr
dsrc_inst = dsrc_arg_concat(args, None)
inst = connect_instance(dsrc_inst, False, args)
result = args.func(inst, None, log, args)
diff --git a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py
index 9940c6532..160fbb82d 100644
--- a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py
+++ b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py
@@ -6,8 +6,13 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---

+# JC Work around for missing dependency on https://github.com/389ds/389-ds-base/pull/4344
+import ldap
+
from lib389.plugins import RetroChangelogPlugin
-from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit
+# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344
+# from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add_attr
+from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, _args_to_attrs

arg_to_attr = {
'is_replicated': 'isReplicated',
@@ -18,12 +23,38 @@ arg_to_attr = {
'exclude_attrs': 'nsslapd-exclude-attrs'
}

-
def retrochangelog_edit(inst, basedn, log, args):
log = log.getChild('retrochangelog_edit')
plugin = RetroChangelogPlugin(inst)
generic_object_edit(plugin, log, args, arg_to_attr)

+# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344
+def retrochangelog_add_attr(inst, basedn, log, args):
+ log = log.getChild('retrochangelog_add_attr')
+ plugin = RetroChangelogPlugin(inst)
+ generic_object_add_attr(plugin, log, args, arg_to_attr)
+
+# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344
+def generic_object_add_attr(dsldap_object, log, args, arg_to_attr):
+ """Add an attribute to the entry. This differs to 'edit' as edit uses replace,
+ and this allows multivalues to be added.
+
+ dsldap_object should be a single instance of DSLdapObject with a set dn
+ """
+ log = log.getChild('generic_object_add_attr')
+ # Gather the attributes
+ attrs = _args_to_attrs(args, arg_to_attr)
+
+ modlist = []
+ for attr, value in attrs.items():
+ if not isinstance(value, list):
+ value = [value]
+ modlist.append((ldap.MOD_ADD, attr, value))
+ if len(modlist) > 0:
+ dsldap_object.apply_mods(modlist)
+ log.info("Successfully changed the %s", dsldap_object.dn)
+ else:
+ raise ValueError("There is nothing to set in the %s plugin entry" % dsldap_object.dn)

def _add_parser_args(parser):
parser.add_argument('--is-replicated', choices=['TRUE', 'FALSE'], type=str.upper,
--
2.31.1
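The point of the generic_object_add_attr() workaround above is the modify type: a generic edit uses ldap.MOD_REPLACE, which overwrites every existing value, while nsslapd-exclude-attrs is multivalued and needs ldap.MOD_ADD to append. A small python-ldap sketch of that distinction (the server URL and credentials are illustrative, not defaults to rely on):

    # Sketch: MOD_ADD appends to a multivalued attribute, MOD_REPLACE clobbers it.
    import ldap

    conn = ldap.initialize('ldap://localhost:389')
    conn.simple_bind_s('cn=Directory Manager', 'password')
    dn = 'cn=Retro Changelog Plugin,cn=plugins,cn=config'
    # MOD_REPLACE would drop any exclude attribute already configured:
    #   conn.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-exclude-attrs', [b'homePhone'])])
    # MOD_ADD keeps existing values and appends a new one:
    conn.modify_s(dn, [(ldap.MOD_ADD, 'nsslapd-exclude-attrs', [b'carLicense'])])
    conn.unbind_s()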

@ -1,34 +0,0 @@
From 22fb8b2690a5fa364d252846f06b77b5fec8c602 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 7 Jan 2021 10:27:43 -0500
Subject: [PATCH 3/6] Fix cherry-pick error

---
ldap/servers/slapd/slapi-private.h | 11 -----------
1 file changed, 11 deletions(-)

diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index e0092d571..d5abe8ac1 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1476,17 +1476,6 @@ typedef enum task_warning_t{
int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);

-/* task warnings */
-typedef enum task_warning_t{
- WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0),
- WARN_UPGRADE_DN_FORMAT = (1 << 1),
- WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
- WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
-} task_warning;
-
-int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
-void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);
-
int slapi_exists_or_add_internal(Slapi_DN *dn, const char *filter, const char *entry, const char *modifier_name);

#ifdef __cplusplus
--
2.26.2

@ -0,0 +1,642 @@
From d2ac7e98d53cfe6c74c99ddf3504b1072418f05a Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 11 Mar 2021 10:12:46 -0500
Subject: [PATCH] Issue 4656 - remove problematic language from ds-replcheck

Description: remove master from ds-replcheck and replace it with supplier

relates: https://github.com/389ds/389-ds-base/issues/4656

Reviewed by: mreynolds

---
ldap/admin/src/scripts/ds-replcheck | 202 ++++++++++++++--------------
1 file changed, 101 insertions(+), 101 deletions(-)

diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
index 169496e8f..f411f357a 100755
--- a/ldap/admin/src/scripts/ds-replcheck
+++ b/ldap/admin/src/scripts/ds-replcheck
@@ -1,7 +1,7 @@
#!/usr/bin/python3

# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2020 Red Hat, Inc.
+# Copyright (C) 2021 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -63,7 +63,7 @@ def remove_entry(rentries, dn):
def get_ruv_time(ruv, rid):
"""Take a RUV element (nsds50ruv attribute) and extract the timestamp from maxcsn
:param ruv - A lsit of RUV elements
- :param rid - The rid of the master to extractthe maxcsn time from
+ :param rid - The rid of the supplier to extract the maxcsn time from
:return: The time in seconds of the maxcsn, or 0 if there is no maxcsn, or -1 if
the rid was not found
"""
@@ -213,22 +213,22 @@ def get_ruv_state(opts):
:param opts - all the script options
:return - A text description of the replicaton state
"""
- mtime = get_ruv_time(opts['master_ruv'], opts['rid'])
+ mtime = get_ruv_time(opts['supplier_ruv'], opts['rid'])
rtime = get_ruv_time(opts['replica_ruv'], opts['rid'])
if mtime == -1:
- repl_state = "Replication State: Replica ID ({}) not found in Master's RUV".format(opts['rid'])
+ repl_state = "Replication State: Replica ID ({}) not found in Supplier's RUV".format(opts['rid'])
elif rtime == -1:
repl_state = "Replication State: Replica ID ({}) not found in Replica's RUV (not initialized?)".format(opts['rid'])
elif mtime == 0:
- repl_state = "Replication State: Master has not seen any updates"
+ repl_state = "Replication State: Supplier has not seen any updates"
elif rtime == 0:
- repl_state = "Replication State: Replica has not seen any changes from the Master"
+ repl_state = "Replication State: Replica has not seen any changes from the Supplier"
elif mtime > rtime:
- repl_state = "Replication State: Replica is behind Master by: {} seconds".format(mtime - rtime)
+ repl_state = "Replication State: Replica is behind Supplier by: {} seconds".format(mtime - rtime)
elif mtime < rtime:
- repl_state = "Replication State: Replica is ahead of Master by: {} seconds".format(rtime - mtime)
+ repl_state = "Replication State: Replica is ahead of Supplier by: {} seconds".format(rtime - mtime)
else:
- repl_state = "Replication State: Master and Replica are in perfect synchronization"
+ repl_state = "Replication State: Supplier and Replica are in perfect synchronization"

return repl_state

@@ -238,11 +238,11 @@ def get_ruv_report(opts):
:param opts - all the script options
:return - A text blob to display in the report
"""
- opts['master_ruv'].sort()
+ opts['supplier_ruv'].sort()
opts['replica_ruv'].sort()

- report = "Master RUV:\n"
- for element in opts['master_ruv']:
+ report = "Supplier RUV:\n"
+ for element in opts['supplier_ruv']:
report += " %s\n" % (element)
report += "\nReplica RUV:\n"
for element in opts['replica_ruv']:
@@ -521,7 +521,7 @@ def get_ldif_ruv(LDIF, opts):

def cmp_entry(mentry, rentry, opts):
"""Compare the two entries, and return a "diff map"
- :param mentry - A Master entry
+ :param mentry - A Supplier entry
:param rentry - A Replica entry
:param opts - A Dict of the scripts options
:return - A Dict of the differences in the entry, or None
@@ -536,7 +536,7 @@ def cmp_entry(mentry, rentry, opts):
mlist = list(mentry.data.keys())

#
- # Check master
+ # Check Supplier
#
for mattr in mlist:
if mattr in opts['ignore']:
@@ -555,7 +555,7 @@ def cmp_entry(mentry, rentry, opts):
if not found:
diff['missing'].append("")
found = True
- diff['missing'].append(" - Master's State Info: %s" % (val))
+ diff['missing'].append(" - Supplier's State Info: %s" % (val))
diff['missing'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
else:
# No state info, just move on
@@ -566,18 +566,18 @@ def cmp_entry(mentry, rentry, opts):
if report_conflict(rentry, mattr, opts) and report_conflict(mentry, mattr, opts):
diff['diff'].append(" - Attribute '%s' is different:" % mattr)
if 'nscpentrywsi' in mentry.data:
- # Process Master
+ # Process Supplier
found = False
for val in mentry.data['nscpentrywsi']:
if val.lower().startswith(mattr + ';'):
if not found:
- diff['diff'].append(" Master:")
+ diff['diff'].append(" Supplier:")
diff['diff'].append(" - Value: %s" % (val.split(':')[1].lstrip()))
diff['diff'].append(" - State Info: %s" % (val))
diff['diff'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
found = True
if not found:
- diff['diff'].append(" Master: ")
+ diff['diff'].append(" Supplier: ")
for val in mentry.data[mattr]:
# This is an "origin" value which means it's never been
# updated since replication was set up. So its the
@@ -605,7 +605,7 @@ def cmp_entry(mentry, rentry, opts):
diff['diff'].append("")
else:
# no state info, report what we got
- diff['diff'].append(" Master: ")
+ diff['diff'].append(" Supplier: ")
for val in mentry.data[mattr]:
diff['diff'].append(" - %s: %s" % (mattr, val))
diff['diff'].append(" Replica: ")
@@ -622,9 +622,9 @@ def cmp_entry(mentry, rentry, opts):
continue

if rattr not in mlist:
- # Master is missing the attribute
+ # Supplier is missing the attribute
if report_conflict(rentry, rattr, opts):
- diff['missing'].append(" - Master missing attribute: \"%s\"" % (rattr))
+ diff['missing'].append(" - Supplier missing attribute: \"%s\"" % (rattr))
diff_count += 1
if 'nscpentrywsi' in rentry.data:
found = False
@@ -663,7 +663,7 @@ def do_offline_report(opts, output_file=None):
try:
MLDIF = open(opts['mldif'], "r")
except Exception as e:
- print('Failed to open Master LDIF: ' + str(e))
+ print('Failed to open Supplier LDIF: ' + str(e))
return

try:
@@ -676,10 +676,10 @@ def do_offline_report(opts, output_file=None):
# Verify LDIF Files
try:
if opts['verbose']:
- print("Validating Master ldif file ({})...".format(opts['mldif']))
+ print("Validating Supplier ldif file ({})...".format(opts['mldif']))
LDIFRecordList(MLDIF).parse()
except ValueError:
- print('Master LDIF file in invalid, aborting...')
+ print('Supplier LDIF file in invalid, aborting...')
MLDIF.close()
RLDIF.close()
return
@@ -696,34 +696,34 @@ def do_offline_report(opts, output_file=None):
# Get all the dn's, and entry counts
if opts['verbose']:
print ("Gathering all the DN's...")
- master_dns = get_dns(MLDIF, opts['mldif'], opts)
+ supplier_dns = get_dns(MLDIF, opts['mldif'], opts)
replica_dns = get_dns(RLDIF, opts['rldif'], opts)
- if master_dns is None or replica_dns is None:
+ if supplier_dns is None or replica_dns is None:
print("Aborting scan...")
MLDIF.close()
RLDIF.close()
sys.exit(1)
- m_count = len(master_dns)
+ m_count = len(supplier_dns)
r_count = len(replica_dns)

# Get DB RUV
if opts['verbose']:
print ("Gathering the database RUV's...")
- opts['master_ruv'] = get_ldif_ruv(MLDIF, opts)
+ opts['supplier_ruv'] = get_ldif_ruv(MLDIF, opts)
opts['replica_ruv'] = get_ldif_ruv(RLDIF, opts)

- """ Compare the master entries with the replica's. Take our list of dn's from
- the master ldif and get that entry( dn) from the master and replica ldif. In
+ """ Compare the Supplier entries with the replica's. Take our list of dn's from
+ the Supplier ldif and get that entry( dn) from the Supplier and replica ldif. In
this phase we keep keep track of conflict/tombstone counts, and we check for
missing entries and entry differences. We only need to do the entry diff
checking in this phase - we do not need to do it when process the replica dn's
because if the entry exists in both LDIF's then we already checked or diffs
- while processing the master dn's.
+ while processing the Supplier dn's.
"""
if opts['verbose']:
- print ("Comparing Master to Replica...")
+ print ("Comparing Supplier to Replica...")
missing = False
- for dn in master_dns:
+ for dn in supplier_dns:
mresult = ldif_search(MLDIF, dn)
if mresult['entry'] is None and mresult['conflict'] is None and not mresult['tombstone']:
# Try from the beginning
@@ -736,7 +736,7 @@ def do_offline_report(opts, output_file=None):
rresult['conflict'] is not None or rresult['tombstone']):
""" We can safely remove this DN from the replica dn list as it
does not need to be checked again. This also speeds things up
- when doing the replica vs master phase.
+ when doing the replica vs Supplier phase.
"""
replica_dns.remove(dn)

@@ -766,7 +766,7 @@ def do_offline_report(opts, output_file=None):
missing_report += (' Entries missing on Replica:\n')
missing = True
if mresult['entry'] and 'createtimestamp' in mresult['entry'].data:
- missing_report += (' - %s (Created on Master at: %s)\n' %
+ missing_report += (' - %s (Created on Supplier at: %s)\n' %
(dn, convert_timestamp(mresult['entry'].data['createtimestamp'][0])))
else:
missing_report += (' - %s\n' % dn)
@@ -791,7 +791,7 @@ def do_offline_report(opts, output_file=None):
remaining conflict & tombstone entries as well.
"""
if opts['verbose']:
- print ("Comparing Replica to Master...")
+ print ("Comparing Replica to Supplier...")
MLDIF.seek(0)
RLDIF.seek(0)
missing = False
@@ -811,7 +811,7 @@ def do_offline_report(opts, output_file=None):
if mresult['entry'] is None and mresult['glue'] is None:
MLDIF.seek(rresult['idx']) # Set the LDIF cursor/index to the last good line
if not missing:
- missing_report += (' Entries missing on Master:\n')
+ missing_report += (' Entries missing on Supplier:\n')
missing = True
if rresult['entry'] and 'createtimestamp' in rresult['entry'].data:
missing_report += (' - %s (Created on Replica at: %s)\n' %
@@ -837,12 +837,12 @@ def do_offline_report(opts, output_file=None):
final_report += get_ruv_report(opts)
final_report += ('Entry Counts\n')
final_report += ('=====================================================\n\n')
- final_report += ('Master: %d\n' % (m_count))
+ final_report += ('Supplier: %d\n' % (m_count))
final_report += ('Replica: %d\n\n' % (r_count))

final_report += ('\nTombstones\n')
final_report += ('=====================================================\n\n')
- final_report += ('Master: %d\n' % (mtombstones))
+ final_report += ('Supplier: %d\n' % (mtombstones))
final_report += ('Replica: %d\n' % (rtombstones))

final_report += get_conflict_report(mconflicts, rconflicts, opts['conflicts'])
@@ -859,9 +859,9 @@ def do_offline_report(opts, output_file=None):
final_report += ('\nResult\n')
final_report += ('=====================================================\n\n')
if missing_report == "" and len(diff_report) == 0:
- final_report += ('No replication differences between Master and Replica\n')
+ final_report += ('No replication differences between Supplier and Replica\n')
else:
- final_report += ('There are replication differences between Master and Replica\n')
+ final_report += ('There are replication differences between Supplier and Replica\n')

if output_file:
output_file.write(final_report)
@@ -871,8 +871,8 @@ def do_offline_report(opts, output_file=None):

def check_for_diffs(mentries, mglue, rentries, rglue, report, opts):
"""Online mode only - Check for diffs, return the updated report
- :param mentries - Master entries
- :param mglue - Master glue entries
+ :param mentries - Supplier entries
+ :param mglue - Supplier glue entries
:param rentries - Replica entries
:param rglue - Replica glue entries
:param report - A Dict of the entire report
@@ -947,8 +947,8 @@ def validate_suffix(ldapnode, suffix, hostname):
# Check suffix is replicated
try:
replica_filter = "(&(objectclass=nsds5replica)(nsDS5ReplicaRoot=%s))" % suffix
- master_replica = ldapnode.search_s("cn=config",ldap.SCOPE_SUBTREE,replica_filter)
- if (len(master_replica) != 1):
+ supplier_replica = ldapnode.search_s("cn=config",ldap.SCOPE_SUBTREE,replica_filter)
+ if (len(supplier_replica) != 1):
print("Error: Failed to validate suffix in {}. {} is not replicated.".format(hostname, suffix))
return False
except ldap.LDAPError as e:
@@ -969,7 +969,7 @@ def connect_to_replicas(opts):
muri = "%s://%s" % (opts['mprotocol'], opts['mhost'].replace("/", "%2f"))
else:
muri = "%s://%s:%s/" % (opts['mprotocol'], opts['mhost'], opts['mport'])
- master = SimpleLDAPObject(muri)
+ supplier = SimpleLDAPObject(muri)

if opts['rprotocol'].lower() == 'ldapi':
ruri = "%s://%s" % (opts['rprotocol'], opts['rhost'].replace("/", "%2f"))
@@ -978,23 +978,23 @@ def connect_to_replicas(opts):
replica = SimpleLDAPObject(ruri)

# Set timeouts
- master.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
- master.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
+ supplier.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
+ supplier.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
replica.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
replica.set_option(ldap.OPT_TIMEOUT, opts['timeout'])

# Setup Secure Connection
if opts['certdir'] is not None:
- # Setup Master
+ # Setup Supplier
if opts['mprotocol'] != LDAPI:
- master.set_option(ldap.OPT_X_TLS_CACERTDIR, opts['certdir'])
- master.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD)
+ supplier.set_option(ldap.OPT_X_TLS_CACERTDIR, opts['certdir'])
+ supplier.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD)
if opts['mprotocol'] == LDAP:
# Do StartTLS
try:
- master.start_tls_s()
+ supplier.start_tls_s()
except ldap.LDAPError as e:
- print('TLS negotiation failed on Master: {}'.format(str(e)))
+ print('TLS negotiation failed on Supplier: {}'.format(str(e)))
exit(1)

# Setup Replica
@@ -1006,17 +1006,17 @@ def connect_to_replicas(opts):
try:
replica.start_tls_s()
except ldap.LDAPError as e:
- print('TLS negotiation failed on Master: {}'.format(str(e)))
+ print('TLS negotiation failed on Supplier: {}'.format(str(e)))
exit(1)

- # Open connection to master
+ # Open connection to Supplier
try:
- master.simple_bind_s(opts['binddn'], opts['bindpw'])
+ supplier.simple_bind_s(opts['binddn'], opts['bindpw'])
except ldap.SERVER_DOWN as e:
print(f"Cannot connect to {muri} ({str(e)})")
sys.exit(1)
except ldap.LDAPError as e:
- print("Error: Failed to authenticate to Master: ({}). "
+ print("Error: Failed to authenticate to Supplier: ({}). "
"Please check your credentials and LDAP urls are correct.".format(str(e)))
sys.exit(1)

@@ -1034,7 +1034,7 @@ def connect_to_replicas(opts):
# Validate suffix
if opts['verbose']:
print ("Validating suffix ...")
- if not validate_suffix(master, opts['suffix'], opts['mhost']):
+ if not validate_suffix(supplier, opts['suffix'], opts['mhost']):
sys.exit(1)

if not validate_suffix(replica,opts['suffix'], opts['rhost']):
@@ -1042,16 +1042,16 @@ def connect_to_replicas(opts):

# Get the RUVs
if opts['verbose']:
- print ("Gathering Master's RUV...")
+ print ("Gathering Supplier's RUV...")
try:
- master_ruv = master.search_s(opts['suffix'], ldap.SCOPE_SUBTREE, RUV_FILTER, ['nsds50ruv'])
- if len(master_ruv) > 0:
- opts['master_ruv'] = ensure_list_str(master_ruv[0][1]['nsds50ruv'])
+ supplier_ruv = supplier.search_s(opts['suffix'], ldap.SCOPE_SUBTREE, RUV_FILTER, ['nsds50ruv'])
+ if len(supplier_ruv) > 0:
+ opts['supplier_ruv'] = ensure_list_str(supplier_ruv[0][1]['nsds50ruv'])
else:
- print("Error: Master does not have an RUV entry")
+ print("Error: Supplier does not have an RUV entry")
sys.exit(1)
except ldap.LDAPError as e:
- print("Error: Failed to get Master RUV entry: {}".format(str(e)))
+ print("Error: Failed to get Supplier RUV entry: {}".format(str(e)))
sys.exit(1)

if opts['verbose']:
@@ -1067,12 +1067,12 @@ def connect_to_replicas(opts):
print("Error: Failed to get Replica RUV entry: {}".format(str(e)))
sys.exit(1)

- # Get the master RID
+ # Get the Supplier RID
if opts['verbose']:
- print("Getting Master's replica ID")
+ print("Getting Supplier's replica ID")
try:
search_filter = "(&(objectclass=nsds5Replica)(nsDS5ReplicaRoot={})(nsDS5ReplicaId=*))".format(opts['suffix'])
- replica_entry = master.search_s("cn=config", ldap.SCOPE_SUBTREE, search_filter)
+ replica_entry = supplier.search_s("cn=config", ldap.SCOPE_SUBTREE, search_filter)
if len(replica_entry) > 0:
opts['rid'] = ensure_int(replica_entry[0][1]['nsDS5ReplicaId'][0])
else:
@@ -1081,7 +1081,7 @@ def connect_to_replicas(opts):
print("Error: Failed to get Replica entry: {}".format(str(e)))
sys.exit(1)

- return (master, replica, opts)
+ return (supplier, replica, opts)


def print_online_report(report, opts, output_file):
@@ -1104,11 +1104,11 @@ def print_online_report(report, opts, output_file):
final_report += get_ruv_report(opts)
final_report += ('Entry Counts\n')
final_report += ('=====================================================\n\n')
- final_report += ('Master: %d\n' % (report['m_count']))
+ final_report += ('Supplier: %d\n' % (report['m_count']))
final_report += ('Replica: %d\n\n' % (report['r_count']))
final_report += ('\nTombstones\n')
final_report += ('=====================================================\n\n')
- final_report += ('Master: %d\n' % (report['mtombstones']))
+ final_report += ('Supplier: %d\n' % (report['mtombstones']))
final_report += ('Replica: %d\n' % (report['rtombstones']))
final_report += report['conflict']
missing = False
@@ -1121,7 +1121,7 @@ def print_online_report(report, opts, output_file):
final_report += (' Entries missing on Replica:\n')
for entry in report['r_missing']:
if 'createtimestamp' in entry.data:
- final_report += (' - %s (Created on Master at: %s)\n' %
+ final_report += (' - %s (Created on Supplier at: %s)\n' %
(entry.dn, convert_timestamp(entry.data['createtimestamp'][0])))
else:
final_report += (' - %s\n' % (entry.dn))
@@ -1129,7 +1129,7 @@ def print_online_report(report, opts, output_file):
if m_missing > 0:
if r_missing > 0:
final_report += ('\n')
- final_report += (' Entries missing on Master:\n')
+ final_report += (' Entries missing on Supplier:\n')
for entry in report['m_missing']:
if 'createtimestamp' in entry.data:
final_report += (' - %s (Created on Replica at: %s)\n' %
@@ -1146,9 +1146,9 @@ def print_online_report(report, opts, output_file):
final_report += ('\nResult\n')
final_report += ('=====================================================\n\n')
if not missing and len(report['diff']) == 0:
- final_report += ('No replication differences between Master and Replica\n')
+ final_report += ('No replication differences between Supplier and Replica\n')
else:
- final_report += ('There are replication differences between Master and Replica\n')
+ final_report += ('There are replication differences between Supplier and Replica\n')

if output_file:
output_file.write(final_report)
@@ -1170,7 +1170,7 @@ def remove_state_info(entry):

def get_conflict_report(mentries, rentries, verbose):
"""Gather the conflict entry dn's for each replica
- :param mentries - Master entries
+ :param mentries - Supplier entries
:param rentries - Replica entries
:param verbose - verbose logging
:return - A text blob to dispaly in the report
@@ -1197,7 +1197,7 @@ def get_conflict_report(mentries, rentries, verbose):
report = "\n\nConflict Entries\n"
report += "=====================================================\n\n"
if len(m_conflicts) > 0:
- report += ('Master Conflict Entries: %d\n' % (len(m_conflicts)))
+ report += ('Supplier Conflict Entries: %d\n' % (len(m_conflicts)))
if verbose:
for entry in m_conflicts:
report += ('\n - %s\n' % (entry['dn']))
@@ -1239,8 +1239,8 @@ def do_online_report(opts, output_file=None):
rconflicts = []
mconflicts = []

- # Fire off paged searches on Master and Replica
- master, replica, opts = connect_to_replicas(opts)
+ # Fire off paged searches on Supplier and Replica
+ supplier, replica, opts = connect_to_replicas(opts)

if opts['verbose']:
print('Start searching and comparing...')
@@ -1248,12 +1248,12 @@ def do_online_report(opts, output_file=None):
controls = [paged_ctrl]
req_pr_ctrl = controls[0]
try:
- master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
- "(|(objectclass=*)(objectclass=ldapsubentry)(objectclass=nstombstone))",
- ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'],
- serverctrls=controls)
+ supplier_msgid = supplier.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+ "(|(objectclass=*)(objectclass=ldapsubentry)(objectclass=nstombstone))",
+ ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'],
+ serverctrls=controls)
except ldap.LDAPError as e:
- print("Error: Failed to get Master entries: %s", str(e))
+ print("Error: Failed to get Supplier entries: %s", str(e))
sys.exit(1)
try:
replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
@@ -1268,11 +1268,11 @@ def do_online_report(opts, output_file=None):
while not m_done or not r_done:
try:
if not m_done:
- m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid)
+ m_rtype, m_rdata, m_rmsgid, m_rctrls = supplier.result3(supplier_msgid)
elif not r_done:
m_rdata = []
except ldap.LDAPError as e:
- print("Error: Problem getting the results from the master: %s", str(e))
+ print("Error: Problem getting the results from the Supplier: %s", str(e))
sys.exit(1)
try:
if not r_done:
@@ -1299,7 +1299,7 @@ def do_online_report(opts, output_file=None):
report, opts)

if not m_done:
- # Master
+ # Supplier
m_pctrls = [
c
for c in m_rctrls
@@ -1310,11 +1310,11 @@ def do_online_report(opts, output_file=None):
try:
# Copy cookie from response control to request control
req_pr_ctrl.cookie = m_pctrls[0].cookie
- master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+ supplier_msgid = supplier.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
"(|(objectclass=*)(objectclass=ldapsubentry))",
['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
except ldap.LDAPError as e:
- print("Error: Problem searching the master: %s", str(e))
+ print("Error: Problem searching the Supplier: %s", str(e))
sys.exit(1)
else:
m_done = True # No more pages available
@@ -1354,7 +1354,7 @@ def do_online_report(opts, output_file=None):
print_online_report(report, opts, output_file)

# unbind
- master.unbind_s()
+ supplier.unbind_s()
replica.unbind_s()


@@ -1367,18 +1367,18 @@ def init_online_params(args):

# Make sure the URLs are different
if args.murl == args.rurl:
- print("Master and Replica LDAP URLs are the same, they must be different")
+ print("Supplier and Replica LDAP URLs are the same, they must be different")
sys.exit(1)

- # Parse Master url
+ # Parse Supplier url
if not ldapurl.isLDAPUrl(args.murl):
- print("Master LDAP URL is invalid")
+ print("Supplier LDAP URL is invalid")
sys.exit(1)
murl = ldapurl.LDAPUrl(args.murl)
if murl.urlscheme in VALID_PROTOCOLS:
opts['mprotocol'] = murl.urlscheme
else:
- print('Unsupported ldap url protocol (%s) for Master, please use "ldaps" or "ldap"' %
+ print('Unsupported ldap url protocol (%s) for Supplier, please use "ldaps" or "ldap"' %
murl.urlscheme)
sys.exit(1)

@@ -1520,7 +1520,7 @@ def offline_report(args):
print ("LDIF file ({}) is empty".format(ldif_dir))
sys.exit(1)
if opts['mldif'] == opts['rldif']:
- print("The Master and Replica LDIF files must be different")
+ print("The Supplier and Replica LDIF files must be different")
sys.exit(1)

OUTPUT_FILE = None
@@ -1547,7 +1547,7 @@ def get_state(args):
"""Just do the RUV comparision
"""
opts = init_online_params(args)
- master, replica, opts = connect_to_replicas(opts)
+ supplier, replica, opts = connect_to_replicas(opts)
print(get_ruv_state(opts))


@@ -1569,10 +1569,10 @@ def main():
# Get state
state_parser = subparsers.add_parser('state', help="Get the current replicaton state between two replicas")
state_parser.set_defaults(func=get_state)
- state_parser.add_argument('-m', '--master-url', help='The LDAP URL for the Master server',
- dest='murl', default=None, required=True)
+ state_parser.add_argument('-m', '--supplier-url', help='The LDAP URL for the Supplier server',
+ dest='murl', default=None, required=True)
state_parser.add_argument('-r', '--replica-url', help='The LDAP URL for the Replica server',
- dest='rurl', required=True, default=None)
+ dest='rurl', required=True, default=None)
state_parser.add_argument('-b', '--suffix', help='Replicated suffix', dest='suffix', required=True)
state_parser.add_argument('-D', '--bind-dn', help='The Bind DN', required=True, dest='binddn', default=None)
state_parser.add_argument('-w', '--bind-pw', help='The Bind password', dest='bindpw', default=None)
@@ -1586,7 +1586,7 @@ def main():
# Online mode
online_parser = subparsers.add_parser('online', help="Compare two online replicas for differences")
online_parser.set_defaults(func=online_report)
- online_parser.add_argument('-m', '--master-url', help='The LDAP URL for the Master server (REQUIRED)',
+ online_parser.add_argument('-m', '--supplier-url', help='The LDAP URL for the Supplier server (REQUIRED)',
dest='murl', default=None, required=True)
online_parser.add_argument('-r', '--replica-url', help='The LDAP URL for the Replica server (REQUIRED)',
dest='rurl', required=True, default=None)
@@ -1612,12 +1612,12 @@ def main():
# Offline LDIF mode
offline_parser = subparsers.add_parser('offline', help="Compare two replication LDIF files for differences (LDIF file generated by 'db2ldif -r')")
offline_parser.set_defaults(func=offline_report)
- offline_parser.add_argument('-m', '--master-ldif', help='Master LDIF file',
+ offline_parser.add_argument('-m', '--supplier-ldif', help='Supplier LDIF file',
dest='mldif', default=None, required=True)
offline_parser.add_argument('-r', '--replica-ldif', help='Replica LDIF file',
dest='rldif', default=None, required=True)
offline_parser.add_argument('--rid', dest='rid', default=None, required=True,
- help='The Replica Identifer (rid) for the "Master" server')
+ help='The Replica Identifier (rid) for the "Supplier" server')
offline_parser.add_argument('-b', '--suffix', help='Replicated suffix', dest='suffix', required=True)
offline_parser.add_argument('-c', '--conflicts', help='Display verbose conflict information', action='store_true',
dest='conflicts', default=False)
--
2.31.1
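The renamed report logic above still reduces to comparing the two maxCSN timestamps that get_ruv_time() extracts for one replica ID. A standalone sketch of that comparison, assuming stime and rtime are the supplier-side and replica-side times in seconds (0 = no updates seen, -1 = rid not found in that RUV):

    # Lag calculation behind get_ruv_state(), extracted for clarity.
    def ruv_state(stime, rtime):
        if stime == -1:
            return "Replica ID not found in Supplier's RUV"
        if rtime == -1:
            return "Replica ID not found in Replica's RUV (not initialized?)"
        if stime == 0:
            return "Supplier has not seen any updates"
        if rtime == 0:
            return "Replica has not seen any changes from the Supplier"
        if stime > rtime:
            return "Replica is behind Supplier by: %d seconds" % (stime - rtime)
        if stime < rtime:
            return "Replica is ahead of Supplier by: %d seconds" % (rtime - stime)
        return "Supplier and Replica are in perfect synchronization"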

@ -1,393 +0,0 @@
From 43f8a317bcd9040874b27cad905347a9e6bc8a6f Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 9 Dec 2020 22:42:59 +0000
Subject: [PATCH 4/6] Issue 4419 - Warn users of skipped entries during ldif2db
online import (#4476)

Bug Description: During an online ldif2db import entries that do not
conform to various constraints will be skipped and
not imported. On completion of an import with skipped
entries, the server responds with a success message
and logs the skipped entry detail to the error logs.
The success message could lead the user to believe
that all entries were successfully imported.

Fix Description: If a skipped entry occurs during import, the import
will continue and a warning message will be displayed.
The schema is extended with a nsTaskWarning attribute
which is used to capture and retrieve any task
warnings.

CLI tools for online import updated.

Test added to generate an incorrect ldif entry and perform an
online import.

Fixes: https://github.com/389ds/389-ds-base/issues/4419

Reviewed by: tbordaz, mreynolds389, droideck, Firstyear (Thanks)
---
.../tests/suites/import/import_test.py | 39 +++++++++++++++++--
ldap/schema/02common.ldif | 3 +-
.../back-ldbm/db-bdb/bdb_import_threads.c | 5 +++
ldap/servers/slapd/slap.h | 1 +
ldap/servers/slapd/slapi-plugin.h | 11 ++++++
ldap/servers/slapd/slapi-private.h | 8 ----
ldap/servers/slapd/task.c | 29 +++++++++++++-
src/lib389/lib389/cli_conf/backend.py | 6 ++-
src/lib389/lib389/tasks.py | 23 +++++++++--
9 files changed, 108 insertions(+), 17 deletions(-)

diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py
index b47db96ed..77c915026 100644
--- a/dirsrvtests/tests/suites/import/import_test.py
+++ b/dirsrvtests/tests/suites/import/import_test.py
@@ -65,6 +65,9 @@ def _import_clean(request, topo):
import_ldif = ldif_dir + '/basic_import.ldif'
if os.path.exists(import_ldif):
os.remove(import_ldif)
+ syntax_err_ldif = ldif_dir + '/syntax_err.dif'
+ if os.path.exists(syntax_err_ldif):
+ os.remove(syntax_err_ldif)

request.addfinalizer(finofaci)

@@ -141,17 +144,19 @@ def _create_bogus_ldif(topo):

def _create_syntax_err_ldif(topo):
"""
- Create an incorrect ldif entry that violates syntax check
+ Create an ldif file, which contains an entry that violates syntax check
"""
ldif_dir = topo.standalone.get_ldif_dir()
line1 = """dn: dc=example,dc=com
objectClass: top
objectClass: domain
dc: example
+
dn: ou=groups,dc=example,dc=com
objectClass: top
objectClass: organizationalUnit
ou: groups
+
dn: uid=JHunt,ou=groups,dc=example,dc=com
objectClass: top
objectClass: person
@@ -201,6 +206,34 @@ def test_import_with_index(topo, _import_clean):
assert f'{place}/userRoot/roomNumber.db' in glob.glob(f'{place}/userRoot/*.db', recursive=True)


+def test_online_import_with_warning(topo, _import_clean):
+ """
+ Import an ldif file with syntax errors, verify skipped entry warning code
+
+ :id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb8
+ :setup: Standalone Instance
+ :steps:
+ 1. Create standalone Instance
+ 2. Create an ldif file with an entry that violates syntax check (empty givenname)
+ 3. Online import of troublesome ldif file
+ :expected results:
+ 1. Successful import with skipped entry warning
+ """
+ topo.standalone.restart()
+
+ import_task = ImportTask(topo.standalone)
+ import_ldif1 = _create_syntax_err_ldif(topo)
+
+ # Importing the offending ldif file - online
+ import_task.import_suffix_from_ldif(ldiffile=import_ldif1, suffix=DEFAULT_SUFFIX)
+
+ # There is just a single entry in this ldif
+ import_task.wait(5)
+
+ # Check for the task nsTaskWarning attr, make sure its set to skipped entry code
+ assert import_task.present('nstaskwarning')
+ assert TaskWarning.WARN_SKIPPED_IMPORT_ENTRY == import_task.get_task_warn()
+
def test_crash_on_ldif2db(topo, _import_clean):
"""
Delete the cn=monitor entry for an LDBM backend instance. Doing this will
@@ -246,7 +279,7 @@ def test_ldif2db_allows_entries_without_a_parent_to_be_imported(topo, _import_cl
topo.standalone.start()


-def test_ldif2db_syntax_check(topo):
+def test_ldif2db_syntax_check(topo, _import_clean):
"""ldif2db should return a warning when a skipped entry has occured.
:id: 85e75670-42c5-4062-9edc-7f117c97a06f
:setup:
@@ -261,7 +294,7 @@ def test_ldif2db_syntax_check(topo):
import_ldif1 = _create_syntax_err_ldif(topo)
# Import the offending LDIF data - offline
topo.standalone.stop()
- ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1)
+ ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1, None)
assert ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY
topo.standalone.start()

diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif
index c6dc074db..821640d03 100644
--- a/ldap/schema/02common.ldif
+++ b/ldap/schema/02common.ldif
@@ -145,6 +145,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2356 NAME 'nsTaskExitCode' DESC 'Slapi T
attributeTypes: ( 2.16.840.1.113730.3.1.2357 NAME 'nsTaskCurrentItem' DESC 'Slapi Task item' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2358 NAME 'nsTaskTotalItems' DESC 'Slapi Task total items' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2359 NAME 'nsTaskCreated' DESC 'Slapi Task creation date' SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2375 NAME 'nsTaskWarning' DESC 'Slapi Task warning code' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
#
# objectclasses:
#
@@ -177,5 +178,5 @@ objectClasses: ( 2.16.840.1.113730.3.2.503 NAME 'nsDSWindowsReplicationAgreement
objectClasses: ( 2.16.840.1.113730.3.2.128 NAME 'costemplate' DESC 'Netscape defined objectclass' SUP top MAY ( cn $ cospriority ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.304 NAME 'nsView' DESC 'Netscape defined objectclass' SUP top AUXILIARY MAY ( nsViewFilter $ description ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.316 NAME 'nsAttributeEncryption' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsEncryptionAlgorithm ) X-ORIGIN 'Netscape Directory Server' )
-objectClasses: ( 2.16.840.1.113730.3.2.335 NAME 'nsSlapiTask' DESC 'Slapi_Task objectclass' SUP top MUST ( cn ) MAY ( ttl $ nsTaskLog $ nsTaskStatus $ nsTaskExitCode $ nsTaskCurrentItem $ nsTaskTotalItems $ nsTaskCreated ) X-ORIGIN '389 Directory Server' )
+objectClasses: ( 2.16.840.1.113730.3.2.335 NAME 'nsSlapiTask' DESC 'Slapi_Task objectclass' SUP top MUST ( cn ) MAY ( ttl $ nsTaskLog $ nsTaskStatus $ nsTaskExitCode $ nsTaskCurrentItem $ nsTaskTotalItems $ nsTaskCreated $ nsTaskWarning ) X-ORIGIN '389 Directory Server' )

diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
index 310893884..5c7d9c8f7 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
@@ -747,6 +747,11 @@ import_producer(void *param)
}
}

+ /* capture skipped entry warnings for this task */
+ if((job) && (job->skipped)) {
+ slapi_task_set_warning(job->task, WARN_SKIPPED_IMPORT_ENTRY);
+ }
+
slapi_value_free(&(job->usn_value));
import_free_ldif(&c);
info->state = FINISHED;
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 53c9161d1..be4d38739 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1753,6 +1753,7 @@ typedef struct slapi_task
int task_progress; /* number between 0 and task_work */
int task_work; /* "units" of work to be done */
int task_flags; /* (see above) */
+ task_warning task_warn; /* task warning */
char *task_status; /* transient status info */
char *task_log; /* appended warnings, etc */
char task_date[SLAPI_TIMESTAMP_BUFSIZE]; /* Date/time when task was created */
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 96313ef2c..ddb11bc7c 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -6638,6 +6638,15 @@ int slapi_config_remove_callback(int operation, int flags, const char *base, int
/* task flags (set by the task-control code) */
#define SLAPI_TASK_DESTROYING 0x01 /* queued event for destruction */

+/* task warnings */
+typedef enum task_warning_t{
+ WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0),
+ WARN_UPGRADE_DN_FORMAT = (1 << 1),
+ WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
+ WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
+} task_warning;
+
+
int slapi_task_register_handler(const char *name, dseCallbackFn func);
int slapi_plugin_task_register_handler(const char *name, dseCallbackFn func, Slapi_PBlock *plugin_pb);
int slapi_plugin_task_unregister_handler(const char *name, dseCallbackFn func);
@@ -6654,6 +6663,8 @@ int slapi_task_get_refcount(Slapi_Task *task);
void slapi_task_set_destructor_fn(Slapi_Task *task, TaskCallbackFn func);
void slapi_task_set_cancel_fn(Slapi_Task *task, TaskCallbackFn func);
void slapi_task_status_changed(Slapi_Task *task);
+void slapi_task_set_warning(Slapi_Task *task, task_warning warn);
+int slapi_task_get_warning(Slapi_Task *task);
void slapi_task_log_status(Slapi_Task *task, char *format, ...)
#ifdef __GNUC__
__attribute__((format(printf, 2, 3)));
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index d5abe8ac1..b956ebe63 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1465,14 +1465,6 @@ void slapi_pblock_set_operation_notes(Slapi_PBlock *pb, uint32_t opnotes);
void slapi_pblock_set_flag_operation_notes(Slapi_PBlock *pb, uint32_t opflag);
void slapi_pblock_set_result_text_if_empty(Slapi_PBlock *pb, char *text);

-/* task warnings */
-typedef enum task_warning_t{
- WARN_UPGARDE_DN_FORMAT_ALL = (1 << 0),
- WARN_UPGRADE_DN_FORMAT = (1 << 1),
- WARN_UPGRADE_DN_FORMAT_SPACE = (1 << 2),
- WARN_SKIPPED_IMPORT_ENTRY = (1 << 3)
-} task_warning;
-
int32_t slapi_pblock_get_task_warning(Slapi_PBlock *pb);
void slapi_pblock_set_task_warning(Slapi_PBlock *pb, task_warning warn);

diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c
index 936c64920..806077a16 100644
--- a/ldap/servers/slapd/task.c
+++ b/ldap/servers/slapd/task.c
@@ -46,6 +46,7 @@ static uint64_t shutting_down = 0;
#define TASK_PROGRESS_NAME "nsTaskCurrentItem"
#define TASK_WORK_NAME "nsTaskTotalItems"
#define TASK_DATE_NAME "nsTaskCreated"
+#define TASK_WARNING_NAME "nsTaskWarning"

#define DEFAULT_TTL "3600" /* seconds */
#define TASK_SYSCONFIG_FILE_ATTR "sysconfigfile" /* sysconfig reload task file attr */
@@ -332,7 +333,7 @@ slapi_task_status_changed(Slapi_Task *task)
LDAPMod modlist[20];
LDAPMod *mod[20];
int cur = 0, i;
- char s1[20], s2[20], s3[20];
+ char s1[20], s2[20], s3[20], s4[20];

if (shutting_down) {
/* don't care about task status updates anymore */
@@ -346,9 +347,11 @@ slapi_task_status_changed(Slapi_Task *task)
sprintf(s1, "%d", task->task_exitcode);
sprintf(s2, "%d", task->task_progress);
sprintf(s3, "%d", task->task_work);
+ sprintf(s4, "%d", task->task_warn);
NEXTMOD(TASK_PROGRESS_NAME, s2);
NEXTMOD(TASK_WORK_NAME, s3);
NEXTMOD(TASK_DATE_NAME, task->task_date);
+ NEXTMOD(TASK_WARNING_NAME, s4);
/* only add the exit code when the job is done */
if ((task->task_state == SLAPI_TASK_FINISHED) ||
(task->task_state == SLAPI_TASK_CANCELLED)) {
@@ -452,6 +455,30 @@ slapi_task_get_refcount(Slapi_Task *task)
return 0; /* return value not currently used */
}

+/*
+ * Return task warning
+ */
+int
+slapi_task_get_warning(Slapi_Task *task)
+{
+ if (task) {
+ return task->task_warn;
+ }
+
+ return 0; /* return value not currently used */
+}
+
+/*
+ * Set task warning
+ */
+void
+slapi_task_set_warning(Slapi_Task *task, task_warning warn)
+{
+ if (task) {
+ return task->task_warn |= warn;
+ }
+}
+
int
slapi_plugin_task_unregister_handler(const char *name, dseCallbackFn func)
{
diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py
index d7a6e670c..6bfbcb036 100644
--- a/src/lib389/lib389/cli_conf/backend.py
+++ b/src/lib389/lib389/cli_conf/backend.py
@@ -243,9 +243,13 @@ def backend_import(inst, basedn, log, args):
exclude_suffixes=args.exclude_suffixes)
task.wait(timeout=None)
result = task.get_exit_code()
+ warning = task.get_task_warn()

if task.is_complete() and result == 0:
- log.info("The import task has finished successfully")
+ if warning is None or (warning == 0):
+ log.info("The import task has finished successfully")
+ else:
+ log.info("The import task has finished successfully, with warning code {}, check the logs for more detail".format(warning))
else:
raise ValueError("Import task failed\n-------------------------\n{}".format(ensure_str(task.get_task_log())))

diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
index dc7bb9206..bf20d1e61 100644
--- a/src/lib389/lib389/tasks.py
+++ b/src/lib389/lib389/tasks.py
@@ -38,6 +38,7 @@ class Task(DSLdapObject):
self._protected = False
self._exit_code = None
self._task_log = ""
+ self._task_warn = None

def status(self):
"""Return the decoded status of the task
@@ -49,6 +50,7 @@ class Task(DSLdapObject):

self._exit_code = self.get_attr_val_utf8("nsTaskExitCode")
self._task_log = self.get_attr_val_utf8("nsTaskLog")
+ self._task_warn = self.get_attr_val_utf8("nsTaskWarning")
if not self.exists():
self._log.debug("complete: task has self cleaned ...")
# The task cleaned it self up.
@@ -77,6 +79,15 @@ class Task(DSLdapObject):
return None
return None

+ def get_task_warn(self):
+ """Return task's warning code if task is complete, else None."""
+ if self.is_complete():
+ try:
+ return int(self._task_warn)
+ except TypeError:
+ return None
+ return None
+
def wait(self, timeout=120):
"""Wait until task is complete."""

@@ -390,14 +401,17 @@ class Tasks(object):
running, true if done - if true, second is the exit code - if dowait
is True, this function will block until the task is complete'''
attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode',
- 'nsTaskCurrentItem', 'nsTaskTotalItems']
+ 'nsTaskCurrentItem', 'nsTaskTotalItems', 'nsTaskWarning']
done = False
exitCode = 0
+ warningCode = 0
dn = entry.dn
while not done:
entry = self.conn.getEntry(dn, attrlist=attrlist)
self.log.debug("task entry %r", entry)

+ if entry.nsTaskWarning:
+ warningCode = int(entry.nsTaskWarning)
if entry.nsTaskExitCode:
exitCode = int(entry.nsTaskExitCode)
done = True
@@ -405,7 +419,7 @@ class Tasks(object):
time.sleep(1)
else:
break
- return (done, exitCode)
+ return (done, exitCode, warningCode)

def importLDIF(self, suffix=None, benamebase=None, input_file=None,
args=None):
@@ -461,8 +475,9 @@ class Tasks(object):
self.conn.add_s(entry)

exitCode = 0
+ warningCode = 0
if args and args.get(TASK_WAIT, False):
- (done, exitCode) = self.conn.tasks.checkTask(entry, True)
+ (done, exitCode, warningCode) = self.conn.tasks.checkTask(entry, True)

if exitCode:
self.log.error("Error: import task %s for file %s exited with %d",
@@ -470,6 +485,8 @@ class Tasks(object):
else:
self.log.info("Import task %s for file %s completed successfully",
cn, input_file)
+ if warningCode:
+ self.log.info("with warning code %d", warningCode)
self.dn = dn
self.entry = entry
return exitCode
--
2.26.2
|
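For context, a minimal sketch of how a caller might consume the new warning API added above. The instance handle, LDIF path, and suffix are placeholders, and ImportTask.import_suffix_from_ldif is assumed to be the usual lib389 entry point; only wait(), is_complete(), and get_task_warn() come directly from this patch:

    from lib389.tasks import ImportTask

    # 'inst' is assumed to be an already-connected DirSrv instance.
    task = ImportTask(inst)
    task.import_suffix_from_ldif(ldiffile="/tmp/example.ldif",
                                 suffix="dc=example,dc=com")
    result = task.wait(timeout=120)

    # get_task_warn() returns None until the task completes, then the
    # decoded nsTaskWarning value (or None when the attribute is unset).
    warning = task.get_task_warn()
    if task.is_complete() and result == 0:
        if warning:
            print("Import finished with warning code {}".format(warning))
        else:
            print("Import finished cleanly")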
@ -1,4 +1,4 @@
From 98caa0c0ddf48db791a26764aa695fa2345584ce Mon Sep 17 00:00:00 2001
From 55a47c1bfe1ce1c27e470384c4f1d50895db25f7 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 13 Jul 2021 14:18:03 -0400
Subject: [PATCH] Issue 4443 - Internal unindexed searches in syncrepl/retro
@ -22,13 +22,160 @@ relates: https://github.com/389ds/389-ds-base/issues/4443

Reviewed by: spichugi & tbordaz(Thanks!!)
---
.../tests/suites/retrocl/basic_test.py | 53 ++++++++-------
.../suites/retrocl/retrocl_indexing_test.py | 68 +++++++++++++++++++
ldap/servers/plugins/retrocl/retrocl_create.c | 2 +-
.../slapd/back-ldbm/ldbm_index_config.c | 25 +++++--
src/lib389/lib389/_mapped_object.py | 13 ++++
4 files changed, 102 insertions(+), 6 deletions(-)
5 files changed, 130 insertions(+), 31 deletions(-)
create mode 100644 dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py

diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
index f3bc50f29..84d513829 100644
--- a/dirsrvtests/tests/suites/retrocl/basic_test.py
+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
@@ -8,7 +8,6 @@

import logging
import ldap
-import time
import pytest
from lib389.topologies import topology_st
from lib389.plugins import RetroChangelogPlugin
@@ -18,7 +17,8 @@ from lib389.tasks import *
from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
from lib389.cli_base.dsrc import dsrc_arg_concat
from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add_attr
-from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
+from lib389.idm.user import UserAccount, UserAccounts
+from lib389._mapped_object import DSLdapObjects

pytestmark = pytest.mark.tier1

@@ -82,7 +82,7 @@ def test_retrocl_exclude_attr_add(topology_st):

log.info('Adding user1')
try:
- user1 = users.create(properties={
+ users.create(properties={
'sn': '1',
'cn': 'user 1',
'uid': 'user1',
@@ -97,17 +97,18 @@ def test_retrocl_exclude_attr_add(topology_st):
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
- log.error("Failed to add user1")
+ log.error("Failed to add user1: " + str(e))

log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
try:
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+ retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX)
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
except ldap.LDAPError as e:
- log.fatal("Changelog search failed, error: " +str(e))
+ log.fatal("Changelog search failed, error: " + str(e))
assert False
assert len(cllist) > 0
- if cllist[0].hasAttr('changes'):
- clstr = (cllist[0].getValue('changes')).decode()
+ if cllist[0].present('changes'):
+ clstr = str(cllist[0].get_attr_vals_utf8('changes'))
assert ATTR_HOMEPHONE in clstr
assert ATTR_CARLICENSE in clstr

@@ -134,7 +135,7 @@ def test_retrocl_exclude_attr_add(topology_st):

log.info('Adding user2')
try:
- user2 = users.create(properties={
+ users.create(properties={
'sn': '2',
'cn': 'user 2',
'uid': 'user2',
@@ -149,18 +150,18 @@ def test_retrocl_exclude_attr_add(topology_st):
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
- log.error("Failed to add user2")
+ log.error("Failed to add user2: " + str(e))

log.info('Verify homePhone attr is not in the changelog changestring')
try:
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN)
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER2_DN})')
assert len(cllist) > 0
- if cllist[0].hasAttr('changes'):
- clstr = (cllist[0].getValue('changes')).decode()
+ if cllist[0].present('changes'):
+ clstr = str(cllist[0].get_attr_vals_utf8('changes'))
assert ATTR_HOMEPHONE not in clstr
assert ATTR_CARLICENSE in clstr
except ldap.LDAPError as e:
- log.fatal("Changelog search failed, error: " +str(e))
+ log.fatal("Changelog search failed, error: " + str(e))
assert False

def test_retrocl_exclude_attr_mod(topology_st):
@@ -228,19 +229,20 @@ def test_retrocl_exclude_attr_mod(topology_st):
'homeDirectory': '/home/user1',
'userpassword': USER_PW})
except ldap.ALREADY_EXISTS:
- pass
+ user1 = UserAccount(st, dn=USER1_DN)
except ldap.LDAPError as e:
- log.error("Failed to add user1")
+ log.error("Failed to add user1: " + str(e))

log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
try:
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+ retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX)
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
except ldap.LDAPError as e:
- log.fatal("Changelog search failed, error: " +str(e))
+ log.fatal("Changelog search failed, error: " + str(e))
assert False
assert len(cllist) > 0
- if cllist[0].hasAttr('changes'):
- clstr = (cllist[0].getValue('changes')).decode()
+ if cllist[0].present('changes'):
+ clstr = str(cllist[0].get_attr_vals_utf8('changes'))
assert ATTR_HOMEPHONE in clstr
assert ATTR_CARLICENSE in clstr

@@ -267,24 +269,25 @@ def test_retrocl_exclude_attr_mod(topology_st):

log.info('Modify user1 carLicense attribute')
try:
- st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")])
+ user1.replace(ATTR_CARLICENSE, "123WX321")
except ldap.LDAPError as e:
log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc'])
assert False

log.info('Verify carLicense attr is not in the changelog changestring')
try:
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
assert len(cllist) > 0
# There will be 2 entries in the changelog for this user, we are only
#interested in the second one, the modify operation.
- if cllist[1].hasAttr('changes'):
- clstr = (cllist[1].getValue('changes')).decode()
+ if cllist[1].present('changes'):
+ clstr = str(cllist[1].get_attr_vals_utf8('changes'))
assert ATTR_CARLICENSE not in clstr
except ldap.LDAPError as e:
- log.fatal("Changelog search failed, error: " +str(e))
+ log.fatal("Changelog search failed, error: " + str(e))
assert False

+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py
new file mode 100644
index 000000000..b1dfe962c
@ -198,10 +345,10 @@ index 9722d0ce7..38e7368e1 100644
if (rc == LDAP_SUCCESS) {
/* Assume the caller knows if it is OK to go online immediately */
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
index ca6ea6ef8..6cdcb0dc7 100644
index b6d778b01..fe610d175 100644
--- a/src/lib389/lib389/_mapped_object.py
+++ b/src/lib389/lib389/_mapped_object.py
@@ -147,6 +147,19 @@ class DSLdapObject(DSLogging, DSLint):
@@ -148,6 +148,19 @@ class DSLdapObject(DSLogging, DSLint):

return True
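The lib389 pattern the test now uses generalizes: any subtree can be wrapped in a DSLdapObjects collection and queried with a server-side filter instead of a raw search_s(). A minimal sketch, where the suffix constant and target DN are illustrative and 'st' is assumed to be a connected DirSrv instance; DSLdapObjects, filter(), present(), and get_attr_vals_utf8() are exactly the calls the patch itself introduces:

    from lib389._mapped_object import DSLdapObjects

    RETROCL_SUFFIX = 'cn=changelog'  # assumed constant for this sketch

    changelog = DSLdapObjects(st, basedn=RETROCL_SUFFIX)
    entries = changelog.filter('(targetDn=uid=user1,ou=people,dc=example,dc=com)')
    for entry in entries:
        if entry.present('changes'):
            print(entry.get_attr_vals_utf8('changes'))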
@ -1,149 +0,0 @@
From 61d82ef842e0e4e013937bf05d7f640be2d2fc09 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 16 Dec 2020 16:30:28 +0100
Subject: [PATCH 5/6] Issue 4480 - Unexpected info returned to ldap request
(#4491)

Bug description:
If the bind entry does not exist, the bind result info
reports that 'No such entry'. It should not give any
information if the target entry exists or not

Fix description:
Does not return any additional information during a bind

relates: https://github.com/389ds/389-ds-base/issues/4480

Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all)

Platforms tested: F31
---
dirsrvtests/tests/suites/basic/basic_test.py | 112 +++++++++++++++++++
1 file changed, 112 insertions(+)

diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 1ae82dcdd..02b73ee85 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -1400,6 +1400,118 @@ def test_dscreate_multiple_dashes_name(dscreate_long_instance):
assert not dscreate_long_instance.exists()


+@pytest.fixture(scope="module", params=('c=uk', 'cn=test_user', 'dc=example,dc=com', 'o=south', 'ou=sales', 'wrong=some_value'))
+def dscreate_test_rdn_value(request):
+ template_file = "/tmp/dssetup.inf"
+ template_text = f"""[general]
+config_version = 2
+# This invalid hostname ...
+full_machine_name = localhost.localdomain
+# Means we absolutely require this.
+strict_host_checking = False
+# In tests, we can be run in containers, NEVER trust
+# that systemd is there, or functional in any capacity
+systemd = False
+
+[slapd]
+instance_name = test_different_rdn
+root_dn = cn=directory manager
+root_password = someLongPassword_123
+# We do not have access to high ports in containers,
+# so default to something higher.
+port = 38999
+secure_port = 63699
+
+[backend-userroot]
+create_suffix_entry = True
+suffix = {request.param}
+"""
+
+ with open(template_file, "w") as template_fd:
+ template_fd.write(template_text)
+
+ # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389
+ tmp_env = os.environ
+ if "PYTHONPATH" in tmp_env:
+ del tmp_env["PYTHONPATH"]
+
+ def fin():
+ os.remove(template_file)
+ if request.param != "wrong=some_value":
+ try:
+ subprocess.check_call(['dsctl', 'test_different_rdn', 'remove', '--do-it'])
+ except subprocess.CalledProcessError as e:
+ log.fatal(f"Failed to remove test instance Error ({e.returncode}) {e.output}")
+ else:
+ log.info("Wrong RDN is passed, instance not created")
+ request.addfinalizer(fin)
+ return template_file, tmp_env, request.param,
+
+
+@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.0.0'),
+ reason="This test is only required with new admin cli, and requires root.")
+@pytest.mark.bz1807419
+@pytest.mark.ds50928
+def test_dscreate_with_different_rdn(dscreate_test_rdn_value):
+ """Test that dscreate works with different RDN attributes as suffix
+
+ :id: 77ed6300-6a2f-4e79-a862-1f1105f1e3ef
+ :parametrized: yes
+ :setup: None
+ :steps:
+ 1. Create template file for dscreate with different RDN attributes as suffix
+ 2. Create instance using template file
+ 3. Create instance with 'wrong=some_value' as suffix's RDN attribute
+ :expectedresults:
+ 1. Should succeeds
+ 2. Should succeeds
+ 3. Should fail
+ """
+ try:
+ subprocess.check_call([
+ 'dscreate',
+ 'from-file',
+ dscreate_test_rdn_value[0]
+ ], env=dscreate_test_rdn_value[1])
+ except subprocess.CalledProcessError as e:
+ log.fatal(f"dscreate failed! Error ({e.returncode}) {e.output}")
+ if dscreate_test_rdn_value[2] != "wrong=some_value":
+ assert False
+ else:
+ assert True
+
+def test_bind_invalid_entry(topology_st):
+ """Test the failing bind does not return information about the entry
+
+ :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f
+
+ :setup: Standalone instance
+
+ :steps:
+ 1: bind as non existing entry
+ 2: check that bind info does not report 'No such entry'
+
+ :expectedresults:
+ 1: pass
+ 2: pass
+ """
+
+ topology_st.standalone.restart()
+ INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX
+ try:
+ topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD)
+ except ldap.LDAPError as e:
+ log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY)
+ log.info('exception description: ' + e.args[0]['desc'])
+ if 'info' in e.args[0]:
+ log.info('exception info: ' + e.args[0]['info'])
+ assert e.args[0]['desc'] == 'Invalid credentials'
+ assert 'info' not in e.args[0]
+ pass
+
+ log.info('test_bind_invalid_entry: PASSED')
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
--
2.26.2
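The behaviour this test locks down can also be checked by hand with python-ldap; the server URI, DN, and password below are placeholders, and the exception fields (desc/info) are the same ones the test above inspects:

    import ldap

    conn = ldap.initialize('ldap://localhost:389')
    try:
        conn.simple_bind_s('cn=no_such_user,dc=example,dc=com', 'somePassword')
    except ldap.INVALID_CREDENTIALS as e:
        # After the fix the server reports only the generic error; there
        # must be no 'info' field hinting whether the bind DN exists.
        assert e.args[0]['desc'] == 'Invalid credentials'
        assert 'info' not in e.args[0]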
@ -1,4 +1,4 @@
From 1da033b82b428bb5b90c201a59aaab24e0f14ccf Mon Sep 17 00:00:00 2001
From 2f0218f91d35c83a2aaecb71849a54b2481390ab Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Fri, 9 Jul 2021 11:53:35 +1000
Subject: [PATCH] Issue 4817 - BUG - locked crypt accounts on import may allow
@ -1,99 +0,0 @@
From 3c74f736c657d007770fe866842b08d0a74772ca Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 9 Dec 2020 15:21:11 -0500
Subject: [PATCH 6/6] Issue 4414 - disk monitoring - prevent division by zero
crash

Bug Description: If a disk mount has zero total space or zero used
space then a division by zero can occur and the
server will crash.

It has also been observed that sometimes a system
can return the wrong disk entirely, and when that
happens the incorrect disk also has zero available
space, which triggers the disk monitoring thread to
immediately shut the server down.

Fix Description: Check the total and used space for zero and do not
divide, just ignore it. As a preemptive measure
ignore disks from /dev, /proc, /sys (except /dev/shm).
Yes it's a bit hacky, but the true underlying cause
is not known yet. So better to be safe than sorry.

Relates: https://github.com/389ds/389-ds-base/issues/4414

Reviewed by: firstyear(Thanks!)
---
ldap/servers/slapd/daemon.c | 22 +++++++++++++++++++++-
ldap/servers/slapd/monitor.c | 13 +++++--------
2 files changed, 26 insertions(+), 9 deletions(-)

diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 691f77570..bfd965263 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -221,7 +221,27 @@ disk_mon_get_mount_point(char *dir)
}
if (s.st_dev == dev_id) {
endmntent(fp);
- return (slapi_ch_strdup(mnt->mnt_dir));
+
+ if ((strncmp(mnt->mnt_dir, "/dev", 4) == 0 && strncmp(mnt->mnt_dir, "/dev/shm", 8) != 0) ||
+ strncmp(mnt->mnt_dir, "/proc", 4) == 0 ||
+ strncmp(mnt->mnt_dir, "/sys", 4) == 0)
+ {
+ /*
+ * Ignore "mount directories" starting with /dev (except
+ * /dev/shm), /proc, /sys For some reason these mounts are
+ * occasionally/incorrectly returned. Only seen this at a
+ * customer site once. When it happens it causes disk
+ * monitoring to think the server has 0 disk space left, and
+ * it abruptly/unexpectedly shuts the server down. At this
+ * point it looks like a bug in stat(), setmntent(), or
+ * getmntent(), but there is no way to prove that since there
+ * is no way to reproduce the original issue. For now just
+ * return NULL to be safe.
+ */
+ return NULL;
+ } else {
+ return (slapi_ch_strdup(mnt->mnt_dir));
+ }
}
}
endmntent(fp);
diff --git a/ldap/servers/slapd/monitor.c b/ldap/servers/slapd/monitor.c
index 562721bed..65f082986 100644
--- a/ldap/servers/slapd/monitor.c
+++ b/ldap/servers/slapd/monitor.c
@@ -131,7 +131,6 @@ monitor_disk_info (Slapi_PBlock *pb __attribute__((unused)),
{
int32_t rc = LDAP_SUCCESS;
char **dirs = NULL;
- char buf[BUFSIZ];
struct berval val;
struct berval *vals[2];
uint64_t total_space;
@@ -143,15 +142,13 @@ monitor_disk_info (Slapi_PBlock *pb __attribute__((unused)),

disk_mon_get_dirs(&dirs);

- for (uint16_t i = 0; dirs && dirs[i]; i++) {
+ for (size_t i = 0; dirs && dirs[i]; i++) {
+ char buf[BUFSIZ] = {0};
rc = disk_get_info(dirs[i], &total_space, &avail_space, &used_space);
- if (rc) {
- slapi_log_err(SLAPI_LOG_WARNING, "monitor_disk_info",
- "Unable to get 'cn=disk space,cn=monitor' stats for %s\n", dirs[i]);
- } else {
+ if (rc == 0 && total_space > 0 && used_space > 0) {
val.bv_len = snprintf(buf, sizeof(buf),
- "partition=\"%s\" size=\"%" PRIu64 "\" used=\"%" PRIu64 "\" available=\"%" PRIu64 "\" use%%=\"%" PRIu64 "\"",
- dirs[i], total_space, used_space, avail_space, used_space * 100 / total_space);
+ "partition=\"%s\" size=\"%" PRIu64 "\" used=\"%" PRIu64 "\" available=\"%" PRIu64 "\" use%%=\"%" PRIu64 "\"",
+ dirs[i], total_space, used_space, avail_space, used_space * 100 / total_space);
val.bv_val = buf;
attrlist_merge(&e->e_attrs, "dsDisk", vals);
}
--
2.26.2
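The guard in monitor.c is plain defensive arithmetic: never compute used_space * 100 / total_space when either operand is zero. The same idea written out in Python for clarity, with entirely illustrative values and a hypothetical helper name:

    def format_disk_stat(partition, total_space, used_space, avail_space):
        """Return a stats string, or None when a bogus mount reports zero space."""
        if total_space == 0 or used_space == 0:
            # Dividing by total_space would crash; skip the entry instead.
            return None
        use_pct = used_space * 100 // total_space
        return (f'partition="{partition}" size="{total_space}" '
                f'used="{used_space}" available="{avail_space}" use%="{use_pct}"')

    print(format_disk_stat("/", 1000, 250, 750))   # normal mount
    print(format_disk_stat("/proc", 0, 0, 0))      # bogus mount, returns None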
@ -1,4 +1,4 @@
From 4919320a395ee13db67a4cc5f7c0b76e781b3b73 Mon Sep 17 00:00:00 2001
From 31d53e7da585723e66b838dcf34b77ea7c9968c6 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 21 Jul 2021 09:16:30 +0200
Subject: [PATCH] Issue 4837 - persistent search returns entries even when an
@ -0,0 +1,49 @@
From 616dc9964a4675dea2ab2c2efb9bd31c3903e29d Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 26 Jul 2021 15:22:08 -0400
Subject: [PATCH] Hardcode gost crypt password storage scheme

---
.../plugins/pwdstorage/gost_yescrypt.c | 22 -------------------
1 file changed, 22 deletions(-)

diff --git a/ldap/servers/plugins/pwdstorage/gost_yescrypt.c b/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
index 67b39395e..7b0d1653c 100644
--- a/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
+++ b/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
@@ -11,7 +11,6 @@

#include <crypt.h>

-#ifdef XCRYPT_VERSION_STR
#include <errno.h>
int
gost_yescrypt_pw_cmp(const char *userpwd, const char *dbpwd)
@@ -64,24 +63,3 @@ gost_yescrypt_pw_enc(const char *pwd)
return enc;
}

-#else
-
-/*
- * We do not have xcrypt, so always fail all checks.
- */
-int
-gost_yescrypt_pw_cmp(const char *userpwd __attribute__((unused)), const char *dbpwd __attribute__((unused)))
-{
- slapi_log_err(SLAPI_LOG_ERR, GOST_YESCRYPT_SCHEME_NAME,
- "Unable to use gost_yescrypt_pw_cmp, xcrypt is not available.\n");
- return 1;
-}
-
-char *
-gost_yescrypt_pw_enc(const char *pwd __attribute__((unused)))
-{
- slapi_log_err(SLAPI_LOG_ERR, GOST_YESCRYPT_SCHEME_NAME,
- "Unable to use gost_yescrypt_pw_enc, xcrypt is not available.\n");
- return NULL;
-}
-#endif
--
2.31.1
@ -1,132 +0,0 @@
From 48b30739f33d1eb526dbdd45c820129c4a4c4bcb Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Tue, 12 Jan 2021 11:06:24 +0100
Subject: [PATCH] Issue 4504 - Insure ldapi is enabled in repl_monitor_test.py
(Needed on RHEL) (#4527)

(cherry picked from commit 279556bc78ed743d7a053069621d999ec045866f)
---
.../tests/suites/clu/repl_monitor_test.py | 67 +++++++++----------
1 file changed, 31 insertions(+), 36 deletions(-)

diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
index eb18d2da2..b2cb840b3 100644
--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
@@ -9,7 +9,6 @@
import time
import subprocess
import pytest
-import re

from lib389.cli_conf.replication import get_repl_monitor_info
from lib389.tasks import *
@@ -18,6 +17,8 @@ from lib389.topologies import topology_m2
from lib389.cli_base import FakeArgs
from lib389.cli_base.dsrc import dsrc_arg_concat
from lib389.cli_base import connect_instance
+from lib389.replica import Replicas
+

pytestmark = pytest.mark.tier0

@@ -68,25 +69,6 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No
log.info('Reset log file')
f.truncate(0)

-def get_hostnames_from_log(port1, port2):
- # Get the supplier host names as displayed in replication monitor output
- with open(LOG_FILE, 'r') as logfile:
- logtext = logfile.read()
- # search for Supplier :hostname:port
- # and use \D to insure there is no more number is after
- # the matched port (i.e that 10 is not matching 101)
- regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)'
- match=re.search(regexp, logtext)
- host_m1 = 'localhost.localdomain'
- if (match is not None):
- host_m1 = match.group(2)
- # Same for master 2
- regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)'
- match=re.search(regexp, logtext)
- host_m2 = 'localhost.localdomain'
- if (match is not None):
- host_m2 = match.group(2)
- return (host_m1, host_m2)

@pytest.mark.ds50545
@pytest.mark.bz1739718
@@ -115,6 +97,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
m1 = topology_m2.ms["master1"]
m2 = topology_m2.ms["master2"]

+ # Enable ldapi if not already done.
+ for inst in [topology_m2.ms["master1"], topology_m2.ms["master2"]]:
+ if not inst.can_autobind():
+ # Update ns-slapd instance
+ inst.config.set('nsslapd-ldapilisten', 'on')
+ inst.config.set('nsslapd-ldapiautobind', 'on')
+ inst.restart()
+ # Ensure that updates have been sent both ways.
+ replicas = Replicas(m1)
+ replica = replicas.get(DEFAULT_SUFFIX)
+ replica.test_replication([m2])
+ replicas = Replicas(m2)
+ replica = replicas.get(DEFAULT_SUFFIX)
+ replica.test_replication([m1])
+
+ alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')',
+ 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')']
+
connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port)
content_list = ['Replica Root: dc=example,dc=com',
'Replica ID: 1',
@@ -177,9 +177,20 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
'001',
m1.host + ':' + str(m1.port)]

+ dsrc_content = '[repl-monitor-connections]\n' \
+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ '\n' \
+ '[repl-monitor-aliases]\n' \
+ 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
+ 'M2 = ' + m2.host + ':' + str(m2.port)
+
connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM]

+ aliases = ['M1=' + m1.host + ':' + str(m1.port),
+ 'M2=' + m2.host + ':' + str(m2.port)]
+
args = FakeArgs()
args.connections = connections
args.aliases = None
@@ -187,24 +198,8 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):

log.info('Run replication monitor with connections option')
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
- (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port)
check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)

- # Prepare the data for next tests
- aliases = ['M1=' + host_m1 + ':' + str(m1.port),
- 'M2=' + host_m2 + ':' + str(m2.port)]
-
- alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')',
- 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')']
-
- dsrc_content = '[repl-monitor-connections]\n' \
- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- '\n' \
- '[repl-monitor-aliases]\n' \
- 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
- 'M2 = ' + m2.host + ':' + str(m2.port)
-
log.info('Run replication monitor with aliases option')
args.aliases = aliases
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
--
2.26.2
@ -1,51 +0,0 @@
From f84e75de9176218d3b47a447d07fe8fb7ca3d72f Mon Sep 17 00:00:00 2001
From: Barbora Simonova <bsmejkal@redhat.com>
Date: Mon, 11 Jan 2021 15:51:24 +0100
Subject: [PATCH] Issue 4315 - performance search rate: nagle triggers high
rate of setsocketopt

Description:
The config value of nsslapd-nagle is now set to 'off' by default.
Added a test case that checks the value.

Relates: https://github.com/389ds/389-ds-base/issues/4315

Reviewed by: droideck (Thanks!)
---
.../tests/suites/config/config_test.py | 20 +++++++++++++++++++
1 file changed, 20 insertions(+)

diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
index 38d1ed9ac..fda16a530 100644
--- a/dirsrvtests/tests/suites/config/config_test.py
+++ b/dirsrvtests/tests/suites/config/config_test.py
@@ -41,6 +41,26 @@ def big_file():
return TEMP_BIG_FILE


+@pytest.mark.bz1897248
+@pytest.mark.ds4315
+@pytest.mark.skipif(ds_is_older('1.4.3.16'), reason="This config setting exists in 1.4.3.16 and higher")
+def test_nagle_default_value(topo):
+ """Test that nsslapd-nagle attribute is off by default
+
+ :id: 00361f5d-d638-4d39-8231-66fa52637203
+ :setup: Standalone instance
+ :steps:
+ 1. Create instance
+ 2. Check the value of nsslapd-nagle
+ :expectedresults:
+ 1. Success
+ 2. The value of nsslapd-nagle should be off
+ """
+
+ log.info('Check the value of nsslapd-nagle attribute is off by default')
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-nagle') == 'off'
+
+
def test_maxbersize_repl(topology_m2, big_file):
"""maxbersize is ignored in the replicated operations.

--
2.26.2
@ -0,0 +1,39 @@
From a2a51130b2f95316237b85da099a8be734969e54 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Sat, 24 Apr 2021 21:37:54 +0100
Subject: [PATCH] Issue 4734 - import of entry with no parent warning (#4735)

Description: Online import of an ldif file that contains an entry with
no parent doesn't generate a task warning.

Fixes: https://github.com/389ds/389-ds-base/issues/4734

Author: vashirov@redhat.com (Thanks)

Reviewed by: mreynolds, jchapma
---
ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c | 6 ++++++
1 file changed, 6 insertions(+)

diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
index 905a84e74..35183ed59 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
@@ -2767,8 +2767,14 @@ import_foreman(void *param)
if (job->flags & FLAG_ABORT) {
goto error;
}
+
+ /* capture skipped entry warnings for this task */
+ if((job) && (job->skipped)) {
+ slapi_task_set_warning(job->task, WARN_SKIPPED_IMPORT_ENTRY);
+ }
}

+
slapi_pblock_destroy(pb);
info->state = FINISHED;
return;
--
2.31.1
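Together with the lib389 changes earlier in this series, the warning this patch sets becomes observable from a client. A hedged sketch, where 'inst' and the orphan LDIF are invented for illustration and the only assumption from the patches is that a skipped entry yields a non-empty nsTaskWarning:

    from lib389.tasks import ImportTask

    # Assumes /tmp/orphan.ldif contains an entry whose parent is missing,
    # so the import foreman skips it and flags WARN_SKIPPED_IMPORT_ENTRY.
    task = ImportTask(inst)
    task.import_suffix_from_ldif(ldiffile='/tmp/orphan.ldif',
                                 suffix='dc=example,dc=com')
    task.wait()

    # The C side records the warning via slapi_task_set_warning();
    # lib389 surfaces it as an integer warning code.
    assert task.get_task_warn() is not None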
@ -1,98 +0,0 @@
From 00ccec335792e3fa44712427463c64eb1ff9c5be Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 12 Jan 2021 17:45:41 +0100
Subject: [PATCH] Issue 4504 - insure that repl_monitor_test use ldapi (for
RHEL) - fix merge issue (#4533)

(cherry picked from commit a880fddc192414d6283ea6832491b7349e5471dc)
---
.../tests/suites/clu/repl_monitor_test.py | 47 ++++++++++++++-----
1 file changed, 36 insertions(+), 11 deletions(-)

diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
index b2cb840b3..caf6a9099 100644
--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
@@ -9,6 +9,7 @@
import time
import subprocess
import pytest
+import re

from lib389.cli_conf.replication import get_repl_monitor_info
from lib389.tasks import *
@@ -69,6 +70,25 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No
log.info('Reset log file')
f.truncate(0)

+def get_hostnames_from_log(port1, port2):
+ # Get the supplier host names as displayed in replication monitor output
+ with open(LOG_FILE, 'r') as logfile:
+ logtext = logfile.read()
+ # search for Supplier :hostname:port
+ # and use \D to insure there is no more number is after
+ # the matched port (i.e that 10 is not matching 101)
+ regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)'
+ match=re.search(regexp, logtext)
+ host_m1 = 'localhost.localdomain'
+ if (match is not None):
+ host_m1 = match.group(2)
+ # Same for master 2
+ regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)'
+ match=re.search(regexp, logtext)
+ host_m2 = 'localhost.localdomain'
+ if (match is not None):
+ host_m2 = match.group(2)
+ return (host_m1, host_m2)

@pytest.mark.ds50545
@pytest.mark.bz1739718
@@ -177,20 +197,9 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
'001',
m1.host + ':' + str(m1.port)]

- dsrc_content = '[repl-monitor-connections]\n' \
- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- '\n' \
- '[repl-monitor-aliases]\n' \
- 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
- 'M2 = ' + m2.host + ':' + str(m2.port)
-
connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM]

- aliases = ['M1=' + m1.host + ':' + str(m1.port),
- 'M2=' + m2.host + ':' + str(m2.port)]
-
args = FakeArgs()
args.connections = connections
args.aliases = None
@@ -198,8 +207,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):

log.info('Run replication monitor with connections option')
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port)
check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)

+ # Prepare the data for next tests
+ aliases = ['M1=' + host_m1 + ':' + str(m1.port),
+ 'M2=' + host_m2 + ':' + str(m2.port)]
+
+ alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')',
+ 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')']
+
+ dsrc_content = '[repl-monitor-connections]\n' \
+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ '\n' \
+ '[repl-monitor-aliases]\n' \
+ 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \
+ 'M2 = ' + host_m2 + ':' + str(m2.port)
+
log.info('Run replication monitor with aliases option')
args.aliases = aliases
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
--
2.26.2
@ -0,0 +1,37 @@
From f9bc249b2baa11a8ac0eb54e4077eb706d137e38 Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Thu, 19 Aug 2021 11:06:06 +1000
Subject: [PATCH] Issue 4872 - BUG - entryuuid enabled by default causes
replication issues (#4876)

Bug Description: Due to older servers missing the syntax
plugin this breaks schema replication and causes cascading
errors.

Fix Description: This changes the syntax to be a case
insensitive string, while leaving the plugins in place
for other usage.

fixes: https://github.com/389ds/389-ds-base/issues/4872

Author: William Brown <william@blackhats.net.au>

Review by: @mreynolds389 @progier389
---
ldap/schema/03entryuuid.ldif | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/ldap/schema/03entryuuid.ldif b/ldap/schema/03entryuuid.ldif
index cbde981fe..f7a7f40d5 100644
--- a/ldap/schema/03entryuuid.ldif
+++ b/ldap/schema/03entryuuid.ldif
@@ -13,4 +13,5 @@ dn: cn=schema
#
# attributes
#
-attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
+# attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
+attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
--
2.31.1
@ -1,70 +0,0 @@
From 2afc65fd1750afcb1667545da5625f5a932aacdd Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Wed, 13 Jan 2021 15:16:08 +0100
Subject: [PATCH] Issue 4528 - Fix cn=monitor SCOPE_ONE search (#4529)

Bug Description: An ldapsearch on "cn=monitor" with -s one
throws err=32.

Fix Description: 'cn=monitor' is not a real entry, so we should not
try to check whether the searched suffix (cn=monitor or its children)
belongs to the searched backend.

Fixes: #4528

Reviewed by: @mreynolds389 @Firstyear @tbordaz (Thanks!)
---
ldap/servers/slapd/opshared.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index c0bc5dcd0..f5ed71144 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -240,6 +240,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
int rc = 0;
int internal_op;
Slapi_DN *basesdn = NULL;
+ Slapi_DN monitorsdn = {0};
Slapi_DN *sdn = NULL;
Slapi_Operation *operation = NULL;
Slapi_Entry *referral = NULL;
@@ -765,9 +766,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
}
} else {
/* be_suffix null means that we are searching the default backend
- * -> don't change the search parameters in pblock
- */
- if (be_suffix != NULL) {
+ * -> don't change the search parameters in pblock
+ * Also, we skip this block for 'cn=monitor' search and its subsearches
+ * as they are done by callbacks from monitor.c */
+ slapi_sdn_init_dn_byref(&monitorsdn, "cn=monitor");
+ if (!((be_suffix == NULL) || slapi_sdn_issuffix(basesdn, &monitorsdn))) {
if ((be_name == NULL) && (scope == LDAP_SCOPE_ONELEVEL)) {
/* one level searches
* - depending on the suffix of the backend we might have to
@@ -789,8 +792,10 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
} else if (slapi_sdn_issuffix(basesdn, be_suffix)) {
int tmp_scope = LDAP_SCOPE_ONELEVEL;
slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &tmp_scope);
- } else
+ } else {
+ slapi_sdn_done(&monitorsdn);
goto next_be;
+ }
}

/* subtree searches :
@@ -811,7 +816,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
}
}
}
-
+ slapi_sdn_done(&monitorsdn);
slapi_pblock_set(pb, SLAPI_BACKEND, be);
slapi_pblock_set(pb, SLAPI_PLUGIN, be->be_database);
slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, NULL);
--
2.26.2
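The fix can be exercised directly: a one-level search under cn=monitor should no longer surface err=32. A python-ldap sketch, where the URI and credentials are placeholders:

    import ldap

    conn = ldap.initialize('ldap://localhost:389')
    conn.simple_bind_s('cn=directory manager', 'password')

    # Before the fix this raised NO_SUCH_OBJECT (err=32); with it, the
    # search returns the monitor children produced by the callbacks in
    # monitor.c.
    results = conn.search_s('cn=monitor', ldap.SCOPE_ONELEVEL, '(objectclass=*)')
    print(len(results))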
125
SOURCES/0029-Remove-GOST-YESCRYPT-password-sotrage-scheme.patch
Normal file
@ -0,0 +1,125 @@
From 120511d35095a48d60abbb7cb2367d0c30fbc757 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 25 Aug 2021 13:20:56 -0400
Subject: [PATCH] Remove GOST-YESCRYPT password storage scheme

---
.../tests/suites/password/pwd_algo_test.py | 1 -
ldap/ldif/template-dse-minimal.ldif.in | 9 ---------
ldap/ldif/template-dse.ldif.in | 9 ---------
ldap/servers/plugins/pwdstorage/pwd_init.c | 18 ------------------
ldap/servers/slapd/fedse.c | 13 -------------
5 files changed, 50 deletions(-)

diff --git a/dirsrvtests/tests/suites/password/pwd_algo_test.py b/dirsrvtests/tests/suites/password/pwd_algo_test.py
index 66bda420e..88f8e40b7 100644
--- a/dirsrvtests/tests/suites/password/pwd_algo_test.py
+++ b/dirsrvtests/tests/suites/password/pwd_algo_test.py
@@ -124,7 +124,6 @@ def _test_algo_for_pbkdf2(inst, algo_name):
('CLEAR', 'CRYPT', 'CRYPT-MD5', 'CRYPT-SHA256', 'CRYPT-SHA512',
'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA',
'SSHA256', 'SSHA384', 'SSHA512', 'PBKDF2_SHA256', 'DEFAULT',
- 'GOST_YESCRYPT',
))
def test_pwd_algo_test(topology_st, algo):
"""Assert that all of our password algorithms correctly PASS and FAIL varying
diff --git a/ldap/ldif/template-dse-minimal.ldif.in b/ldap/ldif/template-dse-minimal.ldif.in
index 2eccae9b2..1a05f4a67 100644
--- a/ldap/ldif/template-dse-minimal.ldif.in
+++ b/ldap/ldif/template-dse-minimal.ldif.in
@@ -194,15 +194,6 @@ nsslapd-pluginarg1: nsds5ReplicaCredentials
nsslapd-pluginid: aes-storage-scheme
nsslapd-pluginprecedence: 1

-dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config
-objectclass: top
-objectclass: nsSlapdPlugin
-cn: GOST_YESCRYPT
-nsslapd-pluginpath: libpwdstorage-plugin
-nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init
-nsslapd-plugintype: pwdstoragescheme
-nsslapd-pluginenabled: on
-
dn: cn=Syntax Validation Task,cn=plugins,cn=config
objectclass: top
objectclass: nsSlapdPlugin
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index 7e7480cba..f30531bec 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -242,15 +242,6 @@ nsslapd-pluginarg2: nsds5ReplicaBootstrapCredentials
nsslapd-pluginid: aes-storage-scheme
nsslapd-pluginprecedence: 1

-dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config
-objectclass: top
-objectclass: nsSlapdPlugin
-cn: GOST_YESCRYPT
-nsslapd-pluginpath: libpwdstorage-plugin
-nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init
-nsslapd-plugintype: pwdstoragescheme
-nsslapd-pluginenabled: on
-
dn: cn=Syntax Validation Task,cn=plugins,cn=config
objectclass: top
objectclass: nsSlapdPlugin
diff --git a/ldap/servers/plugins/pwdstorage/pwd_init.c b/ldap/servers/plugins/pwdstorage/pwd_init.c
index 606e63404..59cfc4684 100644
--- a/ldap/servers/plugins/pwdstorage/pwd_init.c
+++ b/ldap/servers/plugins/pwdstorage/pwd_init.c
@@ -52,8 +52,6 @@ static Slapi_PluginDesc smd5_pdesc = {"smd5-password-storage-scheme", VENDOR, DS

static Slapi_PluginDesc pbkdf2_sha256_pdesc = {"pbkdf2-sha256-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Salted PBKDF2 SHA256 hash algorithm (PBKDF2_SHA256)"};

-static Slapi_PluginDesc gost_yescrypt_pdesc = {"gost-yescrypt-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Yescrypt KDF algorithm (Streebog256)"};
-
static char *plugin_name = "NSPwdStoragePlugin";

int
@@ -431,19 +429,3 @@ pbkdf2_sha256_pwd_storage_scheme_init(Slapi_PBlock *pb)
return rc;
}

-int
-gost_yescrypt_pwd_storage_scheme_init(Slapi_PBlock *pb)
-{
- int rc;
-
- slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "=> gost_yescrypt_pwd_storage_scheme_init\n");
-
- rc = slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, (void *)SLAPI_PLUGIN_VERSION_01);
- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&gost_yescrypt_pdesc);
- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_ENC_FN, (void *)gost_yescrypt_pw_enc);
- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *)gost_yescrypt_pw_cmp);
- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, GOST_YESCRYPT_SCHEME_NAME);
-
- slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "<= gost_yescrypt_pwd_storage_scheme_init %d\n", rc);
- return rc;
-}
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
index 44159c991..24b7ed11c 100644
--- a/ldap/servers/slapd/fedse.c
+++ b/ldap/servers/slapd/fedse.c
@@ -203,19 +203,6 @@ static const char *internal_entries[] =
"nsslapd-pluginVersion: none\n"
"nsslapd-pluginVendor: 389 Project\n"
"nsslapd-pluginDescription: CRYPT-SHA512\n",
-
- "dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config\n"
- "objectclass: top\n"
- "objectclass: nsSlapdPlugin\n"
- "cn: GOST_YESCRYPT\n"
- "nsslapd-pluginpath: libpwdstorage-plugin\n"
- "nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init\n"
- "nsslapd-plugintype: pwdstoragescheme\n"
- "nsslapd-pluginenabled: on\n"
- "nsslapd-pluginId: GOST_YESCRYPT\n"
- "nsslapd-pluginVersion: none\n"
- "nsslapd-pluginVendor: 389 Project\n"
- "nsslapd-pluginDescription: GOST_YESCRYPT\n",
};

static int NUM_INTERNAL_ENTRIES = sizeof(internal_entries) / sizeof(internal_entries[0]);
--
2.31.1
File diff suppressed because it is too large
@ -0,0 +1,44 @@
From df0ccce06259b9ef06d522e61da4e3ffcbbf5016 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 25 Aug 2021 16:54:57 -0400
Subject: [PATCH] Issue 4884 - server crashes when dnaInterval attribute is set
to zero

Bug Description:

A division by zero crash occurs if the dnaInterval is set to zero

Fix Description:

Validate the config value of dnaInterval and adjust it to the
default/safe value of "1" if needed.

relates: https://github.com/389ds/389-ds-base/issues/4884

Reviewed by: tbordaz(Thanks!)
---
ldap/servers/plugins/dna/dna.c | 7 +++++++
1 file changed, 7 insertions(+)

diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index 928a3f54a..c983ebdd0 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -1025,7 +1025,14 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)

value = slapi_entry_attr_get_charptr(e, DNA_INTERVAL);
if (value) {
+ errno = 0;
entry->interval = strtoull(value, 0, 0);
+ if (entry->interval == 0 || errno == ERANGE) {
+ slapi_log_err(SLAPI_LOG_WARNING, DNA_PLUGIN_SUBSYSTEM,
+ "dna_parse_config_entry - Invalid value for dnaInterval (%s), "
+ "Using default value of 1\n", value);
+ entry->interval = 1;
+ }
slapi_ch_free_string(&value);
}

--
2.31.1
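The validation here is the usual strtoull() idiom: clear errno, convert, then reject zero and range errors rather than letting a zero interval reach a division later. The same logic sketched in Python, with a hypothetical helper name and nothing taken from dna.c beyond the clamping rule:

    def parse_dna_interval(value, default=1):
        """Parse a dnaInterval-style setting, falling back to a safe default."""
        try:
            interval = int(value, 0)  # base 0 accepts decimal/hex/octal, like strtoull(..., 0)
        except (TypeError, ValueError):
            return default
        if interval <= 0:
            # Zero would later be used as a step/divisor; clamp to the default.
            return default
        return interval

    assert parse_dna_interval("5") == 5
    assert parse_dna_interval("0") == 1
    assert parse_dna_interval("junk") == 1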
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,782 +0,0 @@
From 788d7c69a446d1ae324b2c58daaa5d4fd5528748 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 20 Jan 2021 16:42:15 -0500
Subject: [PATCH 1/3] Issue 4542 - Search results are different between RHDS10
and RHDS11

Bug Description: In 1.4.x we introduced a change that was overly strict about
how a search on a non-existent subtree returned its error code.
It was changed from returning an error 32 to an error 0 with
zero entries returned.

Fix Description: When finding the entry and processing acl's make sure to
gather the aci's that match the resource even if the resource
does not exist. This requires some extra checks when processing
the target attribute.

relates: https://github.com/389ds/389-ds-base/issues/4542

Reviewed by: firstyear, elkris, and tbordaz (Thanks!)

Apply Thierry's changes

round 2

Apply more suggestions from Thierry
---
dirsrvtests/tests/suites/acl/misc_test.py | 108 +++++++-
ldap/servers/plugins/acl/acl.c | 296 ++++++++++------------
ldap/servers/slapd/back-ldbm/findentry.c | 6 +-
src/lib389/lib389/_mapped_object.py | 4 +-
4 files changed, 239 insertions(+), 175 deletions(-)

diff --git a/dirsrvtests/tests/suites/acl/misc_test.py b/dirsrvtests/tests/suites/acl/misc_test.py
index 5f0e3eb72..c640e60ad 100644
--- a/dirsrvtests/tests/suites/acl/misc_test.py
+++ b/dirsrvtests/tests/suites/acl/misc_test.py
@@ -12,7 +12,7 @@ import ldap
import os
import pytest

-from lib389._constants import DEFAULT_SUFFIX, PW_DM
+from lib389._constants import DEFAULT_SUFFIX, PW_DM, DN_DM
from lib389.idm.user import UserAccount, UserAccounts
from lib389._mapped_object import DSLdapObject
from lib389.idm.account import Accounts, Anonymous
@@ -408,14 +408,112 @@ def test_do_bind_as_201_distinct_users(topo, clean, aci_of_user):
user = uas.create_test_user(uid=i, gid=i)
user.set('userPassword', PW_DM)

- for i in range(len(uas.list())):
- uas.list()[i].bind(PW_DM)
+ users = uas.list()
+ for user in users:
+ user.bind(PW_DM)

ACLPlugin(topo.standalone).replace("nsslapd-aclpb-max-selected-acls", '220')
topo.standalone.restart()

- for i in range(len(uas.list())):
- uas.list()[i].bind(PW_DM)
+ users = uas.list()
+ for user in users:
+ user.bind(PW_DM)
+
+
+def test_info_disclosure(request, topo):
+ """Test that a search returns 32 when base entry does not exist
+
+ :id: f6dec4c2-65a3-41e4-a4c0-146196863333
+ :setup: Standalone Instance
+ :steps:
+ 1. Add aci
+ 2. Add test user
+ 3. Bind as user and search for non-existent entry
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Error 32 is returned
+ """
+
+ ACI_TARGET = "(targetattr = \"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX)
+ ACI_ALLOW = "(version 3.0; acl \"Read/Search permission for all users\"; allow (read,search)"
+ ACI_SUBJECT = "(userdn=\"ldap:///all\");)"
+ ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
+
+ # Get current ACi's so we can restore them when we are done
+ suffix = Domain(topo.standalone, DEFAULT_SUFFIX)
+ preserved_acis = suffix.get_attr_vals_utf8('aci')
+
+ def finofaci():
+ domain = Domain(topo.standalone, DEFAULT_SUFFIX)
+ try:
+ domain.remove_all('aci')
+ domain.replace_values('aci', preserved_acis)
+ except:
+ pass
+ request.addfinalizer(finofaci)
+
+ # Remove aci's
+ suffix.remove_all('aci')
+
+ # Add test user
+ USER_DN = "uid=test,ou=people," + DEFAULT_SUFFIX
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
+ users.create(properties={
+ 'uid': 'test',
+ 'cn': 'test',
+ 'sn': 'test',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/test',
+ 'userPassword': PW_DM
+ })
+
+ # bind as user
+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM)
+
+ # Search fo existing base DN
+ test = Domain(conn, DEFAULT_SUFFIX)
+ try:
+ test.get_attr_vals_utf8_l('dc')
+ assert False
+ except IndexError:
+ pass
+
+ # Search for a non existent bases
+ subtree = Domain(conn, "ou=does_not_exist," + DEFAULT_SUFFIX)
+ try:
+ subtree.get_attr_vals_utf8_l('objectclass')
+ except IndexError:
+ pass
+ subtree = Domain(conn, "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX)
+ try:
+ subtree.get_attr_vals_utf8_l('objectclass')
+ except IndexError:
+ pass
+ # Try ONE level search instead of BASE
+ try:
+ Accounts(conn, "ou=does_not_exist," + DEFAULT_SUFFIX).filter("(objectclass=top)", ldap.SCOPE_ONELEVEL)
+ except IndexError:
+ pass
+
+ # add aci
+ suffix.add('aci', ACI)
+
+ # Search for a non existent entry which should raise an exception
+ with pytest.raises(ldap.NO_SUCH_OBJECT):
+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM)
+ subtree = Domain(conn, "ou=does_not_exist," + DEFAULT_SUFFIX)
+ subtree.get_attr_vals_utf8_l('objectclass')
+ with pytest.raises(ldap.NO_SUCH_OBJECT):
+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM)
+ subtree = Domain(conn, "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX)
+ subtree.get_attr_vals_utf8_l('objectclass')
+ with pytest.raises(ldap.NO_SUCH_OBJECT):
+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM)
+ DN = "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX
+ Accounts(conn, DN).filter("(objectclass=top)", ldap.SCOPE_ONELEVEL, strict=True)
+

if __name__ == "__main__":
CURRENT_FILE = os.path.realpath(__file__)
diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c
index 41a909a18..4e811f73a 100644
--- a/ldap/servers/plugins/acl/acl.c
+++ b/ldap/servers/plugins/acl/acl.c
@@ -2111,10 +2111,11 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
aci_right = aci->aci_access;
res_right = aclpb->aclpb_access;
if (!(aci_right & res_right)) {
- /* If we are looking for read/search and the acl has read/search
- ** then go further because if targets match we may keep that
- ** acl in the entry cache list.
- */
+ /*
+ * If we are looking for read/search and the acl has read/search
+ * then go further because if targets match we may keep that
+ * acl in the entry cache list.
+ */
if (!((res_right & (SLAPI_ACL_SEARCH | SLAPI_ACL_READ)) &&
(aci_right & (SLAPI_ACL_SEARCH | SLAPI_ACL_READ)))) {
matches = ACL_FALSE;
@@ -2122,30 +2123,29 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
}
}

-
- /* first Let's see if the entry is under the subtree where the
- ** ACL resides. We can't let somebody affect a target beyond the
- ** scope of where the ACL resides
- ** Example: ACL is located in "ou=engineering, o=ace industry, c=us
- ** but if the target is "o=ace industry, c=us", then we are in trouble.
- **
- ** If the aci is in the rootdse and the entry is not, then we do not
- ** match--ie. acis in the rootdse do NOT apply below...for the moment.
- **
- */
+ /*
+ * First Let's see if the entry is under the subtree where the
+ * ACL resides. We can't let somebody affect a target beyond the
+ * scope of where the ACL resides
+ * Example: ACL is located in "ou=engineering, o=ace industry, c=us
+ * but if the target is "o=ace industry, c=us", then we are in trouble.
+ *
+ * If the aci is in the rootdse and the entry is not, then we do not
+ * match--ie. acis in the rootdse do NOT apply below...for the moment.
+ */
res_ndn = slapi_sdn_get_ndn(aclpb->aclpb_curr_entry_sdn);
aci_ndn = slapi_sdn_get_ndn(aci->aci_sdn);
- if (!slapi_sdn_issuffix(aclpb->aclpb_curr_entry_sdn, aci->aci_sdn) || (!slapi_is_rootdse(res_ndn) && slapi_is_rootdse(aci_ndn))) {
-
- /* cant' poke around */
+ if (!slapi_sdn_issuffix(aclpb->aclpb_curr_entry_sdn, aci->aci_sdn) ||
+ (!slapi_is_rootdse(res_ndn) && slapi_is_rootdse(aci_ndn)))
+ {
+ /* can't poke around */
matches = ACL_FALSE;
goto acl__resource_match_aci_EXIT;
}

/*
- ** We have a single ACI which we need to find if it applies to
- ** the resource or not.
- */
+ * We have a single ACI which we need to find if it applies to the resource or not.
+ */
if ((aci->aci_type & ACI_TARGET_DN) && (aclpb->aclpb_curr_entry_sdn)) {
char *avaType;
struct berval *avaValue;
@@ -2173,25 +2173,23 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
char *avaType;
struct berval *avaValue;
char logbuf[1024];
-
- /* We are evaluating the moddn permission.
- * The aci contains target_to and target_from
- *
- * target_to filter must be checked against the resource ndn that was stored in
- * aclpb->aclpb_curr_entry_sdn
- *
- * target_from filter must be check against the entry ndn that is in aclpb->aclpb_moddn_source_sdn
- * (sdn was stored in the pblock)
- */
+ /*
+ * We are evaluating the moddn permission.
+ * The aci contains target_to and target_from
+ *
+ * target_to filter must be checked against the resource ndn that was stored in
+ * aclpb->aclpb_curr_entry_sdn
+ *
+ * target_from filter must be check against the entry ndn that is in aclpb->aclpb_moddn_source_sdn
+ * (sdn was stored in the pblock)
+ */
if (aci->target_to) {
f = aci->target_to;
dn_matched = ACL_TRUE;

/* Now check if the filter is a simple or substring filter */
if (aci->aci_type & ACI_TARGET_MODDN_TO_PATTERN) {
- /* This is a filter with substring
- * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com
- */
+ /* This is a filter with substring e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com */
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_to substring: %s\n",
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
if ((rv = acl_match_substring(f, (char *)res_ndn, 0 /* match suffix */)) != ACL_TRUE) {
@@ -2204,9 +2202,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
}
}
} else {
- /* This is a filter without substring
- * e.g. ldap:///cn=accounts,dc=example,dc=com
- */
+ /* This is a filter without substring e.g. ldap:///cn=accounts,dc=example,dc=com */
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_to: %s\n",
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
slapi_filter_get_ava(f, &avaType, &avaValue);
@@ -2230,8 +2226,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
/* Now check if the filter is a simple or substring filter */
if (aci->aci_type & ACI_TARGET_MODDN_FROM_PATTERN) {
/* This is a filter with substring
- * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com
- */
+ * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com
+ */
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_from substring: %s\n",
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
if ((rv = acl_match_substring(f, (char *)slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn), 0 /* match suffix */)) != ACL_TRUE) {
@@ -2243,11 +2239,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
goto acl__resource_match_aci_EXIT;
}
}
-
} else {
- /* This is a filter without substring
- * e.g. ldap:///cn=accounts,dc=example,dc=com
- */
+ /* This is a filter without substring e.g. ldap:///cn=accounts,dc=example,dc=com */
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_from: %s\n",
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
if (!slapi_dn_issuffix(slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn), avaValue->bv_val)) {
@@ -2269,10 +2262,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
||||
res_right = aclpb->aclpb_access;
|
||||
if (!(aci_right & res_right)) {
|
||||
- /* If we are looking for read/search and the acl has read/search
|
||||
- ** then go further because if targets match we may keep that
|
||||
- ** acl in the entry cache list.
|
||||
- */
|
||||
+ /*
|
||||
+ * If we are looking for read/search and the acl has read/search
|
||||
+ * then go further because if targets match we may keep that
|
||||
+ * acl in the entry cache list.
|
||||
+ */
|
||||
if (!((res_right & (SLAPI_ACL_SEARCH | SLAPI_ACL_READ)) &&
|
||||
(aci_right & (SLAPI_ACL_SEARCH | SLAPI_ACL_READ)))) {
|
||||
matches = ACL_FALSE;
|
||||
@@ -2122,30 +2123,29 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
}
|
||||
}
|
||||
|
||||
-
|
||||
- /* first Let's see if the entry is under the subtree where the
|
||||
- ** ACL resides. We can't let somebody affect a target beyond the
|
||||
- ** scope of where the ACL resides
|
||||
- ** Example: ACL is located in "ou=engineering, o=ace industry, c=us
|
||||
- ** but if the target is "o=ace industry, c=us", then we are in trouble.
|
||||
- **
|
||||
- ** If the aci is in the rootdse and the entry is not, then we do not
|
||||
- ** match--ie. acis in the rootdse do NOT apply below...for the moment.
|
||||
- **
|
||||
- */
|
||||
+ /*
|
||||
+ * First Let's see if the entry is under the subtree where the
|
||||
+ * ACL resides. We can't let somebody affect a target beyond the
|
||||
+ * scope of where the ACL resides
|
||||
+ * Example: ACL is located in "ou=engineering, o=ace industry, c=us
|
||||
+ * but if the target is "o=ace industry, c=us", then we are in trouble.
|
||||
+ *
|
||||
+ * If the aci is in the rootdse and the entry is not, then we do not
|
||||
+ * match--ie. acis in the rootdse do NOT apply below...for the moment.
|
||||
+ */
|
||||
res_ndn = slapi_sdn_get_ndn(aclpb->aclpb_curr_entry_sdn);
|
||||
aci_ndn = slapi_sdn_get_ndn(aci->aci_sdn);
|
||||
- if (!slapi_sdn_issuffix(aclpb->aclpb_curr_entry_sdn, aci->aci_sdn) || (!slapi_is_rootdse(res_ndn) && slapi_is_rootdse(aci_ndn))) {
|
||||
-
|
||||
- /* cant' poke around */
|
||||
+ if (!slapi_sdn_issuffix(aclpb->aclpb_curr_entry_sdn, aci->aci_sdn) ||
|
||||
+ (!slapi_is_rootdse(res_ndn) && slapi_is_rootdse(aci_ndn)))
|
||||
+ {
|
||||
+ /* can't poke around */
|
||||
matches = ACL_FALSE;
|
||||
goto acl__resource_match_aci_EXIT;
|
||||
}
|
||||
|
||||
/*
|
||||
- ** We have a single ACI which we need to find if it applies to
|
||||
- ** the resource or not.
|
||||
- */
|
||||
+ * We have a single ACI which we need to find if it applies to the resource or not.
|
||||
+ */
|
||||
if ((aci->aci_type & ACI_TARGET_DN) && (aclpb->aclpb_curr_entry_sdn)) {
|
||||
char *avaType;
|
||||
struct berval *avaValue;
|
||||
@@ -2173,25 +2173,23 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
char *avaType;
|
||||
struct berval *avaValue;
|
||||
char logbuf[1024];
|
||||
-
|
||||
- /* We are evaluating the moddn permission.
|
||||
- * The aci contains target_to and target_from
|
||||
- *
|
||||
- * target_to filter must be checked against the resource ndn that was stored in
|
||||
- * aclpb->aclpb_curr_entry_sdn
|
||||
- *
|
||||
- * target_from filter must be check against the entry ndn that is in aclpb->aclpb_moddn_source_sdn
|
||||
- * (sdn was stored in the pblock)
|
||||
- */
|
||||
+ /*
|
||||
+ * We are evaluating the moddn permission.
|
||||
+ * The aci contains target_to and target_from
|
||||
+ *
|
||||
+ * target_to filter must be checked against the resource ndn that was stored in
|
||||
+ * aclpb->aclpb_curr_entry_sdn
|
||||
+ *
|
||||
+ * target_from filter must be check against the entry ndn that is in aclpb->aclpb_moddn_source_sdn
|
||||
+ * (sdn was stored in the pblock)
|
||||
+ */
|
||||
if (aci->target_to) {
|
||||
f = aci->target_to;
|
||||
dn_matched = ACL_TRUE;
|
||||
|
||||
/* Now check if the filter is a simple or substring filter */
|
||||
if (aci->aci_type & ACI_TARGET_MODDN_TO_PATTERN) {
|
||||
- /* This is a filter with substring
|
||||
- * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com
|
||||
- */
|
||||
+ /* This is a filter with substring e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com */
|
||||
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_to substring: %s\n",
|
||||
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
|
||||
if ((rv = acl_match_substring(f, (char *)res_ndn, 0 /* match suffix */)) != ACL_TRUE) {
|
||||
@@ -2204,9 +2202,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
}
|
||||
}
|
||||
} else {
|
||||
- /* This is a filter without substring
|
||||
- * e.g. ldap:///cn=accounts,dc=example,dc=com
|
||||
- */
|
||||
+ /* This is a filter without substring e.g. ldap:///cn=accounts,dc=example,dc=com */
|
||||
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_to: %s\n",
|
||||
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
|
||||
slapi_filter_get_ava(f, &avaType, &avaValue);
|
||||
@@ -2230,8 +2226,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
/* Now check if the filter is a simple or substring filter */
|
||||
if (aci->aci_type & ACI_TARGET_MODDN_FROM_PATTERN) {
|
||||
/* This is a filter with substring
|
||||
- * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com
|
||||
- */
|
||||
+ * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com
|
||||
+ */
|
||||
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_from substring: %s\n",
|
||||
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
|
||||
if ((rv = acl_match_substring(f, (char *)slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn), 0 /* match suffix */)) != ACL_TRUE) {
|
||||
@@ -2243,11 +2239,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
goto acl__resource_match_aci_EXIT;
|
||||
}
|
||||
}
|
||||
-
|
||||
} else {
|
||||
- /* This is a filter without substring
|
||||
- * e.g. ldap:///cn=accounts,dc=example,dc=com
|
||||
- */
|
||||
+ /* This is a filter without substring e.g. ldap:///cn=accounts,dc=example,dc=com */
|
||||
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_from: %s\n",
|
||||
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
|
||||
if (!slapi_dn_issuffix(slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn), avaValue->bv_val)) {
|
||||
@@ -2269,10 +2262,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
}
|
||||
|
||||
if (aci->aci_type & ACI_TARGET_PATTERN) {
|
||||
-
|
||||
f = aci->target;
|
||||
dn_matched = ACL_TRUE;
|
||||
-
|
||||
if ((rv = acl_match_substring(f, (char *)res_ndn, 0 /* match suffux */)) != ACL_TRUE) {
|
||||
dn_matched = ACL_FALSE;
|
||||
if (rv == ACL_ERR) {
|
||||
@@ -2296,7 +2287,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
|
||||
/*
|
||||
* Is it a (target="ldap://cn=*,($dn),o=sun.com") kind of thing.
|
||||
- */
|
||||
+ */
|
||||
if (aci->aci_type & ACI_TARGET_MACRO_DN) {
|
||||
/*
|
||||
* See if the ($dn) component matches the string and
|
||||
@@ -2306,8 +2297,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
* entry is the same one don't recalculate it--
|
||||
* this flag only works for search right now, could
|
||||
* also optimise for mods by making it work for mods.
|
||||
- */
|
||||
-
|
||||
+ */
|
||||
if ((aclpb->aclpb_res_type & ACLPB_NEW_ENTRY) == 0) {
|
||||
/*
|
||||
* Here same entry so just look up the matched value,
|
||||
@@ -2356,8 +2346,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
* If there is already an entry for this aci in this
|
||||
* aclpb then remove it--it's an old value for a
|
||||
* different entry.
|
||||
- */
|
||||
-
|
||||
+ */
|
||||
acl_ht_add_and_freeOld(aclpb->aclpb_macro_ht,
|
||||
(PLHashNumber)aci->aci_index,
|
||||
matched_val);
|
||||
@@ -2381,30 +2370,27 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
}
|
||||
|
||||
/*
|
||||
- ** Here, if there's a targetfilter field, see if it matches.
|
||||
- **
|
||||
- ** The commented out code below was an erroneous attempt to skip
|
||||
- ** this test. It is wrong because: 1. you need to store
|
||||
- ** whether the last test matched or not (you cannot just assume it did)
|
||||
- ** and 2. It may not be the same aci, so the previous matched
|
||||
- ** value is a function of the aci.
|
||||
- ** May be interesting to build such a cache...but no evidence for
|
||||
- ** for that right now. See Bug 383424.
|
||||
- **
|
||||
- **
|
||||
- ** && ((aclpb->aclpb_state & ACLPB_SEARCH_BASED_ON_LIST) ||
|
||||
- ** (aclpb->aclpb_res_type & ACLPB_NEW_ENTRY))
|
||||
- */
|
||||
+ * Here, if there's a targetfilter field, see if it matches.
|
||||
+ *
|
||||
+ * The commented out code below was an erroneous attempt to skip
|
||||
+ * this test. It is wrong because: 1. you need to store
|
||||
+ * whether the last test matched or not (you cannot just assume it did)
|
||||
+ * and 2. It may not be the same aci, so the previous matched
|
||||
+ * value is a function of the aci.
|
||||
+ * May be interesting to build such a cache...but no evidence for
|
||||
+ * for that right now. See Bug 383424.
|
||||
+ *
|
||||
+ *
|
||||
+ * && ((aclpb->aclpb_state & ACLPB_SEARCH_BASED_ON_LIST) ||
|
||||
+ * (aclpb->aclpb_res_type & ACLPB_NEW_ENTRY))
|
||||
+ */
|
||||
if (aci->aci_type & ACI_TARGET_FILTER) {
|
||||
int filter_matched = ACL_TRUE;
|
||||
-
|
||||
/*
|
||||
* Check for macros.
|
||||
* For targetfilter we need to fake the lasinfo structure--it's
|
||||
* created "naturally" for subjects but not targets.
|
||||
- */
|
||||
-
|
||||
-
|
||||
+ */
|
||||
if (aci->aci_type & ACI_TARGET_FILTER_MACRO_DN) {
|
||||
|
||||
lasInfo *lasinfo = NULL;
|
||||
@@ -2419,11 +2405,9 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
ACL_EVAL_TARGET_FILTER);
|
||||
slapi_ch_free((void **)&lasinfo);
|
||||
} else {
|
||||
-
|
||||
-
|
||||
if (slapi_vattr_filter_test(NULL, aclpb->aclpb_curr_entry,
|
||||
aci->targetFilter,
|
||||
- 0 /*don't do acess chk*/) != 0) {
|
||||
+ 0 /*don't do access check*/) != 0) {
|
||||
filter_matched = ACL_FALSE;
|
||||
}
|
||||
}
|
||||
@@ -2450,7 +2434,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
* Check to see if we need to evaluate any targetattrfilters.
|
||||
* They look as follows:
|
||||
* (targetattrfilters="add=sn:(sn=rob) && gn:(gn!=byrne),
|
||||
- * del=sn:(sn=rob) && gn:(gn=byrne)")
|
||||
+ * del=sn:(sn=rob) && gn:(gn=byrne)")
|
||||
*
|
||||
* For ADD/DELETE:
|
||||
* If theres's a targetattrfilter then each add/del filter
|
||||
@@ -2458,29 +2442,25 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
* by each value of the attribute in the entry.
|
||||
*
|
||||
* For MODIFY:
|
||||
- * If there's a targetattrfilter then the add/del filter
|
||||
+ * If there's a targetattrfilter then the add/del filter
|
||||
* must be satisfied by the attribute to be added/deleted.
|
||||
* (MODIFY acl is evaluated one value at a time).
|
||||
*
|
||||
*
|
||||
- */
|
||||
-
|
||||
+ */
|
||||
if (((aclpb->aclpb_access & SLAPI_ACL_ADD) &&
|
||||
(aci->aci_type & ACI_TARGET_ATTR_ADD_FILTERS)) ||
|
||||
((aclpb->aclpb_access & SLAPI_ACL_DELETE) &&
|
||||
- (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS))) {
|
||||
-
|
||||
+ (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS)))
|
||||
+ {
|
||||
Targetattrfilter **attrFilterArray = NULL;
|
||||
-
|
||||
Targetattrfilter *attrFilter = NULL;
|
||||
-
|
||||
Slapi_Attr *attr_ptr = NULL;
|
||||
Slapi_Value *sval;
|
||||
const struct berval *attrVal;
|
||||
int k;
|
||||
int done;
|
||||
|
||||
-
|
||||
if ((aclpb->aclpb_access & SLAPI_ACL_ADD) &&
|
||||
(aci->aci_type & ACI_TARGET_ATTR_ADD_FILTERS)) {
|
||||
|
||||
@@ -2497,28 +2477,20 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
|
||||
while (attrFilterArray && attrFilterArray[num_attrs] && attr_matched) {
|
||||
attrFilter = attrFilterArray[num_attrs];
|
||||
-
|
||||
/*
|
||||
- * If this filter applies to an attribute in the entry,
|
||||
- * apply it to the entry.
|
||||
- * Otherwise just ignore it.
|
||||
- *
|
||||
- */
|
||||
-
|
||||
- if (slapi_entry_attr_find(aclpb->aclpb_curr_entry,
|
||||
- attrFilter->attr_str,
|
||||
- &attr_ptr) == 0) {
|
||||
-
|
||||
+ * If this filter applies to an attribute in the entry,
|
||||
+ * apply it to the entry.
|
||||
+ * Otherwise just ignore it.
|
||||
+ *
|
||||
+ */
|
||||
+ if (slapi_entry_attr_find(aclpb->aclpb_curr_entry, attrFilter->attr_str, &attr_ptr) == 0) {
|
||||
/*
|
||||
- * This is an applicable filter.
|
||||
- * The filter is to be appplied to the entry being added
|
||||
- * or deleted.
|
||||
- * The filter needs to be satisfied by _each_ occurence
|
||||
- * of the attribute in the entry--otherwise you
|
||||
- * could satisfy the filter and then put loads of other
|
||||
- * values in on the back of it.
|
||||
- */
|
||||
-
|
||||
+ * This is an applicable filter.
|
||||
+ * The filter is to be applied to the entry being added or deleted.
|
||||
+ * The filter needs to be satisfied by _each_ occurrence of the
|
||||
+ * attribute in the entry--otherwise you could satisfy the filter
|
||||
+ * and then put loads of other values in on the back of it.
|
||||
+ */
|
||||
sval = NULL;
|
||||
attrVal = NULL;
|
||||
k = slapi_attr_first_value(attr_ptr, &sval);
|
||||
@@ -2528,12 +2500,11 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
|
||||
if (acl__make_filter_test_entry(&aclpb->aclpb_filter_test_entry,
|
||||
attrFilter->attr_str,
|
||||
- (struct berval *)attrVal) == LDAP_SUCCESS) {
|
||||
-
|
||||
+ (struct berval *)attrVal) == LDAP_SUCCESS)
|
||||
+ {
|
||||
attr_matched = acl__test_filter(aclpb->aclpb_filter_test_entry,
|
||||
attrFilter->filter,
|
||||
- 1 /* Do filter sense evaluation below */
|
||||
- );
|
||||
+ 1 /* Do filter sense evaluation below */);
|
||||
done = !attr_matched;
|
||||
slapi_entry_free(aclpb->aclpb_filter_test_entry);
|
||||
}
|
||||
@@ -2542,19 +2513,19 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
} /* while */
|
||||
|
||||
/*
|
||||
- * Here, we applied an applicable filter to the entry.
|
||||
- * So if attr_matched is ACL_TRUE then every value
|
||||
- * of the attribute in the entry satisfied the filter.
|
||||
- * Otherwise, attr_matched is ACL_FALSE and not every
|
||||
- * value satisfied the filter, so we will teminate the
|
||||
- * scan of the filter list.
|
||||
- */
|
||||
+ * Here, we applied an applicable filter to the entry.
|
||||
+ * So if attr_matched is ACL_TRUE then every value
|
||||
+ * of the attribute in the entry satisfied the filter.
|
||||
+ * Otherwise, attr_matched is ACL_FALSE and not every
|
||||
+ * value satisfied the filter, so we will terminate the
|
||||
+ * scan of the filter list.
|
||||
+ */
|
||||
}
|
||||
|
||||
num_attrs++;
|
||||
} /* while */
|
||||
|
||||
-/*
|
||||
+ /*
|
||||
* Here, we've applied all the applicable filters to the entry.
|
||||
* Each one must have been satisfied by all the values of the attribute.
|
||||
* The result of this is stored in attr_matched.
|
||||
@@ -2585,7 +2556,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
} else if (((aclpb->aclpb_access & ACLPB_SLAPI_ACL_WRITE_ADD) &&
|
||||
(aci->aci_type & ACI_TARGET_ATTR_ADD_FILTERS)) ||
|
||||
((aclpb->aclpb_access & ACLPB_SLAPI_ACL_WRITE_DEL) &&
|
||||
- (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS))) {
|
||||
+ (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS)))
|
||||
+ {
|
||||
/*
|
||||
* Here, it's a modify add/del and we have attr filters.
|
||||
* So, we need to scan the add/del filter list to find the filter
|
||||
@@ -2629,11 +2601,10 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
* Otherwise, ignore the targetattrfilters.
|
||||
*/
|
||||
if (found) {
|
||||
-
|
||||
if (acl__make_filter_test_entry(&aclpb->aclpb_filter_test_entry,
|
||||
aclpb->aclpb_curr_attrEval->attrEval_name,
|
||||
- aclpb->aclpb_curr_attrVal) == LDAP_SUCCESS) {
|
||||
-
|
||||
+ aclpb->aclpb_curr_attrVal) == LDAP_SUCCESS)
|
||||
+ {
|
||||
attr_matched = acl__test_filter(aclpb->aclpb_filter_test_entry,
|
||||
attrFilter->filter,
|
||||
1 /* Do filter sense evaluation below */
|
||||
@@ -2651,20 +2622,21 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
* Here this attribute appeared and was matched in a
|
||||
* targetattrfilters list, so record this fact so we do
|
||||
* not have to scan the targetattr list for the attribute.
|
||||
- */
|
||||
+ */
|
||||
|
||||
attr_matched_in_targetattrfilters = 1;
|
||||
}
|
||||
} /* targetvaluefilters */
|
||||
|
||||
|
||||
- /* There are 3 cases by which acis are selected.
|
||||
- ** 1) By scanning the whole list and picking based on the resource.
|
||||
- ** 2) By picking a subset of the list which will be used for the whole
|
||||
- ** acl evaluation.
|
||||
- ** 3) A finer granularity, i.e, a selected list of acls which will be
|
||||
- ** used for only that entry's evaluation.
|
||||
- */
|
||||
+ /*
|
||||
+ * There are 3 cases by which acis are selected.
|
||||
+ * 1) By scanning the whole list and picking based on the resource.
|
||||
+ * 2) By picking a subset of the list which will be used for the whole
|
||||
+ * acl evaluation.
|
||||
+ * 3) A finer granularity, i.e, a selected list of acls which will be
|
||||
+ * used for only that entry's evaluation.
|
||||
+ */
|
||||
if (!(skip_attrEval) && (aclpb->aclpb_state & ACLPB_SEARCH_BASED_ON_ENTRY_LIST) &&
|
||||
(res_right & SLAPI_ACL_SEARCH) &&
|
||||
((aci->aci_access & SLAPI_ACL_READ) || (aci->aci_access & SLAPI_ACL_SEARCH))) {
|
||||
@@ -2680,7 +2652,6 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
}
|
||||
}
|
||||
|
||||
-
|
||||
/* If we are suppose to skip attr eval, then let's skip it */
|
||||
if ((aclpb->aclpb_access & SLAPI_ACL_SEARCH) && (!skip_attrEval) &&
|
||||
(aclpb->aclpb_res_type & ACLPB_NEW_ENTRY)) {
|
||||
@@ -2697,9 +2668,10 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
goto acl__resource_match_aci_EXIT;
|
||||
}
|
||||
|
||||
- /* We need to check again because we don't want to select this handle
|
||||
- ** if the right doesn't match for now.
|
||||
- */
|
||||
+ /*
|
||||
+ * We need to check again because we don't want to select this handle
|
||||
+ * if the right doesn't match for now.
|
||||
+ */
|
||||
if (!(aci_right & res_right)) {
|
||||
matches = ACL_FALSE;
|
||||
goto acl__resource_match_aci_EXIT;
|
||||
@@ -2718,20 +2690,16 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
* rbyrneXXX if we had a proper permission for modrdn eg SLAPI_ACL_MODRDN
|
||||
* then we would not need this crappy way of telling it was a MODRDN
|
||||
* request ie. SLAPI_ACL_WRITE && !(c_attrEval).
|
||||
- */
|
||||
-
|
||||
+ */
|
||||
c_attrEval = aclpb->aclpb_curr_attrEval;
|
||||
|
||||
/*
|
||||
* If we've already matched on targattrfilter then do not
|
||||
* bother to look at the attrlist.
|
||||
- */
|
||||
-
|
||||
+ */
|
||||
if (!attr_matched_in_targetattrfilters) {
|
||||
-
|
||||
/* match target attr */
|
||||
- if ((c_attrEval) &&
|
||||
- (aci->aci_type & ACI_TARGET_ATTR)) {
|
||||
+ if ((c_attrEval) && (aci->aci_type & ACI_TARGET_ATTR)) {
|
||||
/* there is a target ATTR */
|
||||
Targetattr **attrArray = aci->targetAttr;
|
||||
Targetattr *attr = NULL;
|
||||
@@ -2773,46 +2741,43 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
matches = (attr_matched ? ACL_TRUE : ACL_FALSE);
|
||||
}
|
||||
|
||||
-
|
||||
aclpb->aclpb_state &= ~ACLPB_ATTR_STAR_MATCHED;
|
||||
/* figure out how it matched, i.e star matched */
|
||||
- if (matches && star_matched && num_attrs == 1 &&
|
||||
- !(aclpb->aclpb_state & ACLPB_FOUND_ATTR_RULE))
|
||||
+ if (matches && star_matched && num_attrs == 1 && !(aclpb->aclpb_state & ACLPB_FOUND_ATTR_RULE)) {
|
||||
aclpb->aclpb_state |= ACLPB_ATTR_STAR_MATCHED;
|
||||
- else {
|
||||
+ } else {
|
||||
/* we are here means that there is a specific
|
||||
- ** attr in the rule for this resource.
|
||||
- ** We need to avoid this case
|
||||
- ** Rule 1: (targetattr = "uid")
|
||||
- ** Rule 2: (targetattr = "*")
|
||||
- ** we cannot use STAR optimization
|
||||
- */
|
||||
+ * attr in the rule for this resource.
|
||||
+ * We need to avoid this case
|
||||
+ * Rule 1: (targetattr = "uid")
|
||||
+ * Rule 2: (targetattr = "*")
|
||||
+ * we cannot use STAR optimization
|
||||
+ */
|
||||
aclpb->aclpb_state |= ACLPB_FOUND_ATTR_RULE;
|
||||
aclpb->aclpb_state &= ~ACLPB_ATTR_STAR_MATCHED;
|
||||
}
|
||||
- } else if ((c_attrEval) ||
|
||||
- (aci->aci_type & ACI_TARGET_ATTR)) {
|
||||
+ } else if ((c_attrEval) || (aci->aci_type & ACI_TARGET_ATTR)) {
|
||||
if ((aci_right & ACL_RIGHTS_TARGETATTR_NOT_NEEDED) &&
|
||||
(aclpb->aclpb_access & ACL_RIGHTS_TARGETATTR_NOT_NEEDED)) {
|
||||
/*
|
||||
- ** Targetattr rule doesn't make any sense
|
||||
- ** in this case. So select this rule
|
||||
- ** default: matches = ACL_TRUE;
|
||||
- */
|
||||
+ * Targetattr rule doesn't make any sense
|
||||
+ * in this case. So select this rule
|
||||
+ * default: matches = ACL_TRUE;
|
||||
+ */
|
||||
;
|
||||
- } else if (aci_right & SLAPI_ACL_WRITE &&
|
||||
+ } else if ((aci_right & SLAPI_ACL_WRITE) &&
|
||||
(aci->aci_type & ACI_TARGET_ATTR) &&
|
||||
!(c_attrEval) &&
|
||||
(aci->aci_type & ACI_HAS_ALLOW_RULE)) {
|
||||
/* We need to handle modrdn operation. Modrdn doesn't
|
||||
- ** change any attrs but changes the RDN and so (attr=NULL).
|
||||
- ** Here we found an acl which has a targetattr but
|
||||
- ** the resource doesn't need one. In that case, we should
|
||||
- ** consider this acl.
|
||||
- ** the opposite is true if it is a deny rule, only a deny without
|
||||
- ** any targetattr should deny modrdn
|
||||
- ** default: matches = ACL_TRUE;
|
||||
- */
|
||||
+ * change any attrs but changes the RDN and so (attr=NULL).
|
||||
+ * Here we found an acl which has a targetattr but
|
||||
+ * the resource doesn't need one. In that case, we should
|
||||
+ * consider this acl.
|
||||
+ * the opposite is true if it is a deny rule, only a deny without
|
||||
+ * any targetattr should deny modrdn
|
||||
+ * default: matches = ACL_TRUE;
|
||||
+ */
|
||||
;
|
||||
} else {
|
||||
matches = ACL_FALSE;
|
||||
@@ -2821,16 +2786,16 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
} /* !attr_matched_in_targetattrfilters */
|
||||
|
||||
/*
|
||||
- ** Here we are testing if we find a entry test rule (which should
|
||||
- ** be rare). In that case, just remember it. An entry test rule
|
||||
- ** doesn't have "(targetattr)".
|
||||
- */
|
||||
+ * Here we are testing if we find a entry test rule (which should
|
||||
+ * be rare). In that case, just remember it. An entry test rule
|
||||
+ * doesn't have "(targetattr)".
|
||||
+ */
|
||||
if ((aclpb->aclpb_state & ACLPB_EVALUATING_FIRST_ATTR) &&
|
||||
(!(aci->aci_type & ACI_TARGET_ATTR))) {
|
||||
aclpb->aclpb_state |= ACLPB_FOUND_A_ENTRY_TEST_RULE;
|
||||
}
|
||||
|
||||
-/*
|
||||
+ /*
|
||||
* Generic exit point for this routine:
|
||||
* matches is ACL_TRUE if the aci matches the target of the resource,
|
||||
* ACL_FALSE othrewise.
|
||||
@@ -2853,6 +2818,7 @@ acl__resource_match_aci_EXIT:
|
||||
|
||||
return (matches);
|
||||
}
|
||||
+
|
||||
/* Macro to determine if the cached result is valid or not. */
|
||||
#define ACL_CACHED_RESULT_VALID(result) \
|
||||
(((result & ACLPB_CACHE_READ_RES_ALLOW) && \
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/findentry.c b/ldap/servers/slapd/back-ldbm/findentry.c
|
||||
index 6e53a0aea..bff751c88 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/findentry.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/findentry.c
|
||||
@@ -93,7 +93,6 @@ find_entry_internal_dn(
|
||||
size_t tries = 0;
|
||||
int isroot = 0;
|
||||
int op_type;
|
||||
- char *errbuf = NULL;
|
||||
|
||||
/* get the managedsait ldap message control */
|
||||
slapi_pblock_get(pb, SLAPI_MANAGEDSAIT, &managedsait);
|
||||
@@ -207,8 +206,8 @@ find_entry_internal_dn(
|
||||
break;
|
||||
}
|
||||
if (acl_type > 0) {
|
||||
- err = plugin_call_acl_plugin(pb, me->ep_entry, NULL, NULL, acl_type,
|
||||
- ACLPLUGIN_ACCESS_DEFAULT, &errbuf);
|
||||
+ char *dummy_attr = "1.1";
|
||||
+ err = slapi_access_allowed(pb, me->ep_entry, dummy_attr, NULL, acl_type);
|
||||
}
|
||||
if (((acl_type > 0) && err) || (op_type == SLAPI_OPERATION_BIND)) {
|
||||
/*
|
||||
@@ -237,7 +236,6 @@ find_entry_internal_dn(
|
||||
CACHE_RETURN(&inst->inst_cache, &me);
|
||||
}
|
||||
|
||||
- slapi_ch_free_string(&errbuf);
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "find_entry_internal_dn", "<= Not found (%s)\n",
|
||||
slapi_sdn_get_dn(sdn));
|
||||
return (NULL);
|
||||
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
|
||||
index c60837601..ca6ea6ef8 100644
|
||||
--- a/src/lib389/lib389/_mapped_object.py
|
||||
+++ b/src/lib389/lib389/_mapped_object.py
|
||||
@@ -1190,7 +1190,7 @@ class DSLdapObjects(DSLogging, DSLints):
|
||||
# Now actually commit the creation req
|
||||
return co.ensure_state(rdn, properties, self._basedn)
|
||||
|
||||
- def filter(self, search, scope=None):
|
||||
+ def filter(self, search, scope=None, strict=False):
|
||||
# This will yield and & filter for objectClass with as many terms as needed.
|
||||
if search:
|
||||
search_filter = _gen_and([self._get_objectclass_filter(), search])
|
||||
@@ -1211,5 +1211,7 @@ class DSLdapObjects(DSLogging, DSLints):
|
||||
insts = [self._entry_to_instance(dn=r.dn, entry=r) for r in results]
|
||||
except ldap.NO_SUCH_OBJECT:
|
||||
# There are no objects to select from, se we return an empty array
|
||||
+ if strict:
|
||||
+ raise ldap.NO_SUCH_OBJECT
|
||||
insts = []
|
||||
return insts
|
||||
--
|
||||
2.26.2
|
||||
|
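Usage note: the lib389 hunk above adds a strict flag to DSLdapObjects.filter(). A minimal sketch of the new behaviour, assuming a standard lib389 test topology (the topo fixture and the ou=missing subtree are hypothetical):

import ldap
from lib389.idm.account import Accounts
from lib389._constants import DEFAULT_SUFFIX

# Assumed: "topo" is a lib389 topology fixture (e.g. topology_st).
conn = topo.standalone

# Default behaviour is unchanged: a missing search base yields [].
assert Accounts(conn, "ou=missing," + DEFAULT_SUFFIX).filter("(objectclass=top)") == []

# With strict=True the same search re-raises ldap.NO_SUCH_OBJECT,
# letting a test assert that the base really is absent.
try:
    Accounts(conn, "ou=missing," + DEFAULT_SUFFIX).filter(
        "(objectclass=top)", ldap.SCOPE_ONELEVEL, strict=True)
except ldap.NO_SUCH_OBJECT:
    pass  # expected
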
@ -1,452 +0,0 @@
From 5bca57b52069508a55b36fafe3729b7d1243743b Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 27 Jan 2021 11:58:38 +0100
Subject: [PATCH 2/3] Issue 4526 - sync_repl: when completing an operation in
the pending list, it can select the wrong operation (#4553)

Bug description:
When an operation completes, it was retrieved in the pending list by the
address of its Operation structure. In the case of POST OP nested operations
the same address can be reused, so when completing an operation there could be
confusion about which operation actually completed.
A second problem is that if an update hits DB_DEADLOCK, the BETXN_PREOP can
be called several times. During the retry, the operation is already in the pending
list.

Fix description:
The fix defines a new operation extension (sync_persist_extension_type).
This operation extension contains an index (idx_pl) of the op_pl in
the pending list.

An additional safety fix is to dump the pending list in case it becomes large (>10).
The pending list is dumped with SLAPI_LOG_PLUGIN.

When there is a retry (the operation extension exists) the call to sync_update_persist_betxn_pre_op
becomes a NOOP: the operation is not added again to the pending list.

relates: https://github.com/389ds/389-ds-base/issues/4526

Reviewed by: William Brown (Thanks !!)
---
ldap/servers/plugins/sync/sync.h | 9 ++
ldap/servers/plugins/sync/sync_init.c | 64 +++++++-
ldap/servers/plugins/sync/sync_persist.c | 194 ++++++++++++++++-------
3 files changed, 208 insertions(+), 59 deletions(-)

diff --git a/ldap/servers/plugins/sync/sync.h b/ldap/servers/plugins/sync/sync.h
index 7241fddbf..2fdf24476 100644
--- a/ldap/servers/plugins/sync/sync.h
+++ b/ldap/servers/plugins/sync/sync.h
@@ -82,6 +82,12 @@ typedef enum _pl_flags {
OPERATION_PL_IGNORED = 5
} pl_flags_t;

+typedef struct op_ext_ident
+{
+ uint32_t idx_pl; /* To uniquely identify an operation in PL, the operation extension
+ * contains the index of that operation in the pending list
+ */
+} op_ext_ident_t;
/* Pending list operations.
* it contains a list ('next') of nested operations. The
* order the same order that the server applied the operation
@@ -90,6 +96,7 @@ typedef enum _pl_flags {
typedef struct OPERATION_PL_CTX
{
Operation *op; /* Pending operation, should not be freed as it belongs to the pblock */
+ uint32_t idx_pl; /* index of the operation in the pending list */
pl_flags_t flags; /* operation is completed (set to TRUE in POST) */
Slapi_Entry *entry; /* entry to be store in the enqueued node. 1st arg sync_queue_change */
Slapi_Entry *eprev; /* pre-entry to be stored in the enqueued node. 2nd arg sync_queue_change */
@@ -99,6 +106,8 @@ typedef struct OPERATION_PL_CTX

OPERATION_PL_CTX_T * get_thread_primary_op(void);
void set_thread_primary_op(OPERATION_PL_CTX_T *op);
+const op_ext_ident_t * sync_persist_get_operation_extension(Slapi_PBlock *pb);
+void sync_persist_set_operation_extension(Slapi_PBlock *pb, op_ext_ident_t *op_ident);

int sync_register_operation_extension(void);
int sync_unregister_operation_entension(void);
diff --git a/ldap/servers/plugins/sync/sync_init.c b/ldap/servers/plugins/sync/sync_init.c
index 74af14512..9e6a12000 100644
--- a/ldap/servers/plugins/sync/sync_init.c
+++ b/ldap/servers/plugins/sync/sync_init.c
@@ -16,6 +16,7 @@ static int sync_preop_init(Slapi_PBlock *pb);
static int sync_postop_init(Slapi_PBlock *pb);
static int sync_be_postop_init(Slapi_PBlock *pb);
static int sync_betxn_preop_init(Slapi_PBlock *pb);
+static int sync_persist_register_operation_extension(void);

static PRUintn thread_primary_op;

@@ -43,7 +44,8 @@ sync_init(Slapi_PBlock *pb)
slapi_pblock_set(pb, SLAPI_PLUGIN_CLOSE_FN,
(void *)sync_close) != 0 ||
slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION,
- (void *)&pdesc) != 0) {
+ (void *)&pdesc) != 0 ||
+ sync_persist_register_operation_extension()) {
slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM,
"sync_init - Failed to register plugin\n");
rc = 1;
@@ -242,4 +244,64 @@ set_thread_primary_op(OPERATION_PL_CTX_T *op)
PR_SetThreadPrivate(thread_primary_op, (void *) head);
}
head->next = op;
+}
+
+/* The following definitions are used for the operation pending list
+ * (used by sync_repl). To retrieve a specific operation in the pending
+ * list, the operation extension contains the index of the operation in
+ * the pending list
+ */
+static int sync_persist_extension_type; /* initialized in sync_persist_register_operation_extension */
+static int sync_persist_extension_handle; /* initialized in sync_persist_register_operation_extension */
+
+const op_ext_ident_t *
+sync_persist_get_operation_extension(Slapi_PBlock *pb)
+{
+ Slapi_Operation *op;
+ op_ext_ident_t *ident;
+
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+ ident = slapi_get_object_extension(sync_persist_extension_type, op,
+ sync_persist_extension_handle);
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_get_operation_extension operation (op=0x%lx) -> %d\n",
+ (ulong) op, ident ? ident->idx_pl : -1);
+ return (const op_ext_ident_t *) ident;
+
+}
+
+void
+sync_persist_set_operation_extension(Slapi_PBlock *pb, op_ext_ident_t *op_ident)
+{
+ Slapi_Operation *op;
+
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_set_operation_extension operation (op=0x%lx) -> %d\n",
+ (ulong) op, op_ident ? op_ident->idx_pl : -1);
+ slapi_set_object_extension(sync_persist_extension_type, op,
+ sync_persist_extension_handle, (void *)op_ident);
+}
+/* operation extension constructor */
+static void *
+sync_persist_operation_extension_constructor(void *object __attribute__((unused)), void *parent __attribute__((unused)))
+{
+ /* we only set the extension value explicitly in sync_update_persist_betxn_pre_op */
+ return NULL; /* we don't set anything in the ctor */
+}
+
+/* consumer operation extension destructor */
+static void
+sync_persist_operation_extension_destructor(void *ext, void *object __attribute__((unused)), void *parent __attribute__((unused)))
+{
+ op_ext_ident_t *op_ident = (op_ext_ident_t *)ext;
+ slapi_ch_free((void **)&op_ident);
+}
+static int
+sync_persist_register_operation_extension(void)
+{
+ return slapi_register_object_extension(SYNC_PLUGIN_SUBSYSTEM,
+ SLAPI_EXT_OPERATION,
+ sync_persist_operation_extension_constructor,
+ sync_persist_operation_extension_destructor,
+ &sync_persist_extension_type,
+ &sync_persist_extension_handle);
}
\ No newline at end of file
diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c
index d13f142b0..e93a8fa83 100644
--- a/ldap/servers/plugins/sync/sync_persist.c
+++ b/ldap/servers/plugins/sync/sync_persist.c
@@ -47,6 +47,9 @@ static int sync_release_connection(Slapi_PBlock *pb, Slapi_Connection *conn, Sla
* per thread pending list of nested operation..
* being a betxn_preop the pending list has the same order
* that the server received the operation
+ *
+ * In case of DB_RETRY, this callback can be called several times
+ * The detection of the DB_RETRY is done via the operation extension
*/
int
sync_update_persist_betxn_pre_op(Slapi_PBlock *pb)
@@ -54,64 +57,128 @@ sync_update_persist_betxn_pre_op(Slapi_PBlock *pb)
OPERATION_PL_CTX_T *prim_op;
OPERATION_PL_CTX_T *new_op;
Slapi_DN *sdn;
+ uint32_t idx_pl = 0;
+ op_ext_ident_t *op_ident;
+ Operation *op;

if (!SYNC_IS_INITIALIZED()) {
/* not initialized if sync plugin is not started */
return 0;
}

+ prim_op = get_thread_primary_op();
+ op_ident = sync_persist_get_operation_extension(pb);
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+ slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn);
+
+ /* Check if we are in a DB retry case */
+ if (op_ident && prim_op) {
+ OPERATION_PL_CTX_T *current_op;
+
+ /* This callback is called (with the same operation) because of a DB_RETRY */
+
+ /* It already existed (in the operation extension) an index of the operation in the pending list */
+ for (idx_pl = 0, current_op = prim_op; current_op->next; idx_pl++, current_op = current_op->next) {
+ if (op_ident->idx_pl == idx_pl) {
+ break;
+ }
+ }
+
+ /* The retrieved operation in the pending list is at the right
+ * index and state. Just return making this callback a noop
+ */
+ PR_ASSERT(current_op);
+ PR_ASSERT(current_op->op == op);
+ PR_ASSERT(current_op->flags == OPERATION_PL_PENDING);
+ slapi_log_err(SLAPI_LOG_WARNING, SYNC_PLUGIN_SUBSYSTEM, "sync_update_persist_betxn_pre_op - DB retried operation targets "
+ "\"%s\" (op=0x%lx idx_pl=%d) => op not changed in PL\n",
+ slapi_sdn_get_dn(sdn), (ulong) op, idx_pl);
+ return 0;
+ }
+
/* Create a new pending operation node */
new_op = (OPERATION_PL_CTX_T *)slapi_ch_calloc(1, sizeof(OPERATION_PL_CTX_T));
new_op->flags = OPERATION_PL_PENDING;
- slapi_pblock_get(pb, SLAPI_OPERATION, &new_op->op);
- slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn);
+ new_op->op = op;

- prim_op = get_thread_primary_op();
if (prim_op) {
/* It already exists a primary operation, so the current
* operation is a nested one that we need to register at the end
* of the pending nested operations
+ * Also computes the idx_pl that will be the identifier (index) of the operation
+ * in the pending list
*/
OPERATION_PL_CTX_T *current_op;
- for (current_op = prim_op; current_op->next; current_op = current_op->next);
+ for (idx_pl = 0, current_op = prim_op; current_op->next; idx_pl++, current_op = current_op->next);
current_op->next = new_op;
+ idx_pl++; /* idx_pl is currently the index of the last op
+ * as we are adding a new op we need to increase that index
+ */
slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_update_persist_betxn_pre_op - nested operation targets "
- "\"%s\" (0x%lx)\n",
- slapi_sdn_get_dn(sdn), (ulong) new_op->op);
+ "\"%s\" (op=0x%lx idx_pl=%d)\n",
+ slapi_sdn_get_dn(sdn), (ulong) new_op->op, idx_pl);
} else {
/* The current operation is the first/primary one in the txn
* registers it directly in the thread private data (head)
*/
set_thread_primary_op(new_op);
+ idx_pl = 0; /* as primary operation, its index in the pending list is 0 */
slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "sync_update_persist_betxn_pre_op - primary operation targets "
"\"%s\" (0x%lx)\n",
slapi_sdn_get_dn(sdn), (ulong) new_op->op);
}
+
+ /* records, in the operation extension AND in the pending list, the identifier (index) of
+ * this operation into the pending list
+ */
+ op_ident = (op_ext_ident_t *) slapi_ch_calloc(1, sizeof (op_ext_ident_t));
+ op_ident->idx_pl = idx_pl;
+ new_op->idx_pl = idx_pl;
+ sync_persist_set_operation_extension(pb, op_ident);
return 0;
}

-/* This operation can not be proceed by sync_repl listener because
- * of internal problem. For example, POST entry does not exist
+/* This operation failed or skipped (e.g. no MODs).
+ * In such case POST entry does not exist
*/
static void
-ignore_op_pl(Operation *op)
+ignore_op_pl(Slapi_PBlock *pb)
{
OPERATION_PL_CTX_T *prim_op, *curr_op;
+ op_ext_ident_t *ident;
+ Operation *op;
+
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+
+ /* prim_op is set if betxn was called
+ * In case of invalid update (schema violation) the
+ * operation skip betxn and prim_op is not set.
+ * This is the same for ident
+ */
prim_op = get_thread_primary_op();
+ ident = sync_persist_get_operation_extension(pb);

- for (curr_op = prim_op; curr_op; curr_op = curr_op->next) {
- if ((curr_op->op == op) &&
- (curr_op->flags == OPERATION_PL_PENDING)) { /* If by any "chance" a same operation structure was reused in consecutive updates
- * we can not only rely on 'op' value
- */
- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl operation (0x%lx) from the pending list\n",
- (ulong) op);
- curr_op->flags = OPERATION_PL_IGNORED;
- return;
+ if (ident) {
+ /* The TXN_BEPROP was called, so the operation is
+ * registered in the pending list
+ */
+ for (curr_op = prim_op; curr_op; curr_op = curr_op->next) {
+ if (curr_op->idx_pl == ident->idx_pl) {
+ /* The operation extension (ident) refers this operation (currop in the pending list).
+ * This is called during sync_repl postop. At this moment
+ * the operation in the pending list (identified by idx_pl in the operation extension)
+ * should be pending
+ */
+ PR_ASSERT(curr_op->flags == OPERATION_PL_PENDING);
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl operation (op=0x%lx, idx_pl=%d) from the pending list\n",
+ (ulong) op, ident->idx_pl);
+ curr_op->flags = OPERATION_PL_IGNORED;
+ return;
+ }
}
}
- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl can not retrieve an operation (0x%lx) in pending list\n",
- (ulong) op);
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "ignore_op_pl failing operation (op=0x%lx, idx_pl=%d) was not in the pending list\n",
+ (ulong) op, ident ? ident->idx_pl : -1);
}

/* This is a generic function that is called by betxn_post of this plugin.
@@ -126,7 +193,9 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber
{
OPERATION_PL_CTX_T *prim_op = NULL, *curr_op;
Operation *pb_op;
+ op_ext_ident_t *ident;
Slapi_DN *sdn;
+ uint32_t count; /* use for diagnostic of the lenght of the pending list */
int32_t rc;

if (!SYNC_IS_INITIALIZED()) {
@@ -138,7 +207,7 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber

if (NULL == e) {
/* Ignore this operation (for example case of failure of the operation) */
- ignore_op_pl(pb_op);
+ ignore_op_pl(pb);
return;
}

@@ -161,16 +230,21 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber


prim_op = get_thread_primary_op();
+ ident = sync_persist_get_operation_extension(pb);
PR_ASSERT(prim_op);
+ PR_ASSERT(ident);
/* First mark the operation as completed/failed
* the param to be used once the operation will be pushed
* on the listeners queue
*/
for (curr_op = prim_op; curr_op; curr_op = curr_op->next) {
- if ((curr_op->op == pb_op) &&
- (curr_op->flags == OPERATION_PL_PENDING)) { /* If by any "chance" a same operation structure was reused in consecutive updates
- * we can not only rely on 'op' value
- */
+ if (curr_op->idx_pl == ident->idx_pl) {
+ /* The operation extension (ident) refers this operation (currop in the pending list)
+ * This is called during sync_repl postop. At this moment
+ * the operation in the pending list (identified by idx_pl in the operation extension)
+ * should be pending
+ */
+ PR_ASSERT(curr_op->flags == OPERATION_PL_PENDING);
if (rc == LDAP_SUCCESS) {
curr_op->flags = OPERATION_PL_SUCCEEDED;
curr_op->entry = e ? slapi_entry_dup(e) : NULL;
@@ -183,46 +257,50 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber
}
}
if (!curr_op) {
- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "%s - operation not found on the pendling list\n", label);
+ slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "%s - operation (op=0x%lx, idx_pl=%d) not found on the pendling list\n",
+ label, (ulong) pb_op, ident->idx_pl);
PR_ASSERT(curr_op);
}

-#if DEBUG
- /* dump the pending queue */
- for (curr_op = prim_op; curr_op; curr_op = curr_op->next) {
- char *flags_str;
- char * entry_str;
+ /* for diagnostic of the pending list, dump its content if it is too long */
+ for (count = 0, curr_op = prim_op; curr_op; count++, curr_op = curr_op->next);
+ if (loglevel_is_set(SLAPI_LOG_PLUGIN) && (count > 10)) {

- if (curr_op->entry) {
- entry_str = slapi_entry_get_dn(curr_op->entry);
- } else if (curr_op->eprev){
- entry_str = slapi_entry_get_dn(curr_op->eprev);
- } else {
- entry_str = "unknown";
- }
- switch (curr_op->flags) {
- case OPERATION_PL_SUCCEEDED:
- flags_str = "succeeded";
- break;
- case OPERATION_PL_FAILED:
- flags_str = "failed";
- break;
- case OPERATION_PL_IGNORED:
- flags_str = "ignored";
- break;
- case OPERATION_PL_PENDING:
- flags_str = "pending";
- break;
- default:
- flags_str = "unknown";
- break;
-
+ /* if pending list looks abnormally too long, dump the pending list */
+ for (curr_op = prim_op; curr_op; curr_op = curr_op->next) {
+ char *flags_str;
+ char * entry_str;

- }
- slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "dump pending list(0x%lx) %s %s\n",
+ if (curr_op->entry) {
+ entry_str = slapi_entry_get_dn(curr_op->entry);
+ } else if (curr_op->eprev) {
+ entry_str = slapi_entry_get_dn(curr_op->eprev);
+ } else {
+ entry_str = "unknown";
+ }
+ switch (curr_op->flags) {
+ case OPERATION_PL_SUCCEEDED:
+ flags_str = "succeeded";
+ break;
+ case OPERATION_PL_FAILED:
+ flags_str = "failed";
+ break;
+ case OPERATION_PL_IGNORED:
+ flags_str = "ignored";
+ break;
+ case OPERATION_PL_PENDING:
+ flags_str = "pending";
+ break;
+ default:
+ flags_str = "unknown";
+ break;
+
+
+ }
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "dump pending list(0x%lx) %s %s\n",
(ulong) curr_op->op, entry_str, flags_str);
+ }
}
-#endif
/* Second check if it remains a pending operation in the pending list */
for (curr_op = prim_op; curr_op; curr_op = curr_op->next) {
--
2.26.2

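Note: the fix above boils down to identifying a pending operation by a stable index (idx_pl) rather than by the address of its Operation structure, which a nested POST OP can reuse. A toy Python sketch of that idea (all names are illustrative, not the plugin's actual API):

class PendingList:
    """Toy model of the sync_repl pending list (hypothetical names)."""

    def __init__(self):
        self._slots = []  # ordered as the server applied the operations

    def register(self, op):
        # betxn_preop: the index, not the op address, is the identifier;
        # it is what the fix stores in the operation extension (idx_pl).
        idx_pl = len(self._slots)
        self._slots.append({"op": op, "state": "pending"})
        return idx_pl

    def complete(self, idx_pl, ok):
        # betxn_post: look the operation up by index. Matching on the op
        # object itself would be ambiguous if the same structure were
        # reused by a nested operation; the index never is.
        slot = self._slots[idx_pl]
        assert slot["state"] == "pending"
        slot["state"] = "succeeded" if ok else "failed"
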
@ -1,145 +0,0 @@
From e6536aa27bfdc27cad07f6c5cd3312f0f0710c96 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 1 Feb 2021 09:28:25 +0100
Subject: [PATCH 3/3] Issue 4581 - A failed re-indexing leaves the database in
broken state (#4582)

Bug description:
During reindex the numsubordinates attribute is not updated in parent entries.
The consequence is that the internal counter job->numsubordinates==0.
Later, when indexing ancestorid, the server shows the progression of this
indexing as a ratio computed against job->numsubordinates==0.
Division by 0 -> SIGFPE

Fix description:
If numsubordinates is 0, log a message without the division.

relates: https://github.com/389ds/389-ds-base/issues/4581

Reviewed by: Pierre Rogier, Mark Reynolds, Simon Pichugin, Teko Mihinto (thanks !!)

Platforms tested: F31
---
.../slapd/back-ldbm/db-bdb/bdb_import.c | 72 ++++++++++++++-----
1 file changed, 54 insertions(+), 18 deletions(-)

diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
index ba783ee59..7f484934f 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
@@ -468,18 +468,30 @@ bdb_get_nonleaf_ids(backend *be, DB_TXN *txn, IDList **idl, ImportJob *job)
}
key_count++;
if (!(key_count % PROGRESS_INTERVAL)) {
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
- "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
+ "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
+ "Gathering ancestorid non-leaf IDs: processed %d ancestors...",
+ key_count);
+ }
started_progress_logging = 1;
}
} while (ret == 0 && !(job->flags & FLAG_ABORT));

if (started_progress_logging) {
/* finish what we started logging */
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
- "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
+ "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
+ "Gathering ancestorid non-leaf IDs: processed %d ancestors",
+ key_count);
+ }
}
import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
"Finished gathering ancestorid non-leaf IDs.");
@@ -660,9 +672,15 @@ bdb_ancestorid_default_create_index(backend *be, ImportJob *job)

key_count++;
if (!(key_count % PROGRESS_INTERVAL)) {
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
- "Creating ancestorid index: processed %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+ "Creating ancestorid index: processed %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+ "Creating ancestorid index: processed %d ancestors...",
+ key_count);
+ }
started_progress_logging = 1;
}

@@ -743,9 +761,15 @@ out:
if (ret == 0) {
if (started_progress_logging) {
/* finish what we started logging */
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
- "Creating ancestorid index: processed %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+ "Creating ancestorid index: processed %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+ "Creating ancestorid index: processed %d ancestors",
+ key_count);
+ }
}
import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
"Created ancestorid index (old idl).");
@@ -869,9 +893,15 @@ bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job)

key_count++;
if (!(key_count % PROGRESS_INTERVAL)) {
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
- "Creating ancestorid index: progress %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+ "Creating ancestorid index: progress %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+ "Creating ancestorid index: progress %d ancestors...",
+ key_count);
+ }
started_progress_logging = 1;
}

@@ -932,9 +962,15 @@ out:
if (ret == 0) {
if (started_progress_logging) {
/* finish what we started logging */
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
- "Creating ancestorid index: processed %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+ "Creating ancestorid index: processed %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+ "Creating ancestorid index: processed %d ancestors",
+ key_count);
+ }
}
import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
"Created ancestorid index (new idl).");
--
2.26.2

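Note: every hunk above applies the same guard: derive a percentage only when job->numsubordinates is non-zero. The pattern, reduced to a hedged Python sketch (log stands in for import_log_notice; names are illustrative):

def log_progress(log, key_count, numsubordinates):
    # numsubordinates is 0 after a reindex, since reindexing does not
    # refresh it; dividing by it is what triggered the SIGFPE fixed above.
    if numsubordinates:
        log("processed %d%% (ID count %d)" % (key_count * 100 // numsubordinates, key_count))
    else:
        log("processed %d ancestors..." % key_count)
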
@ -1,190 +0,0 @@
From 4839898dbe69d6445f3571beec1bf3f1557d6cc6 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 12 Jan 2021 10:09:23 -0500
Subject: [PATCH] Issue 4513 - CI Tests - fix test failures

Description:

Fixed tests in these suites: basic, entryuuid, filter, lib389, and schema

relates: https://github.com/389ds/389-ds-base/issues/4513

Reviewed by: progier(Thanks!)
---
dirsrvtests/tests/suites/basic/basic_test.py | 65 ++++++++++---------
.../filter/rfc3673_all_oper_attrs_test.py | 4 +-
.../suites/lib389/config_compare_test.py | 5 +-
.../suites/lib389/idm/user_compare_i2_test.py | 3 +
.../tests/suites/schema/schema_reload_test.py | 3 +
5 files changed, 47 insertions(+), 33 deletions(-)

diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 97908c31c..fc9af46e4 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -1059,6 +1059,41 @@ def test_search_ou(topology_st):
assert len(entries) == 0


+def test_bind_invalid_entry(topology_st):
+ """Test the failing bind does not return information about the entry
+
+ :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f
+
+ :setup: Standalone instance
+
+ :steps:
+ 1: bind as non existing entry
+ 2: check that bind info does not report 'No such entry'
+
+ :expectedresults:
+ 1: pass
+ 2: pass
+ """
+
+ topology_st.standalone.restart()
+ INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX
+ try:
+ topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD)
+ except ldap.LDAPError as e:
+ log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY)
+ log.info('exception description: ' + e.args[0]['desc'])
+ if 'info' in e.args[0]:
+ log.info('exception info: ' + e.args[0]['info'])
+ assert e.args[0]['desc'] == 'Invalid credentials'
+ assert 'info' not in e.args[0]
+ pass
+
+ log.info('test_bind_invalid_entry: PASSED')
+
+ # reset credentials
+ topology_st.standalone.simple_bind_s(DN_DM, PW_DM)
+
+
@pytest.mark.bz1044135
@pytest.mark.ds47319
def test_connection_buffer_size(topology_st):
@@ -1477,36 +1512,6 @@ def test_dscreate_with_different_rdn(dscreate_test_rdn_value):
else:
assert True

-def test_bind_invalid_entry(topology_st):
- """Test the failing bind does not return information about the entry
-
- :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f
-
- :setup: Standalone instance
-
- :steps:
- 1: bind as non existing entry
- 2: check that bind info does not report 'No such entry'
-
- :expectedresults:
- 1: pass
- 2: pass
- """
-
- topology_st.standalone.restart()
- INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX
- try:
- topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD)
- except ldap.LDAPError as e:
- log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY)
- log.info('exception description: ' + e.args[0]['desc'])
- if 'info' in e.args[0]:
- log.info('exception info: ' + e.args[0]['info'])
- assert e.args[0]['desc'] == 'Invalid credentials'
- assert 'info' not in e.args[0]
- pass
-
- log.info('test_bind_invalid_entry: PASSED')


if __name__ == '__main__':
diff --git a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py
index c882bea5f..0477acda7 100644
--- a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py
+++ b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py
@@ -53,11 +53,11 @@ TEST_PARAMS = [(DN_ROOT, False, [
(TEST_USER_DN, False, [
'createTimestamp', 'creatorsName', 'entrydn',
'entryid', 'modifiersName', 'modifyTimestamp',
- 'nsUniqueId', 'parentid'
+ 'nsUniqueId', 'parentid', 'entryUUID'
]),
(TEST_USER_DN, True, [
'createTimestamp', 'creatorsName', 'entrydn',
- 'entryid', 'modifyTimestamp', 'nsUniqueId', 'parentid'
+ 'entryid', 'modifyTimestamp', 'nsUniqueId', 'parentid', 'entryUUID'
]),
(DN_CONFIG, False, [
'numSubordinates', 'passwordHistory'
diff --git a/dirsrvtests/tests/suites/lib389/config_compare_test.py b/dirsrvtests/tests/suites/lib389/config_compare_test.py
index 709bae8cb..84f55acfa 100644
--- a/dirsrvtests/tests/suites/lib389/config_compare_test.py
+++ b/dirsrvtests/tests/suites/lib389/config_compare_test.py
@@ -22,15 +22,18 @@ def test_config_compare(topology_i2):
st2_config = topology_i2.ins.get('standalone2').config
# 'nsslapd-port' attribute is expected to be same in cn=config comparison,
# but they are different in our testing environment
- # as we are using 2 DS instances running, both running simultaneuosly.
+ # as we are using 2 DS instances running, both running simultaneously.
# Hence explicitly adding 'nsslapd-port' to compare_exclude.
st1_config._compare_exclude.append('nsslapd-port')
st2_config._compare_exclude.append('nsslapd-port')
st1_config._compare_exclude.append('nsslapd-secureport')
st2_config._compare_exclude.append('nsslapd-secureport')
+ st1_config._compare_exclude.append('nsslapd-ldapssotoken-secret')
+ st2_config._compare_exclude.append('nsslapd-ldapssotoken-secret')

assert Config.compare(st1_config, st2_config)

+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py b/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py
index c7540e4ce..ccde0f6b0 100644
--- a/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py
+++ b/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py
@@ -39,6 +39,9 @@ def test_user_compare_i2(topology_i2):
st2_users.create(properties=user_properties)
st2_testuser = st2_users.get('testuser')

+ st1_testuser._compare_exclude.append('entryuuid')
+ st2_testuser._compare_exclude.append('entryuuid')
+
assert UserAccount.compare(st1_testuser, st2_testuser)


diff --git a/dirsrvtests/tests/suites/schema/schema_reload_test.py b/dirsrvtests/tests/suites/schema/schema_reload_test.py
index 2ece5dda5..e7e7d833d 100644
--- a/dirsrvtests/tests/suites/schema/schema_reload_test.py
+++ b/dirsrvtests/tests/suites/schema/schema_reload_test.py
@@ -54,6 +54,7 @@ def test_valid_schema(topo):
schema_file.write("objectclasses: ( 1.2.3.4.5.6.7.8 NAME 'TestObject' " +
"SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " +
"sn $ ValidAttribute ) X-ORIGIN 'user defined' )')\n")
+ os.chmod(schema_filename, 0o777)
except OSError as e:
log.fatal("Failed to create schema file: " +
"{} Error: {}".format(schema_filename, str(e)))
@@ -106,6 +107,7 @@ def test_invalid_schema(topo):
schema_file.write("objectclasses: ( 1.2.3.4.5.6.7 NAME 'MoZiLLaOBJeCT' " +
"SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " +
"sn $ MozillaAttribute ) X-ORIGIN 'user defined' )')\n")
+ os.chmod(schema_filename, 0o777)
except OSError as e:
log.fatal("Failed to create schema file: " +
"{} Error: {}".format(schema_filename, str(e)))
@@ -122,6 +124,7 @@ def test_invalid_schema(topo):
schema_file.write("objectclasses: ( 1.2.3.4.5.6.70 NAME 'MoZiLLaOBJeCT' " +
"SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " +
"cn $ MoZiLLaATTRiBuTe ) X-ORIGIN 'user defined' )')\n")
+ os.chmod(schema_filename, 0o777)
except OSError as e:
log.fatal("Failed to create schema file: " +
"{} Error: {}".format(schema_filename, str(e)))
--
2.26.2

@ -1,130 +0,0 @@
From 316aeae09468d6fd3b35422b236751eb1b5c309e Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 9 Feb 2021 14:02:59 -0500
Subject: [PATCH 1/2] Issue 4609 - CVE - info disclosure when authenticating

Description: If you bind as a user that does not exist, error 49 is returned
instead of error 32, because error 32 discloses that the entry
does not exist. When you bind as an entry that does not have
userpassword set, error 48 (inappropriate auth) is returned, but this
discloses that the entry does indeed exist. Instead we should
always return error 49, even if the password is not set in the
entry. This way we do not disclose to an attacker whether the Bind
DN exists.

Relates: https://github.com/389ds/389-ds-base/issues/4609

Reviewed by: tbordaz(Thanks!)
---
dirsrvtests/tests/suites/basic/basic_test.py | 39 +++++++++++++++++++-
ldap/servers/slapd/back-ldbm/ldbm_bind.c | 4 +-
ldap/servers/slapd/dse.c | 7 +++-
3 files changed, 45 insertions(+), 5 deletions(-)

diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index fc9af46e4..e35f34721 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -9,7 +9,7 @@

from subprocess import check_output, PIPE, run
from lib389 import DirSrv
-from lib389.idm.user import UserAccounts
+from lib389.idm.user import UserAccount, UserAccounts
import pytest
from lib389.tasks import *
from lib389.utils import *
@@ -1094,6 +1094,43 @@ def test_bind_invalid_entry(topology_st):
topology_st.standalone.simple_bind_s(DN_DM, PW_DM)


+def test_bind_entry_missing_passwd(topology_st):
+ """
+ :id: af209149-8fb8-48cb-93ea-3e82dd7119d2
+ :setup: Standalone Instance
+ :steps:
+ 1. Bind as database entry that does not have userpassword set
+ 2. Bind as database entry that does not exist
+ 1. Bind as cn=config entry that does not have userpassword set
+ 2. Bind as cn=config entry that does not exist
+ :expectedresults:
+ 1. Fails with error 49
+ 2. Fails with error 49
+ 3. Fails with error 49
+ 4. Fails with error 49
+ """
+ user = UserAccount(topology_st.standalone, DEFAULT_SUFFIX)
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ # Bind as the suffix root entry which does not have a userpassword
+ user.bind("some_password")
+
+ user = UserAccount(topology_st.standalone, "cn=not here," + DEFAULT_SUFFIX)
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ # Bind as the entry which does not exist
+ user.bind("some_password")
+
+ # Test cn=config since it has its own code path
+ user = UserAccount(topology_st.standalone, "cn=config")
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ # Bind as the config entry which does not have a userpassword
+ user.bind("some_password")
+
+ user = UserAccount(topology_st.standalone, "cn=does not exist,cn=config")
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ # Bind as an entry under cn=config that does not exist
+ user.bind("some_password")
+
+
@pytest.mark.bz1044135
@pytest.mark.ds47319
def test_connection_buffer_size(topology_st):
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_bind.c b/ldap/servers/slapd/back-ldbm/ldbm_bind.c
index fa450ecd5..38d115a32 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_bind.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_bind.c
@@ -76,8 +76,8 @@ ldbm_back_bind(Slapi_PBlock *pb)
case LDAP_AUTH_SIMPLE: {
Slapi_Value cv;
if (slapi_entry_attr_find(e->ep_entry, "userpassword", &attr) != 0) {
- slapi_send_ldap_result(pb, LDAP_INAPPROPRIATE_AUTH, NULL,
- NULL, 0, NULL);
+ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "Entry does not have userpassword set");
+ slapi_send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL, NULL, 0, NULL);
CACHE_RETURN(&inst->inst_cache, &e);
rc = SLAPI_BIND_FAIL;
goto bail;
diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
index 3c2de75fc..b04fafde6 100644
--- a/ldap/servers/slapd/dse.c
+++ b/ldap/servers/slapd/dse.c
@@ -1446,7 +1446,8 @@ dse_bind(Slapi_PBlock *pb) /* JCM There should only be one exit point from this

ec = dse_get_entry_copy(pdse, sdn, DSE_USE_LOCK);
if (ec == NULL) {
- slapi_send_ldap_result(pb, LDAP_NO_SUCH_OBJECT, NULL, NULL, 0, NULL);
+ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "Entry does not exist");
+ slapi_send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL, NULL, 0, NULL);
return (SLAPI_BIND_FAIL);
}

@@ -1454,7 +1455,8 @@ dse_bind(Slapi_PBlock *pb) /* JCM There should only be one exit point from this
case LDAP_AUTH_SIMPLE: {
Slapi_Value cv;
if (slapi_entry_attr_find(ec, "userpassword", &attr) != 0) {
- slapi_send_ldap_result(pb, LDAP_INAPPROPRIATE_AUTH, NULL, NULL, 0, NULL);
+ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "Entry does not have userpassword set");
+ slapi_send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL, NULL, 0, NULL);
slapi_entry_free(ec);
return SLAPI_BIND_FAIL;
}
@@ -1462,6 +1464,7 @@ dse_bind(Slapi_PBlock *pb) /* JCM There should only be one exit point from this

slapi_value_init_berval(&cv, cred);
if (slapi_pw_find_sv(bvals, &cv) != 0) {
+ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "Invalid credentials");
slapi_send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL, NULL, 0, NULL);
slapi_entry_free(ec);
value_done(&cv);
--
2.26.2

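Note on the fix above: after it, a bind against a DN that does not exist and a bind against an entry that has no userpassword should be indistinguishable to an LDAP client. A minimal python-ldap sketch of that check (the server URI, suffix, and password below are made-up examples, not values from the patch):

import ldap

conn = ldap.initialize("ldap://localhost:389")
for dn in ("cn=does not exist,dc=example,dc=com",  # entry missing
           "dc=example,dc=com"):                   # entry present, no userpassword
    try:
        conn.simple_bind_s(dn, "some_password")
    except ldap.INVALID_CREDENTIALS:
        # both cases now fail identically with err=49
        print("%s -> err=49 (existence not disclosed)" % dn)
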
@ -1,58 +0,0 @@
From b01e30c79b1364ac35c0b2db2ef4a2ff64600a7f Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Tue, 23 Feb 2021 08:58:37 +0100
Subject: [PATCH 1/2] Issue 4649 - crash in sync_repl when a MODRDN create a
cenotaph (#4652)

Bug description:
When an operation is flagged OP_FLAG_NOOP, it skips BETXN plugins but calls POST plugins.
For sync_repl, betxn (sync_update_persist_betxn_pre_op) creates an operation extension to be
consumed by the post (sync_update_persist_op). In case of OP_FLAG_NOOP, there is no
operation extension.

Fix description:
Test that the operation is flagged OP_FLAG_NOOP if the operation extension is missing

relates: https://github.com/389ds/389-ds-base/issues/4649

Reviewed by: William Brown (thanks)

Platforms tested: F31
---
ldap/servers/plugins/sync/sync_persist.c | 14 ++++-
2 files changed, 75 insertions(+), 2 deletions(-)
diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c
index e93a8fa83..12b23ebac 100644
--- a/ldap/servers/plugins/sync/sync_persist.c
+++ b/ldap/servers/plugins/sync/sync_persist.c
@@ -206,7 +206,9 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber
slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn);

if (NULL == e) {
- /* Ignore this operation (for example case of failure of the operation) */
+ /* Ignore this operation (for example case of failure of the operation
+ * or operation resulting in an empty Mods))
+ */
ignore_op_pl(pb);
return;
}
@@ -232,7 +234,15 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber
prim_op = get_thread_primary_op();
ident = sync_persist_get_operation_extension(pb);
PR_ASSERT(prim_op);
- PR_ASSERT(ident);
+
+ if ((ident == NULL) && operation_is_flag_set(pb_op, OP_FLAG_NOOP)) {
+ /* This happens for URP (add cenotaph, fixup rename, tombstone resurrect)
+ * As a NOOP betxn plugins are not called and operation ext is not created
+ */
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM, "Skip noop operation (0x%lx)\n",
+ (ulong) pb_op);
+ return;
+ }
/* First mark the operation as completed/failed
* the param to be used once the operation will be pushed
* on the listeners queue
--
2.26.2

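The shape of the guard added above, reduced to a few lines of Python so the control flow is easy to see (the flag value and function names here are illustrative, not the real slapi definitions): a missing extension is tolerated only when the operation is a NOOP, because the betxn plugin that would have created it was legitimately skipped.

OP_FLAG_NOOP = 0x00080000  # illustrative value, not the real slapi flag

def sync_update_persist_op(op_flags, op_ext):
    # betxn plugins are skipped for NOOP operations, so the extension that
    # the pre-op would have created never exists; treat that combination
    # as "nothing to send" instead of asserting and crashing.
    if op_ext is None and (op_flags & OP_FLAG_NOOP):
        return None
    return op_ext  # normal path: the extension is guaranteed here

print(sync_update_persist_op(OP_FLAG_NOOP, None))  # -> None, no crash
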
@ -1,55 +0,0 @@
From 81e9e6431293cbdde5b037c88e5c644f39d3d14d Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Tue, 27 Apr 2021 09:29:32 +0200
Subject: [PATCH 1/2] Issue 4711 - SIGSEV with sync_repl (#4738)

Bug description:
sync_repl sends back entries identified with a unique
identifier that is 'nsuniqueid'. If 'nsuniqueid' is
missing, then it may crash.

Fix description:
Check that a nsuniqueid is available, else return OP_ERR.

relates: https://github.com/389ds/389-ds-base/issues/4711

Reviewed by: Pierre Rogier, James Chapman, William Brown (Thanks!)

Platforms tested: F33
---
ldap/servers/plugins/sync/sync_util.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/ldap/servers/plugins/sync/sync_util.c b/ldap/servers/plugins/sync/sync_util.c
index e64d519e1..bdba0a6c2 100644
--- a/ldap/servers/plugins/sync/sync_util.c
+++ b/ldap/servers/plugins/sync/sync_util.c
@@ -127,8 +127,8 @@ sync_create_state_control(Slapi_Entry *e, LDAPControl **ctrlp, int type, Sync_Co
BerElement *ber;
struct berval *bvp;
char *uuid;
- Slapi_Attr *attr;
- Slapi_Value *val;
+ Slapi_Attr *attr = NULL;
+ Slapi_Value *val = NULL;

if (type == LDAP_SYNC_NONE || ctrlp == NULL || (ber = der_alloc()) == NULL) {
return (LDAP_OPERATIONS_ERROR);
@@ -138,6 +138,14 @@ sync_create_state_control(Slapi_Entry *e, LDAPControl **ctrlp, int type, Sync_Co

slapi_entry_attr_find(e, SLAPI_ATTR_UNIQUEID, &attr);
slapi_attr_first_value(attr, &val);
+ if ((attr == NULL) || (val == NULL)) {
+ /* It may happen with entries in special backends
+ * such like cn=config, cn=shema, cn=monitor...
+ */
+ slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM,
+ "sync_create_state_control - Entries are missing nsuniqueid. Unable to proceed.\n");
+ return (LDAP_OPERATIONS_ERROR);
+ }
uuid = sync_nsuniqueid2uuid(slapi_value_get_string(val));
if ((rc = ber_printf(ber, "{eo", type, uuid, 16)) != -1) {
if (cookie) {
--
2.31.1

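For readers unfamiliar with the failure mode: the crash happened because the code dereferenced the nsuniqueid value without checking it exists, and entries under cn=config, cn=schema or cn=monitor may not carry one. A tiny Python sketch of the defensive pattern the patch introduces (dict-based entries and the return value are illustrative, not the slapi API):

def create_state_control(entry):
    # cn=config, cn=schema and cn=monitor entries may carry no nsuniqueid
    vals = entry.get("nsuniqueid") or []
    if not vals:
        return None  # maps to LDAP_OPERATIONS_ERROR in the C code above
    return vals[0]

print(create_state_control({"cn": ["config"]}))  # None instead of a crash
print(create_state_control({"nsuniqueid": ["aa777777-bb888888-cc999999-dd000000"]}))  # made-up id
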
@ -1,206 +0,0 @@
From 16ec195b12688bcbe0d113396eee782175102565 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Mon, 14 Dec 2020 10:41:58 +0100
Subject: [PATCH] Issue 4492 - Changelog cache can upload updates from a wrong
starting point (CSN)

Bug description:
When a replication session starts, a starting point is computed
according to the supplier/consumer RUVs.
From the starting point the updates are bulk loaded from the CL.
When a bulk set has been fully evaluated the server needs to bulk load another set.
It iterates until there are no more updates to send.
The bug is that during bulk load it recomputes the CL cursor position,
and this computation can be wrong. For example, if there is a new update on
a rarely updated replica (or a not yet known replica), the new position can
be set before the initial starting point.

Fix description:
Fixing the invalid computation is a bit risky (complex code resulting from
years of corner-case handling) and a fix could fail to address other flavors
with the same symptom.
The fix is only (sorry for that) a safety-checking fix that ends a replication session
if the computed cursor position goes before the initial starting point.
In case of a large jump behind (24h) the starting point, a warning is logged.

relates: https://github.com/389ds/389-ds-base/issues/4492

Reviewed by: Mark Reynolds, William Brown

Platforms tested: F31
---
ldap/servers/plugins/replication/cl5_api.c | 6 +-
.../servers/plugins/replication/cl5_clcache.c | 60 ++++++++++++++++++-
.../servers/plugins/replication/cl5_clcache.h | 4 +-
3 files changed, 63 insertions(+), 7 deletions(-)

diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index d7e47495a..403a6a666 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -143,6 +143,7 @@ struct cl5replayiterator
ReplicaId consumerRID; /* consumer's RID */
const RUV *consumerRuv; /* consumer's update vector */
Object *supplierRuvObj; /* supplier's update vector object */
+ char starting_csn[CSN_STRSIZE];
};

typedef struct cl5iterator
@@ -1367,7 +1368,7 @@ cl5GetNextOperationToReplay(CL5ReplayIterator *iterator, CL5Entry *entry)
return CL5_BAD_DATA;
}

- rc = clcache_get_next_change(iterator->clcache, (void **)&key, &keylen, (void **)&data, &datalen, &csn);
+ rc = clcache_get_next_change(iterator->clcache, (void **)&key, &keylen, (void **)&data, &datalen, &csn, iterator->starting_csn);

if (rc == DB_NOTFOUND) {
/*
@@ -4999,7 +5000,7 @@ _cl5PositionCursorForReplay(ReplicaId consumerRID, const RUV *consumerRuv, Repli
if (rc != 0)
goto done;

- rc = clcache_load_buffer(clcache, &startCSN, continue_on_missing);
+ rc = clcache_load_buffer(clcache, &startCSN, continue_on_missing, NULL);

if (rc == 0) {
haveChanges = PR_TRUE;
@@ -5063,6 +5064,7 @@ _cl5PositionCursorForReplay(ReplicaId consumerRID, const RUV *consumerRuv, Repli
(*iterator)->consumerRID = consumerRID;
(*iterator)->consumerRuv = consumerRuv;
(*iterator)->supplierRuvObj = supplierRuvObj;
+ csn_as_string(startCSN, PR_FALSE, (*iterator)->starting_csn);
} else if (rc == CL5_SUCCESS) {
/* we have no changes to send */
rc = CL5_NOTFOUND;
diff --git a/ldap/servers/plugins/replication/cl5_clcache.c b/ldap/servers/plugins/replication/cl5_clcache.c
index 6b591fb8d..fcbca047a 100644
--- a/ldap/servers/plugins/replication/cl5_clcache.c
+++ b/ldap/servers/plugins/replication/cl5_clcache.c
@@ -15,6 +15,8 @@
#include "db.h" /* Berkeley DB */
#include "cl5.h" /* changelog5Config */
#include "cl5_clcache.h"
+#include "slap.h"
+#include "proto-slap.h"

/* newer bdb uses DB_BUFFER_SMALL instead of ENOMEM as the
error return if the given buffer in which to load a
@@ -323,14 +325,21 @@ clcache_return_buffer(CLC_Buffer **buf)
* anchorcsn - passed in for the first load of a replication session;
* flag - DB_SET to load in the key CSN record.
* DB_NEXT to load in the records greater than key CSN.
+ * initial_starting_csn
+ * This is the starting_csn computed at the beginning of
+ * the replication session. It never change during a session
+ * (aka iterator creation).
+ * This is used for safety checking that the next CSN use
+ * for bulk load is not before the initial csn
* return - DB error code instead of cl5 one because of the
* historic reason.
*/
int
-clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss)
+clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss, char *initial_starting_csn)
{
int rc = 0;
int flag = DB_NEXT;
+ CSN limit_csn = {0};

if (anchorCSN)
*anchorCSN = NULL;
@@ -343,6 +352,30 @@ clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss)
rc = clcache_adjust_anchorcsn(buf, &flag);
}

+ /* safety checking, we do not want to (re)start replication before
+ * the inital computed starting point
+ */
+ if (initial_starting_csn) {
+ csn_init_by_string(&limit_csn, initial_starting_csn);
+ if (csn_compare(&limit_csn, buf->buf_current_csn) > 0) {
+ char curr[CSN_STRSIZE];
+ int loglevel = SLAPI_LOG_REPL;
+
+ if (csn_time_difference(&limit_csn, buf->buf_current_csn) > (24 * 60 * 60)) {
+ /* This is a big jump (more than a day) behind the
+ * initial starting csn. Log a warning before ending
+ * the session
+ */
+ loglevel = SLAPI_LOG_WARNING;
+ }
+ csn_as_string(buf->buf_current_csn, 0, curr);
+ slapi_log_err(loglevel, buf->buf_agmt_name,
+ "clcache_load_buffer - bulk load cursor (%s) is lower than starting csn %s. Ending session.\n", curr, initial_starting_csn);
+ /* it just end the session with UPDATE_NO_MORE_UPDATES */
+ rc = CLC_STATE_DONE;
+ }
+ }
+
if (rc == 0) {

buf->buf_state = CLC_STATE_READY;
@@ -365,6 +398,27 @@ clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss)
}
/* the use of alternative start csns can be limited, record its usage */
(*continue_on_miss)--;
+
+ if (initial_starting_csn) {
+ if (csn_compare(&limit_csn, buf->buf_current_csn) > 0) {
+ char curr[CSN_STRSIZE];
+ int loglevel = SLAPI_LOG_REPL;
+
+ if (csn_time_difference(&limit_csn, buf->buf_current_csn) > (24 * 60 * 60)) {
+ /* This is a big jump (more than a day) behind the
+ * initial starting csn. Log a warning before ending
+ * the session
+ */
+ loglevel = SLAPI_LOG_WARNING;
+ }
+ csn_as_string(buf->buf_current_csn, 0, curr);
+ slapi_log_err(loglevel, buf->buf_agmt_name,
+ "clcache_load_buffer - (DB_SET_RANGE) bulk load cursor (%s) is lower than starting csn %s. Ending session.\n", curr, initial_starting_csn);
+ rc = DB_NOTFOUND;
+
+ return rc;
+ }
+ }
}
/* Reset some flag variables */
if (rc == 0) {
@@ -492,7 +546,7 @@ retry:
* *data: output - data of the next change, or NULL if no more change
*/
int
-clcache_get_next_change(CLC_Buffer *buf, void **key, size_t *keylen, void **data, size_t *datalen, CSN **csn)
+clcache_get_next_change(CLC_Buffer *buf, void **key, size_t *keylen, void **data, size_t *datalen, CSN **csn, char *initial_starting_csn)
{
int skip = 1;
int rc = 0;
@@ -510,7 +564,7 @@ clcache_get_next_change(CLC_Buffer *buf, void **key, size_t *keylen, void **data
* We're done with the current buffer. Now load the next chunk.
*/
if (NULL == *key && CLC_STATE_READY == buf->buf_state) {
- rc = clcache_load_buffer(buf, NULL, NULL);
+ rc = clcache_load_buffer(buf, NULL, NULL, initial_starting_csn);
if (0 == rc && buf->buf_record_ptr) {
DB_MULTIPLE_KEY_NEXT(buf->buf_record_ptr, &buf->buf_data,
*key, *keylen, *data, *datalen);
diff --git a/ldap/servers/plugins/replication/cl5_clcache.h b/ldap/servers/plugins/replication/cl5_clcache.h
index 73eb41590..16d53d563 100644
--- a/ldap/servers/plugins/replication/cl5_clcache.h
+++ b/ldap/servers/plugins/replication/cl5_clcache.h
@@ -23,9 +23,9 @@ typedef struct clc_buffer CLC_Buffer;
int clcache_init(DB_ENV **dbenv);
void clcache_set_config(void);
int clcache_get_buffer(CLC_Buffer **buf, DB *db, ReplicaId consumer_rid, const RUV *consumer_ruv, const RUV *local_ruv);
-int clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss);
+int clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss, char *initial_starting_csn);
void clcache_return_buffer(CLC_Buffer **buf);
-int clcache_get_next_change(CLC_Buffer *buf, void **key, size_t *keylen, void **data, size_t *datalen, CSN **csn);
+int clcache_get_next_change(CLC_Buffer *buf, void **key, size_t *keylen, void **data, size_t *datalen, CSN **csn, char *initial_starting_csn);
void clcache_destroy(void);

#endif
--
2.31.1

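The safety check added above reduces to a simple rule: if the recomputed cursor CSN falls before the session's initial starting CSN, end the session, and raise the log level when the jump is bigger than a day. A rough Python model of that rule (timestamps stand in for CSNs; names and return strings are illustrative only):

DAY = 24 * 60 * 60

def check_cursor(initial_ts, current_ts):
    # end the bulk-load session if the recomputed cursor went backwards,
    # warning loudly when the jump exceeds one day
    if current_ts < initial_ts:
        level = "WARN" if initial_ts - current_ts > DAY else "REPL"
        print("[%s] cursor %d is lower than starting csn %d, ending session"
              % (level, current_ts, initial_ts))
        return "CLC_STATE_DONE"
    return "CLC_STATE_READY"

print(check_cursor(1_000_000, 1_000_000 - 2 * DAY))  # warning + session end
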
@ -1,146 +0,0 @@
From f05f5f20a468efa82d13a99687ac5d3a5d80a3c9 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Tue, 23 Feb 2021 13:42:31 +0100
Subject: [PATCH] Issue 4644 - Large updates can reset the CLcache to the
beginning of the changelog (#4647)

Bug description:
The replication agreements are using bulk load to load updates.
For bulk load it uses a cursor with DB_MULTIPLE_KEY and DB_NEXT.
Before using the cursor, it must be initialized with DB_SET.

If during the cursor/DB_SET the CSN refers to an update that is larger than
the size of the provided buffer, then the cursor remains uninitialized and
c_get returns DB_BUFFER_SMALL.

The consequence is that the next c_get(DB_MULTIPLE_KEY and DB_NEXT) will return the
first record in the changelog DB. This breaks the CLcache.

Fix description:
The fix is to harden cursor initialization so that, if DB_SET fails
because of DB_BUFFER_SMALL, it reallocates buf_data and retries the DB_SET.
If the cursor still can not be initialized, it logs a warning.

The patch also changes the behaviour of the fix for #4492.
#4492 detected a massive (1 day) jump prior to the starting csn and ended the
replication session. If the jump was systematic, for example
if the CLcache got broken because of a too large update, then
replication was systematically stopped.
This patch suppresses that systematic stop, letting the RA do a big jump.
From #4492 only the warning remains.

relates: https://github.com/389ds/389-ds-base/issues/4644

Reviewed by: Pierre Rogier (Thanks !!!!)

Platforms tested: F31
---
.../servers/plugins/replication/cl5_clcache.c | 68 +++++++++++++++----
1 file changed, 53 insertions(+), 15 deletions(-)

diff --git a/ldap/servers/plugins/replication/cl5_clcache.c b/ldap/servers/plugins/replication/cl5_clcache.c
index fcbca047a..90dec4d54 100644
--- a/ldap/servers/plugins/replication/cl5_clcache.c
+++ b/ldap/servers/plugins/replication/cl5_clcache.c
@@ -370,9 +370,7 @@ clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss, cha
}
csn_as_string(buf->buf_current_csn, 0, curr);
slapi_log_err(loglevel, buf->buf_agmt_name,
- "clcache_load_buffer - bulk load cursor (%s) is lower than starting csn %s. Ending session.\n", curr, initial_starting_csn);
- /* it just end the session with UPDATE_NO_MORE_UPDATES */
- rc = CLC_STATE_DONE;
+ "clcache_load_buffer - bulk load cursor (%s) is lower than starting csn %s.\n", curr, initial_starting_csn);
}
}

@@ -413,10 +411,7 @@ clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss, cha
}
csn_as_string(buf->buf_current_csn, 0, curr);
slapi_log_err(loglevel, buf->buf_agmt_name,
- "clcache_load_buffer - (DB_SET_RANGE) bulk load cursor (%s) is lower than starting csn %s. Ending session.\n", curr, initial_starting_csn);
- rc = DB_NOTFOUND;
-
- return rc;
+ "clcache_load_buffer - (DB_SET_RANGE) bulk load cursor (%s) is lower than starting csn %s.\n", curr, initial_starting_csn);
}
}
}
@@ -444,6 +439,42 @@ clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss, cha
return rc;
}

+/* Set a cursor to a specific key (buf->buf_key)
+ * In case buf_data is too small to receive the value, DB_SET fails
+ * (DB_BUFFER_SMALL). This let the cursor uninitialized that is
+ * problematic because further cursor DB_NEXT will reset the cursor
+ * to the beginning of the CL.
+ * If buf_data is too small, this function reallocates enough space
+ *
+ * It returns the return code of cursor->c_get
+ */
+static int
+clcache_cursor_set(DBC *cursor, CLC_Buffer *buf)
+{
+ int rc;
+ uint32_t ulen;
+ uint32_t dlen;
+ uint32_t size;
+
+ rc = cursor->c_get(cursor, &buf->buf_key, &buf->buf_data, DB_SET);
+ if (rc == DB_BUFFER_SMALL) {
+ uint32_t ulen;
+
+ /* Fortunately, buf->buf_data.size has been set by
+ * c_get() to the actual data size needed. So we can
+ * reallocate the data buffer and try to set again.
+ */
+ ulen = buf->buf_data.ulen;
+ buf->buf_data.ulen = (buf->buf_data.size / DEFAULT_CLC_BUFFER_PAGE_SIZE + 1) * DEFAULT_CLC_BUFFER_PAGE_SIZE;
+ buf->buf_data.data = slapi_ch_realloc(buf->buf_data.data, buf->buf_data.ulen);
+ slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
+ "clcache_cursor_set - buf data len reallocated %d -> %d bytes (DB_BUFFER_SMALL)\n",
+ ulen, buf->buf_data.ulen);
+ rc = cursor->c_get(cursor, &buf->buf_key, &buf->buf_data, DB_SET);
+ }
+ return rc;
+}
+
static int
clcache_load_buffer_bulk(CLC_Buffer *buf, int flag)
{
@@ -478,17 +509,24 @@ retry:

if (use_flag == DB_NEXT) {
/* For bulk read, position the cursor before read the next block */
- rc = cursor->c_get(cursor,
- &buf->buf_key,
- &buf->buf_data,
- DB_SET);
+ rc = clcache_cursor_set(cursor, buf);
}

- /*
- * Continue if the error is no-mem since we don't need to
- * load in the key record anyway with DB_SET.
- */
if (0 == rc || DB_BUFFER_SMALL == rc) {
+ /*
+ * It should not have failed with DB_BUFFER_SMALL as we tried
+ * to adjust buf_data in clcache_cursor_set.
+ * But if it failed with DB_BUFFER_SMALL, there is a risk in clcache_cursor_get
+ * that the cursor will be reset to the beginning of the changelog.
+ * Returning an error at this point will stop replication that is
+ * a risk. So just accept the risk of a reset to the beginning of the CL
+ * and log an alarming message.
+ */
+ if (rc == DB_BUFFER_SMALL) {
+ slapi_log_err(SLAPI_LOG_WARNING, buf->buf_agmt_name,
+ "clcache_load_buffer_bulk - Fail to position on csn=%s from the changelog (too large update ?). Risk of full CL evaluation.\n",
+ (char *)buf->buf_key.data);
+ }
+ rc = clcache_cursor_get(cursor, buf, use_flag);
}
}
--
2.31.1

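The retry strategy in clcache_cursor_set above is a generic pattern: when a fetch fails because the destination buffer is too small, grow the buffer to the reported size rounded up to a whole page and try once more. A self-contained Python sketch of that pattern (the exception class, page size, and fetch callback are all invented stand-ins for the libdb DBT machinery):

PAGE = 32 * 1024  # stand-in for DEFAULT_CLC_BUFFER_PAGE_SIZE

class BufferTooSmall(Exception):
    def __init__(self, needed):
        self.needed = needed  # like DBT.size after a failed c_get

def cursor_set(fetch, key, buflen):
    # first attempt, then one retry with the buffer grown to the reported
    # size rounded up to a whole page
    try:
        return fetch(key, buflen)
    except BufferTooSmall as e:
        buflen = (e.needed // PAGE + 1) * PAGE
        return fetch(key, buflen)

def fetch(key, buflen):
    data = b"x" * 40000  # a record larger than one page
    if buflen < len(data):
        raise BufferTooSmall(len(data))
    return data

print(len(cursor_set(fetch, b"csn", PAGE)))  # 40000, after one retry
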
@ -1,40 +0,0 @@
From 7e042cbc74440b81f46efa73ccb36d80732c7074 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 28 Jan 2021 10:39:31 +0100
Subject: [PATCH] Issue 4563 - Failure on s390x: 'Fails to split RDN
"o=pki-tomcat-CA" into components' (#4573)

Bug description:
SLAPI_OPERATION_TYPE is stored/read as an int (slapi_pblock_get/set),
although the storage field is an unsigned long.
Calling slapi_pblock_get with a long (8 bytes) destination creates
a problem on big-endian (s390x).

Fix description:
Define the destination op_type as an int (4 bytes)

relates: https://github.com/389ds/389-ds-base/issues/4563

Reviewed by: Mark Reynolds, William Brown

Platforms tested: F31 (little endian), Debian (big endian)
---
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index a507f3c31..49ca01d1d 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -216,7 +216,7 @@ error:
int32_t
entry_get_rdn_mods(Slapi_PBlock *pb, Slapi_Entry *entry, CSN *csn, int repl_op, Slapi_Mods **smods_ret)
{
- unsigned long op_type = SLAPI_OPERATION_NONE;
+ int op_type = SLAPI_OPERATION_NONE;
char *new_rdn = NULL;
char **dns = NULL;
char **rdns = NULL;
--
2.31.1

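The width bug described above can be modeled in a few lines of Python with the struct module: on a big-endian machine, reading an 8-byte value through a 4-byte window yields the high (zero) half instead of the value.

import struct

stored = struct.pack(">Q", 3)                 # 8-byte big-endian field holding 3
as_int = struct.unpack(">I", stored[:4])[0]   # read through a 4-byte int: 0
as_long = struct.unpack(">Q", stored)[0]      # read at the correct width: 3
print(as_int, as_long)  # 0 3 -> the op type silently reads as NONE on s390x

On little-endian hardware the low 4 bytes come first, so the same mismatch happens to return the right value, which is why the bug only surfaced on s390x.
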
565
SOURCES/Cargo.lock
generated
Normal file
@ -0,0 +1,565 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3

[[package]]
name = "ansi_term"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
dependencies = [
 "winapi",
]

[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
 "hermit-abi",
 "libc",
 "winapi",
]

[[package]]
name = "autocfg"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"

[[package]]
name = "base64"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"

[[package]]
name = "bitflags"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"

[[package]]
name = "byteorder"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"

[[package]]
name = "cbindgen"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
dependencies = [
 "clap",
 "log",
 "proc-macro2",
 "quote",
 "serde",
 "serde_json",
 "syn",
 "tempfile",
 "toml",
]

[[package]]
name = "cc"
version = "1.0.68"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787"
dependencies = [
 "jobserver",
]

[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

[[package]]
name = "clap"
version = "2.33.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
dependencies = [
 "ansi_term",
 "atty",
 "bitflags",
 "strsim",
 "textwrap",
 "unicode-width",
 "vec_map",
]

[[package]]
name = "entryuuid"
version = "0.1.0"
dependencies = [
 "cc",
 "libc",
 "paste",
 "slapi_r_plugin",
 "uuid",
]

[[package]]
name = "entryuuid_syntax"
version = "0.1.0"
dependencies = [
 "cc",
 "libc",
 "paste",
 "slapi_r_plugin",
 "uuid",
]

[[package]]
name = "fernet"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
dependencies = [
 "base64",
 "byteorder",
 "getrandom",
 "openssl",
 "zeroize",
]

[[package]]
name = "foreign-types"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
dependencies = [
 "foreign-types-shared",
]

[[package]]
name = "foreign-types-shared"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"

[[package]]
name = "getrandom"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
dependencies = [
 "cfg-if",
 "libc",
 "wasi",
]

[[package]]
name = "hermit-abi"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
dependencies = [
 "libc",
]

[[package]]
name = "itoa"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"

[[package]]
name = "jobserver"
version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd"
dependencies = [
 "libc",
]

[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

[[package]]
name = "libc"
version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36"

[[package]]
name = "librnsslapd"
version = "0.1.0"
dependencies = [
 "cbindgen",
 "libc",
 "slapd",
]

[[package]]
name = "librslapd"
version = "0.1.0"
dependencies = [
 "cbindgen",
 "libc",
 "slapd",
]

[[package]]
name = "log"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
dependencies = [
 "cfg-if",
]

[[package]]
name = "once_cell"
version = "1.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3"

[[package]]
name = "openssl"
version = "0.10.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8"
dependencies = [
 "bitflags",
 "cfg-if",
 "foreign-types",
 "libc",
 "once_cell",
 "openssl-sys",
]

[[package]]
name = "openssl-sys"
version = "0.9.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98"
dependencies = [
 "autocfg",
 "cc",
 "libc",
 "pkg-config",
 "vcpkg",
]

[[package]]
name = "paste"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
dependencies = [
 "paste-impl",
 "proc-macro-hack",
]

[[package]]
name = "paste-impl"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
dependencies = [
 "proc-macro-hack",
]

[[package]]
name = "pkg-config"
version = "0.3.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"

[[package]]
name = "ppv-lite86"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"

[[package]]
name = "proc-macro-hack"
version = "0.5.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"

[[package]]
name = "proc-macro2"
version = "1.0.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
dependencies = [
 "unicode-xid",
]

[[package]]
name = "quote"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
dependencies = [
 "proc-macro2",
]

[[package]]
name = "rand"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
dependencies = [
 "libc",
 "rand_chacha",
 "rand_core",
 "rand_hc",
]

[[package]]
name = "rand_chacha"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
dependencies = [
 "ppv-lite86",
 "rand_core",
]

[[package]]
name = "rand_core"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"
dependencies = [
 "getrandom",
]

[[package]]
name = "rand_hc"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
dependencies = [
 "rand_core",
]

[[package]]
name = "redox_syscall"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc"
dependencies = [
 "bitflags",
]

[[package]]
name = "remove_dir_all"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
dependencies = [
 "winapi",
]

[[package]]
name = "rsds"
version = "0.1.0"

[[package]]
name = "ryu"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"

[[package]]
name = "serde"
version = "1.0.126"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.126"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "serde_json"
version = "1.0.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79"
dependencies = [
 "itoa",
 "ryu",
 "serde",
]

[[package]]
name = "slapd"
version = "0.1.0"
dependencies = [
 "fernet",
]

[[package]]
name = "slapi_r_plugin"
version = "0.1.0"
dependencies = [
 "lazy_static",
 "libc",
 "paste",
 "uuid",
]

[[package]]
name = "strsim"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"

[[package]]
name = "syn"
version = "1.0.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82"
dependencies = [
 "proc-macro2",
 "quote",
 "unicode-xid",
]

[[package]]
name = "synstructure"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
 "unicode-xid",
]

[[package]]
name = "tempfile"
version = "3.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
dependencies = [
 "cfg-if",
 "libc",
 "rand",
 "redox_syscall",
 "remove_dir_all",
 "winapi",
]

[[package]]
name = "textwrap"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
dependencies = [
 "unicode-width",
]

[[package]]
name = "toml"
version = "0.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa"
dependencies = [
 "serde",
]

[[package]]
name = "unicode-width"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"

[[package]]
name = "unicode-xid"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"

[[package]]
name = "uuid"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
dependencies = [
 "getrandom",
]

[[package]]
name = "vcpkg"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "025ce40a007e1907e58d5bc1a594def78e5573bb0b1160bc389634e8f12e4faa"

[[package]]
name = "vec_map"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"

[[package]]
name = "wasi"
version = "0.10.2+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"

[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
 "winapi-i686-pc-windows-gnu",
 "winapi-x86_64-pc-windows-gnu",
]

[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"

[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

[[package]]
name = "zeroize"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd"
dependencies = [
 "zeroize_derive",
]

[[package]]
name = "zeroize_derive"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
 "synstructure",
]
@ -16,7 +16,7 @@ ExcludeArch: i686
|
||||
%global use_Socket6 0
|
||||
|
||||
%global use_asan 0
|
||||
%global use_rust 0
|
||||
%global use_rust 1
|
||||
%global use_legacy 1
|
||||
%global bundle_jemalloc 1
|
||||
%if %{use_asan}
|
||||
@ -42,10 +42,13 @@ ExcludeArch: i686
|
||||
# set PIE flag
|
||||
%global _hardened_build 1
|
||||
|
||||
# Filter argparse-manpage from autogenerated package Requires
|
||||
%global __requires_exclude ^python.*argparse-manpage
|
||||
|
||||
Summary: 389 Directory Server (base)
|
||||
Name: 389-ds-base
|
||||
Version: 1.4.3.16
|
||||
Release: %{?relprefix}19%{?prerel}%{?dist}
|
||||
Version: 1.4.3.23
|
||||
Release: %{?relprefix}10%{?prerel}%{?dist}
|
||||
License: GPLv3+
|
||||
URL: https://www.port389.org
|
||||
Group: System Environment/Daemons
|
||||
@ -54,6 +57,62 @@ Conflicts: freeipa-server < 4.0.3
|
||||
Obsoletes: %{name} <= 1.4.0.9
|
||||
Provides: ldif2ldbm >= 0
|
||||
|
||||
##### Bundled cargo crates list - START #####
|
||||
Provides: bundled(crate(ansi_term)) = 0.11.0
|
||||
Provides: bundled(crate(atty)) = 0.2.14
|
||||
Provides: bundled(crate(autocfg)) = 1.0.1
|
||||
Provides: bundled(crate(base64)) = 0.10.1
|
||||
Provides: bundled(crate(bitflags)) = 1.2.1
|
||||
Provides: bundled(crate(byteorder)) = 1.4.2
|
||||
Provides: bundled(crate(cbindgen)) = 0.9.1
|
||||
Provides: bundled(crate(cc)) = 1.0.66
|
||||
Provides: bundled(crate(cfg-if)) = 0.1.10
|
||||
Provides: bundled(crate(cfg-if)) = 1.0.0
|
||||
Provides: bundled(crate(clap)) = 2.33.3
|
||||
Provides: bundled(crate(fernet)) = 0.1.3
|
||||
Provides: bundled(crate(foreign-types)) = 0.3.2
|
||||
Provides: bundled(crate(foreign-types-shared)) = 0.1.1
|
||||
Provides: bundled(crate(getrandom)) = 0.1.16
|
||||
Provides: bundled(crate(hermit-abi)) = 0.1.17
|
||||
Provides: bundled(crate(itoa)) = 0.4.7
|
||||
Provides: bundled(crate(lazy_static)) = 1.4.0
|
||||
Provides: bundled(crate(libc)) = 0.2.82
|
||||
Provides: bundled(crate(librnsslapd)) = 0.1.0
|
||||
Provides: bundled(crate(librslapd)) = 0.1.0
|
||||
Provides: bundled(crate(log)) = 0.4.11
|
||||
Provides: bundled(crate(openssl)) = 0.10.32
|
||||
Provides: bundled(crate(openssl-sys)) = 0.9.60
|
||||
Provides: bundled(crate(pkg-config)) = 0.3.19
|
||||
Provides: bundled(crate(ppv-lite86)) = 0.2.10
|
||||
Provides: bundled(crate(proc-macro2)) = 1.0.24
|
||||
Provides: bundled(crate(quote)) = 1.0.8
|
||||
Provides: bundled(crate(rand)) = 0.7.3
|
||||
Provides: bundled(crate(rand_chacha)) = 0.2.2
|
||||
Provides: bundled(crate(rand_core)) = 0.5.1
|
||||
Provides: bundled(crate(rand_hc)) = 0.2.0
|
||||
Provides: bundled(crate(redox_syscall)) = 0.1.57
|
||||
Provides: bundled(crate(remove_dir_all)) = 0.5.3
|
||||
Provides: bundled(crate(rsds)) = 0.1.0
|
||||
Provides: bundled(crate(ryu)) = 1.0.5
|
||||
Provides: bundled(crate(serde)) = 1.0.118
|
||||
Provides: bundled(crate(serde_derive)) = 1.0.118
|
||||
Provides: bundled(crate(serde_json)) = 1.0.61
|
||||
Provides: bundled(crate(slapd)) = 0.1.0
|
||||
Provides: bundled(crate(strsim)) = 0.8.0
|
||||
Provides: bundled(crate(syn)) = 1.0.58
|
||||
Provides: bundled(crate(tempfile)) = 3.1.0
|
||||
Provides: bundled(crate(textwrap)) = 0.11.0
|
||||
Provides: bundled(crate(toml)) = 0.5.8
|
||||
Provides: bundled(crate(unicode-width)) = 0.1.8
|
||||
Provides: bundled(crate(unicode-xid)) = 0.2.1
|
||||
Provides: bundled(crate(vcpkg)) = 0.2.11
|
||||
Provides: bundled(crate(vec_map)) = 0.8.2
|
||||
Provides: bundled(crate(wasi)) = 0.9.0+wasi_snapshot_preview1
|
||||
Provides: bundled(crate(winapi)) = 0.3.9
|
||||
Provides: bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0
|
||||
Provides: bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0
|
||||
##### Bundled cargo crates list - END #####
|
||||
|
||||
BuildRequires: nspr-devel
|
||||
BuildRequires: nss-devel >= 3.34
|
||||
BuildRequires: perl-generators
|
||||
@ -174,53 +233,40 @@ Source2: %{name}-devel.README
|
||||
%if %{bundle_jemalloc}
|
||||
Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2
|
||||
%endif
|
||||
Patch01: 0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch
|
||||
Patch02: 0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch
|
||||
Patch03: 0003-do-not-add-referrals-for-masters-with-different-data.patch
|
||||
Patch04: 0004-Ticket-50933-Update-2307compat.ldif.patch
|
||||
Patch05: 0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch
|
||||
Patch06: 0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch
|
||||
Patch07: 0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch
|
||||
Patch08: 0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch
|
||||
Patch09: 0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch
|
||||
Patch10: 0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch
|
||||
Patch11: 0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch
|
||||
Patch12: 0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch
|
||||
Patch13: 0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch
|
||||
Patch14: 0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch
|
||||
Patch15: 0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch
|
||||
Patch16: 0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch
|
||||
Patch17: 0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch
|
||||
Patch18: 0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch
|
||||
Patch19: 0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch
|
||||
Patch20: 0020-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch
|
||||
Patch21: 0021-Issue-4418-ldif2db-offline.-Warn-the-user-of-skipped.patch
|
||||
Patch22: 0022-Fix-cherry-pick-erorr.patch
|
||||
Patch23: 0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch
|
||||
Patch24: 0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch
|
||||
Patch25: 0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch
|
||||
Patch26: 0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch
|
||||
Patch27: 0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch
|
||||
Patch28: 0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch
|
||||
Patch29: 0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch
|
||||
Patch30: 0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch
|
||||
Patch31: 0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch
|
||||
Patch32: 0032-Backport-tests-from-master-branch-fix-failing-tests-.patch
|
||||
Patch33: 0033-Issue-5442-Search-results-are-different-between-RHDS.patch
|
||||
Patch34: 0034-Issue-4526-sync_repl-when-completing-an-operation-in.patch
|
||||
Patch35: 0035-Issue-4581-A-failed-re-indexing-leaves-the-database-.patch
|
||||
Patch36: 0036-Issue-4513-CI-Tests-fix-test-failures.patch
|
||||
Patch37: 0037-Issue-4609-CVE-info-disclosure-when-authenticating.patch
|
||||
Patch38: 0038-Issue-4649-crash-in-sync_repl-when-a-MODRDN-create-a.patch
|
||||
Patch39: 0039-Issue-4711-SIGSEV-with-sync_repl-4738.patch
|
||||
Patch40: 0040-Issue-4764-replicated-operation-sometime-checks-ACI-.patch
|
||||
Patch41: 0041-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch
|
||||
Patch42: 0042-Issue-4492-Changelog-cache-can-upload-updates-from-a.patch
|
||||
Patch43: 0043-Issue-4644-Large-updates-can-reset-the-CLcache-to-th.patch
|
||||
Patch44: 0044-Issue-4563-Failure-on-s390x-Fails-to-split-RDN-o-pki.patch
|
||||
Patch45: 0045-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch
|
||||
Patch46: 0046-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch
|
||||
Patch47: 0047-Issue-4837-persistent-search-returns-entries-even-wh.patch
|
||||
%if %{use_rust}
|
||||
Source4: vendor-%{version}-2.tar.gz
|
||||
Source5: Cargo.lock
|
||||
%endif
Patch01: 0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch
Patch02: 0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch
Patch03: 0003-Ticket-137-Implement-EntryUUID-plugin.patch
Patch04: 0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch
Patch05: 0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch
Patch06: 0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch
Patch07: 0007-Ticket-51175-resolve-plugin-name-leaking.patch
Patch08: 0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch
Patch09: 0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch
Patch10: 0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch
Patch11: 0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch
Patch12: 0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch
Patch13: 0013-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch
Patch14: 0014-Issue-4396-Minor-memory-leak-in-backend-4558-4572.patch
Patch15: 0015-Issue-4700-Regression-in-winsync-replication-agreeme.patch
Patch16: 0016-Issue-4725-Fix-compiler-warnings.patch
Patch17: 0017-Issue-4814-_cl5_get_tod_expiration-may-crash-at-star.patch
Patch18: 0018-Issue-4789-Temporary-password-rules-are-not-enforce-.patch
Patch19: 0019-Issue-4788-CLI-should-support-Temporary-Password-Rul.patch
Patch20: 0020-Issue-4447-Crash-when-the-Referential-Integrity-log-.patch
Patch21: 0021-Issue-4791-Missing-dependency-for-RetroCL-RFE.patch
Patch22: 0022-Issue-4656-remove-problematic-language-from-ds-replc.patch
Patch23: 0023-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch
Patch24: 0024-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch
Patch25: 0025-Issue-4837-persistent-search-returns-entries-even-wh.patch
Patch26: 0026-Hardcode-gost-crypt-passsword-storage-scheme.patch
Patch27: 0027-Issue-4734-import-of-entry-with-no-parent-warning-47.patch
Patch28: 0028-Issue-4872-BUG-entryuuid-enabled-by-default-causes-r.patch
Patch29: 0029-Remove-GOST-YESCRYPT-password-sotrage-scheme.patch
Patch30: 0030-Issue-4884-server-crashes-when-dnaInterval-attribute.patch

%description
@@ -348,6 +394,10 @@ A cockpit UI Plugin for configuring and administering the 389 Directory Server

%prep
%autosetup -p1 -v -n %{name}-%{version}%{?prerel}
%if %{use_rust}
tar xvzf %{SOURCE4}
cp %{SOURCE5} src/
%endif
%if %{bundle_jemalloc}
%setup -q -n %{name}-%{version}%{?prerel} -T -D -b 3
%endif
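An aside on the Rust steps above: %prep unpacks the vendor tarball and copies the pinned Cargo.lock into src/ so that cargo can resolve every crate locally. An archive produced by `cargo vendor` conventionally pairs with a source-replacement stanza like the following; this is a sketch of that convention, not the literal file shipped in vendor-%{version}-2.tar.gz:

# .cargo/config (sketch): route crates.io lookups to the unpacked vendor/ tree.
[source.crates-io]
replace-with = "vendored-sources"

[source.vendored-sources]
directory = "vendor"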
@@ -365,7 +415,7 @@ ASAN_FLAGS="--enable-asan --enable-debug"
%endif

%if %{use_rust}
RUST_FLAGS="--enable-rust"
RUST_FLAGS="--enable-rust --enable-rust-offline"
%endif
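On the flag change above: --enable-rust-offline ties the build to the crates staged in %prep instead of letting it reach crates.io. Conceptually it gives the same guarantee as cargo's own --offline switch, sketched here (the exact wiring inside configure is not shown in this diff):

# Build strictly from local/vendored sources; a crate missing from
# vendor/ fails the build instead of triggering a network fetch.
cargo build --release --offline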

%if %{use_legacy}
@@ -699,9 +749,6 @@ exit 0
%if %{bundle_jemalloc}
%{_libdir}/%{pkgname}/lib/libjemalloc.so.2
%endif
%if %{use_rust}
%{_libdir}/%{pkgname}/librsds.so
%endif
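A side note on these conditional %files entries: when the corresponding build flags are on, the bundled jemalloc and the Rust library ship inside the server's private libdir rather than the system one. A quick post-install sanity check (the grep pattern is just an assumption for illustration):

# List the installed payload and pick out the conditional libraries.
rpm -ql 389-ds-base | grep -E 'libjemalloc\.so|librsds\.so'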

%if %{use_legacy}
%files legacy-tools
@@ -839,184 +886,63 @@ exit 0
%doc README.md

%changelog
* Wed Jul 21 2021 Thierry Bordaz <tbordaz@redhat.com> - 1.4.3.16-19
- Bump version to 1.4.3.16-19
- Resolves: Bug 1984091 - persistent search returns entries even when an error is returned by content-sync-plugin
* Thu Aug 26 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-10
- Bump version to 1.4.3.23-10
- Resolves: Bug 1997138 - LDAP server crashes when dnaInterval attribute is set to 0

* Mon Jul 19 2021 Thierry Bordaz <tbordaz@redhat.com> - 1.4.3.16-18
- Bump version to 1.4.3.16-18
- Resolves: Bug 1983121 - CRYPT password hash with asterisk allows any bind attempt to succeed
* Wed Aug 25 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-9
- Bump version to 1.4.3.23-9
- Resolves: Bug 1947044 - remove unsupported GOST password storage scheme

* Fri Jul 16 2021 Thierry Bordaz <tbordaz@redhat.com> - 1.4.3.16-17
- Bump version to 1.4.3.16-17
- Resolves: Bug 1983095 - Internal unindexed searches in syncrepl
- Resolves: Bug 1980063 - IPA installation fails on s390x with 389-ds-base-1.4.3.8-4.module+el8.3.0+7193+dfd1e8ad.s390x
* Thu Aug 19 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-8
- Bump version to 1.4.3.23-8
- Resolves: Bug 1947044 - add missing patch for import result code
- Resolves: Bug 1944494 - support for RFC 4530 entryUUID attribute

* Wed Jun 16 2021 Thierry Bordaz <tbordaz@redhat.com> - 1.4.3.16-16
- Bump version to 1.4.3.16-16
- Resolves: Bug 1972738 - Changelog cache can upload updates from a wrong starting point (CSN)
- Resolves: Bug 1972721 - Large updates can reset the CLcache to the beginning of the changelog
* Mon Jul 26 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-7
- Bump version to 1.4.3.23-7
- Resolves: Bug 1983921 - persistent search returns entries even when an error is returned by content-sync-plugin

* Fri Jun 11 2021 Thierry Bordaz <tbordaz@redhat.com> - 1.4.3.16-15
- Bump version to 1.4.3.16-15
- Resolves: Bug 1970791 - A connection can be erroneously flagged as replication conn during evaluation of an aci with ip bind rule
* Fri Jul 16 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-6
- Bump version to 1.4.3.23-6
- Resolves: Bug 1982787 - CRYPT password hash with asterisk allows any bind attempt to succeed

* Tue Jun 08 2021 Thierry Bordaz <tbordaz@redhat.com> - 1.4.3.16-14
- Bump version to 1.4.3.16-14
- Resolves: Bug 1968588 - ACIs are being evaluated against the Replication Manager account in a replication context
- Resolves: Bug 1960720 - sync_repl NULL pointer dereference in sync_create_state_control()
* Thu Jul 15 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-5
- Bump version to 1.4.3.23-5
- Resolves: Bug 1951020 - Internal unindexed searches in syncrepl
- Resolves: Bug 1978279 - ds-replcheck state output message has 'Master' instead of 'Supplier'

* Thu Mar 11 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-13
- Bump version to 1.4.3.16-13
- Resolves: Bug 1930188 - crash in sync_repl when a MODRDN creates a cenotaph
* Tue Jun 29 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-4
- Bump version to 1.4.3.23-4
- Resolves: Bug 1976906 - Instance crash at restart after changelog configuration
- Resolves: Bug 1480323 - ns-slapd crash at startup - Segmentation fault in strcmpi_fast() when the Referential Integrity log is manually edited
- Resolves: Bug 1967596 - Temporary password - add CLI and fix compiler errors

* Mon Mar 1 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-12
- Bump version to 1.4.3.16-12
- Resolves: Bug 1929067 - PKI instance creation failed with new 389-ds-base build
* Thu Jun 17 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-3
- Bump version to 1.4.3.23-3
- Resolves: Bug 1944494 - support for RFC 4530 entryUUID attribute
- Resolves: Bug 1967839 - ACIs are being evaluated against the Replication Manager account in a replication context
- Resolves: Bug 1970259 - A connection can be erroneously flagged as replication conn during evaluation of an aci with ip bind rule
- Resolves: Bug 1972590 - Large updates can reset the CLcache to the beginning of the changelog
- Resolves: Bug 1903221 - Memory leak in 389ds backend (Minor)

* Mon Feb 15 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-11
- Bump version to 1.4.3.16-11
- Resolves: Bug 1924130 - RHDS11: “write” permission of ACI changes ns-slapd’s behavior on search operation (remove patch as it breaks DogTag; will add this patch back after DogTag is fixed)
* Sun May 30 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-2
- Bump version to 1.4.3.23-2
- Resolves: Bug 1812286 - RFE - Monitor the current DB locks (nsslapd-db-current-locks)
- Resolves: Bug 1748441 - RFE - Schedule execution of "compactdb" at specific date/time
- Resolves: Bug 1938239 - RFE - Extend DNA plugin to support interval sizes for subuids

* Wed Feb 10 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-10
- Bump version to 1.4.3.16-10
- Resolves: Bug 1924130 - RHDS11: “write” permission of ACI changes ns-slapd’s behavior on search operation (part 2)
* Fri May 14 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-1
- Bump version to 1.4.3.23-1
- Resolves: Bug 1947044 - Rebase 389 DS with 389-ds-base-1.4.3.23 for RHEL 8.5
- Resolves: Bug 1850664 - RFE - Add an option for the Retro Changelog to ignore some attributes
- Resolves: Bug 1903221 - Memory leak in 389ds backend (Minor)
- Resolves: Bug 1898541 - Changelog cache can upload updates from a wrong starting point (CSN)
- Resolves: Bug 1889562 - client psearch with multiple threads hangs if nsslapd-maxthreadsperconn is undersized
- Resolves: Bug 1924848 - Negative wtime on ldapcompare
- Resolves: Bug 1895460 - RFE - Log an additional message if the server certificate nickname doesn't match nsSSLPersonalitySSL value
- Resolves: Bug 1897614 - Performance search rate: change entry cache monitor to recursive pthread mutex
- Resolves: Bug 1939607 - hang because of incorrect accounting of readers in vattr rwlock
- Resolves: Bug 1626633 - [RFE] DS - Update the password policy to support a Temporary Password with expiration
- Resolves: Bug 1952804 - CVE-2021-3514 389-ds:1.4/389-ds-base: sync_repl NULL pointer dereference in sync_create_state_control()

* Tue Feb 2 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-9
- Bump version to 1.4.3.16-9
- Resolves: Bug 1924130 - RHDS11: “write” permission of ACI changes ns-slapd’s behavior on search operation
- Resolves: Bug 1916677 - A failed re-indexing leaves the database in a broken state.
- Resolves: Bug 1912822 - sync_repl: when completing an operation in the pending list, it can select the wrong operation

* Wed Jan 13 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-8
- Bump version to 1.4.3.16-8
- Resolves: Bug 1903539 - cn=monitor is throwing err=32 with scope: -s one
- Resolves: Bug 1893870 - PR_WaitCondVar() issue causes replication delay when clock jumps backwards

* Thu Jan 7 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-7
- Bump version to 1.4.3.16-7
- Resolves: Bug 1890118 - SIGFPE crash in rhds disk monitoring routine
- Resolves: Bug 1904991 - 389-ds:1.4/389-ds-base: information disclosure during the binding of a DN
- Resolves: Bug 1627645 - ldif2db does not change exit code when there are skipped entries

* Wed Dec 16 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-6
- Bump version to 1.4.3.16-6
- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV - consumer (Unavailable) State (green) Reason (error (0))
- Resolves: Bug 1904991 - Unexpected info returned to ldap request
- Resolves: Bug 1843838 - heap-use-after-free in slapi_be_getsuffix
- Resolves: Bug 1903133 - Server-Cert.crt created using dscreate has Subject: CN=localhost instead of the hostname

* Wed Dec 9 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-5
- Bump version to 1.4.3.16-5
- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV
- Resolves: Bug 1887449 - Sync repl: missing update because operations are erroneously stated as nested
- Resolves: Bug 1887415 - Sync repl - if a series of updates target the same entry then the cookie gets a wrong changenumber
- Resolves: Bug 1851978 - SyncRepl plugin provides a wrong cookie

* Thu Dec 3 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-4
- Bump version to 1.4.3.16-4
- Resolves: Bug 1843517 - Using ldifgen with --start-idx option fails with unsupported operand
- Resolves: Bug 1801086 - [RFE] Generate dsrc file using dsconf
- Resolves: Bug 1843838 - heap-use-after-free in slapi_be_getsuffix

* Wed Nov 25 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-3
- Bump version to 1.4.3.16-3
- Resolves: Bug 1859219 - rfc2307 and rfc2307bis compat schema
- Resolves: Bug 1843604 - reduce the cost of allocation/free when open/close a connection
- Resolves: Bug 1898850 - Entries conflict not resolved by replication

* Thu Nov 19 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-2
- Bump version to 1.4.3.16-2
- Resolves: Bug 1859227 - create keep-alive entry after online init
- Resolves: Bug 1888863 - group rdn with leading space char and add fails error 21 invalid syntax and delete fails error 32
- Resolves: Bug 1859228 - do not add referrals for masters with different data generation

* Mon Oct 26 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-1
- Bump version to 1.4.3.16-1
- Resolves: Bug 1887415 - Sync repl - if a series of updates target the same entry then the cookie gets a wrong changenumber
- Resolves: Bug 1859225 - suffix management in backends incorrect

* Mon Oct 26 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.14-1
- Bump version to 1.4.3.14-1
- Resolves: Bug 1862529 - Rebase 389-ds-base-1.4.3 in RHEL 8.4
- Resolves: Bug 1859301 - Misleading message in access log for idle timeout
- Resolves: Bug 1889782 - Missing closing quote when reporting the details of unindexed/paged search results
- Resolves: Bug 1862971 - dsidm user status fails with Error: 'nsUserAccount' object has no attribute 'is_locked'
- Resolves: Bug 1859878 - Managed Entries configuration not being enforced
- Resolves: Bug 1851973 - Duplicate entryUSN numbers for different LDAP entries in the same backend
- Resolves: Bug 1851967 - if the dbhome directory is set, online backup fails
- Resolves: Bug 1887449 - Sync repl: missing update because operations are erroneously stated as nested
- Resolves: Bug 1887415 - Sync repl - if a series of updates target the same entry then the cookie gets a wrong changenumber
- Resolves: Bug 1851978 - SyncRepl plugin provides a wrong cookie
- Resolves: Bug 1843604 - reduce the cost of allocation/free when open/close a connection
- Resolves: Bug 1872930 - dscreate: Not possible to bind to a unix domain socket
- Resolves: Bug 1861504 - ds-replcheck crashes in offline mode
- Resolves: Bug 1859282 - remove ldbm_back_entry_release
- Resolves: Bug 1859225 - suffix management in backends incorrect
- Resolves: Bug 1859224 - remove unused or unnecessary database plugin functions
- Resolves: Bug 1859219 - rfc2307 and rfc2307bis compat schema
- Resolves: Bug 1851975 - Add option to reject internal unindexed searches
- Resolves: Bug 1851972 - Remove code duplication from the BDB backend separation work
- Resolves: Bug 1850275 - Add new access log keywords for time spent in work queue and actual operation time
- Resolves: Bug 1848359 - Add failover credentials to replication agreement
- Resolves: Bug 1837315 - Healthcheck code DSBLE0002 not returned on disabled suffix

* Wed Aug 5 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-5
- Bump version to 1.4.3.8-5
- Resolves: Bug 1841086 - SSL alert: The value of sslVersionMax "TLS1.3" is higher than the supported version
- Resolves: Bug 1800529 - Memory leaks in disk monitoring
- Resolves: Bug 1748227 - Instance name length is not enforced
- Resolves: Bug 1849418 - python3-lib389 pulls unnecessary bash-completion package

* Fri Jun 26 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-4
- Bump version to 1.4.3.8-4
- Resolves: Bug 1806978 - ns-slapd crashes during db2ldif
- Resolves: Bug 1450863 - Log warning when tuning of nsslapd-threadnumber is above or below the optimal value
- Resolves: Bug 1647017 - A distinguished value of a single valued attribute can be missing in an entry
- Resolves: Bug 1806573 - Dsctl healthcheck doesn't work when using instance name with 'slapd-'
- Resolves: Bug 1807773 - dsctl healthcheck: typo in DSREPLLE0002 Lint error suggested resolution commands
- Resolves: Bug 1843567 - Healthcheck to find notes=F
- Resolves: Bug 1845094 - User/Directory Manager can modify Password Policy attribute "pwdReset"
- Resolves: Bug 1850275 - Add new access log keywords for time spent in work queue and actual operation time
- Resolves: Bug 1442386 - Recreating an index while changing case will create an index file with the old name (different case) and after restart the index file is abandoned
- Resolves: Bug 1672574 - nsIndexIDListScanLimit accepts any value
- Resolves: Bug 1800529 - Memory leaks in disk monitoring

* Fri Jun 5 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-3
- Bump version to 1.4.3.8-3
- Resolves: Bug 1835619 - Healthcheck with --json option reports "Object of type 'bytes' is not JSON serializable" when mapping tree is deleted
- Resolves: Bug 1836428 - Directory Server ds-replcheck RFE to add a timeout command-line arg/value to wait longer when connecting to a replica server
- Resolves: Bug 1843090 - abort when an empty valueset is freed
- Resolves: Bug 1843156 - Prevent unnecessary duplication of the target entry
- Resolves: Bug 1843157 - Check for clock errors and time skew
- Resolves: Bug 1843159 - RFE AD filter rewriter for ObjectCategory
- Resolves: Bug 1843162 - Creating Replication Manager fails if uid=repman is used
- Resolves: Bug 1816851 - Add option to healthcheck to list all the lint reports
- Resolves: Bug 1748227 - Instance name length is not enforced
- Resolves: Bug 1748244 - dscreate doesn't sanitize instance name

* Mon May 11 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-2
- Bump version to 1.4.3.8-2
- Resolves: Bug 1833350 - Remove cockpit dependencies that are breaking builds

* Mon May 11 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-1
- Bump version to 1.4.3.8-1
- Resolves: Bug 1833350 - Rebase 389-ds-base for RHEL 8.3
- Resolves: Bug 1728943 - [RFE] Advanced options in RHDS Disk Monitoring Framework
- Resolves: Bug 1775285 - [RFE] Implement the Password Policy attribute "pwdReset"
- Resolves: Bug 1638875 - [RFE] extract key/certs pem file into a private namespace
- Resolves: Bug 1758478 - AddressSanitizer: heap-buffer-overflow in ldap_utf8prev
- Resolves: Bug 1795943 - Port dbmon.sh from legacy tools package
- Resolves: Bug 1798394 - Port dbgen from legacy tools package
- Resolves: Bug 1800529 - Memory leaks in disk monitoring
- Resolves: Bug 1807419 - Unable to create a suffix with countryName either via dscreate or the admin console
- Resolves: Bug 1816848 - Database links: get_monitor() takes 1 positional argument but 2 were given
- Resolves: Bug 1816854 - Setting nsslapd-allowed-sasl-mechanisms truncates the value
- Resolves: Bug 1816857 - Searches on cn=config take values with spaces and make multiple attributes out of them
- Resolves: Bug 1816859 - lib389 - Replace exec() with setattr()
- Resolves: Bug 1816862 - Memory leak in indirect COS
- Resolves: Bug 1829071 - Installation of RHDS 11 fails on RHEL8 server with IPv6 disabled
- Resolves: Bug 1833515 - set 'nsslapd-enable-upgrade-hash: off' as this raises warnings in IPA
- Resolves: Bug 1790986 - cenotaph errors on modrdn operations
- Resolves: Bug 1769734 - Heavy StartTLS connection load can randomly fail with err=1
- Resolves: Bug 1758501 - LeakSanitizer: detected memory leaks in changelog5_init and perfctrs_init