Bump version to 1.4.3.35-1
Resolves: rhbz#2188628 - Rebase 389-ds-base in RHEL 8.9 to 1.4.3.25
commit 6dece768d3 (parent e594c7e62a)

.gitignore (vendored): 3 changes
@@ -4,3 +4,6 @@ SOURCES/vendor-1.4.3.32-1.tar.gz
 /389-ds-base-1.4.3.32.tar.bz2
 /jemalloc-5.3.0.tar.bz2
 /vendor-1.4.3.32-1.tar.gz
+/vendor-1.4.3.35-1.tar.gz
+/Cargo-1.4.3.35.lock
+/389-ds-base-1.4.3.35.tar.bz2
@@ -1,322 +0,0 @@
From 1e1c2b23c35282481628af7e971ac683da334502 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Tue, 27 Apr 2021 17:00:15 +0100
Subject: [PATCH 02/12] Issue 4701 - RFE - Exclude attributes from retro
changelog (#4723)

Description: When the retro changelog plugin is enabled it writes the
added/modified values to the "cn-changelog" suffix. In
some cases an entries attribute values can be of a
sensitive nature and should be excluded. This RFE adds
functionality that will allow an admin exclude certain
attributes from the retro changelog DB.

Relates: https://github.com/389ds/389-ds-base/issues/4701

Reviewed by: mreynolds389, droideck (Thanks folks)
---
.../tests/suites/retrocl/basic_test.py | 292 ++++++++++++++++++
1 file changed, 292 insertions(+)
create mode 100644 dirsrvtests/tests/suites/retrocl/basic_test.py

diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
new file mode 100644
index 000000000..112c73cb9
--- /dev/null
+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
@@ -0,0 +1,292 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import logging
+import ldap
+import time
+import pytest
+from lib389.topologies import topology_st
+from lib389.plugins import RetroChangelogPlugin
+from lib389._constants import *
+from lib389.utils import *
+from lib389.tasks import *
+from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
+from lib389.cli_base.dsrc import dsrc_arg_concat
+from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add
+from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
+
+pytestmark = pytest.mark.tier1
+
+USER1_DN = 'uid=user1,ou=people,'+ DEFAULT_SUFFIX
+USER2_DN = 'uid=user2,ou=people,'+ DEFAULT_SUFFIX
+USER_PW = 'password'
+ATTR_HOMEPHONE = 'homePhone'
+ATTR_CARLICENSE = 'carLicense'
+
+log = logging.getLogger(__name__)
+
+def test_retrocl_exclude_attr_add(topology_st):
+ """ Test exclude attribute feature of the retrocl plugin for add operation
+
+ :id: 3481650f-2070-45ef-9600-2500cfc51559
+
+ :setup: Standalone instance
+
+ :steps:
+ 1. Enable dynamic plugins
+ 2. Confige retro changelog plugin
+ 3. Add an entry
+ 4. Ensure entry attrs are in the changelog
+ 5. Exclude an attr
+ 6. Add another entry
+ 7. Ensure excluded attr is not in the changelog
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ """
+
+ st = topology_st.standalone
+
+ log.info('Enable dynamic plugins')
+ try:
+ st.config.set('nsslapd-dynamic-plugins', 'on')
+ except ldap.LDAPError as e:
+ ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc'])
+ assert False
+
+ log.info('Configure retrocl plugin')
+ rcl = RetroChangelogPlugin(st)
+ rcl.disable()
+ rcl.enable()
+ rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
+
+ log.info('Restarting instance')
+ try:
+ st.restart()
+ except ldap.LDAPError as e:
+ ldap.error('Failed to restart instance ' + e.args[0]['desc'])
+ assert False
+
+ users = UserAccounts(st, DEFAULT_SUFFIX)
+
+ log.info('Adding user1')
+ try:
+ user1 = users.create(properties={
+ 'sn': '1',
+ 'cn': 'user 1',
+ 'uid': 'user1',
+ 'uidNumber': '11',
+ 'gidNumber': '111',
+ 'givenname': 'user1',
+ 'homePhone': '0861234567',
+ 'carLicense': '131D16674',
+ 'mail': 'user1@whereever.com',
+ 'homeDirectory': '/home/user1',
+ 'userpassword': USER_PW})
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError as e:
+ log.error("Failed to add user1")
+
+ log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
+ try:
+ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+ except ldap.LDAPError as e:
+ log.fatal("Changelog search failed, error: " +str(e))
+ assert False
+ assert len(cllist) > 0
+ if cllist[0].hasAttr('changes'):
+ clstr = (cllist[0].getValue('changes')).decode()
+ assert ATTR_HOMEPHONE in clstr
+ assert ATTR_CARLICENSE in clstr
+
+ log.info('Excluding attribute ' + ATTR_HOMEPHONE)
+ args = FakeArgs()
+ args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
+ args.instance = 'standalone1'
+ args.basedn = None
+ args.binddn = None
+ args.starttls = False
+ args.pwdfile = None
+ args.bindpw = None
+ args.prompt = False
+ args.exclude_attrs = ATTR_HOMEPHONE
+ args.func = retrochangelog_add
+ dsrc_inst = dsrc_arg_concat(args, None)
+ inst = connect_instance(dsrc_inst, False, args)
+ result = args.func(inst, None, log, args)
+ disconnect_instance(inst)
+ assert result is None
+
+ log.info("5s delay for retrocl plugin to restart")
+ time.sleep(5)
+
+ log.info('Adding user2')
+ try:
+ user2 = users.create(properties={
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'uidNumber': '22',
+ 'gidNumber': '222',
+ 'givenname': 'user2',
+ 'homePhone': '0879088363',
+ 'carLicense': '04WX11038',
+ 'mail': 'user2@whereever.com',
+ 'homeDirectory': '/home/user2',
+ 'userpassword': USER_PW})
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError as e:
+ log.error("Failed to add user2")
+
+ log.info('Verify homePhone attr is not in the changelog changestring')
+ try:
+ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN)
+ assert len(cllist) > 0
+ if cllist[0].hasAttr('changes'):
+ clstr = (cllist[0].getValue('changes')).decode()
+ assert ATTR_HOMEPHONE not in clstr
+ assert ATTR_CARLICENSE in clstr
+ except ldap.LDAPError as e:
+ log.fatal("Changelog search failed, error: " +str(e))
+ assert False
+
+def test_retrocl_exclude_attr_mod(topology_st):
+ """ Test exclude attribute feature of the retrocl plugin for mod operation
+
+ :id: f6bef689-685b-4f86-a98d-f7e6b1fcada3
+
+ :setup: Standalone instance
+
+ :steps:
+ 1. Enable dynamic plugins
+ 2. Confige retro changelog plugin
+ 3. Add user1 entry
+ 4. Ensure entry attrs are in the changelog
+ 5. Exclude an attr
+ 6. Modify user1 entry
+ 7. Ensure excluded attr is not in the changelog
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ """
+
+ st = topology_st.standalone
+
+ log.info('Enable dynamic plugins')
+ try:
+ st.config.set('nsslapd-dynamic-plugins', 'on')
+ except ldap.LDAPError as e:
+ ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc'])
+ assert False
+
+ log.info('Configure retrocl plugin')
+ rcl = RetroChangelogPlugin(st)
+ rcl.disable()
+ rcl.enable()
+ rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
+
+ log.info('Restarting instance')
+ try:
+ st.restart()
+ except ldap.LDAPError as e:
+ ldap.error('Failed to restart instance ' + e.args[0]['desc'])
+ assert False
+
+ users = UserAccounts(st, DEFAULT_SUFFIX)
+
+ log.info('Adding user1')
+ try:
+ user1 = users.create(properties={
+ 'sn': '1',
+ 'cn': 'user 1',
+ 'uid': 'user1',
+ 'uidNumber': '11',
+ 'gidNumber': '111',
+ 'givenname': 'user1',
+ 'homePhone': '0861234567',
+ 'carLicense': '131D16674',
+ 'mail': 'user1@whereever.com',
+ 'homeDirectory': '/home/user1',
+ 'userpassword': USER_PW})
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError as e:
+ log.error("Failed to add user1")
+
+ log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
+ try:
+ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+ except ldap.LDAPError as e:
+ log.fatal("Changelog search failed, error: " +str(e))
+ assert False
+ assert len(cllist) > 0
+ if cllist[0].hasAttr('changes'):
+ clstr = (cllist[0].getValue('changes')).decode()
+ assert ATTR_HOMEPHONE in clstr
+ assert ATTR_CARLICENSE in clstr
+
+ log.info('Excluding attribute ' + ATTR_CARLICENSE)
+ args = FakeArgs()
+ args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
+ args.instance = 'standalone1'
+ args.basedn = None
+ args.binddn = None
+ args.starttls = False
+ args.pwdfile = None
+ args.bindpw = None
+ args.prompt = False
+ args.exclude_attrs = ATTR_CARLICENSE
+ args.func = retrochangelog_add
+ dsrc_inst = dsrc_arg_concat(args, None)
+ inst = connect_instance(dsrc_inst, False, args)
+ result = args.func(inst, None, log, args)
+ disconnect_instance(inst)
+ assert result is None
+
+ log.info("5s delay for retrocl plugin to restart")
+ time.sleep(5)
+
+ log.info('Modify user1 carLicense attribute')
+ try:
+ st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")])
+ except ldap.LDAPError as e:
+ log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc'])
+ assert False
+
+ log.info('Verify carLicense attr is not in the changelog changestring')
+ try:
+ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+ assert len(cllist) > 0
+ # There will be 2 entries in the changelog for this user, we are only
+ #interested in the second one, the modify operation.
+ if cllist[1].hasAttr('changes'):
+ clstr = (cllist[1].getValue('changes')).decode()
+ assert ATTR_CARLICENSE not in clstr
+ except ldap.LDAPError as e:
+ log.fatal("Changelog search failed, error: " +str(e))
+ assert False
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
--
2.26.3

@@ -1,373 +0,0 @@
From c167d6127db45d8426437c273060c8c8f7fbcb9b Mon Sep 17 00:00:00 2001
From: Firstyear <william.brown@suse.com>
Date: Wed, 23 Sep 2020 09:19:34 +1000
Subject: [PATCH 04/12] Ticket 4326 - entryuuid fixup did not work correctly
(#4328)

Bug Description: due to an oversight in how fixup tasks
worked, the entryuuid fixup task did not work correctly and
would not persist over restarts.

Fix Description: Correctly implement entryuuid fixup.

fixes: #4326

Author: William Brown <william@blackhats.net.au>

Review by: mreynolds (thanks!)
---
.../tests/suites/entryuuid/basic_test.py | 24 +++-
src/plugins/entryuuid/src/lib.rs | 43 ++++++-
src/slapi_r_plugin/src/constants.rs | 5 +
src/slapi_r_plugin/src/entry.rs | 8 ++
src/slapi_r_plugin/src/lib.rs | 2 +
src/slapi_r_plugin/src/macros.rs | 2 +-
src/slapi_r_plugin/src/modify.rs | 118 ++++++++++++++++++
src/slapi_r_plugin/src/pblock.rs | 7 ++
src/slapi_r_plugin/src/value.rs | 4 +
9 files changed, 206 insertions(+), 7 deletions(-)
create mode 100644 src/slapi_r_plugin/src/modify.rs

diff --git a/dirsrvtests/tests/suites/entryuuid/basic_test.py b/dirsrvtests/tests/suites/entryuuid/basic_test.py
index beb73701d..4d8a40909 100644
--- a/dirsrvtests/tests/suites/entryuuid/basic_test.py
+++ b/dirsrvtests/tests/suites/entryuuid/basic_test.py
@@ -12,6 +12,7 @@ import time
import shutil
from lib389.idm.user import nsUserAccounts, UserAccounts
from lib389.idm.account import Accounts
+from lib389.idm.domain import Domain
from lib389.topologies import topology_st as topology
from lib389.backend import Backends
from lib389.paths import Paths
@@ -190,6 +191,7 @@ def test_entryuuid_fixup_task(topology):
3. Enable the entryuuid plugin
4. Run the fixup
5. Assert the entryuuid now exists
+ 6. Restart and check they persist

:expectedresults:
1. Success
@@ -197,6 +199,7 @@ def test_entryuuid_fixup_task(topology):
3. Success
4. Success
5. Suddenly EntryUUID!
+ 6. Still has EntryUUID!
"""
# 1. Disable the plugin
plug = EntryUUIDPlugin(topology.standalone)
@@ -220,7 +223,22 @@ def test_entryuuid_fixup_task(topology):
assert(task.is_complete() and task.get_exit_code() == 0)
topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,))

- # 5. Assert the uuid.
- euuid = account.get_attr_val_utf8('entryUUID')
- assert(euuid is not None)
+ # 5.1 Assert the uuid on the user.
+ euuid_user = account.get_attr_val_utf8('entryUUID')
+ assert(euuid_user is not None)
+
+ # 5.2 Assert it on the domain entry.
+ domain = Domain(topology.standalone, dn=DEFAULT_SUFFIX)
+ euuid_domain = domain.get_attr_val_utf8('entryUUID')
+ assert(euuid_domain is not None)
+
+ # Assert it persists after a restart.
+ topology.standalone.restart()
+ # 6.1 Assert the uuid on the use.
+ euuid_user_2 = account.get_attr_val_utf8('entryUUID')
+ assert(euuid_user_2 == euuid_user)
+
+ # 6.2 Assert it on the domain entry.
+ euuid_domain_2 = domain.get_attr_val_utf8('entryUUID')
+ assert(euuid_domain_2 == euuid_domain)

diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
index 6b5e8d1bb..92977db05 100644
--- a/src/plugins/entryuuid/src/lib.rs
+++ b/src/plugins/entryuuid/src/lib.rs
@@ -187,9 +187,46 @@ impl SlapiPlugin3 for EntryUuid {
}
}

-pub fn entryuuid_fixup_mapfn(mut e: EntryRef, _data: &()) -> Result<(), PluginError> {
- assign_uuid(&mut e);
- Ok(())
+pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError> {
+ /* Supply a modification to the entry. */
+ let sdn = e.get_sdnref();
+
+ /* Sanity check that entryuuid doesn't already exist */
+ if e.contains_attr("entryUUID") {
+ log_error!(
+ ErrorLevel::Trace,
+ "skipping fixup for -> {}",
+ sdn.to_dn_string()
+ );
+ return Ok(());
+ }
+
+ // Setup the modifications
+ let mut mods = SlapiMods::new();
+
+ let u: Uuid = Uuid::new_v4();
+ let uuid_value = Value::from(&u);
+ let values: ValueArray = std::iter::once(uuid_value).collect();
+ mods.append(ModType::Replace, "entryUUID", values);
+
+ /* */
+ let lmod = Modify::new(&sdn, mods, plugin_id())?;
+
+ match lmod.execute() {
+ Ok(_) => {
+ log_error!(ErrorLevel::Trace, "fixed-up -> {}", sdn.to_dn_string());
+ Ok(())
+ }
+ Err(e) => {
+ log_error!(
+ ErrorLevel::Error,
+ "entryuuid_fixup_mapfn -> fixup failed -> {} {:?}",
+ sdn.to_dn_string(),
+ e
+ );
+ Err(PluginError::GenericFailure)
+ }
+ }
}

#[cfg(test)]
diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs
index cf76ccbdb..34845c2f4 100644
--- a/src/slapi_r_plugin/src/constants.rs
+++ b/src/slapi_r_plugin/src/constants.rs
@@ -5,6 +5,11 @@ use std::os::raw::c_char;
pub const LDAP_SUCCESS: i32 = 0;
pub const PLUGIN_DEFAULT_PRECEDENCE: i32 = 50;

+#[repr(i32)]
+pub enum OpFlags {
+ ByassReferrals = 0x0040_0000,
+}
+
#[repr(i32)]
/// The set of possible function handles we can register via the pblock. These
/// values correspond to slapi-plugin.h.
diff --git a/src/slapi_r_plugin/src/entry.rs b/src/slapi_r_plugin/src/entry.rs
index 034efe692..22ae45189 100644
--- a/src/slapi_r_plugin/src/entry.rs
+++ b/src/slapi_r_plugin/src/entry.rs
@@ -70,6 +70,14 @@ impl EntryRef {
}
}

+ pub fn contains_attr(&self, name: &str) -> bool {
+ let cname = CString::new(name).expect("invalid attr name");
+ let va = unsafe { slapi_entry_attr_get_valuearray(self.raw_e, cname.as_ptr()) };
+
+ // If it's null, it's not present, so flip the logic.
+ !va.is_null()
+ }
+
pub fn add_value(&mut self, a: &str, v: &ValueRef) {
// turn the attr to a c string.
// TODO FIX
diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs
index d7fc22e52..076907bae 100644
--- a/src/slapi_r_plugin/src/lib.rs
+++ b/src/slapi_r_plugin/src/lib.rs
@@ -9,6 +9,7 @@ pub mod dn;
pub mod entry;
pub mod error;
pub mod log;
+pub mod modify;
pub mod pblock;
pub mod plugin;
pub mod search;
@@ -24,6 +25,7 @@ pub mod prelude {
pub use crate::entry::EntryRef;
pub use crate::error::{DseCallbackStatus, LDAPError, PluginError, RPluginError};
pub use crate::log::{log_error, ErrorLevel};
+ pub use crate::modify::{ModType, Modify, SlapiMods};
pub use crate::pblock::{Pblock, PblockRef};
pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3};
pub use crate::search::{Search, SearchScope};
diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs
index 030449632..bc8dfa60f 100644
--- a/src/slapi_r_plugin/src/macros.rs
+++ b/src/slapi_r_plugin/src/macros.rs
@@ -825,7 +825,7 @@ macro_rules! slapi_r_search_callback_mapfn {
let e = EntryRef::new(raw_e);
let data_ptr = raw_data as *const _;
let data = unsafe { &(*data_ptr) };
- match $cb_mod_ident(e, data) {
+ match $cb_mod_ident(&e, data) {
Ok(_) => LDAPError::Success as i32,
Err(e) => e as i32,
}
diff --git a/src/slapi_r_plugin/src/modify.rs b/src/slapi_r_plugin/src/modify.rs
new file mode 100644
index 000000000..30864377a
--- /dev/null
+++ b/src/slapi_r_plugin/src/modify.rs
@@ -0,0 +1,118 @@
+use crate::constants::OpFlags;
+use crate::dn::SdnRef;
+use crate::error::{LDAPError, PluginError};
+use crate::pblock::Pblock;
+use crate::plugin::PluginIdRef;
+use crate::value::{slapi_value, ValueArray};
+
+use std::ffi::CString;
+use std::ops::{Deref, DerefMut};
+use std::os::raw::c_char;
+
+extern "C" {
+ fn slapi_modify_internal_set_pb_ext(
+ pb: *const libc::c_void,
+ dn: *const libc::c_void,
+ mods: *const *const libc::c_void,
+ controls: *const *const libc::c_void,
+ uniqueid: *const c_char,
+ plugin_ident: *const libc::c_void,
+ op_flags: i32,
+ );
+ fn slapi_modify_internal_pb(pb: *const libc::c_void);
+ fn slapi_mods_free(smods: *const *const libc::c_void);
+ fn slapi_mods_get_ldapmods_byref(smods: *const libc::c_void) -> *const *const libc::c_void;
+ fn slapi_mods_new() -> *const libc::c_void;
+ fn slapi_mods_add_mod_values(
+ smods: *const libc::c_void,
+ mtype: i32,
+ attrtype: *const c_char,
+ value: *const *const slapi_value,
+ );
+}
+
+#[derive(Debug)]
+#[repr(i32)]
+pub enum ModType {
+ Add = 0,
+ Delete = 1,
+ Replace = 2,
+}
+
+pub struct SlapiMods {
+ inner: *const libc::c_void,
+ vas: Vec<ValueArray>,
+}
+
+impl Drop for SlapiMods {
+ fn drop(&mut self) {
+ unsafe { slapi_mods_free(&self.inner as *const *const libc::c_void) }
+ }
+}
+
+impl SlapiMods {
+ pub fn new() -> Self {
+ SlapiMods {
+ inner: unsafe { slapi_mods_new() },
+ vas: Vec::new(),
+ }
+ }
+
+ pub fn append(&mut self, modtype: ModType, attrtype: &str, values: ValueArray) {
+ // We can get the value array pointer here to push to the inner
+ // because the internal pointers won't change even when we push them
+ // to the list to preserve their lifetime.
+ let vas = values.as_ptr();
+ // We take ownership of this to ensure it lives as least as long as our
+ // slapimods structure.
+ self.vas.push(values);
+ // now we can insert these to the modes.
+ let c_attrtype = CString::new(attrtype).expect("failed to allocate attrtype");
+ unsafe { slapi_mods_add_mod_values(self.inner, modtype as i32, c_attrtype.as_ptr(), vas) };
+ }
+}
+
+pub struct Modify {
+ pb: Pblock,
+ mods: SlapiMods,
+}
+
+pub struct ModifyResult {
+ pb: Pblock,
+}
+
+impl Modify {
+ pub fn new(dn: &SdnRef, mods: SlapiMods, plugin_id: PluginIdRef) -> Result<Self, PluginError> {
+ let pb = Pblock::new();
+ let lmods = unsafe { slapi_mods_get_ldapmods_byref(mods.inner) };
+ // OP_FLAG_ACTION_LOG_ACCESS
+
+ unsafe {
+ slapi_modify_internal_set_pb_ext(
+ pb.deref().as_ptr(),
+ dn.as_ptr(),
+ lmods,
+ std::ptr::null(),
+ std::ptr::null(),
+ plugin_id.raw_pid,
+ OpFlags::ByassReferrals as i32,
+ )
+ };
+
+ Ok(Modify { pb, mods })
+ }
+
+ pub fn execute(self) -> Result<ModifyResult, LDAPError> {
+ let Modify {
+ mut pb,
+ mods: _mods,
+ } = self;
+ unsafe { slapi_modify_internal_pb(pb.deref().as_ptr()) };
+ let result = pb.get_op_result();
+
+ match result {
+ 0 => Ok(ModifyResult { pb }),
+ _e => Err(LDAPError::from(result)),
+ }
+ }
+}
diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs
index b69ce1680..0f83914f3 100644
--- a/src/slapi_r_plugin/src/pblock.rs
+++ b/src/slapi_r_plugin/src/pblock.rs
@@ -11,6 +11,7 @@ pub use crate::log::{log_error, ErrorLevel};
extern "C" {
fn slapi_pblock_set(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
fn slapi_pblock_get(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
+ fn slapi_pblock_destroy(pb: *const libc::c_void);
fn slapi_pblock_new() -> *const libc::c_void;
}

@@ -41,6 +42,12 @@ impl DerefMut for Pblock {
}
}

+impl Drop for Pblock {
+ fn drop(&mut self) {
+ unsafe { slapi_pblock_destroy(self.value.raw_pb) }
+ }
+}
+
pub struct PblockRef {
raw_pb: *const libc::c_void,
}
diff --git a/src/slapi_r_plugin/src/value.rs b/src/slapi_r_plugin/src/value.rs
index 5a40dd279..46246837a 100644
--- a/src/slapi_r_plugin/src/value.rs
+++ b/src/slapi_r_plugin/src/value.rs
@@ -96,6 +96,10 @@ impl ValueArray {
let bs = vs.into_boxed_slice();
Box::leak(bs) as *const _ as *const *const slapi_value
}
+
+ pub fn as_ptr(&self) -> *const *const slapi_value {
+ self.data.as_ptr() as *const *const slapi_value
+ }
}

impl FromIterator<Value> for ValueArray {
--
2.26.3

@@ -1,192 +0,0 @@
From b2e0a1d405d15383064e547fd15008bc136d3efe Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Thu, 17 Dec 2020 08:22:23 +1000
Subject: [PATCH 05/12] Issue 4498 - BUG - entryuuid replication may not work
(#4503)

Bug Description: EntryUUID can be duplicated in replication,
due to a missing check in assign_uuid

Fix Description: Add a test case to determine how this occurs,
and add the correct check for existing entryUUID.

fixes: https://github.com/389ds/389-ds-base/issues/4498

Author: William Brown <william@blackhats.net.au>

Review by: @mreynolds389
---
.../tests/suites/entryuuid/replicated_test.py | 77 +++++++++++++++++++
rpm.mk | 2 +-
src/plugins/entryuuid/src/lib.rs | 20 ++++-
src/slapi_r_plugin/src/constants.rs | 2 +
src/slapi_r_plugin/src/pblock.rs | 7 ++
5 files changed, 106 insertions(+), 2 deletions(-)
create mode 100644 dirsrvtests/tests/suites/entryuuid/replicated_test.py

diff --git a/dirsrvtests/tests/suites/entryuuid/replicated_test.py b/dirsrvtests/tests/suites/entryuuid/replicated_test.py
new file mode 100644
index 000000000..a2ebc8ff7
--- /dev/null
+++ b/dirsrvtests/tests/suites/entryuuid/replicated_test.py
@@ -0,0 +1,77 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 William Brown <william@blackhats.net.au>
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import ldap
+import pytest
+import logging
+from lib389.topologies import topology_m2 as topo_m2
+from lib389.idm.user import nsUserAccounts
+from lib389.paths import Paths
+from lib389.utils import ds_is_older
+from lib389._constants import *
+from lib389.replica import ReplicationManager
+
+default_paths = Paths()
+
+pytestmark = pytest.mark.tier1
+
+@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions")
+
+def test_entryuuid_with_replication(topo_m2):
+ """ Check that entryuuid works with replication
+
+ :id: a5f15bf9-7f63-473a-840c-b9037b787024
+
+ :setup: two node mmr
+
+ :steps:
+ 1. Create an entry on one server
+ 2. Wait for replication
+ 3. Assert it is on the second
+
+ :expectedresults:
+ 1. Success
+ 1. Success
+ 1. Success
+ """
+
+ server_a = topo_m2.ms["supplier1"]
+ server_b = topo_m2.ms["supplier2"]
+ server_a.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE))
+ server_b.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE))
+
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+
+ account_a = nsUserAccounts(server_a, DEFAULT_SUFFIX).create_test_user(uid=2000)
+ euuid_a = account_a.get_attr_vals_utf8('entryUUID')
+ print("🧩 %s" % euuid_a)
+ assert(euuid_a is not None)
+ assert(len(euuid_a) == 1)
+
+ repl.wait_for_replication(server_a, server_b)
+
+ account_b = nsUserAccounts(server_b, DEFAULT_SUFFIX).get("test_user_2000")
+ euuid_b = account_b.get_attr_vals_utf8('entryUUID')
+ print("🧩 %s" % euuid_b)
+
+ server_a.config.loglevel(vals=(ErrorLog.DEFAULT,))
+ server_b.config.loglevel(vals=(ErrorLog.DEFAULT,))
+
+ assert(euuid_b is not None)
+ assert(len(euuid_b) == 1)
+ assert(euuid_b == euuid_a)
+
+ account_b.set("description", "update")
+ repl.wait_for_replication(server_b, server_a)
+
+ euuid_c = account_a.get_attr_vals_utf8('entryUUID')
+ print("🧩 %s" % euuid_c)
+ assert(euuid_c is not None)
+ assert(len(euuid_c) == 1)
+ assert(euuid_c == euuid_a)
+
diff --git a/rpm.mk b/rpm.mk
index 02f5bba37..d1cdff7df 100644
--- a/rpm.mk
+++ b/rpm.mk
@@ -25,7 +25,7 @@ TSAN_ON = 0
# Undefined Behaviour Sanitizer
UBSAN_ON = 0

-RUST_ON = 0
+RUST_ON = 1

# PERL_ON is deprecated and turns on the LEGACY_ON, this for not breaking people's workflows.
PERL_ON = 1
diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
index 92977db05..0197c5e83 100644
--- a/src/plugins/entryuuid/src/lib.rs
+++ b/src/plugins/entryuuid/src/lib.rs
@@ -30,6 +30,16 @@ slapi_r_search_callback_mapfn!(entryuuid, entryuuid_fixup_cb, entryuuid_fixup_ma
fn assign_uuid(e: &mut EntryRef) {
let sdn = e.get_sdnref();

+ // 🚧 safety barrier 🚧
+ if e.contains_attr("entryUUID") {
+ log_error!(
+ ErrorLevel::Trace,
+ "assign_uuid -> entryUUID exists, skipping dn {}",
+ sdn.to_dn_string()
+ );
+ return;
+ }
+
// We could consider making these lazy static.
let config_sdn = Sdn::try_from("cn=config").expect("Invalid static dn");
let schema_sdn = Sdn::try_from("cn=schema").expect("Invalid static dn");
@@ -66,7 +76,15 @@ impl SlapiPlugin3 for EntryUuid {
}

fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> {
- log_error!(ErrorLevel::Trace, "betxn_pre_add");
+ if pb.get_is_replicated_operation() {
+ log_error!(
+ ErrorLevel::Trace,
+ "betxn_pre_add -> replicated operation, will not change"
+ );
+ return Ok(());
+ }
+
+ log_error!(ErrorLevel::Trace, "betxn_pre_add -> start");

let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?;
assign_uuid(&mut e);
diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs
index 34845c2f4..aa0691acc 100644
--- a/src/slapi_r_plugin/src/constants.rs
+++ b/src/slapi_r_plugin/src/constants.rs
@@ -164,6 +164,8 @@ pub(crate) enum PblockType {
AddEntry = 60,
/// SLAPI_BACKEND
Backend = 130,
+ /// SLAPI_IS_REPLICATED_OPERATION
+ IsReplicationOperation = 142,
/// SLAPI_PLUGIN_MR_NAMES
MRNames = 624,
/// SLAPI_PLUGIN_SYNTAX_NAMES
diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs
index 0f83914f3..718ff2ca7 100644
--- a/src/slapi_r_plugin/src/pblock.rs
+++ b/src/slapi_r_plugin/src/pblock.rs
@@ -279,4 +279,11 @@ impl PblockRef {
pub fn get_op_result(&mut self) -> i32 {
self.get_value_i32(PblockType::OpResult).unwrap_or(-1)
}
+
+ pub fn get_is_replicated_operation(&mut self) -> bool {
+ let i = self.get_value_i32(PblockType::IsReplicationOperation).unwrap_or(0);
+ // Because rust returns the result of the last evaluation, we can
+ // just return if not equal 0.
+ i != 0
+ }
}
--
2.26.3

@@ -1,626 +0,0 @@
From 04c44e74503a842561b6c6e58001faf86d924b20 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 7 Dec 2020 11:00:45 -0500
Subject: [PATCH 06/12] Issue 4421 - Unable to build with Rust enabled in
closed environment

Description: Add Makefile flags and update rpm.mk that allow updating
and downloading all the cargo/rust dependencies. This is
needed for nightly tests and upstream/downstream releases.

Fixes: https://github.com/389ds/389-ds-base/issues/4421

Reviewed by: firstyear(Thanks!)
---
rpm.mk | 3 +-
rpm/389-ds-base.spec.in | 2 +-
src/Cargo.lock | 563 ----------------------------------------
3 files changed, 3 insertions(+), 565 deletions(-)
delete mode 100644 src/Cargo.lock

diff --git a/rpm.mk b/rpm.mk
index d1cdff7df..ef810c63c 100644
--- a/rpm.mk
+++ b/rpm.mk
@@ -44,6 +44,7 @@ update-cargo-dependencies:
cargo update --manifest-path=./src/Cargo.toml

download-cargo-dependencies:
+ cargo update --manifest-path=./src/Cargo.toml
cargo vendor --manifest-path=./src/Cargo.toml
cargo fetch --manifest-path=./src/Cargo.toml
tar -czf vendor.tar.gz vendor
@@ -114,7 +115,7 @@ rpmbuildprep:
cp dist/sources/$(JEMALLOC_TARBALL) $(RPMBUILD)/SOURCES/ ; \
fi

-srpms: rpmroot srpmdistdir tarballs rpmbuildprep
+srpms: rpmroot srpmdistdir download-cargo-dependencies tarballs rpmbuildprep
rpmbuild --define "_topdir $(RPMBUILD)" -bs $(RPMBUILD)/SPECS/$(PACKAGE).spec
cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)*.src.rpm dist/srpms/
rm -rf $(RPMBUILD)
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index b9f85489b..d80de8422 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -357,7 +357,7 @@ UBSAN_FLAGS="--enable-ubsan --enable-debug"
%endif

%if %{use_rust}
-RUST_FLAGS="--enable-rust"
+RUST_FLAGS="--enable-rust --enable-rust-offline"
%endif

%if %{use_legacy}
diff --git a/src/Cargo.lock b/src/Cargo.lock
deleted file mode 100644
index 33d7b8f23..000000000
--- a/src/Cargo.lock
+++ /dev/null
@@ -1,563 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-[[package]]
-name = "ansi_term"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
-dependencies = [
- "winapi",
-]
-
-[[package]]
-name = "atty"
-version = "0.2.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
-dependencies = [
- "hermit-abi",
- "libc",
- "winapi",
-]
-
-[[package]]
-name = "autocfg"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
-
-[[package]]
-name = "base64"
-version = "0.13.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
-
-[[package]]
-name = "bitflags"
-version = "1.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
-
-[[package]]
-name = "byteorder"
-version = "1.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
-
-[[package]]
-name = "cbindgen"
-version = "0.9.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
-dependencies = [
- "clap",
- "log",
- "proc-macro2",
- "quote",
- "serde",
- "serde_json",
- "syn",
- "tempfile",
- "toml",
-]
-
-[[package]]
-name = "cc"
-version = "1.0.67"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd"
-dependencies = [
- "jobserver",
-]
-
-[[package]]
-name = "cfg-if"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
-
-[[package]]
-name = "clap"
-version = "2.33.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
-dependencies = [
- "ansi_term",
- "atty",
- "bitflags",
- "strsim",
- "textwrap",
- "unicode-width",
- "vec_map",
-]
-
-[[package]]
-name = "entryuuid"
-version = "0.1.0"
-dependencies = [
- "cc",
- "libc",
- "paste",
- "slapi_r_plugin",
- "uuid",
-]
-
-[[package]]
-name = "entryuuid_syntax"
-version = "0.1.0"
-dependencies = [
- "cc",
- "libc",
- "paste",
- "slapi_r_plugin",
- "uuid",
-]
-
-[[package]]
-name = "fernet"
-version = "0.1.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
-dependencies = [
- "base64",
- "byteorder",
- "getrandom",
- "openssl",
- "zeroize",
-]
-
-[[package]]
-name = "foreign-types"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
-dependencies = [
- "foreign-types-shared",
-]
-
-[[package]]
-name = "foreign-types-shared"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
-
-[[package]]
-name = "getrandom"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
-dependencies = [
- "cfg-if",
- "libc",
- "wasi",
-]
-
-[[package]]
-name = "hermit-abi"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "itoa"
-version = "0.4.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"
-
-[[package]]
-name = "jobserver"
-version = "0.1.22"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "lazy_static"
-version = "1.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-
-[[package]]
-name = "libc"
-version = "0.2.94"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e"
-
-[[package]]
-name = "librnsslapd"
-version = "0.1.0"
-dependencies = [
- "cbindgen",
- "libc",
- "slapd",
-]
-
-[[package]]
-name = "librslapd"
-version = "0.1.0"
-dependencies = [
- "cbindgen",
- "libc",
- "slapd",
-]
-
-[[package]]
-name = "log"
-version = "0.4.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
-dependencies = [
- "cfg-if",
-]
-
-[[package]]
-name = "once_cell"
-version = "1.7.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3"
-
-[[package]]
-name = "openssl"
-version = "0.10.34"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8"
-dependencies = [
- "bitflags",
- "cfg-if",
- "foreign-types",
- "libc",
- "once_cell",
- "openssl-sys",
-]
-
-[[package]]
-name = "openssl-sys"
-version = "0.9.63"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98"
-dependencies = [
- "autocfg",
- "cc",
- "libc",
- "pkg-config",
- "vcpkg",
-]
-
-[[package]]
-name = "paste"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
-dependencies = [
- "paste-impl",
- "proc-macro-hack",
-]
-
-[[package]]
-name = "paste-impl"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
-dependencies = [
- "proc-macro-hack",
-]
-
-[[package]]
-name = "pkg-config"
-version = "0.3.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
-
-[[package]]
-name = "ppv-lite86"
-version = "0.2.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
-
-[[package]]
-name = "proc-macro-hack"
-version = "0.5.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
-
-[[package]]
-name = "proc-macro2"
-version = "1.0.27"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
-dependencies = [
- "unicode-xid",
-]
-
-[[package]]
-name = "quote"
-version = "1.0.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
-dependencies = [
- "proc-macro2",
-]
-
-[[package]]
-name = "rand"
-version = "0.8.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
-dependencies = [
- "libc",
- "rand_chacha",
- "rand_core",
- "rand_hc",
-]
-
-[[package]]
-name = "rand_chacha"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
-dependencies = [
- "ppv-lite86",
- "rand_core",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.6.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"
-dependencies = [
- "getrandom",
-]
-
-[[package]]
-name = "rand_hc"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
-dependencies = [
- "rand_core",
-]
-
-[[package]]
-name = "redox_syscall"
-version = "0.2.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc"
-dependencies = [
- "bitflags",
-]
-
-[[package]]
-name = "remove_dir_all"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
-dependencies = [
- "winapi",
-]
-
-[[package]]
-name = "rsds"
-version = "0.1.0"
-
-[[package]]
-name = "ryu"
-version = "1.0.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
-
-[[package]]
-name = "serde"
-version = "1.0.126"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03"
-dependencies = [
- "serde_derive",
-]
-
-[[package]]
-name = "serde_derive"
-version = "1.0.126"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "serde_json"
-version = "1.0.64"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79"
-dependencies = [
- "itoa",
- "ryu",
- "serde",
-]
-
-[[package]]
-name = "slapd"
-version = "0.1.0"
-dependencies = [
- "fernet",
-]
-
-[[package]]
-name = "slapi_r_plugin"
-version = "0.1.0"
-dependencies = [
- "lazy_static",
- "libc",
- "paste",
- "uuid",
-]
-
-[[package]]
-name = "strsim"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
-
-[[package]]
-name = "syn"
-version = "1.0.72"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-xid",
-]
-
-[[package]]
-name = "synstructure"
-version = "0.12.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
- "unicode-xid",
-]
-
-[[package]]
-name = "tempfile"
-version = "3.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
-dependencies = [
- "cfg-if",
- "libc",
- "rand",
- "redox_syscall",
- "remove_dir_all",
- "winapi",
-]
-
-[[package]]
-name = "textwrap"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
-dependencies = [
- "unicode-width",
-]
-
-[[package]]
-name = "toml"
-version = "0.5.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "unicode-width"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
-
-[[package]]
-name = "unicode-xid"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
-
-[[package]]
-name = "uuid"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
-dependencies = [
- "getrandom",
-]
-
-[[package]]
-name = "vcpkg"
-version = "0.2.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbdbff6266a24120518560b5dc983096efb98462e51d0d68169895b237be3e5d"
-
-[[package]]
-name = "vec_map"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
-
-[[package]]
-name = "wasi"
-version = "0.10.2+wasi-snapshot-preview1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
-
-[[package]]
-name = "winapi"
-version = "0.3.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
-dependencies = [
- "winapi-i686-pc-windows-gnu",
- "winapi-x86_64-pc-windows-gnu",
-]
-
-[[package]]
-name = "winapi-i686-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-
-[[package]]
-name = "winapi-x86_64-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
-
-[[package]]
-name = "zeroize"
-version = "1.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd"
-dependencies = [
- "zeroize_derive",
-]
-
-[[package]]
-name = "zeroize_derive"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
- "synstructure",
-]
--
2.26.3

@@ -1,412 +0,0 @@
From 279bdb5148eb0b67ddab40c4dd9d08e9e1672f13 Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Fri, 26 Jun 2020 10:27:56 +1000
Subject: [PATCH 07/12] Ticket 51175 - resolve plugin name leaking

Bug Description: Previously pblock.c assumed that all plugin
names were static c strings. Rust can't create static C
strings, so these were intentionally leaked.

Fix Description: Rather than leak these, we do a dup/free
through the slapiplugin struct instead, meaning we can use
ephemeral, and properly managed strings in rust. This does not
affect any other existing code which will still handle the
static strings correctly.

https://pagure.io/389-ds-base/issue/51175

Author: William Brown <william@blackhats.net.au>

Review by: mreynolds, tbordaz (Thanks!)
---
Makefile.am | 1 +
configure.ac | 2 +-
ldap/servers/slapd/pagedresults.c | 6 +--
ldap/servers/slapd/pblock.c | 9 ++--
ldap/servers/slapd/plugin.c | 7 +++
ldap/servers/slapd/pw_verify.c | 1 +
ldap/servers/slapd/tools/pwenc.c | 2 +-
src/slapi_r_plugin/README.md | 6 +--
src/slapi_r_plugin/src/charray.rs | 32 ++++++++++++++
src/slapi_r_plugin/src/lib.rs | 8 ++--
src/slapi_r_plugin/src/macros.rs | 17 +++++---
src/slapi_r_plugin/src/syntax_plugin.rs | 57 +++++++------------------
12 files changed, 85 insertions(+), 63 deletions(-)
create mode 100644 src/slapi_r_plugin/src/charray.rs

diff --git a/Makefile.am b/Makefile.am
index 627953850..36434cf17 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1312,6 +1312,7 @@ rust-nsslapd-private.h: @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a
libslapi_r_plugin_SOURCES = \
src/slapi_r_plugin/src/backend.rs \
src/slapi_r_plugin/src/ber.rs \
+ src/slapi_r_plugin/src/charray.rs \
src/slapi_r_plugin/src/constants.rs \
src/slapi_r_plugin/src/dn.rs \
src/slapi_r_plugin/src/entry.rs \
diff --git a/configure.ac b/configure.ac
index b3cf77d08..61bf35e4a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -122,7 +122,7 @@ if test "$enable_debug" = yes ; then
debug_defs="-DDEBUG -DMCC_DEBUG"
debug_cflags="-g3 -O0 -rdynamic"
debug_cxxflags="-g3 -O0 -rdynamic"
- debug_rust_defs="-C debuginfo=2"
+ debug_rust_defs="-C debuginfo=2 -Z macro-backtrace"
cargo_defs=""
rust_target_dir="debug"
else
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
index d8b8798b6..e3444e944 100644
--- a/ldap/servers/slapd/pagedresults.c
+++ b/ldap/servers/slapd/pagedresults.c
@@ -738,10 +738,10 @@ pagedresults_cleanup(Connection *conn, int needlock)
int i;
PagedResults *prp = NULL;

- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n");
+ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n"); */

if (NULL == conn) {
- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n");
+ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n"); */
return 0;
}

@@ -767,7 +767,7 @@ pagedresults_cleanup(Connection *conn, int needlock)
if (needlock) {
pthread_mutex_unlock(&(conn->c_mutex));
}
- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc);
+ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc); */
return rc;
}

diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index 1ad9d0399..f7d1f8885 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -3351,13 +3351,15 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
- pblock->pb_plugin->plg_syntax_names = (char **)value;
+ PR_ASSERT(pblock->pb_plugin->plg_syntax_names == NULL);
+ pblock->pb_plugin->plg_syntax_names = slapi_ch_array_dup((char **)value);
break;
case SLAPI_PLUGIN_SYNTAX_OID:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
- pblock->pb_plugin->plg_syntax_oid = (char *)value;
+ PR_ASSERT(pblock->pb_plugin->plg_syntax_oid == NULL);
+ pblock->pb_plugin->plg_syntax_oid = slapi_ch_strdup((char *)value);
break;
case SLAPI_PLUGIN_SYNTAX_FLAGS:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
@@ -3806,7 +3808,8 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
return (-1);
}
- pblock->pb_plugin->plg_mr_names = (char **)value;
+ PR_ASSERT(pblock->pb_plugin->plg_mr_names == NULL);
+ pblock->pb_plugin->plg_mr_names = slapi_ch_array_dup((char **)value);
break;
case SLAPI_PLUGIN_MR_COMPARE:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c
index 282b98738..e6b48de60 100644
--- a/ldap/servers/slapd/plugin.c
+++ b/ldap/servers/slapd/plugin.c
@@ -2694,6 +2694,13 @@ plugin_free(struct slapdplugin *plugin)
if (plugin->plg_type == SLAPI_PLUGIN_PWD_STORAGE_SCHEME || plugin->plg_type == SLAPI_PLUGIN_REVER_PWD_STORAGE_SCHEME) {
slapi_ch_free_string(&plugin->plg_pwdstorageschemename);
}
+ if (plugin->plg_type == SLAPI_PLUGIN_SYNTAX) {
+ slapi_ch_free_string(&plugin->plg_syntax_oid);
+ slapi_ch_array_free(plugin->plg_syntax_names);
+ }
+ if (plugin->plg_type == SLAPI_PLUGIN_MATCHINGRULE) {
+ slapi_ch_array_free(plugin->plg_mr_names);
+ }
release_componentid(plugin->plg_identity);
slapi_counter_destroy(&plugin->plg_op_counter);
if (!plugin->plg_group) {
diff --git a/ldap/servers/slapd/pw_verify.c b/ldap/servers/slapd/pw_verify.c
index 4f0944b73..4ff1fa2fd 100644
--- a/ldap/servers/slapd/pw_verify.c
+++ b/ldap/servers/slapd/pw_verify.c
@@ -111,6 +111,7 @@ pw_verify_token_dn(Slapi_PBlock *pb) {
if (fernet_verify_token(dn, cred->bv_val, key, tok_ttl) != 0) {
rc = SLAPI_BIND_SUCCESS;
|
||||
}
|
||||
+ slapi_ch_free_string(&key);
|
||||
#endif
|
||||
return rc;
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/tools/pwenc.c b/ldap/servers/slapd/tools/pwenc.c
|
||||
index 1629c06cd..d89225e34 100644
|
||||
--- a/ldap/servers/slapd/tools/pwenc.c
|
||||
+++ b/ldap/servers/slapd/tools/pwenc.c
|
||||
@@ -34,7 +34,7 @@
|
||||
|
||||
int ldap_syslog;
|
||||
int ldap_syslog_level;
|
||||
-int slapd_ldap_debug = LDAP_DEBUG_ANY;
|
||||
+/* int slapd_ldap_debug = LDAP_DEBUG_ANY; */
|
||||
int detached;
|
||||
FILE *error_logfp;
|
||||
FILE *access_logfp;
|
||||
diff --git a/src/slapi_r_plugin/README.md b/src/slapi_r_plugin/README.md
|
||||
index af9743ec9..1c9bcbf17 100644
|
||||
--- a/src/slapi_r_plugin/README.md
|
||||
+++ b/src/slapi_r_plugin/README.md
|
||||
@@ -15,7 +15,7 @@ the [Rust Nomicon](https://doc.rust-lang.org/nomicon/index.html)
|
||||
> warning about danger.
|
||||
|
||||
This document will not detail the specifics of unsafe or the invariants you must adhere to for rust
|
||||
-to work with C.
|
||||
+to work with C. Failure to uphold these invariants will lead to less than optimal consequences.
|
||||
|
||||
If you still want to see more about the plugin bindings, go on ...
|
||||
|
||||
@@ -135,7 +135,7 @@ associated functions.
|
||||
Now, you may notice that not all members of the trait are implemented. This is due to a feature
|
||||
of rust known as default trait impls. This allows the trait origin (src/plugin.rs) to provide
|
||||
template versions of these functions. If you "overwrite" them, your implementation is used. Unlike
|
||||
-OO, you may not inherit or call the default function.
|
||||
+OO, you may not inherit or call the default function.
|
||||
|
||||
If a default is not provided you *must* implement that function to be considered valid. Today (20200422)
|
||||
this only applies to `start` and `close`.
|
||||
@@ -183,7 +183,7 @@ It's important to understand how Rust manages memory both on the stack and the h
|
||||
As a result, this means that we must express in code, assertions about the proper ownership of memory
|
||||
and who is responsible for it (unlike C, where it can be hard to determine who or what is responsible
|
||||
for freeing some value.) Failure to handle this correctly, can and will lead to crashes, leaks or
|
||||
-*hand waving* magical failures that are eXtReMeLy FuN to debug.
|
||||
+*hand waving* magical failures that are `eXtReMeLy FuN` to debug.
|
||||
|
||||
### Reference Types
|
||||
|
||||
diff --git a/src/slapi_r_plugin/src/charray.rs b/src/slapi_r_plugin/src/charray.rs
|
||||
new file mode 100644
|
||||
index 000000000..d2e44693c
|
||||
--- /dev/null
|
||||
+++ b/src/slapi_r_plugin/src/charray.rs
|
||||
@@ -0,0 +1,32 @@
|
||||
+use std::ffi::CString;
|
||||
+use std::iter::once;
|
||||
+use std::os::raw::c_char;
|
||||
+use std::ptr;
|
||||
+
|
||||
+pub struct Charray {
|
||||
+ pin: Vec<CString>,
|
||||
+ charray: Vec<*const c_char>,
|
||||
+}
|
||||
+
|
||||
+impl Charray {
|
||||
+ pub fn new(input: &[&str]) -> Result<Self, ()> {
|
||||
+ let pin: Result<Vec<_>, ()> = input
|
||||
+ .iter()
|
||||
+ .map(|s| CString::new(*s).map_err(|_e| ()))
|
||||
+ .collect();
|
||||
+
|
||||
+ let pin = pin?;
|
||||
+
|
||||
+ let charray: Vec<_> = pin
|
||||
+ .iter()
|
||||
+ .map(|s| s.as_ptr())
|
||||
+ .chain(once(ptr::null()))
|
||||
+ .collect();
|
||||
+
|
||||
+ Ok(Charray { pin, charray })
|
||||
+ }
|
||||
+
|
||||
+ pub fn as_ptr(&self) -> *const *const c_char {
|
||||
+ self.charray.as_ptr()
|
||||
+ }
|
||||
+}
|
||||
diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs
|
||||
index 076907bae..be28cac95 100644
|
||||
--- a/src/slapi_r_plugin/src/lib.rs
|
||||
+++ b/src/slapi_r_plugin/src/lib.rs
|
||||
@@ -1,9 +1,11 @@
|
||||
-// extern crate lazy_static;
|
||||
+#[macro_use]
|
||||
+extern crate lazy_static;
|
||||
|
||||
#[macro_use]
|
||||
pub mod macros;
|
||||
pub mod backend;
|
||||
pub mod ber;
|
||||
+pub mod charray;
|
||||
mod constants;
|
||||
pub mod dn;
|
||||
pub mod entry;
|
||||
@@ -20,6 +22,7 @@ pub mod value;
|
||||
pub mod prelude {
|
||||
pub use crate::backend::{BackendRef, BackendRefTxn};
|
||||
pub use crate::ber::BerValRef;
|
||||
+ pub use crate::charray::Charray;
|
||||
pub use crate::constants::{FilterType, PluginFnType, PluginType, PluginVersion, LDAP_SUCCESS};
|
||||
pub use crate::dn::{Sdn, SdnRef};
|
||||
pub use crate::entry::EntryRef;
|
||||
@@ -30,8 +33,7 @@ pub mod prelude {
|
||||
pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3};
|
||||
pub use crate::search::{Search, SearchScope};
|
||||
pub use crate::syntax_plugin::{
|
||||
- matchingrule_register, name_to_leaking_char, names_to_leaking_char_array, SlapiOrdMr,
|
||||
- SlapiSubMr, SlapiSyntaxPlugin1,
|
||||
+ matchingrule_register, SlapiOrdMr, SlapiSubMr, SlapiSyntaxPlugin1,
|
||||
};
|
||||
pub use crate::task::{task_register_handler_fn, task_unregister_handler_fn, Task, TaskRef};
|
||||
pub use crate::value::{Value, ValueArray, ValueArrayRef, ValueRef};
|
||||
diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs
|
||||
index bc8dfa60f..97fc5d7ef 100644
|
||||
--- a/src/slapi_r_plugin/src/macros.rs
|
||||
+++ b/src/slapi_r_plugin/src/macros.rs
|
||||
@@ -249,6 +249,7 @@ macro_rules! slapi_r_syntax_plugin_hooks {
|
||||
paste::item! {
|
||||
use libc;
|
||||
use std::convert::TryFrom;
|
||||
+ use std::ffi::CString;
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn [<$mod_ident _plugin_init>](raw_pb: *const libc::c_void) -> i32 {
|
||||
@@ -261,15 +262,15 @@ macro_rules! slapi_r_syntax_plugin_hooks {
|
||||
};
|
||||
|
||||
// Setup the names/oids that this plugin provides syntaxes for.
|
||||
-
|
||||
- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::attr_supported_names()) };
|
||||
- match pb.register_syntax_names(name_ptr) {
|
||||
+ // DS will clone these, so they can be ephemeral to this function.
|
||||
+ let name_vec = Charray::new($hooks_ident::attr_supported_names().as_slice()).expect("invalid supported names");
|
||||
+ match pb.register_syntax_names(name_vec.as_ptr()) {
|
||||
0 => {},
|
||||
e => return e,
|
||||
};
|
||||
|
||||
- let name_ptr = unsafe { name_to_leaking_char($hooks_ident::attr_oid()) };
|
||||
- match pb.register_syntax_oid(name_ptr) {
|
||||
+ let attr_oid = CString::new($hooks_ident::attr_oid()).expect("invalid attr oid");
|
||||
+ match pb.register_syntax_oid(attr_oid.as_ptr()) {
|
||||
0 => {},
|
||||
e => return e,
|
||||
};
|
||||
@@ -430,7 +431,8 @@ macro_rules! slapi_r_syntax_plugin_hooks {
|
||||
e => return e,
|
||||
};
|
||||
|
||||
- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::eq_mr_supported_names()) };
|
||||
+ let name_vec = Charray::new($hooks_ident::eq_mr_supported_names().as_slice()).expect("invalid mr supported names");
|
||||
+ let name_ptr = name_vec.as_ptr();
|
||||
// SLAPI_PLUGIN_MR_NAMES
|
||||
match pb.register_mr_names(name_ptr) {
|
||||
0 => {},
|
||||
@@ -672,7 +674,8 @@ macro_rules! slapi_r_syntax_plugin_hooks {
|
||||
e => return e,
|
||||
};
|
||||
|
||||
- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::ord_mr_supported_names()) };
|
||||
+ let name_vec = Charray::new($hooks_ident::ord_mr_supported_names().as_slice()).expect("invalid ord supported names");
|
||||
+ let name_ptr = name_vec.as_ptr();
|
||||
// SLAPI_PLUGIN_MR_NAMES
|
||||
match pb.register_mr_names(name_ptr) {
|
||||
0 => {},
|
||||
diff --git a/src/slapi_r_plugin/src/syntax_plugin.rs b/src/slapi_r_plugin/src/syntax_plugin.rs
|
||||
index e7d5c01bd..86f84bdd8 100644
|
||||
--- a/src/slapi_r_plugin/src/syntax_plugin.rs
|
||||
+++ b/src/slapi_r_plugin/src/syntax_plugin.rs
|
||||
@@ -1,11 +1,11 @@
|
||||
use crate::ber::BerValRef;
|
||||
// use crate::constants::FilterType;
|
||||
+use crate::charray::Charray;
|
||||
use crate::error::PluginError;
|
||||
use crate::pblock::PblockRef;
|
||||
use crate::value::{ValueArray, ValueArrayRef};
|
||||
use std::cmp::Ordering;
|
||||
use std::ffi::CString;
|
||||
-use std::iter::once;
|
||||
use std::os::raw::c_char;
|
||||
use std::ptr;
|
||||
|
||||
@@ -26,37 +26,6 @@ struct slapi_matchingRuleEntry {
|
||||
mr_compat_syntax: *const *const c_char,
|
||||
}
|
||||
|
||||
-pub unsafe fn name_to_leaking_char(name: &str) -> *const c_char {
|
||||
- let n = CString::new(name)
|
||||
- .expect("An invalid string has been hardcoded!")
|
||||
- .into_boxed_c_str();
|
||||
- let n_ptr = n.as_ptr();
|
||||
- // Now we intentionally leak the name here, and the pointer will remain valid.
|
||||
- Box::leak(n);
|
||||
- n_ptr
|
||||
-}
|
||||
-
|
||||
-pub unsafe fn names_to_leaking_char_array(names: &[&str]) -> *const *const c_char {
|
||||
- let n_arr: Vec<CString> = names
|
||||
- .iter()
|
||||
- .map(|s| CString::new(*s).expect("An invalid string has been hardcoded!"))
|
||||
- .collect();
|
||||
- let n_arr = n_arr.into_boxed_slice();
|
||||
- let n_ptr_arr: Vec<*const c_char> = n_arr
|
||||
- .iter()
|
||||
- .map(|v| v.as_ptr())
|
||||
- .chain(once(ptr::null()))
|
||||
- .collect();
|
||||
- let n_ptr_arr = n_ptr_arr.into_boxed_slice();
|
||||
-
|
||||
- // Now we intentionally leak these names here,
|
||||
- let _r_n_arr = Box::leak(n_arr);
|
||||
- let r_n_ptr_arr = Box::leak(n_ptr_arr);
|
||||
-
|
||||
- let name_ptr = r_n_ptr_arr as *const _ as *const *const c_char;
|
||||
- name_ptr
|
||||
-}
|
||||
-
|
||||
// oid - the oid of the matching rule
|
||||
// name - the name of the mr
|
||||
// desc - description
|
||||
@@ -69,20 +38,24 @@ pub unsafe fn matchingrule_register(
|
||||
syntax: &str,
|
||||
compat_syntax: &[&str],
|
||||
) -> i32 {
|
||||
- let oid_ptr = name_to_leaking_char(oid);
|
||||
- let name_ptr = name_to_leaking_char(name);
|
||||
- let desc_ptr = name_to_leaking_char(desc);
|
||||
- let syntax_ptr = name_to_leaking_char(syntax);
|
||||
- let compat_syntax_ptr = names_to_leaking_char_array(compat_syntax);
|
||||
+ // Make everything CStrings that live long enough.
|
||||
+
|
||||
+ let oid_cs = CString::new(oid).expect("invalid oid");
|
||||
+ let name_cs = CString::new(name).expect("invalid name");
|
||||
+ let desc_cs = CString::new(desc).expect("invalid desc");
|
||||
+ let syntax_cs = CString::new(syntax).expect("invalid syntax");
|
||||
+
|
||||
+ // We have to do this so the cstrings live long enough.
|
||||
+ let compat_syntax_ca = Charray::new(compat_syntax).expect("invalid compat_syntax");
|
||||
|
||||
let new_mr = slapi_matchingRuleEntry {
|
||||
- mr_oid: oid_ptr,
|
||||
+ mr_oid: oid_cs.as_ptr(),
|
||||
_mr_oidalias: ptr::null(),
|
||||
- mr_name: name_ptr,
|
||||
- mr_desc: desc_ptr,
|
||||
- mr_syntax: syntax_ptr,
|
||||
+ mr_name: name_cs.as_ptr(),
|
||||
+ mr_desc: desc_cs.as_ptr(),
|
||||
+ mr_syntax: syntax_cs.as_ptr(),
|
||||
_mr_obsolete: 0,
|
||||
- mr_compat_syntax: compat_syntax_ptr,
|
||||
+ mr_compat_syntax: compat_syntax_ca.as_ptr(),
|
||||
};
|
||||
|
||||
let new_mr_ptr = &new_mr as *const _;
|
||||
--
|
||||
2.26.3
|
||||
|
@ -1,37 +0,0 @@
From 40e9a4835a6e95f021a711a7c42ce0c1bddc5ba4 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 21 May 2021 13:09:12 -0400
Subject: [PATCH 08/12] Issue 4773 - Enable interval feature of DNA plugin

Description: Enable the dormant interval feature in DNA plugin

relates: https://github.com/389ds/389-ds-base/issues/4773

Review by: mreynolds (one line commit rule)
---
 ldap/servers/plugins/dna/dna.c | 2 --
 1 file changed, 2 deletions(-)

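With the #ifdef guard removed in the hunk below, the interval is read from the
DNA config entry at parse time. For illustration only, a minimal python-ldap
sketch of turning the interval up once the feature is enabled; the attribute
name "dnaInterval" and the config entry DN are assumptions here, not taken
from this patch, and the connection details are placeholders:

    import ldap

    # Bind as Directory Manager on a local instance (connection details assumed).
    conn = ldap.initialize("ldap://localhost:389")
    conn.simple_bind_s("cn=Directory Manager", "password")

    # Ask DNA to hand out values in steps of 10 instead of the default of 1
    # (the default set by dna_parse_config_entry below).
    dna_config_dn = "cn=UID numbers,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config"
    conn.modify_s(dna_config_dn, [(ldap.MOD_REPLACE, "dnaInterval", [b"10"])])
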
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index bf6b74a99..928a3f54a 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -1023,7 +1023,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
/* Set the default interval to 1 */
entry->interval = 1;

-#ifdef DNA_ENABLE_INTERVAL
value = slapi_entry_attr_get_charptr(e, DNA_INTERVAL);
if (value) {
entry->interval = strtoull(value, 0, 0);
@@ -1032,7 +1031,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)

slapi_log_err(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM,
"dna_parse_config_entry - %s [%" PRIu64 "]\n", DNA_INTERVAL, entry->interval);
-#endif

value = slapi_entry_attr_get_charptr(e, DNA_GENERATE);
if (value) {
--
2.26.3

@ -1,926 +0,0 @@
From 8df95679519364d0993572ecbea72ab89e5250a5 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Thu, 20 May 2021 14:24:25 +0200
Subject: [PATCH 09/12] Issue 4623 - RFE - Monitor the current DB locks (#4762)

Description: DB locks get exhausted because of unindexed internal searches
(under a transaction). Indexing those searches is the way to prevent exhaustion.
If DB locks get exhausted during a txn, it leads to a DB panic, and the later
recovery can possibly fail. That leads to a full reinit of the instance where
the DB locks got exhausted.

Add three attributes to the global BDB config: "nsslapd-db-locks-monitoring-enabled",
"nsslapd-db-locks-monitoring-threshold" and "nsslapd-db-locks-monitoring-pause".
By default, nsslapd-db-locks-monitoring-enabled is turned on,
nsslapd-db-locks-monitoring-threshold is set to 90% and
nsslapd-db-locks-monitoring-pause is set to 500ms.

When the current number of locks gets close to the maximum (90% by default),
returning the next candidate entry fails until the maximum number of locks is
increased or current locks are released.
The monitoring thread runs at a configurable interval, 500ms by default.

Add the setting to UI and CLI tools.

Fixes: https://github.com/389ds/389-ds-base/issues/4623

Reviewed by: @Firstyear, @tbordaz, @jchapma, @mreynolds389 (Thank you!!)
---
 .../suites/monitor/db_locks_monitor_test.py   | 251 ++++++++++++++++++
 ldap/servers/slapd/back-ldbm/back-ldbm.h      |  13 +-
 .../slapd/back-ldbm/db-bdb/bdb_config.c       |  99 +++++++
 .../slapd/back-ldbm/db-bdb/bdb_layer.c        |  85 ++++++
 ldap/servers/slapd/back-ldbm/init.c           |   3 +
 ldap/servers/slapd/back-ldbm/ldbm_config.c    |   3 +
 ldap/servers/slapd/back-ldbm/ldbm_config.h    |   3 +
 ldap/servers/slapd/back-ldbm/ldbm_search.c    |  13 +
 ldap/servers/slapd/libglobs.c                 |   4 +-
 src/cockpit/389-console/src/css/ds.css        |   4 +
 src/cockpit/389-console/src/database.jsx      |   7 +
 src/cockpit/389-console/src/index.html        |   2 +-
 .../src/lib/database/databaseConfig.jsx       |  88 +++++-
 src/lib389/lib389/backend.py                  |   3 +
 src/lib389/lib389/cli_conf/backend.py         |  10 +
 15 files changed, 576 insertions(+), 12 deletions(-)
 create mode 100644 dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py

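For illustration, the three knobs can be driven from lib389 the same way the
test below does it. The sketch assumes an already-installed standalone
instance reachable through a DirSrv object named inst; the attribute names,
the BDB_LDBMConfig class and the 70-95 threshold range come from the patch
itself:

    from lib389.config import BDB_LDBMConfig

    bdb_config = BDB_LDBMConfig(inst)

    # Enabled/threshold changes only take effect after a restart;
    # the pause interval is applied at runtime.
    bdb_config.replace("nsslapd-db-locks-monitoring-enabled", "on")
    bdb_config.replace("nsslapd-db-locks-monitoring-threshold", "90")   # percent, valid range 70-95
    bdb_config.replace("nsslapd-db-locks-monitoring-pause", "500")      # milliseconds
    inst.restart()

    assert bdb_config.get_attr_val_utf8_l("nsslapd-db-locks-monitoring-enabled") == "on"

The matching dsconf switches added by this patch are --locks-monitoring-enabled,
--locks-monitoring-threshold and --locks-monitoring-pause.
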
diff --git a/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
|
||||
new file mode 100644
|
||||
index 000000000..7f9938f30
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
|
||||
@@ -0,0 +1,251 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2021 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import logging
|
||||
+import pytest
|
||||
+import datetime
|
||||
+import subprocess
|
||||
+from multiprocessing import Process, Queue
|
||||
+from lib389 import pid_from_file
|
||||
+from lib389.utils import ldap, os
|
||||
+from lib389._constants import DEFAULT_SUFFIX, ReplicaRole
|
||||
+from lib389.cli_base import LogCapture
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
+from lib389.tasks import AccessLog
|
||||
+from lib389.backend import Backends
|
||||
+from lib389.ldclt import Ldclt
|
||||
+from lib389.dbgen import dbgen_users
|
||||
+from lib389.tasks import ImportTask
|
||||
+from lib389.index import Indexes
|
||||
+from lib389.plugins import AttributeUniquenessPlugin
|
||||
+from lib389.config import BDB_LDBMConfig
|
||||
+from lib389.monitor import MonitorLDBM
|
||||
+from lib389.topologies import create_topology, _remove_ssca_db
|
||||
+
|
||||
+pytestmark = pytest.mark.tier2
|
||||
+db_locks_monitoring_ack = pytest.mark.skipif(not os.environ.get('DB_LOCKS_MONITORING_ACK', False),
|
||||
+ reason="DB locks monitoring tests may take hours if the feature is not present or another failure exists. "
|
||||
+ "Also, the feature requires a big amount of space as we set nsslapd-db-locks to 1300000.")
|
||||
+
|
||||
+DEBUGGING = os.getenv('DEBUGGING', default=False)
|
||||
+if DEBUGGING:
|
||||
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+else:
|
||||
+ logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+
|
||||
+def _kill_ns_slapd(inst):
|
||||
+ pid = str(pid_from_file(inst.ds_paths.pid_file))
|
||||
+ cmd = ['kill', '-9', pid]
|
||||
+ subprocess.Popen(cmd, stdout=subprocess.PIPE)
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="function")
|
||||
+def topology_st_fn(request):
|
||||
+ """Create DS standalone instance for each test case"""
|
||||
+
|
||||
+ topology = create_topology({ReplicaRole.STANDALONE: 1})
|
||||
+
|
||||
+ def fin():
|
||||
+ # Kill the hanging process at the end of test to prevent failures in the following tests
|
||||
+ if DEBUGGING:
|
||||
+ [_kill_ns_slapd(inst) for inst in topology]
|
||||
+ else:
|
||||
+ [_kill_ns_slapd(inst) for inst in topology]
|
||||
+ assert _remove_ssca_db(topology)
|
||||
+ [inst.stop() for inst in topology if inst.exists()]
|
||||
+ [inst.delete() for inst in topology if inst.exists()]
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ topology.logcap = LogCapture()
|
||||
+ return topology
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="function")
|
||||
+def setup_attruniq_index_be_import(topology_st_fn):
|
||||
+ """Enable Attribute Uniqueness, disable indexes and
|
||||
+ import 120000 entries to the default backend
|
||||
+ """
|
||||
+ inst = topology_st_fn.standalone
|
||||
+
|
||||
+ inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access')
|
||||
+ inst.config.set('nsslapd-plugin-logging', 'on')
|
||||
+ inst.restart()
|
||||
+
|
||||
+ attruniq = AttributeUniquenessPlugin(inst, dn="cn=attruniq,cn=plugins,cn=config")
|
||||
+ attruniq.create(properties={'cn': 'attruniq'})
|
||||
+ for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']:
|
||||
+ attruniq.add_unique_attribute(cn)
|
||||
+ attruniq.add_unique_subtree(DEFAULT_SUFFIX)
|
||||
+ attruniq.enable_all_subtrees()
|
||||
+ attruniq.enable()
|
||||
+
|
||||
+ indexes = Indexes(inst)
|
||||
+ for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']:
|
||||
+ indexes.ensure_state(properties={
|
||||
+ 'cn': cn,
|
||||
+ 'nsSystemIndex': 'false',
|
||||
+ 'nsIndexType': 'none'})
|
||||
+
|
||||
+ bdb_config = BDB_LDBMConfig(inst)
|
||||
+ bdb_config.replace("nsslapd-db-locks", "130000")
|
||||
+ inst.restart()
|
||||
+
|
||||
+ ldif_dir = inst.get_ldif_dir()
|
||||
+ import_ldif = ldif_dir + '/perf_import.ldif'
|
||||
+
|
||||
+ # Valid online import
|
||||
+ import_task = ImportTask(inst)
|
||||
+ dbgen_users(inst, 120000, import_ldif, DEFAULT_SUFFIX, entry_name="userNew")
|
||||
+ import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
|
||||
+ import_task.wait()
|
||||
+ assert import_task.is_complete()
|
||||
+
|
||||
+
|
||||
+def create_user_wrapper(q, users):
|
||||
+ try:
|
||||
+ users.create_test_user()
|
||||
+ except Exception as ex:
|
||||
+ q.put(ex)
|
||||
+
|
||||
+
|
||||
+def spawn_worker_thread(function, users, log, timeout, info):
|
||||
+ log.info(f"Starting the thread - {info}")
|
||||
+ q = Queue()
|
||||
+ p = Process(target=function, args=(q,users,))
|
||||
+ p.start()
|
||||
+
|
||||
+ log.info(f"Waiting for {timeout} seconds for the thread to finish")
|
||||
+ p.join(timeout)
|
||||
+
|
||||
+ if p.is_alive():
|
||||
+ log.info("Killing the thread as it's still running")
|
||||
+ p.terminate()
|
||||
+ p.join()
|
||||
+ raise RuntimeError(f"Function call was aborted: {info}")
|
||||
+ result = q.get()
|
||||
+ if isinstance(result, Exception):
|
||||
+ raise result
|
||||
+ else:
|
||||
+ return result
|
||||
+
|
||||
+
|
||||
+@db_locks_monitoring_ack
|
||||
+@pytest.mark.parametrize("lock_threshold", [("70"), ("80"), ("95")])
|
||||
+def test_exhaust_db_locks_basic(topology_st_fn, setup_attruniq_index_be_import, lock_threshold):
|
||||
+ """Test that when all of the locks are exhausted the instance still working
|
||||
+ and database is not corrupted
|
||||
+
|
||||
+ :id: 299108cc-04d8-4ddc-b58e-99157fccd643
|
||||
+ :setup: Standalone instance with Attr Uniq plugin and user indexes disabled
|
||||
+ :steps: 1. Set nsslapd-db-locks to 11000
|
||||
+ 2. Check that we stop acquiring new locks when the threshold is reached
|
||||
+ 3. Check that we can regulate a pause interval for DB locks monitoring thread
|
||||
+ 4. Make sure the feature works for different backends on the same suffix
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st_fn.standalone
|
||||
+ ADDITIONAL_SUFFIX = 'ou=newpeople,dc=example,dc=com'
|
||||
+
|
||||
+ backends = Backends(inst)
|
||||
+ backends.create(properties={'nsslapd-suffix': ADDITIONAL_SUFFIX,
|
||||
+ 'name': ADDITIONAL_SUFFIX[-3:]})
|
||||
+ ous = OrganizationalUnits(inst, DEFAULT_SUFFIX)
|
||||
+ ous.create(properties={'ou': 'newpeople'})
|
||||
+
|
||||
+ bdb_config = BDB_LDBMConfig(inst)
|
||||
+ bdb_config.replace("nsslapd-db-locks", "11000")
|
||||
+
|
||||
+ # Restart server
|
||||
+ inst.restart()
|
||||
+
|
||||
+ for lock_enabled in ["on", "off"]:
|
||||
+ for lock_pause in ["100", "500", "1000"]:
|
||||
+ bdb_config.replace("nsslapd-db-locks-monitoring-enabled", lock_enabled)
|
||||
+ bdb_config.replace("nsslapd-db-locks-monitoring-threshold", lock_threshold)
|
||||
+ bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause)
|
||||
+ inst.restart()
|
||||
+
|
||||
+ if lock_enabled == "off":
|
||||
+ raised_exception = (RuntimeError, ldap.SERVER_DOWN)
|
||||
+ else:
|
||||
+ raised_exception = ldap.OPERATIONS_ERROR
|
||||
+
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ with pytest.raises(raised_exception):
|
||||
+ spawn_worker_thread(create_user_wrapper, users, log, 30,
|
||||
+ f"Adding user with monitoring enabled='{lock_enabled}'; "
|
||||
+ f"threshold='{lock_threshold}'; pause='{lock_pause}'.")
|
||||
+ # Restart because we already run out of locks and the next unindexed searches will fail eventually
|
||||
+ if lock_enabled == "off":
|
||||
+ _kill_ns_slapd(inst)
|
||||
+ inst.restart()
|
||||
+
|
||||
+ users = UserAccounts(inst, ADDITIONAL_SUFFIX, rdn=None)
|
||||
+ with pytest.raises(raised_exception):
|
||||
+ spawn_worker_thread(create_user_wrapper, users, log, 30,
|
||||
+ f"Adding user with monitoring enabled='{lock_enabled}'; "
|
||||
+ f"threshold='{lock_threshold}'; pause='{lock_pause}'.")
|
||||
+ # In case feature is disabled - restart for the clean up
|
||||
+ if lock_enabled == "off":
|
||||
+ _kill_ns_slapd(inst)
|
||||
+ inst.restart()
|
||||
+
|
||||
+
|
||||
+@db_locks_monitoring_ack
|
||||
+def test_exhaust_db_locks_big_pause(topology_st_fn, setup_attruniq_index_be_import):
|
||||
+ """Test that DB lock pause setting increases the wait interval value for the monitoring thread
|
||||
+
|
||||
+ :id: 7d5bf838-5d4e-4ad5-8c03-5716afb84ea6
|
||||
+ :setup: Standalone instance with Attr Uniq plugin and user indexes disabled
|
||||
+ :steps: 1. Set nsslapd-db-locks to 20000 while using the default threshold value (95%)
|
||||
+ 2. Set nsslapd-db-locks-monitoring-pause to 10000 (10 seconds)
|
||||
+ 3. Make sure that the pause is successfully increased a few times in a row
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st_fn.standalone
|
||||
+
|
||||
+ bdb_config = BDB_LDBMConfig(inst)
|
||||
+ bdb_config.replace("nsslapd-db-locks", "20000")
|
||||
+ lock_pause = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-pause")
|
||||
+ assert lock_pause == 500
|
||||
+ lock_pause = "10000"
|
||||
+ bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause)
|
||||
+
|
||||
+ # Restart server
|
||||
+ inst.restart()
|
||||
+
|
||||
+ lock_enabled = bdb_config.get_attr_val_utf8_l("nsslapd-db-locks-monitoring-enabled")
|
||||
+ lock_threshold = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-threshold")
|
||||
+ assert lock_enabled == "on"
|
||||
+ assert lock_threshold == 90
|
||||
+
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ start = datetime.datetime.now()
|
||||
+ with pytest.raises(ldap.OPERATIONS_ERROR):
|
||||
+ spawn_worker_thread(create_user_wrapper, users, log, 30,
|
||||
+ f"Adding user with monitoring enabled='{lock_enabled}'; "
|
||||
+ f"threshold='{lock_threshold}'; pause='{lock_pause}'. Expect it to 'Work'")
|
||||
+ end = datetime.datetime.now()
|
||||
+ time_delta = end - start
|
||||
+ if time_delta.seconds < 9:
|
||||
+ raise RuntimeError("nsslapd-db-locks-monitoring-pause attribute doesn't function correctly. "
|
||||
+ f"Finished the execution in {time_delta.seconds} seconds")
|
||||
+ # In case something has failed - restart for the clean up
|
||||
+ inst.restart()
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
|
||||
index 571b0a58b..afb831c32 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
|
||||
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
|
||||
@@ -155,6 +155,8 @@ typedef unsigned short u_int16_t;
|
||||
#define DEFAULT_DNCACHE_MAXCOUNT -1 /* no limit */
|
||||
#define DEFAULT_DBCACHE_SIZE 33554432
|
||||
#define DEFAULT_DBCACHE_SIZE_STR "33554432"
|
||||
+#define DEFAULT_DBLOCK_PAUSE 500
|
||||
+#define DEFAULT_DBLOCK_PAUSE_STR "500"
|
||||
#define DEFAULT_MODE 0600
|
||||
#define DEFAULT_ALLIDSTHRESHOLD 4000
|
||||
#define DEFAULT_IDL_TUNE 1
|
||||
@@ -575,12 +577,21 @@ struct ldbminfo
|
||||
char *li_backend_implement; /* low layer backend implementation */
|
||||
int li_noparentcheck; /* check if parent exists on add */
|
||||
|
||||
- /* the next 3 fields are for the params that don't get changed until
|
||||
+ /* db lock monitoring */
|
||||
+ /* if we decide to move the values to bdb_config, we can use slapi_back_get_info function to retrieve the values */
|
||||
+ int32_t li_dblock_monitoring; /* enables db locks monitoring thread - requires restart */
|
||||
+ uint32_t li_dblock_monitoring_pause; /* an interval for db locks monitoring thread */
|
||||
+ uint32_t li_dblock_threshold; /* when the percentage is reached, abort the search in ldbm_back_next_search_entry - requires restart*/
|
||||
+ uint32_t li_dblock_threshold_reached;
|
||||
+
|
||||
+ /* the next 4 fields are for the params that don't get changed until
|
||||
* the server is restarted (used by the admin console)
|
||||
*/
|
||||
char *li_new_directory;
|
||||
uint64_t li_new_dbcachesize;
|
||||
int li_new_dblock;
|
||||
+ int32_t li_new_dblock_monitoring;
|
||||
+ uint64_t li_new_dblock_threshold;
|
||||
|
||||
int li_new_dbncache;
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
|
||||
index 738b841aa..167644943 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
|
||||
@@ -190,6 +190,102 @@ bdb_config_db_lock_set(void *arg, void *value, char *errorbuf, int phase, int ap
|
||||
return retval;
|
||||
}
|
||||
|
||||
+static void *
|
||||
+bdb_config_db_lock_monitoring_get(void *arg)
|
||||
+{
|
||||
+ struct ldbminfo *li = (struct ldbminfo *)arg;
|
||||
+
|
||||
+ return (void *)((intptr_t)(li->li_new_dblock_monitoring));
|
||||
+}
|
||||
+
|
||||
+static int
|
||||
+bdb_config_db_lock_monitoring_set(void *arg, void *value, char *errorbuf __attribute__((unused)), int phase __attribute__((unused)), int apply)
|
||||
+{
|
||||
+ struct ldbminfo *li = (struct ldbminfo *)arg;
|
||||
+ int retval = LDAP_SUCCESS;
|
||||
+ int val = (int32_t)((intptr_t)value);
|
||||
+
|
||||
+ if (apply) {
|
||||
+ if (CONFIG_PHASE_RUNNING == phase) {
|
||||
+ li->li_new_dblock_monitoring = val;
|
||||
+ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_monitoring_set",
|
||||
+ "New nsslapd-db-lock-monitoring value will not take affect until the server is restarted\n");
|
||||
+ } else {
|
||||
+ li->li_new_dblock_monitoring = val;
|
||||
+ li->li_dblock_monitoring = val;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ return retval;
|
||||
+}
|
||||
+
|
||||
+static void *
|
||||
+bdb_config_db_lock_pause_get(void *arg)
|
||||
+{
|
||||
+ struct ldbminfo *li = (struct ldbminfo *)arg;
|
||||
+
|
||||
+ return (void *)((uintptr_t)(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED)));
|
||||
+}
|
||||
+
|
||||
+static int
|
||||
+bdb_config_db_lock_pause_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply)
|
||||
+{
|
||||
+ struct ldbminfo *li = (struct ldbminfo *)arg;
|
||||
+ int retval = LDAP_SUCCESS;
|
||||
+ u_int32_t val = (u_int32_t)((uintptr_t)value);
|
||||
+
|
||||
+ if (val == 0) {
|
||||
+ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_pause_set",
|
||||
+ "%s was set to '0'. The default value will be used (%s)",
|
||||
+ CONFIG_DB_LOCKS_PAUSE, DEFAULT_DBLOCK_PAUSE_STR);
|
||||
+ val = DEFAULT_DBLOCK_PAUSE;
|
||||
+ }
|
||||
+
|
||||
+ if (apply) {
|
||||
+ slapi_atomic_store_32((int32_t *)&(li->li_dblock_monitoring_pause), val, __ATOMIC_RELAXED);
|
||||
+ }
|
||||
+ return retval;
|
||||
+}
|
||||
+
|
||||
+static void *
|
||||
+bdb_config_db_lock_threshold_get(void *arg)
|
||||
+{
|
||||
+ struct ldbminfo *li = (struct ldbminfo *)arg;
|
||||
+
|
||||
+ return (void *)((uintptr_t)(li->li_new_dblock_threshold));
|
||||
+}
|
||||
+
|
||||
+static int
|
||||
+bdb_config_db_lock_threshold_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply)
|
||||
+{
|
||||
+ struct ldbminfo *li = (struct ldbminfo *)arg;
|
||||
+ int retval = LDAP_SUCCESS;
|
||||
+ u_int32_t val = (u_int32_t)((uintptr_t)value);
|
||||
+
|
||||
+ if (val < 70 || val > 95) {
|
||||
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
|
||||
+ "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95",
|
||||
+ CONFIG_DB_LOCKS_THRESHOLD, val);
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_lock_threshold_set",
|
||||
+ "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95",
|
||||
+ CONFIG_DB_LOCKS_THRESHOLD, val);
|
||||
+ retval = LDAP_OPERATIONS_ERROR;
|
||||
+ return retval;
|
||||
+ }
|
||||
+
|
||||
+ if (apply) {
|
||||
+ if (CONFIG_PHASE_RUNNING == phase) {
|
||||
+ li->li_new_dblock_threshold = val;
|
||||
+ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_threshold_set",
|
||||
+ "New nsslapd-db-lock-monitoring-threshold value will not take affect until the server is restarted\n");
|
||||
+ } else {
|
||||
+ li->li_new_dblock_threshold = val;
|
||||
+ li->li_dblock_threshold = val;
|
||||
+ }
|
||||
+ }
|
||||
+ return retval;
|
||||
+}
|
||||
+
|
||||
static void *
|
||||
bdb_config_dbcachesize_get(void *arg)
|
||||
{
|
||||
@@ -1409,6 +1505,9 @@ static config_info bdb_config_param[] = {
|
||||
{CONFIG_SERIAL_LOCK, CONFIG_TYPE_ONOFF, "on", &bdb_config_serial_lock_get, &bdb_config_serial_lock_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
|
||||
{CONFIG_USE_LEGACY_ERRORCODE, CONFIG_TYPE_ONOFF, "off", &bdb_config_legacy_errcode_get, &bdb_config_legacy_errcode_set, 0},
|
||||
{CONFIG_DB_DEADLOCK_POLICY, CONFIG_TYPE_INT, STRINGIFYDEFINE(DB_LOCK_YOUNGEST), &bdb_config_db_deadlock_policy_get, &bdb_config_db_deadlock_policy_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
|
||||
+ {CONFIG_DB_LOCKS_MONITORING, CONFIG_TYPE_ONOFF, "on", &bdb_config_db_lock_monitoring_get, &bdb_config_db_lock_monitoring_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
|
||||
+ {CONFIG_DB_LOCKS_THRESHOLD, CONFIG_TYPE_INT, "90", &bdb_config_db_lock_threshold_get, &bdb_config_db_lock_threshold_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
|
||||
+ {CONFIG_DB_LOCKS_PAUSE, CONFIG_TYPE_INT, DEFAULT_DBLOCK_PAUSE_STR, &bdb_config_db_lock_pause_get, &bdb_config_db_lock_pause_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
|
||||
{NULL, 0, NULL, NULL, NULL, 0}};
|
||||
|
||||
void
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
index 6cccad8e6..2f25f67a2 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
@@ -35,6 +35,8 @@
|
||||
(env)->txn_checkpoint((env), (kbyte), (min), (flags))
|
||||
#define MEMP_STAT(env, gsp, fsp, flags, malloc) \
|
||||
(env)->memp_stat((env), (gsp), (fsp), (flags))
|
||||
+#define LOCK_STAT(env, statp, flags, malloc) \
|
||||
+ (env)->lock_stat((env), (statp), (flags))
|
||||
#define MEMP_TRICKLE(env, pct, nwrotep) \
|
||||
(env)->memp_trickle((env), (pct), (nwrotep))
|
||||
#define LOG_ARCHIVE(env, listp, flags, malloc) \
|
||||
@@ -66,6 +68,7 @@
|
||||
#define NEWDIR_MODE 0755
|
||||
#define DB_REGION_PREFIX "__db."
|
||||
|
||||
+static int locks_monitoring_threadmain(void *param);
|
||||
static int perf_threadmain(void *param);
|
||||
static int checkpoint_threadmain(void *param);
|
||||
static int trickle_threadmain(void *param);
|
||||
@@ -84,6 +87,7 @@ static int bdb_start_checkpoint_thread(struct ldbminfo *li);
|
||||
static int bdb_start_trickle_thread(struct ldbminfo *li);
|
||||
static int bdb_start_perf_thread(struct ldbminfo *li);
|
||||
static int bdb_start_txn_test_thread(struct ldbminfo *li);
|
||||
+static int bdb_start_locks_monitoring_thread(struct ldbminfo *li);
|
||||
static int trans_batch_count = 0;
|
||||
static int trans_batch_limit = 0;
|
||||
static int trans_batch_txn_min_sleep = 50; /* ms */
|
||||
@@ -1299,6 +1303,10 @@ bdb_start(struct ldbminfo *li, int dbmode)
|
||||
return return_value;
|
||||
}
|
||||
|
||||
+ if (0 != (return_value = bdb_start_locks_monitoring_thread(li))) {
|
||||
+ return return_value;
|
||||
+ }
|
||||
+
|
||||
/* We need to free the memory to avoid a leak
|
||||
* Also, we have to evaluate if the performance counter
|
||||
* should be preserved or not for database restore.
|
||||
@@ -2885,6 +2893,7 @@ bdb_start_perf_thread(struct ldbminfo *li)
|
||||
return return_value;
|
||||
}
|
||||
|
||||
+
|
||||
/* Performance thread */
|
||||
static int
|
||||
perf_threadmain(void *param)
|
||||
@@ -2910,6 +2919,82 @@ perf_threadmain(void *param)
|
||||
return 0;
|
||||
}
|
||||
|
||||
+
|
||||
+/*
|
||||
+ * create a thread for locks_monitoring_threadmain
|
||||
+ */
|
||||
+static int
|
||||
+bdb_start_locks_monitoring_thread(struct ldbminfo *li)
|
||||
+{
|
||||
+ int return_value = 0;
|
||||
+ if (li->li_dblock_monitoring) {
|
||||
+ if (NULL == PR_CreateThread(PR_USER_THREAD,
|
||||
+ (VFP)(void *)locks_monitoring_threadmain, li,
|
||||
+ PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
|
||||
+ PR_UNJOINABLE_THREAD,
|
||||
+ SLAPD_DEFAULT_THREAD_STACKSIZE)) {
|
||||
+ PRErrorCode prerr = PR_GetError();
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "bdb_start_locks_monitoring_thread",
|
||||
+ "Failed to create database locks monitoring thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
|
||||
+ prerr, slapd_pr_strerror(prerr));
|
||||
+ return_value = -1;
|
||||
+ }
|
||||
+ }
|
||||
+ return return_value;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+/* DB Locks Monitoring thread */
|
||||
+static int
|
||||
+locks_monitoring_threadmain(void *param)
|
||||
+{
|
||||
+ int ret = 0;
|
||||
+ uint64_t current_locks = 0;
|
||||
+ uint64_t max_locks = 0;
|
||||
+ uint32_t lock_exhaustion = 0;
|
||||
+ PRIntervalTime interval;
|
||||
+ struct ldbminfo *li = NULL;
|
||||
+
|
||||
+ PR_ASSERT(NULL != param);
|
||||
+ li = (struct ldbminfo *)param;
|
||||
+
|
||||
+ dblayer_private *priv = li->li_dblayer_private;
|
||||
+ bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
|
||||
+ PR_ASSERT(NULL != priv);
|
||||
+
|
||||
+ INCR_THREAD_COUNT(pEnv);
|
||||
+
|
||||
+ while (!BDB_CONFIG(li)->bdb_stop_threads) {
|
||||
+ if (dblayer_db_uses_locking(pEnv->bdb_DB_ENV)) {
|
||||
+ DB_LOCK_STAT *lockstat = NULL;
|
||||
+ ret = LOCK_STAT(pEnv->bdb_DB_ENV, &lockstat, 0, (void *)slapi_ch_malloc);
|
||||
+ if (0 == ret) {
|
||||
+ current_locks = lockstat->st_nlocks;
|
||||
+ max_locks = lockstat->st_maxlocks;
|
||||
+ if (max_locks){
|
||||
+ lock_exhaustion = (uint32_t)((double)current_locks / (double)max_locks * 100.0);
|
||||
+ } else {
|
||||
+ lock_exhaustion = 0;
|
||||
+ }
|
||||
+ if ((li->li_dblock_threshold) &&
|
||||
+ (lock_exhaustion >= li->li_dblock_threshold)) {
|
||||
+ slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 1, __ATOMIC_RELAXED);
|
||||
+ } else {
|
||||
+ slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 0, __ATOMIC_RELAXED);
|
||||
+ }
|
||||
+ }
|
||||
+ slapi_ch_free((void **)&lockstat);
|
||||
+ }
|
||||
+ interval = PR_MillisecondsToInterval(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED));
|
||||
+ DS_Sleep(interval);
|
||||
+ }
|
||||
+
|
||||
+ DECR_THREAD_COUNT(pEnv);
|
||||
+ slapi_log_err(SLAPI_LOG_TRACE, "locks_monitoring_threadmain", "Leaving locks_monitoring_threadmain\n");
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+
|
||||
/*
|
||||
* create a thread for deadlock_threadmain
|
||||
*/
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c
|
||||
index 893776699..4165c8fad 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/init.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/init.c
|
||||
@@ -70,6 +70,9 @@ ldbm_back_init(Slapi_PBlock *pb)
|
||||
/* Initialize the set of instances. */
|
||||
li->li_instance_set = objset_new(&ldbm_back_instance_set_destructor);
|
||||
|
||||
+ /* Init lock threshold value */
|
||||
+ li->li_dblock_threshold_reached = 0;
|
||||
+
|
||||
/* ask the factory to give us space in the Connection object
|
||||
* (only bulk import uses this)
|
||||
*/
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
|
||||
index 10cef250f..60884cf33 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
|
||||
@@ -87,6 +87,9 @@ static char *ldbm_config_moved_attributes[] =
|
||||
CONFIG_SERIAL_LOCK,
|
||||
CONFIG_USE_LEGACY_ERRORCODE,
|
||||
CONFIG_DB_DEADLOCK_POLICY,
|
||||
+ CONFIG_DB_LOCKS_MONITORING,
|
||||
+ CONFIG_DB_LOCKS_THRESHOLD,
|
||||
+ CONFIG_DB_LOCKS_PAUSE,
|
||||
""};
|
||||
|
||||
/* Used to add an array of entries, like the one above and
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.h b/ldap/servers/slapd/back-ldbm/ldbm_config.h
|
||||
index 58e64799c..6fa8292eb 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.h
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.h
|
||||
@@ -104,6 +104,9 @@ struct config_info
|
||||
#define CONFIG_DB_VERBOSE "nsslapd-db-verbose"
|
||||
#define CONFIG_DB_DEBUG "nsslapd-db-debug"
|
||||
#define CONFIG_DB_LOCK "nsslapd-db-locks"
|
||||
+#define CONFIG_DB_LOCKS_MONITORING "nsslapd-db-locks-monitoring-enabled"
|
||||
+#define CONFIG_DB_LOCKS_THRESHOLD "nsslapd-db-locks-monitoring-threshold"
|
||||
+#define CONFIG_DB_LOCKS_PAUSE "nsslapd-db-locks-monitoring-pause"
|
||||
#define CONFIG_DB_NAMED_REGIONS "nsslapd-db-named-regions"
|
||||
#define CONFIG_DB_PRIVATE_MEM "nsslapd-db-private-mem"
|
||||
#define CONFIG_DB_PRIVATE_IMPORT_MEM "nsslapd-db-private-import-mem"
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
|
||||
index 1a7b510d4..6e22debde 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
|
||||
@@ -1472,6 +1472,7 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
|
||||
slapi_pblock_get(pb, SLAPI_CONNECTION, &conn);
|
||||
slapi_pblock_get(pb, SLAPI_OPERATION, &op);
|
||||
|
||||
+
|
||||
if ((reverse_list = operation_is_flag_set(op, OP_FLAG_REVERSE_CANDIDATE_ORDER))) {
|
||||
/*
|
||||
* Start at the end of the list and work our way forward. Since a single
|
||||
@@ -1538,6 +1539,18 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
|
||||
|
||||
/* Find the next candidate entry and return it. */
|
||||
while (1) {
|
||||
+ if (li->li_dblock_monitoring &&
|
||||
+ slapi_atomic_load_32((int32_t *)&(li->li_dblock_threshold_reached), __ATOMIC_RELAXED)) {
|
||||
+ slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_next_search_entry",
|
||||
+ "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold "
|
||||
+ "under cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config). "
|
||||
+ "Please, increase nsslapd-db-locks according to your needs.\n");
|
||||
+ slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_ENTRY, NULL);
|
||||
+ delete_search_result_set(pb, &sr);
|
||||
+ rc = SLAPI_FAIL_GENERAL;
|
||||
+ slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold)", 0, NULL);
|
||||
+ goto bail;
|
||||
+ }
|
||||
|
||||
/* check for abandon */
|
||||
if (slapi_op_abandoned(pb) || (NULL == sr)) {
|
||||
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
|
||||
index 388616b36..db7d01bbc 100644
|
||||
--- a/ldap/servers/slapd/libglobs.c
|
||||
+++ b/ldap/servers/slapd/libglobs.c
|
||||
@@ -8171,8 +8171,8 @@ config_set(const char *attr, struct berval **values, char *errorbuf, int apply)
|
||||
#if 0
|
||||
debugHashTable(attr);
|
||||
#endif
|
||||
- slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored", attr);
|
||||
- slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored", attr);
|
||||
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored\n", attr);
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored\n", attr);
|
||||
return LDAP_NO_SUCH_ATTRIBUTE;
|
||||
}
|
||||
|
||||
diff --git a/src/cockpit/389-console/src/css/ds.css b/src/cockpit/389-console/src/css/ds.css
|
||||
index 9248116e7..3cf50b593 100644
|
||||
--- a/src/cockpit/389-console/src/css/ds.css
|
||||
+++ b/src/cockpit/389-console/src/css/ds.css
|
||||
@@ -639,6 +639,10 @@ option {
|
||||
padding-right: 0 !important;
|
||||
}
|
||||
|
||||
+.ds-vertical-scroll-auto {
|
||||
+ overflow-y: auto !important;
|
||||
+}
|
||||
+
|
||||
.alert {
|
||||
max-width: 750px;
|
||||
}
|
||||
diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx
|
||||
index efa3ce6d5..11cae972c 100644
|
||||
--- a/src/cockpit/389-console/src/database.jsx
|
||||
+++ b/src/cockpit/389-console/src/database.jsx
|
||||
@@ -157,6 +157,7 @@ export class Database extends React.Component {
|
||||
const attrs = config.attrs;
|
||||
let db_cache_auto = false;
|
||||
let import_cache_auto = false;
|
||||
+ let dblocksMonitoring = false;
|
||||
let dbhome = "";
|
||||
|
||||
if ('nsslapd-db-home-directory' in attrs) {
|
||||
@@ -168,6 +169,9 @@ export class Database extends React.Component {
|
||||
if (attrs['nsslapd-import-cache-autosize'] != "0") {
|
||||
import_cache_auto = true;
|
||||
}
|
||||
+ if (attrs['nsslapd-db-locks-monitoring-enabled'][0] == "on") {
|
||||
+ dblocksMonitoring = true;
|
||||
+ }
|
||||
|
||||
this.setState(() => (
|
||||
{
|
||||
@@ -187,6 +191,9 @@ export class Database extends React.Component {
|
||||
txnlogdir: attrs['nsslapd-db-logdirectory'],
|
||||
dbhomedir: dbhome,
|
||||
dblocks: attrs['nsslapd-db-locks'],
|
||||
+ dblocksMonitoring: dblocksMonitoring,
|
||||
+ dblocksMonitoringThreshold: attrs['nsslapd-db-locks-monitoring-threshold'],
|
||||
+ dblocksMonitoringPause: attrs['nsslapd-db-locks-monitoring-pause'],
|
||||
chxpoint: attrs['nsslapd-db-checkpoint-interval'],
|
||||
compactinterval: attrs['nsslapd-db-compactdb-interval'],
|
||||
importcacheauto: attrs['nsslapd-import-cache-autosize'],
|
||||
diff --git a/src/cockpit/389-console/src/index.html b/src/cockpit/389-console/src/index.html
|
||||
index 1278844fc..fd0eeb669 100644
|
||||
--- a/src/cockpit/389-console/src/index.html
|
||||
+++ b/src/cockpit/389-console/src/index.html
|
||||
@@ -12,7 +12,7 @@
|
||||
</head>
|
||||
|
||||
|
||||
-<body>
|
||||
+<body class="ds-vertical-scroll-auto">
|
||||
<div id="dsinstance"></div>
|
||||
<script src="index.js"></script>
|
||||
</body>
|
||||
diff --git a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
|
||||
index f6e662bca..6a71c138d 100644
|
||||
--- a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
|
||||
@@ -31,6 +31,9 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
txnlogdir: this.props.data.txnlogdir,
|
||||
dbhomedir: this.props.data.dbhomedir,
|
||||
dblocks: this.props.data.dblocks,
|
||||
+ dblocksMonitoring: this.props.data.dblocksMonitoring,
|
||||
+ dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold,
|
||||
+ dblocksMonitoringPause: this.props.data.dblocksMonitoringPause,
|
||||
chxpoint: this.props.data.chxpoint,
|
||||
compactinterval: this.props.data.compactinterval,
|
||||
importcachesize: this.props.data.importcachesize,
|
||||
@@ -47,6 +50,9 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
_txnlogdir: this.props.data.txnlogdir,
|
||||
_dbhomedir: this.props.data.dbhomedir,
|
||||
_dblocks: this.props.data.dblocks,
|
||||
+ _dblocksMonitoring: this.props.data.dblocksMonitoring,
|
||||
+ _dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold,
|
||||
+ _dblocksMonitoringPause: this.props.data.dblocksMonitoringPause,
|
||||
_chxpoint: this.props.data.chxpoint,
|
||||
_compactinterval: this.props.data.compactinterval,
|
||||
_importcachesize: this.props.data.importcachesize,
|
||||
@@ -55,6 +61,7 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
_import_cache_auto: this.props.data.import_cache_auto,
|
||||
};
|
||||
this.handleChange = this.handleChange.bind(this);
|
||||
+ this.select_db_locks_monitoring = this.select_db_locks_monitoring.bind(this);
|
||||
this.select_auto_cache = this.select_auto_cache.bind(this);
|
||||
this.select_auto_import_cache = this.select_auto_import_cache.bind(this);
|
||||
this.save_db_config = this.save_db_config.bind(this);
|
||||
@@ -76,6 +83,12 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
}, this.handleChange(e));
|
||||
}
|
||||
|
||||
+ select_db_locks_monitoring (val, e) {
|
||||
+ this.setState({
|
||||
+ dblocksMonitoring: !this.state.dblocksMonitoring
|
||||
+ }, this.handleChange(val, e));
|
||||
+ }
|
||||
+
|
||||
handleChange(e) {
|
||||
// Generic
|
||||
const value = e.target.type === 'checkbox' ? e.target.checked : e.target.value;
|
||||
@@ -150,6 +163,21 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
cmd.push("--locks=" + this.state.dblocks);
|
||||
requireRestart = true;
|
||||
}
|
||||
+ if (this.state._dblocksMonitoring != this.state.dblocksMonitoring) {
|
||||
+ if (this.state.dblocksMonitoring) {
|
||||
+ cmd.push("--locks-monitoring-enabled=on");
|
||||
+ } else {
|
||||
+ cmd.push("--locks-monitoring-enabled=off");
|
||||
+ }
|
||||
+ requireRestart = true;
|
||||
+ }
|
||||
+ if (this.state._dblocksMonitoringThreshold != this.state.dblocksMonitoringThreshold) {
|
||||
+ cmd.push("--locks-monitoring-threshold=" + this.state.dblocksMonitoringThreshold);
|
||||
+ requireRestart = true;
|
||||
+ }
|
||||
+ if (this.state._dblocksMonitoringPause != this.state.dblocksMonitoringPause) {
|
||||
+ cmd.push("--locks-monitoring-pause=" + this.state.dblocksMonitoringPause);
|
||||
+ }
|
||||
if (this.state._chxpoint != this.state.chxpoint) {
|
||||
cmd.push("--checkpoint-interval=" + this.state.chxpoint);
|
||||
requireRestart = true;
|
||||
@@ -216,6 +244,28 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
let import_cache_form;
|
||||
let db_auto_checked = false;
|
||||
let import_auto_checked = false;
|
||||
+ let dblocksMonitor = "";
|
||||
+
|
||||
+ if (this.state.dblocksMonitoring) {
|
||||
+ dblocksMonitor = <div className="ds-margin-top">
|
||||
+ <Row className="ds-margin-top" title="Sets the DB lock exhaustion value in percentage (valid range is 70-95). If too many locks are acquired, the server will abort the searches while the number of locks are not decreased. It helps to avoid DB corruption and long recovery. (nsslapd-db-locks-monitoring-threshold)">
|
||||
+ <Col componentClass={ControlLabel} sm={4}>
|
||||
+ DB Locks Threshold Percentage
|
||||
+ </Col>
|
||||
+ <Col sm={8}>
|
||||
+ <input className="ds-input" type="number" id="dblocksMonitoringThreshold" size="10" onChange={this.handleChange} value={this.state.dblocksMonitoringThreshold} />
|
||||
+ </Col>
|
||||
+ </Row>
|
||||
+ <Row className="ds-margin-top" title="Sets the amount of time (milliseconds) that the monitoring thread spends waiting between checks. (nsslapd-db-locks-monitoring-pause)">
|
||||
+ <Col componentClass={ControlLabel} sm={4}>
|
||||
+ DB Locks Pause Milliseconds
|
||||
+ </Col>
|
||||
+ <Col sm={8}>
|
||||
+ <input className="ds-input" type="number" id="dblocksMonitoringPause" size="10" onChange={this.handleChange} value={this.state.dblocksMonitoringPause} />
|
||||
+ </Col>
|
||||
+ </Row>
|
||||
+ </div>;
|
||||
+ }
|
||||
|
||||
if (this.state.db_cache_auto) {
|
||||
db_cache_form = <div id="auto-cache-form" className="ds-margin-left">
|
||||
@@ -422,14 +472,6 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
<input id="dbhomedir" value={this.state.dbhomedir} onChange={this.handleChange} className="ds-input-auto" type="text" />
|
||||
</Col>
|
||||
</Row>
|
||||
- <Row className="ds-margin-top" title="The number of database locks (nsslapd-db-locks).">
|
||||
- <Col componentClass={ControlLabel} sm={4}>
|
||||
- Database Locks
|
||||
- </Col>
|
||||
- <Col sm={8}>
|
||||
- <input id="dblocks" value={this.state.dblocks} onChange={this.handleChange} className="ds-input-auto" type="text" />
|
||||
- </Col>
|
||||
- </Row>
|
||||
<Row className="ds-margin-top" title="Amount of time in seconds after which the Directory Server sends a checkpoint entry to the database transaction log (nsslapd-db-checkpoint-interval).">
|
||||
<Col componentClass={ControlLabel} sm={4}>
|
||||
Database Checkpoint Interval
|
||||
@@ -446,6 +488,36 @@ export class GlobalDatabaseConfig extends React.Component {
|
||||
<input id="compactinterval" value={this.state.compactinterval} onChange={this.handleChange} className="ds-input-auto" type="text" />
|
||||
</Col>
|
||||
</Row>
|
||||
+ <Row className="ds-margin-top" title="The number of database locks (nsslapd-db-locks).">
|
||||
+ <Col componentClass={ControlLabel} sm={4}>
|
||||
+ Database Locks
|
||||
+ </Col>
|
||||
+ <Col sm={8}>
|
||||
+ <input id="dblocks" value={this.state.dblocks} onChange={this.handleChange} className="ds-input-auto" type="text" />
|
||||
+ </Col>
|
||||
+ </Row>
|
||||
+ <Row>
|
||||
+ <Col sm={12}>
|
||||
+ <h5 className="ds-sub-header">DB Locks Monitoring</h5>
|
||||
+ <hr />
|
||||
+ </Col>
|
||||
+ </Row>
|
||||
+ <Row>
|
||||
+ <Col sm={12}>
|
||||
+ <Checkbox title="Set input to be set automatically"
|
||||
+ id="dblocksMonitoring"
|
||||
+ checked={this.state.dblocksMonitoring}
|
||||
+ onChange={this.select_db_locks_monitoring}
|
||||
+ >
|
||||
+ Enable Monitoring
|
||||
+ </Checkbox>
|
||||
+ </Col>
|
||||
+ </Row>
|
||||
+ <Row>
|
||||
+ <Col sm={12}>
|
||||
+ {dblocksMonitor}
|
||||
+ </Col>
|
||||
+ </Row>
|
||||
</Form>
|
||||
</div>
|
||||
</div>
|
||||
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
|
||||
index bcd7b383f..13bb27842 100644
|
||||
--- a/src/lib389/lib389/backend.py
|
||||
+++ b/src/lib389/lib389/backend.py
|
||||
@@ -1011,6 +1011,9 @@ class DatabaseConfig(DSLdapObject):
|
||||
'nsslapd-db-transaction-batch-max-wait',
|
||||
'nsslapd-db-logbuf-size',
|
||||
'nsslapd-db-locks',
|
||||
+ 'nsslapd-db-locks-monitoring-enabled',
|
||||
+ 'nsslapd-db-locks-monitoring-threshold',
|
||||
+ 'nsslapd-db-locks-monitoring-pause',
|
||||
'nsslapd-db-private-import-mem',
|
||||
'nsslapd-import-cache-autosize',
|
||||
'nsslapd-cache-autosize',
|
||||
diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py
|
||||
index 6bfbcb036..722764d10 100644
|
||||
--- a/src/lib389/lib389/cli_conf/backend.py
|
||||
+++ b/src/lib389/lib389/cli_conf/backend.py
|
||||
@@ -46,6 +46,9 @@ arg_to_attr = {
|
||||
'txn_batch_max': 'nsslapd-db-transaction-batch-max-wait',
|
||||
'logbufsize': 'nsslapd-db-logbuf-size',
|
||||
'locks': 'nsslapd-db-locks',
|
||||
+ 'locks_monitoring_enabled': 'nsslapd-db-locks-monitoring-enabled',
|
||||
+ 'locks_monitoring_threshold': 'nsslapd-db-locks-monitoring-threshold',
|
||||
+ 'locks_monitoring_pause': 'nsslapd-db-locks-monitoring-pause',
|
||||
'import_cache_autosize': 'nsslapd-import-cache-autosize',
|
||||
'cache_autosize': 'nsslapd-cache-autosize',
|
||||
'cache_autosize_split': 'nsslapd-cache-autosize-split',
|
||||
@@ -998,6 +1001,13 @@ def create_parser(subparsers):
|
||||
'the batch count (only works when txn-batch-val is set)')
|
||||
set_db_config_parser.add_argument('--logbufsize', help='Specifies the transaction log information buffer size')
|
||||
set_db_config_parser.add_argument('--locks', help='Sets the maximum number of database locks')
|
||||
+    set_db_config_parser.add_argument('--locks-monitoring-enabled', help='Set to "on" or "off" to monitor DB lock usage. When lock usage crosses the percentage '
+                                                                         'set with "--locks-monitoring-threshold", searches are aborted ("on" by default)')
|
||||
+    set_db_config_parser.add_argument('--locks-monitoring-threshold', help='Sets the DB lock exhaustion threshold in percent (valid range is 70-95). If too many locks are '
+                                                                           'acquired, the server aborts searches until the number of locks '
+                                                                           'decreases. This helps to avoid DB corruption and a long recovery.')
|
||||
+    set_db_config_parser.add_argument('--locks-monitoring-pause', help='Sets the DB lock monitoring pause in milliseconds, i.e. the amount of time '
+                                                                       'that the monitoring thread waits between checks.')
|
||||
set_db_config_parser.add_argument('--import-cache-autosize', help='Set to "on" or "off" to automatically set the size of the import '
|
||||
                                                                          'cache to be used during the import process of LDIF files')
|
||||
set_db_config_parser.add_argument('--cache-autosize', help='Sets the percentage of free memory that is used in total for the database '
|
||||
--
|
||||
2.26.3
|
||||
|
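As a rough usage sketch (not part of the patch), the new options can be driven the same way the
test suites later in this series invoke dsconf through subprocess. The subcommand path
("backend config set"), the instance name "slapd-standalone1" and the values below are
illustrative assumptions.

# Hypothetical helper exercising the new nsslapd-db-locks-monitoring-* settings.
import subprocess

def enable_db_locks_monitoring(sbin_dir):
    # The flags map through arg_to_attr to the corresponding
    # nsslapd-db-locks-monitoring-* attributes in the backend config entry.
    subprocess.check_call(['%s/dsconf' % sbin_dir, 'slapd-standalone1',
                           'backend', 'config', 'set',
                           '--locks-monitoring-enabled', 'on',
                           '--locks-monitoring-threshold', '90',
                           '--locks-monitoring-pause', '500'])
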
@ -1,33 +0,0 @@
|
||||
From 7573c62a2e61293a4800e67919d79341fa1a1532 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Wed, 26 May 2021 16:07:43 +0200
|
||||
Subject: [PATCH 10/12] Issue 4764 - replicated operation sometime checks ACI
|
||||
(#4783)
|
||||
|
||||
(cherry picked from commit 0cfdea7abcacfca6686a6cf84dbf7ae1167f3022)
|
||||
---
|
||||
ldap/servers/slapd/connection.c | 8 ++++++++
|
||||
1 file changed, 8 insertions(+)
|
||||
|
||||
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
|
||||
index c7a15e775..e0c1a52d2 100644
|
||||
--- a/ldap/servers/slapd/connection.c
|
||||
+++ b/ldap/servers/slapd/connection.c
|
||||
@@ -1771,6 +1771,14 @@ connection_threadmain()
|
||||
}
|
||||
}
|
||||
|
||||
+ /*
|
||||
+ * Fix bz 1931820 issue (the check to set OP_FLAG_REPLICATED may be done
|
||||
+ * before replication session is properly set).
|
||||
+ */
|
||||
+ if (replication_connection) {
|
||||
+ operation_set_flag(op, OP_FLAG_REPLICATED);
|
||||
+ }
|
||||
+
|
||||
/*
|
||||
* Call the do_<operation> function to process this request.
|
||||
*/
|
||||
--
|
||||
2.26.3
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1,155 +0,0 @@
|
||||
From 580880a598a8f9972994684c49593a4cf8b8969b Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Sat, 29 May 2021 13:19:53 -0400
|
||||
Subject: [PATCH 12/12] Issue 4778 - RFE - Add changelog compaction task in
|
||||
1.4.3
|
||||
|
||||
Description: In 1.4.3 the replication changelog is a separate database,
|
||||
so it needs a separate "nsds5task" compaction task (COMPACT_CL5)
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4778
|
||||
|
||||
ASAN tested and approved
|
||||
|
||||
Reviewed by: mreynolds
|
||||
---
|
||||
ldap/servers/plugins/replication/cl5_api.c | 21 +++++++++----------
|
||||
ldap/servers/plugins/replication/cl5_api.h | 1 +
|
||||
.../replication/repl5_replica_config.c | 9 +++++++-
|
||||
3 files changed, 19 insertions(+), 12 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
|
||||
index 75a2f46f5..4c5077b48 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_api.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_api.c
|
||||
@@ -266,7 +266,6 @@ static int _cl5TrimInit(void);
|
||||
static void _cl5TrimCleanup(void);
|
||||
static int _cl5TrimMain(void *param);
|
||||
static void _cl5DoTrimming(void);
|
||||
-static void _cl5CompactDBs(void);
|
||||
static void _cl5PurgeRID(Object *file_obj, ReplicaId cleaned_rid);
|
||||
static int _cl5PurgeGetFirstEntry(Object *file_obj, CL5Entry *entry, void **iterator, DB_TXN *txnid, int rid, DBT *key);
|
||||
static int _cl5PurgeGetNextEntry(CL5Entry *entry, void *iterator, DBT *key);
|
||||
@@ -3152,7 +3151,7 @@ _cl5TrimMain(void *param __attribute__((unused)))
|
||||
if (slapi_current_utc_time() > compactdb_time) {
|
||||
/* time to trim */
|
||||
timeCompactPrev = timeNow;
|
||||
- _cl5CompactDBs();
|
||||
+ cl5CompactDBs();
|
||||
compacting = PR_FALSE;
|
||||
}
|
||||
}
|
||||
@@ -3250,8 +3249,8 @@ _cl5DoPurging(cleanruv_purge_data *purge_data)
|
||||
}
|
||||
|
||||
/* clear free page files to reduce changelog */
|
||||
-static void
|
||||
-_cl5CompactDBs(void)
|
||||
+void
|
||||
+cl5CompactDBs(void)
|
||||
{
|
||||
int rc;
|
||||
Object *fileObj = NULL;
|
||||
@@ -3264,14 +3263,14 @@ _cl5CompactDBs(void)
|
||||
rc = TXN_BEGIN(s_cl5Desc.dbEnv, NULL, &txnid, 0);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - Failed to begin transaction; db error - %d %s\n",
|
||||
+ "cl5CompactDBs - Failed to begin transaction; db error - %d %s\n",
|
||||
rc, db_strerror(rc));
|
||||
goto bail;
|
||||
}
|
||||
|
||||
|
||||
slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - compacting replication changelogs...\n");
|
||||
+ "cl5CompactDBs - compacting replication changelogs...\n");
|
||||
for (fileObj = objset_first_obj(s_cl5Desc.dbFiles);
|
||||
fileObj;
|
||||
fileObj = objset_next_obj(s_cl5Desc.dbFiles, fileObj)) {
|
||||
@@ -3284,17 +3283,17 @@ _cl5CompactDBs(void)
|
||||
&c_data, DB_FREE_SPACE, NULL /*end*/);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - Failed to compact %s; db error - %d %s\n",
|
||||
+ "cl5CompactDBs - Failed to compact %s; db error - %d %s\n",
|
||||
dbFile->replName, rc, db_strerror(rc));
|
||||
goto bail;
|
||||
}
|
||||
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - %s - %d pages freed\n",
|
||||
+ "cl5CompactDBs - %s - %d pages freed\n",
|
||||
dbFile->replName, c_data.compact_pages_free);
|
||||
}
|
||||
|
||||
slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - compacting replication changelogs finished.\n");
|
||||
+ "cl5CompactDBs - compacting replication changelogs finished.\n");
|
||||
bail:
|
||||
if (fileObj) {
|
||||
object_release(fileObj);
|
||||
@@ -3303,14 +3302,14 @@ bail:
|
||||
rc = TXN_ABORT(txnid);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - Failed to abort transaction; db error - %d %s\n",
|
||||
+ "cl5CompactDBs - Failed to abort transaction; db error - %d %s\n",
|
||||
rc, db_strerror(rc));
|
||||
}
|
||||
} else {
|
||||
rc = TXN_COMMIT(txnid);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
- "_cl5CompactDBs - Failed to commit transaction; db error - %d %s\n",
|
||||
+ "cl5CompactDBs - Failed to commit transaction; db error - %d %s\n",
|
||||
rc, db_strerror(rc));
|
||||
}
|
||||
}
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h
|
||||
index 4b0949fb3..11db771f2 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_api.h
|
||||
+++ b/ldap/servers/plugins/replication/cl5_api.h
|
||||
@@ -405,5 +405,6 @@ int cl5DeleteRUV(void);
|
||||
void cl5CleanRUV(ReplicaId rid);
|
||||
void cl5NotifyCleanup(int rid);
|
||||
void trigger_cl_purging(cleanruv_purge_data *purge_data);
|
||||
+void cl5CompactDBs(void);
|
||||
|
||||
#endif
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
|
||||
index a969ef82f..e708a1ccb 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
|
||||
@@ -29,6 +29,8 @@
|
||||
#define CLEANRUVLEN 8
|
||||
#define CLEANALLRUV "CLEANALLRUV"
|
||||
#define CLEANALLRUVLEN 11
|
||||
+#define COMPACT_CL5 "COMPACT_CL5"
|
||||
+#define COMPACT_CL5_LEN 11
|
||||
#define REPLICA_RDN "cn=replica"
|
||||
|
||||
#define CLEANALLRUV_MAX_WAIT 7200 /* 2 hours */
|
||||
@@ -1050,7 +1052,6 @@ replica_config_change_flags(Replica *r, const char *new_flags, char *returntext
|
||||
static int
|
||||
replica_execute_task(Replica *r, const char *task_name, char *returntext, int apply_mods)
|
||||
{
|
||||
-
|
||||
if (strcasecmp(task_name, CL2LDIF_TASK) == 0) {
|
||||
if (apply_mods) {
|
||||
return replica_execute_cl2ldif_task(r, returntext);
|
||||
@@ -1084,6 +1085,12 @@ replica_execute_task(Replica *r, const char *task_name, char *returntext, int ap
|
||||
return replica_execute_cleanall_ruv_task(r, (ReplicaId)temprid, empty_task, "no", PR_TRUE, returntext);
|
||||
} else
|
||||
return LDAP_SUCCESS;
|
||||
+ } else if (strncasecmp(task_name, COMPACT_CL5, COMPACT_CL5_LEN) == 0) {
|
||||
+ /* compact the replication changelogs */
|
||||
+ if (apply_mods) {
|
||||
+ cl5CompactDBs();
|
||||
+ }
|
||||
+ return LDAP_SUCCESS;
|
||||
} else {
|
||||
PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, "Unsupported replica task - %s", task_name);
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
|
||||
--
|
||||
2.26.3
|
||||
|
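A minimal sketch of exercising the new task from Python, assuming a connected lib389 DirSrv
instance ("inst") and an example suffix; the quoted replica DN layout is an assumption here.

# Hypothetical trigger for the COMPACT_CL5 keyword handled by replica_execute_task().
import ldap

REPLICA_DN = 'cn=replica,cn="dc=example,dc=com",cn=mapping tree,cn=config'

def compact_changelog(inst):
    # Writing nsds5task on the replica entry runs cl5CompactDBs() once the mod is applied.
    inst.modify_s(REPLICA_DN, [(ldap.MOD_REPLACE, 'nsds5task', [b'COMPACT_CL5'])])
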
@ -1,52 +0,0 @@
|
||||
From bc41bbb89405b2059b80e344b2d4c59ae39aabe6 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Thu, 10 Jun 2021 15:03:27 +0200
|
||||
Subject: [PATCH 1/3] Issue 4797 - ACL IP ADDRESS evaluation may corrupt
|
||||
c_isreplication_session connection flags (#4799)
|
||||
|
||||
Bug description:
|
||||
The fix for ticket #3764 was broken with a missing break in a
|
||||
switch. The consequence is that while setting the client IP
|
||||
address in the pblock (SLAPI_CONN_CLIENTNETADDR_ACLIP), the
|
||||
connection is erroneously set as replication connection.
|
||||
        This can lead to a crash or to a failure of the testcase
        test_access_from_certain_network_only_ip.
        This bug was quite hidden until the fix for #4764 started
        exposing it more frequently.
|
||||
|
||||
Fix description:
|
||||
Add the missing break
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4797
|
||||
|
||||
Reviewed by: Mark Reynolds
|
||||
|
||||
Platforms tested: F33
|
||||
---
|
||||
ldap/servers/slapd/pblock.c | 3 ++-
|
||||
1 file changed, 2 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
|
||||
index fcac53839..a64986aeb 100644
|
||||
--- a/ldap/servers/slapd/pblock.c
|
||||
+++ b/ldap/servers/slapd/pblock.c
|
||||
@@ -2595,7 +2595,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
|
||||
pblock->pb_conn->c_authtype = slapi_ch_strdup((char *)value);
|
||||
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
|
||||
break;
|
||||
- case SLAPI_CONN_CLIENTNETADDR_ACLIP:
|
||||
+ case SLAPI_CONN_CLIENTNETADDR_ACLIP:
|
||||
if (pblock->pb_conn == NULL) {
|
||||
break;
|
||||
}
|
||||
@@ -2603,6 +2603,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
|
||||
slapi_ch_free((void **)&pblock->pb_conn->cin_addr_aclip);
|
||||
pblock->pb_conn->cin_addr_aclip = (PRNetAddr *)value;
|
||||
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
|
||||
+ break;
|
||||
case SLAPI_CONN_IS_REPLICATION_SESSION:
|
||||
if (pblock->pb_conn == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR,
|
||||
--
|
||||
2.31.1
|
||||
|
@ -1,79 +0,0 @@
|
||||
From b3170e39519530c39d59202413b20e6bd466224d Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 27 Jan 2021 09:56:38 +0000
|
||||
Subject: [PATCH 2/3] Issue 4396 - Minor memory leak in backend (#4558) (#4572)
|
||||
|
||||
Bug Description: As multiple suffixes per backend were no longer used, this
|
||||
functionality has been replaced with a single suffix per backend. Legacy
|
||||
code remains that adds multiple suffixes to the dse internal backend,
|
||||
resulting in memory allocations that are lost.
|
||||
|
||||
Also a minor typo is corrected in backend.c
|
||||
|
||||
Fix Description: Calls to be_addsuffix on the DSE backend are removed
|
||||
as they are never used.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/4396
|
||||
|
||||
Reviewed by: mreynolds389, Firstyear, droideck (Thank you)
|
||||
---
|
||||
ldap/servers/slapd/backend.c | 2 +-
|
||||
ldap/servers/slapd/fedse.c | 12 +++---------
|
||||
2 files changed, 4 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
|
||||
index bc52b4643..5707504a9 100644
|
||||
--- a/ldap/servers/slapd/backend.c
|
||||
+++ b/ldap/servers/slapd/backend.c
|
||||
@@ -42,7 +42,7 @@ be_init(Slapi_Backend *be, const char *type, const char *name, int isprivate, in
|
||||
}
|
||||
be->be_monitordn = slapi_create_dn_string("cn=monitor,cn=%s,cn=%s,cn=plugins,cn=config",
|
||||
name, type);
|
||||
- if (NULL == be->be_configdn) {
|
||||
+ if (NULL == be->be_monitordn) {
|
||||
slapi_log_err(SLAPI_LOG_ERR,
|
||||
"be_init", "Failed create instance monitor dn for "
|
||||
"plugin %s, instance %s\n",
|
||||
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
|
||||
index 0d645f909..7b820b540 100644
|
||||
--- a/ldap/servers/slapd/fedse.c
|
||||
+++ b/ldap/servers/slapd/fedse.c
|
||||
@@ -2827,7 +2827,7 @@ search_snmp(Slapi_PBlock *pb __attribute__((unused)),
|
||||
}
|
||||
|
||||
/*
|
||||
- * Called from config.c to install the internal backends
|
||||
+ * Called from main.c to install the internal backends
|
||||
*/
|
||||
int
|
||||
setup_internal_backends(char *configdir)
|
||||
@@ -2846,7 +2846,6 @@ setup_internal_backends(char *configdir)
|
||||
Slapi_DN counters;
|
||||
Slapi_DN snmp;
|
||||
Slapi_DN root;
|
||||
- Slapi_Backend *be;
|
||||
Slapi_DN encryption;
|
||||
Slapi_DN saslmapping;
|
||||
Slapi_DN plugins;
|
||||
@@ -2895,16 +2894,11 @@ setup_internal_backends(char *configdir)
|
||||
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &saslmapping, LDAP_SCOPE_SUBTREE, "(objectclass=nsSaslMapping)", sasl_map_config_add, NULL, NULL);
|
||||
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &plugins, LDAP_SCOPE_SUBTREE, "(objectclass=nsSlapdPlugin)", check_plugin_path, NULL, NULL);
|
||||
|
||||
- be = be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
|
||||
- be_addsuffix(be, &root);
|
||||
- be_addsuffix(be, &monitor);
|
||||
- be_addsuffix(be, &config);
|
||||
+ be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
|
||||
|
||||
/*
|
||||
- * Now that the be's are in place, we can
|
||||
- * setup the mapping tree.
|
||||
+ * Now that the be's are in place, we can setup the mapping tree.
|
||||
*/
|
||||
-
|
||||
if (mapping_tree_init()) {
|
||||
slapi_log_err(SLAPI_LOG_EMERG, "setup_internal_backends", "Failed to init mapping tree\n");
|
||||
exit(1);
|
||||
--
|
||||
2.31.1
|
||||
|
@ -1,66 +0,0 @@
|
||||
From 8d06fdf44b0d337f1e321e61ee1b22972ddea917 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Fri, 2 Apr 2021 14:05:41 +0200
|
||||
Subject: [PATCH 3/3] Issue 4700 - Regression in winsync replication agreement
|
||||
(#4712)
|
||||
|
||||
Bug description:
|
||||
#4396 fixes a memory leak but did not set 'cn=config' as
|
||||
DSE backend.
|
||||
        It had no significant impact except with the sidgen IPA plugin
|
||||
|
||||
Fix description:
|
||||
revert the portion of the #4364 patch that set be_suffix
|
||||
in be_addsuffix, free the suffix before setting it
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4700
|
||||
|
||||
Reviewed by: Pierre Rogier (thanks !)
|
||||
|
||||
Platforms tested: F33
|
||||
---
|
||||
ldap/servers/slapd/backend.c | 3 ++-
|
||||
ldap/servers/slapd/fedse.c | 6 +++++-
|
||||
2 files changed, 7 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
|
||||
index 5707504a9..5db706841 100644
|
||||
--- a/ldap/servers/slapd/backend.c
|
||||
+++ b/ldap/servers/slapd/backend.c
|
||||
@@ -173,7 +173,8 @@ void
|
||||
be_addsuffix(Slapi_Backend *be, const Slapi_DN *suffix)
|
||||
{
|
||||
if (be->be_state != BE_STATE_DELETED) {
|
||||
- be->be_suffix = slapi_sdn_dup(suffix);;
|
||||
+ slapi_sdn_free(&be->be_suffix);
|
||||
+ be->be_suffix = slapi_sdn_dup(suffix);
|
||||
}
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
|
||||
index 7b820b540..44159c991 100644
|
||||
--- a/ldap/servers/slapd/fedse.c
|
||||
+++ b/ldap/servers/slapd/fedse.c
|
||||
@@ -2846,6 +2846,7 @@ setup_internal_backends(char *configdir)
|
||||
Slapi_DN counters;
|
||||
Slapi_DN snmp;
|
||||
Slapi_DN root;
|
||||
+ Slapi_Backend *be;
|
||||
Slapi_DN encryption;
|
||||
Slapi_DN saslmapping;
|
||||
Slapi_DN plugins;
|
||||
@@ -2894,7 +2895,10 @@ setup_internal_backends(char *configdir)
|
||||
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &saslmapping, LDAP_SCOPE_SUBTREE, "(objectclass=nsSaslMapping)", sasl_map_config_add, NULL, NULL);
|
||||
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &plugins, LDAP_SCOPE_SUBTREE, "(objectclass=nsSlapdPlugin)", check_plugin_path, NULL, NULL);
|
||||
|
||||
- be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
|
||||
+ be = be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
|
||||
+ be_addsuffix(be, &root);
|
||||
+ be_addsuffix(be, &monitor);
|
||||
+ be_addsuffix(be, &config);
|
||||
|
||||
/*
|
||||
* Now that the be's are in place, we can setup the mapping tree.
|
||||
--
|
||||
2.31.1
|
||||
|
@ -1,88 +0,0 @@
|
||||
From 7345c51c68dfd90a704ccbb0e5b1e736af80f146 Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 17 May 2021 16:10:22 +0200
|
||||
Subject: [PATCH] Issue 4725 - Fix compiler warnings
|
||||
|
||||
---
|
||||
ldap/servers/slapd/proto-slap.h | 2 +-
|
||||
ldap/servers/slapd/pw.c | 9 ++++-----
|
||||
ldap/servers/slapd/pw_retry.c | 2 --
|
||||
3 files changed, 5 insertions(+), 8 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
|
||||
index 6ff178127..2768d5a1d 100644
|
||||
--- a/ldap/servers/slapd/proto-slap.h
|
||||
+++ b/ldap/servers/slapd/proto-slap.h
|
||||
@@ -1012,7 +1012,7 @@ int add_shadow_ext_password_attrs(Slapi_PBlock *pb, Slapi_Entry **e);
|
||||
* pw_retry.c
|
||||
*/
|
||||
int update_pw_retry(Slapi_PBlock *pb);
|
||||
-int update_trp_pw_usecount(Slapi_PBlock *pb, Slapi_Entry *e, int32_t use_count);
|
||||
+int update_tpr_pw_usecount(Slapi_PBlock *pb, Slapi_Entry *e, int32_t use_count);
|
||||
void pw_apply_mods(const Slapi_DN *sdn, Slapi_Mods *mods);
|
||||
void pw_set_componentID(struct slapi_componentid *cid);
|
||||
struct slapi_componentid *pw_get_componentID(void);
|
||||
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
|
||||
index d98422513..2a167c8f1 100644
|
||||
--- a/ldap/servers/slapd/pw.c
|
||||
+++ b/ldap/servers/slapd/pw.c
|
||||
@@ -2622,7 +2622,6 @@ int
|
||||
slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int send_result) {
|
||||
passwdPolicy *pwpolicy = NULL;
|
||||
char *dn = NULL;
|
||||
- int tpr_maxuse;
|
||||
char *value;
|
||||
time_t cur_time;
|
||||
char *cur_time_str = NULL;
|
||||
@@ -2638,7 +2637,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
|
||||
return 0;
|
||||
}
|
||||
|
||||
- if (slapi_entry_attr_hasvalue(bind_target_entry, "pwdTPRReset", "TRUE") == NULL) {
|
||||
+ if (!slapi_entry_attr_hasvalue(bind_target_entry, "pwdTPRReset", "TRUE")) {
|
||||
/* the password was not reset by an admin while a TRP pwp was set, just returned */
|
||||
return 0;
|
||||
}
|
||||
@@ -2646,7 +2645,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
|
||||
/* Check entry TPR max use */
|
||||
if (pwpolicy->pw_tpr_maxuse >= 0) {
|
||||
uint use_count;
|
||||
- value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRUseCount");
|
||||
+ value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRUseCount");
|
||||
if (value) {
|
||||
/* max Use is enforced */
|
||||
use_count = strtoull(value, 0, 0);
|
||||
@@ -2681,7 +2680,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
|
||||
|
||||
/* Check entry TPR expiration at a specific time */
|
||||
if (pwpolicy->pw_tpr_delay_expire_at >= 0) {
|
||||
- value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRExpireAt");
|
||||
+ value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRExpireAt");
|
||||
if (value) {
|
||||
/* max Use is enforced */
|
||||
if (difftime(parse_genTime(cur_time_str), parse_genTime(value)) >= 0) {
|
||||
@@ -2709,7 +2708,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
|
||||
|
||||
/* Check entry TPR valid after a specific time */
|
||||
if (pwpolicy->pw_tpr_delay_valid_from >= 0) {
|
||||
- value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRValidFrom");
|
||||
+ value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRValidFrom");
|
||||
if (value) {
|
||||
/* validity after a specific time is enforced */
|
||||
if (difftime(parse_genTime(value), parse_genTime(cur_time_str)) >= 0) {
|
||||
diff --git a/ldap/servers/slapd/pw_retry.c b/ldap/servers/slapd/pw_retry.c
|
||||
index 5d13eb636..af54aa19d 100644
|
||||
--- a/ldap/servers/slapd/pw_retry.c
|
||||
+++ b/ldap/servers/slapd/pw_retry.c
|
||||
@@ -163,8 +163,6 @@ set_retry_cnt_and_time(Slapi_PBlock *pb, int count, time_t cur_time)
|
||||
int
|
||||
set_tpr_usecount_mods(Slapi_PBlock *pb, Slapi_Mods *smods, int count)
|
||||
{
|
||||
- char *timestr;
|
||||
- time_t unlock_time;
|
||||
char retry_cnt[16] = {0}; /* 1-65535 */
|
||||
const char *dn = NULL;
|
||||
Slapi_DN *sdn = NULL;
|
||||
--
|
||||
2.31.1
|
||||
|
@ -1,202 +0,0 @@
|
||||
From 59266365eda8130abf6901263efae4c87586376a Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 28 Jun 2021 16:40:15 +0200
|
||||
Subject: [PATCH] Issue 4814 - _cl5_get_tod_expiration may crash at startup
|
||||
|
||||
Bug description:
|
||||
        This bug exists only in the 1.4.3 branch.
        In 1.4.3, the changelog is opened as a separate database, so the
        compaction mechanism is started along with the changelog trimming
        mechanism. The problem is that the changelog compaction is
        configured after the compaction mechanism is started. Depending
        on thread scheduling, it crashes.
|
||||
|
||||
Fix description:
|
||||
Make sure configuration of compaction thread is
|
||||
taken into account (cl5ConfigSetCompaction) before
|
||||
the compaction thread starts (cl5open)
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4814
|
||||
|
||||
Reviewed by: Mark Reynolds, Simon Pichugin (thanks !)
|
||||
|
||||
Platforms tested: 8.5
|
||||
---
|
||||
ldap/servers/plugins/replication/cl5_api.c | 24 ++++++++++++-------
|
||||
ldap/servers/plugins/replication/cl5_api.h | 10 +++++++-
|
||||
ldap/servers/plugins/replication/cl5_config.c | 8 +++++--
|
||||
ldap/servers/plugins/replication/cl5_init.c | 4 +++-
|
||||
ldap/servers/plugins/replication/cl5_test.c | 2 +-
|
||||
.../servers/plugins/replication/repl_shared.h | 2 +-
|
||||
6 files changed, 35 insertions(+), 15 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
|
||||
index 4c5077b48..954b6b9e3 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_api.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_api.c
|
||||
@@ -1016,6 +1016,20 @@ cl5GetState()
|
||||
return s_cl5Desc.dbState;
|
||||
}
|
||||
|
||||
+void
|
||||
+cl5ConfigSetCompaction(int compactInterval, char *compactTime)
|
||||
+{
|
||||
+
|
||||
+ if (compactInterval != CL5_NUM_IGNORE) {
|
||||
+ s_cl5Desc.dbTrim.compactInterval = compactInterval;
|
||||
+ }
|
||||
+
|
||||
+ if (strcmp(compactTime, CL5_STR_IGNORE) != 0) {
|
||||
+ s_cl5Desc.dbTrim.compactTime = slapi_ch_strdup(compactTime);
|
||||
+ }
|
||||
+
|
||||
+}
|
||||
+
|
||||
/* Name: cl5ConfigTrimming
|
||||
Description: sets changelog trimming parameters; changelog must be open.
|
||||
   Parameters: maxEntries - maximum number of entries in the changelog (in all files);
|
||||
@@ -1026,7 +1040,7 @@ cl5GetState()
|
||||
CL5_BAD_STATE if changelog is not open
|
||||
*/
|
||||
int
|
||||
-cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval)
|
||||
+cl5ConfigTrimming(int maxEntries, const char *maxAge, int trimInterval)
|
||||
{
|
||||
if (s_cl5Desc.dbState == CL5_STATE_NONE) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
@@ -1058,14 +1072,6 @@ cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char
|
||||
s_cl5Desc.dbTrim.maxEntries = maxEntries;
|
||||
}
|
||||
|
||||
- if (compactInterval != CL5_NUM_IGNORE) {
|
||||
- s_cl5Desc.dbTrim.compactInterval = compactInterval;
|
||||
- }
|
||||
-
|
||||
- if (strcmp(compactTime, CL5_STR_IGNORE) != 0) {
|
||||
- s_cl5Desc.dbTrim.compactTime = slapi_ch_strdup(compactTime);
|
||||
- }
|
||||
-
|
||||
if (trimInterval != CL5_NUM_IGNORE) {
|
||||
s_cl5Desc.dbTrim.trimInterval = trimInterval;
|
||||
}
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h
|
||||
index 11db771f2..6aa48aec4 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_api.h
|
||||
+++ b/ldap/servers/plugins/replication/cl5_api.h
|
||||
@@ -227,6 +227,14 @@ int cl5ImportLDIF(const char *clDir, const char *ldifFile, Replica **replicas);
|
||||
|
||||
int cl5GetState(void);
|
||||
|
||||
+/* Name: cl5ConfigSetCompaction
|
||||
+ * Description: sets the database compaction parameters
|
||||
+ * Parameters: compactInterval - interval between compactions; default is 30 days
+ *             compactTime - time of day to run compaction; default is 23:59
|
||||
+ * Return: void
|
||||
+ */
|
||||
+void cl5ConfigSetCompaction(int compactInterval, char *compactTime);
|
||||
+
|
||||
/* Name: cl5ConfigTrimming
|
||||
Description: sets changelog trimming parameters
|
||||
Parameters: maxEntries - maximum number of entries in the log;
|
||||
@@ -236,7 +244,7 @@ int cl5GetState(void);
|
||||
Return: CL5_SUCCESS if successful;
|
||||
CL5_BAD_STATE if changelog has not been open
|
||||
*/
|
||||
-int cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval);
|
||||
+int cl5ConfigTrimming(int maxEntries, const char *maxAge, int trimInterval);
|
||||
|
||||
void cl5DestroyIterator(void *iterator);
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_config.c b/ldap/servers/plugins/replication/cl5_config.c
|
||||
index b32686788..a43534c9b 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_config.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_config.c
|
||||
@@ -197,6 +197,8 @@ changelog5_config_add(Slapi_PBlock *pb __attribute__((unused)),
|
||||
|
||||
goto done;
|
||||
}
|
||||
+ /* Set compaction parameters */
|
||||
+ cl5ConfigSetCompaction(config.compactInterval, config.compactTime);
|
||||
|
||||
/* start the changelog */
|
||||
rc = cl5Open(config.dir, &config.dbconfig);
|
||||
@@ -212,7 +214,7 @@ changelog5_config_add(Slapi_PBlock *pb __attribute__((unused)),
|
||||
}
|
||||
|
||||
/* set trimming parameters */
|
||||
- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
|
||||
+ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval);
|
||||
if (rc != CL5_SUCCESS) {
|
||||
*returncode = 1;
|
||||
if (returntext) {
|
||||
@@ -548,6 +550,8 @@ changelog5_config_modify(Slapi_PBlock *pb,
|
||||
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
|
||||
"changelog5_config_modify - Deleted the changelog at %s\n", currentDir);
|
||||
}
|
||||
+ /* Set compaction parameters */
|
||||
+ cl5ConfigSetCompaction(config.compactInterval, config.compactTime);
|
||||
|
||||
rc = cl5Open(config.dir, &config.dbconfig);
|
||||
if (rc != CL5_SUCCESS) {
|
||||
@@ -575,7 +579,7 @@ changelog5_config_modify(Slapi_PBlock *pb,
|
||||
if (config.maxEntries != CL5_NUM_IGNORE ||
|
||||
config.trimInterval != CL5_NUM_IGNORE ||
|
||||
strcmp(config.maxAge, CL5_STR_IGNORE) != 0) {
|
||||
- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
|
||||
+ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval);
|
||||
if (rc != CL5_SUCCESS) {
|
||||
*returncode = 1;
|
||||
if (returntext) {
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_init.c b/ldap/servers/plugins/replication/cl5_init.c
|
||||
index 251859714..567e0274c 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_init.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_init.c
|
||||
@@ -45,6 +45,8 @@ changelog5_init()
|
||||
rc = 0; /* OK */
|
||||
goto done;
|
||||
}
|
||||
+ /* Set compaction parameters */
|
||||
+ cl5ConfigSetCompaction(config.compactInterval, config.compactTime);
|
||||
|
||||
/* start changelog */
|
||||
rc = cl5Open(config.dir, &config.dbconfig);
|
||||
@@ -57,7 +59,7 @@ changelog5_init()
|
||||
}
|
||||
|
||||
/* set trimming parameters */
|
||||
- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
|
||||
+ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval);
|
||||
if (rc != CL5_SUCCESS) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
"changelog5_init: failed to configure changelog trimming\n");
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_test.c b/ldap/servers/plugins/replication/cl5_test.c
|
||||
index d6656653c..efb8c543a 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_test.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_test.c
|
||||
@@ -281,7 +281,7 @@ testTrimming()
|
||||
rc = populateChangelog(300, NULL);
|
||||
|
||||
if (rc == 0)
|
||||
- rc = cl5ConfigTrimming(300, "1d", CHANGELOGDB_COMPACT_INTERVAL, CHANGELOGDB_TRIM_INTERVAL);
|
||||
+ rc = cl5ConfigTrimming(300, "1d", CHANGELOGDB_TRIM_INTERVAL);
|
||||
|
||||
interval = PR_SecondsToInterval(300); /* 5 min is default trimming interval */
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
diff --git a/ldap/servers/plugins/replication/repl_shared.h b/ldap/servers/plugins/replication/repl_shared.h
|
||||
index 6708e12f7..b59b2bd27 100644
|
||||
--- a/ldap/servers/plugins/replication/repl_shared.h
|
||||
+++ b/ldap/servers/plugins/replication/repl_shared.h
|
||||
@@ -26,7 +26,7 @@
|
||||
|
||||
#define CHANGELOGDB_TRIM_INTERVAL 300 /* 5 minutes */
|
||||
#define CHANGELOGDB_COMPACT_INTERVAL 2592000 /* 30 days */
|
||||
-#define CHANGELOGDB_COMPACT_TIME "23:55" /* 30 days */
|
||||
+#define CHANGELOGDB_COMPACT_TIME "23:59" /* around midnight */
|
||||
|
||||
#define CONFIG_CHANGELOG_DIR_ATTRIBUTE "nsslapd-changelogdir"
|
||||
#define CONFIG_CHANGELOG_MAXENTRIES_ATTRIBUTE "nsslapd-changelogmaxentries"
|
||||
--
|
||||
2.31.1
|
||||
|
@ -1,51 +0,0 @@
|
||||
From e7fdfe527a5f72674fe4b577a0555cabf8ec73a5 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 7 Jun 2021 11:23:35 +0200
|
||||
Subject: [PATCH] Issue 4789 - Temporary password rules are not enforce with
|
||||
local password policy (#4790)
|
||||
|
||||
Bug description:
|
||||
        When allocating a password policy structure (new_passwdPolicy),
        it is initialized with the local policy definition or the global
        one. If a local policy entry exists, the TPR attributes
        (passwordTPRMaxUse, passwordTPRDelayValidFrom and
        passwordTPRDelayExpireAt) are not taken into account.
|
||||
|
||||
Fix description:
|
||||
Take into account TPR attributes to initialize the policy
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4789
|
||||
|
||||
Reviewed by: Simon Pichugin, William Brown
|
||||
|
||||
Platforms tested: F34
|
||||
---
|
||||
ldap/servers/slapd/pw.c | 12 ++++++++++++
|
||||
1 file changed, 12 insertions(+)
|
||||
|
||||
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
|
||||
index 2a167c8f1..7680df41d 100644
|
||||
--- a/ldap/servers/slapd/pw.c
|
||||
+++ b/ldap/servers/slapd/pw.c
|
||||
@@ -2356,6 +2356,18 @@ new_passwdPolicy(Slapi_PBlock *pb, const char *dn)
|
||||
if ((sval = attr_get_present_values(attr))) {
|
||||
pwdpolicy->pw_dict_path = (char *)slapi_value_get_string(*sval);
|
||||
}
|
||||
+ } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_MAXUSE)) {
|
||||
+ if ((sval = attr_get_present_values(attr))) {
|
||||
+ pwdpolicy->pw_tpr_maxuse = slapi_value_get_int(*sval);
|
||||
+ }
|
||||
+ } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_DELAY_EXPIRE_AT)) {
|
||||
+ if ((sval = attr_get_present_values(attr))) {
|
||||
+ pwdpolicy->pw_tpr_delay_expire_at = slapi_value_get_int(*sval);
|
||||
+ }
|
||||
+ } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_DELAY_VALID_FROM)) {
|
||||
+ if ((sval = attr_get_present_values(attr))) {
|
||||
+ pwdpolicy->pw_tpr_delay_valid_from = slapi_value_get_int(*sval);
|
||||
+ }
|
||||
}
|
||||
} /* end of for() loop */
|
||||
if (pw_entry) {
|
||||
--
|
||||
2.31.1
|
||||
|
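A small sketch of what this fix enables: TPR attributes stored on a local password policy entry
are now honored by new_passwdPolicy(). It reuses the PwPolicyManager API that appears in the
pwdPolicy_attribute_test fixtures; the attribute values are illustrative.

# Hypothetical local policy carrying the TPR attributes read by new_passwdPolicy().
from lib389.pwpolicy import PwPolicyManager

def add_user_tpr_policy(inst, user_dn):
    pwp = PwPolicyManager(inst)
    pwp.create_user_policy(user_dn, {
        'passwordMustChange': 'on',
        'passwordTPRMaxUse': '3',
        'passwordTPRDelayValidFrom': '60',
        'passwordTPRDelayExpireAt': '3600',
    })
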
@ -1,350 +0,0 @@
|
||||
From 6a741b3ef50babf2ac2479437a38829204ffd438 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Thu, 17 Jun 2021 16:22:09 +0200
|
||||
Subject: [PATCH] Issue 4788 - CLI should support Temporary Password Rules
|
||||
attributes (#4793)
|
||||
|
||||
Bug description:
|
||||
        Since #4725, password policies support temporary password rules.
        The CLI (dsconf) does not support this RFE, so only direct LDAP
        operations can configure the global/local password policy.
|
||||
|
||||
Fix description:
|
||||
Update dsconf to support this new RFE.
|
||||
        To run the testcase successfully, it relies on #4788
|
||||
|
||||
relates: #4788
|
||||
|
||||
Reviewed by: Simon Pichugin (thanks !!)
|
||||
|
||||
Platforms tested: F34
|
||||
---
|
||||
.../password/pwdPolicy_attribute_test.py | 172 ++++++++++++++++--
|
||||
src/lib389/lib389/cli_conf/pwpolicy.py | 5 +-
|
||||
src/lib389/lib389/pwpolicy.py | 5 +-
|
||||
3 files changed, 165 insertions(+), 17 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
|
||||
index aee3a91ad..085d0a373 100644
|
||||
--- a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
|
||||
@@ -34,7 +34,7 @@ log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
-def create_user(topology_st, request):
|
||||
+def test_user(topology_st, request):
|
||||
"""User for binding operation"""
|
||||
topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on')
|
||||
log.info('Adding test user {}')
|
||||
@@ -56,10 +56,11 @@ def create_user(topology_st, request):
|
||||
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
|
||||
|
||||
request.addfinalizer(fin)
|
||||
+ return user
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
-def password_policy(topology_st, create_user):
|
||||
+def password_policy(topology_st, test_user):
|
||||
"""Set up password policy for subtree and user"""
|
||||
|
||||
pwp = PwPolicyManager(topology_st.standalone)
|
||||
@@ -71,7 +72,7 @@ def password_policy(topology_st, create_user):
|
||||
pwp.create_user_policy(TEST_USER_DN, policy_props)
|
||||
|
||||
@pytest.mark.skipif(ds_is_older('1.4.3.3'), reason="Not implemented")
|
||||
-def test_pwd_reset(topology_st, create_user):
|
||||
+def test_pwd_reset(topology_st, test_user):
|
||||
"""Test new password policy attribute "pwdReset"
|
||||
|
||||
:id: 03db357b-4800-411e-a36e-28a534293004
|
||||
@@ -124,7 +125,7 @@ def test_pwd_reset(topology_st, create_user):
|
||||
[('on', 'off', ldap.UNWILLING_TO_PERFORM),
|
||||
('off', 'off', ldap.UNWILLING_TO_PERFORM),
|
||||
('off', 'on', False), ('on', 'on', False)])
|
||||
-def test_change_pwd(topology_st, create_user, password_policy,
|
||||
+def test_change_pwd(topology_st, test_user, password_policy,
|
||||
subtree_pwchange, user_pwchange, exception):
|
||||
"""Verify that 'passwordChange' attr works as expected
|
||||
User should have a priority over a subtree.
|
||||
@@ -184,7 +185,7 @@ def test_change_pwd(topology_st, create_user, password_policy,
|
||||
user.reset_password(TEST_USER_PWD)
|
||||
|
||||
|
||||
-def test_pwd_min_age(topology_st, create_user, password_policy):
|
||||
+def test_pwd_min_age(topology_st, test_user, password_policy):
|
||||
"""If we set passwordMinAge to some value, for example to 10, then it
|
||||
should not allow the user to change the password within 10 seconds after
|
||||
his previous change.
|
||||
@@ -257,7 +258,7 @@ def test_pwd_min_age(topology_st, create_user, password_policy):
|
||||
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
|
||||
user.reset_password(TEST_USER_PWD)
|
||||
|
||||
-def test_global_tpr_maxuse_1(topology_st, create_user, request):
|
||||
+def test_global_tpr_maxuse_1(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRMaxUse
|
||||
Test that after passwordTPRMaxUse failures to bind
|
||||
additional bind with valid password are failing with CONSTRAINT_VIOLATION
|
||||
@@ -374,7 +375,7 @@ def test_global_tpr_maxuse_1(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_maxuse_2(topology_st, create_user, request):
|
||||
+def test_global_tpr_maxuse_2(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRMaxUse
|
||||
Test that after less than passwordTPRMaxUse failures to bind
|
||||
    additional binds with a valid password are successful
|
||||
@@ -474,7 +475,7 @@ def test_global_tpr_maxuse_2(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_maxuse_3(topology_st, create_user, request):
|
||||
+def test_global_tpr_maxuse_3(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRMaxUse
|
||||
Test that after less than passwordTPRMaxUse failures to bind
|
||||
A bind with valid password is successfull but passwordMustChange
|
||||
@@ -587,7 +588,7 @@ def test_global_tpr_maxuse_3(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_maxuse_4(topology_st, create_user, request):
|
||||
+def test_global_tpr_maxuse_4(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRMaxUse
|
||||
Test that a TPR attribute passwordTPRMaxUse
|
||||
can be updated by DM but not the by user itself
|
||||
@@ -701,7 +702,148 @@ def test_global_tpr_maxuse_4(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_delayValidFrom_1(topology_st, create_user, request):
|
||||
+def test_local_tpr_maxuse_5(topology_st, test_user, request):
|
||||
+    """Test that a local TPR policy overrides the global one: passwordTPRMaxUse
+    Test that after passwordTPRMaxUse failures to bind, additional binds
+    with a valid password fail with CONSTRAINT_VIOLATION
|
||||
+
|
||||
+ :id: c3919707-d804-445a-8754-8385b1072c42
|
||||
+ :customerscenario: False
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Global password policy Enable passwordMustChange
|
||||
+ 2. Global password policy Set passwordTPRMaxUse=5
|
||||
+ 3. Global password policy Set passwordMaxFailure to a higher value to not disturb the test
|
||||
+ 4. Local password policy Enable passwordMustChange
|
||||
+ 5. Local password policy Set passwordTPRMaxUse=10 (higher than global)
|
||||
+ 6. Bind with a wrong password 10 times and check INVALID_CREDENTIALS
|
||||
+ 7. Check that passwordTPRUseCount got to the limit (5)
|
||||
+ 8. Bind with a wrong password (CONSTRAINT_VIOLATION)
|
||||
+ and check passwordTPRUseCount overpass the limit by 1 (11)
|
||||
+ 9. Bind with a valid password 10 times and check CONSTRAINT_VIOLATION
|
||||
+ and check passwordTPRUseCount increases
|
||||
+ 10. Reset password policy configuration and remove local password from user
|
||||
+ :expected results:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ 6. Success
|
||||
+ 7. Success
|
||||
+ 8. Success
|
||||
+ 9. Success
|
||||
+ 10. Success
|
||||
+ """
|
||||
+
|
||||
+ global_tpr_maxuse = 5
|
||||
+ # Set global password policy config, passwordMaxFailure being higher than
|
||||
+ # passwordTPRMaxUse so that TPR is enforced first
|
||||
+ topology_st.standalone.config.replace('passwordMustChange', 'on')
|
||||
+ topology_st.standalone.config.replace('passwordMaxFailure', str(global_tpr_maxuse + 20))
|
||||
+ topology_st.standalone.config.replace('passwordTPRMaxUse', str(global_tpr_maxuse))
|
||||
+ time.sleep(.5)
|
||||
+
|
||||
+ local_tpr_maxuse = global_tpr_maxuse + 5
|
||||
+ # Reset user's password with a local password policy
|
||||
+ # that has passwordTPRMaxUse higher than global
|
||||
+ #our_user = UserAccount(topology_st.standalone, TEST_USER_DN)
|
||||
+ subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(),
|
||||
+ 'slapd-standalone1',
|
||||
+ 'localpwp',
|
||||
+ 'adduser',
|
||||
+ test_user.dn])
|
||||
+ subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(),
|
||||
+ 'slapd-standalone1',
|
||||
+ 'localpwp',
|
||||
+ 'set',
|
||||
+ '--pwptprmaxuse',
|
||||
+ str(local_tpr_maxuse),
|
||||
+ '--pwdmustchange',
|
||||
+ 'on',
|
||||
+ test_user.dn])
|
||||
+ test_user.replace('userpassword', PASSWORD)
|
||||
+ time.sleep(.5)
|
||||
+
|
||||
+ # look up to passwordTPRMaxUse with failing
|
||||
+ # bind to check that the limits of TPR are enforced
|
||||
+ for i in range(local_tpr_maxuse):
|
||||
+ # Bind as user with a wrong password
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ test_user.rebind('wrong password')
|
||||
+ time.sleep(.5)
|
||||
+
|
||||
+ # Check that pwdReset is TRUE
|
||||
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
|
||||
+ #assert test_user.get_attr_val_utf8('pwdReset') == 'TRUE'
|
||||
+
|
||||
+ # Check that pwdTPRReset is TRUE
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1)
|
||||
+ log.info("%dth failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (i+1, i+1))
|
||||
+
|
||||
+
|
||||
+ # Now the #failures reached passwordTPRMaxUse
|
||||
+ # Check that pwdReset is TRUE
|
||||
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
|
||||
+
|
||||
+ # Check that pwdTPRReset is TRUE
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse)
|
||||
+ log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (local_tpr_maxuse))
|
||||
+
|
||||
+ # Bind as user with wrong password --> ldap.CONSTRAINT_VIOLATION
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ test_user.rebind("wrong password")
|
||||
+ time.sleep(.5)
|
||||
+
|
||||
+ # Check that pwdReset is TRUE
|
||||
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
|
||||
+
|
||||
+ # Check that pwdTPRReset is TRUE
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + 1)
|
||||
+        log.info("failing bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (local_tpr_maxuse + 1))
|
||||
+
|
||||
+ # Now check that all next attempts with correct password are all in LDAP_CONSTRAINT_VIOLATION
|
||||
+ # and passwordTPRRetryCount remains unchanged
|
||||
+ # account is now similar to locked
|
||||
+ for i in range(10):
|
||||
+ # Bind as user with valid password
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ test_user.rebind(PASSWORD)
|
||||
+ time.sleep(.5)
|
||||
+
|
||||
+ # Check that pwdReset is TRUE
|
||||
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
|
||||
+
|
||||
+ # Check that pwdTPRReset is TRUE
|
||||
+ # pwdTPRUseCount keeps increasing
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
|
||||
+ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + i + 2)
|
||||
+ log.info("Rejected bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (local_tpr_maxuse + i + 2))
|
||||
+
|
||||
+
|
||||
+ def fin():
|
||||
+ topology_st.standalone.restart()
|
||||
+ # Reset password policy config
|
||||
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
|
||||
+ topology_st.standalone.config.replace('passwordMustChange', 'off')
|
||||
+
|
||||
+ # Remove local password policy from that entry
|
||||
+ subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(),
|
||||
+ 'slapd-standalone1',
|
||||
+ 'localpwp',
|
||||
+ 'remove',
|
||||
+ test_user.dn])
|
||||
+
|
||||
+ # Reset user's password
|
||||
+ test_user.replace('userpassword', TEST_USER_PWD)
|
||||
+
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+def test_global_tpr_delayValidFrom_1(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRDelayValidFrom
|
||||
Test that a TPR password is not valid before reset time +
|
||||
passwordTPRDelayValidFrom
|
||||
@@ -766,7 +908,7 @@ def test_global_tpr_delayValidFrom_1(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_delayValidFrom_2(topology_st, create_user, request):
|
||||
+def test_global_tpr_delayValidFrom_2(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRDelayValidFrom
|
||||
Test that a TPR password is valid after reset time +
|
||||
passwordTPRDelayValidFrom
|
||||
@@ -838,7 +980,7 @@ def test_global_tpr_delayValidFrom_2(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_delayValidFrom_3(topology_st, create_user, request):
|
||||
+def test_global_tpr_delayValidFrom_3(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRDelayValidFrom
|
||||
Test that a TPR attribute passwordTPRDelayValidFrom
|
||||
can be updated by DM but not the by user itself
|
||||
@@ -940,7 +1082,7 @@ def test_global_tpr_delayValidFrom_3(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_delayExpireAt_1(topology_st, create_user, request):
|
||||
+def test_global_tpr_delayExpireAt_1(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRDelayExpireAt
|
||||
Test that a TPR password is not valid after reset time +
|
||||
passwordTPRDelayExpireAt
|
||||
@@ -1010,7 +1152,7 @@ def test_global_tpr_delayExpireAt_1(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_delayExpireAt_2(topology_st, create_user, request):
|
||||
+def test_global_tpr_delayExpireAt_2(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRDelayExpireAt
|
||||
Test that a TPR password is valid before reset time +
|
||||
passwordTPRDelayExpireAt
|
||||
@@ -1082,7 +1224,7 @@ def test_global_tpr_delayExpireAt_2(topology_st, create_user, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_global_tpr_delayExpireAt_3(topology_st, create_user, request):
|
||||
+def test_global_tpr_delayExpireAt_3(topology_st, test_user, request):
|
||||
"""Test global TPR policy : passwordTPRDelayExpireAt
|
||||
Test that a TPR attribute passwordTPRDelayExpireAt
|
||||
can be updated by DM but not the by user itself
|
||||
diff --git a/src/lib389/lib389/cli_conf/pwpolicy.py b/src/lib389/lib389/cli_conf/pwpolicy.py
|
||||
index 2838afcb8..26af6e7ec 100644
|
||||
--- a/src/lib389/lib389/cli_conf/pwpolicy.py
|
||||
+++ b/src/lib389/lib389/cli_conf/pwpolicy.py
|
||||
@@ -255,6 +255,9 @@ def create_parser(subparsers):
|
||||
set_parser.add_argument('--pwpinheritglobal', help="Set to \"on\" to allow local policies to inherit the global policy")
|
||||
set_parser.add_argument('--pwddictcheck', help="Set to \"on\" to enforce CrackLib dictionary checking")
|
||||
set_parser.add_argument('--pwddictpath', help="Filesystem path to specific/custom CrackLib dictionary files")
|
||||
+ set_parser.add_argument('--pwptprmaxuse', help="Number of times a reset password can be used for authentication")
|
||||
+ set_parser.add_argument('--pwptprdelayexpireat', help="Number of seconds after which a reset password expires")
|
||||
+    set_parser.add_argument('--pwptprdelayvalidfrom', help="Number of seconds to wait before a reset password can be used to authenticate")
|
||||
# delete local password policy
|
||||
del_parser = local_subcommands.add_parser('remove', help='Remove a local password policy')
|
||||
del_parser.set_defaults(func=del_local_policy)
|
||||
@@ -291,4 +294,4 @@ def create_parser(subparsers):
|
||||
#############################################
|
||||
set_parser.add_argument('DN', nargs=1, help='Set the local policy for this entry DN')
|
||||
add_subtree_parser.add_argument('DN', nargs=1, help='Add/replace the subtree policy for this entry DN')
|
||||
- add_user_parser.add_argument('DN', nargs=1, help='Add/replace the local password policy for this entry DN')
|
||||
\ No newline at end of file
|
||||
+ add_user_parser.add_argument('DN', nargs=1, help='Add/replace the local password policy for this entry DN')
|
||||
diff --git a/src/lib389/lib389/pwpolicy.py b/src/lib389/lib389/pwpolicy.py
|
||||
index 8653cb195..d2427933b 100644
|
||||
--- a/src/lib389/lib389/pwpolicy.py
|
||||
+++ b/src/lib389/lib389/pwpolicy.py
|
||||
@@ -65,7 +65,10 @@ class PwPolicyManager(object):
|
||||
'pwddictcheck': 'passworddictcheck',
|
||||
'pwddictpath': 'passworddictpath',
|
||||
'pwdallowhash': 'nsslapd-allow-hashed-passwords',
|
||||
- 'pwpinheritglobal': 'nsslapd-pwpolicy-inherit-global'
|
||||
+ 'pwpinheritglobal': 'nsslapd-pwpolicy-inherit-global',
|
||||
+ 'pwptprmaxuse': 'passwordTPRMaxUse',
|
||||
+ 'pwptprdelayexpireat': 'passwordTPRDelayExpireAt',
|
||||
+ 'pwptprdelayvalidfrom': 'passwordTPRDelayValidFrom'
|
||||
}
|
||||
|
||||
def is_subtree_policy(self, dn):
|
||||
--
|
||||
2.31.1
|
||||
|
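A brief sketch of the new dsconf options, mirroring the subprocess-based calls used in
test_local_tpr_maxuse_5 above; the instance name and values are examples.

# Hypothetical invocation of the new TPR flags on a local password policy.
import subprocess

def set_local_tpr_options(sbin_dir, user_dn):
    subprocess.check_call(['%s/dsconf' % sbin_dir, 'slapd-standalone1',
                           'localpwp', 'set',
                           '--pwptprmaxuse', '5',
                           '--pwptprdelayvalidfrom', '60',
                           '--pwptprdelayexpireat', '3600',
                           user_dn])
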
@ -1,179 +0,0 @@
|
||||
From 7b7217538908ae58df864ef5cd82e1d3303c189f Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 7 Jun 2021 12:58:42 -0400
|
||||
Subject: [PATCH] Issue 4447 - Crash when the Referential Integrity log is
|
||||
manually edited
|
||||
|
||||
Bug Description: If the referint log is manually edited with a string
|
||||
that is not a DN the server will crash when processing
|
||||
the log.
|
||||
|
||||
Fix Description: Check for NULL pointers when strtoking the file line.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4447
|
||||
|
||||
Reviewed by: firstyear(Thanks!)
|
||||
---
|
||||
.../tests/suites/plugins/referint_test.py | 72 +++++++++++++++----
|
||||
ldap/servers/plugins/referint/referint.c | 7 ++
|
||||
src/lib389/lib389/plugins.py | 15 ++++
|
||||
3 files changed, 80 insertions(+), 14 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/plugins/referint_test.py b/dirsrvtests/tests/suites/plugins/referint_test.py
|
||||
index 02b985767..fda602545 100644
|
||||
--- a/dirsrvtests/tests/suites/plugins/referint_test.py
|
||||
+++ b/dirsrvtests/tests/suites/plugins/referint_test.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2016 Red Hat, Inc.
|
||||
+# Copyright (C) 2021 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -12,13 +12,11 @@ Created on Dec 12, 2019
|
||||
@author: tbordaz
|
||||
'''
|
||||
import logging
|
||||
-import subprocess
|
||||
import pytest
|
||||
from lib389 import Entry
|
||||
-from lib389.utils import *
|
||||
-from lib389.plugins import *
|
||||
-from lib389._constants import *
|
||||
-from lib389.idm.user import UserAccounts, UserAccount
|
||||
+from lib389.plugins import ReferentialIntegrityPlugin
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
+from lib389.idm.user import UserAccounts
|
||||
from lib389.idm.group import Groups
|
||||
from lib389.topologies import topology_st as topo
|
||||
|
||||
@@ -29,21 +27,27 @@ log = logging.getLogger(__name__)
|
||||
ESCAPED_RDN_BASE = "foo\\,oo"
|
||||
def _user_get_dn(no):
|
||||
uid = '%s%d' % (ESCAPED_RDN_BASE, no)
|
||||
- dn = 'uid=%s,%s' % (uid, SUFFIX)
|
||||
+ dn = 'uid=%s,%s' % (uid, DEFAULT_SUFFIX)
|
||||
return (uid, dn)
|
||||
|
||||
def add_escaped_user(server, no):
|
||||
(uid, dn) = _user_get_dn(no)
|
||||
log.fatal('Adding user (%s): ' % dn)
|
||||
- server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'],
|
||||
- 'uid': [uid],
|
||||
- 'sn' : [uid],
|
||||
- 'cn' : [uid]})))
|
||||
+ users = UserAccounts(server, DEFAULT_SUFFIX, None)
|
||||
+ user_properties = {
|
||||
+ 'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson', 'posixAccount'],
|
||||
+ 'uid': uid,
|
||||
+ 'cn' : uid,
|
||||
+ 'sn' : uid,
|
||||
+ 'uidNumber' : '1000',
|
||||
+ 'gidNumber' : '2000',
|
||||
+ 'homeDirectory' : '/home/testuser',
|
||||
+ }
|
||||
+ users.create(properties=user_properties)
|
||||
return dn
|
||||
|
||||
-@pytest.mark.ds50020
|
||||
def test_referential_false_failure(topo):
|
||||
- """On MODRDN referential integrity can erronously fail
|
||||
+ """On MODRDN referential integrity can erroneously fail
|
||||
|
||||
:id: f77aeb80-c4c4-471b-8c1b-4733b714778b
|
||||
:setup: Standalone Instance
|
||||
@@ -100,6 +104,46 @@ def test_referential_false_failure(topo):
|
||||
inst.restart()
|
||||
|
||||
# Here if the bug is fixed, referential is able to update the member value
|
||||
- inst.rename_s(user1.dn, 'uid=new_test_user_1001', newsuperior=SUFFIX, delold=0)
|
||||
+ user1.rename('uid=new_test_user_1001', newsuperior=DEFAULT_SUFFIX, deloldrdn=False)
|
||||
|
||||
|
||||
+def test_invalid_referint_log(topo):
|
||||
+ """If there is an invalid log line in the referint log, make sure the server
|
||||
+ does not crash at startup
|
||||
+
|
||||
+ :id: 34807b5a-ab17-4281-ae48-4e3513e19145
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Set the referint log delay
|
||||
+ 2. Create invalid log
|
||||
+ 3. Start the server (no crash)
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topo.standalone
|
||||
+
|
||||
+ # Set delay - required for log parsing at server startup
|
||||
+ plugin = ReferentialIntegrityPlugin(inst)
|
||||
+ plugin.enable()
|
||||
+ plugin.set_update_delay('2')
|
||||
+ logfile = plugin.get_log_file()
|
||||
+ inst.restart()
|
||||
+
|
||||
+ # Create invalid log
|
||||
+ inst.stop()
|
||||
+ with open(logfile, 'w') as log_fh:
|
||||
+ log_fh.write("CRASH\n")
|
||||
+
|
||||
+ # Start the instance
|
||||
+ inst.start()
|
||||
+ assert inst.status()
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main("-s %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
|
||||
index fd5356d72..28240c1f6 100644
|
||||
--- a/ldap/servers/plugins/referint/referint.c
|
||||
+++ b/ldap/servers/plugins/referint/referint.c
|
||||
@@ -1447,6 +1447,13 @@ referint_thread_func(void *arg __attribute__((unused)))
|
||||
sdn = slapi_sdn_new_normdn_byref(ptoken);
|
||||
ptoken = ldap_utf8strtok_r(NULL, delimiter, &iter);
|
||||
|
||||
+ if (ptoken == NULL) {
|
||||
+ /* Invalid line in referint log, skip it */
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, REFERINT_PLUGIN_SUBSYSTEM,
|
||||
+ "Skipping invalid referint log line: (%s)\n", thisline);
|
||||
+ slapi_sdn_free(&sdn);
|
||||
+ continue;
|
||||
+ }
|
||||
if (!strcasecmp(ptoken, "NULL")) {
|
||||
tmprdn = NULL;
|
||||
} else {
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 2d88e60bd..b07e80022 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -518,6 +518,21 @@ class ReferentialIntegrityPlugin(Plugin):
|
||||
|
||||
self.set('referint-update-delay', str(value))
|
||||
|
||||
+ def get_log_file(self):
|
||||
+ """Get referint log file"""
|
||||
+
|
||||
+ return self.get_attr_val_utf8('referint-logfile')
|
||||
+
|
||||
+ def get_log_file_formatted(self):
|
||||
+ """Get referint log file"""
|
||||
+
|
||||
+ return self.display_attr('referint-logfile')
|
||||
+
|
||||
+ def set_log_file(self, value):
|
||||
+ """Set referint log file"""
|
||||
+
|
||||
+ self.set('referint-logfile', value)
|
||||
+
|
||||
def get_membership_attr(self, formatted=False):
|
||||
"""Get referint-membership-attr attribute"""
|
||||
|
||||
--
|
||||
2.31.1
|
||||
|
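For reference, a minimal pytest-style sketch (not part of the patch; the test name and assertions are illustrative only) of how the ReferentialIntegrityPlugin helpers added above (set_update_delay, get_log_file) are driven:

# Illustrative sketch only -- mirrors the usage in test_invalid_referint_log above.
import pytest
from lib389.topologies import topology_st
from lib389.plugins import ReferentialIntegrityPlugin

def test_referint_log_helpers(topology_st):
    inst = topology_st.standalone
    plugin = ReferentialIntegrityPlugin(inst)
    plugin.enable()
    plugin.set_update_delay('2')           # delayed mode, so the referint log file is used
    assert plugin.get_log_file() is not None
    inst.restart()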
@ -1,114 +0,0 @@
|
||||
From 964a153b420b26140e0bbddfbebb4a51aaa0e4ea Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Thu, 3 Jun 2021 15:16:22 +0000
|
||||
Subject: [PATCH 1/7] Issue 4791 - Missing dependency for RetroCL RFE
|
||||
|
||||
Description: The RetroCL exclude attribute RFE is dependent on functionality of the
EntryUUID bug fix, which didn't make it into the latest build. This breaks the
RetroCL exclude attr feature, so we need to provide a workaround.
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/4791
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/pull/4723
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/4224
|
||||
|
||||
Reviewed by: tbordaz, droideck (Thank you)
|
||||
---
|
||||
.../tests/suites/retrocl/basic_test.py | 6 ++--
|
||||
.../lib389/cli_conf/plugins/retrochangelog.py | 35 +++++++++++++++++--
|
||||
2 files changed, 36 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
|
||||
index 112c73cb9..f3bc50f29 100644
|
||||
--- a/dirsrvtests/tests/suites/retrocl/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
|
||||
@@ -17,7 +17,7 @@ from lib389.utils import *
|
||||
from lib389.tasks import *
|
||||
from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
|
||||
from lib389.cli_base.dsrc import dsrc_arg_concat
|
||||
-from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add
|
||||
+from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add_attr
|
||||
from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
@@ -122,7 +122,7 @@ def test_retrocl_exclude_attr_add(topology_st):
|
||||
args.bindpw = None
|
||||
args.prompt = False
|
||||
args.exclude_attrs = ATTR_HOMEPHONE
|
||||
- args.func = retrochangelog_add
|
||||
+ args.func = retrochangelog_add_attr
|
||||
dsrc_inst = dsrc_arg_concat(args, None)
|
||||
inst = connect_instance(dsrc_inst, False, args)
|
||||
result = args.func(inst, None, log, args)
|
||||
@@ -255,7 +255,7 @@ def test_retrocl_exclude_attr_mod(topology_st):
|
||||
args.bindpw = None
|
||||
args.prompt = False
|
||||
args.exclude_attrs = ATTR_CARLICENSE
|
||||
- args.func = retrochangelog_add
|
||||
+ args.func = retrochangelog_add_attr
|
||||
dsrc_inst = dsrc_arg_concat(args, None)
|
||||
inst = connect_instance(dsrc_inst, False, args)
|
||||
result = args.func(inst, None, log, args)
|
||||
diff --git a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py
|
||||
index 9940c6532..160fbb82d 100644
|
||||
--- a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py
|
||||
+++ b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py
|
||||
@@ -6,8 +6,13 @@
|
||||
# See LICENSE for details.
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
|
||||
+# JC Work around for missing dependency on https://github.com/389ds/389-ds-base/pull/4344
|
||||
+import ldap
|
||||
+
|
||||
from lib389.plugins import RetroChangelogPlugin
|
||||
-from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit
|
||||
+# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344
|
||||
+# from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add_attr
|
||||
+from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, _args_to_attrs
|
||||
|
||||
arg_to_attr = {
|
||||
'is_replicated': 'isReplicated',
|
||||
@@ -18,12 +23,38 @@ arg_to_attr = {
|
||||
'exclude_attrs': 'nsslapd-exclude-attrs'
|
||||
}
|
||||
|
||||
-
|
||||
def retrochangelog_edit(inst, basedn, log, args):
|
||||
log = log.getChild('retrochangelog_edit')
|
||||
plugin = RetroChangelogPlugin(inst)
|
||||
generic_object_edit(plugin, log, args, arg_to_attr)
|
||||
|
||||
+# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344
|
||||
+def retrochangelog_add_attr(inst, basedn, log, args):
|
||||
+ log = log.getChild('retrochangelog_add_attr')
|
||||
+ plugin = RetroChangelogPlugin(inst)
|
||||
+ generic_object_add_attr(plugin, log, args, arg_to_attr)
|
||||
+
|
||||
+# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344
|
||||
+def generic_object_add_attr(dsldap_object, log, args, arg_to_attr):
|
||||
+ """Add an attribute to the entry. This differs to 'edit' as edit uses replace,
|
||||
+ and this allows multivalues to be added.
|
||||
+
|
||||
+ dsldap_object should be a single instance of DSLdapObject with a set dn
|
||||
+ """
|
||||
+ log = log.getChild('generic_object_add_attr')
|
||||
+ # Gather the attributes
|
||||
+ attrs = _args_to_attrs(args, arg_to_attr)
|
||||
+
|
||||
+ modlist = []
|
||||
+ for attr, value in attrs.items():
|
||||
+ if not isinstance(value, list):
|
||||
+ value = [value]
|
||||
+ modlist.append((ldap.MOD_ADD, attr, value))
|
||||
+ if len(modlist) > 0:
|
||||
+ dsldap_object.apply_mods(modlist)
|
||||
+ log.info("Successfully changed the %s", dsldap_object.dn)
|
||||
+ else:
|
||||
+ raise ValueError("There is nothing to set in the %s plugin entry" % dsldap_object.dn)
|
||||
|
||||
def _add_parser_args(parser):
|
||||
parser.add_argument('--is-replicated', choices=['TRUE', 'FALSE'], type=str.upper,
|
||||
--
|
||||
2.31.1
|
||||
|
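For reference, a minimal python-ldap sketch (the server URI, credentials and plugin DN are assumptions) of the behaviour generic_object_add_attr() relies on: MOD_ADD appends to a multi-valued attribute such as nsslapd-exclude-attrs, whereas the MOD_REPLACE used by generic_object_edit() would overwrite existing values:

# Illustrative sketch only; connection details and DN are assumptions.
import ldap

conn = ldap.initialize("ldapi://%2Fvar%2Frun%2Fslapd-localhost.socket")
conn.simple_bind_s("cn=Directory Manager", "password")

plugin_dn = "cn=Retro Changelog Plugin,cn=plugins,cn=config"
# Two MOD_ADD operations accumulate values; a MOD_REPLACE would discard the first one.
conn.modify_s(plugin_dn, [(ldap.MOD_ADD, "nsslapd-exclude-attrs", [b"homePhone"])])
conn.modify_s(plugin_dn, [(ldap.MOD_ADD, "nsslapd-exclude-attrs", [b"carLicense"])])
conn.unbind_s()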
@ -1,642 +0,0 @@
|
||||
From d2ac7e98d53cfe6c74c99ddf3504b1072418f05a Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Thu, 11 Mar 2021 10:12:46 -0500
|
||||
Subject: [PATCH] Issue 4656 - remove problematic language from ds-replcheck
|
||||
|
||||
Description: remove master from ds-replcheck and replace it with supplier
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4656
|
||||
|
||||
Reviewed by: mreynolds
|
||||
|
||||
---
|
||||
ldap/admin/src/scripts/ds-replcheck | 202 ++++++++++++++--------------
|
||||
1 file changed, 101 insertions(+), 101 deletions(-)
|
||||
|
||||
diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
|
||||
index 169496e8f..f411f357a 100755
|
||||
--- a/ldap/admin/src/scripts/ds-replcheck
|
||||
+++ b/ldap/admin/src/scripts/ds-replcheck
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2020 Red Hat, Inc.
|
||||
+# Copyright (C) 2021 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -63,7 +63,7 @@ def remove_entry(rentries, dn):
|
||||
def get_ruv_time(ruv, rid):
|
||||
"""Take a RUV element (nsds50ruv attribute) and extract the timestamp from maxcsn
|
||||
:param ruv - A lsit of RUV elements
|
||||
- :param rid - The rid of the master to extractthe maxcsn time from
|
||||
+ :param rid - The rid of the supplier to extract the maxcsn time from
|
||||
:return: The time in seconds of the maxcsn, or 0 if there is no maxcsn, or -1 if
|
||||
the rid was not found
|
||||
"""
|
||||
@@ -213,22 +213,22 @@ def get_ruv_state(opts):
|
||||
:param opts - all the script options
|
||||
:return - A text description of the replicaton state
|
||||
"""
|
||||
- mtime = get_ruv_time(opts['master_ruv'], opts['rid'])
|
||||
+ mtime = get_ruv_time(opts['supplier_ruv'], opts['rid'])
|
||||
rtime = get_ruv_time(opts['replica_ruv'], opts['rid'])
|
||||
if mtime == -1:
|
||||
- repl_state = "Replication State: Replica ID ({}) not found in Master's RUV".format(opts['rid'])
|
||||
+ repl_state = "Replication State: Replica ID ({}) not found in Supplier's RUV".format(opts['rid'])
|
||||
elif rtime == -1:
|
||||
repl_state = "Replication State: Replica ID ({}) not found in Replica's RUV (not initialized?)".format(opts['rid'])
|
||||
elif mtime == 0:
|
||||
- repl_state = "Replication State: Master has not seen any updates"
|
||||
+ repl_state = "Replication State: Supplier has not seen any updates"
|
||||
elif rtime == 0:
|
||||
- repl_state = "Replication State: Replica has not seen any changes from the Master"
|
||||
+ repl_state = "Replication State: Replica has not seen any changes from the Supplier"
|
||||
elif mtime > rtime:
|
||||
- repl_state = "Replication State: Replica is behind Master by: {} seconds".format(mtime - rtime)
|
||||
+ repl_state = "Replication State: Replica is behind Supplier by: {} seconds".format(mtime - rtime)
|
||||
elif mtime < rtime:
|
||||
- repl_state = "Replication State: Replica is ahead of Master by: {} seconds".format(rtime - mtime)
|
||||
+ repl_state = "Replication State: Replica is ahead of Supplier by: {} seconds".format(rtime - mtime)
|
||||
else:
|
||||
- repl_state = "Replication State: Master and Replica are in perfect synchronization"
|
||||
+ repl_state = "Replication State: Supplier and Replica are in perfect synchronization"
|
||||
|
||||
return repl_state
|
||||
|
||||
@@ -238,11 +238,11 @@ def get_ruv_report(opts):
|
||||
:param opts - all the script options
|
||||
:return - A text blob to display in the report
|
||||
"""
|
||||
- opts['master_ruv'].sort()
|
||||
+ opts['supplier_ruv'].sort()
|
||||
opts['replica_ruv'].sort()
|
||||
|
||||
- report = "Master RUV:\n"
|
||||
- for element in opts['master_ruv']:
|
||||
+ report = "Supplier RUV:\n"
|
||||
+ for element in opts['supplier_ruv']:
|
||||
report += " %s\n" % (element)
|
||||
report += "\nReplica RUV:\n"
|
||||
for element in opts['replica_ruv']:
|
||||
@@ -521,7 +521,7 @@ def get_ldif_ruv(LDIF, opts):
|
||||
|
||||
def cmp_entry(mentry, rentry, opts):
|
||||
"""Compare the two entries, and return a "diff map"
|
||||
- :param mentry - A Master entry
|
||||
+ :param mentry - A Supplier entry
|
||||
:param rentry - A Replica entry
|
||||
:param opts - A Dict of the scripts options
|
||||
:return - A Dict of the differences in the entry, or None
|
||||
@@ -536,7 +536,7 @@ def cmp_entry(mentry, rentry, opts):
|
||||
mlist = list(mentry.data.keys())
|
||||
|
||||
#
|
||||
- # Check master
|
||||
+ # Check Supplier
|
||||
#
|
||||
for mattr in mlist:
|
||||
if mattr in opts['ignore']:
|
||||
@@ -555,7 +555,7 @@ def cmp_entry(mentry, rentry, opts):
|
||||
if not found:
|
||||
diff['missing'].append("")
|
||||
found = True
|
||||
- diff['missing'].append(" - Master's State Info: %s" % (val))
|
||||
+ diff['missing'].append(" - Supplier's State Info: %s" % (val))
|
||||
diff['missing'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
|
||||
else:
|
||||
# No state info, just move on
|
||||
@@ -566,18 +566,18 @@ def cmp_entry(mentry, rentry, opts):
|
||||
if report_conflict(rentry, mattr, opts) and report_conflict(mentry, mattr, opts):
|
||||
diff['diff'].append(" - Attribute '%s' is different:" % mattr)
|
||||
if 'nscpentrywsi' in mentry.data:
|
||||
- # Process Master
|
||||
+ # Process Supplier
|
||||
found = False
|
||||
for val in mentry.data['nscpentrywsi']:
|
||||
if val.lower().startswith(mattr + ';'):
|
||||
if not found:
|
||||
- diff['diff'].append(" Master:")
|
||||
+ diff['diff'].append(" Supplier:")
|
||||
diff['diff'].append(" - Value: %s" % (val.split(':')[1].lstrip()))
|
||||
diff['diff'].append(" - State Info: %s" % (val))
|
||||
diff['diff'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
|
||||
found = True
|
||||
if not found:
|
||||
- diff['diff'].append(" Master: ")
|
||||
+ diff['diff'].append(" Supplier: ")
|
||||
for val in mentry.data[mattr]:
|
||||
# This is an "origin" value which means it's never been
|
||||
# updated since replication was set up. So its the
|
||||
@@ -605,7 +605,7 @@ def cmp_entry(mentry, rentry, opts):
|
||||
diff['diff'].append("")
|
||||
else:
|
||||
# no state info, report what we got
|
||||
- diff['diff'].append(" Master: ")
|
||||
+ diff['diff'].append(" Supplier: ")
|
||||
for val in mentry.data[mattr]:
|
||||
diff['diff'].append(" - %s: %s" % (mattr, val))
|
||||
diff['diff'].append(" Replica: ")
|
||||
@@ -622,9 +622,9 @@ def cmp_entry(mentry, rentry, opts):
|
||||
continue
|
||||
|
||||
if rattr not in mlist:
|
||||
- # Master is missing the attribute
|
||||
+ # Supplier is missing the attribute
|
||||
if report_conflict(rentry, rattr, opts):
|
||||
- diff['missing'].append(" - Master missing attribute: \"%s\"" % (rattr))
|
||||
+ diff['missing'].append(" - Supplier missing attribute: \"%s\"" % (rattr))
|
||||
diff_count += 1
|
||||
if 'nscpentrywsi' in rentry.data:
|
||||
found = False
|
||||
@@ -663,7 +663,7 @@ def do_offline_report(opts, output_file=None):
|
||||
try:
|
||||
MLDIF = open(opts['mldif'], "r")
|
||||
except Exception as e:
|
||||
- print('Failed to open Master LDIF: ' + str(e))
|
||||
+ print('Failed to open Supplier LDIF: ' + str(e))
|
||||
return
|
||||
|
||||
try:
|
||||
@@ -676,10 +676,10 @@ def do_offline_report(opts, output_file=None):
|
||||
# Verify LDIF Files
|
||||
try:
|
||||
if opts['verbose']:
|
||||
- print("Validating Master ldif file ({})...".format(opts['mldif']))
|
||||
+ print("Validating Supplier ldif file ({})...".format(opts['mldif']))
|
||||
LDIFRecordList(MLDIF).parse()
|
||||
except ValueError:
|
||||
- print('Master LDIF file in invalid, aborting...')
|
||||
+ print('Supplier LDIF file in invalid, aborting...')
|
||||
MLDIF.close()
|
||||
RLDIF.close()
|
||||
return
|
||||
@@ -696,34 +696,34 @@ def do_offline_report(opts, output_file=None):
|
||||
# Get all the dn's, and entry counts
|
||||
if opts['verbose']:
|
||||
print ("Gathering all the DN's...")
|
||||
- master_dns = get_dns(MLDIF, opts['mldif'], opts)
|
||||
+ supplier_dns = get_dns(MLDIF, opts['mldif'], opts)
|
||||
replica_dns = get_dns(RLDIF, opts['rldif'], opts)
|
||||
- if master_dns is None or replica_dns is None:
|
||||
+ if supplier_dns is None or replica_dns is None:
|
||||
print("Aborting scan...")
|
||||
MLDIF.close()
|
||||
RLDIF.close()
|
||||
sys.exit(1)
|
||||
- m_count = len(master_dns)
|
||||
+ m_count = len(supplier_dns)
|
||||
r_count = len(replica_dns)
|
||||
|
||||
# Get DB RUV
|
||||
if opts['verbose']:
|
||||
print ("Gathering the database RUV's...")
|
||||
- opts['master_ruv'] = get_ldif_ruv(MLDIF, opts)
|
||||
+ opts['supplier_ruv'] = get_ldif_ruv(MLDIF, opts)
|
||||
opts['replica_ruv'] = get_ldif_ruv(RLDIF, opts)
|
||||
|
||||
- """ Compare the master entries with the replica's. Take our list of dn's from
|
||||
- the master ldif and get that entry( dn) from the master and replica ldif. In
|
||||
+ """ Compare the Supplier entries with the replica's. Take our list of dn's from
|
||||
+ the Supplier ldif and get that entry( dn) from the Supplier and replica ldif. In
|
||||
this phase we keep keep track of conflict/tombstone counts, and we check for
|
||||
missing entries and entry differences. We only need to do the entry diff
|
||||
checking in this phase - we do not need to do it when process the replica dn's
|
||||
because if the entry exists in both LDIF's then we already checked or diffs
|
||||
- while processing the master dn's.
|
||||
+ while processing the Supplier dn's.
|
||||
"""
|
||||
if opts['verbose']:
|
||||
- print ("Comparing Master to Replica...")
|
||||
+ print ("Comparing Supplier to Replica...")
|
||||
missing = False
|
||||
- for dn in master_dns:
|
||||
+ for dn in supplier_dns:
|
||||
mresult = ldif_search(MLDIF, dn)
|
||||
if mresult['entry'] is None and mresult['conflict'] is None and not mresult['tombstone']:
|
||||
# Try from the beginning
|
||||
@@ -736,7 +736,7 @@ def do_offline_report(opts, output_file=None):
|
||||
rresult['conflict'] is not None or rresult['tombstone']):
|
||||
""" We can safely remove this DN from the replica dn list as it
|
||||
does not need to be checked again. This also speeds things up
|
||||
- when doing the replica vs master phase.
|
||||
+ when doing the replica vs Supplier phase.
|
||||
"""
|
||||
replica_dns.remove(dn)
|
||||
|
||||
@@ -766,7 +766,7 @@ def do_offline_report(opts, output_file=None):
|
||||
missing_report += (' Entries missing on Replica:\n')
|
||||
missing = True
|
||||
if mresult['entry'] and 'createtimestamp' in mresult['entry'].data:
|
||||
- missing_report += (' - %s (Created on Master at: %s)\n' %
|
||||
+ missing_report += (' - %s (Created on Supplier at: %s)\n' %
|
||||
(dn, convert_timestamp(mresult['entry'].data['createtimestamp'][0])))
|
||||
else:
|
||||
missing_report += (' - %s\n' % dn)
|
||||
@@ -791,7 +791,7 @@ def do_offline_report(opts, output_file=None):
|
||||
remaining conflict & tombstone entries as well.
|
||||
"""
|
||||
if opts['verbose']:
|
||||
- print ("Comparing Replica to Master...")
|
||||
+ print ("Comparing Replica to Supplier...")
|
||||
MLDIF.seek(0)
|
||||
RLDIF.seek(0)
|
||||
missing = False
|
||||
@@ -811,7 +811,7 @@ def do_offline_report(opts, output_file=None):
|
||||
if mresult['entry'] is None and mresult['glue'] is None:
|
||||
MLDIF.seek(rresult['idx']) # Set the LDIF cursor/index to the last good line
|
||||
if not missing:
|
||||
- missing_report += (' Entries missing on Master:\n')
|
||||
+ missing_report += (' Entries missing on Supplier:\n')
|
||||
missing = True
|
||||
if rresult['entry'] and 'createtimestamp' in rresult['entry'].data:
|
||||
missing_report += (' - %s (Created on Replica at: %s)\n' %
|
||||
@@ -837,12 +837,12 @@ def do_offline_report(opts, output_file=None):
|
||||
final_report += get_ruv_report(opts)
|
||||
final_report += ('Entry Counts\n')
|
||||
final_report += ('=====================================================\n\n')
|
||||
- final_report += ('Master: %d\n' % (m_count))
|
||||
+ final_report += ('Supplier: %d\n' % (m_count))
|
||||
final_report += ('Replica: %d\n\n' % (r_count))
|
||||
|
||||
final_report += ('\nTombstones\n')
|
||||
final_report += ('=====================================================\n\n')
|
||||
- final_report += ('Master: %d\n' % (mtombstones))
|
||||
+ final_report += ('Supplier: %d\n' % (mtombstones))
|
||||
final_report += ('Replica: %d\n' % (rtombstones))
|
||||
|
||||
final_report += get_conflict_report(mconflicts, rconflicts, opts['conflicts'])
|
||||
@@ -859,9 +859,9 @@ def do_offline_report(opts, output_file=None):
|
||||
final_report += ('\nResult\n')
|
||||
final_report += ('=====================================================\n\n')
|
||||
if missing_report == "" and len(diff_report) == 0:
|
||||
- final_report += ('No replication differences between Master and Replica\n')
|
||||
+ final_report += ('No replication differences between Supplier and Replica\n')
|
||||
else:
|
||||
- final_report += ('There are replication differences between Master and Replica\n')
|
||||
+ final_report += ('There are replication differences between Supplier and Replica\n')
|
||||
|
||||
if output_file:
|
||||
output_file.write(final_report)
|
||||
@@ -871,8 +871,8 @@ def do_offline_report(opts, output_file=None):
|
||||
|
||||
def check_for_diffs(mentries, mglue, rentries, rglue, report, opts):
|
||||
"""Online mode only - Check for diffs, return the updated report
|
||||
- :param mentries - Master entries
|
||||
- :param mglue - Master glue entries
|
||||
+ :param mentries - Supplier entries
|
||||
+ :param mglue - Supplier glue entries
|
||||
:param rentries - Replica entries
|
||||
:param rglue - Replica glue entries
|
||||
:param report - A Dict of the entire report
|
||||
@@ -947,8 +947,8 @@ def validate_suffix(ldapnode, suffix, hostname):
|
||||
# Check suffix is replicated
|
||||
try:
|
||||
replica_filter = "(&(objectclass=nsds5replica)(nsDS5ReplicaRoot=%s))" % suffix
|
||||
- master_replica = ldapnode.search_s("cn=config",ldap.SCOPE_SUBTREE,replica_filter)
|
||||
- if (len(master_replica) != 1):
|
||||
+ supplier_replica = ldapnode.search_s("cn=config",ldap.SCOPE_SUBTREE,replica_filter)
|
||||
+ if (len(supplier_replica) != 1):
|
||||
print("Error: Failed to validate suffix in {}. {} is not replicated.".format(hostname, suffix))
|
||||
return False
|
||||
except ldap.LDAPError as e:
|
||||
@@ -969,7 +969,7 @@ def connect_to_replicas(opts):
|
||||
muri = "%s://%s" % (opts['mprotocol'], opts['mhost'].replace("/", "%2f"))
|
||||
else:
|
||||
muri = "%s://%s:%s/" % (opts['mprotocol'], opts['mhost'], opts['mport'])
|
||||
- master = SimpleLDAPObject(muri)
|
||||
+ supplier = SimpleLDAPObject(muri)
|
||||
|
||||
if opts['rprotocol'].lower() == 'ldapi':
|
||||
ruri = "%s://%s" % (opts['rprotocol'], opts['rhost'].replace("/", "%2f"))
|
||||
@@ -978,23 +978,23 @@ def connect_to_replicas(opts):
|
||||
replica = SimpleLDAPObject(ruri)
|
||||
|
||||
# Set timeouts
|
||||
- master.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
|
||||
- master.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
|
||||
+ supplier.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
|
||||
+ supplier.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
|
||||
replica.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
|
||||
replica.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
|
||||
|
||||
# Setup Secure Connection
|
||||
if opts['certdir'] is not None:
|
||||
- # Setup Master
|
||||
+ # Setup Supplier
|
||||
if opts['mprotocol'] != LDAPI:
|
||||
- master.set_option(ldap.OPT_X_TLS_CACERTDIR, opts['certdir'])
|
||||
- master.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD)
|
||||
+ supplier.set_option(ldap.OPT_X_TLS_CACERTDIR, opts['certdir'])
|
||||
+ supplier.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD)
|
||||
if opts['mprotocol'] == LDAP:
|
||||
# Do StartTLS
|
||||
try:
|
||||
- master.start_tls_s()
|
||||
+ supplier.start_tls_s()
|
||||
except ldap.LDAPError as e:
|
||||
- print('TLS negotiation failed on Master: {}'.format(str(e)))
|
||||
+ print('TLS negotiation failed on Supplier: {}'.format(str(e)))
|
||||
exit(1)
|
||||
|
||||
# Setup Replica
|
||||
@@ -1006,17 +1006,17 @@ def connect_to_replicas(opts):
|
||||
try:
|
||||
replica.start_tls_s()
|
||||
except ldap.LDAPError as e:
|
||||
- print('TLS negotiation failed on Master: {}'.format(str(e)))
|
||||
+ print('TLS negotiation failed on Supplier: {}'.format(str(e)))
|
||||
exit(1)
|
||||
|
||||
- # Open connection to master
|
||||
+ # Open connection to Supplier
|
||||
try:
|
||||
- master.simple_bind_s(opts['binddn'], opts['bindpw'])
|
||||
+ supplier.simple_bind_s(opts['binddn'], opts['bindpw'])
|
||||
except ldap.SERVER_DOWN as e:
|
||||
print(f"Cannot connect to {muri} ({str(e)})")
|
||||
sys.exit(1)
|
||||
except ldap.LDAPError as e:
|
||||
- print("Error: Failed to authenticate to Master: ({}). "
|
||||
+ print("Error: Failed to authenticate to Supplier: ({}). "
|
||||
"Please check your credentials and LDAP urls are correct.".format(str(e)))
|
||||
sys.exit(1)
|
||||
|
||||
@@ -1034,7 +1034,7 @@ def connect_to_replicas(opts):
|
||||
# Validate suffix
|
||||
if opts['verbose']:
|
||||
print ("Validating suffix ...")
|
||||
- if not validate_suffix(master, opts['suffix'], opts['mhost']):
|
||||
+ if not validate_suffix(supplier, opts['suffix'], opts['mhost']):
|
||||
sys.exit(1)
|
||||
|
||||
if not validate_suffix(replica,opts['suffix'], opts['rhost']):
|
||||
@@ -1042,16 +1042,16 @@ def connect_to_replicas(opts):
|
||||
|
||||
# Get the RUVs
|
||||
if opts['verbose']:
|
||||
- print ("Gathering Master's RUV...")
|
||||
+ print ("Gathering Supplier's RUV...")
|
||||
try:
|
||||
- master_ruv = master.search_s(opts['suffix'], ldap.SCOPE_SUBTREE, RUV_FILTER, ['nsds50ruv'])
|
||||
- if len(master_ruv) > 0:
|
||||
- opts['master_ruv'] = ensure_list_str(master_ruv[0][1]['nsds50ruv'])
|
||||
+ supplier_ruv = supplier.search_s(opts['suffix'], ldap.SCOPE_SUBTREE, RUV_FILTER, ['nsds50ruv'])
|
||||
+ if len(supplier_ruv) > 0:
|
||||
+ opts['supplier_ruv'] = ensure_list_str(supplier_ruv[0][1]['nsds50ruv'])
|
||||
else:
|
||||
- print("Error: Master does not have an RUV entry")
|
||||
+ print("Error: Supplier does not have an RUV entry")
|
||||
sys.exit(1)
|
||||
except ldap.LDAPError as e:
|
||||
- print("Error: Failed to get Master RUV entry: {}".format(str(e)))
|
||||
+ print("Error: Failed to get Supplier RUV entry: {}".format(str(e)))
|
||||
sys.exit(1)
|
||||
|
||||
if opts['verbose']:
|
||||
@@ -1067,12 +1067,12 @@ def connect_to_replicas(opts):
|
||||
print("Error: Failed to get Replica RUV entry: {}".format(str(e)))
|
||||
sys.exit(1)
|
||||
|
||||
- # Get the master RID
|
||||
+ # Get the Supplier RID
|
||||
if opts['verbose']:
|
||||
- print("Getting Master's replica ID")
|
||||
+ print("Getting Supplier's replica ID")
|
||||
try:
|
||||
search_filter = "(&(objectclass=nsds5Replica)(nsDS5ReplicaRoot={})(nsDS5ReplicaId=*))".format(opts['suffix'])
|
||||
- replica_entry = master.search_s("cn=config", ldap.SCOPE_SUBTREE, search_filter)
|
||||
+ replica_entry = supplier.search_s("cn=config", ldap.SCOPE_SUBTREE, search_filter)
|
||||
if len(replica_entry) > 0:
|
||||
opts['rid'] = ensure_int(replica_entry[0][1]['nsDS5ReplicaId'][0])
|
||||
else:
|
||||
@@ -1081,7 +1081,7 @@ def connect_to_replicas(opts):
|
||||
print("Error: Failed to get Replica entry: {}".format(str(e)))
|
||||
sys.exit(1)
|
||||
|
||||
- return (master, replica, opts)
|
||||
+ return (supplier, replica, opts)
|
||||
|
||||
|
||||
def print_online_report(report, opts, output_file):
|
||||
@@ -1104,11 +1104,11 @@ def print_online_report(report, opts, output_file):
|
||||
final_report += get_ruv_report(opts)
|
||||
final_report += ('Entry Counts\n')
|
||||
final_report += ('=====================================================\n\n')
|
||||
- final_report += ('Master: %d\n' % (report['m_count']))
|
||||
+ final_report += ('Supplier: %d\n' % (report['m_count']))
|
||||
final_report += ('Replica: %d\n\n' % (report['r_count']))
|
||||
final_report += ('\nTombstones\n')
|
||||
final_report += ('=====================================================\n\n')
|
||||
- final_report += ('Master: %d\n' % (report['mtombstones']))
|
||||
+ final_report += ('Supplier: %d\n' % (report['mtombstones']))
|
||||
final_report += ('Replica: %d\n' % (report['rtombstones']))
|
||||
final_report += report['conflict']
|
||||
missing = False
|
||||
@@ -1121,7 +1121,7 @@ def print_online_report(report, opts, output_file):
|
||||
final_report += (' Entries missing on Replica:\n')
|
||||
for entry in report['r_missing']:
|
||||
if 'createtimestamp' in entry.data:
|
||||
- final_report += (' - %s (Created on Master at: %s)\n' %
|
||||
+ final_report += (' - %s (Created on Supplier at: %s)\n' %
|
||||
(entry.dn, convert_timestamp(entry.data['createtimestamp'][0])))
|
||||
else:
|
||||
final_report += (' - %s\n' % (entry.dn))
|
||||
@@ -1129,7 +1129,7 @@ def print_online_report(report, opts, output_file):
|
||||
if m_missing > 0:
|
||||
if r_missing > 0:
|
||||
final_report += ('\n')
|
||||
- final_report += (' Entries missing on Master:\n')
|
||||
+ final_report += (' Entries missing on Supplier:\n')
|
||||
for entry in report['m_missing']:
|
||||
if 'createtimestamp' in entry.data:
|
||||
final_report += (' - %s (Created on Replica at: %s)\n' %
|
||||
@@ -1146,9 +1146,9 @@ def print_online_report(report, opts, output_file):
|
||||
final_report += ('\nResult\n')
|
||||
final_report += ('=====================================================\n\n')
|
||||
if not missing and len(report['diff']) == 0:
|
||||
- final_report += ('No replication differences between Master and Replica\n')
|
||||
+ final_report += ('No replication differences between Supplier and Replica\n')
|
||||
else:
|
||||
- final_report += ('There are replication differences between Master and Replica\n')
|
||||
+ final_report += ('There are replication differences between Supplier and Replica\n')
|
||||
|
||||
if output_file:
|
||||
output_file.write(final_report)
|
||||
@@ -1170,7 +1170,7 @@ def remove_state_info(entry):
|
||||
|
||||
def get_conflict_report(mentries, rentries, verbose):
|
||||
"""Gather the conflict entry dn's for each replica
|
||||
- :param mentries - Master entries
|
||||
+ :param mentries - Supplier entries
|
||||
:param rentries - Replica entries
|
||||
:param verbose - verbose logging
|
||||
:return - A text blob to dispaly in the report
|
||||
@@ -1197,7 +1197,7 @@ def get_conflict_report(mentries, rentries, verbose):
|
||||
report = "\n\nConflict Entries\n"
|
||||
report += "=====================================================\n\n"
|
||||
if len(m_conflicts) > 0:
|
||||
- report += ('Master Conflict Entries: %d\n' % (len(m_conflicts)))
|
||||
+ report += ('Supplier Conflict Entries: %d\n' % (len(m_conflicts)))
|
||||
if verbose:
|
||||
for entry in m_conflicts:
|
||||
report += ('\n - %s\n' % (entry['dn']))
|
||||
@@ -1239,8 +1239,8 @@ def do_online_report(opts, output_file=None):
|
||||
rconflicts = []
|
||||
mconflicts = []
|
||||
|
||||
- # Fire off paged searches on Master and Replica
|
||||
- master, replica, opts = connect_to_replicas(opts)
|
||||
+ # Fire off paged searches on Supplier and Replica
|
||||
+ supplier, replica, opts = connect_to_replicas(opts)
|
||||
|
||||
if opts['verbose']:
|
||||
print('Start searching and comparing...')
|
||||
@@ -1248,12 +1248,12 @@ def do_online_report(opts, output_file=None):
|
||||
controls = [paged_ctrl]
|
||||
req_pr_ctrl = controls[0]
|
||||
try:
|
||||
- master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
|
||||
- "(|(objectclass=*)(objectclass=ldapsubentry)(objectclass=nstombstone))",
|
||||
- ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'],
|
||||
- serverctrls=controls)
|
||||
+ supplier_msgid = supplier.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
|
||||
+ "(|(objectclass=*)(objectclass=ldapsubentry)(objectclass=nstombstone))",
|
||||
+ ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'],
|
||||
+ serverctrls=controls)
|
||||
except ldap.LDAPError as e:
|
||||
- print("Error: Failed to get Master entries: %s", str(e))
|
||||
+ print("Error: Failed to get Supplier entries: %s", str(e))
|
||||
sys.exit(1)
|
||||
try:
|
||||
replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
|
||||
@@ -1268,11 +1268,11 @@ def do_online_report(opts, output_file=None):
|
||||
while not m_done or not r_done:
|
||||
try:
|
||||
if not m_done:
|
||||
- m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid)
|
||||
+ m_rtype, m_rdata, m_rmsgid, m_rctrls = supplier.result3(supplier_msgid)
|
||||
elif not r_done:
|
||||
m_rdata = []
|
||||
except ldap.LDAPError as e:
|
||||
- print("Error: Problem getting the results from the master: %s", str(e))
|
||||
+ print("Error: Problem getting the results from the Supplier: %s", str(e))
|
||||
sys.exit(1)
|
||||
try:
|
||||
if not r_done:
|
||||
@@ -1299,7 +1299,7 @@ def do_online_report(opts, output_file=None):
|
||||
report, opts)
|
||||
|
||||
if not m_done:
|
||||
- # Master
|
||||
+ # Supplier
|
||||
m_pctrls = [
|
||||
c
|
||||
for c in m_rctrls
|
||||
@@ -1310,11 +1310,11 @@ def do_online_report(opts, output_file=None):
|
||||
try:
|
||||
# Copy cookie from response control to request control
|
||||
req_pr_ctrl.cookie = m_pctrls[0].cookie
|
||||
- master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
|
||||
+ supplier_msgid = supplier.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
|
||||
"(|(objectclass=*)(objectclass=ldapsubentry))",
|
||||
['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
|
||||
except ldap.LDAPError as e:
|
||||
- print("Error: Problem searching the master: %s", str(e))
|
||||
+ print("Error: Problem searching the Supplier: %s", str(e))
|
||||
sys.exit(1)
|
||||
else:
|
||||
m_done = True # No more pages available
|
||||
@@ -1354,7 +1354,7 @@ def do_online_report(opts, output_file=None):
|
||||
print_online_report(report, opts, output_file)
|
||||
|
||||
# unbind
|
||||
- master.unbind_s()
|
||||
+ supplier.unbind_s()
|
||||
replica.unbind_s()
|
||||
|
||||
|
||||
@@ -1367,18 +1367,18 @@ def init_online_params(args):
|
||||
|
||||
# Make sure the URLs are different
|
||||
if args.murl == args.rurl:
|
||||
- print("Master and Replica LDAP URLs are the same, they must be different")
|
||||
+ print("Supplier and Replica LDAP URLs are the same, they must be different")
|
||||
sys.exit(1)
|
||||
|
||||
- # Parse Master url
|
||||
+ # Parse Supplier url
|
||||
if not ldapurl.isLDAPUrl(args.murl):
|
||||
- print("Master LDAP URL is invalid")
|
||||
+ print("Supplier LDAP URL is invalid")
|
||||
sys.exit(1)
|
||||
murl = ldapurl.LDAPUrl(args.murl)
|
||||
if murl.urlscheme in VALID_PROTOCOLS:
|
||||
opts['mprotocol'] = murl.urlscheme
|
||||
else:
|
||||
- print('Unsupported ldap url protocol (%s) for Master, please use "ldaps" or "ldap"' %
|
||||
+ print('Unsupported ldap url protocol (%s) for Supplier, please use "ldaps" or "ldap"' %
|
||||
murl.urlscheme)
|
||||
sys.exit(1)
|
||||
|
||||
@@ -1520,7 +1520,7 @@ def offline_report(args):
|
||||
print ("LDIF file ({}) is empty".format(ldif_dir))
|
||||
sys.exit(1)
|
||||
if opts['mldif'] == opts['rldif']:
|
||||
- print("The Master and Replica LDIF files must be different")
|
||||
+ print("The Supplier and Replica LDIF files must be different")
|
||||
sys.exit(1)
|
||||
|
||||
OUTPUT_FILE = None
|
||||
@@ -1547,7 +1547,7 @@ def get_state(args):
|
||||
"""Just do the RUV comparision
|
||||
"""
|
||||
opts = init_online_params(args)
|
||||
- master, replica, opts = connect_to_replicas(opts)
|
||||
+ supplier, replica, opts = connect_to_replicas(opts)
|
||||
print(get_ruv_state(opts))
|
||||
|
||||
|
||||
@@ -1569,10 +1569,10 @@ def main():
|
||||
# Get state
|
||||
state_parser = subparsers.add_parser('state', help="Get the current replicaton state between two replicas")
|
||||
state_parser.set_defaults(func=get_state)
|
||||
- state_parser.add_argument('-m', '--master-url', help='The LDAP URL for the Master server',
|
||||
- dest='murl', default=None, required=True)
|
||||
+ state_parser.add_argument('-m', '--supplier-url', help='The LDAP URL for the Supplier server',
|
||||
+ dest='murl', default=None, required=True)
|
||||
state_parser.add_argument('-r', '--replica-url', help='The LDAP URL for the Replica server',
|
||||
- dest='rurl', required=True, default=None)
|
||||
+ dest='rurl', required=True, default=None)
|
||||
state_parser.add_argument('-b', '--suffix', help='Replicated suffix', dest='suffix', required=True)
|
||||
state_parser.add_argument('-D', '--bind-dn', help='The Bind DN', required=True, dest='binddn', default=None)
|
||||
state_parser.add_argument('-w', '--bind-pw', help='The Bind password', dest='bindpw', default=None)
|
||||
@@ -1586,7 +1586,7 @@ def main():
|
||||
# Online mode
|
||||
online_parser = subparsers.add_parser('online', help="Compare two online replicas for differences")
|
||||
online_parser.set_defaults(func=online_report)
|
||||
- online_parser.add_argument('-m', '--master-url', help='The LDAP URL for the Master server (REQUIRED)',
|
||||
+ online_parser.add_argument('-m', '--supplier-url', help='The LDAP URL for the Supplier server (REQUIRED)',
|
||||
dest='murl', default=None, required=True)
|
||||
online_parser.add_argument('-r', '--replica-url', help='The LDAP URL for the Replica server (REQUIRED)',
|
||||
dest='rurl', required=True, default=None)
|
||||
@@ -1612,12 +1612,12 @@ def main():
|
||||
# Offline LDIF mode
|
||||
offline_parser = subparsers.add_parser('offline', help="Compare two replication LDIF files for differences (LDIF file generated by 'db2ldif -r')")
|
||||
offline_parser.set_defaults(func=offline_report)
|
||||
- offline_parser.add_argument('-m', '--master-ldif', help='Master LDIF file',
|
||||
+ offline_parser.add_argument('-m', '--supplier-ldif', help='Supplier LDIF file',
|
||||
dest='mldif', default=None, required=True)
|
||||
offline_parser.add_argument('-r', '--replica-ldif', help='Replica LDIF file',
|
||||
dest='rldif', default=None, required=True)
|
||||
offline_parser.add_argument('--rid', dest='rid', default=None, required=True,
|
||||
- help='The Replica Identifer (rid) for the "Master" server')
|
||||
+ help='The Replica Identifier (rid) for the "Supplier" server')
|
||||
offline_parser.add_argument('-b', '--suffix', help='Replicated suffix', dest='suffix', required=True)
|
||||
offline_parser.add_argument('-c', '--conflicts', help='Display verbose conflict information', action='store_true',
|
||||
dest='conflicts', default=False)
|
||||
--
|
||||
2.31.1
|
||||
|
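For reference, a condensed sketch of the comparison get_ruv_state() performs after the rename; the timestamps below are made up, and in ds-replcheck they come from get_ruv_time() parsing each RUV's maxcsn:

# Illustrative sketch only -- condenses the branches of get_ruv_state() shown above.
def ruv_state(supplier_time, replica_time, rid):
    if supplier_time == -1:
        return "Replica ID ({}) not found in Supplier's RUV".format(rid)
    if replica_time == -1:
        return "Replica ID ({}) not found in Replica's RUV (not initialized?)".format(rid)
    if supplier_time == 0:
        return "Supplier has not seen any updates"
    if replica_time == 0:
        return "Replica has not seen any changes from the Supplier"
    if supplier_time > replica_time:
        return "Replica is behind Supplier by: {} seconds".format(supplier_time - replica_time)
    if supplier_time < replica_time:
        return "Replica is ahead of Supplier by: {} seconds".format(replica_time - supplier_time)
    return "Supplier and Replica are in perfect synchronization"

print(ruv_state(1626180000, 1626179700, 1))   # Replica is behind Supplier by: 300 seconds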
@ -1,373 +0,0 @@
|
||||
From 55a47c1bfe1ce1c27e470384c4f1d50895db25f7 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Tue, 13 Jul 2021 14:18:03 -0400
|
||||
Subject: [PATCH] Issue 4443 - Internal unindexed searches in syncrepl/retro
|
||||
changelog
|
||||
|
||||
Bug Description:
|
||||
|
||||
When a non-system index is added to a backend it is
|
||||
disabled until the database is initialized or reindexed.
|
||||
So in the case of the retro changelog the changenumber index
|
||||
is always disabled by default since it is never initialized.
This leads to unexpected unindexed searches of the retro
|
||||
changelog.
|
||||
|
||||
Fix Description:
|
||||
|
||||
If an index has "nsSystemIndex" set to "true" then enable it
|
||||
immediately.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4443
|
||||
|
||||
Reviewed by: spichugi & tbordaz(Thanks!!)
|
||||
---
|
||||
.../tests/suites/retrocl/basic_test.py | 53 ++++++++-------
|
||||
.../suites/retrocl/retrocl_indexing_test.py | 68 +++++++++++++++++++
|
||||
ldap/servers/plugins/retrocl/retrocl_create.c | 2 +-
|
||||
.../slapd/back-ldbm/ldbm_index_config.c | 25 +++++--
|
||||
src/lib389/lib389/_mapped_object.py | 13 ++++
|
||||
5 files changed, 130 insertions(+), 31 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
|
||||
index f3bc50f29..84d513829 100644
|
||||
--- a/dirsrvtests/tests/suites/retrocl/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
|
||||
@@ -8,7 +8,6 @@
|
||||
|
||||
import logging
|
||||
import ldap
|
||||
-import time
|
||||
import pytest
|
||||
from lib389.topologies import topology_st
|
||||
from lib389.plugins import RetroChangelogPlugin
|
||||
@@ -18,7 +17,8 @@ from lib389.tasks import *
|
||||
from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
|
||||
from lib389.cli_base.dsrc import dsrc_arg_concat
|
||||
from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add_attr
|
||||
-from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
|
||||
+from lib389.idm.user import UserAccount, UserAccounts
|
||||
+from lib389._mapped_object import DSLdapObjects
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
@@ -82,7 +82,7 @@ def test_retrocl_exclude_attr_add(topology_st):
|
||||
|
||||
log.info('Adding user1')
|
||||
try:
|
||||
- user1 = users.create(properties={
|
||||
+ users.create(properties={
|
||||
'sn': '1',
|
||||
'cn': 'user 1',
|
||||
'uid': 'user1',
|
||||
@@ -97,17 +97,18 @@ def test_retrocl_exclude_attr_add(topology_st):
|
||||
except ldap.ALREADY_EXISTS:
|
||||
pass
|
||||
except ldap.LDAPError as e:
|
||||
- log.error("Failed to add user1")
|
||||
+ log.error("Failed to add user1: " + str(e))
|
||||
|
||||
log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
|
||||
try:
|
||||
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
|
||||
+ retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX)
|
||||
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
|
||||
except ldap.LDAPError as e:
|
||||
- log.fatal("Changelog search failed, error: " +str(e))
|
||||
+ log.fatal("Changelog search failed, error: " + str(e))
|
||||
assert False
|
||||
assert len(cllist) > 0
|
||||
- if cllist[0].hasAttr('changes'):
|
||||
- clstr = (cllist[0].getValue('changes')).decode()
|
||||
+ if cllist[0].present('changes'):
|
||||
+ clstr = str(cllist[0].get_attr_vals_utf8('changes'))
|
||||
assert ATTR_HOMEPHONE in clstr
|
||||
assert ATTR_CARLICENSE in clstr
|
||||
|
||||
@@ -134,7 +135,7 @@ def test_retrocl_exclude_attr_add(topology_st):
|
||||
|
||||
log.info('Adding user2')
|
||||
try:
|
||||
- user2 = users.create(properties={
|
||||
+ users.create(properties={
|
||||
'sn': '2',
|
||||
'cn': 'user 2',
|
||||
'uid': 'user2',
|
||||
@@ -149,18 +150,18 @@ def test_retrocl_exclude_attr_add(topology_st):
|
||||
except ldap.ALREADY_EXISTS:
|
||||
pass
|
||||
except ldap.LDAPError as e:
|
||||
- log.error("Failed to add user2")
|
||||
+ log.error("Failed to add user2: " + str(e))
|
||||
|
||||
log.info('Verify homePhone attr is not in the changelog changestring')
|
||||
try:
|
||||
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN)
|
||||
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER2_DN})')
|
||||
assert len(cllist) > 0
|
||||
- if cllist[0].hasAttr('changes'):
|
||||
- clstr = (cllist[0].getValue('changes')).decode()
|
||||
+ if cllist[0].present('changes'):
|
||||
+ clstr = str(cllist[0].get_attr_vals_utf8('changes'))
|
||||
assert ATTR_HOMEPHONE not in clstr
|
||||
assert ATTR_CARLICENSE in clstr
|
||||
except ldap.LDAPError as e:
|
||||
- log.fatal("Changelog search failed, error: " +str(e))
|
||||
+ log.fatal("Changelog search failed, error: " + str(e))
|
||||
assert False
|
||||
|
||||
def test_retrocl_exclude_attr_mod(topology_st):
|
||||
@@ -228,19 +229,20 @@ def test_retrocl_exclude_attr_mod(topology_st):
|
||||
'homeDirectory': '/home/user1',
|
||||
'userpassword': USER_PW})
|
||||
except ldap.ALREADY_EXISTS:
|
||||
- pass
|
||||
+ user1 = UserAccount(st, dn=USER1_DN)
|
||||
except ldap.LDAPError as e:
|
||||
- log.error("Failed to add user1")
|
||||
+ log.error("Failed to add user1: " + str(e))
|
||||
|
||||
log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
|
||||
try:
|
||||
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
|
||||
+ retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX)
|
||||
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
|
||||
except ldap.LDAPError as e:
|
||||
- log.fatal("Changelog search failed, error: " +str(e))
|
||||
+ log.fatal("Changelog search failed, error: " + str(e))
|
||||
assert False
|
||||
assert len(cllist) > 0
|
||||
- if cllist[0].hasAttr('changes'):
|
||||
- clstr = (cllist[0].getValue('changes')).decode()
|
||||
+ if cllist[0].present('changes'):
|
||||
+ clstr = str(cllist[0].get_attr_vals_utf8('changes'))
|
||||
assert ATTR_HOMEPHONE in clstr
|
||||
assert ATTR_CARLICENSE in clstr
|
||||
|
||||
@@ -267,24 +269,25 @@ def test_retrocl_exclude_attr_mod(topology_st):
|
||||
|
||||
log.info('Modify user1 carLicense attribute')
|
||||
try:
|
||||
- st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")])
|
||||
+ user1.replace(ATTR_CARLICENSE, "123WX321")
|
||||
except ldap.LDAPError as e:
|
||||
log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc'])
|
||||
assert False
|
||||
|
||||
log.info('Verify carLicense attr is not in the changelog changestring')
|
||||
try:
|
||||
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
|
||||
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
|
||||
assert len(cllist) > 0
|
||||
# There will be 2 entries in the changelog for this user, we are only
|
||||
#interested in the second one, the modify operation.
|
||||
- if cllist[1].hasAttr('changes'):
|
||||
- clstr = (cllist[1].getValue('changes')).decode()
|
||||
+ if cllist[1].present('changes'):
|
||||
+ clstr = str(cllist[1].get_attr_vals_utf8('changes'))
|
||||
assert ATTR_CARLICENSE not in clstr
|
||||
except ldap.LDAPError as e:
|
||||
- log.fatal("Changelog search failed, error: " +str(e))
|
||||
+ log.fatal("Changelog search failed, error: " + str(e))
|
||||
assert False
|
||||
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py
|
||||
new file mode 100644
|
||||
index 000000000..b1dfe962c
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py
|
||||
@@ -0,0 +1,68 @@
|
||||
+import logging
|
||||
+import pytest
|
||||
+import os
|
||||
+from lib389._constants import RETROCL_SUFFIX, DEFAULT_SUFFIX
|
||||
+from lib389.topologies import topology_st as topo
|
||||
+from lib389.plugins import RetroChangelogPlugin
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389._mapped_object import DSLdapObjects
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+
|
||||
+def test_indexing_is_online(topo):
|
||||
+ """Test that the changenmumber index is online right after enabling the plugin
|
||||
+
|
||||
+ :id: 16f4c001-9e0c-4448-a2b3-08ac1e85d40f
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Enable retro cl
|
||||
+ 2. Perform some updates
|
||||
+ 3. Search for "(changenumber>=-1)", and it is not partially unindexed
|
||||
+ 4. Search for "(&(changenumber>=-1)(targetuniqueid=*))", and it is not partially unindexed
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ """
|
||||
+
|
||||
+ # Enable plugin
|
||||
+ topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off')
|
||||
+ plugin = RetroChangelogPlugin(topo.standalone)
|
||||
+ plugin.enable()
|
||||
+ topo.standalone.restart()
|
||||
+
|
||||
+ # Do a bunch of updates
|
||||
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
|
||||
+ user_entry = users.create(properties={
|
||||
+ 'sn': '1',
|
||||
+ 'cn': 'user 1',
|
||||
+ 'uid': 'user1',
|
||||
+ 'uidNumber': '11',
|
||||
+ 'gidNumber': '111',
|
||||
+ 'givenname': 'user1',
|
||||
+ 'homePhone': '0861234567',
|
||||
+ 'carLicense': '131D16674',
|
||||
+ 'mail': 'user1@whereever.com',
|
||||
+ 'homeDirectory': '/home'
|
||||
+ })
|
||||
+ for count in range(0, 10):
|
||||
+ user_entry.replace('mail', f'test{count}@test.com')
|
||||
+
|
||||
+ # Search the retro cl, and check for error messages
|
||||
+ filter_simple = '(changenumber>=-1)'
|
||||
+ filter_compound = '(&(changenumber>=-1)(targetuniqueid=*))'
|
||||
+ retro_changelog_suffix = DSLdapObjects(topo.standalone, basedn=RETROCL_SUFFIX)
|
||||
+ retro_changelog_suffix.filter(filter_simple)
|
||||
+ assert not topo.standalone.searchAccessLog('Partially Unindexed Filter')
|
||||
+
|
||||
+ # Search the retro cl again with compound filter
|
||||
+ retro_changelog_suffix.filter(filter_compound)
|
||||
+ assert not topo.standalone.searchAccessLog('Partially Unindexed Filter')
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main(["-s", CURRENT_FILE])
|
||||
diff --git a/ldap/servers/plugins/retrocl/retrocl_create.c b/ldap/servers/plugins/retrocl/retrocl_create.c
|
||||
index 571e6899f..5bfde7831 100644
|
||||
--- a/ldap/servers/plugins/retrocl/retrocl_create.c
|
||||
+++ b/ldap/servers/plugins/retrocl/retrocl_create.c
|
||||
@@ -133,7 +133,7 @@ retrocl_create_be(const char *bedir)
|
||||
val.bv_len = strlen(val.bv_val);
|
||||
slapi_entry_add_values(e, "cn", vals);
|
||||
|
||||
- val.bv_val = "false";
|
||||
+ val.bv_val = "true"; /* enables the index */
|
||||
val.bv_len = strlen(val.bv_val);
|
||||
slapi_entry_add_values(e, "nssystemindex", vals);
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
|
||||
index 9722d0ce7..38e7368e1 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
|
||||
@@ -25,7 +25,7 @@ int ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, Slapi_Entry *en
|
||||
#define INDEXTYPE_NONE 1
|
||||
|
||||
static int
|
||||
-ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, char *err_buf)
|
||||
+ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, PRBool *is_system_index, char *err_buf)
|
||||
{
|
||||
Slapi_Attr *attr;
|
||||
const struct berval *attrValue;
|
||||
@@ -78,6 +78,15 @@ ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_st
|
||||
}
|
||||
}
|
||||
|
||||
+ *is_system_index = PR_FALSE;
|
||||
+ if (0 == slapi_entry_attr_find(e, "nsSystemIndex", &attr)) {
|
||||
+ slapi_attr_first_value(attr, &sval);
|
||||
+ attrValue = slapi_value_get_berval(sval);
|
||||
+ if (strcasecmp(attrValue->bv_val, "true") == 0) {
|
||||
+ *is_system_index = PR_TRUE;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
/* ok the entry is good to process, pass it to attr_index_config */
|
||||
if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0, err_buf)) {
|
||||
slapi_ch_free_string(index_name);
|
||||
@@ -101,9 +110,10 @@ ldbm_index_init_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
|
||||
void *arg)
|
||||
{
|
||||
ldbm_instance *inst = (ldbm_instance *)arg;
|
||||
+ PRBool is_system_index = PR_FALSE;
|
||||
|
||||
returntext[0] = '\0';
|
||||
- *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, NULL);
|
||||
+ *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, &is_system_index /* not used */, NULL);
|
||||
if (*returncode == LDAP_SUCCESS) {
|
||||
return SLAPI_DSE_CALLBACK_OK;
|
||||
} else {
|
||||
@@ -126,17 +136,21 @@ ldbm_instance_index_config_add_callback(Slapi_PBlock *pb __attribute__((unused))
|
||||
{
|
||||
ldbm_instance *inst = (ldbm_instance *)arg;
|
||||
char *index_name = NULL;
|
||||
+ PRBool is_system_index = PR_FALSE;
|
||||
|
||||
returntext[0] = '\0';
|
||||
- *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, returntext);
|
||||
+ *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, &is_system_index, returntext);
|
||||
if (*returncode == LDAP_SUCCESS) {
|
||||
struct attrinfo *ai = NULL;
|
||||
/* if the index is a "system" index, we assume it's being added by
|
||||
* by the server, and it's okay for the index to go online immediately.
|
||||
* if not, we set the index "offline" so it won't actually be used
|
||||
* until someone runs db2index on it.
|
||||
+ * If caller wants to add an index that they want to be online
|
||||
+ * immediately they can also set "nsSystemIndex" to "true" in the
|
||||
+ * index config entry (e.g. is_system_index).
|
||||
*/
|
||||
- if (!ldbm_attribute_always_indexed(index_name)) {
|
||||
+ if (!is_system_index && !ldbm_attribute_always_indexed(index_name)) {
|
||||
ainfo_get(inst->inst_be, index_name, &ai);
|
||||
PR_ASSERT(ai != NULL);
|
||||
ai->ai_indexmask |= INDEX_OFFLINE;
|
||||
@@ -386,13 +400,14 @@ ldbm_instance_index_config_enable_index(ldbm_instance *inst, Slapi_Entry *e)
|
||||
char *index_name = NULL;
|
||||
int rc = LDAP_SUCCESS;
|
||||
struct attrinfo *ai = NULL;
|
||||
+ PRBool is_system_index = PR_FALSE;
|
||||
|
||||
index_name = slapi_entry_attr_get_charptr(e, "cn");
|
||||
if (index_name) {
|
||||
ainfo_get(inst->inst_be, index_name, &ai);
|
||||
}
|
||||
if (!ai) {
|
||||
- rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, NULL);
|
||||
+ rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, &is_system_index /* not used */, NULL);
|
||||
}
|
||||
if (rc == LDAP_SUCCESS) {
|
||||
/* Assume the caller knows if it is OK to go online immediately */
|
||||
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
|
||||
index b6d778b01..fe610d175 100644
|
||||
--- a/src/lib389/lib389/_mapped_object.py
|
||||
+++ b/src/lib389/lib389/_mapped_object.py
|
||||
@@ -148,6 +148,19 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
|
||||
return True
|
||||
|
||||
+ def search(self, scope="subtree", filter='objectclass=*'):
|
||||
+ search_scope = ldap.SCOPE_SUBTREE
|
||||
+ if scope == 'base':
|
||||
+ search_scope = ldap.SCOPE_BASE
|
||||
+ elif scope == 'one':
|
||||
+ search_scope = ldap.SCOPE_ONE
|
||||
+ elif scope == 'subtree':
|
||||
+ search_scope = ldap.SCOPE_SUBTREE
|
||||
+ return self._instance.search_ext_s(self._dn, search_scope, filter,
|
||||
+ serverctrls=self._server_controls,
|
||||
+ clientctrls=self._client_controls,
|
||||
+ escapehatch='i am sure')
|
||||
+
|
||||
def display(self, attrlist=['*']):
|
||||
"""Get an entry but represent it as a string LDIF
|
||||
|
||||
--
|
||||
2.31.1
|
||||
|
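For reference, a sketch of the kind of index configuration entry this fix acts on (the DN and attribute values are assumptions modelled on typical 389-ds index entries, not taken from the patch); with nsSystemIndex set to "true" the index comes online immediately instead of staying offline until db2index is run:

# Illustrative sketch only; DN, backend name and credentials are assumptions.
import ldap
import ldap.modlist

conn = ldap.initialize("ldapi://%2Fvar%2Frun%2Fslapd-localhost.socket")
conn.simple_bind_s("cn=Directory Manager", "password")

index_dn = "cn=changenumber,cn=index,cn=changelog,cn=ldbm database,cn=plugins,cn=config"
entry = {
    "objectClass": [b"top", b"nsIndex"],
    "cn": [b"changenumber"],
    "nsSystemIndex": [b"true"],   # the value retrocl_create_be() now sets
    "nsIndexType": [b"eq"],
}
conn.add_s(index_dn, ldap.modlist.addModlist(entry))
conn.unbind_s()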
@ -1,121 +0,0 @@
|
||||
From 2f0218f91d35c83a2aaecb71849a54b2481390ab Mon Sep 17 00:00:00 2001
|
||||
From: Firstyear <william@blackhats.net.au>
|
||||
Date: Fri, 9 Jul 2021 11:53:35 +1000
|
||||
Subject: [PATCH] Issue 4817 - BUG - locked crypt accounts on import may allow
|
||||
all passwords (#4819)
|
||||
|
||||
Bug Description: Due to mishandling of short dbpwd hashes, the
crypt_r algorithm was misused and was only comparing salts
|
||||
in some cases, rather than checking the actual content
|
||||
of the password.
|
||||
|
||||
Fix Description: Stricter checks on dbpwd lengths to ensure
|
||||
that content passed to crypt_r has at least 2 salt bytes and
|
||||
1 hash byte, as well as stricter checks on ct_memcmp to ensure
|
||||
that compared values are the same length, rather than potentially
|
||||
allowing overruns/short comparisons.
|
||||
|
||||
fixes: https://github.com/389ds/389-ds-base/issues/4817
|
||||
|
||||
Author: William Brown <william@blackhats.net.au>
|
||||
|
||||
Review by: @mreynolds389
|
||||
---
|
||||
.../password/pwd_crypt_asterisk_test.py | 50 +++++++++++++++++++
|
||||
ldap/servers/plugins/pwdstorage/crypt_pwd.c | 20 +++++---
|
||||
2 files changed, 64 insertions(+), 6 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py b/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py
|
||||
new file mode 100644
|
||||
index 000000000..d76614db1
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py
|
||||
@@ -0,0 +1,50 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2021 William Brown <william@blackhats.net.au>
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import ldap
|
||||
+import pytest
|
||||
+from lib389.topologies import topology_st
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389._constants import (DEFAULT_SUFFIX, PASSWORD)
|
||||
+
|
||||
+pytestmark = pytest.mark.tier1
|
||||
+
|
||||
+def test_password_crypt_asterisk_is_rejected(topology_st):
|
||||
+ """It was reported that {CRYPT}* was allowing all passwords to be
|
||||
+ valid in the bind process. This checks that we should be rejecting
|
||||
+ these as they should represent locked accounts. Similar, {CRYPT}!
|
||||
+
|
||||
+ :id: 0b8f1a6a-f3eb-4443-985e-da14d0939dc3
|
||||
+ :setup: Single instance
|
||||
+ :steps: 1. Set a password hash in with CRYPT and the content *
|
||||
+ 2. Test a bind
|
||||
+ 3. Set a password hash in with CRYPT and the content !
|
||||
+ 4. Test a bind
|
||||
+ :expectedresults:
|
||||
+ 1. Successfully set the values
|
||||
+ 2. The bind fails
|
||||
+ 3. Successfully set the values
|
||||
+ 4. The bind fails
|
||||
+ """
|
||||
+ topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on')
|
||||
+ topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'off')
|
||||
+
|
||||
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
+ user = users.create_test_user()
|
||||
+
|
||||
+ user.set('userPassword', "{CRYPT}*")
|
||||
+
|
||||
+ # Attempt to bind with incorrect password.
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ badconn = user.bind('badpassword')
|
||||
+
|
||||
+ user.set('userPassword', "{CRYPT}!")
|
||||
+ # Attempt to bind with incorrect password.
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ badconn = user.bind('badpassword')
|
||||
+
|
||||
diff --git a/ldap/servers/plugins/pwdstorage/crypt_pwd.c b/ldap/servers/plugins/pwdstorage/crypt_pwd.c
|
||||
index 9031b2199..1b37d41ed 100644
|
||||
--- a/ldap/servers/plugins/pwdstorage/crypt_pwd.c
|
||||
+++ b/ldap/servers/plugins/pwdstorage/crypt_pwd.c
|
||||
@@ -48,15 +48,23 @@ static unsigned char itoa64[] = /* 0 ... 63 => ascii - 64 */
|
||||
int
|
||||
crypt_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
{
|
||||
- int rc;
|
||||
- char *cp;
|
||||
+ int rc = -1;
|
||||
+ char *cp = NULL;
|
||||
+ size_t dbpwd_len = strlen(dbpwd);
|
||||
struct crypt_data data;
|
||||
data.initialized = 0;
|
||||
|
||||
- /* we use salt (first 2 chars) of encoded password in call to crypt_r() */
|
||||
- cp = crypt_r(userpwd, dbpwd, &data);
|
||||
- if (cp) {
|
||||
- rc = slapi_ct_memcmp(dbpwd, cp, strlen(dbpwd));
|
||||
+ /*
|
||||
+ * there MUST be at least 2 chars of salt and some pw bytes, else this is INVALID and will
|
||||
+ * allow any password to bind as we then only compare SALTS.
|
||||
+ */
|
||||
+ if (dbpwd_len >= 3) {
|
||||
+ /* we use salt (first 2 chars) of encoded password in call to crypt_r() */
|
||||
+ cp = crypt_r(userpwd, dbpwd, &data);
|
||||
+ }
|
||||
+ /* If these are not the same length, we can not proceed safely with memcmp. */
|
||||
+ if (cp && dbpwd_len == strlen(cp)) {
|
||||
+ rc = slapi_ct_memcmp(dbpwd, cp, dbpwd_len);
|
||||
} else {
|
||||
rc = -1;
|
||||
}
|
||||
--
|
||||
2.31.1
|
||||
|
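The guard the fix adds can be summarised outside the server: a dbpwd value shorter than 3 characters (2 salt chars plus at least 1 hash char) must be rejected outright, and values of unequal length must never reach the constant-time compare. A rough Python sketch of that rule, for illustration only (the real check lives in crypt_pw_cmp() above):

def crypt_compare_is_safe(dbpwd, crypted):
    # Fewer than 2 salt chars + 1 hash char: crypt_r() would effectively
    # compare salts only, so any password would "match".
    if len(dbpwd) < 3:
        return False
    # Never hand values of different lengths to a constant-time memcmp.
    if crypted is None or len(dbpwd) != len(crypted):
        return False
    return True
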
@@ -1,39 +0,0 @@
From 31d53e7da585723e66b838dcf34b77ea7c9968c6 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 21 Jul 2021 09:16:30 +0200
Subject: [PATCH] Issue 4837 - persistent search returns entries even when an
 error is returned by content-sync-plugin (#4838)

Bug description:
When an ldap client sends a sync request control, the server response may contain a sync state control.
If the server fails to create the control, the search should fail.

Fix description:
In case the server fails to create the response control,
log the failure in the pre_search and propagate the return code.

relates: https://github.com/389ds/389-ds-base/issues/4837

Reviewed by: Simon Pichugin

Platforms tested: RH8.4
---
 ldap/servers/plugins/sync/sync_refresh.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ldap/servers/plugins/sync/sync_refresh.c b/ldap/servers/plugins/sync/sync_refresh.c
index 646ff760b..4cbb6a949 100644
--- a/ldap/servers/plugins/sync/sync_refresh.c
+++ b/ldap/servers/plugins/sync/sync_refresh.c
@@ -213,7 +213,7 @@ sync_srch_refresh_pre_entry(Slapi_PBlock *pb)
         Slapi_Entry *e;
         slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_ENTRY, &e);
         LDAPControl **ctrl = (LDAPControl **)slapi_ch_calloc(2, sizeof(LDAPControl *));
-        sync_create_state_control(e, &ctrl[0], LDAP_SYNC_ADD, NULL);
+        rc = sync_create_state_control(e, &ctrl[0], LDAP_SYNC_ADD, NULL);
         slapi_pblock_set(pb, SLAPI_SEARCH_CTRLS, ctrl);
     }
     return (rc);
--
2.31.1

@@ -1,49 +0,0 @@
From 616dc9964a4675dea2ab2c2efb9bd31c3903e29d Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 26 Jul 2021 15:22:08 -0400
Subject: [PATCH] Hardcode gost crypt password storage scheme

---
 .../plugins/pwdstorage/gost_yescrypt.c | 22 -------------------
 1 file changed, 22 deletions(-)

diff --git a/ldap/servers/plugins/pwdstorage/gost_yescrypt.c b/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
index 67b39395e..7b0d1653c 100644
--- a/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
+++ b/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
@@ -11,7 +11,6 @@

 #include <crypt.h>

-#ifdef XCRYPT_VERSION_STR
 #include <errno.h>
 int
 gost_yescrypt_pw_cmp(const char *userpwd, const char *dbpwd)
@@ -64,24 +63,3 @@ gost_yescrypt_pw_enc(const char *pwd)
     return enc;
 }

-#else
-
-/*
- * We do not have xcrypt, so always fail all checks.
- */
-int
-gost_yescrypt_pw_cmp(const char *userpwd __attribute__((unused)), const char *dbpwd __attribute__((unused)))
-{
-    slapi_log_err(SLAPI_LOG_ERR, GOST_YESCRYPT_SCHEME_NAME,
-                  "Unable to use gost_yescrypt_pw_cmp, xcrypt is not available.\n");
-    return 1;
-}
-
-char *
-gost_yescrypt_pw_enc(const char *pwd __attribute__((unused)))
-{
-    slapi_log_err(SLAPI_LOG_ERR, GOST_YESCRYPT_SCHEME_NAME,
-                  "Unable to use gost_yescrypt_pw_enc, xcrypt is not available.\n");
-    return NULL;
-}
-#endif
--
2.31.1

@@ -1,39 +0,0 @@
From a2a51130b2f95316237b85da099a8be734969e54 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Sat, 24 Apr 2021 21:37:54 +0100
Subject: [PATCH] Issue 4734 - import of entry with no parent warning (#4735)

Description: Online import of an ldif file that contains an entry with
no parent doesn't generate a task warning.

Fixes: https://github.com/389ds/389-ds-base/issues/4734

Author: vashirov@redhat.com (Thanks)

Reviewed by: mreynolds, jchapma
---
 ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
index 905a84e74..35183ed59 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
@@ -2767,8 +2767,14 @@ import_foreman(void *param)
         if (job->flags & FLAG_ABORT) {
             goto error;
         }
+
+        /* capture skipped entry warnings for this task */
+        if((job) && (job->skipped)) {
+            slapi_task_set_warning(job->task, WARN_SKIPPED_IMPORT_ENTRY);
+        }
     }

+
     slapi_pblock_destroy(pb);
     info->state = FINISHED;
     return;
--
2.31.1

@@ -1,37 +0,0 @@
From f9bc249b2baa11a8ac0eb54e4077eb706d137e38 Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Thu, 19 Aug 2021 11:06:06 +1000
Subject: [PATCH] Issue 4872 - BUG - entryuuid enabled by default causes
 replication issues (#4876)

Bug Description: Due to older servers missing the syntax
plugin, this breaks schema replication and causes cascading
errors.

Fix Description: This changes the syntax to be a case
insensitive string, while leaving the plugins in place
for other usage.

fixes: https://github.com/389ds/389-ds-base/issues/4872

Author: William Brown <william@blackhats.net.au>

Review by: @mreynolds389 @progier389
---
 ldap/schema/03entryuuid.ldif | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/ldap/schema/03entryuuid.ldif b/ldap/schema/03entryuuid.ldif
index cbde981fe..f7a7f40d5 100644
--- a/ldap/schema/03entryuuid.ldif
+++ b/ldap/schema/03entryuuid.ldif
@@ -13,4 +13,5 @@ dn: cn=schema
 #
 # attributes
 #
-attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
+# attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
+attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
--
2.31.1

@@ -1,125 +0,0 @@
From 120511d35095a48d60abbb7cb2367d0c30fbc757 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 25 Aug 2021 13:20:56 -0400
Subject: [PATCH] Remove GOST-YESCRYPT password storage scheme

---
 .../tests/suites/password/pwd_algo_test.py |  1 -
 ldap/ldif/template-dse-minimal.ldif.in     |  9 ---------
 ldap/ldif/template-dse.ldif.in             |  9 ---------
 ldap/servers/plugins/pwdstorage/pwd_init.c | 18 ------------------
 ldap/servers/slapd/fedse.c                 | 13 -------------
 5 files changed, 50 deletions(-)

diff --git a/dirsrvtests/tests/suites/password/pwd_algo_test.py b/dirsrvtests/tests/suites/password/pwd_algo_test.py
index 66bda420e..88f8e40b7 100644
--- a/dirsrvtests/tests/suites/password/pwd_algo_test.py
+++ b/dirsrvtests/tests/suites/password/pwd_algo_test.py
@@ -124,7 +124,6 @@ def _test_algo_for_pbkdf2(inst, algo_name):
     ('CLEAR', 'CRYPT', 'CRYPT-MD5', 'CRYPT-SHA256', 'CRYPT-SHA512',
      'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA',
      'SSHA256', 'SSHA384', 'SSHA512', 'PBKDF2_SHA256', 'DEFAULT',
-     'GOST_YESCRYPT',
     ))
 def test_pwd_algo_test(topology_st, algo):
     """Assert that all of our password algorithms correctly PASS and FAIL varying
diff --git a/ldap/ldif/template-dse-minimal.ldif.in b/ldap/ldif/template-dse-minimal.ldif.in
index 2eccae9b2..1a05f4a67 100644
--- a/ldap/ldif/template-dse-minimal.ldif.in
+++ b/ldap/ldif/template-dse-minimal.ldif.in
@@ -194,15 +194,6 @@ nsslapd-pluginarg1: nsds5ReplicaCredentials
 nsslapd-pluginid: aes-storage-scheme
 nsslapd-pluginprecedence: 1

-dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config
-objectclass: top
-objectclass: nsSlapdPlugin
-cn: GOST_YESCRYPT
-nsslapd-pluginpath: libpwdstorage-plugin
-nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init
-nsslapd-plugintype: pwdstoragescheme
-nsslapd-pluginenabled: on
-
 dn: cn=Syntax Validation Task,cn=plugins,cn=config
 objectclass: top
 objectclass: nsSlapdPlugin
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index 7e7480cba..f30531bec 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -242,15 +242,6 @@ nsslapd-pluginarg2: nsds5ReplicaBootstrapCredentials
 nsslapd-pluginid: aes-storage-scheme
 nsslapd-pluginprecedence: 1

-dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config
-objectclass: top
-objectclass: nsSlapdPlugin
-cn: GOST_YESCRYPT
-nsslapd-pluginpath: libpwdstorage-plugin
-nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init
-nsslapd-plugintype: pwdstoragescheme
-nsslapd-pluginenabled: on
-
 dn: cn=Syntax Validation Task,cn=plugins,cn=config
 objectclass: top
 objectclass: nsSlapdPlugin
diff --git a/ldap/servers/plugins/pwdstorage/pwd_init.c b/ldap/servers/plugins/pwdstorage/pwd_init.c
index 606e63404..59cfc4684 100644
--- a/ldap/servers/plugins/pwdstorage/pwd_init.c
+++ b/ldap/servers/plugins/pwdstorage/pwd_init.c
@@ -52,8 +52,6 @@ static Slapi_PluginDesc smd5_pdesc = {"smd5-password-storage-scheme", VENDOR, DS

 static Slapi_PluginDesc pbkdf2_sha256_pdesc = {"pbkdf2-sha256-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Salted PBKDF2 SHA256 hash algorithm (PBKDF2_SHA256)"};

-static Slapi_PluginDesc gost_yescrypt_pdesc = {"gost-yescrypt-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Yescrypt KDF algorithm (Streebog256)"};
-
 static char *plugin_name = "NSPwdStoragePlugin";

 int
@@ -431,19 +429,3 @@ pbkdf2_sha256_pwd_storage_scheme_init(Slapi_PBlock *pb)
     return rc;
 }

-int
-gost_yescrypt_pwd_storage_scheme_init(Slapi_PBlock *pb)
-{
-    int rc;
-
-    slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "=> gost_yescrypt_pwd_storage_scheme_init\n");
-
-    rc = slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, (void *)SLAPI_PLUGIN_VERSION_01);
-    rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&gost_yescrypt_pdesc);
-    rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_ENC_FN, (void *)gost_yescrypt_pw_enc);
-    rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *)gost_yescrypt_pw_cmp);
-    rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, GOST_YESCRYPT_SCHEME_NAME);
-
-    slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "<= gost_yescrypt_pwd_storage_scheme_init %d\n", rc);
-    return rc;
-}
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
index 44159c991..24b7ed11c 100644
--- a/ldap/servers/slapd/fedse.c
+++ b/ldap/servers/slapd/fedse.c
@@ -203,19 +203,6 @@ static const char *internal_entries[] =
         "nsslapd-pluginVersion: none\n"
         "nsslapd-pluginVendor: 389 Project\n"
         "nsslapd-pluginDescription: CRYPT-SHA512\n",
-
-    "dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config\n"
-    "objectclass: top\n"
-    "objectclass: nsSlapdPlugin\n"
-    "cn: GOST_YESCRYPT\n"
-    "nsslapd-pluginpath: libpwdstorage-plugin\n"
-    "nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init\n"
-    "nsslapd-plugintype: pwdstoragescheme\n"
-    "nsslapd-pluginenabled: on\n"
-    "nsslapd-pluginId: GOST_YESCRYPT\n"
-    "nsslapd-pluginVersion: none\n"
-    "nsslapd-pluginVendor: 389 Project\n"
-    "nsslapd-pluginDescription: GOST_YESCRYPT\n",
 };

 static int NUM_INTERNAL_ENTRIES = sizeof(internal_entries) / sizeof(internal_entries[0]);
--
2.31.1

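To confirm the removal on a running instance, the registered schemes can be listed under the plugin container named in the deleted entries. A sketch, assuming inst is a connected DirSrv/python-ldap connection:

import ldap

schemes = inst.search_s('cn=Password Storage Schemes,cn=plugins,cn=config',
                        ldap.SCOPE_ONELEVEL,
                        '(objectClass=nsSlapdPlugin)', ['cn'])
for dn, attrs in schemes:
    # GOST_YESCRYPT should no longer appear in this list.
    print(attrs['cn'][0].decode())
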
@@ -1,44 +0,0 @@
From df0ccce06259b9ef06d522e61da4e3ffcbbf5016 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 25 Aug 2021 16:54:57 -0400
Subject: [PATCH] Issue 4884 - server crashes when dnaInterval attribute is set
 to zero

Bug Description:

A division by zero crash occurs if the dnaInterval is set to zero.

Fix Description:

Validate the config value of dnaInterval and adjust it to the
default/safe value of "1" if needed.

relates: https://github.com/389ds/389-ds-base/issues/4884

Reviewed by: tbordaz (Thanks!)
---
 ldap/servers/plugins/dna/dna.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index 928a3f54a..c983ebdd0 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -1025,7 +1025,14 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)

     value = slapi_entry_attr_get_charptr(e, DNA_INTERVAL);
     if (value) {
+        errno = 0;
         entry->interval = strtoull(value, 0, 0);
+        if (entry->interval == 0 || errno == ERANGE) {
+            slapi_log_err(SLAPI_LOG_WARNING, DNA_PLUGIN_SUBSYSTEM,
+                          "dna_parse_config_entry - Invalid value for dnaInterval (%s), "
+                          "Using default value of 1\n", value);
+            entry->interval = 1;
+        }
         slapi_ch_free_string(&value);
     }

--
2.31.1

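The new guard amounts to: treat zero, out-of-range, or unparsable dnaInterval values as the safe default of 1. A Python mirror of that rule, for illustration only (the plugin does this in C with strtoull() and errno):

def parse_dna_interval(value):
    """Return a usable dnaInterval; invalid or zero values fall back to 1."""
    try:
        interval = int(value, 0)  # like strtoull: accepts decimal and 0x-prefixed hex
    except (TypeError, ValueError):
        return 1
    return interval if interval > 0 else 1

assert parse_dna_interval("0") == 1      # the crashing case, now clamped
assert parse_dna_interval("100") == 100
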
@@ -1,415 +0,0 @@
From 18a8ed29ae0b300083a6b83665b0137948a2ef7c Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 23 Sep 2021 10:48:50 +0200
Subject: [PATCH 1/3] Issue 4925 - Performance ACI: targetfilter evaluation
 result can be reused (#4926)

Bug description:
An ACI may contain a targetfilter. For a given returned entry of a
SRCH request, the same targetfilter is evaluated for each of the
returned attributes.
Once the filter has been evaluated, it is useless to reevaluate
it for the next attribute.

Fix description:
The fix implements a very simple cache (linked list) that keeps
the results of the previously evaluated 'targetfilter'.
This cache is per-entry. For an operation, an aclpb is allocated
that is used to evaluate ACIs against each successive entry.
Each time a candidate entry is added in the aclpb
(acl_access_allowed), the cache (aclpb_curr_entry_targetfilters)
is freed. Then, for each 'targetfilter', the original targetfilter
is looked up in the cache. If this is its first evaluation,
the result of the evaluation is stored into the cache using
the original targetfilter as the key.

The key to lookup/store the cache is the string representation
of the targetfilter. The string contains a redzone to detect
that the filter exceeds the maximum size (2K). If it exceeds it,
then the key is invalid and the lookup/store is a noop.

relates: #4925

Reviewed by: Mark Reynolds, William Brown (Thanks)

Platforms tested: F34
---
 ldap/servers/plugins/acl/acl.c     | 138 +++++++++++++++++++++++++++--
 ldap/servers/plugins/acl/acl.h     |  14 +++
 ldap/servers/plugins/acl/acl_ext.c |  12 +++
 ldap/servers/slapd/libglobs.c      |  29 ++++++
 ldap/servers/slapd/proto-slap.h    |   2 +
 ldap/servers/slapd/slap.h          |   2 +
 6 files changed, 191 insertions(+), 6 deletions(-)

diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c
|
||||
index 4e811f73a..377c18e68 100644
|
||||
--- a/ldap/servers/plugins/acl/acl.c
|
||||
+++ b/ldap/servers/plugins/acl/acl.c
|
||||
@@ -67,6 +67,9 @@ static void print_access_control_summary(char *source,
|
||||
const char *edn,
|
||||
aclResultReason_t *acl_reason);
|
||||
static int check_rdn_access(Slapi_PBlock *pb, Slapi_Entry *e, const char *newrdn, int access);
|
||||
+static struct targetfilter_cached_result *targetfilter_cache_lookup(struct acl_pblock *aclpb, char *filter, PRBool filter_valid);
|
||||
+static void targetfilter_cache_add(struct acl_pblock *aclpb, char *filter, int result, PRBool filter_valid);
|
||||
+
|
||||
|
||||
|
||||
/*
|
||||
@@ -176,6 +179,70 @@ check_rdn_access(Slapi_PBlock *pb, Slapi_Entry *e, const char *dn, int access)
|
||||
return (retCode);
|
||||
}
|
||||
|
||||
+/* Retrieves, in the targetfilter cache (list), this
|
||||
+ * filter in case it was already evaluated
|
||||
+ *
|
||||
+ * filter: key to retrieve the evaluation in the cache
|
||||
+ * filter_valid: PR_FALSE means that the filter key is truncated, PR_TRUE else
|
||||
+ */
|
||||
+static struct targetfilter_cached_result *
|
||||
+targetfilter_cache_lookup(struct acl_pblock *aclpb, char *filter, PRBool filter_valid)
|
||||
+{
|
||||
+ struct targetfilter_cached_result *results;
|
||||
+ if (! aclpb->targetfilter_cache_enabled) {
|
||||
+ /* targetfilter cache is disabled */
|
||||
+ return NULL;
|
||||
+ }
|
||||
+ if (filter == NULL) {
|
||||
+ return NULL;
|
||||
+ }
|
||||
+ for(results = aclpb->aclpb_curr_entry_targetfilters; results; results = results->next) {
|
||||
+ if (strcmp(results->filter, filter) == 0) {
|
||||
+ return results;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ return NULL;
|
||||
+}
|
||||
+
|
||||
+/* Free all evaluations cached for this current entry */
|
||||
+void
|
||||
+targetfilter_cache_free(struct acl_pblock *aclpb)
|
||||
+{
|
||||
+ struct targetfilter_cached_result *results, *next;
|
||||
+ if (aclpb == NULL) {
|
||||
+ return;
|
||||
+ }
|
||||
+ for(results = aclpb->aclpb_curr_entry_targetfilters; results;) {
|
||||
+ next = results->next;
|
||||
+ slapi_ch_free_string(&results->filter);
|
||||
+ slapi_ch_free((void **) &results);
|
||||
+ results = next;
|
||||
+ }
|
||||
+ aclpb->aclpb_curr_entry_targetfilters = NULL;
|
||||
+}
|
||||
+
|
||||
+/* add a new targetfilter evaluation into the cache (per entry)
|
||||
+ * ATM just use a linked list of evaluation
|
||||
+ *
|
||||
+ * filter: key to retrieve the evaluation in the cache
|
||||
+ * result: result of the evaluation
|
||||
+ * filter_valid: PR_FALSE means that the filter key is truncated, PR_TRUE else
|
||||
+ */
|
||||
+static void
|
||||
+targetfilter_cache_add(struct acl_pblock *aclpb, char *filter, int result, PRBool filter_valid)
|
||||
+{
|
||||
+ struct targetfilter_cached_result *results;
|
||||
+ if (! filter_valid || ! aclpb->targetfilter_cache_enabled) {
|
||||
+ /* targetfilter cache is disabled or filter is truncated */
|
||||
+ return;
|
||||
+ }
|
||||
+ results = (struct targetfilter_cached_result *) slapi_ch_calloc(1, (sizeof(struct targetfilter_cached_result)));
|
||||
+ results->filter = slapi_ch_strdup(filter);
|
||||
+ results->next = aclpb->aclpb_curr_entry_targetfilters;
|
||||
+ results->matching_result = result;
|
||||
+ aclpb->aclpb_curr_entry_targetfilters = results;
|
||||
+}
|
||||
/***************************************************************************
|
||||
*
|
||||
* acl_access_allowed
|
||||
@@ -496,6 +563,7 @@ acl_access_allowed(
|
||||
|
||||
/* Keep the ptr to the current entry */
|
||||
aclpb->aclpb_curr_entry = (Slapi_Entry *)e;
|
||||
+ targetfilter_cache_free(aclpb);
|
||||
|
||||
/* Get the attr info */
|
||||
deallocate_attrEval = acl__get_attrEval(aclpb, attr);
|
||||
@@ -1924,7 +1992,7 @@ acl_modified(Slapi_PBlock *pb, int optype, Slapi_DN *e_sdn, void *change)
|
||||
* None.
|
||||
*
|
||||
**************************************************************************/
|
||||
-static int
|
||||
+int
|
||||
acl__scan_for_acis(Acl_PBlock *aclpb, int *err)
|
||||
{
|
||||
aci_t *aci;
|
||||
@@ -2405,10 +2473,68 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
|
||||
ACL_EVAL_TARGET_FILTER);
|
||||
slapi_ch_free((void **)&lasinfo);
|
||||
} else {
|
||||
- if (slapi_vattr_filter_test(NULL, aclpb->aclpb_curr_entry,
|
||||
- aci->targetFilter,
|
||||
- 0 /*don't do access check*/) != 0) {
|
||||
- filter_matched = ACL_FALSE;
|
||||
+ Slapi_DN *sdn;
|
||||
+ char* attr_evaluated = "None";
|
||||
+ char logbuf[2048] = {0};
|
||||
+ char *redzone = "the redzone";
|
||||
+ int32_t redzone_idx;
|
||||
+ char *filterstr; /* key to retrieve/add targetfilter value in the cache */
|
||||
+ PRBool valid_filter;
|
||||
+ struct targetfilter_cached_result *previous_filter_test;
|
||||
+
|
||||
+ /* only usefull for debug purpose */
|
||||
+ if (aclpb->aclpb_curr_attrEval && aclpb->aclpb_curr_attrEval->attrEval_name) {
|
||||
+ attr_evaluated = aclpb->aclpb_curr_attrEval->attrEval_name;
|
||||
+ }
|
||||
+ sdn = slapi_entry_get_sdn(aclpb->aclpb_curr_entry);
|
||||
+
|
||||
+ /* The key for the cache is the string representation of the original filter
|
||||
+ * If the string can not fit into the provided buffer (overwrite redzone)
|
||||
+ * then the filter is said invalid (for the cache) and it will be evaluated
|
||||
+ */
|
||||
+ redzone_idx = sizeof(logbuf) - 1 - strlen(redzone);
|
||||
+ strcpy(&logbuf[redzone_idx], redzone);
|
||||
+ filterstr = slapi_filter_to_string(aci->targetFilter, logbuf, sizeof(logbuf));
|
||||
+
|
||||
+ /* if the redzone was overwritten that means filterstr is truncated and not valid */
|
||||
+ valid_filter = (strcmp(&logbuf[redzone_idx], redzone) == 0);
|
||||
+ if (!valid_filter) {
|
||||
+ strcpy(&logbuf[50], "...");
|
||||
+ slapi_log_err(SLAPI_LOG_ACL, "acl__ressource_match_aci", "targetfilter too large (can not be cache) %s\n", logbuf);
|
||||
+ }
|
||||
+
|
||||
+ previous_filter_test = targetfilter_cache_lookup(aclpb, filterstr, valid_filter);
|
||||
+ if (previous_filter_test) {
|
||||
+ /* The filter was already evaluated against that same entry */
|
||||
+ if (previous_filter_test->matching_result == 0) {
|
||||
+ slapi_log_err(SLAPI_LOG_ACL, "acl__ressource_match_aci", "cached result for entry %s did NOT match %s (%s)\n",
|
||||
+ slapi_sdn_get_ndn(sdn),
|
||||
+ filterstr,
|
||||
+ attr_evaluated);
|
||||
+ filter_matched = ACL_FALSE;
|
||||
+ } else {
|
||||
+ slapi_log_err(SLAPI_LOG_ACL, "acl__ressource_match_aci", "cached result for entry %s did match %s (%s)\n",
|
||||
+ slapi_sdn_get_ndn(sdn),
|
||||
+ filterstr,
|
||||
+ attr_evaluated);
|
||||
+ }
|
||||
+ } else {
|
||||
+ /* The filter has not already been evaluated against that entry
|
||||
+ * evaluate it and cache the result
|
||||
+ */
|
||||
+ if (slapi_vattr_filter_test(NULL, aclpb->aclpb_curr_entry,
|
||||
+ aci->targetFilter,
|
||||
+ 0 /*don't do access check*/) != 0) {
|
||||
+ filter_matched = ACL_FALSE;
|
||||
+ targetfilter_cache_add(aclpb, filterstr, 0, valid_filter); /* does not match */
|
||||
+ } else {
|
||||
+ targetfilter_cache_add(aclpb, filterstr, 1, valid_filter); /* does match */
|
||||
+ }
|
||||
+ slapi_log_err(SLAPI_LOG_ACL, "acl__ressource_match_aci", "entry %s %s match %s (%s)\n",
|
||||
+ slapi_sdn_get_ndn(sdn),
|
||||
+ filter_matched == ACL_FALSE ? "does not" : "does",
|
||||
+ filterstr,
|
||||
+ attr_evaluated);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2858,7 +2984,7 @@ acl__resource_match_aci_EXIT:
|
||||
* None.
|
||||
*
|
||||
**************************************************************************/
|
||||
-static int
|
||||
+int
|
||||
acl__TestRights(Acl_PBlock *aclpb, int access, const char **right, const char **map_generic, aclResultReason_t *result_reason)
|
||||
{
|
||||
ACLEvalHandle_t *acleval;
|
||||
diff --git a/ldap/servers/plugins/acl/acl.h b/ldap/servers/plugins/acl/acl.h
|
||||
index becc7f920..c9b9efa56 100644
|
||||
--- a/ldap/servers/plugins/acl/acl.h
|
||||
+++ b/ldap/servers/plugins/acl/acl.h
|
||||
@@ -407,6 +407,17 @@ struct aci_container
|
||||
};
|
||||
typedef struct aci_container AciContainer;
|
||||
|
||||
+/* This structure is stored in the aclpb.
|
||||
+ * It is a linked list containing the result of
|
||||
+ * the filter matching against a specific entry.
|
||||
+ *
|
||||
+ * This list is free for each new entry in the aclpb*/
|
||||
+struct targetfilter_cached_result {
|
||||
+ char *filter; /* strdup of string representation of aci->targetFilter */
|
||||
+ int matching_result; /* 0 does not match / 1 does match */
|
||||
+ struct targetfilter_cached_result *next; /* next targetfilter already evaluated */
|
||||
+};
|
||||
+
|
||||
struct acl_pblock
|
||||
{
|
||||
int aclpb_state;
|
||||
@@ -476,6 +487,8 @@ struct acl_pblock
|
||||
|
||||
/* Current entry/dn/attr evaluation info */
|
||||
Slapi_Entry *aclpb_curr_entry; /* current Entry being processed */
|
||||
+ int32_t targetfilter_cache_enabled;
|
||||
+ struct targetfilter_cached_result *aclpb_curr_entry_targetfilters;
|
||||
int aclpb_num_entries;
|
||||
Slapi_DN *aclpb_curr_entry_sdn; /* Entry's SDN */
|
||||
Slapi_DN *aclpb_authorization_sdn; /* dn used for authorization */
|
||||
@@ -723,6 +736,7 @@ void acl_modified(Slapi_PBlock *pb, int optype, Slapi_DN *e_sdn, void *change);
|
||||
|
||||
int acl_access_allowed_disjoint_resource(Slapi_PBlock *pb, Slapi_Entry *e, char *attr, struct berval *val, int access);
|
||||
int acl_access_allowed_main(Slapi_PBlock *pb, Slapi_Entry *e, char **attrs, struct berval *val, int access, int flags, char **errbuf);
|
||||
+void targetfilter_cache_free(struct acl_pblock *aclpb);
|
||||
int acl_access_allowed(Slapi_PBlock *pb, Slapi_Entry *e, char *attr, struct berval *val, int access);
|
||||
aclUserGroup *acl_get_usersGroup(struct acl_pblock *aclpb, char *n_dn);
|
||||
void acl_print_acllib_err(NSErr_t *errp, char *str);
|
||||
diff --git a/ldap/servers/plugins/acl/acl_ext.c b/ldap/servers/plugins/acl/acl_ext.c
|
||||
index 797c5d2fd..c88f7389f 100644
|
||||
--- a/ldap/servers/plugins/acl/acl_ext.c
|
||||
+++ b/ldap/servers/plugins/acl/acl_ext.c
|
||||
@@ -189,6 +189,11 @@ acl_operation_ext_constructor(void *object __attribute__((unused)), void *parent
|
||||
slapi_log_err(SLAPI_LOG_ERR, plugin_name,
|
||||
"acl_operation_ext_constructor - Operation extension allocation Failed\n");
|
||||
}
|
||||
+ /* targetfilter_cache toggle set during aclpb allocation
|
||||
+ * to avoid accessing configuration during the evaluation
|
||||
+ * of each aci
|
||||
+ */
|
||||
+ aclpb->targetfilter_cache_enabled = config_get_targetfilter_cache();
|
||||
|
||||
TNF_PROBE_0_DEBUG(acl_operation_ext_constructor_end, "ACL", "");
|
||||
|
||||
@@ -713,6 +718,7 @@ acl__free_aclpb(Acl_PBlock **aclpb_ptr)
|
||||
slapi_ch_free((void **)&(aclpb->aclpb_curr_entryEval_context.acle_handles_matched_target));
|
||||
slapi_ch_free((void **)&(aclpb->aclpb_prev_entryEval_context.acle_handles_matched_target));
|
||||
slapi_ch_free((void **)&(aclpb->aclpb_prev_opEval_context.acle_handles_matched_target));
|
||||
+ targetfilter_cache_free(aclpb);
|
||||
slapi_sdn_free(&aclpb->aclpb_authorization_sdn);
|
||||
slapi_sdn_free(&aclpb->aclpb_curr_entry_sdn);
|
||||
if (aclpb->aclpb_macro_ht) {
|
||||
@@ -921,6 +927,12 @@ acl__done_aclpb(struct acl_pblock *aclpb)
|
||||
aclpb->aclpb_acleval ? (char *)aclpb->aclpb_acleval : "NULL");
|
||||
}
|
||||
|
||||
+ /* This aclpb return to the aclpb pool, make sure
|
||||
+ * the cached evaluations are freed and that
|
||||
+ * aclpb_curr_entry_targetfilters is NULL
|
||||
+ */
|
||||
+ targetfilter_cache_free(aclpb);
|
||||
+
|
||||
/* Now Free the contents or clean it */
|
||||
slapi_sdn_done(aclpb->aclpb_curr_entry_sdn);
|
||||
if (aclpb->aclpb_Evalattr)
|
||||
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
|
||||
index db7d01bbc..2ea4cd760 100644
|
||||
--- a/ldap/servers/slapd/libglobs.c
|
||||
+++ b/ldap/servers/slapd/libglobs.c
|
||||
@@ -221,6 +221,7 @@ slapi_onoff_t init_return_exact_case;
|
||||
slapi_onoff_t init_result_tweak;
|
||||
slapi_onoff_t init_plugin_track;
|
||||
slapi_onoff_t init_moddn_aci;
|
||||
+slapi_onoff_t init_targetfilter_cache;
|
||||
slapi_onoff_t init_lastmod;
|
||||
slapi_onoff_t init_readonly;
|
||||
slapi_onoff_t init_accesscontrol;
|
||||
@@ -903,6 +904,11 @@ static struct config_get_and_set
|
||||
(void **)&global_slapdFrontendConfig.moddn_aci,
|
||||
CONFIG_ON_OFF, (ConfigGetFunc)config_get_moddn_aci,
|
||||
&init_moddn_aci, NULL},
|
||||
+ {CONFIG_TARGETFILTER_CACHE_ATTRIBUTE, config_set_targetfilter_cache,
|
||||
+ NULL, 0,
|
||||
+ (void **)&global_slapdFrontendConfig.targetfilter_cache,
|
||||
+ CONFIG_ON_OFF, (ConfigGetFunc)config_get_targetfilter_cache,
|
||||
+ &init_targetfilter_cache, NULL},
|
||||
{CONFIG_ATTRIBUTE_NAME_EXCEPTION_ATTRIBUTE, config_set_attrname_exceptions,
|
||||
NULL, 0,
|
||||
(void **)&global_slapdFrontendConfig.attrname_exceptions,
|
||||
@@ -1688,6 +1694,7 @@ FrontendConfig_init(void)
|
||||
init_syntaxcheck = cfg->syntaxcheck = LDAP_ON;
|
||||
init_plugin_track = cfg->plugin_track = LDAP_OFF;
|
||||
init_moddn_aci = cfg->moddn_aci = LDAP_ON;
|
||||
+ init_targetfilter_cache = cfg->targetfilter_cache = LDAP_ON;
|
||||
init_syntaxlogging = cfg->syntaxlogging = LDAP_OFF;
|
||||
init_dn_validate_strict = cfg->dn_validate_strict = LDAP_OFF;
|
||||
init_ds4_compatible_schema = cfg->ds4_compatible_schema = LDAP_OFF;
|
||||
@@ -4053,6 +4060,21 @@ config_set_moddn_aci(const char *attrname, char *value, char *errorbuf, int appl
|
||||
return retVal;
|
||||
}
|
||||
|
||||
+int32_t
|
||||
+config_set_targetfilter_cache(const char *attrname, char *value, char *errorbuf, int apply)
|
||||
+{
|
||||
+ int32_t retVal = LDAP_SUCCESS;
|
||||
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
|
||||
+
|
||||
+ retVal = config_set_onoff(attrname,
|
||||
+ value,
|
||||
+ &(slapdFrontendConfig->targetfilter_cache),
|
||||
+ errorbuf,
|
||||
+ apply);
|
||||
+
|
||||
+ return retVal;
|
||||
+}
|
||||
+
|
||||
int32_t
|
||||
config_set_dynamic_plugins(const char *attrname, char *value, char *errorbuf, int apply)
|
||||
{
|
||||
@@ -5903,6 +5925,13 @@ config_get_moddn_aci(void)
|
||||
return slapi_atomic_load_32(&(slapdFrontendConfig->moddn_aci), __ATOMIC_ACQUIRE);
|
||||
}
|
||||
|
||||
+int32_t
|
||||
+config_get_targetfilter_cache(void)
|
||||
+{
|
||||
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
|
||||
+ return slapi_atomic_load_32(&(slapdFrontendConfig->targetfilter_cache), __ATOMIC_ACQUIRE);
|
||||
+}
|
||||
+
|
||||
int32_t
|
||||
config_get_security(void)
|
||||
{
|
||||
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
|
||||
index 2768d5a1d..c143f3772 100644
|
||||
--- a/ldap/servers/slapd/proto-slap.h
|
||||
+++ b/ldap/servers/slapd/proto-slap.h
|
||||
@@ -263,6 +263,7 @@ int config_set_lastmod(const char *attrname, char *value, char *errorbuf, int ap
|
||||
int config_set_nagle(const char *attrname, char *value, char *errorbuf, int apply);
|
||||
int config_set_accesscontrol(const char *attrname, char *value, char *errorbuf, int apply);
|
||||
int config_set_moddn_aci(const char *attrname, char *value, char *errorbuf, int apply);
|
||||
+int32_t config_set_targetfilter_cache(const char *attrname, char *value, char *errorbuf, int apply);
|
||||
int config_set_security(const char *attrname, char *value, char *errorbuf, int apply);
|
||||
int config_set_readonly(const char *attrname, char *value, char *errorbuf, int apply);
|
||||
int config_set_schemacheck(const char *attrname, char *value, char *errorbuf, int apply);
|
||||
@@ -469,6 +470,7 @@ int config_get_accesscontrol(void);
|
||||
int config_get_return_exact_case(void);
|
||||
int config_get_result_tweak(void);
|
||||
int config_get_moddn_aci(void);
|
||||
+int32_t config_get_targetfilter_cache(void);
|
||||
int config_get_security(void);
|
||||
int config_get_schemacheck(void);
|
||||
int config_get_syntaxcheck(void);
|
||||
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
|
||||
index c48516157..a3c0eff93 100644
|
||||
--- a/ldap/servers/slapd/slap.h
|
||||
+++ b/ldap/servers/slapd/slap.h
|
||||
@@ -2229,6 +2229,7 @@ typedef struct _slapdEntryPoints
|
||||
#define CONFIG_REWRITE_RFC1274_ATTRIBUTE "nsslapd-rewrite-rfc1274"
|
||||
#define CONFIG_PLUGIN_BINDDN_TRACKING_ATTRIBUTE "nsslapd-plugin-binddn-tracking"
|
||||
#define CONFIG_MODDN_ACI_ATTRIBUTE "nsslapd-moddn-aci"
|
||||
+#define CONFIG_TARGETFILTER_CACHE_ATTRIBUTE "nsslapd-targetfilter-cache"
|
||||
#define CONFIG_GLOBAL_BACKEND_LOCK "nsslapd-global-backend-lock"
|
||||
#define CONFIG_ENABLE_NUNC_STANS "nsslapd-enable-nunc-stans"
|
||||
#define CONFIG_ENABLE_UPGRADE_HASH "nsslapd-enable-upgrade-hash"
|
||||
@@ -2401,6 +2402,7 @@ typedef struct _slapdFrontendConfig
|
||||
char **plugin;
|
||||
slapi_onoff_t plugin_track;
|
||||
slapi_onoff_t moddn_aci;
|
||||
+ slapi_onoff_t targetfilter_cache;
|
||||
struct pw_scheme *pw_storagescheme;
|
||||
slapi_onoff_t pwpolicy_local;
|
||||
slapi_onoff_t pw_is_global_policy;
|
||||
--
|
||||
2.31.1
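The cache is controlled by the new cn=config switch nsslapd-targetfilter-cache, which the patch defaults to on. A sketch of toggling it with lib389, assuming inst is a connected DirSrv instance:

# Disable the per-entry targetfilter cache, verify, then restore the default.
inst.config.set('nsslapd-targetfilter-cache', 'off')
assert inst.config.get_attr_val_utf8('nsslapd-targetfilter-cache') == 'off'
inst.config.set('nsslapd-targetfilter-cache', 'on')
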
@@ -1,468 +0,0 @@
From 375c1aad59989fb418ab1ead6050f919cfa1ceea Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Fri, 5 Nov 2021 09:56:43 +0100
Subject: [PATCH 2/3] Issue 4972 - gecos with IA5 introduces a compatibility
 issue with previous (#4981)

releases where it was DirectoryString

Bug description:
For years 'gecos' was DirectoryString (UTF8); with #50933 it was restricted to IA5 (ascii)
https://github.com/389ds/389-ds-base/commit/0683bcde1b667b6d0ca6e8d1ef605f17c51ea2f7#

The IA5 definition conforms to rfc2307 but is a problem for existing deployments
where entries can have a 'gecos' attribute value with UTF8.

Fix description:
Revert the definition of 'gecos' to DirectoryString.

Additional fix to make test_replica_backup_and_restore more
robust to CI.

relates: https://github.com/389ds/389-ds-base/issues/4972

Reviewed by: William Brown, Pierre Rogier, James Chapman (Thanks!)

Platforms tested: F34
---
 .../tests/suites/schema/schema_test.py | 398 +++++++++++++++++-
 ldap/schema/10rfc2307compat.ldif       |   6 +-
 2 files changed, 400 insertions(+), 4 deletions(-)

diff --git a/dirsrvtests/tests/suites/schema/schema_test.py b/dirsrvtests/tests/suites/schema/schema_test.py
|
||||
index d590624b6..5d62b8d59 100644
|
||||
--- a/dirsrvtests/tests/suites/schema/schema_test.py
|
||||
+++ b/dirsrvtests/tests/suites/schema/schema_test.py
|
||||
@@ -18,8 +18,12 @@ import pytest
|
||||
import six
|
||||
from ldap.cidict import cidict
|
||||
from ldap.schema import SubSchema
|
||||
+from lib389.schema import SchemaLegacy
|
||||
from lib389._constants import *
|
||||
-from lib389.topologies import topology_st
|
||||
+from lib389.topologies import topology_st, topology_m2 as topo_m2
|
||||
+from lib389.idm.user import UserAccounts, UserAccount
|
||||
+from lib389.replica import ReplicationManager
|
||||
+from lib389.utils import ensure_bytes
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
@@ -165,6 +169,398 @@ def test_schema_comparewithfiles(topology_st):
|
||||
|
||||
log.info('test_schema_comparewithfiles: PASSED')
|
||||
|
||||
+def test_gecos_directoryString(topology_st):
|
||||
+ """Check that gecos supports directoryString value
|
||||
+
|
||||
+ :id: aee422bb-6299-4124-b5cd-d7393dac19d3
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Add a common user
|
||||
+ 2. replace gecos with a direstoryString value
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ """
|
||||
+
|
||||
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
+
|
||||
+ user_properties = {
|
||||
+ 'uid': 'testuser',
|
||||
+ 'cn' : 'testuser',
|
||||
+ 'sn' : 'user',
|
||||
+ 'uidNumber' : '1000',
|
||||
+ 'gidNumber' : '2000',
|
||||
+ 'homeDirectory' : '/home/testuser',
|
||||
+ }
|
||||
+ testuser = users.create(properties=user_properties)
|
||||
+
|
||||
+ # Add a gecos UTF value
|
||||
+ testuser.replace('gecos', 'Hélène')
|
||||
+
|
||||
+def test_gecos_mixed_definition_topo(topo_m2, request):
|
||||
+ """Check that replication is still working if schema contains
|
||||
+ definitions that does not conform with a replicated entry
|
||||
+
|
||||
+ :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
|
||||
+ :setup: Two suppliers replication setup
|
||||
+ :steps:
|
||||
+ 1. Create a testuser on M1
|
||||
+ 2 Stop M1 and M2
|
||||
+ 3 Change gecos def on M2 to be IA5
|
||||
+ 4 Update testuser with gecos directoryString value
|
||||
+ 5 Check replication is still working
|
||||
+ :expectedresults:
|
||||
+ 1. success
|
||||
+ 2. success
|
||||
+ 3. success
|
||||
+ 4. success
|
||||
+ 5. success
|
||||
+
|
||||
+ """
|
||||
+
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+ m1 = topo_m2.ms["supplier1"]
|
||||
+ m2 = topo_m2.ms["supplier2"]
|
||||
+
|
||||
+
|
||||
+ # create a test user
|
||||
+ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
|
||||
+ testuser = UserAccount(m1, testuser_dn)
|
||||
+ try:
|
||||
+ testuser.create(properties={
|
||||
+ 'uid': 'testuser',
|
||||
+ 'cn': 'testuser',
|
||||
+ 'sn': 'testuser',
|
||||
+ 'uidNumber' : '1000',
|
||||
+ 'gidNumber' : '2000',
|
||||
+ 'homeDirectory' : '/home/testuser',
|
||||
+ })
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ pass
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # Stop suppliers to update the schema
|
||||
+ m1.stop()
|
||||
+ m2.stop()
|
||||
+
|
||||
+ # on M1: gecos is DirectoryString (default)
|
||||
+ # on M2: gecos is IA5
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
|
||||
+ "'gecos' DESC 'The GECOS field; the common name' " +
|
||||
+ "EQUALITY caseIgnoreIA5Match " +
|
||||
+ "SUBSTR caseIgnoreIA5SubstringsMatch " +
|
||||
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
|
||||
+ "SINGLE-VALUE )\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+
|
||||
+ # start the instances
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+
|
||||
+ # Check that gecos is IA5 on M2
|
||||
+ schema = SchemaLegacy(m2)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
|
||||
+
|
||||
+
|
||||
+ # Add a gecos UTF value on M1
|
||||
+ testuser.replace('gecos', 'Hélène')
|
||||
+
|
||||
+ # Check replication is still working
|
||||
+ testuser.replace('displayName', 'ascii value')
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+ testuser_m2 = UserAccount(m2, testuser_dn)
|
||||
+ assert testuser_m2.exists()
|
||||
+ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
|
||||
+
|
||||
+ def fin():
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+ testuser.delete()
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # on M2 restore a default 99user.ldif
|
||||
+ m2.stop()
|
||||
+ os.remove(m2.schemadir + "/99user.ldif")
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+ m2.start()
|
||||
+ m1.start()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+def test_gecos_directoryString_wins_M1(topo_m2, request):
|
||||
+ """Check that if inital syntax are IA5(M2) and DirectoryString(M1)
|
||||
+ Then directoryString wins when nsSchemaCSN M1 is the greatest
|
||||
+
|
||||
+ :id: ad119fa5-7671-45c8-b2ef-0b28ffb68fdb
|
||||
+ :setup: Two suppliers replication setup
|
||||
+ :steps:
|
||||
+ 1. Create a testuser on M1
|
||||
+ 2 Stop M1 and M2
|
||||
+ 3 Change gecos def on M2 to be IA5
|
||||
+ 4 Start M1 and M2
|
||||
+ 5 Update M1 schema so that M1 has greatest nsSchemaCSN
|
||||
+ 6 Update testuser with gecos directoryString value
|
||||
+ 7 Check replication is still working
|
||||
+ 8 Check gecos is DirectoryString on M1 and M2
|
||||
+ :expectedresults:
|
||||
+ 1. success
|
||||
+ 2. success
|
||||
+ 3. success
|
||||
+ 4. success
|
||||
+ 5. success
|
||||
+ 6. success
|
||||
+ 7. success
|
||||
+ 8. success
|
||||
+
|
||||
+ """
|
||||
+
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+ m1 = topo_m2.ms["supplier1"]
|
||||
+ m2 = topo_m2.ms["supplier2"]
|
||||
+
|
||||
+
|
||||
+ # create a test user
|
||||
+ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
|
||||
+ testuser = UserAccount(m1, testuser_dn)
|
||||
+ try:
|
||||
+ testuser.create(properties={
|
||||
+ 'uid': 'testuser',
|
||||
+ 'cn': 'testuser',
|
||||
+ 'sn': 'testuser',
|
||||
+ 'uidNumber' : '1000',
|
||||
+ 'gidNumber' : '2000',
|
||||
+ 'homeDirectory' : '/home/testuser',
|
||||
+ })
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ pass
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # Stop suppliers to update the schema
|
||||
+ m1.stop()
|
||||
+ m2.stop()
|
||||
+
|
||||
+ # on M1: gecos is DirectoryString (default)
|
||||
+ # on M2: gecos is IA5
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
|
||||
+ "'gecos' DESC 'The GECOS field; the common name' " +
|
||||
+ "EQUALITY caseIgnoreIA5Match " +
|
||||
+ "SUBSTR caseIgnoreIA5SubstringsMatch " +
|
||||
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
|
||||
+ "SINGLE-VALUE )\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+
|
||||
+ # start the instances
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+
|
||||
+ # Check that gecos is IA5 on M2
|
||||
+ schema = SchemaLegacy(m2)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
|
||||
+
|
||||
+
|
||||
+ # update M1 schema to increase its nsschemaCSN
|
||||
+ new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )"
|
||||
+ m1.schema.add_schema('attributetypes', ensure_bytes(new_at))
|
||||
+
|
||||
+ # Add a gecos UTF value on M1
|
||||
+ testuser.replace('gecos', 'Hélène')
|
||||
+
|
||||
+ # Check replication is still working
|
||||
+ testuser.replace('displayName', 'ascii value')
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+ testuser_m2 = UserAccount(m2, testuser_dn)
|
||||
+ assert testuser_m2.exists()
|
||||
+ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
|
||||
+
|
||||
+ # Check that gecos is DirectoryString on M1
|
||||
+ schema = SchemaLegacy(m1)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
|
||||
+
|
||||
+ # Check that gecos is DirectoryString on M2
|
||||
+ schema = SchemaLegacy(m2)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
|
||||
+
|
||||
+ def fin():
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+ testuser.delete()
|
||||
+ m1.schema.del_schema('attributetypes', ensure_bytes(new_at))
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # on M2 restore a default 99user.ldif
|
||||
+ m2.stop()
|
||||
+ os.remove(m2.schemadir + "/99user.ldif")
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+ m2.start()
|
||||
+ m1.start()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+def test_gecos_directoryString_wins_M2(topo_m2, request):
|
||||
+ """Check that if inital syntax are IA5(M2) and DirectoryString(M1)
|
||||
+ Then directoryString wins when nsSchemaCSN M2 is the greatest
|
||||
+
|
||||
+ :id: 2da7f1b1-f86d-4072-a940-ba56d4bc8348
|
||||
+ :setup: Two suppliers replication setup
|
||||
+ :steps:
|
||||
+ 1. Create a testuser on M1
|
||||
+ 2 Stop M1 and M2
|
||||
+ 3 Change gecos def on M2 to be IA5
|
||||
+ 4 Start M1 and M2
|
||||
+ 5 Update M2 schema so that M2 has greatest nsSchemaCSN
|
||||
+ 6 Update testuser on M2 and trigger replication to M1
|
||||
+ 7 Update testuser on M2 with gecos directoryString value
|
||||
+ 8 Check replication is still working
|
||||
+ 9 Check gecos is DirectoryString on M1 and M2
|
||||
+ :expectedresults:
|
||||
+ 1. success
|
||||
+ 2. success
|
||||
+ 3. success
|
||||
+ 4. success
|
||||
+ 5. success
|
||||
+ 6. success
|
||||
+ 7. success
|
||||
+ 8. success
|
||||
+ 9. success
|
||||
+
|
||||
+ """
|
||||
+
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+ m1 = topo_m2.ms["supplier1"]
|
||||
+ m2 = topo_m2.ms["supplier2"]
|
||||
+
|
||||
+
|
||||
+ # create a test user
|
||||
+ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
|
||||
+ testuser = UserAccount(m1, testuser_dn)
|
||||
+ try:
|
||||
+ testuser.create(properties={
|
||||
+ 'uid': 'testuser',
|
||||
+ 'cn': 'testuser',
|
||||
+ 'sn': 'testuser',
|
||||
+ 'uidNumber' : '1000',
|
||||
+ 'gidNumber' : '2000',
|
||||
+ 'homeDirectory' : '/home/testuser',
|
||||
+ })
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ pass
|
||||
+ testuser.replace('displayName', 'to trigger replication M1-> M2')
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # Stop suppliers to update the schema
|
||||
+ m1.stop()
|
||||
+ m2.stop()
|
||||
+
|
||||
+ # on M1: gecos is DirectoryString (default)
|
||||
+ # on M2: gecos is IA5
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
|
||||
+ "'gecos' DESC 'The GECOS field; the common name' " +
|
||||
+ "EQUALITY caseIgnoreIA5Match " +
|
||||
+ "SUBSTR caseIgnoreIA5SubstringsMatch " +
|
||||
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
|
||||
+ "SINGLE-VALUE )\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+
|
||||
+ # start the instances
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+
|
||||
+ # Check that gecos is IA5 on M2
|
||||
+ schema = SchemaLegacy(m2)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
|
||||
+
|
||||
+ # update M2 schema to increase its nsschemaCSN
|
||||
+ new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )"
|
||||
+ m2.schema.add_schema('attributetypes', ensure_bytes(new_at))
|
||||
+
|
||||
+ # update just to trigger replication M2->M1
|
||||
+ # and update of M2 schema
|
||||
+ testuser_m2 = UserAccount(m2, testuser_dn)
|
||||
+ testuser_m2.replace('displayName', 'to trigger replication M2-> M1')
|
||||
+
|
||||
+ # Add a gecos UTF value on M1
|
||||
+ testuser.replace('gecos', 'Hélène')
|
||||
+
|
||||
+ # Check replication is still working
|
||||
+ testuser.replace('displayName', 'ascii value')
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+ assert testuser_m2.exists()
|
||||
+ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
|
||||
+
|
||||
+ # Check that gecos is DirectoryString on M1
|
||||
+ schema = SchemaLegacy(m1)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
|
||||
+
|
||||
+ # Check that gecos is DirectoryString on M2
|
||||
+ schema = SchemaLegacy(m2)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
|
||||
+
|
||||
+ def fin():
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+ testuser.delete()
|
||||
+ m1.schema.del_schema('attributetypes', ensure_bytes(new_at))
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # on M2 restore a default 99user.ldif
|
||||
+ m2.stop()
|
||||
+ os.remove(m2.schemadir + "/99user.ldif")
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+ m2.start()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif
|
||||
index 8ba72e1e3..998b8983b 100644
|
||||
--- a/ldap/schema/10rfc2307compat.ldif
|
||||
+++ b/ldap/schema/10rfc2307compat.ldif
|
||||
@@ -21,9 +21,9 @@ attributeTypes: (
|
||||
attributeTypes: (
|
||||
1.3.6.1.1.1.1.2 NAME 'gecos'
|
||||
DESC 'The GECOS field; the common name'
|
||||
- EQUALITY caseIgnoreIA5Match
|
||||
- SUBSTR caseIgnoreIA5SubstringsMatch
|
||||
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
|
||||
+ EQUALITY caseIgnoreMatch
|
||||
+ SUBSTR caseIgnoreSubstringsMatch
|
||||
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
|
||||
SINGLE-VALUE
|
||||
)
|
||||
attributeTypes: (
|
||||
--
|
||||
2.31.1
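After this change, the syntax can be confirmed on a live server the same way the test above does. A short sketch, assuming inst is a connected DirSrv instance:

from lib389.schema import SchemaLegacy

attributetypes = SchemaLegacy(inst).query_attributetype('gecos')
# 1.3.6.1.4.1.1466.115.121.1.15 is the DirectoryString syntax OID.
assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
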
|
||||
|
@ -1,120 +0,0 @@
|
||||
From 096c95690a27c942d47b20a85fa3d7fe15ffe624 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 8 Sep 2021 10:31:19 -0400
|
||||
Subject: [PATCH] Issue 4910 - db reindex corrupts RUV tombstone nsuiqueid
|
||||
index
|
||||
|
||||
Bug Description: During a reindex task we skip the RUV tombstone entry,
|
||||
which corrupts the nsuniqueid index.
|
||||
|
||||
Fix Description: Make sure we still index nsuniqueid index for
|
||||
the RUV tombstone entry.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4910
|
||||
|
||||
Reviewed by: firstyear & progier389 (Thanks!!)
|
||||
---
|
||||
.../tests/suites/replication/ruvstore_test.py | 35 +++++++++++++++++++
|
||||
.../slapd/back-ldbm/db-bdb/bdb_ldif2db.c | 12 +++++--
|
||||
2 files changed, 44 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/ruvstore_test.py b/dirsrvtests/tests/suites/replication/ruvstore_test.py
|
||||
index c04fd079e..4e5326227 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/ruvstore_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/ruvstore_test.py
|
||||
@@ -12,6 +12,8 @@ import ldap
|
||||
import pytest
|
||||
from ldif import LDIFParser
|
||||
from lib389.replica import Replicas
|
||||
+from lib389.backend import Backends
|
||||
+from lib389.idm.domain import Domain
|
||||
from lib389.idm.user import UserAccounts
|
||||
from lib389.topologies import topology_m2 as topo
|
||||
from lib389._constants import *
|
||||
@@ -156,6 +158,39 @@ def test_memoryruv_sync_with_databaseruv(topo):
|
||||
_compare_memoryruv_and_databaseruv(topo, 'delete')
|
||||
|
||||
|
||||
+def test_ruv_after_reindex(topo):
|
||||
+ """Test that the tombstone RUV entry is not corrupted after a reindex task
|
||||
+
|
||||
+ :id: 988c0fab-1905-4dc5-a45d-fbf195843a33
|
||||
+ :setup: 2 suppliers
|
||||
+ :steps:
|
||||
+ 1. Reindex database
|
||||
+ 2. Perform some updates
|
||||
+ 3. Check error log does not have "_entryrdn_insert_key" errors
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topo.ms['supplier1']
|
||||
+ suffix = Domain(inst, "ou=people," + DEFAULT_SUFFIX)
|
||||
+ backends = Backends(inst)
|
||||
+ backend = backends.get(DEFAULT_BENAME)
|
||||
+
|
||||
+ # Reindex nsuniqueid
|
||||
+ backend.reindex(attrs=['nsuniqueid'], wait=True)
|
||||
+
|
||||
+ # Do some updates
|
||||
+ for idx in range(0, 5):
|
||||
+ suffix.replace('description', str(idx))
|
||||
+
|
||||
+ # Check error log for RUV entryrdn errors. Stopping instance forces RUV
|
||||
+ # to be written and quickly exposes the error
|
||||
+ inst.stop()
|
||||
+ assert not inst.searchErrorsLog("entryrdn_insert_key")
|
||||
+
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
index 506c285a3..6100dbf77 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
@@ -25,6 +25,7 @@
#define DB2INDEX_ENTRYRDN 0x2 /* index entryrdn */
#define DB2LDIF_ENTRYRDN 0x4 /* export entryrdn */
#define DB2INDEX_OBJECTCLASS 0x10 /* for reindexing "objectclass: nstombstone" */
+#define DB2INDEX_NSUNIQUEID 0x20 /* for reindexing RUV tombstone */

#define LDIF2LDBM_EXTBITS(x) ((x)&0xf)

@@ -1543,6 +1544,9 @@ bdb_db2index(Slapi_PBlock *pb)
            if (strcasecmp(attrs[i] + 1, SLAPI_ATTR_OBJECTCLASS) == 0) {
                index_ext |= DB2INDEX_OBJECTCLASS;
            }
+           if (strcasecmp(attrs[i] + 1, SLAPI_ATTR_UNIQUEID) == 0) {
+               index_ext |= DB2INDEX_NSUNIQUEID;
+           }
            charray_add(&indexAttrs, attrs[i] + 1);
            ai->ai_indexmask |= INDEX_OFFLINE;
            slapi_task_log_notice(task, "%s: Indexing attribute: %s",
@@ -1895,7 +1899,7 @@ bdb_db2index(Slapi_PBlock *pb)
             * Update the attribute indexes
             */
            if (indexAttrs) {
-               if (istombstone && !(index_ext & (DB2INDEX_ENTRYRDN | DB2INDEX_OBJECTCLASS))) {
+               if (istombstone && !(index_ext & (DB2INDEX_ENTRYRDN | DB2INDEX_OBJECTCLASS | DB2INDEX_NSUNIQUEID))) {
                    /* if it is a tombstone entry, just entryrdn or "objectclass: nstombstone"
                     * need to be reindexed. the to-be-indexed list does not contain them. */
                    backentry_free(&ep);
@@ -1915,8 +1919,10 @@ bdb_db2index(Slapi_PBlock *pb)
                if (istombstone) {
                    if (!slapi_attr_type_cmp(indexAttrs[j], SLAPI_ATTR_OBJECTCLASS, SLAPI_TYPE_CMP_SUBTYPE)) {
                        is_tombstone_obj = 1; /* is tombstone && is objectclass. need to index "nstombstone"*/
-                   } else if (slapi_attr_type_cmp(indexAttrs[j], LDBM_ENTRYRDN_STR, SLAPI_TYPE_CMP_SUBTYPE)) {
-                       /* Entry is a tombstone && this index is not an entryrdn. */
+                   } else if (slapi_attr_type_cmp(indexAttrs[j], LDBM_ENTRYRDN_STR, SLAPI_TYPE_CMP_SUBTYPE) &&
+                              slapi_attr_type_cmp(indexAttrs[j], SLAPI_ATTR_UNIQUEID, SLAPI_TYPE_CMP_SUBTYPE))
+                   {
+                       /* Entry is a tombstone && this index is not entryrdn or nsuniqueid */
                        continue;
                    }
                }
--
2.31.1
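
The heart of the change is the new DB2INDEX_NSUNIQUEID bit: a reindex that touches only
nsuniqueid no longer falls through the tombstone short-circuit in bdb_db2index(). A small
Python illustration of that bit test (values copied from the #defines above; this snippet
is not part of the patch):

# Flag values from bdb_ldif2db.c
DB2INDEX_ENTRYRDN = 0x2
DB2INDEX_OBJECTCLASS = 0x10
DB2INDEX_NSUNIQUEID = 0x20  # added by this patch

# A "reindex nsuniqueid" run: previously no flag was set for it, now it is.
index_ext_before = 0x0
index_ext_after = DB2INDEX_NSUNIQUEID

tombstone_mask = DB2INDEX_ENTRYRDN | DB2INDEX_OBJECTCLASS | DB2INDEX_NSUNIQUEID

# bdb_db2index() skips the RUV tombstone whenever this test is zero:
assert not (index_ext_before & tombstone_mask)  # old behaviour: tombstone skipped
assert index_ext_after & tombstone_mask         # new behaviour: tombstone still indexed
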
107
389-ds-base.spec
@ -47,7 +47,7 @@ ExcludeArch: i686

Summary: 389 Directory Server (base)
Name: 389-ds-base
Version: 1.4.3.32
Version: 1.4.3.35
Release: %{?relprefix}1%{?prerel}%{?dist}
License: GPLv3+ and ASL 2.0 and MIT
URL: https://www.port389.org
@ -66,75 +66,80 @@ Provides: bundled(crate(base64)) = 0.13.1
Provides: bundled(crate(bitflags)) = 1.3.2
Provides: bundled(crate(byteorder)) = 1.4.3
Provides: bundled(crate(cbindgen)) = 0.9.1
Provides: bundled(crate(cc)) = 1.0.76
Provides: bundled(crate(cc)) = 1.0.79
Provides: bundled(crate(cfg-if)) = 1.0.0
Provides: bundled(crate(clap)) = 2.34.0
Provides: bundled(crate(concread)) = 0.2.21
Provides: bundled(crate(crossbeam)) = 0.8.2
Provides: bundled(crate(crossbeam-channel)) = 0.5.6
Provides: bundled(crate(crossbeam-deque)) = 0.8.2
Provides: bundled(crate(crossbeam-epoch)) = 0.9.11
Provides: bundled(crate(crossbeam-queue)) = 0.3.6
Provides: bundled(crate(crossbeam-utils)) = 0.8.12
Provides: bundled(crate(crossbeam-channel)) = 0.5.8
Provides: bundled(crate(crossbeam-deque)) = 0.8.3
Provides: bundled(crate(crossbeam-epoch)) = 0.9.14
Provides: bundled(crate(crossbeam-queue)) = 0.3.8
Provides: bundled(crate(crossbeam-utils)) = 0.8.15
Provides: bundled(crate(entryuuid)) = 0.1.0
Provides: bundled(crate(entryuuid_syntax)) = 0.1.0
Provides: bundled(crate(fastrand)) = 1.8.0
Provides: bundled(crate(errno)) = 0.3.1
Provides: bundled(crate(errno-dragonfly)) = 0.1.2
Provides: bundled(crate(fastrand)) = 1.9.0
Provides: bundled(crate(fernet)) = 0.1.4
Provides: bundled(crate(foreign-types)) = 0.3.2
Provides: bundled(crate(foreign-types-shared)) = 0.1.1
Provides: bundled(crate(getrandom)) = 0.2.8
Provides: bundled(crate(getrandom)) = 0.2.9
Provides: bundled(crate(hashbrown)) = 0.12.3
Provides: bundled(crate(hermit-abi)) = 0.1.19
Provides: bundled(crate(hermit-abi)) = 0.3.1
Provides: bundled(crate(instant)) = 0.1.12
Provides: bundled(crate(itoa)) = 1.0.4
Provides: bundled(crate(jobserver)) = 0.1.25
Provides: bundled(crate(libc)) = 0.2.137
Provides: bundled(crate(io-lifetimes)) = 1.0.10
Provides: bundled(crate(itoa)) = 1.0.6
Provides: bundled(crate(jobserver)) = 0.1.26
Provides: bundled(crate(libc)) = 0.2.144
Provides: bundled(crate(librnsslapd)) = 0.1.0
Provides: bundled(crate(librslapd)) = 0.1.0
Provides: bundled(crate(linux-raw-sys)) = 0.3.8
Provides: bundled(crate(lock_api)) = 0.4.9
Provides: bundled(crate(log)) = 0.4.17
Provides: bundled(crate(lru)) = 0.7.8
Provides: bundled(crate(memoffset)) = 0.6.5
Provides: bundled(crate(once_cell)) = 1.16.0
Provides: bundled(crate(openssl)) = 0.10.42
Provides: bundled(crate(openssl-macros)) = 0.1.0
Provides: bundled(crate(openssl-sys)) = 0.9.77
Provides: bundled(crate(memoffset)) = 0.8.0
Provides: bundled(crate(once_cell)) = 1.17.1
Provides: bundled(crate(openssl)) = 0.10.52
Provides: bundled(crate(openssl-macros)) = 0.1.1
Provides: bundled(crate(openssl-sys)) = 0.9.87
Provides: bundled(crate(parking_lot)) = 0.11.2
Provides: bundled(crate(parking_lot_core)) = 0.8.5
Provides: bundled(crate(parking_lot_core)) = 0.8.6
Provides: bundled(crate(paste)) = 0.1.18
Provides: bundled(crate(paste-impl)) = 0.1.18
Provides: bundled(crate(pin-project-lite)) = 0.2.9
Provides: bundled(crate(pkg-config)) = 0.3.26
Provides: bundled(crate(pkg-config)) = 0.3.27
Provides: bundled(crate(ppv-lite86)) = 0.2.17
Provides: bundled(crate(proc-macro-hack)) = 0.5.19
Provides: bundled(crate(proc-macro2)) = 1.0.47
Provides: bundled(crate(proc-macro-hack)) = 0.5.20+deprecated
Provides: bundled(crate(proc-macro2)) = 1.0.58
Provides: bundled(crate(pwdchan)) = 0.1.0
Provides: bundled(crate(quote)) = 1.0.21
Provides: bundled(crate(quote)) = 1.0.27
Provides: bundled(crate(rand)) = 0.8.5
Provides: bundled(crate(rand_chacha)) = 0.3.1
Provides: bundled(crate(rand_core)) = 0.6.4
Provides: bundled(crate(redox_syscall)) = 0.2.16
Provides: bundled(crate(remove_dir_all)) = 0.5.3
Provides: bundled(crate(redox_syscall)) = 0.3.5
Provides: bundled(crate(rsds)) = 0.1.0
Provides: bundled(crate(ryu)) = 1.0.11
Provides: bundled(crate(rustix)) = 0.37.19
Provides: bundled(crate(ryu)) = 1.0.13
Provides: bundled(crate(scopeguard)) = 1.1.0
Provides: bundled(crate(serde)) = 1.0.147
Provides: bundled(crate(serde_derive)) = 1.0.147
Provides: bundled(crate(serde_json)) = 1.0.87
Provides: bundled(crate(serde)) = 1.0.163
Provides: bundled(crate(serde_derive)) = 1.0.163
Provides: bundled(crate(serde_json)) = 1.0.96
Provides: bundled(crate(slapd)) = 0.1.0
Provides: bundled(crate(slapi_r_plugin)) = 0.1.0
Provides: bundled(crate(smallvec)) = 1.10.0
Provides: bundled(crate(strsim)) = 0.8.0
Provides: bundled(crate(syn)) = 1.0.103
Provides: bundled(crate(synstructure)) = 0.12.6
Provides: bundled(crate(tempfile)) = 3.3.0
Provides: bundled(crate(syn)) = 1.0.109
Provides: bundled(crate(syn)) = 2.0.16
Provides: bundled(crate(tempfile)) = 3.5.0
Provides: bundled(crate(textwrap)) = 0.11.0
Provides: bundled(crate(tokio)) = 1.21.2
Provides: bundled(crate(tokio-macros)) = 1.8.0
Provides: bundled(crate(toml)) = 0.5.9
Provides: bundled(crate(unicode-ident)) = 1.0.5
Provides: bundled(crate(tokio)) = 1.28.1
Provides: bundled(crate(tokio-macros)) = 2.1.0
Provides: bundled(crate(toml)) = 0.5.11
Provides: bundled(crate(unicode-ident)) = 1.0.8
Provides: bundled(crate(unicode-width)) = 0.1.10
Provides: bundled(crate(unicode-xid)) = 0.2.4
Provides: bundled(crate(uuid)) = 0.8.2
Provides: bundled(crate(vcpkg)) = 0.2.15
Provides: bundled(crate(vec_map)) = 0.8.2
@ -143,8 +148,26 @@ Provides: bundled(crate(wasi)) = 0.11.0+wasi_snapshot_preview1
Provides: bundled(crate(winapi)) = 0.3.9
Provides: bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0
Provides: bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0
Provides: bundled(crate(zeroize)) = 1.5.7
Provides: bundled(crate(zeroize_derive)) = 1.3.2
Provides: bundled(crate(windows-sys)) = 0.45.0
Provides: bundled(crate(windows-sys)) = 0.48.0
Provides: bundled(crate(windows-targets)) = 0.42.2
Provides: bundled(crate(windows-targets)) = 0.48.0
Provides: bundled(crate(windows_aarch64_gnullvm)) = 0.42.2
Provides: bundled(crate(windows_aarch64_gnullvm)) = 0.48.0
Provides: bundled(crate(windows_aarch64_msvc)) = 0.42.2
Provides: bundled(crate(windows_aarch64_msvc)) = 0.48.0
Provides: bundled(crate(windows_i686_gnu)) = 0.42.2
Provides: bundled(crate(windows_i686_gnu)) = 0.48.0
Provides: bundled(crate(windows_i686_msvc)) = 0.42.2
Provides: bundled(crate(windows_i686_msvc)) = 0.48.0
Provides: bundled(crate(windows_x86_64_gnu)) = 0.42.2
Provides: bundled(crate(windows_x86_64_gnu)) = 0.48.0
Provides: bundled(crate(windows_x86_64_gnullvm)) = 0.42.2
Provides: bundled(crate(windows_x86_64_gnullvm)) = 0.48.0
Provides: bundled(crate(windows_x86_64_msvc)) = 0.42.2
Provides: bundled(crate(windows_x86_64_msvc)) = 0.48.0
Provides: bundled(crate(zeroize)) = 1.6.0
Provides: bundled(crate(zeroize_derive)) = 1.4.2
##### Bundled cargo crates list - END #####

BuildRequires: nspr-devel >= 4.32
@ -208,6 +231,7 @@ BuildRequires: python%{python3_pkgversion}-argcomplete
BuildRequires: python%{python3_pkgversion}-argparse-manpage
BuildRequires: python%{python3_pkgversion}-policycoreutils
BuildRequires: python%{python3_pkgversion}-libselinux
BuildRequires: python%{python3_pkgversion}-cryptography

# For cockpit
BuildRequires: rsync
@ -270,7 +294,7 @@ Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download
%endif
%if %{use_rust}
Source4: vendor-%{version}-1.tar.gz
Source5: Cargo.lock
Source5: Cargo-%{version}.lock
%endif

%description
@ -381,6 +405,7 @@ Requires: python%{python3_pkgversion}-argcomplete
Requires: python%{python3_pkgversion}-libselinux
Requires: python%{python3_pkgversion}-setuptools
Requires: python%{python3_pkgversion}-distro
Requires: python%{python3_pkgversion}-cryptography
%{?python_provide:%python_provide python%{python3_pkgversion}-lib389}

%description -n python%{python3_pkgversion}-lib389
@ -401,7 +426,7 @@ A cockpit UI Plugin for configuring and administering the 389 Directory Server
%autosetup -p1 -v -n %{name}-%{version}%{?prerel}
%if %{use_rust}
tar xvzf %{SOURCE4}
cp %{SOURCE5} src/
cp %{SOURCE5} src/Cargo.lock
%endif
%if %{bundle_jemalloc}
%setup -q -n %{name}-%{version}%{?prerel} -T -D -b 3
@ -892,6 +917,10 @@ exit 0
%doc README.md

%changelog
* Mon May 22 2023 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.35-1
- Bump version to 1.4.3.35-1
- Resolves: rhbz#2188628 - Rebase 389-ds-base in RHEL 8.9 to 1.4.3.25

* Tue Nov 15 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.32-1
- Bump version to 1.4.3.32-1
- Resolves: Bug 2098138 - broken nsslapd-subtree-rename-switch option in rhds11
5
sources
@ -1,3 +1,4 @@
SHA512 (389-ds-base-1.4.3.32.tar.bz2) = c06520c51f01e87d6789e9ce407f7fecaabb8b7203f08d6284a72ae531ad3a68a3cb3318e31135bf1efcaf93b92c268655ef3739fe8d56d0e2c07fe2091c9a2c
SHA512 (vendor-1.4.3.35-1.tar.gz) = d0f0e96df7d6fb7689bd6718760fbff808cbad77a0d8c658cf943b64dd815655b3b972bb6a9f7796c1c80443b6b0534bad760fc934f3983875496bbfaf8fd778
SHA512 (Cargo-1.4.3.35.lock) = 3e848199c87084ed452f9bb06fd2f531967a252b895f3ad28da16d21b8c1c8e323cbf7c217277c1b4be233bcc899308c8646068148b3a03a4dd4f71a3f7d7c64
SHA512 (jemalloc-5.3.0.tar.bz2) = 22907bb052096e2caffb6e4e23548aecc5cc9283dce476896a2b1127eee64170e3562fa2e7db9571298814a7a2c7df6e8d1fbe152bd3f3b0c1abec22a2de34b1
SHA512 (vendor-1.4.3.32-1.tar.gz) = 9d5cdf09e26d6d8b11aa5d8c4f7cee4a6d2bb9b9699b6def4309af8b4201f78eabdf5afb3db4a9a08ebba9e863a719dc3f0ce97fee8b884c89b1fe68c48597be
SHA512 (389-ds-base-1.4.3.35.tar.bz2) = cddcd4d2d8794dadfddc2bbff184d8547e0338a670c53d5270069248b056efb81de269d1aecfbf695976e757d681b15b1fe7bccd3d628189bdf1b6992fcf0030
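
The sources entries above use the usual dist-git lookaside format, "SHA512 (<file>) = <hex digest>",
so the fetched tarballs can be spot-checked with a few lines of Python (a sketch; it assumes the
sources file and the tarballs are in the current directory):

import hashlib
import re

# Verify every "SHA512 (<file>) = <digest>" line against the local file.
with open('sources') as f:
    for line in f:
        m = re.match(r'SHA512 \((?P<name>[^)]+)\) = (?P<digest>[0-9a-f]+)', line)
        if not m:
            continue
        h = hashlib.sha512()
        with open(m['name'], 'rb') as blob:
            for chunk in iter(lambda: blob.read(1 << 20), b''):
                h.update(chunk)
        print(m['name'], 'OK' if h.hexdigest() == m['digest'] else 'MISMATCH')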