import 389-ds-base-1.4.3.28-6.module+el8.6.0+14129+983ceada

This commit is contained in:
CentOS Sources 2022-03-29 12:39:44 -04:00 committed by Stepan Oksanichenko
parent 45ccf6c959
commit 0f16213f1f
51 changed files with 3471 additions and 14029 deletions

View File

@ -1,3 +1,3 @@
c69c175a2f27053dffbfefac9c84ff16c7ff4cbf SOURCES/389-ds-base-1.4.3.23.tar.bz2
9274c7088190993255749ea90bbb770c5c5e0f5c SOURCES/389-ds-base-1.4.3.28.tar.bz2
9e06b5cc57fd185379d007696da153893cf73e30 SOURCES/jemalloc-5.2.1.tar.bz2
22b1ef11852864027e184bb4bee56286b855b703 SOURCES/vendor-1.4.3.23-2.tar.gz
c6875530163f0e217ed2e0e5b768506db3d07447 SOURCES/vendor-1.4.3.28-1.tar.gz

4
.gitignore vendored
View File

@ -1,3 +1,3 @@
SOURCES/389-ds-base-1.4.3.23.tar.bz2
SOURCES/389-ds-base-1.4.3.28.tar.bz2
SOURCES/jemalloc-5.2.1.tar.bz2
SOURCES/vendor-1.4.3.23-2.tar.gz
SOURCES/vendor-1.4.3.28-1.tar.gz

View File

@ -0,0 +1,738 @@
From 67e19da62a9e8958458de54173dcd9bcaf53164d Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 30 Sep 2021 15:59:40 +0200
Subject: [PATCH 01/12] Issue 4678 - RFE automatic disable of virtual
attribute checking (#4918)
Bug description:
Virtual attributes are configured via Roles or COS definitions
and registered during initialization of those plugins.
Virtual attributes are processed during search evaluation of
filter and returned attributes. This processing is expensive
and prone to create contention between searches.
Use of virtual attributes is not frequent, so many
deployments process virtual attributes even if there are none.
Fix description:
The fix configure the server to ignore virtual attribute by
default (nsslapd-ignore-virtual-attrs: on).
At startup, if a new virtual attribute is registered or
Roles/COS definitions exist, then the server is
configured to process the virtual attributes
(nsslapd-ignore-virtual-attrs: off)
design: https://www.port389.org/docs/389ds/design/vattr-automatic-toggle.html
relates: https://github.com/389ds/389-ds-base/issues/4678
Reviewed by: William Brown, Simon Pichugin, Mark Reynolds (Thanks !!)
Platforms tested: F34
---
.../tests/suites/config/config_test.py | 40 +++-
dirsrvtests/tests/suites/cos/cos_test.py | 94 ++++++--
dirsrvtests/tests/suites/roles/basic_test.py | 200 +++++++++++++++++-
ldap/servers/plugins/roles/roles_cache.c | 9 +
ldap/servers/slapd/libglobs.c | 2 +-
ldap/servers/slapd/main.c | 2 +
ldap/servers/slapd/proto-slap.h | 1 +
ldap/servers/slapd/vattr.c | 127 +++++++++++
src/lib389/lib389/idm/role.py | 4 +
9 files changed, 455 insertions(+), 24 deletions(-)
diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
index 2ecff8f98..19232c87d 100644
--- a/dirsrvtests/tests/suites/config/config_test.py
+++ b/dirsrvtests/tests/suites/config/config_test.py
@@ -351,7 +351,7 @@ def test_ignore_virtual_attrs(topo):
:setup: Standalone instance
:steps:
1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
- 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF
+ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
3. Set the valid values i.e. on/ON and off/OFF for nsslapd-ignore-virtual-attrs
4. Set invalid value for attribute nsslapd-ignore-virtual-attrs
5. Set nsslapd-ignore-virtual-attrs=off
@@ -374,8 +374,8 @@ def test_ignore_virtual_attrs(topo):
log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
- log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
- assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "off"
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
log.info("Set the valid values i.e. on/ON and off/OFF for nsslapd-ignore-virtual-attrs")
for attribute_value in ['on', 'off', 'ON', 'OFF']:
@@ -415,6 +415,40 @@ def test_ignore_virtual_attrs(topo):
log.info("Test if virtual attribute i.e. postal code not shown while nsslapd-ignore-virtual-attrs: on")
assert not test_user.present('postalcode', '117')
+def test_ignore_virtual_attrs_after_restart(topo):
+ """Test nsslapd-ignore-virtual-attrs configuration attribute
+ The attribute is ON by default. If it is set to OFF, it keeps
+ its value on restart
+
+ :id: ac368649-4fda-473c-9ef8-e0c728b162af
+ :setup: Standalone instance
+ :steps:
+ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
+ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
+ 3. Set nsslapd-ignore-virtual-attrs=off
+ 4. restart the instance
+ 5. Check the attribute nsslapd-ignore-virtual-attrs is OFF
+ :expectedresults:
+ 1. This should be successful
+ 2. This should be successful
+ 3. This should be successful
+ 4. This should be successful
+ 5. This should be successful
+ """
+
+ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
+
+ log.info("Set nsslapd-ignore-virtual-attrs = off")
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'off')
+
+ topo.standalone.restart()
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
@pytest.mark.bz918694
@pytest.mark.ds408
diff --git a/dirsrvtests/tests/suites/cos/cos_test.py b/dirsrvtests/tests/suites/cos/cos_test.py
index d6a498c73..d1f99f96f 100644
--- a/dirsrvtests/tests/suites/cos/cos_test.py
+++ b/dirsrvtests/tests/suites/cos/cos_test.py
@@ -6,6 +6,8 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
+import logging
+import time
import pytest, os, ldap
from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate
from lib389._constants import DEFAULT_SUFFIX
@@ -14,26 +16,37 @@ from lib389.idm.role import FilteredRoles
from lib389.idm.nscontainer import nsContainer
from lib389.idm.user import UserAccount
+logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
pytestmark = pytest.mark.tier1
+@pytest.fixture(scope="function")
+def reset_ignore_vattr(topo, request):
+ default_ignore_vattr_value = topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs')
+ def fin():
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', default_ignore_vattr_value)
-def test_positive(topo):
- """
- :id: a5a74235-597f-4fe8-8c38-826860927472
- :setup: server
- :steps:
- 1. Add filter role entry
- 2. Add ns container
- 3. Add cos template
- 4. Add CosClassic Definition
- 5. Cos entries should be added and searchable
- 6. employeeType attribute should be there in user entry as per the cos plugin property
- :expectedresults:
- 1. Operation should success
- 2. Operation should success
- 3. Operation should success
- 4. Operation should success
- 5. Operation should success
- 6. Operation should success
+ request.addfinalizer(fin)
+
+def test_positive(topo, reset_ignore_vattr):
+ """CoS positive tests
+
+ :id: a5a74235-597f-4fe8-8c38-826860927472
+ :setup: server
+ :steps:
+ 1. Add filter role entry
+ 2. Add ns container
+ 3. Add cos template
+ 4. Add CosClassic Definition
+ 5. Cos entries should be added and searchable
+ 6. employeeType attribute should be there in user entry as per the cos plugin property
+ :expectedresults:
+ 1. Operation should success
+ 2. Operation should success
+ 3. Operation should success
+ 4. Operation should success
+ 5. Operation should success
+ 6. Operation should success
"""
# Adding ns filter role
roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
@@ -77,7 +90,52 @@ def test_positive(topo):
# CoS definition entry's cosSpecifier attribute specifies the employeeType attribute
assert user.present('employeeType')
+ cosdef.delete()
+
+def test_vattr_on_cos_definition(topo, reset_ignore_vattr):
+ """Test nsslapd-ignore-virtual-attrs configuration attribute
+ The attribute is ON by default. If a cos definition is
+ added it is moved to OFF
+
+ :id: e7ef5254-386f-4362-bbb4-9409f3f51b08
+ :setup: Standalone instance
+ :steps:
+ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
+ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
+ 3. Create a cos definition for employeeType
+ 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF (with a delay for postop processing)
+ 5. Check a message "slapi_vattrspi_regattr - Because employeeType,.." in error logs
+ :expectedresults:
+ 1. This should be successful
+ 2. This should be successful
+ 3. This should be successful
+ 4. This should be successful
+ 5. This should be successful
+ """
+
+ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
+
+ # creating CosClassicDefinition
+ log.info("Create a cos definition")
+ properties = {'cosTemplateDn': 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX),
+ 'cosAttribute': 'employeeType',
+ 'cosSpecifier': 'nsrole',
+ 'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'}
+ cosdef = CosClassicDefinition(topo.standalone,'cn=cosClassicGenerateEmployeeTypeUsingnsrole,{}'.format(DEFAULT_SUFFIX))\
+ .create(properties=properties)
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
+ time.sleep(2)
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
+ topo.standalone.stop()
+ assert topo.standalone.searchErrorsLog("slapi_vattrspi_regattr - Because employeeType is a new registered virtual attribute , nsslapd-ignore-virtual-attrs was set to \'off\'")
+ topo.standalone.start()
+ cosdef.delete()
if __name__ == "__main__":
CURRENT_FILE = os.path.realpath(__file__)
diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py
index 47a531794..bec3aedfc 100644
--- a/dirsrvtests/tests/suites/roles/basic_test.py
+++ b/dirsrvtests/tests/suites/roles/basic_test.py
@@ -11,6 +11,8 @@
Importing necessary Modules.
"""
+import logging
+import time
import os
import pytest
@@ -22,6 +24,9 @@ from lib389.topologies import topology_st as topo
from lib389.idm.role import FilteredRoles, ManagedRoles, NestedRoles
from lib389.idm.domain import Domain
+logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
pytestmark = pytest.mark.tier1
DNBASE = "o=acivattr,{}".format(DEFAULT_SUFFIX)
@@ -35,7 +40,7 @@ FILTERROLESALESROLE = "cn=FILTERROLESALESROLE,{}".format(DNBASE)
FILTERROLEENGROLE = "cn=FILTERROLEENGROLE,{}".format(DNBASE)
-def test_filterrole(topo):
+def test_filterrole(topo, request):
"""Test Filter Role
:id: 8ada4064-786b-11e8-8634-8c16451d917b
@@ -136,8 +141,20 @@ def test_filterrole(topo):
SALES_OU, DNBASE]:
UserAccount(topo.standalone, dn_dn).delete()
+ def fin():
+ topo.standalone.restart()
+ try:
+ filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
+ for i in filtered_roles.list():
+ i.delete()
+ except:
+ pass
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
+
+ request.addfinalizer(fin)
+
-def test_managedrole(topo):
+def test_managedrole(topo, request):
"""Test Managed Role
:id: d52a9c00-3bf6-11e9-9b7b-8c16451d917b
@@ -209,6 +226,16 @@ def test_managedrole(topo):
for i in roles.list():
i.delete()
+ def fin():
+ topo.standalone.restart()
+ try:
+ role = ManagedRoles(topo.standalone, DEFAULT_SUFFIX).get('ROLE1')
+ role.delete()
+ except:
+ pass
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
+
+ request.addfinalizer(fin)
@pytest.fixture(scope="function")
def _final(request, topo):
@@ -220,6 +247,7 @@ def _final(request, topo):
def finofaci():
"""
Removes and Restores ACIs and other users after the test.
+ And restore nsslapd-ignore-virtual-attrs to default
"""
domain = Domain(topo.standalone, DEFAULT_SUFFIX)
domain.remove_all('aci')
@@ -234,6 +262,8 @@ def _final(request, topo):
for i in aci_list:
domain.add("aci", i)
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
+
request.addfinalizer(finofaci)
@@ -296,6 +326,172 @@ def test_nestedrole(topo, _final):
conn = users.get('test_user_3').bind(PW_DM)
assert UserAccounts(conn, DEFAULT_SUFFIX).list()
+def test_vattr_on_filtered_role(topo, request):
+ """Test nsslapd-ignore-virtual-attrs configuration attribute
+ The attribute is ON by default. If a filtered role is
+ added it is moved to OFF
+
+ :id: 88b3ad3c-f39a-4eb7-a8c9-07c685f11908
+ :setup: Standalone instance
+ :steps:
+ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
+ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
+ 3. Create a filtered role
+ 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF
+ 5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs
+ :expectedresults:
+ 1. This should be successful
+ 2. This should be successful
+ 3. This should be successful
+ 4. This should be successful
+ 5. This should be successful
+ """
+
+ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
+
+ log.info("Create a filtered role")
+ try:
+ Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX)
+ except:
+ pass
+ roles = FilteredRoles(topo.standalone, DNBASE)
+ roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'})
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
+
+ topo.standalone.stop()
+ assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'")
+
+ def fin():
+ topo.standalone.restart()
+ try:
+ filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
+ for i in filtered_roles.list():
+ i.delete()
+ except:
+ pass
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
+
+ request.addfinalizer(fin)
+
+def test_vattr_on_filtered_role_restart(topo, request):
+ """Test nsslapd-ignore-virtual-attrs configuration attribute
+ If it exists a filtered role definition at restart then
+ nsslapd-ignore-virtual-attrs should be set to 'off'
+
+ :id: 972183f7-d18f-40e0-94ab-580e7b7d78d0
+ :setup: Standalone instance
+ :steps:
+ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
+ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
+ 3. Create a filtered role
+ 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF
+ 5. restart the instance
+ 6. Check the presence of virtual attribute is detected
+ 7. Check the value of nsslapd-ignore-virtual-attrs should be OFF
+ :expectedresults:
+ 1. This should be successful
+ 2. This should be successful
+ 3. This should be successful
+ 4. This should be successful
+ 5. This should be successful
+ 6. This should be successful
+ 7. This should be successful
+ """
+
+ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
+
+ log.info("Create a filtered role")
+ try:
+ Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX)
+ except:
+ pass
+ roles = FilteredRoles(topo.standalone, DNBASE)
+ roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'})
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
+
+
+ log.info("Check the virtual attribute definition is found (after a required delay)")
+ topo.standalone.restart()
+ time.sleep(5)
+ assert topo.standalone.searchErrorsLog("Found a role/cos definition in")
+ assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'")
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
+
+ def fin():
+ topo.standalone.restart()
+ try:
+ filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
+ for i in filtered_roles.list():
+ i.delete()
+ except:
+ pass
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
+
+ request.addfinalizer(fin)
+
+
+def test_vattr_on_managed_role(topo, request):
+ """Test nsslapd-ignore-virtual-attrs configuration attribute
+ The attribute is ON by default. If a managed role is
+ added it is moved to OFF
+
+ :id: 664b722d-c1ea-41e4-8f6c-f9c87a212346
+ :setup: Standalone instance
+ :steps:
+ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
+ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
+ 3. Create a managed role
+ 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF
+ 5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs
+ :expectedresults:
+ 1. This should be successful
+ 2. This should be successful
+ 3. This should be successful
+ 4. This should be successful
+ 5. This should be successful
+ """
+
+ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
+
+ log.info("Create a managed role")
+ roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX)
+ role = roles.create(properties={"cn": 'ROLE1'})
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
+
+ topo.standalone.stop()
+ assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'")
+
+ def fin():
+ topo.standalone.restart()
+ try:
+ filtered_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX)
+ for i in filtered_roles.list():
+ i.delete()
+ except:
+ pass
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
+
+ request.addfinalizer(fin)
if __name__ == "__main__":
CURRENT_FILE = os.path.realpath(__file__)
diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c
index 3d076a4cb..cd00e0aba 100644
--- a/ldap/servers/plugins/roles/roles_cache.c
+++ b/ldap/servers/plugins/roles/roles_cache.c
@@ -530,6 +530,15 @@ roles_cache_trigger_update_role(char *dn, Slapi_Entry *roles_entry, Slapi_DN *be
}
slapi_rwlock_unlock(global_lock);
+ {
+ /* A role definition has been updated, enable vattr handling */
+ char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
+ errorbuf[0] = '\0';
+ config_set_ignore_vattrs(CONFIG_IGNORE_VATTRS, "off", errorbuf, 1);
+ slapi_log_err(SLAPI_LOG_INFO,
+ "roles_cache_trigger_update_role",
+ "Because of virtual attribute definition (role), %s was set to 'off'\n", CONFIG_IGNORE_VATTRS);
+ }
slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, "<-- roles_cache_trigger_update_role: %p \n", roles_list);
}
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 2ea4cd760..f6dacce30 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -1803,7 +1803,7 @@ FrontendConfig_init(void)
init_ndn_cache_enabled = cfg->ndn_cache_enabled = LDAP_ON;
cfg->ndn_cache_max_size = SLAPD_DEFAULT_NDN_SIZE;
init_sasl_mapping_fallback = cfg->sasl_mapping_fallback = LDAP_OFF;
- init_ignore_vattrs = cfg->ignore_vattrs = LDAP_OFF;
+ init_ignore_vattrs = cfg->ignore_vattrs = LDAP_ON;
cfg->sasl_max_bufsize = SLAPD_DEFAULT_SASL_MAXBUFSIZE;
cfg->unhashed_pw_switch = SLAPD_DEFAULT_UNHASHED_PW_SWITCH;
init_return_orig_type = cfg->return_orig_type = LDAP_OFF;
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 4931a4ca4..61ed40b7d 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -1042,6 +1042,8 @@ main(int argc, char **argv)
eq_start(); /* must be done after plugins started - DEPRECATED */
eq_start_rel(); /* must be done after plugins started */
+ vattr_check(); /* Check if it exists virtual attribute definitions */
+
#ifdef HPUX10
/* HPUX linker voodoo */
if (collation_init == NULL) {
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index c143f3772..442a621aa 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1462,6 +1462,7 @@ void subentry_create_filter(Slapi_Filter **filter);
*/
void vattr_init(void);
void vattr_cleanup(void);
+void vattr_check(void);
/*
* slapd_plhash.c - supplement to NSPR plhash
diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c
index 09dab6ecf..24750a57c 100644
--- a/ldap/servers/slapd/vattr.c
+++ b/ldap/servers/slapd/vattr.c
@@ -64,6 +64,10 @@
#define SOURCEFILE "vattr.c"
static char *sourcefile = SOURCEFILE;
+/* stolen from roles_cache.h, must remain in sync */
+#define NSROLEATTR "nsRole"
+static Slapi_Eq_Context vattr_check_ctx = {0};
+
/* Define only for module test code */
/* #define VATTR_TEST_CODE */
@@ -130,6 +134,112 @@ vattr_cleanup()
{
/* We need to free and remove anything that was inserted first */
vattr_map_destroy();
+ slapi_eq_cancel_rel(vattr_check_ctx);
+}
+
+static void
+vattr_check_thread(void *arg)
+{
+ Slapi_Backend *be = NULL;
+ char *cookie = NULL;
+ Slapi_DN *base_sdn = NULL;
+ Slapi_PBlock *search_pb = NULL;
+ Slapi_Entry **entries = NULL;
+ int32_t rc;
+ int32_t check_suffix; /* used to skip suffixes in ignored_backend */
+ PRBool exist_vattr_definition = PR_FALSE;
+ char *ignored_backend[5] = {"cn=config", "cn=schema", "cn=monitor", "cn=changelog", NULL}; /* suffixes to ignore */
+ char *suffix;
+ int ignore_vattrs;
+
+ ignore_vattrs = config_get_ignore_vattrs();
+
+ if (!ignore_vattrs) {
+ /* Nothing to do more, we are already evaluating virtual attribute */
+ return;
+ }
+
+ search_pb = slapi_pblock_new();
+ be = slapi_get_first_backend(&cookie);
+ while (be && !exist_vattr_definition && !slapi_is_shutting_down()) {
+ base_sdn = (Slapi_DN *) slapi_be_getsuffix(be, 0);
+ suffix = (char *) slapi_sdn_get_dn(base_sdn);
+
+ if (suffix) {
+ /* First check that we need to check that suffix */
+ check_suffix = 1;
+ for (size_t i = 0; ignored_backend[i]; i++) {
+ if (strcasecmp(suffix, ignored_backend[i]) == 0) {
+ check_suffix = 0;
+ break;
+ }
+ }
+
+ /* search for a role or cos definition */
+ if (check_suffix) {
+ slapi_search_internal_set_pb(search_pb, slapi_sdn_get_dn(base_sdn),
+ LDAP_SCOPE_SUBTREE, "(&(objectclass=ldapsubentry)(|(objectclass=nsRoleDefinition)(objectclass=cosSuperDefinition)))",
+ NULL, 0, NULL, NULL, (void *) plugin_get_default_component_id(), 0);
+ slapi_search_internal_pb(search_pb);
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
+
+ if (rc == LDAP_SUCCESS) {
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
+ if (entries && entries[0]) {
+ /* it exists at least a cos or role definition */
+ exist_vattr_definition = PR_TRUE;
+ slapi_log_err(SLAPI_LOG_INFO,
+ "vattr_check_thread",
+ "Found a role/cos definition in %s\n", slapi_entry_get_dn(entries[0]));
+ } else {
+ slapi_log_err(SLAPI_LOG_INFO,
+ "vattr_check_thread",
+ "No role/cos definition in %s\n", slapi_sdn_get_dn(base_sdn));
+ }
+ }
+ slapi_free_search_results_internal(search_pb);
+ } /* check_suffix */
+ } /* suffix */
+ be = (backend *) slapi_get_next_backend(cookie);
+ }
+ slapi_pblock_destroy(search_pb);
+ slapi_ch_free_string(&cookie);
+
+ /* Now if a virtual attribute is defined, then CONFIG_IGNORE_VATTRS -> off */
+ if (exist_vattr_definition) {
+ char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
+ errorbuf[0] = '\0';
+ config_set_ignore_vattrs(CONFIG_IGNORE_VATTRS, "off", errorbuf, 1);
+ slapi_log_err(SLAPI_LOG_INFO,
+ "vattr_check_thread",
+ "Because of virtual attribute definition, %s was set to 'off'\n", CONFIG_IGNORE_VATTRS);
+ }
+}
+static void
+vattr_check_schedule_once(time_t when __attribute__((unused)), void *arg)
+{
+ if (PR_CreateThread(PR_USER_THREAD,
+ vattr_check_thread,
+ (void *) arg,
+ PR_PRIORITY_NORMAL,
+ PR_GLOBAL_THREAD,
+ PR_UNJOINABLE_THREAD,
+ SLAPD_DEFAULT_THREAD_STACKSIZE) == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR,
+ "vattr_check_schedule_once",
+ "Fails to check if %s needs to be toggled to FALSE\n", CONFIG_IGNORE_VATTRS);
+ }
+}
+#define VATTR_CHECK_DELAY 3
+void
+vattr_check()
+{
+ /* Schedule running a callback that will create a thread
+ * but make sure it is called a first thing when event loop is created */
+ time_t now;
+
+ now = slapi_current_rel_time_t();
+ vattr_check_ctx = slapi_eq_once_rel(vattr_check_schedule_once, NULL, now + VATTR_CHECK_DELAY);
}
/* The public interface functions start here */
@@ -1631,6 +1741,9 @@ slapi_vattrspi_regattr(vattr_sp_handle *h, char *type_name_to_register, char *DN
char *type_to_add;
int free_type_to_add = 0;
Slapi_DN original_dn;
+ int ignore_vattrs;
+
+ ignore_vattrs = config_get_ignore_vattrs();
slapi_sdn_init(&original_dn);
@@ -1676,6 +1789,20 @@ slapi_vattrspi_regattr(vattr_sp_handle *h, char *type_name_to_register, char *DN
if (free_type_to_add) {
slapi_ch_free((void **)&type_to_add);
}
+ if (ignore_vattrs && strcasecmp(type_name_to_register, NSROLEATTR)) {
+ /* A new virtual attribute is registered.
+ * This new vattr being *different* than the default roles vattr 'nsRole'
+ * It is time to allow vattr lookup
+ */
+ char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
+ errorbuf[0] = '\0';
+ config_set_ignore_vattrs(CONFIG_IGNORE_VATTRS, "off", errorbuf, 1);
+ slapi_log_err(SLAPI_LOG_INFO,
+ "slapi_vattrspi_regattr",
+ "Because %s is a new registered virtual attribute , %s was set to 'off'\n",
+ type_name_to_register,
+ CONFIG_IGNORE_VATTRS);
+ }
return ret;
}
diff --git a/src/lib389/lib389/idm/role.py b/src/lib389/lib389/idm/role.py
index fe91aab6f..9a2bff3d6 100644
--- a/src/lib389/lib389/idm/role.py
+++ b/src/lib389/lib389/idm/role.py
@@ -252,6 +252,8 @@ class FilteredRole(Role):
self._rdn_attribute = 'cn'
self._create_objectclasses = ['nsComplexRoleDefinition', 'nsFilteredRoleDefinition']
+ self._protected = False
+
class FilteredRoles(Roles):
@@ -285,6 +287,7 @@ class ManagedRole(Role):
self._rdn_attribute = 'cn'
self._create_objectclasses = ['nsSimpleRoleDefinition', 'nsManagedRoleDefinition']
+ self._protected = False
class ManagedRoles(Roles):
"""DSLdapObjects that represents all Managed Roles entries
@@ -320,6 +323,7 @@ class NestedRole(Role):
self._rdn_attribute = 'cn'
self._create_objectclasses = ['nsComplexRoleDefinition', 'nsNestedRoleDefinition']
+ self._protected = False
class NestedRoles(Roles):
"""DSLdapObjects that represents all NestedRoles entries in suffix.
--
2.31.1

View File

@ -1,322 +0,0 @@
From 1e1c2b23c35282481628af7e971ac683da334502 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Tue, 27 Apr 2021 17:00:15 +0100
Subject: [PATCH 02/12] Issue 4701 - RFE - Exclude attributes from retro
changelog (#4723)
Description: When the retro changelog plugin is enabled it writes the
added/modified values to the "cn-changelog" suffix. In
some cases an entries attribute values can be of a
sensitive nature and should be excluded. This RFE adds
functionality that will allow an admin exclude certain
attributes from the retro changelog DB.
Relates: https://github.com/389ds/389-ds-base/issues/4701
Reviewed by: mreynolds389, droideck (Thanks folks)
---
.../tests/suites/retrocl/basic_test.py | 292 ++++++++++++++++++
1 file changed, 292 insertions(+)
create mode 100644 dirsrvtests/tests/suites/retrocl/basic_test.py
diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
new file mode 100644
index 000000000..112c73cb9
--- /dev/null
+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
@@ -0,0 +1,292 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import logging
+import ldap
+import time
+import pytest
+from lib389.topologies import topology_st
+from lib389.plugins import RetroChangelogPlugin
+from lib389._constants import *
+from lib389.utils import *
+from lib389.tasks import *
+from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
+from lib389.cli_base.dsrc import dsrc_arg_concat
+from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add
+from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
+
+pytestmark = pytest.mark.tier1
+
+USER1_DN = 'uid=user1,ou=people,'+ DEFAULT_SUFFIX
+USER2_DN = 'uid=user2,ou=people,'+ DEFAULT_SUFFIX
+USER_PW = 'password'
+ATTR_HOMEPHONE = 'homePhone'
+ATTR_CARLICENSE = 'carLicense'
+
+log = logging.getLogger(__name__)
+
+def test_retrocl_exclude_attr_add(topology_st):
+ """ Test exclude attribute feature of the retrocl plugin for add operation
+
+ :id: 3481650f-2070-45ef-9600-2500cfc51559
+
+ :setup: Standalone instance
+
+ :steps:
+ 1. Enable dynamic plugins
+ 2. Confige retro changelog plugin
+ 3. Add an entry
+ 4. Ensure entry attrs are in the changelog
+ 5. Exclude an attr
+ 6. Add another entry
+ 7. Ensure excluded attr is not in the changelog
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ """
+
+ st = topology_st.standalone
+
+ log.info('Enable dynamic plugins')
+ try:
+ st.config.set('nsslapd-dynamic-plugins', 'on')
+ except ldap.LDAPError as e:
+ ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc'])
+ assert False
+
+ log.info('Configure retrocl plugin')
+ rcl = RetroChangelogPlugin(st)
+ rcl.disable()
+ rcl.enable()
+ rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
+
+ log.info('Restarting instance')
+ try:
+ st.restart()
+ except ldap.LDAPError as e:
+ ldap.error('Failed to restart instance ' + e.args[0]['desc'])
+ assert False
+
+ users = UserAccounts(st, DEFAULT_SUFFIX)
+
+ log.info('Adding user1')
+ try:
+ user1 = users.create(properties={
+ 'sn': '1',
+ 'cn': 'user 1',
+ 'uid': 'user1',
+ 'uidNumber': '11',
+ 'gidNumber': '111',
+ 'givenname': 'user1',
+ 'homePhone': '0861234567',
+ 'carLicense': '131D16674',
+ 'mail': 'user1@whereever.com',
+ 'homeDirectory': '/home/user1',
+ 'userpassword': USER_PW})
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError as e:
+ log.error("Failed to add user1")
+
+ log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
+ try:
+ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+ except ldap.LDAPError as e:
+ log.fatal("Changelog search failed, error: " +str(e))
+ assert False
+ assert len(cllist) > 0
+ if cllist[0].hasAttr('changes'):
+ clstr = (cllist[0].getValue('changes')).decode()
+ assert ATTR_HOMEPHONE in clstr
+ assert ATTR_CARLICENSE in clstr
+
+ log.info('Excluding attribute ' + ATTR_HOMEPHONE)
+ args = FakeArgs()
+ args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
+ args.instance = 'standalone1'
+ args.basedn = None
+ args.binddn = None
+ args.starttls = False
+ args.pwdfile = None
+ args.bindpw = None
+ args.prompt = False
+ args.exclude_attrs = ATTR_HOMEPHONE
+ args.func = retrochangelog_add
+ dsrc_inst = dsrc_arg_concat(args, None)
+ inst = connect_instance(dsrc_inst, False, args)
+ result = args.func(inst, None, log, args)
+ disconnect_instance(inst)
+ assert result is None
+
+ log.info("5s delay for retrocl plugin to restart")
+ time.sleep(5)
+
+ log.info('Adding user2')
+ try:
+ user2 = users.create(properties={
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'uidNumber': '22',
+ 'gidNumber': '222',
+ 'givenname': 'user2',
+ 'homePhone': '0879088363',
+ 'carLicense': '04WX11038',
+ 'mail': 'user2@whereever.com',
+ 'homeDirectory': '/home/user2',
+ 'userpassword': USER_PW})
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError as e:
+ log.error("Failed to add user2")
+
+ log.info('Verify homePhone attr is not in the changelog changestring')
+ try:
+ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN)
+ assert len(cllist) > 0
+ if cllist[0].hasAttr('changes'):
+ clstr = (cllist[0].getValue('changes')).decode()
+ assert ATTR_HOMEPHONE not in clstr
+ assert ATTR_CARLICENSE in clstr
+ except ldap.LDAPError as e:
+ log.fatal("Changelog search failed, error: " +str(e))
+ assert False
+
+def test_retrocl_exclude_attr_mod(topology_st):
+ """ Test exclude attribute feature of the retrocl plugin for mod operation
+
+ :id: f6bef689-685b-4f86-a98d-f7e6b1fcada3
+
+ :setup: Standalone instance
+
+ :steps:
+ 1. Enable dynamic plugins
+ 2. Confige retro changelog plugin
+ 3. Add user1 entry
+ 4. Ensure entry attrs are in the changelog
+ 5. Exclude an attr
+ 6. Modify user1 entry
+ 7. Ensure excluded attr is not in the changelog
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ """
+
+ st = topology_st.standalone
+
+ log.info('Enable dynamic plugins')
+ try:
+ st.config.set('nsslapd-dynamic-plugins', 'on')
+ except ldap.LDAPError as e:
+ ldap.error('Failed to enable dynamic plugins ' + e.args[0]['desc'])
+ assert False
+
+ log.info('Configure retrocl plugin')
+ rcl = RetroChangelogPlugin(st)
+ rcl.disable()
+ rcl.enable()
+ rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId')
+
+ log.info('Restarting instance')
+ try:
+ st.restart()
+ except ldap.LDAPError as e:
+ ldap.error('Failed to restart instance ' + e.args[0]['desc'])
+ assert False
+
+ users = UserAccounts(st, DEFAULT_SUFFIX)
+
+ log.info('Adding user1')
+ try:
+ user1 = users.create(properties={
+ 'sn': '1',
+ 'cn': 'user 1',
+ 'uid': 'user1',
+ 'uidNumber': '11',
+ 'gidNumber': '111',
+ 'givenname': 'user1',
+ 'homePhone': '0861234567',
+ 'carLicense': '131D16674',
+ 'mail': 'user1@whereever.com',
+ 'homeDirectory': '/home/user1',
+ 'userpassword': USER_PW})
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError as e:
+ log.error("Failed to add user1")
+
+ log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
+ try:
+ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+ except ldap.LDAPError as e:
+ log.fatal("Changelog search failed, error: " +str(e))
+ assert False
+ assert len(cllist) > 0
+ if cllist[0].hasAttr('changes'):
+ clstr = (cllist[0].getValue('changes')).decode()
+ assert ATTR_HOMEPHONE in clstr
+ assert ATTR_CARLICENSE in clstr
+
+ log.info('Excluding attribute ' + ATTR_CARLICENSE)
+ args = FakeArgs()
+ args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM]
+ args.instance = 'standalone1'
+ args.basedn = None
+ args.binddn = None
+ args.starttls = False
+ args.pwdfile = None
+ args.bindpw = None
+ args.prompt = False
+ args.exclude_attrs = ATTR_CARLICENSE
+ args.func = retrochangelog_add
+ dsrc_inst = dsrc_arg_concat(args, None)
+ inst = connect_instance(dsrc_inst, False, args)
+ result = args.func(inst, None, log, args)
+ disconnect_instance(inst)
+ assert result is None
+
+ log.info("5s delay for retrocl plugin to restart")
+ time.sleep(5)
+
+ log.info('Modify user1 carLicense attribute')
+ try:
+ st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")])
+ except ldap.LDAPError as e:
+ log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc'])
+ assert False
+
+ log.info('Verify carLicense attr is not in the changelog changestring')
+ try:
+ cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+ assert len(cllist) > 0
+ # There will be 2 entries in the changelog for this user, we are only
+ #interested in the second one, the modify operation.
+ if cllist[1].hasAttr('changes'):
+ clstr = (cllist[1].getValue('changes')).decode()
+ assert ATTR_CARLICENSE not in clstr
+ except ldap.LDAPError as e:
+ log.fatal("Changelog search failed, error: " +str(e))
+ assert False
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
--
2.26.3

View File

@ -0,0 +1,621 @@
From 968ad6b5039d839bfbc61da755c252cc7598415b Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 25 Oct 2021 17:09:57 +0200
Subject: [PATCH 02/12] Issue 4943 - Fix csn generator to limit time skew drift
- PR 4946
---
ldap/servers/slapd/csngen.c | 433 +++++++++++++++++-------------
ldap/servers/slapd/slapi-plugin.h | 9 +
2 files changed, 255 insertions(+), 187 deletions(-)
diff --git a/ldap/servers/slapd/csngen.c b/ldap/servers/slapd/csngen.c
index fcd88b4cc..c7c5c2ba8 100644
--- a/ldap/servers/slapd/csngen.c
+++ b/ldap/servers/slapd/csngen.c
@@ -18,8 +18,9 @@
#include "prcountr.h"
#include "slap.h"
+
#define CSN_MAX_SEQNUM 0xffff /* largest sequence number */
-#define CSN_MAX_TIME_ADJUST 24 * 60 * 60 /* maximum allowed time adjustment (in seconds) = 1 day */
+#define CSN_MAX_TIME_ADJUST _SEC_PER_DAY /* maximum allowed time adjustment (in seconds) = 1 day */
#define ATTR_CSN_GENERATOR_STATE "nsState" /* attribute that stores csn state information */
#define STATE_FORMAT "%8x%8x%8x%4hx%4hx"
#define STATE_LENGTH 32
@@ -27,6 +28,8 @@
#define CSN_CALC_TSTAMP(gen) ((gen)->state.sampled_time + \
(gen)->state.local_offset + \
(gen)->state.remote_offset)
+#define TIME_DIFF_WARNING_DELAY (30*_SEC_PER_DAY) /* log an info message when difference
+ between clock is greater than this delay */
/*
* **************************************************************************
@@ -63,6 +66,7 @@ typedef struct csngen_state
struct csngen
{
csngen_state state; /* persistent state of the generator */
+ int32_t (*gettime)(struct timespec *tp); /* Get local time */
callback_list callbacks; /* list of callbacks registered with the generator */
Slapi_RWLock *lock; /* concurrency control */
};
@@ -78,7 +82,7 @@ static int _csngen_init_callbacks(CSNGen *gen);
static void _csngen_call_callbacks(const CSNGen *gen, const CSN *csn, PRBool abort);
static int _csngen_cmp_callbacks(const void *el1, const void *el2);
static void _csngen_free_callbacks(CSNGen *gen);
-static int _csngen_adjust_local_time(CSNGen *gen, time_t cur_time);
+static int _csngen_adjust_local_time(CSNGen *gen);
/*
* **************************************************************************
@@ -121,6 +125,7 @@ csngen_new(ReplicaId rid, Slapi_Attr *state)
_csngen_init_callbacks(gen);
gen->state.rid = rid;
+ gen->gettime = slapi_clock_utc_gettime;
if (state) {
rc = _csngen_parse_state(gen, state);
@@ -164,10 +169,7 @@ csngen_free(CSNGen **gen)
int
csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
{
- struct timespec now = {0};
int rc = CSN_SUCCESS;
- time_t cur_time;
- int delta;
if (gen == NULL || csn == NULL) {
slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn", "Invalid argument\n");
@@ -180,39 +182,13 @@ csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
return CSN_MEMORY_ERROR;
}
- if ((rc = slapi_clock_gettime(&now)) != 0) {
- /* Failed to get system time, we must abort */
- slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn",
- "Failed to get system time (%s)\n",
- slapd_system_strerror(rc));
- return CSN_TIME_ERROR;
- }
- cur_time = now.tv_sec;
-
slapi_rwlock_wrlock(gen->lock);
- /* check if the time should be adjusted */
- delta = cur_time - gen->state.sampled_time;
- if (delta > _SEC_PER_DAY || delta < (-1 * _SEC_PER_DAY)) {
- /* We had a jump larger than a day */
- slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn",
- "Detected large jump in CSN time. Delta: %d (current time: %ld vs previous time: %ld)\n",
- delta, cur_time, gen->state.sampled_time);
- }
- if (delta > 0) {
- rc = _csngen_adjust_local_time(gen, cur_time);
- if (rc != CSN_SUCCESS) {
- slapi_rwlock_unlock(gen->lock);
- return rc;
- }
+ rc = _csngen_adjust_local_time(gen);
+ if (rc != CSN_SUCCESS) {
+ slapi_rwlock_unlock(gen->lock);
+ return rc;
}
- /* if (delta < 0) this means the local system time was set back
- * the new csn will be generated based on sampled time, which is
- * ahead of system time and previously generated csns.
- * the time stamp of the csn will not change until system time
- * catches up or is corrected by remote csns.
- * But we need to ensure that the seq_num does not overflow.
- */
if (gen->state.seq_num == CSN_MAX_SEQNUM) {
slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn", "Sequence rollover; "
@@ -261,13 +237,36 @@ csngen_rewrite_rid(CSNGen *gen, ReplicaId rid)
}
/* this function should be called when a remote CSN for the same part of
- the dit becomes known to the server (for instance, as part of RUV during
- replication session. In response, the generator would adjust its notion
- of time so that it does not generate smaller csns */
+ * the dit becomes known to the server (for instance, as part of RUV during
+ * replication session. In response, the generator would adjust its notion
+ * of time so that it does not generate smaller csns
+ *
+ * The following counters are updated
+ * - when a new csn is generated
+ * - when csngen is adjusted (beginning of a incoming (extop) or outgoing
+ * (inc_protocol) session)
+ *
+ * sampled_time: It takes the value of current system time.
+ *
+ * remote offset: it is updated when 'csn' argument is ahead of the next csn
+ * that the csn generator will generate. It is the MAX jump ahead, it is not
+ * cumulative counter (e.g. if remote_offset=7 and 'csn' is 5sec ahead
+ * remote_offset stays the same. The jump ahead (5s) pour into the local offset.
+ * It is not clear of the interest of this counter. It gives an indication of
+ * the maximum jump ahead but not much.
+ *
+ * local offset: it is increased if
+ * - system time is going backward (compare sampled_time)
+ * - if 'csn' argument is ahead of csn that the csn generator would generate
+ * AND diff('csn', csngen.new_csn) < remote_offset
+ * then the diff "pour" into local_offset
+ * It is decreased as the clock is ticking, local offset is "consumed" as
+ * sampled_time progresses.
+ */
int
csngen_adjust_time(CSNGen *gen, const CSN *csn)
{
- time_t remote_time, remote_offset, cur_time;
+ time_t remote_time, remote_offset, cur_time, old_time, new_time;
PRUint16 remote_seqnum;
int rc;
extern int config_get_ignore_time_skew(void);
@@ -281,6 +280,11 @@ csngen_adjust_time(CSNGen *gen, const CSN *csn)
slapi_rwlock_wrlock(gen->lock);
+ /* Get last local csn time */
+ old_time = CSN_CALC_TSTAMP(gen);
+ /* update local offset and sample_time */
+ rc = _csngen_adjust_local_time(gen);
+
if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
cur_time = CSN_CALC_TSTAMP(gen);
slapi_log_err(SLAPI_LOG_REPL, "csngen_adjust_time",
@@ -290,79 +294,60 @@ csngen_adjust_time(CSNGen *gen, const CSN *csn)
gen->state.local_offset,
gen->state.remote_offset);
}
- /* make sure we have the current time */
- cur_time = slapi_current_utc_time();
-
- /* make sure sampled_time is current */
- /* must only call adjust_local_time if the current time is greater than
- the generator state time */
- if ((cur_time > gen->state.sampled_time) &&
- (CSN_SUCCESS != (rc = _csngen_adjust_local_time(gen, cur_time)))) {
+ if (rc != CSN_SUCCESS) {
/* _csngen_adjust_local_time will log error */
slapi_rwlock_unlock(gen->lock);
- csngen_dump_state(gen);
+ csngen_dump_state(gen, SLAPI_LOG_DEBUG);
return rc;
}
- cur_time = CSN_CALC_TSTAMP(gen);
- if (remote_time >= cur_time) {
- time_t new_time = 0;
-
- if (remote_seqnum > gen->state.seq_num) {
- if (remote_seqnum < CSN_MAX_SEQNUM) {
- gen->state.seq_num = remote_seqnum + 1;
- } else {
- remote_time++;
- }
- }
-
- remote_offset = remote_time - cur_time;
- if (remote_offset > gen->state.remote_offset) {
- if (ignore_time_skew || (remote_offset <= CSN_MAX_TIME_ADJUST)) {
- gen->state.remote_offset = remote_offset;
- } else /* remote_offset > CSN_MAX_TIME_ADJUST */
- {
- slapi_log_err(SLAPI_LOG_ERR, "csngen_adjust_time",
- "Adjustment limit exceeded; value - %ld, limit - %ld\n",
- remote_offset, (long)CSN_MAX_TIME_ADJUST);
- slapi_rwlock_unlock(gen->lock);
- csngen_dump_state(gen);
- return CSN_LIMIT_EXCEEDED;
- }
- } else if (remote_offset > 0) { /* still need to account for this */
- gen->state.local_offset += remote_offset;
+ remote_offset = remote_time - CSN_CALC_TSTAMP(gen);
+ if (remote_offset > 0) {
+ if (!ignore_time_skew && (gen->state.remote_offset + remote_offset > CSN_MAX_TIME_ADJUST)) {
+ slapi_log_err(SLAPI_LOG_ERR, "csngen_adjust_time",
+ "Adjustment limit exceeded; value - %ld, limit - %ld\n",
+ remote_offset, (long)CSN_MAX_TIME_ADJUST);
+ slapi_rwlock_unlock(gen->lock);
+ csngen_dump_state(gen, SLAPI_LOG_DEBUG);
+ return CSN_LIMIT_EXCEEDED;
}
-
- new_time = CSN_CALC_TSTAMP(gen);
- /* let's revisit the seq num - if the new time is > the old
- tiem, we should reset the seq number to remote + 1 if
- this won't cause a wrap around */
- if (new_time >= cur_time) {
- /* just set seq_num regardless of whether the current one
- is < or > than the remote one - the goal of this function
- is to make sure we generate CSNs > the remote CSN - if
- we have increased the time, we can decrease the seqnum
- and still guarantee that any new CSNs generated will be
- > any current CSNs we have generated */
- if (remote_seqnum < gen->state.seq_num) {
- gen->state.seq_num ++;
- } else {
- gen->state.seq_num = remote_seqnum + 1;
- }
+ gen->state.remote_offset += remote_offset;
+ /* To avoid beat phenomena between suppliers let put 1 second in local_offset
+ * it will be eaten at next clock tick rather than increasing remote offset
+ * If we do not do that we will have a time skew drift of 1 second per 2 seconds
+ * if suppliers are desynchronized by 0.5 second
+ */
+ if (gen->state.local_offset == 0) {
+ gen->state.local_offset++;
+ gen->state.remote_offset--;
}
- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
- slapi_log_err(SLAPI_LOG_REPL, "csngen_adjust_time",
- "gen state after %08lx%04x:%ld:%ld:%ld\n",
- new_time, gen->state.seq_num,
- gen->state.sampled_time,
- gen->state.local_offset,
- gen->state.remote_offset);
+ }
+ /* Time to compute seqnum so that
+ * new csn >= remote csn and new csn >= old local csn
+ */
+ new_time = CSN_CALC_TSTAMP(gen);
+ PR_ASSERT(new_time >= old_time);
+ PR_ASSERT(new_time >= remote_time);
+ if (new_time > old_time) {
+ /* Can reset (local) seqnum */
+ gen->state.seq_num = 0;
+ }
+ if (new_time == remote_time && remote_seqnum >= gen->state.seq_num) {
+ if (remote_seqnum >= CSN_MAX_SEQNUM) {
+ gen->state.seq_num = 0;
+ gen->state.local_offset++;
+ } else {
+ gen->state.seq_num = remote_seqnum + 1;
}
- } else if (gen->state.remote_offset > 0) {
- /* decrease remote offset? */
- /* how to decrease remote offset but ensure that we don't
- generate a duplicate CSN, or a CSN smaller than one we've already
- generated? */
+ }
+
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
+ slapi_log_err(SLAPI_LOG_REPL, "csngen_adjust_time",
+ "gen state after %08lx%04x:%ld:%ld:%ld\n",
+ new_time, gen->state.seq_num,
+ gen->state.sampled_time,
+ gen->state.local_offset,
+ gen->state.remote_offset);
}
slapi_rwlock_unlock(gen->lock);
@@ -435,16 +420,16 @@ csngen_unregister_callbacks(CSNGen *gen, void *cookie)
/* debugging function */
void
-csngen_dump_state(const CSNGen *gen)
+csngen_dump_state(const CSNGen *gen, int severity)
{
if (gen) {
slapi_rwlock_rdlock(gen->lock);
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "CSN generator's state:\n");
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\treplica id: %d\n", gen->state.rid);
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tsampled time: %ld\n", gen->state.sampled_time);
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tlocal offset: %ld\n", gen->state.local_offset);
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tremote offset: %ld\n", gen->state.remote_offset);
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tsequence number: %d\n", gen->state.seq_num);
+ slapi_log_err(severity, "csngen_dump_state", "CSN generator's state:\n");
+ slapi_log_err(severity, "csngen_dump_state", "\treplica id: %d\n", gen->state.rid);
+ slapi_log_err(severity, "csngen_dump_state", "\tsampled time: %ld\n", gen->state.sampled_time);
+ slapi_log_err(severity, "csngen_dump_state", "\tlocal offset: %ld\n", gen->state.local_offset);
+ slapi_log_err(severity, "csngen_dump_state", "\tremote offset: %ld\n", gen->state.remote_offset);
+ slapi_log_err(severity, "csngen_dump_state", "\tsequence number: %d\n", gen->state.seq_num);
slapi_rwlock_unlock(gen->lock);
}
}
@@ -459,7 +444,7 @@ csngen_test()
CSNGen *gen = csngen_new(255, NULL);
slapi_log_err(SLAPI_LOG_DEBUG, "csngen_test", "staring csn generator test ...");
- csngen_dump_state(gen);
+ csngen_dump_state(gen, SLAPI_LOG_INFO);
rc = _csngen_start_test_threads(gen);
if (rc == 0) {
@@ -469,7 +454,7 @@ csngen_test()
}
_csngen_stop_test_threads();
- csngen_dump_state(gen);
+ csngen_dump_state(gen, SLAPI_LOG_INFO);
slapi_log_err(SLAPI_LOG_DEBUG, "csngen_test", "csn generator test is complete...");
}
@@ -574,94 +559,93 @@ _csngen_cmp_callbacks(const void *el1, const void *el2)
return 1;
}
+/* Get time and adjust local offset */
static int
-_csngen_adjust_local_time(CSNGen *gen, time_t cur_time)
+_csngen_adjust_local_time(CSNGen *gen)
{
extern int config_get_ignore_time_skew(void);
int ignore_time_skew = config_get_ignore_time_skew();
- time_t time_diff = cur_time - gen->state.sampled_time;
+ struct timespec now = {0};
+ time_t time_diff;
+ time_t cur_time;
+ int rc;
+
+ if ((rc = gen->gettime(&now)) != 0) {
+ /* Failed to get system time, we must abort */
+ slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn",
+ "Failed to get system time (%s)\n",
+ slapd_system_strerror(rc));
+ return CSN_TIME_ERROR;
+ }
+ cur_time = now.tv_sec;
+ time_diff = cur_time - gen->state.sampled_time;
+
+ /* check if the time should be adjusted */
if (time_diff == 0) {
/* This is a no op - _csngen_adjust_local_time should never be called
in this case, because there is nothing to adjust - but just return
here to protect ourselves
*/
return CSN_SUCCESS;
- } else if (time_diff > 0) {
- time_t ts_before = CSN_CALC_TSTAMP(gen);
- time_t ts_after = 0;
- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
- time_t new_time = CSN_CALC_TSTAMP(gen);
- slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
- "gen state before %08lx%04x:%ld:%ld:%ld\n",
- new_time, gen->state.seq_num,
- gen->state.sampled_time,
- gen->state.local_offset,
- gen->state.remote_offset);
- }
-
- gen->state.sampled_time = cur_time;
- if (time_diff > gen->state.local_offset)
- gen->state.local_offset = 0;
- else
- gen->state.local_offset = gen->state.local_offset - time_diff;
-
- /* only reset the seq_num if the new timestamp part of the CSN
- is going to be greater than the old one - if they are the
- same after the above adjustment (which can happen if
- csngen_adjust_time has to store the offset in the
- local_offset field) we must not allow the CSN to regress or
- generate duplicate numbers */
- ts_after = CSN_CALC_TSTAMP(gen);
- if (ts_after > ts_before) {
- gen->state.seq_num = 0; /* only reset if new time > old time */
- }
-
- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
- time_t new_time = CSN_CALC_TSTAMP(gen);
- slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
- "gen state after %08lx%04x:%ld:%ld:%ld\n",
- new_time, gen->state.seq_num,
- gen->state.sampled_time,
- gen->state.local_offset,
- gen->state.remote_offset);
- }
- return CSN_SUCCESS;
- } else /* time was turned back */
- {
- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
- time_t new_time = CSN_CALC_TSTAMP(gen);
- slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
- "gen state back before %08lx%04x:%ld:%ld:%ld\n",
- new_time, gen->state.seq_num,
- gen->state.sampled_time,
- gen->state.local_offset,
- gen->state.remote_offset);
- }
+ }
+ if (labs(time_diff) > TIME_DIFF_WARNING_DELAY) {
+ /* We had a jump larger than a day */
+ slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn",
+ "Detected large jump in CSN time. Delta: %ld (current time: %ld vs previous time: %ld)\n",
+ time_diff, cur_time, gen->state.sampled_time);
+ }
+ if (!ignore_time_skew && (gen->state.local_offset - time_diff > CSN_MAX_TIME_ADJUST)) {
+ slapi_log_err(SLAPI_LOG_ERR, "_csngen_adjust_local_time",
+ "Adjustment limit exceeded; value - %ld, limit - %d\n",
+ gen->state.local_offset - time_diff, CSN_MAX_TIME_ADJUST);
+ return CSN_LIMIT_EXCEEDED;
+ }
- if (!ignore_time_skew && (labs(time_diff) > CSN_MAX_TIME_ADJUST)) {
- slapi_log_err(SLAPI_LOG_ERR, "_csngen_adjust_local_time",
- "Adjustment limit exceeded; value - %ld, limit - %d\n",
- labs(time_diff), CSN_MAX_TIME_ADJUST);
- return CSN_LIMIT_EXCEEDED;
- }
+ time_t ts_before = CSN_CALC_TSTAMP(gen);
+ time_t ts_after = 0;
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
+ time_t new_time = CSN_CALC_TSTAMP(gen);
+ slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
+ "gen state before %08lx%04x:%ld:%ld:%ld\n",
+ new_time, gen->state.seq_num,
+ gen->state.sampled_time,
+ gen->state.local_offset,
+ gen->state.remote_offset);
+ }
- gen->state.sampled_time = cur_time;
- gen->state.local_offset = MAX_VAL(gen->state.local_offset, labs(time_diff));
- gen->state.seq_num = 0;
+ gen->state.sampled_time = cur_time;
+ gen->state.local_offset = MAX_VAL(0, gen->state.local_offset - time_diff);
+ /* new local_offset = MAX_VAL(0, old sample_time + old local_offset - cur_time)
+ * ==> new local_offset >= 0 and
+ * new local_offset + cur_time >= old sample_time + old local_offset
+ * ==> new local_offset + cur_time + remote_offset >=
+ * sample_time + old local_offset + remote_offset
+ * ==> CSN_CALC_TSTAMP(new gen) >= CSN_CALC_TSTAMP(old gen)
+ */
- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
- time_t new_time = CSN_CALC_TSTAMP(gen);
- slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
- "gen state back after %08lx%04x:%ld:%ld:%ld\n",
- new_time, gen->state.seq_num,
- gen->state.sampled_time,
- gen->state.local_offset,
- gen->state.remote_offset);
- }
+ /* only reset the seq_num if the new timestamp part of the CSN
+ is going to be greater than the old one - if they are the
+ same after the above adjustment (which can happen if
+ csngen_adjust_time has to store the offset in the
+ local_offset field) we must not allow the CSN to regress or
+ generate duplicate numbers */
+ ts_after = CSN_CALC_TSTAMP(gen);
+ PR_ASSERT(ts_after >= ts_before);
+ if (ts_after > ts_before) {
+ gen->state.seq_num = 0; /* only reset if new time > old time */
+ }
- return CSN_SUCCESS;
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
+ time_t new_time = CSN_CALC_TSTAMP(gen);
+ slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
+ "gen state after %08lx%04x:%ld:%ld:%ld\n",
+ new_time, gen->state.seq_num,
+ gen->state.sampled_time,
+ gen->state.local_offset,
+ gen->state.remote_offset);
}
+ return CSN_SUCCESS;
}
/*
@@ -799,7 +783,7 @@ _csngen_remote_tester_main(void *data)
"Failed to adjust generator's time; csn error - %d\n", rc);
}
- csngen_dump_state(gen);
+ csngen_dump_state(gen, SLAPI_LOG_INFO);
}
csn_free(&csn);
@@ -825,8 +809,83 @@ _csngen_local_tester_main(void *data)
/*
* g_sampled_time -= slapi_rand () % 100;
*/
- csngen_dump_state(gen);
+ csngen_dump_state(gen, SLAPI_LOG_INFO);
}
PR_AtomicDecrement(&s_thread_count);
}
+
+int _csngen_tester_state;
+int _csngen_tester_state_rid;
+
+static int
+_mynoise(int time, int len, double height)
+{
+ if (((time/len) % 2) == 0) {
+ return -height + 2 * height * ( time % len ) / (len-1);
+ } else {
+ return height - 2 * height * ( time % len ) / (len-1);
+ }
+}
+
+
+int32_t _csngen_tester_gettime(struct timespec *tp)
+{
+ int vtime = _csngen_tester_state ;
+ tp->tv_sec = 0x1000000 + vtime + 2 * _csngen_tester_state_rid;
+ if (_csngen_tester_state_rid == 3) {
+ /* tp->tv_sec += _mynoise(vtime, 10, 1.5); */
+ tp->tv_sec += _mynoise(vtime, 30, 15);
+ }
+ return 0;
+}
+
+/* Mimic a fully meshed multi suplier topology */
+void csngen_multi_suppliers_test(void)
+{
+#define NB_TEST_MASTERS 6
+#define NB_TEST_STATES 500
+ CSNGen *gen[NB_TEST_MASTERS];
+ struct timespec now = {0};
+ CSN *last_csn = NULL;
+ CSN *csn = NULL;
+ int i,j,rc;
+
+ _csngen_tester_gettime(&now);
+
+ for (i=0; i< NB_TEST_MASTERS; i++) {
+ gen[i] = csngen_new(i+1, NULL);
+ gen[i]->gettime = _csngen_tester_gettime;
+ gen[i]->state.sampled_time = now.tv_sec;
+ }
+
+ for (_csngen_tester_state=0; _csngen_tester_state < NB_TEST_STATES; _csngen_tester_state++) {
+ for (i=0; i< NB_TEST_MASTERS; i++) {
+ _csngen_tester_state_rid = i+1;
+ rc = csngen_new_csn(gen[i], &csn, PR_FALSE);
+ if (rc) {
+ continue;
+ }
+ csngen_dump_state(gen[i], SLAPI_LOG_INFO);
+
+ if (csn_compare(csn, last_csn) <= 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "csngen_multi_suppliers_test",
+ "CSN generated in disorder state=%d rid=%d\n", _csngen_tester_state, _csngen_tester_state_rid);
+ _csngen_tester_state = NB_TEST_STATES;
+ break;
+ }
+ last_csn = csn;
+
+ for (j=0; j< NB_TEST_MASTERS; j++) {
+ if (i==j) {
+ continue;
+ }
+ _csngen_tester_state_rid = j+1;
+ rc = csngen_adjust_time(gen[j], csn);
+ if (rc) {
+ continue;
+ }
+ }
+ }
+ }
+}
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 56765fdfb..59c5ec9ab 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -6762,8 +6762,17 @@ time_t slapi_current_time(void) __attribute__((deprecated));
*
* \param tp - a timespec struct where the system time is set
* \return result code, upon success tp is set to the system time
+ * as a clock in UTC timezone. This clock adjusts with ntp steps,
+ * and should NOT be used for timer information.
*/
int32_t slapi_clock_gettime(struct timespec *tp);
+/*
+ * slapi_clock_gettime should have better been called
+ * slapi_clock_utc_gettime but sice the function pre-existed
+ * we are just adding an alias (to avoid risking to break
+ * some custom plugins)
+ */
+#define slapi_clock_utc_gettime slapi_clock_gettime
/**
* Returns the current system time as a hr clock relative to uptime
--
2.31.1

View File

@ -0,0 +1,240 @@
From 957ffd53b041c19d27753a028e6f514dcc75dfbd Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 26 Oct 2021 15:51:24 -0700
Subject: [PATCH 03/12] Issue 3584 - Fix PBKDF2_SHA256 hashing in FIPS mode
(#4949)
Issue Description: Use PK11_Decrypt function to get hash data
because PK11_ExtractKeyValue function is forbidden in FIPS mode.
We can't extract keys while in FIPS mode. But we use PK11_ExtractKeyValue
for hashes, and it's not forbidden.
We can't use OpenSSL's PBKDF2-SHA256 implementation right now because
we need to support an upgrade procedure while in FIPS mode (update
hash on bind). For that, we should fix existing PBKDF2 usage, and we can
switch to OpenSSL's PBKDF2-SHA256 in the following versions.
Fix Description: Use PK11_Decrypt function to get the data.
Enable TLS on all CI test topologies while in FIPS because without
that we don't set up the NSS database correctly.
Add PBKDF2-SHA256 (OpenSSL) to ldif templates, so the password scheme is
discoverable by internal functions.
https://github.com/389ds/389-ds-base/issues/3584
Reviewed by: @progier389, @mreynolds389, @Firstyear, @tbordaz (Thanks!!)
---
.../healthcheck/health_security_test.py | 10 ---
ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c | 62 ++++++++++++++++---
ldap/servers/slapd/main.c | 12 ++++
src/lib389/lib389/__init__.py | 4 ++
src/lib389/lib389/topologies.py | 6 +-
src/lib389/lib389/utils.py | 13 ++++
6 files changed, 86 insertions(+), 21 deletions(-)
diff --git a/dirsrvtests/tests/suites/healthcheck/health_security_test.py b/dirsrvtests/tests/suites/healthcheck/health_security_test.py
index 6c0d27aaa..c1dc7938c 100644
--- a/dirsrvtests/tests/suites/healthcheck/health_security_test.py
+++ b/dirsrvtests/tests/suites/healthcheck/health_security_test.py
@@ -40,16 +40,6 @@ else:
log = logging.getLogger(__name__)
-def is_fips():
- if os.path.exists('/proc/sys/crypto/fips_enabled'):
- with open('/proc/sys/crypto/fips_enabled', 'r') as f:
- state = f.readline().strip()
- if state == '1':
- return True
- else:
- return False
-
-
def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None):
args = FakeArgs()
args.instance = instance.serverid
diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
index d310dc792..dcac4fcdd 100644
--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
@@ -91,10 +91,11 @@ pbkdf2_sha256_extract(char *hash_in, SECItem *salt, uint32_t *iterations)
SECStatus
pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *salt, uint32_t iterations)
{
- SECItem *result = NULL;
SECAlgorithmID *algid = NULL;
PK11SlotInfo *slot = NULL;
PK11SymKey *symkey = NULL;
+ SECItem *wrapKeyData = NULL;
+ SECStatus rv = SECFailure;
/* We assume that NSS is already started. */
algid = PK11_CreatePBEV2AlgorithmID(SEC_OID_PKCS5_PBKDF2, SEC_OID_HMAC_SHA256, SEC_OID_HMAC_SHA256, hash_out_len, iterations, salt);
@@ -104,7 +105,6 @@ pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *s
slot = PK11_GetBestSlotMultiple(mechanism_array, 2, NULL);
if (slot != NULL) {
symkey = PK11_PBEKeyGen(slot, algid, pwd, PR_FALSE, NULL);
- PK11_FreeSlot(slot);
if (symkey == NULL) {
/* We try to get the Error here but NSS has two or more error interfaces, and sometimes it uses none of them. */
int32_t status = PORT_GetError();
@@ -123,18 +123,60 @@ pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *s
return SECFailure;
}
- if (PK11_ExtractKeyValue(symkey) == SECSuccess) {
- result = PK11_GetKeyData(symkey);
- if (result != NULL && result->len <= hash_out_len) {
- memcpy(hash_out, result->data, result->len);
- PK11_FreeSymKey(symkey);
+ /*
+ * First, we need to generate a wrapped key for PK11_Decrypt call:
+ * slot is the same slot we used in PK11_PBEKeyGen()
+ * 256 bits / 8 bit per byte
+ */
+ PK11SymKey *wrapKey = PK11_KeyGen(slot, CKM_AES_ECB, NULL, 256/8, NULL);
+ PK11_FreeSlot(slot);
+ if (wrapKey == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to generate a wrapped key.\n");
+ return SECFailure;
+ }
+
+ wrapKeyData = (SECItem *)PORT_Alloc(sizeof(SECItem));
+ /* Align the wrapped key with 32 bytes. */
+ wrapKeyData->len = (PK11_GetKeyLength(symkey) + 31) & ~31;
+ /* Allocate the aligned space for pkc5PBE key plus AESKey block */
+ wrapKeyData->data = (unsigned char *)slapi_ch_calloc(wrapKeyData->len, sizeof(unsigned char));
+
+ /* Get symkey wrapped with wrapKey - required for PK11_Decrypt call */
+ rv = PK11_WrapSymKey(CKM_AES_ECB, NULL, wrapKey, symkey, wrapKeyData);
+ if (rv != SECSuccess) {
+ PK11_FreeSymKey(symkey);
+ PK11_FreeSymKey(wrapKey);
+ SECITEM_FreeItem(wrapKeyData, PR_TRUE);
+ slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to wrap the symkey. (%d)\n", rv);
+ return SECFailure;
+ }
+
+ /* Allocate the space for our result */
+ void *result = (char *)slapi_ch_calloc(wrapKeyData->len, sizeof(char));
+ unsigned int result_len = 0;
+
+ /* User wrapKey to decrypt the wrapped contents.
+ * result is the hash that we need;
+ * result_len is the actual lengh of the data;
+ * has_out_len is the maximum (the space we allocted for hash_out)
+ */
+ rv = PK11_Decrypt(wrapKey, CKM_AES_ECB, NULL, result, &result_len, hash_out_len, wrapKeyData->data, wrapKeyData->len);
+ PK11_FreeSymKey(symkey);
+ PK11_FreeSymKey(wrapKey);
+ SECITEM_FreeItem(wrapKeyData, PR_TRUE);
+
+ if (rv == SECSuccess) {
+ if (result != NULL && result_len <= hash_out_len) {
+ memcpy(hash_out, result, result_len);
+ slapi_ch_free((void **)&result);
} else {
- PK11_FreeSymKey(symkey);
- slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to retrieve (get) hash output.\n");
+ slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to retrieve (get) hash output.\n");
+ slapi_ch_free((void **)&result);
return SECFailure;
}
} else {
- slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to extract hash output.\n");
+ slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to extract hash output. (%d)\n", rv);
+ slapi_ch_free((void **)&result);
return SECFailure;
}
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 61ed40b7d..04d0494f8 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -2895,9 +2895,21 @@ slapd_do_all_nss_ssl_init(int slapd_exemode, int importexport_encrypt, int s_por
* is enabled or not. We use NSS for random number generation and
* other things even if we are not going to accept SSL connections.
* We also need NSS for attribute encryption/decryption on import and export.
+ *
+ * It's important to remember that while in FIPS mode the administrator should always enable
+ * the security, otherwise we don't call slapd_pk11_authenticate which is a requirement for FIPS mode
*/
+ PRBool isFIPS = slapd_pk11_isFIPS();
int init_ssl = config_get_security();
+ if (isFIPS && !init_ssl) {
+ slapi_log_err(SLAPI_LOG_WARNING, "slapd_do_all_nss_ssl_init",
+ "ERROR: TLS is not enabled, and the machine is in FIPS mode. "
+ "Some functionality won't work correctly (for example, "
+ "users with PBKDF2_SHA256 password scheme won't be able to log in). "
+ "It's highly advisable to enable TLS on this instance.\n");
+ }
+
if (slapd_exemode == SLAPD_EXEMODE_SLAPD) {
init_ssl = init_ssl && (0 != s_port) && (s_port <= LDAP_PORT_MAX);
} else {
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 29ee5245a..e0299c5b4 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -1588,6 +1588,10 @@ class DirSrv(SimpleLDAPObject, object):
:param post_open: Open the server connection after restart.
:type post_open: bool
"""
+ if self.config.get_attr_val_utf8_l("nsslapd-security") == 'on':
+ self.restart(post_open=post_open)
+ return
+
# If it doesn't exist, create a cadb.
ssca = NssSsl(dbpath=self.get_ssca_dir())
if not ssca._db_exists():
diff --git a/src/lib389/lib389/topologies.py b/src/lib389/lib389/topologies.py
index e9969f524..e7d56582d 100644
--- a/src/lib389/lib389/topologies.py
+++ b/src/lib389/lib389/topologies.py
@@ -15,7 +15,7 @@ import socket
import pytest
from lib389 import DirSrv
-from lib389.utils import generate_ds_params
+from lib389.utils import generate_ds_params, is_fips
from lib389.mit_krb5 import MitKrb5
from lib389.saslmap import SaslMappings
from lib389.replica import ReplicationManager, Replicas
@@ -108,6 +108,10 @@ def _create_instances(topo_dict, suffix):
if role == ReplicaRole.HUB:
hs[instance.serverid] = instance
instances.update(hs)
+ # We should always enable TLS while in FIPS mode because otherwise NSS database won't be
+ # configured in a FIPS compliant way
+ if is_fips():
+ instance.enable_tls()
log.info("Instance with parameters {} was created.".format(args_instance))
if "standalone1" in instances and len(instances) == 1:
diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py
index b270784ce..5ba0c6676 100644
--- a/src/lib389/lib389/utils.py
+++ b/src/lib389/lib389/utils.py
@@ -1430,3 +1430,16 @@ def is_valid_hostname(hostname):
hostname = hostname[:-1] # strip exactly one dot from the right, if present
allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split("."))
+
+
+def is_fips():
+ if os.path.exists('/proc/sys/crypto/fips_enabled'):
+ with open('/proc/sys/crypto/fips_enabled', 'r') as f:
+ state = f.readline().strip()
+ if state == '1':
+ return True
+ else:
+ return False
+ else:
+ return False
+
--
2.31.1

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,114 @@
From d037688c072c4cb84fbf9b2a6cb24927f7950605 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 20 Oct 2021 10:04:06 -0400
Subject: [PATCH 04/12] Issue 4956 - Automember allows invalid regex, and does
not log proper error
Bug Description: The server was detecting an invalid automember
regex, but it did not reject it, and it did not
log which regex rule was invalid.
Fix Description: By properly rejecting the invalid regex will also
trigger the proper error logging to occur.
relates: https://github.com/389ds/389-ds-base/issues/4956
Reviewed by: tbordaz & spichugi(Thanks!!)
---
.../automember_plugin/configuration_test.py | 49 +++++++++++++++++--
ldap/servers/plugins/automember/automember.c | 1 +
2 files changed, 46 insertions(+), 4 deletions(-)
diff --git a/dirsrvtests/tests/suites/automember_plugin/configuration_test.py b/dirsrvtests/tests/suites/automember_plugin/configuration_test.py
index 0f9cc49dc..4a6b596db 100644
--- a/dirsrvtests/tests/suites/automember_plugin/configuration_test.py
+++ b/dirsrvtests/tests/suites/automember_plugin/configuration_test.py
@@ -1,21 +1,20 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2021 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
+import ldap
import os
import pytest
-
from lib389.topologies import topology_st as topo
from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions, MemberOfPlugin
-import ldap
+from lib389._constants import DEFAULT_SUFFIX
pytestmark = pytest.mark.tier1
-
@pytest.mark.bz834056
def test_configuration(topo):
"""
@@ -52,6 +51,48 @@ def test_configuration(topo):
'"cn=SuffDef1,ou=autouserGroups,cn=config" '
'can not be a child of the plugin config area "cn=config"')
+def test_invalid_regex(topo):
+ """Test invalid regex is properly reportedin the error log
+
+ :id: a6d89f84-ec76-4871-be96-411d051800b1
+ :setup: Standalone Instance
+ :steps:
+ 1. Setup automember
+ 2. Add invalid regex
+ 3. Error log reports useful message
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+ REGEX_DN = "cn=regex1,cn=testregex,cn=auto membership plugin,cn=plugins,cn=config"
+ REGEX_VALUE = "cn=*invalid*"
+ REGEX_ESC_VALUE = "cn=\\*invalid\\*"
+ GROUP_DN = "cn=demo_group,ou=groups," + DEFAULT_SUFFIX
+
+ AutoMembershipPlugin(topo.standalone).remove_all("nsslapd-pluginConfigArea")
+ automemberplugin = AutoMembershipPlugin(topo.standalone)
+
+ automember_prop = {
+ 'cn': 'testRegex',
+ 'autoMemberScope': 'ou=People,' + DEFAULT_SUFFIX,
+ 'autoMemberFilter': 'objectclass=*',
+ 'autoMemberDefaultGroup': GROUP_DN,
+ 'autoMemberGroupingAttr': 'member:dn',
+ }
+ automember_defs = AutoMembershipDefinitions(topo.standalone, "cn=Auto Membership Plugin,cn=plugins,cn=config")
+ automember_def = automember_defs.create(properties=automember_prop)
+ automember_def.add_regex_rule("regex1", GROUP_DN, include_regex=[REGEX_VALUE])
+
+ automemberplugin.enable()
+ topo.standalone.restart()
+
+ # Check errors log for invalid message
+ ERR_STR1 = "automember_parse_regex_rule - Unable to parse regex rule"
+ ERR_STR2 = f"Skipping invalid inclusive regex rule in rule entry \"{REGEX_DN}\" \\(rule = \"{REGEX_ESC_VALUE}\"\\)"
+ assert topo.standalone.searchErrorsLog(ERR_STR1)
+ assert topo.standalone.searchErrorsLog(ERR_STR2)
+
if __name__ == "__main__":
CURRENT_FILE = os.path.realpath(__file__)
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
index 39350ad53..b92b89bd5 100644
--- a/ldap/servers/plugins/automember/automember.c
+++ b/ldap/servers/plugins/automember/automember.c
@@ -1217,6 +1217,7 @@ automember_parse_regex_rule(char *rule_string)
"automember_parse_regex_rule - Unable to parse "
"regex rule (invalid regex). Error \"%s\".\n",
recomp_result ? recomp_result : "unknown");
+ goto bail;
}
/* Validation has passed, so create the regex rule struct and fill it in.
--
2.31.1

View File

@ -1,373 +0,0 @@
From c167d6127db45d8426437c273060c8c8f7fbcb9b Mon Sep 17 00:00:00 2001
From: Firstyear <william.brown@suse.com>
Date: Wed, 23 Sep 2020 09:19:34 +1000
Subject: [PATCH 04/12] Ticket 4326 - entryuuid fixup did not work correctly
(#4328)
Bug Description: due to an oversight in how fixup tasks
worked, the entryuuid fixup task did not work correctly and
would not persist over restarts.
Fix Description: Correctly implement entryuuid fixup.
fixes: #4326
Author: William Brown <william@blackhats.net.au>
Review by: mreynolds (thanks!)
---
.../tests/suites/entryuuid/basic_test.py | 24 +++-
src/plugins/entryuuid/src/lib.rs | 43 ++++++-
src/slapi_r_plugin/src/constants.rs | 5 +
src/slapi_r_plugin/src/entry.rs | 8 ++
src/slapi_r_plugin/src/lib.rs | 2 +
src/slapi_r_plugin/src/macros.rs | 2 +-
src/slapi_r_plugin/src/modify.rs | 118 ++++++++++++++++++
src/slapi_r_plugin/src/pblock.rs | 7 ++
src/slapi_r_plugin/src/value.rs | 4 +
9 files changed, 206 insertions(+), 7 deletions(-)
create mode 100644 src/slapi_r_plugin/src/modify.rs
diff --git a/dirsrvtests/tests/suites/entryuuid/basic_test.py b/dirsrvtests/tests/suites/entryuuid/basic_test.py
index beb73701d..4d8a40909 100644
--- a/dirsrvtests/tests/suites/entryuuid/basic_test.py
+++ b/dirsrvtests/tests/suites/entryuuid/basic_test.py
@@ -12,6 +12,7 @@ import time
import shutil
from lib389.idm.user import nsUserAccounts, UserAccounts
from lib389.idm.account import Accounts
+from lib389.idm.domain import Domain
from lib389.topologies import topology_st as topology
from lib389.backend import Backends
from lib389.paths import Paths
@@ -190,6 +191,7 @@ def test_entryuuid_fixup_task(topology):
3. Enable the entryuuid plugin
4. Run the fixup
5. Assert the entryuuid now exists
+ 6. Restart and check they persist
:expectedresults:
1. Success
@@ -197,6 +199,7 @@ def test_entryuuid_fixup_task(topology):
3. Success
4. Success
5. Suddenly EntryUUID!
+ 6. Still has EntryUUID!
"""
# 1. Disable the plugin
plug = EntryUUIDPlugin(topology.standalone)
@@ -220,7 +223,22 @@ def test_entryuuid_fixup_task(topology):
assert(task.is_complete() and task.get_exit_code() == 0)
topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,))
- # 5. Assert the uuid.
- euuid = account.get_attr_val_utf8('entryUUID')
- assert(euuid is not None)
+ # 5.1 Assert the uuid on the user.
+ euuid_user = account.get_attr_val_utf8('entryUUID')
+ assert(euuid_user is not None)
+
+ # 5.2 Assert it on the domain entry.
+ domain = Domain(topology.standalone, dn=DEFAULT_SUFFIX)
+ euuid_domain = domain.get_attr_val_utf8('entryUUID')
+ assert(euuid_domain is not None)
+
+ # Assert it persists after a restart.
+ topology.standalone.restart()
+ # 6.1 Assert the uuid on the use.
+ euuid_user_2 = account.get_attr_val_utf8('entryUUID')
+ assert(euuid_user_2 == euuid_user)
+
+ # 6.2 Assert it on the domain entry.
+ euuid_domain_2 = domain.get_attr_val_utf8('entryUUID')
+ assert(euuid_domain_2 == euuid_domain)
diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
index 6b5e8d1bb..92977db05 100644
--- a/src/plugins/entryuuid/src/lib.rs
+++ b/src/plugins/entryuuid/src/lib.rs
@@ -187,9 +187,46 @@ impl SlapiPlugin3 for EntryUuid {
}
}
-pub fn entryuuid_fixup_mapfn(mut e: EntryRef, _data: &()) -> Result<(), PluginError> {
- assign_uuid(&mut e);
- Ok(())
+pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError> {
+ /* Supply a modification to the entry. */
+ let sdn = e.get_sdnref();
+
+ /* Sanity check that entryuuid doesn't already exist */
+ if e.contains_attr("entryUUID") {
+ log_error!(
+ ErrorLevel::Trace,
+ "skipping fixup for -> {}",
+ sdn.to_dn_string()
+ );
+ return Ok(());
+ }
+
+ // Setup the modifications
+ let mut mods = SlapiMods::new();
+
+ let u: Uuid = Uuid::new_v4();
+ let uuid_value = Value::from(&u);
+ let values: ValueArray = std::iter::once(uuid_value).collect();
+ mods.append(ModType::Replace, "entryUUID", values);
+
+ /* */
+ let lmod = Modify::new(&sdn, mods, plugin_id())?;
+
+ match lmod.execute() {
+ Ok(_) => {
+ log_error!(ErrorLevel::Trace, "fixed-up -> {}", sdn.to_dn_string());
+ Ok(())
+ }
+ Err(e) => {
+ log_error!(
+ ErrorLevel::Error,
+ "entryuuid_fixup_mapfn -> fixup failed -> {} {:?}",
+ sdn.to_dn_string(),
+ e
+ );
+ Err(PluginError::GenericFailure)
+ }
+ }
}
#[cfg(test)]
diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs
index cf76ccbdb..34845c2f4 100644
--- a/src/slapi_r_plugin/src/constants.rs
+++ b/src/slapi_r_plugin/src/constants.rs
@@ -5,6 +5,11 @@ use std::os::raw::c_char;
pub const LDAP_SUCCESS: i32 = 0;
pub const PLUGIN_DEFAULT_PRECEDENCE: i32 = 50;
+#[repr(i32)]
+pub enum OpFlags {
+ ByassReferrals = 0x0040_0000,
+}
+
#[repr(i32)]
/// The set of possible function handles we can register via the pblock. These
/// values correspond to slapi-plugin.h.
diff --git a/src/slapi_r_plugin/src/entry.rs b/src/slapi_r_plugin/src/entry.rs
index 034efe692..22ae45189 100644
--- a/src/slapi_r_plugin/src/entry.rs
+++ b/src/slapi_r_plugin/src/entry.rs
@@ -70,6 +70,14 @@ impl EntryRef {
}
}
+ pub fn contains_attr(&self, name: &str) -> bool {
+ let cname = CString::new(name).expect("invalid attr name");
+ let va = unsafe { slapi_entry_attr_get_valuearray(self.raw_e, cname.as_ptr()) };
+
+ // If it's null, it's not present, so flip the logic.
+ !va.is_null()
+ }
+
pub fn add_value(&mut self, a: &str, v: &ValueRef) {
// turn the attr to a c string.
// TODO FIX
diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs
index d7fc22e52..076907bae 100644
--- a/src/slapi_r_plugin/src/lib.rs
+++ b/src/slapi_r_plugin/src/lib.rs
@@ -9,6 +9,7 @@ pub mod dn;
pub mod entry;
pub mod error;
pub mod log;
+pub mod modify;
pub mod pblock;
pub mod plugin;
pub mod search;
@@ -24,6 +25,7 @@ pub mod prelude {
pub use crate::entry::EntryRef;
pub use crate::error::{DseCallbackStatus, LDAPError, PluginError, RPluginError};
pub use crate::log::{log_error, ErrorLevel};
+ pub use crate::modify::{ModType, Modify, SlapiMods};
pub use crate::pblock::{Pblock, PblockRef};
pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3};
pub use crate::search::{Search, SearchScope};
diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs
index 030449632..bc8dfa60f 100644
--- a/src/slapi_r_plugin/src/macros.rs
+++ b/src/slapi_r_plugin/src/macros.rs
@@ -825,7 +825,7 @@ macro_rules! slapi_r_search_callback_mapfn {
let e = EntryRef::new(raw_e);
let data_ptr = raw_data as *const _;
let data = unsafe { &(*data_ptr) };
- match $cb_mod_ident(e, data) {
+ match $cb_mod_ident(&e, data) {
Ok(_) => LDAPError::Success as i32,
Err(e) => e as i32,
}
diff --git a/src/slapi_r_plugin/src/modify.rs b/src/slapi_r_plugin/src/modify.rs
new file mode 100644
index 000000000..30864377a
--- /dev/null
+++ b/src/slapi_r_plugin/src/modify.rs
@@ -0,0 +1,118 @@
+use crate::constants::OpFlags;
+use crate::dn::SdnRef;
+use crate::error::{LDAPError, PluginError};
+use crate::pblock::Pblock;
+use crate::plugin::PluginIdRef;
+use crate::value::{slapi_value, ValueArray};
+
+use std::ffi::CString;
+use std::ops::{Deref, DerefMut};
+use std::os::raw::c_char;
+
+extern "C" {
+ fn slapi_modify_internal_set_pb_ext(
+ pb: *const libc::c_void,
+ dn: *const libc::c_void,
+ mods: *const *const libc::c_void,
+ controls: *const *const libc::c_void,
+ uniqueid: *const c_char,
+ plugin_ident: *const libc::c_void,
+ op_flags: i32,
+ );
+ fn slapi_modify_internal_pb(pb: *const libc::c_void);
+ fn slapi_mods_free(smods: *const *const libc::c_void);
+ fn slapi_mods_get_ldapmods_byref(smods: *const libc::c_void) -> *const *const libc::c_void;
+ fn slapi_mods_new() -> *const libc::c_void;
+ fn slapi_mods_add_mod_values(
+ smods: *const libc::c_void,
+ mtype: i32,
+ attrtype: *const c_char,
+ value: *const *const slapi_value,
+ );
+}
+
+#[derive(Debug)]
+#[repr(i32)]
+pub enum ModType {
+ Add = 0,
+ Delete = 1,
+ Replace = 2,
+}
+
+pub struct SlapiMods {
+ inner: *const libc::c_void,
+ vas: Vec<ValueArray>,
+}
+
+impl Drop for SlapiMods {
+ fn drop(&mut self) {
+ unsafe { slapi_mods_free(&self.inner as *const *const libc::c_void) }
+ }
+}
+
+impl SlapiMods {
+ pub fn new() -> Self {
+ SlapiMods {
+ inner: unsafe { slapi_mods_new() },
+ vas: Vec::new(),
+ }
+ }
+
+ pub fn append(&mut self, modtype: ModType, attrtype: &str, values: ValueArray) {
+ // We can get the value array pointer here to push to the inner
+ // because the internal pointers won't change even when we push them
+ // to the list to preserve their lifetime.
+ let vas = values.as_ptr();
+ // We take ownership of this to ensure it lives as least as long as our
+ // slapimods structure.
+ self.vas.push(values);
+ // now we can insert these to the modes.
+ let c_attrtype = CString::new(attrtype).expect("failed to allocate attrtype");
+ unsafe { slapi_mods_add_mod_values(self.inner, modtype as i32, c_attrtype.as_ptr(), vas) };
+ }
+}
+
+pub struct Modify {
+ pb: Pblock,
+ mods: SlapiMods,
+}
+
+pub struct ModifyResult {
+ pb: Pblock,
+}
+
+impl Modify {
+ pub fn new(dn: &SdnRef, mods: SlapiMods, plugin_id: PluginIdRef) -> Result<Self, PluginError> {
+ let pb = Pblock::new();
+ let lmods = unsafe { slapi_mods_get_ldapmods_byref(mods.inner) };
+ // OP_FLAG_ACTION_LOG_ACCESS
+
+ unsafe {
+ slapi_modify_internal_set_pb_ext(
+ pb.deref().as_ptr(),
+ dn.as_ptr(),
+ lmods,
+ std::ptr::null(),
+ std::ptr::null(),
+ plugin_id.raw_pid,
+ OpFlags::ByassReferrals as i32,
+ )
+ };
+
+ Ok(Modify { pb, mods })
+ }
+
+ pub fn execute(self) -> Result<ModifyResult, LDAPError> {
+ let Modify {
+ mut pb,
+ mods: _mods,
+ } = self;
+ unsafe { slapi_modify_internal_pb(pb.deref().as_ptr()) };
+ let result = pb.get_op_result();
+
+ match result {
+ 0 => Ok(ModifyResult { pb }),
+ _e => Err(LDAPError::from(result)),
+ }
+ }
+}
diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs
index b69ce1680..0f83914f3 100644
--- a/src/slapi_r_plugin/src/pblock.rs
+++ b/src/slapi_r_plugin/src/pblock.rs
@@ -11,6 +11,7 @@ pub use crate::log::{log_error, ErrorLevel};
extern "C" {
fn slapi_pblock_set(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
fn slapi_pblock_get(pb: *const libc::c_void, arg: i32, value: *const libc::c_void) -> i32;
+ fn slapi_pblock_destroy(pb: *const libc::c_void);
fn slapi_pblock_new() -> *const libc::c_void;
}
@@ -41,6 +42,12 @@ impl DerefMut for Pblock {
}
}
+impl Drop for Pblock {
+ fn drop(&mut self) {
+ unsafe { slapi_pblock_destroy(self.value.raw_pb) }
+ }
+}
+
pub struct PblockRef {
raw_pb: *const libc::c_void,
}
diff --git a/src/slapi_r_plugin/src/value.rs b/src/slapi_r_plugin/src/value.rs
index 5a40dd279..46246837a 100644
--- a/src/slapi_r_plugin/src/value.rs
+++ b/src/slapi_r_plugin/src/value.rs
@@ -96,6 +96,10 @@ impl ValueArray {
let bs = vs.into_boxed_slice();
Box::leak(bs) as *const _ as *const *const slapi_value
}
+
+ pub fn as_ptr(&self) -> *const *const slapi_value {
+ self.data.as_ptr() as *const *const slapi_value
+ }
}
impl FromIterator<Value> for ValueArray {
--
2.26.3

View File

@ -0,0 +1,245 @@
From 9c08a053938eb28821fad7d0850c046ef2ed44c4 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 9 Dec 2020 16:16:30 -0500
Subject: [PATCH 05/12] Issue 4092 - systemd-tmpfiles warnings
Bug Description:
systemd-tmpfiles warns about legacy paths in our tmpfiles configs.
Using /var/run also introduces a race condition, see the following
issue https://pagure.io/389-ds-base/issue/47429
Fix Description:
Instead of using @localstatedir@/run use @localrundir@ which was
introduced in #850.
Relates: https://github.com/389ds/389-ds-base/issues/766
Fixes: https://github.com/389ds/389-ds-base/issues/4092
Reviewed by: vashirov & firstyear(Thanks!)
---
Makefile.am | 4 ++--
configure.ac | 10 ++++++++--
dirsrvtests/tests/suites/basic/basic_test.py | 3 ++-
ldap/admin/src/defaults.inf.in | 8 ++++----
ldap/servers/snmp/main.c | 8 ++++----
src/lib389/lib389/__init__.py | 3 +++
src/lib389/lib389/instance/options.py | 7 ++++++-
src/lib389/lib389/instance/remove.py | 13 ++++++++-----
src/lib389/lib389/instance/setup.py | 10 ++++++++--
9 files changed, 45 insertions(+), 21 deletions(-)
diff --git a/Makefile.am b/Makefile.am
index 36434cf17..fc5a6a7d1 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -141,8 +141,8 @@ PATH_DEFINES = -DLOCALSTATEDIR="\"$(localstatedir)\"" -DSYSCONFDIR="\"$(sysconfd
-DLIBDIR="\"$(libdir)\"" -DBINDIR="\"$(bindir)\"" \
-DDATADIR="\"$(datadir)\"" -DDOCDIR="\"$(docdir)\"" \
-DSBINDIR="\"$(sbindir)\"" -DPLUGINDIR="\"$(serverplugindir)\"" \
- -DTEMPLATEDIR="\"$(sampledatadir)\"" -DSYSTEMSCHEMADIR="\"$(systemschemadir)\""
-
+ -DTEMPLATEDIR="\"$(sampledatadir)\"" -DSYSTEMSCHEMADIR="\"$(systemschemadir)\"" \
+ -DLOCALRUNDIR="\"$(localrundir)\""
# Now that we have all our defines in place, setup the CPPFLAGS
# These flags are the "must have" for all components
diff --git a/configure.ac b/configure.ac
index 61bf35e4a..9845beb7d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -418,7 +418,14 @@ fi
m4_include(m4/fhs.m4)
-localrundir='/run'
+# /run directory path
+AC_ARG_WITH([localrundir],
+ AS_HELP_STRING([--with-localrundir=DIR],
+ [Runtime data directory]),
+ [localrundir=$with_localrundir],
+ [localrundir="/run"])
+AC_SUBST([localrundir])
+
cockpitdir=/389-console
# installation paths - by default, we store everything
@@ -899,7 +906,6 @@ AC_SUBST(ldaplib_defs)
AC_SUBST(ldaptool_bindir)
AC_SUBST(ldaptool_opts)
AC_SUBST(plainldif_opts)
-AC_SUBST(localrundir)
AC_SUBST(brand)
AC_SUBST(capbrand)
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 41726f073..7e80c443b 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -901,7 +901,8 @@ def test_basic_ldapagent(topology_st, import_example_ldif):
# Remember, this is *forking*
check_output([os.path.join(topology_st.standalone.get_sbin_dir(), 'ldap-agent'), config_file])
# First kill any previous agents ....
- pidpath = os.path.join(var_dir, 'run/ldap-agent.pid')
+ run_dir = topology_st.standalone.get_run_dir()
+ pidpath = os.path.join(run_dir, 'ldap-agent.pid')
pid = None
with open(pidpath, 'r') as pf:
pid = pf.readlines()[0].strip()
diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in
index d5f504591..e02248b89 100644
--- a/ldap/admin/src/defaults.inf.in
+++ b/ldap/admin/src/defaults.inf.in
@@ -35,12 +35,12 @@ sysconf_dir = @sysconfdir@
initconfig_dir = @initconfigdir@
config_dir = @instconfigdir@/slapd-{instance_name}
local_state_dir = @localstatedir@
-run_dir = @localstatedir@/run/dirsrv
+run_dir = @localrundir@
# This is the expected location of ldapi.
-ldapi = @localstatedir@/run/slapd-{instance_name}.socket
+ldapi = @localrundir@/slapd-{instance_name}.socket
+pid_file = @localrundir@/slapd-{instance_name}.pid
ldapi_listen = on
ldapi_autobind = on
-pid_file = @localstatedir@/run/dirsrv/slapd-{instance_name}.pid
inst_dir = @serverdir@/slapd-{instance_name}
plugin_dir = @serverplugindir@
system_schema_dir = @systemschemadir@
@@ -54,7 +54,7 @@ root_dn = cn=Directory Manager
schema_dir = @instconfigdir@/slapd-{instance_name}/schema
cert_dir = @instconfigdir@/slapd-{instance_name}
-lock_dir = @localstatedir@/lock/dirsrv/slapd-{instance_name}
+lock_dir = @localrundir@/lock/dirsrv/slapd-{instance_name}
log_dir = @localstatedir@/log/dirsrv/slapd-{instance_name}
access_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/access
audit_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/audit
diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c
index 88a4d532a..e6271a8a9 100644
--- a/ldap/servers/snmp/main.c
+++ b/ldap/servers/snmp/main.c
@@ -287,14 +287,14 @@ load_config(char *conf_path)
}
/* set pidfile path */
- if ((pidfile = malloc(strlen(LOCALSTATEDIR) + strlen("/run/") +
+ if ((pidfile = malloc(strlen(LOCALRUNDIR) + strlen("/") +
strlen(LDAP_AGENT_PIDFILE) + 1)) != NULL) {
- strncpy(pidfile, LOCALSTATEDIR, strlen(LOCALSTATEDIR) + 1);
+ strncpy(pidfile, LOCALRUNDIR, strlen(LOCALRUNDIR) + 1);
/* The above will likely not be NULL terminated, but we need to
* be sure that we're properly NULL terminated for the below
* strcat() to work properly. */
- pidfile[strlen(LOCALSTATEDIR)] = (char)0;
- strcat(pidfile, "/run/");
+ pidfile[strlen(LOCALRUNDIR)] = (char)0;
+ strcat(pidfile, "/");
strcat(pidfile, LDAP_AGENT_PIDFILE);
} else {
printf("ldap-agent: malloc error processing config file\n");
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index e0299c5b4..2a0b83913 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -1709,6 +1709,9 @@ class DirSrv(SimpleLDAPObject, object):
def get_bin_dir(self):
return self.ds_paths.bin_dir
+ def get_run_dir(self):
+ return self.ds_paths.run_dir
+
def get_plugin_dir(self):
return self.ds_paths.plugin_dir
diff --git a/src/lib389/lib389/instance/options.py b/src/lib389/lib389/instance/options.py
index 4e083618c..d5b95e6df 100644
--- a/src/lib389/lib389/instance/options.py
+++ b/src/lib389/lib389/instance/options.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2021 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -32,6 +32,7 @@ format_keys = [
'backup_dir',
'db_dir',
'db_home_dir',
+ 'ldapi',
'ldif_dir',
'lock_dir',
'log_dir',
@@ -233,6 +234,10 @@ class Slapd2Base(Options2):
self._helptext['local_state_dir'] = "Sets the location of Directory Server variable data. Only set this parameter in a development environment."
self._advanced['local_state_dir'] = True
+ self._options['ldapi'] = ds_paths.ldapi
+ self._type['ldapi'] = str
+ self._helptext['ldapi'] = "Sets the location of socket interface of the Directory Server."
+
self._options['lib_dir'] = ds_paths.lib_dir
self._type['lib_dir'] = str
self._helptext['lib_dir'] = "Sets the location of Directory Server shared libraries. Only set this parameter in a development environment."
diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py
index d7bb48ce0..1a35ddc07 100644
--- a/src/lib389/lib389/instance/remove.py
+++ b/src/lib389/lib389/instance/remove.py
@@ -78,13 +78,16 @@ def remove_ds_instance(dirsrv, force=False):
_log.debug("Found instance marker at %s! Proceeding to remove ..." % dse_ldif_path)
- # Stop the instance (if running) and now we know it really does exist
- # and hopefully have permission to access it ...
- _log.debug("Stopping instance %s" % dirsrv.serverid)
- dirsrv.stop()
-
### ANY NEW REMOVAL ACTION MUST BE BELOW THIS LINE!!!
+ # Remove LDAPI socket file
+ ldapi_path = os.path.join(dirsrv.ds_paths.run_dir, "slapd-%s.socket" % dirsrv.serverid)
+ if os.path.exists(ldapi_path):
+ try:
+ os.remove(ldapi_path)
+ except OSError as e:
+ _log.debug(f"Failed to remove LDAPI socket ({ldapi_path}) Error: {str(e)}")
+
# Remove these paths:
# for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
# 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index ab7a2da85..57e7a9fd4 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -732,7 +732,10 @@ class SetupDs(object):
dse += line.replace('%', '{', 1).replace('%', '}', 1)
with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
- ldapi_path = os.path.join(slapd['local_state_dir'], "run/slapd-%s.socket" % slapd['instance_name'])
+ if os.path.exists(os.path.dirname(slapd['ldapi'])):
+ ldapi_path = slapd['ldapi']
+ else:
+ ldapi_path = os.path.join(slapd['run_dir'], "slapd-%s.socket" % slapd['instance_name'])
dse_fmt = dse.format(
schema_dir=slapd['schema_dir'],
lock_dir=slapd['lock_dir'],
@@ -902,10 +905,13 @@ class SetupDs(object):
self.log.info("Perform SELinux labeling ...")
selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir',
- 'run_dir', 'schema_dir', 'tmp_dir')
+ 'schema_dir', 'tmp_dir')
for path in selinux_paths:
selinux_restorecon(slapd[path])
+ # Don't run restorecon on the entire /run directory
+ selinux_restorecon(slapd['run_dir'] + '/dirsrv')
+
selinux_label_port(slapd['port'])
# Start the server
--
2.31.1

View File

@ -1,192 +0,0 @@
From b2e0a1d405d15383064e547fd15008bc136d3efe Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Thu, 17 Dec 2020 08:22:23 +1000
Subject: [PATCH 05/12] Issue 4498 - BUG - entryuuid replication may not work
(#4503)
Bug Description: EntryUUID can be duplicated in replication,
due to a missing check in assign_uuid
Fix Description: Add a test case to determine how this occurs,
and add the correct check for existing entryUUID.
fixes: https://github.com/389ds/389-ds-base/issues/4498
Author: William Brown <william@blackhats.net.au>
Review by: @mreynolds389
---
.../tests/suites/entryuuid/replicated_test.py | 77 +++++++++++++++++++
rpm.mk | 2 +-
src/plugins/entryuuid/src/lib.rs | 20 ++++-
src/slapi_r_plugin/src/constants.rs | 2 +
src/slapi_r_plugin/src/pblock.rs | 7 ++
5 files changed, 106 insertions(+), 2 deletions(-)
create mode 100644 dirsrvtests/tests/suites/entryuuid/replicated_test.py
diff --git a/dirsrvtests/tests/suites/entryuuid/replicated_test.py b/dirsrvtests/tests/suites/entryuuid/replicated_test.py
new file mode 100644
index 000000000..a2ebc8ff7
--- /dev/null
+++ b/dirsrvtests/tests/suites/entryuuid/replicated_test.py
@@ -0,0 +1,77 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 William Brown <william@blackhats.net.au>
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import ldap
+import pytest
+import logging
+from lib389.topologies import topology_m2 as topo_m2
+from lib389.idm.user import nsUserAccounts
+from lib389.paths import Paths
+from lib389.utils import ds_is_older
+from lib389._constants import *
+from lib389.replica import ReplicationManager
+
+default_paths = Paths()
+
+pytestmark = pytest.mark.tier1
+
+@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions")
+
+def test_entryuuid_with_replication(topo_m2):
+ """ Check that entryuuid works with replication
+
+ :id: a5f15bf9-7f63-473a-840c-b9037b787024
+
+ :setup: two node mmr
+
+ :steps:
+ 1. Create an entry on one server
+ 2. Wait for replication
+ 3. Assert it is on the second
+
+ :expectedresults:
+ 1. Success
+ 1. Success
+ 1. Success
+ """
+
+ server_a = topo_m2.ms["supplier1"]
+ server_b = topo_m2.ms["supplier2"]
+ server_a.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE))
+ server_b.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE))
+
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+
+ account_a = nsUserAccounts(server_a, DEFAULT_SUFFIX).create_test_user(uid=2000)
+ euuid_a = account_a.get_attr_vals_utf8('entryUUID')
+ print("🧩 %s" % euuid_a)
+ assert(euuid_a is not None)
+ assert(len(euuid_a) == 1)
+
+ repl.wait_for_replication(server_a, server_b)
+
+ account_b = nsUserAccounts(server_b, DEFAULT_SUFFIX).get("test_user_2000")
+ euuid_b = account_b.get_attr_vals_utf8('entryUUID')
+ print("🧩 %s" % euuid_b)
+
+ server_a.config.loglevel(vals=(ErrorLog.DEFAULT,))
+ server_b.config.loglevel(vals=(ErrorLog.DEFAULT,))
+
+ assert(euuid_b is not None)
+ assert(len(euuid_b) == 1)
+ assert(euuid_b == euuid_a)
+
+ account_b.set("description", "update")
+ repl.wait_for_replication(server_b, server_a)
+
+ euuid_c = account_a.get_attr_vals_utf8('entryUUID')
+ print("🧩 %s" % euuid_c)
+ assert(euuid_c is not None)
+ assert(len(euuid_c) == 1)
+ assert(euuid_c == euuid_a)
+
diff --git a/rpm.mk b/rpm.mk
index 02f5bba37..d1cdff7df 100644
--- a/rpm.mk
+++ b/rpm.mk
@@ -25,7 +25,7 @@ TSAN_ON = 0
# Undefined Behaviour Sanitizer
UBSAN_ON = 0
-RUST_ON = 0
+RUST_ON = 1
# PERL_ON is deprecated and turns on the LEGACY_ON, this for not breaking people's workflows.
PERL_ON = 1
diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
index 92977db05..0197c5e83 100644
--- a/src/plugins/entryuuid/src/lib.rs
+++ b/src/plugins/entryuuid/src/lib.rs
@@ -30,6 +30,16 @@ slapi_r_search_callback_mapfn!(entryuuid, entryuuid_fixup_cb, entryuuid_fixup_ma
fn assign_uuid(e: &mut EntryRef) {
let sdn = e.get_sdnref();
+ // 🚧 safety barrier 🚧
+ if e.contains_attr("entryUUID") {
+ log_error!(
+ ErrorLevel::Trace,
+ "assign_uuid -> entryUUID exists, skipping dn {}",
+ sdn.to_dn_string()
+ );
+ return;
+ }
+
// We could consider making these lazy static.
let config_sdn = Sdn::try_from("cn=config").expect("Invalid static dn");
let schema_sdn = Sdn::try_from("cn=schema").expect("Invalid static dn");
@@ -66,7 +76,15 @@ impl SlapiPlugin3 for EntryUuid {
}
fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> {
- log_error!(ErrorLevel::Trace, "betxn_pre_add");
+ if pb.get_is_replicated_operation() {
+ log_error!(
+ ErrorLevel::Trace,
+ "betxn_pre_add -> replicated operation, will not change"
+ );
+ return Ok(());
+ }
+
+ log_error!(ErrorLevel::Trace, "betxn_pre_add -> start");
let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?;
assign_uuid(&mut e);
diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs
index 34845c2f4..aa0691acc 100644
--- a/src/slapi_r_plugin/src/constants.rs
+++ b/src/slapi_r_plugin/src/constants.rs
@@ -164,6 +164,8 @@ pub(crate) enum PblockType {
AddEntry = 60,
/// SLAPI_BACKEND
Backend = 130,
+ /// SLAPI_IS_REPLICATED_OPERATION
+ IsReplicationOperation = 142,
/// SLAPI_PLUGIN_MR_NAMES
MRNames = 624,
/// SLAPI_PLUGIN_SYNTAX_NAMES
diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs
index 0f83914f3..718ff2ca7 100644
--- a/src/slapi_r_plugin/src/pblock.rs
+++ b/src/slapi_r_plugin/src/pblock.rs
@@ -279,4 +279,11 @@ impl PblockRef {
pub fn get_op_result(&mut self) -> i32 {
self.get_value_i32(PblockType::OpResult).unwrap_or(-1)
}
+
+ pub fn get_is_replicated_operation(&mut self) -> bool {
+ let i = self.get_value_i32(PblockType::IsReplicationOperation).unwrap_or(0);
+ // Because rust returns the result of the last evaluation, we can
+ // just return if not equal 0.
+ i != 0
+ }
}
--
2.26.3

View File

@ -1,626 +0,0 @@
From 04c44e74503a842561b6c6e58001faf86d924b20 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 7 Dec 2020 11:00:45 -0500
Subject: [PATCH 06/12] Issue 4421 - Unable to build with Rust enabled in
closed environment
Description: Add Makefile flags and update rpm.mk that allow updating
and downloading all the cargo/rust dependencies. This is
needed for nightly tests and upstream/downstream releases.
Fixes: https://github.com/389ds/389-ds-base/issues/4421
Reviewed by: firstyear(Thanks!)
---
rpm.mk | 3 +-
rpm/389-ds-base.spec.in | 2 +-
src/Cargo.lock | 563 ----------------------------------------
3 files changed, 3 insertions(+), 565 deletions(-)
delete mode 100644 src/Cargo.lock
diff --git a/rpm.mk b/rpm.mk
index d1cdff7df..ef810c63c 100644
--- a/rpm.mk
+++ b/rpm.mk
@@ -44,6 +44,7 @@ update-cargo-dependencies:
cargo update --manifest-path=./src/Cargo.toml
download-cargo-dependencies:
+ cargo update --manifest-path=./src/Cargo.toml
cargo vendor --manifest-path=./src/Cargo.toml
cargo fetch --manifest-path=./src/Cargo.toml
tar -czf vendor.tar.gz vendor
@@ -114,7 +115,7 @@ rpmbuildprep:
cp dist/sources/$(JEMALLOC_TARBALL) $(RPMBUILD)/SOURCES/ ; \
fi
-srpms: rpmroot srpmdistdir tarballs rpmbuildprep
+srpms: rpmroot srpmdistdir download-cargo-dependencies tarballs rpmbuildprep
rpmbuild --define "_topdir $(RPMBUILD)" -bs $(RPMBUILD)/SPECS/$(PACKAGE).spec
cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)*.src.rpm dist/srpms/
rm -rf $(RPMBUILD)
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index b9f85489b..d80de8422 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -357,7 +357,7 @@ UBSAN_FLAGS="--enable-ubsan --enable-debug"
%endif
%if %{use_rust}
-RUST_FLAGS="--enable-rust"
+RUST_FLAGS="--enable-rust --enable-rust-offline"
%endif
%if %{use_legacy}
diff --git a/src/Cargo.lock b/src/Cargo.lock
deleted file mode 100644
index 33d7b8f23..000000000
--- a/src/Cargo.lock
+++ /dev/null
@@ -1,563 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-[[package]]
-name = "ansi_term"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
-dependencies = [
- "winapi",
-]
-
-[[package]]
-name = "atty"
-version = "0.2.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
-dependencies = [
- "hermit-abi",
- "libc",
- "winapi",
-]
-
-[[package]]
-name = "autocfg"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
-
-[[package]]
-name = "base64"
-version = "0.13.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
-
-[[package]]
-name = "bitflags"
-version = "1.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
-
-[[package]]
-name = "byteorder"
-version = "1.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
-
-[[package]]
-name = "cbindgen"
-version = "0.9.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
-dependencies = [
- "clap",
- "log",
- "proc-macro2",
- "quote",
- "serde",
- "serde_json",
- "syn",
- "tempfile",
- "toml",
-]
-
-[[package]]
-name = "cc"
-version = "1.0.67"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd"
-dependencies = [
- "jobserver",
-]
-
-[[package]]
-name = "cfg-if"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
-
-[[package]]
-name = "clap"
-version = "2.33.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
-dependencies = [
- "ansi_term",
- "atty",
- "bitflags",
- "strsim",
- "textwrap",
- "unicode-width",
- "vec_map",
-]
-
-[[package]]
-name = "entryuuid"
-version = "0.1.0"
-dependencies = [
- "cc",
- "libc",
- "paste",
- "slapi_r_plugin",
- "uuid",
-]
-
-[[package]]
-name = "entryuuid_syntax"
-version = "0.1.0"
-dependencies = [
- "cc",
- "libc",
- "paste",
- "slapi_r_plugin",
- "uuid",
-]
-
-[[package]]
-name = "fernet"
-version = "0.1.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
-dependencies = [
- "base64",
- "byteorder",
- "getrandom",
- "openssl",
- "zeroize",
-]
-
-[[package]]
-name = "foreign-types"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
-dependencies = [
- "foreign-types-shared",
-]
-
-[[package]]
-name = "foreign-types-shared"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
-
-[[package]]
-name = "getrandom"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
-dependencies = [
- "cfg-if",
- "libc",
- "wasi",
-]
-
-[[package]]
-name = "hermit-abi"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "itoa"
-version = "0.4.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"
-
-[[package]]
-name = "jobserver"
-version = "0.1.22"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "lazy_static"
-version = "1.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-
-[[package]]
-name = "libc"
-version = "0.2.94"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e"
-
-[[package]]
-name = "librnsslapd"
-version = "0.1.0"
-dependencies = [
- "cbindgen",
- "libc",
- "slapd",
-]
-
-[[package]]
-name = "librslapd"
-version = "0.1.0"
-dependencies = [
- "cbindgen",
- "libc",
- "slapd",
-]
-
-[[package]]
-name = "log"
-version = "0.4.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
-dependencies = [
- "cfg-if",
-]
-
-[[package]]
-name = "once_cell"
-version = "1.7.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3"
-
-[[package]]
-name = "openssl"
-version = "0.10.34"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8"
-dependencies = [
- "bitflags",
- "cfg-if",
- "foreign-types",
- "libc",
- "once_cell",
- "openssl-sys",
-]
-
-[[package]]
-name = "openssl-sys"
-version = "0.9.63"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98"
-dependencies = [
- "autocfg",
- "cc",
- "libc",
- "pkg-config",
- "vcpkg",
-]
-
-[[package]]
-name = "paste"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
-dependencies = [
- "paste-impl",
- "proc-macro-hack",
-]
-
-[[package]]
-name = "paste-impl"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
-dependencies = [
- "proc-macro-hack",
-]
-
-[[package]]
-name = "pkg-config"
-version = "0.3.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
-
-[[package]]
-name = "ppv-lite86"
-version = "0.2.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
-
-[[package]]
-name = "proc-macro-hack"
-version = "0.5.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
-
-[[package]]
-name = "proc-macro2"
-version = "1.0.27"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
-dependencies = [
- "unicode-xid",
-]
-
-[[package]]
-name = "quote"
-version = "1.0.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
-dependencies = [
- "proc-macro2",
-]
-
-[[package]]
-name = "rand"
-version = "0.8.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
-dependencies = [
- "libc",
- "rand_chacha",
- "rand_core",
- "rand_hc",
-]
-
-[[package]]
-name = "rand_chacha"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
-dependencies = [
- "ppv-lite86",
- "rand_core",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.6.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"
-dependencies = [
- "getrandom",
-]
-
-[[package]]
-name = "rand_hc"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
-dependencies = [
- "rand_core",
-]
-
-[[package]]
-name = "redox_syscall"
-version = "0.2.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc"
-dependencies = [
- "bitflags",
-]
-
-[[package]]
-name = "remove_dir_all"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
-dependencies = [
- "winapi",
-]
-
-[[package]]
-name = "rsds"
-version = "0.1.0"
-
-[[package]]
-name = "ryu"
-version = "1.0.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
-
-[[package]]
-name = "serde"
-version = "1.0.126"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03"
-dependencies = [
- "serde_derive",
-]
-
-[[package]]
-name = "serde_derive"
-version = "1.0.126"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "serde_json"
-version = "1.0.64"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79"
-dependencies = [
- "itoa",
- "ryu",
- "serde",
-]
-
-[[package]]
-name = "slapd"
-version = "0.1.0"
-dependencies = [
- "fernet",
-]
-
-[[package]]
-name = "slapi_r_plugin"
-version = "0.1.0"
-dependencies = [
- "lazy_static",
- "libc",
- "paste",
- "uuid",
-]
-
-[[package]]
-name = "strsim"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
-
-[[package]]
-name = "syn"
-version = "1.0.72"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-xid",
-]
-
-[[package]]
-name = "synstructure"
-version = "0.12.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
- "unicode-xid",
-]
-
-[[package]]
-name = "tempfile"
-version = "3.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
-dependencies = [
- "cfg-if",
- "libc",
- "rand",
- "redox_syscall",
- "remove_dir_all",
- "winapi",
-]
-
-[[package]]
-name = "textwrap"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
-dependencies = [
- "unicode-width",
-]
-
-[[package]]
-name = "toml"
-version = "0.5.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "unicode-width"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
-
-[[package]]
-name = "unicode-xid"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
-
-[[package]]
-name = "uuid"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
-dependencies = [
- "getrandom",
-]
-
-[[package]]
-name = "vcpkg"
-version = "0.2.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbdbff6266a24120518560b5dc983096efb98462e51d0d68169895b237be3e5d"
-
-[[package]]
-name = "vec_map"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
-
-[[package]]
-name = "wasi"
-version = "0.10.2+wasi-snapshot-preview1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
-
-[[package]]
-name = "winapi"
-version = "0.3.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
-dependencies = [
- "winapi-i686-pc-windows-gnu",
- "winapi-x86_64-pc-windows-gnu",
-]
-
-[[package]]
-name = "winapi-i686-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-
-[[package]]
-name = "winapi-x86_64-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
-
-[[package]]
-name = "zeroize"
-version = "1.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd"
-dependencies = [
- "zeroize_derive",
-]
-
-[[package]]
-name = "zeroize_derive"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
- "synstructure",
-]
--
2.26.3

View File

@ -0,0 +1,113 @@
From b4a3b88faeafa6aa197d88ee84e4b2dbadd37ace Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 1 Nov 2021 10:42:27 -0400
Subject: [PATCH 06/12] Issue 4973 - installer changes permissions on /run
Description: There was a regression when we switched over to using /run
that caused the installer to try and create /run which
caused the ownership to change. Fixed this by changing
the "run_dir" to /run/dirsrv
relates: https://github.com/389ds/389-ds-base/issues/4973
Reviewed by: jchapman(Thanks!)
---
ldap/admin/src/defaults.inf.in | 2 +-
src/lib389/lib389/instance/remove.py | 10 +---------
src/lib389/lib389/instance/setup.py | 13 +++----------
3 files changed, 5 insertions(+), 20 deletions(-)
diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in
index e02248b89..92b93d695 100644
--- a/ldap/admin/src/defaults.inf.in
+++ b/ldap/admin/src/defaults.inf.in
@@ -35,7 +35,7 @@ sysconf_dir = @sysconfdir@
initconfig_dir = @initconfigdir@
config_dir = @instconfigdir@/slapd-{instance_name}
local_state_dir = @localstatedir@
-run_dir = @localrundir@
+run_dir = @localrundir@/dirsrv
# This is the expected location of ldapi.
ldapi = @localrundir@/slapd-{instance_name}.socket
pid_file = @localrundir@/slapd-{instance_name}.pid
diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py
index 1a35ddc07..e96db3896 100644
--- a/src/lib389/lib389/instance/remove.py
+++ b/src/lib389/lib389/instance/remove.py
@@ -52,9 +52,9 @@ def remove_ds_instance(dirsrv, force=False):
remove_paths['ldif_dir'] = dirsrv.ds_paths.ldif_dir
remove_paths['lock_dir'] = dirsrv.ds_paths.lock_dir
remove_paths['log_dir'] = dirsrv.ds_paths.log_dir
- # remove_paths['run_dir'] = dirsrv.ds_paths.run_dir
remove_paths['inst_dir'] = dirsrv.ds_paths.inst_dir
remove_paths['etc_sysconfig'] = "%s/sysconfig/dirsrv-%s" % (dirsrv.ds_paths.sysconf_dir, dirsrv.serverid)
+ remove_paths['ldapi'] = dirsrv.ds_paths.ldapi
tmpfiles_d_path = dirsrv.ds_paths.tmpfiles_d + "/dirsrv-" + dirsrv.serverid + ".conf"
@@ -80,14 +80,6 @@ def remove_ds_instance(dirsrv, force=False):
### ANY NEW REMOVAL ACTION MUST BE BELOW THIS LINE!!!
- # Remove LDAPI socket file
- ldapi_path = os.path.join(dirsrv.ds_paths.run_dir, "slapd-%s.socket" % dirsrv.serverid)
- if os.path.exists(ldapi_path):
- try:
- os.remove(ldapi_path)
- except OSError as e:
- _log.debug(f"Failed to remove LDAPI socket ({ldapi_path}) Error: {str(e)}")
-
# Remove these paths:
# for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
# 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index 57e7a9fd4..be6854af8 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -732,10 +732,6 @@ class SetupDs(object):
dse += line.replace('%', '{', 1).replace('%', '}', 1)
with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
- if os.path.exists(os.path.dirname(slapd['ldapi'])):
- ldapi_path = slapd['ldapi']
- else:
- ldapi_path = os.path.join(slapd['run_dir'], "slapd-%s.socket" % slapd['instance_name'])
dse_fmt = dse.format(
schema_dir=slapd['schema_dir'],
lock_dir=slapd['lock_dir'],
@@ -759,7 +755,7 @@ class SetupDs(object):
db_dir=slapd['db_dir'],
db_home_dir=slapd['db_home_dir'],
ldapi_enabled="on",
- ldapi=ldapi_path,
+ ldapi=slapd['ldapi'],
ldapi_autobind="on",
)
file_dse.write(dse_fmt)
@@ -861,7 +857,7 @@ class SetupDs(object):
SER_ROOT_PW: self._raw_secure_password,
SER_DEPLOYED_DIR: slapd['prefix'],
SER_LDAPI_ENABLED: 'on',
- SER_LDAPI_SOCKET: ldapi_path,
+ SER_LDAPI_SOCKET: slapd['ldapi'],
SER_LDAPI_AUTOBIND: 'on'
}
@@ -905,13 +901,10 @@ class SetupDs(object):
self.log.info("Perform SELinux labeling ...")
selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir',
- 'schema_dir', 'tmp_dir')
+ 'run_dir', 'schema_dir', 'tmp_dir')
for path in selinux_paths:
selinux_restorecon(slapd[path])
- # Don't run restorecon on the entire /run directory
- selinux_restorecon(slapd['run_dir'] + '/dirsrv')
-
selinux_label_port(slapd['port'])
# Start the server
--
2.31.1

View File

@ -0,0 +1,70 @@
From c26c463ac92682dcf01ddbdc11cc1109b183eb0a Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 1 Nov 2021 16:04:28 -0400
Subject: [PATCH 07/12] Issue 4973 - update snmp to use /run/dirsrv for PID
file
Description: Previously SNMP would write the agent PID file directly
under /run (or /var/run), but this broke a CI test after
updating lib389/defaults.inf to use /run/dirsrv.
Instead of hacking the CI test, I changed the path
snmp uses to: /run/dirsrv/ Which is where it
should really be written anyway.
relates: https://github.com/389ds/389-ds-base/issues/4973
Reviewed by: vashirov(Thanks!)
---
ldap/servers/snmp/main.c | 4 ++--
wrappers/systemd-snmp.service.in | 6 +++---
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c
index e6271a8a9..d8eb918f6 100644
--- a/ldap/servers/snmp/main.c
+++ b/ldap/servers/snmp/main.c
@@ -287,14 +287,14 @@ load_config(char *conf_path)
}
/* set pidfile path */
- if ((pidfile = malloc(strlen(LOCALRUNDIR) + strlen("/") +
+ if ((pidfile = malloc(strlen(LOCALRUNDIR) + strlen("/dirsrv/") +
strlen(LDAP_AGENT_PIDFILE) + 1)) != NULL) {
strncpy(pidfile, LOCALRUNDIR, strlen(LOCALRUNDIR) + 1);
/* The above will likely not be NULL terminated, but we need to
* be sure that we're properly NULL terminated for the below
* strcat() to work properly. */
pidfile[strlen(LOCALRUNDIR)] = (char)0;
- strcat(pidfile, "/");
+ strcat(pidfile, "/dirsrv/");
strcat(pidfile, LDAP_AGENT_PIDFILE);
} else {
printf("ldap-agent: malloc error processing config file\n");
diff --git a/wrappers/systemd-snmp.service.in b/wrappers/systemd-snmp.service.in
index 477bc623d..f18766cb4 100644
--- a/wrappers/systemd-snmp.service.in
+++ b/wrappers/systemd-snmp.service.in
@@ -1,7 +1,7 @@
# do not edit this file in /lib/systemd/system - instead do the following:
# cp /lib/systemd/system/dirsrv-snmp.service /etc/systemd/system/
# edit /etc/systemd/system/dirsrv-snmp.service
-# systemctl daemon-reload
+# systemctl daemon-reload
# systemctl (re)start dirsrv-snmp.service
[Unit]
Description=@capbrand@ Directory Server SNMP Subagent.
@@ -9,8 +9,8 @@ After=network.target
[Service]
Type=forking
-PIDFile=/run/ldap-agent.pid
-ExecStart=@sbindir@/ldap-agent @configdir@/ldap-agent.conf
+PIDFile=/run/dirsrv/ldap-agent.pid
+ExecStart=@sbindir@/ldap-agent @configdir@/ldap-agent.conf
[Install]
WantedBy=multi-user.target
--
2.31.1

View File

@ -1,412 +0,0 @@
From 279bdb5148eb0b67ddab40c4dd9d08e9e1672f13 Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Fri, 26 Jun 2020 10:27:56 +1000
Subject: [PATCH 07/12] Ticket 51175 - resolve plugin name leaking
Bug Description: Previously pblock.c assumed that all plugin
names were static c strings. Rust can't create static C
strings, so these were intentionally leaked.
Fix Description: Rather than leak these, we do a dup/free
through the slapiplugin struct instead, meaning we can use
ephemeral, and properly managed strings in rust. This does not
affect any other existing code which will still handle the
static strings correctly.
https://pagure.io/389-ds-base/issue/51175
Author: William Brown <william@blackhats.net.au>
Review by: mreynolds, tbordaz (Thanks!)
---
Makefile.am | 1 +
configure.ac | 2 +-
ldap/servers/slapd/pagedresults.c | 6 +--
ldap/servers/slapd/pblock.c | 9 ++--
ldap/servers/slapd/plugin.c | 7 +++
ldap/servers/slapd/pw_verify.c | 1 +
ldap/servers/slapd/tools/pwenc.c | 2 +-
src/slapi_r_plugin/README.md | 6 +--
src/slapi_r_plugin/src/charray.rs | 32 ++++++++++++++
src/slapi_r_plugin/src/lib.rs | 8 ++--
src/slapi_r_plugin/src/macros.rs | 17 +++++---
src/slapi_r_plugin/src/syntax_plugin.rs | 57 +++++++------------------
12 files changed, 85 insertions(+), 63 deletions(-)
create mode 100644 src/slapi_r_plugin/src/charray.rs
diff --git a/Makefile.am b/Makefile.am
index 627953850..36434cf17 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1312,6 +1312,7 @@ rust-nsslapd-private.h: @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a
libslapi_r_plugin_SOURCES = \
src/slapi_r_plugin/src/backend.rs \
src/slapi_r_plugin/src/ber.rs \
+ src/slapi_r_plugin/src/charray.rs \
src/slapi_r_plugin/src/constants.rs \
src/slapi_r_plugin/src/dn.rs \
src/slapi_r_plugin/src/entry.rs \
diff --git a/configure.ac b/configure.ac
index b3cf77d08..61bf35e4a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -122,7 +122,7 @@ if test "$enable_debug" = yes ; then
debug_defs="-DDEBUG -DMCC_DEBUG"
debug_cflags="-g3 -O0 -rdynamic"
debug_cxxflags="-g3 -O0 -rdynamic"
- debug_rust_defs="-C debuginfo=2"
+ debug_rust_defs="-C debuginfo=2 -Z macro-backtrace"
cargo_defs=""
rust_target_dir="debug"
else
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
index d8b8798b6..e3444e944 100644
--- a/ldap/servers/slapd/pagedresults.c
+++ b/ldap/servers/slapd/pagedresults.c
@@ -738,10 +738,10 @@ pagedresults_cleanup(Connection *conn, int needlock)
int i;
PagedResults *prp = NULL;
- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n");
+ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "=>\n"); */
if (NULL == conn) {
- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n");
+ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= Connection is NULL\n"); */
return 0;
}
@@ -767,7 +767,7 @@ pagedresults_cleanup(Connection *conn, int needlock)
if (needlock) {
pthread_mutex_unlock(&(conn->c_mutex));
}
- slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc);
+ /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc); */
return rc;
}
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index 1ad9d0399..f7d1f8885 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -3351,13 +3351,15 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
- pblock->pb_plugin->plg_syntax_names = (char **)value;
+ PR_ASSERT(pblock->pb_plugin->plg_syntax_names == NULL);
+ pblock->pb_plugin->plg_syntax_names = slapi_ch_array_dup((char **)value);
break;
case SLAPI_PLUGIN_SYNTAX_OID:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
- pblock->pb_plugin->plg_syntax_oid = (char *)value;
+ PR_ASSERT(pblock->pb_plugin->plg_syntax_oid == NULL);
+ pblock->pb_plugin->plg_syntax_oid = slapi_ch_strdup((char *)value);
break;
case SLAPI_PLUGIN_SYNTAX_FLAGS:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
@@ -3806,7 +3808,8 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
return (-1);
}
- pblock->pb_plugin->plg_mr_names = (char **)value;
+ PR_ASSERT(pblock->pb_plugin->plg_mr_names == NULL);
+ pblock->pb_plugin->plg_mr_names = slapi_ch_array_dup((char **)value);
break;
case SLAPI_PLUGIN_MR_COMPARE:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c
index 282b98738..e6b48de60 100644
--- a/ldap/servers/slapd/plugin.c
+++ b/ldap/servers/slapd/plugin.c
@@ -2694,6 +2694,13 @@ plugin_free(struct slapdplugin *plugin)
if (plugin->plg_type == SLAPI_PLUGIN_PWD_STORAGE_SCHEME || plugin->plg_type == SLAPI_PLUGIN_REVER_PWD_STORAGE_SCHEME) {
slapi_ch_free_string(&plugin->plg_pwdstorageschemename);
}
+ if (plugin->plg_type == SLAPI_PLUGIN_SYNTAX) {
+ slapi_ch_free_string(&plugin->plg_syntax_oid);
+ slapi_ch_array_free(plugin->plg_syntax_names);
+ }
+ if (plugin->plg_type == SLAPI_PLUGIN_MATCHINGRULE) {
+ slapi_ch_array_free(plugin->plg_mr_names);
+ }
release_componentid(plugin->plg_identity);
slapi_counter_destroy(&plugin->plg_op_counter);
if (!plugin->plg_group) {
diff --git a/ldap/servers/slapd/pw_verify.c b/ldap/servers/slapd/pw_verify.c
index 4f0944b73..4ff1fa2fd 100644
--- a/ldap/servers/slapd/pw_verify.c
+++ b/ldap/servers/slapd/pw_verify.c
@@ -111,6 +111,7 @@ pw_verify_token_dn(Slapi_PBlock *pb) {
if (fernet_verify_token(dn, cred->bv_val, key, tok_ttl) != 0) {
rc = SLAPI_BIND_SUCCESS;
}
+ slapi_ch_free_string(&key);
#endif
return rc;
}
diff --git a/ldap/servers/slapd/tools/pwenc.c b/ldap/servers/slapd/tools/pwenc.c
index 1629c06cd..d89225e34 100644
--- a/ldap/servers/slapd/tools/pwenc.c
+++ b/ldap/servers/slapd/tools/pwenc.c
@@ -34,7 +34,7 @@
int ldap_syslog;
int ldap_syslog_level;
-int slapd_ldap_debug = LDAP_DEBUG_ANY;
+/* int slapd_ldap_debug = LDAP_DEBUG_ANY; */
int detached;
FILE *error_logfp;
FILE *access_logfp;
diff --git a/src/slapi_r_plugin/README.md b/src/slapi_r_plugin/README.md
index af9743ec9..1c9bcbf17 100644
--- a/src/slapi_r_plugin/README.md
+++ b/src/slapi_r_plugin/README.md
@@ -15,7 +15,7 @@ the [Rust Nomicon](https://doc.rust-lang.org/nomicon/index.html)
> warning about danger.
This document will not detail the specifics of unsafe or the invariants you must adhere to for rust
-to work with C.
+to work with C. Failure to uphold these invariants will lead to less than optimal consequences.
If you still want to see more about the plugin bindings, go on ...
@@ -135,7 +135,7 @@ associated functions.
Now, you may notice that not all members of the trait are implemented. This is due to a feature
of rust known as default trait impls. This allows the trait origin (src/plugin.rs) to provide
template versions of these functions. If you "overwrite" them, your implementation is used. Unlike
-OO, you may not inherit or call the default function.
+OO, you may not inherit or call the default function.
If a default is not provided you *must* implement that function to be considered valid. Today (20200422)
this only applies to `start` and `close`.
@@ -183,7 +183,7 @@ It's important to understand how Rust manages memory both on the stack and the h
As a result, this means that we must express in code, assertions about the proper ownership of memory
and who is responsible for it (unlike C, where it can be hard to determine who or what is responsible
for freeing some value.) Failure to handle this correctly, can and will lead to crashes, leaks or
-*hand waving* magical failures that are eXtReMeLy FuN to debug.
+*hand waving* magical failures that are `eXtReMeLy FuN` to debug.
### Reference Types
diff --git a/src/slapi_r_plugin/src/charray.rs b/src/slapi_r_plugin/src/charray.rs
new file mode 100644
index 000000000..d2e44693c
--- /dev/null
+++ b/src/slapi_r_plugin/src/charray.rs
@@ -0,0 +1,32 @@
+use std::ffi::CString;
+use std::iter::once;
+use std::os::raw::c_char;
+use std::ptr;
+
+pub struct Charray {
+ pin: Vec<CString>,
+ charray: Vec<*const c_char>,
+}
+
+impl Charray {
+ pub fn new(input: &[&str]) -> Result<Self, ()> {
+ let pin: Result<Vec<_>, ()> = input
+ .iter()
+ .map(|s| CString::new(*s).map_err(|_e| ()))
+ .collect();
+
+ let pin = pin?;
+
+ let charray: Vec<_> = pin
+ .iter()
+ .map(|s| s.as_ptr())
+ .chain(once(ptr::null()))
+ .collect();
+
+ Ok(Charray { pin, charray })
+ }
+
+ pub fn as_ptr(&self) -> *const *const c_char {
+ self.charray.as_ptr()
+ }
+}
diff --git a/src/slapi_r_plugin/src/lib.rs b/src/slapi_r_plugin/src/lib.rs
index 076907bae..be28cac95 100644
--- a/src/slapi_r_plugin/src/lib.rs
+++ b/src/slapi_r_plugin/src/lib.rs
@@ -1,9 +1,11 @@
-// extern crate lazy_static;
+#[macro_use]
+extern crate lazy_static;
#[macro_use]
pub mod macros;
pub mod backend;
pub mod ber;
+pub mod charray;
mod constants;
pub mod dn;
pub mod entry;
@@ -20,6 +22,7 @@ pub mod value;
pub mod prelude {
pub use crate::backend::{BackendRef, BackendRefTxn};
pub use crate::ber::BerValRef;
+ pub use crate::charray::Charray;
pub use crate::constants::{FilterType, PluginFnType, PluginType, PluginVersion, LDAP_SUCCESS};
pub use crate::dn::{Sdn, SdnRef};
pub use crate::entry::EntryRef;
@@ -30,8 +33,7 @@ pub mod prelude {
pub use crate::plugin::{register_plugin_ext, PluginIdRef, SlapiPlugin3};
pub use crate::search::{Search, SearchScope};
pub use crate::syntax_plugin::{
- matchingrule_register, name_to_leaking_char, names_to_leaking_char_array, SlapiOrdMr,
- SlapiSubMr, SlapiSyntaxPlugin1,
+ matchingrule_register, SlapiOrdMr, SlapiSubMr, SlapiSyntaxPlugin1,
};
pub use crate::task::{task_register_handler_fn, task_unregister_handler_fn, Task, TaskRef};
pub use crate::value::{Value, ValueArray, ValueArrayRef, ValueRef};
diff --git a/src/slapi_r_plugin/src/macros.rs b/src/slapi_r_plugin/src/macros.rs
index bc8dfa60f..97fc5d7ef 100644
--- a/src/slapi_r_plugin/src/macros.rs
+++ b/src/slapi_r_plugin/src/macros.rs
@@ -249,6 +249,7 @@ macro_rules! slapi_r_syntax_plugin_hooks {
paste::item! {
use libc;
use std::convert::TryFrom;
+ use std::ffi::CString;
#[no_mangle]
pub extern "C" fn [<$mod_ident _plugin_init>](raw_pb: *const libc::c_void) -> i32 {
@@ -261,15 +262,15 @@ macro_rules! slapi_r_syntax_plugin_hooks {
};
// Setup the names/oids that this plugin provides syntaxes for.
-
- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::attr_supported_names()) };
- match pb.register_syntax_names(name_ptr) {
+ // DS will clone these, so they can be ephemeral to this function.
+ let name_vec = Charray::new($hooks_ident::attr_supported_names().as_slice()).expect("invalid supported names");
+ match pb.register_syntax_names(name_vec.as_ptr()) {
0 => {},
e => return e,
};
- let name_ptr = unsafe { name_to_leaking_char($hooks_ident::attr_oid()) };
- match pb.register_syntax_oid(name_ptr) {
+ let attr_oid = CString::new($hooks_ident::attr_oid()).expect("invalid attr oid");
+ match pb.register_syntax_oid(attr_oid.as_ptr()) {
0 => {},
e => return e,
};
@@ -430,7 +431,8 @@ macro_rules! slapi_r_syntax_plugin_hooks {
e => return e,
};
- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::eq_mr_supported_names()) };
+ let name_vec = Charray::new($hooks_ident::eq_mr_supported_names().as_slice()).expect("invalid mr supported names");
+ let name_ptr = name_vec.as_ptr();
// SLAPI_PLUGIN_MR_NAMES
match pb.register_mr_names(name_ptr) {
0 => {},
@@ -672,7 +674,8 @@ macro_rules! slapi_r_syntax_plugin_hooks {
e => return e,
};
- let name_ptr = unsafe { names_to_leaking_char_array(&$hooks_ident::ord_mr_supported_names()) };
+ let name_vec = Charray::new($hooks_ident::ord_mr_supported_names().as_slice()).expect("invalid ord supported names");
+ let name_ptr = name_vec.as_ptr();
// SLAPI_PLUGIN_MR_NAMES
match pb.register_mr_names(name_ptr) {
0 => {},
diff --git a/src/slapi_r_plugin/src/syntax_plugin.rs b/src/slapi_r_plugin/src/syntax_plugin.rs
index e7d5c01bd..86f84bdd8 100644
--- a/src/slapi_r_plugin/src/syntax_plugin.rs
+++ b/src/slapi_r_plugin/src/syntax_plugin.rs
@@ -1,11 +1,11 @@
use crate::ber::BerValRef;
// use crate::constants::FilterType;
+use crate::charray::Charray;
use crate::error::PluginError;
use crate::pblock::PblockRef;
use crate::value::{ValueArray, ValueArrayRef};
use std::cmp::Ordering;
use std::ffi::CString;
-use std::iter::once;
use std::os::raw::c_char;
use std::ptr;
@@ -26,37 +26,6 @@ struct slapi_matchingRuleEntry {
mr_compat_syntax: *const *const c_char,
}
-pub unsafe fn name_to_leaking_char(name: &str) -> *const c_char {
- let n = CString::new(name)
- .expect("An invalid string has been hardcoded!")
- .into_boxed_c_str();
- let n_ptr = n.as_ptr();
- // Now we intentionally leak the name here, and the pointer will remain valid.
- Box::leak(n);
- n_ptr
-}
-
-pub unsafe fn names_to_leaking_char_array(names: &[&str]) -> *const *const c_char {
- let n_arr: Vec<CString> = names
- .iter()
- .map(|s| CString::new(*s).expect("An invalid string has been hardcoded!"))
- .collect();
- let n_arr = n_arr.into_boxed_slice();
- let n_ptr_arr: Vec<*const c_char> = n_arr
- .iter()
- .map(|v| v.as_ptr())
- .chain(once(ptr::null()))
- .collect();
- let n_ptr_arr = n_ptr_arr.into_boxed_slice();
-
- // Now we intentionally leak these names here,
- let _r_n_arr = Box::leak(n_arr);
- let r_n_ptr_arr = Box::leak(n_ptr_arr);
-
- let name_ptr = r_n_ptr_arr as *const _ as *const *const c_char;
- name_ptr
-}
-
// oid - the oid of the matching rule
// name - the name of the mr
// desc - description
@@ -69,20 +38,24 @@ pub unsafe fn matchingrule_register(
syntax: &str,
compat_syntax: &[&str],
) -> i32 {
- let oid_ptr = name_to_leaking_char(oid);
- let name_ptr = name_to_leaking_char(name);
- let desc_ptr = name_to_leaking_char(desc);
- let syntax_ptr = name_to_leaking_char(syntax);
- let compat_syntax_ptr = names_to_leaking_char_array(compat_syntax);
+ // Make everything CStrings that live long enough.
+
+ let oid_cs = CString::new(oid).expect("invalid oid");
+ let name_cs = CString::new(name).expect("invalid name");
+ let desc_cs = CString::new(desc).expect("invalid desc");
+ let syntax_cs = CString::new(syntax).expect("invalid syntax");
+
+ // We have to do this so the cstrings live long enough.
+ let compat_syntax_ca = Charray::new(compat_syntax).expect("invalid compat_syntax");
let new_mr = slapi_matchingRuleEntry {
- mr_oid: oid_ptr,
+ mr_oid: oid_cs.as_ptr(),
_mr_oidalias: ptr::null(),
- mr_name: name_ptr,
- mr_desc: desc_ptr,
- mr_syntax: syntax_ptr,
+ mr_name: name_cs.as_ptr(),
+ mr_desc: desc_cs.as_ptr(),
+ mr_syntax: syntax_cs.as_ptr(),
_mr_obsolete: 0,
- mr_compat_syntax: compat_syntax_ptr,
+ mr_compat_syntax: compat_syntax_ca.as_ptr(),
};
let new_mr_ptr = &new_mr as *const _;
--
2.26.3

View File

@ -1,37 +0,0 @@
From 40e9a4835a6e95f021a711a7c42ce0c1bddc5ba4 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 21 May 2021 13:09:12 -0400
Subject: [PATCH 08/12] Issue 4773 - Enable interval feature of DNA plugin
Description: Enable the dormant interval feature in DNA plugin
relates: https://github.com/389ds/389-ds-base/issues/4773
Review by: mreynolds (one line commit rule)
---
ldap/servers/plugins/dna/dna.c | 2 --
1 file changed, 2 deletions(-)
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index bf6b74a99..928a3f54a 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -1023,7 +1023,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
/* Set the default interval to 1 */
entry->interval = 1;
-#ifdef DNA_ENABLE_INTERVAL
value = slapi_entry_attr_get_charptr(e, DNA_INTERVAL);
if (value) {
entry->interval = strtoull(value, 0, 0);
@@ -1032,7 +1031,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
slapi_log_err(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM,
"dna_parse_config_entry - %s [%" PRIu64 "]\n", DNA_INTERVAL, entry->interval);
-#endif
value = slapi_entry_attr_get_charptr(e, DNA_GENERATE);
if (value) {
--
2.26.3

View File

@ -0,0 +1,70 @@
From 88d6ceb18e17c5a18bafb5092ae0c22241b212df Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 1 Nov 2021 14:01:11 -0400
Subject: [PATCH 08/12] Issue 4978 - make installer robust
Description: When run in a container the server can fail to start
because the installer sets the db_home_dir to /dev/shm,
but in containers the default size of /dev/shm is too
small for libdb. We should detect if we are in a
container and not set db_home_dir to /dev/shm.
During instance removal, if an instance was not properly
created then it can not be removed either. Make the
uninstall more robust to accept some errors and continue
removing the instance.
relates: https://github.com/389ds/389-ds-base/issues/4978
Reviewed by: firstyear & tbordaz(Thanks!)
---
src/lib389/lib389/instance/setup.py | 9 +++++++++
src/lib389/lib389/utils.py | 5 ++++-
2 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index be6854af8..7b0147cf9 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -731,6 +731,15 @@ class SetupDs(object):
for line in template_dse.readlines():
dse += line.replace('%', '{', 1).replace('%', '}', 1)
+ # Check if we are in a container, if so don't use /dev/shm for the db home dir
+ # as containers typically don't allocate enough space for dev/shm and we don't
+ # want to unexpectedly break the server after an upgrade
+ container_result = subprocess.run(["systemd-detect-virt", "-c"], capture_output=True)
+ if container_result.returncode == 0:
+ # In a container, set the db_home_dir to the db path
+ self.log.debug("Container detected setting db home directory to db directory.")
+ slapd['db_home_dir'] = slapd['db_dir']
+
with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
dse_fmt = dse.format(
schema_dir=slapd['schema_dir'],
diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py
index 5ba0c6676..c63b4d0ee 100644
--- a/src/lib389/lib389/utils.py
+++ b/src/lib389/lib389/utils.py
@@ -266,6 +266,8 @@ def selinux_label_port(port, remove_label=False):
:type remove_label: boolean
:raises: ValueError: Error message
"""
+ if port is None:
+ return
try:
import selinux
except ImportError:
@@ -662,7 +664,8 @@ def isLocalHost(host_name):
Uses gethostbyname()
"""
# first see if this is a "well known" local hostname
- if host_name == 'localhost' or \
+ if host_name is None or \
+ host_name == 'localhost' or \
host_name == 'localhost.localdomain' or \
host_name == socket.gethostname():
return True
--
2.31.1

View File

@ -1,926 +0,0 @@
From 8df95679519364d0993572ecbea72ab89e5250a5 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Thu, 20 May 2021 14:24:25 +0200
Subject: [PATCH 09/12] Issue 4623 - RFE - Monitor the current DB locks (#4762)
Description: DB lock gets exhausted because of unindexed internal searches
(under a transaction). Indexing those searches is the way to prevent exhaustion.
If db lock get exhausted during a txn, it leads to db panic and the later recovery
can possibly fail. That leads to a full reinit of the instance where the db locks
got exhausted.
Add three attributes to global BDB config: "nsslapd-db-locks-monitoring-enabled",
"nsslapd-db-locks-monitoring-threshold" and "nsslapd-db-locks-monitoring-pause".
By default, nsslapd-db-locks-monitoring-enabled is turned on, nsslapd-db-locks-monitoring-threshold is set to 90% and nsslapd-db-locks-monitoring-pause is 500ms.
When the current number of locks approaches the configured threshold of the
maximum lock count (90% by default), returning the next candidate will fail
until the maximum number of locks is increased or current locks are released.
The monitoring thread runs with the configurable interval of 500ms.
Add the setting to UI and CLI tools.
Fixes: https://github.com/389ds/389-ds-base/issues/4623
Reviewed by: @Firstyear, @tbordaz, @jchapma, @mreynolds389 (Thank you!!)
---
.../suites/monitor/db_locks_monitor_test.py | 251 ++++++++++++++++++
ldap/servers/slapd/back-ldbm/back-ldbm.h | 13 +-
.../slapd/back-ldbm/db-bdb/bdb_config.c | 99 +++++++
.../slapd/back-ldbm/db-bdb/bdb_layer.c | 85 ++++++
ldap/servers/slapd/back-ldbm/init.c | 3 +
ldap/servers/slapd/back-ldbm/ldbm_config.c | 3 +
ldap/servers/slapd/back-ldbm/ldbm_config.h | 3 +
ldap/servers/slapd/back-ldbm/ldbm_search.c | 13 +
ldap/servers/slapd/libglobs.c | 4 +-
src/cockpit/389-console/src/css/ds.css | 4 +
src/cockpit/389-console/src/database.jsx | 7 +
src/cockpit/389-console/src/index.html | 2 +-
.../src/lib/database/databaseConfig.jsx | 88 +++++-
src/lib389/lib389/backend.py | 3 +
src/lib389/lib389/cli_conf/backend.py | 10 +
15 files changed, 576 insertions(+), 12 deletions(-)
create mode 100644 dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
diff --git a/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
new file mode 100644
index 000000000..7f9938f30
--- /dev/null
+++ b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py
@@ -0,0 +1,251 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import logging
+import pytest
+import datetime
+import subprocess
+from multiprocessing import Process, Queue
+from lib389 import pid_from_file
+from lib389.utils import ldap, os
+from lib389._constants import DEFAULT_SUFFIX, ReplicaRole
+from lib389.cli_base import LogCapture
+from lib389.idm.user import UserAccounts
+from lib389.idm.organizationalunit import OrganizationalUnits
+from lib389.tasks import AccessLog
+from lib389.backend import Backends
+from lib389.ldclt import Ldclt
+from lib389.dbgen import dbgen_users
+from lib389.tasks import ImportTask
+from lib389.index import Indexes
+from lib389.plugins import AttributeUniquenessPlugin
+from lib389.config import BDB_LDBMConfig
+from lib389.monitor import MonitorLDBM
+from lib389.topologies import create_topology, _remove_ssca_db
+
+pytestmark = pytest.mark.tier2
+db_locks_monitoring_ack = pytest.mark.skipif(not os.environ.get('DB_LOCKS_MONITORING_ACK', False),
+ reason="DB locks monitoring tests may take hours if the feature is not present or another failure exists. "
+ "Also, the feature requires a big amount of space as we set nsslapd-db-locks to 1300000.")
+
+DEBUGGING = os.getenv('DEBUGGING', default=False)
+if DEBUGGING:
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+ logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+
+def _kill_ns_slapd(inst):
+ pid = str(pid_from_file(inst.ds_paths.pid_file))
+ cmd = ['kill', '-9', pid]
+ subprocess.Popen(cmd, stdout=subprocess.PIPE)
+
+
+@pytest.fixture(scope="function")
+def topology_st_fn(request):
+ """Create DS standalone instance for each test case"""
+
+ topology = create_topology({ReplicaRole.STANDALONE: 1})
+
+ def fin():
+ # Kill the hanging process at the end of test to prevent failures in the following tests
+ if DEBUGGING:
+ [_kill_ns_slapd(inst) for inst in topology]
+ else:
+ [_kill_ns_slapd(inst) for inst in topology]
+ assert _remove_ssca_db(topology)
+ [inst.stop() for inst in topology if inst.exists()]
+ [inst.delete() for inst in topology if inst.exists()]
+ request.addfinalizer(fin)
+
+ topology.logcap = LogCapture()
+ return topology
+
+
+@pytest.fixture(scope="function")
+def setup_attruniq_index_be_import(topology_st_fn):
+ """Enable Attribute Uniqueness, disable indexes and
+ import 120000 entries to the default backend
+ """
+ inst = topology_st_fn.standalone
+
+ inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access')
+ inst.config.set('nsslapd-plugin-logging', 'on')
+ inst.restart()
+
+ attruniq = AttributeUniquenessPlugin(inst, dn="cn=attruniq,cn=plugins,cn=config")
+ attruniq.create(properties={'cn': 'attruniq'})
+ for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']:
+ attruniq.add_unique_attribute(cn)
+ attruniq.add_unique_subtree(DEFAULT_SUFFIX)
+ attruniq.enable_all_subtrees()
+ attruniq.enable()
+
+ indexes = Indexes(inst)
+ for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']:
+ indexes.ensure_state(properties={
+ 'cn': cn,
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': 'none'})
+
+ bdb_config = BDB_LDBMConfig(inst)
+ bdb_config.replace("nsslapd-db-locks", "130000")
+ inst.restart()
+
+ ldif_dir = inst.get_ldif_dir()
+ import_ldif = ldif_dir + '/perf_import.ldif'
+
+ # Valid online import
+ import_task = ImportTask(inst)
+ dbgen_users(inst, 120000, import_ldif, DEFAULT_SUFFIX, entry_name="userNew")
+ import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
+ import_task.wait()
+ assert import_task.is_complete()
+
+
+def create_user_wrapper(q, users):
+ try:
+ users.create_test_user()
+ except Exception as ex:
+ q.put(ex)
+
+
+def spawn_worker_thread(function, users, log, timeout, info):
+ log.info(f"Starting the thread - {info}")
+ q = Queue()
+ p = Process(target=function, args=(q,users,))
+ p.start()
+
+ log.info(f"Waiting for {timeout} seconds for the thread to finish")
+ p.join(timeout)
+
+ if p.is_alive():
+ log.info("Killing the thread as it's still running")
+ p.terminate()
+ p.join()
+ raise RuntimeError(f"Function call was aborted: {info}")
+ result = q.get()
+ if isinstance(result, Exception):
+ raise result
+ else:
+ return result
+
+
+@db_locks_monitoring_ack
+@pytest.mark.parametrize("lock_threshold", [("70"), ("80"), ("95")])
+def test_exhaust_db_locks_basic(topology_st_fn, setup_attruniq_index_be_import, lock_threshold):
+ """Test that when all of the locks are exhausted the instance still working
+ and database is not corrupted
+
+ :id: 299108cc-04d8-4ddc-b58e-99157fccd643
+ :setup: Standalone instance with Attr Uniq plugin and user indexes disabled
+ :steps: 1. Set nsslapd-db-locks to 11000
+ 2. Check that we stop acquiring new locks when the threshold is reached
+ 3. Check that we can regulate a pause interval for DB locks monitoring thread
+ 4. Make sure the feature works for different backends on the same suffix
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ inst = topology_st_fn.standalone
+ ADDITIONAL_SUFFIX = 'ou=newpeople,dc=example,dc=com'
+
+ backends = Backends(inst)
+ backends.create(properties={'nsslapd-suffix': ADDITIONAL_SUFFIX,
+ 'name': ADDITIONAL_SUFFIX[-3:]})
+ ous = OrganizationalUnits(inst, DEFAULT_SUFFIX)
+ ous.create(properties={'ou': 'newpeople'})
+
+ bdb_config = BDB_LDBMConfig(inst)
+ bdb_config.replace("nsslapd-db-locks", "11000")
+
+ # Restart server
+ inst.restart()
+
+ for lock_enabled in ["on", "off"]:
+ for lock_pause in ["100", "500", "1000"]:
+ bdb_config.replace("nsslapd-db-locks-monitoring-enabled", lock_enabled)
+ bdb_config.replace("nsslapd-db-locks-monitoring-threshold", lock_threshold)
+ bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause)
+ inst.restart()
+
+ if lock_enabled == "off":
+ raised_exception = (RuntimeError, ldap.SERVER_DOWN)
+ else:
+ raised_exception = ldap.OPERATIONS_ERROR
+
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+ with pytest.raises(raised_exception):
+ spawn_worker_thread(create_user_wrapper, users, log, 30,
+ f"Adding user with monitoring enabled='{lock_enabled}'; "
+ f"threshold='{lock_threshold}'; pause='{lock_pause}'.")
+ # Restart because we already run out of locks and the next unindexed searches will fail eventually
+ if lock_enabled == "off":
+ _kill_ns_slapd(inst)
+ inst.restart()
+
+ users = UserAccounts(inst, ADDITIONAL_SUFFIX, rdn=None)
+ with pytest.raises(raised_exception):
+ spawn_worker_thread(create_user_wrapper, users, log, 30,
+ f"Adding user with monitoring enabled='{lock_enabled}'; "
+ f"threshold='{lock_threshold}'; pause='{lock_pause}'.")
+ # In case feature is disabled - restart for the clean up
+ if lock_enabled == "off":
+ _kill_ns_slapd(inst)
+ inst.restart()
+
+
+@db_locks_monitoring_ack
+def test_exhaust_db_locks_big_pause(topology_st_fn, setup_attruniq_index_be_import):
+ """Test that DB lock pause setting increases the wait interval value for the monitoring thread
+
+ :id: 7d5bf838-5d4e-4ad5-8c03-5716afb84ea6
+ :setup: Standalone instance with Attr Uniq plugin and user indexes disabled
+ :steps: 1. Set nsslapd-db-locks to 20000 while using the default threshold value (95%)
+ 2. Set nsslapd-db-locks-monitoring-pause to 10000 (10 seconds)
+ 3. Make sure that the pause is successfully increased a few times in a row
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+
+ inst = topology_st_fn.standalone
+
+ bdb_config = BDB_LDBMConfig(inst)
+ bdb_config.replace("nsslapd-db-locks", "20000")
+ lock_pause = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-pause")
+ assert lock_pause == 500
+ lock_pause = "10000"
+ bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause)
+
+ # Restart server
+ inst.restart()
+
+ lock_enabled = bdb_config.get_attr_val_utf8_l("nsslapd-db-locks-monitoring-enabled")
+ lock_threshold = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-threshold")
+ assert lock_enabled == "on"
+ assert lock_threshold == 90
+
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+ start = datetime.datetime.now()
+ with pytest.raises(ldap.OPERATIONS_ERROR):
+ spawn_worker_thread(create_user_wrapper, users, log, 30,
+ f"Adding user with monitoring enabled='{lock_enabled}'; "
+ f"threshold='{lock_threshold}'; pause='{lock_pause}'. Expect it to 'Work'")
+ end = datetime.datetime.now()
+ time_delta = end - start
+ if time_delta.seconds < 9:
+ raise RuntimeError("nsslapd-db-locks-monitoring-pause attribute doesn't function correctly. "
+ f"Finished the execution in {time_delta.seconds} seconds")
+ # In case something has failed - restart for the clean up
+ inst.restart()
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index 571b0a58b..afb831c32 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -155,6 +155,8 @@ typedef unsigned short u_int16_t;
#define DEFAULT_DNCACHE_MAXCOUNT -1 /* no limit */
#define DEFAULT_DBCACHE_SIZE 33554432
#define DEFAULT_DBCACHE_SIZE_STR "33554432"
+#define DEFAULT_DBLOCK_PAUSE 500
+#define DEFAULT_DBLOCK_PAUSE_STR "500"
#define DEFAULT_MODE 0600
#define DEFAULT_ALLIDSTHRESHOLD 4000
#define DEFAULT_IDL_TUNE 1
@@ -575,12 +577,21 @@ struct ldbminfo
char *li_backend_implement; /* low layer backend implementation */
int li_noparentcheck; /* check if parent exists on add */
- /* the next 3 fields are for the params that don't get changed until
+ /* db lock monitoring */
+ /* if we decide to move the values to bdb_config, we can use slapi_back_get_info function to retrieve the values */
+ int32_t li_dblock_monitoring; /* enables db locks monitoring thread - requires restart */
+ uint32_t li_dblock_monitoring_pause; /* an interval for db locks monitoring thread */
+ uint32_t li_dblock_threshold; /* when the percentage is reached, abort the search in ldbm_back_next_search_entry - requires restart*/
+ uint32_t li_dblock_threshold_reached;
+
+ /* the next 4 fields are for the params that don't get changed until
* the server is restarted (used by the admin console)
*/
char *li_new_directory;
uint64_t li_new_dbcachesize;
int li_new_dblock;
+ int32_t li_new_dblock_monitoring;
+ uint64_t li_new_dblock_threshold;
int li_new_dbncache;
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
index 738b841aa..167644943 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
@@ -190,6 +190,102 @@ bdb_config_db_lock_set(void *arg, void *value, char *errorbuf, int phase, int ap
return retval;
}
+static void *
+bdb_config_db_lock_monitoring_get(void *arg)
+{
+ struct ldbminfo *li = (struct ldbminfo *)arg;
+
+ return (void *)((intptr_t)(li->li_new_dblock_monitoring));
+}
+
+static int
+bdb_config_db_lock_monitoring_set(void *arg, void *value, char *errorbuf __attribute__((unused)), int phase __attribute__((unused)), int apply)
+{
+ struct ldbminfo *li = (struct ldbminfo *)arg;
+ int retval = LDAP_SUCCESS;
+ int val = (int32_t)((intptr_t)value);
+
+ if (apply) {
+ if (CONFIG_PHASE_RUNNING == phase) {
+ li->li_new_dblock_monitoring = val;
+ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_monitoring_set",
+ "New nsslapd-db-lock-monitoring value will not take affect until the server is restarted\n");
+ } else {
+ li->li_new_dblock_monitoring = val;
+ li->li_dblock_monitoring = val;
+ }
+ }
+
+ return retval;
+}
+
+static void *
+bdb_config_db_lock_pause_get(void *arg)
+{
+ struct ldbminfo *li = (struct ldbminfo *)arg;
+
+ return (void *)((uintptr_t)(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED)));
+}
+
+static int
+bdb_config_db_lock_pause_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply)
+{
+ struct ldbminfo *li = (struct ldbminfo *)arg;
+ int retval = LDAP_SUCCESS;
+ u_int32_t val = (u_int32_t)((uintptr_t)value);
+
+ if (val == 0) {
+ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_pause_set",
+ "%s was set to '0'. The default value will be used (%s)",
+ CONFIG_DB_LOCKS_PAUSE, DEFAULT_DBLOCK_PAUSE_STR);
+ val = DEFAULT_DBLOCK_PAUSE;
+ }
+
+ if (apply) {
+ slapi_atomic_store_32((int32_t *)&(li->li_dblock_monitoring_pause), val, __ATOMIC_RELAXED);
+ }
+ return retval;
+}
+
+static void *
+bdb_config_db_lock_threshold_get(void *arg)
+{
+ struct ldbminfo *li = (struct ldbminfo *)arg;
+
+ return (void *)((uintptr_t)(li->li_new_dblock_threshold));
+}
+
+static int
+bdb_config_db_lock_threshold_set(void *arg, void *value, char *errorbuf, int phase __attribute__((unused)), int apply)
+{
+ struct ldbminfo *li = (struct ldbminfo *)arg;
+ int retval = LDAP_SUCCESS;
+ u_int32_t val = (u_int32_t)((uintptr_t)value);
+
+ if (val < 70 || val > 95) {
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95",
+ CONFIG_DB_LOCKS_THRESHOLD, val);
+ slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_lock_threshold_set",
+ "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95",
+ CONFIG_DB_LOCKS_THRESHOLD, val);
+ retval = LDAP_OPERATIONS_ERROR;
+ return retval;
+ }
+
+ if (apply) {
+ if (CONFIG_PHASE_RUNNING == phase) {
+ li->li_new_dblock_threshold = val;
+ slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_threshold_set",
+ "New nsslapd-db-lock-monitoring-threshold value will not take affect until the server is restarted\n");
+ } else {
+ li->li_new_dblock_threshold = val;
+ li->li_dblock_threshold = val;
+ }
+ }
+ return retval;
+}
+
static void *
bdb_config_dbcachesize_get(void *arg)
{
@@ -1409,6 +1505,9 @@ static config_info bdb_config_param[] = {
{CONFIG_SERIAL_LOCK, CONFIG_TYPE_ONOFF, "on", &bdb_config_serial_lock_get, &bdb_config_serial_lock_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_USE_LEGACY_ERRORCODE, CONFIG_TYPE_ONOFF, "off", &bdb_config_legacy_errcode_get, &bdb_config_legacy_errcode_set, 0},
{CONFIG_DB_DEADLOCK_POLICY, CONFIG_TYPE_INT, STRINGIFYDEFINE(DB_LOCK_YOUNGEST), &bdb_config_db_deadlock_policy_get, &bdb_config_db_deadlock_policy_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+ {CONFIG_DB_LOCKS_MONITORING, CONFIG_TYPE_ONOFF, "on", &bdb_config_db_lock_monitoring_get, &bdb_config_db_lock_monitoring_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+ {CONFIG_DB_LOCKS_THRESHOLD, CONFIG_TYPE_INT, "90", &bdb_config_db_lock_threshold_get, &bdb_config_db_lock_threshold_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+ {CONFIG_DB_LOCKS_PAUSE, CONFIG_TYPE_INT, DEFAULT_DBLOCK_PAUSE_STR, &bdb_config_db_lock_pause_get, &bdb_config_db_lock_pause_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{NULL, 0, NULL, NULL, NULL, 0}};
void
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
index 6cccad8e6..2f25f67a2 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
@@ -35,6 +35,8 @@
(env)->txn_checkpoint((env), (kbyte), (min), (flags))
#define MEMP_STAT(env, gsp, fsp, flags, malloc) \
(env)->memp_stat((env), (gsp), (fsp), (flags))
+#define LOCK_STAT(env, statp, flags, malloc) \
+ (env)->lock_stat((env), (statp), (flags))
#define MEMP_TRICKLE(env, pct, nwrotep) \
(env)->memp_trickle((env), (pct), (nwrotep))
#define LOG_ARCHIVE(env, listp, flags, malloc) \
@@ -66,6 +68,7 @@
#define NEWDIR_MODE 0755
#define DB_REGION_PREFIX "__db."
+static int locks_monitoring_threadmain(void *param);
static int perf_threadmain(void *param);
static int checkpoint_threadmain(void *param);
static int trickle_threadmain(void *param);
@@ -84,6 +87,7 @@ static int bdb_start_checkpoint_thread(struct ldbminfo *li);
static int bdb_start_trickle_thread(struct ldbminfo *li);
static int bdb_start_perf_thread(struct ldbminfo *li);
static int bdb_start_txn_test_thread(struct ldbminfo *li);
+static int bdb_start_locks_monitoring_thread(struct ldbminfo *li);
static int trans_batch_count = 0;
static int trans_batch_limit = 0;
static int trans_batch_txn_min_sleep = 50; /* ms */
@@ -1299,6 +1303,10 @@ bdb_start(struct ldbminfo *li, int dbmode)
return return_value;
}
+ if (0 != (return_value = bdb_start_locks_monitoring_thread(li))) {
+ return return_value;
+ }
+
/* We need to free the memory to avoid a leak
* Also, we have to evaluate if the performance counter
* should be preserved or not for database restore.
@@ -2885,6 +2893,7 @@ bdb_start_perf_thread(struct ldbminfo *li)
return return_value;
}
+
/* Performance thread */
static int
perf_threadmain(void *param)
@@ -2910,6 +2919,82 @@ perf_threadmain(void *param)
return 0;
}
+
+/*
+ * create a thread for locks_monitoring_threadmain
+ */
+static int
+bdb_start_locks_monitoring_thread(struct ldbminfo *li)
+{
+ int return_value = 0;
+ if (li->li_dblock_monitoring) {
+ if (NULL == PR_CreateThread(PR_USER_THREAD,
+ (VFP)(void *)locks_monitoring_threadmain, li,
+ PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+ PR_UNJOINABLE_THREAD,
+ SLAPD_DEFAULT_THREAD_STACKSIZE)) {
+ PRErrorCode prerr = PR_GetError();
+ slapi_log_err(SLAPI_LOG_ERR, "bdb_start_locks_monitoring_thread",
+ "Failed to create database locks monitoring thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+ prerr, slapd_pr_strerror(prerr));
+ return_value = -1;
+ }
+ }
+ return return_value;
+}
+
+
+/* DB Locks Monitoring thread */
+static int
+locks_monitoring_threadmain(void *param)
+{
+ int ret = 0;
+ uint64_t current_locks = 0;
+ uint64_t max_locks = 0;
+ uint32_t lock_exhaustion = 0;
+ PRIntervalTime interval;
+ struct ldbminfo *li = NULL;
+
+ PR_ASSERT(NULL != param);
+ li = (struct ldbminfo *)param;
+
+ dblayer_private *priv = li->li_dblayer_private;
+ bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
+ PR_ASSERT(NULL != priv);
+
+ INCR_THREAD_COUNT(pEnv);
+
+ while (!BDB_CONFIG(li)->bdb_stop_threads) {
+ if (dblayer_db_uses_locking(pEnv->bdb_DB_ENV)) {
+ DB_LOCK_STAT *lockstat = NULL;
+ ret = LOCK_STAT(pEnv->bdb_DB_ENV, &lockstat, 0, (void *)slapi_ch_malloc);
+ if (0 == ret) {
+ current_locks = lockstat->st_nlocks;
+ max_locks = lockstat->st_maxlocks;
+ if (max_locks){
+ lock_exhaustion = (uint32_t)((double)current_locks / (double)max_locks * 100.0);
+ } else {
+ lock_exhaustion = 0;
+ }
+ if ((li->li_dblock_threshold) &&
+ (lock_exhaustion >= li->li_dblock_threshold)) {
+ slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 1, __ATOMIC_RELAXED);
+ } else {
+ slapi_atomic_store_32((int32_t *)&(li->li_dblock_threshold_reached), 0, __ATOMIC_RELAXED);
+ }
+ }
+ slapi_ch_free((void **)&lockstat);
+ }
+ interval = PR_MillisecondsToInterval(slapi_atomic_load_32((int32_t *)&(li->li_dblock_monitoring_pause), __ATOMIC_RELAXED));
+ DS_Sleep(interval);
+ }
+
+ DECR_THREAD_COUNT(pEnv);
+ slapi_log_err(SLAPI_LOG_TRACE, "locks_monitoring_threadmain", "Leaving locks_monitoring_threadmain\n");
+ return 0;
+}
+
+
/*
* create a thread for deadlock_threadmain
*/
diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c
index 893776699..4165c8fad 100644
--- a/ldap/servers/slapd/back-ldbm/init.c
+++ b/ldap/servers/slapd/back-ldbm/init.c
@@ -70,6 +70,9 @@ ldbm_back_init(Slapi_PBlock *pb)
/* Initialize the set of instances. */
li->li_instance_set = objset_new(&ldbm_back_instance_set_destructor);
+ /* Init lock threshold value */
+ li->li_dblock_threshold_reached = 0;
+
/* ask the factory to give us space in the Connection object
* (only bulk import uses this)
*/
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
index 10cef250f..60884cf33 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
@@ -87,6 +87,9 @@ static char *ldbm_config_moved_attributes[] =
CONFIG_SERIAL_LOCK,
CONFIG_USE_LEGACY_ERRORCODE,
CONFIG_DB_DEADLOCK_POLICY,
+ CONFIG_DB_LOCKS_MONITORING,
+ CONFIG_DB_LOCKS_THRESHOLD,
+ CONFIG_DB_LOCKS_PAUSE,
""};
/* Used to add an array of entries, like the one above and
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.h b/ldap/servers/slapd/back-ldbm/ldbm_config.h
index 58e64799c..6fa8292eb 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.h
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.h
@@ -104,6 +104,9 @@ struct config_info
#define CONFIG_DB_VERBOSE "nsslapd-db-verbose"
#define CONFIG_DB_DEBUG "nsslapd-db-debug"
#define CONFIG_DB_LOCK "nsslapd-db-locks"
+#define CONFIG_DB_LOCKS_MONITORING "nsslapd-db-locks-monitoring-enabled"
+#define CONFIG_DB_LOCKS_THRESHOLD "nsslapd-db-locks-monitoring-threshold"
+#define CONFIG_DB_LOCKS_PAUSE "nsslapd-db-locks-monitoring-pause"
#define CONFIG_DB_NAMED_REGIONS "nsslapd-db-named-regions"
#define CONFIG_DB_PRIVATE_MEM "nsslapd-db-private-mem"
#define CONFIG_DB_PRIVATE_IMPORT_MEM "nsslapd-db-private-import-mem"
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
index 1a7b510d4..6e22debde 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
@@ -1472,6 +1472,7 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
slapi_pblock_get(pb, SLAPI_CONNECTION, &conn);
slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+
if ((reverse_list = operation_is_flag_set(op, OP_FLAG_REVERSE_CANDIDATE_ORDER))) {
/*
* Start at the end of the list and work our way forward. Since a single
@@ -1538,6 +1539,18 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
/* Find the next candidate entry and return it. */
while (1) {
+ if (li->li_dblock_monitoring &&
+ slapi_atomic_load_32((int32_t *)&(li->li_dblock_threshold_reached), __ATOMIC_RELAXED)) {
+ slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_next_search_entry",
+ "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold "
+ "under cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config). "
+ "Please, increase nsslapd-db-locks according to your needs.\n");
+ slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_ENTRY, NULL);
+ delete_search_result_set(pb, &sr);
+ rc = SLAPI_FAIL_GENERAL;
+ slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "DB locks threshold is reached (nsslapd-db-locks-monitoring-threshold)", 0, NULL);
+ goto bail;
+ }
/* check for abandon */
if (slapi_op_abandoned(pb) || (NULL == sr)) {
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 388616b36..db7d01bbc 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -8171,8 +8171,8 @@ config_set(const char *attr, struct berval **values, char *errorbuf, int apply)
#if 0
debugHashTable(attr);
#endif
- slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored", attr);
- slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored", attr);
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown attribute %s will be ignored\n", attr);
+ slapi_log_err(SLAPI_LOG_ERR, "config_set", "Unknown attribute %s will be ignored\n", attr);
return LDAP_NO_SUCH_ATTRIBUTE;
}
diff --git a/src/cockpit/389-console/src/css/ds.css b/src/cockpit/389-console/src/css/ds.css
index 9248116e7..3cf50b593 100644
--- a/src/cockpit/389-console/src/css/ds.css
+++ b/src/cockpit/389-console/src/css/ds.css
@@ -639,6 +639,10 @@ option {
padding-right: 0 !important;
}
+.ds-vertical-scroll-auto {
+ overflow-y: auto !important;
+}
+
.alert {
max-width: 750px;
}
diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx
index efa3ce6d5..11cae972c 100644
--- a/src/cockpit/389-console/src/database.jsx
+++ b/src/cockpit/389-console/src/database.jsx
@@ -157,6 +157,7 @@ export class Database extends React.Component {
const attrs = config.attrs;
let db_cache_auto = false;
let import_cache_auto = false;
+ let dblocksMonitoring = false;
let dbhome = "";
if ('nsslapd-db-home-directory' in attrs) {
@@ -168,6 +169,9 @@ export class Database extends React.Component {
if (attrs['nsslapd-import-cache-autosize'] != "0") {
import_cache_auto = true;
}
+ if (attrs['nsslapd-db-locks-monitoring-enabled'][0] == "on") {
+ dblocksMonitoring = true;
+ }
this.setState(() => (
{
@@ -187,6 +191,9 @@ export class Database extends React.Component {
txnlogdir: attrs['nsslapd-db-logdirectory'],
dbhomedir: dbhome,
dblocks: attrs['nsslapd-db-locks'],
+ dblocksMonitoring: dblocksMonitoring,
+ dblocksMonitoringThreshold: attrs['nsslapd-db-locks-monitoring-threshold'],
+ dblocksMonitoringPause: attrs['nsslapd-db-locks-monitoring-pause'],
chxpoint: attrs['nsslapd-db-checkpoint-interval'],
compactinterval: attrs['nsslapd-db-compactdb-interval'],
importcacheauto: attrs['nsslapd-import-cache-autosize'],
diff --git a/src/cockpit/389-console/src/index.html b/src/cockpit/389-console/src/index.html
index 1278844fc..fd0eeb669 100644
--- a/src/cockpit/389-console/src/index.html
+++ b/src/cockpit/389-console/src/index.html
@@ -12,7 +12,7 @@
</head>
-<body>
+<body class="ds-vertical-scroll-auto">
<div id="dsinstance"></div>
<script src="index.js"></script>
</body>
diff --git a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
index f6e662bca..6a71c138d 100644
--- a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
+++ b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx
@@ -31,6 +31,9 @@ export class GlobalDatabaseConfig extends React.Component {
txnlogdir: this.props.data.txnlogdir,
dbhomedir: this.props.data.dbhomedir,
dblocks: this.props.data.dblocks,
+ dblocksMonitoring: this.props.data.dblocksMonitoring,
+ dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold,
+ dblocksMonitoringPause: this.props.data.dblocksMonitoringPause,
chxpoint: this.props.data.chxpoint,
compactinterval: this.props.data.compactinterval,
importcachesize: this.props.data.importcachesize,
@@ -47,6 +50,9 @@ export class GlobalDatabaseConfig extends React.Component {
_txnlogdir: this.props.data.txnlogdir,
_dbhomedir: this.props.data.dbhomedir,
_dblocks: this.props.data.dblocks,
+ _dblocksMonitoring: this.props.data.dblocksMonitoring,
+ _dblocksMonitoringThreshold: this.props.data.dblocksMonitoringThreshold,
+ _dblocksMonitoringPause: this.props.data.dblocksMonitoringPause,
_chxpoint: this.props.data.chxpoint,
_compactinterval: this.props.data.compactinterval,
_importcachesize: this.props.data.importcachesize,
@@ -55,6 +61,7 @@ export class GlobalDatabaseConfig extends React.Component {
_import_cache_auto: this.props.data.import_cache_auto,
};
this.handleChange = this.handleChange.bind(this);
+ this.select_db_locks_monitoring = this.select_db_locks_monitoring.bind(this);
this.select_auto_cache = this.select_auto_cache.bind(this);
this.select_auto_import_cache = this.select_auto_import_cache.bind(this);
this.save_db_config = this.save_db_config.bind(this);
@@ -76,6 +83,12 @@ export class GlobalDatabaseConfig extends React.Component {
}, this.handleChange(e));
}
+ select_db_locks_monitoring (val, e) {
+ this.setState({
+ dblocksMonitoring: !this.state.dblocksMonitoring
+ }, this.handleChange(val, e));
+ }
+
handleChange(e) {
// Generic
const value = e.target.type === 'checkbox' ? e.target.checked : e.target.value;
@@ -150,6 +163,21 @@ export class GlobalDatabaseConfig extends React.Component {
cmd.push("--locks=" + this.state.dblocks);
requireRestart = true;
}
+ if (this.state._dblocksMonitoring != this.state.dblocksMonitoring) {
+ if (this.state.dblocksMonitoring) {
+ cmd.push("--locks-monitoring-enabled=on");
+ } else {
+ cmd.push("--locks-monitoring-enabled=off");
+ }
+ requireRestart = true;
+ }
+ if (this.state._dblocksMonitoringThreshold != this.state.dblocksMonitoringThreshold) {
+ cmd.push("--locks-monitoring-threshold=" + this.state.dblocksMonitoringThreshold);
+ requireRestart = true;
+ }
+ if (this.state._dblocksMonitoringPause != this.state.dblocksMonitoringPause) {
+ cmd.push("--locks-monitoring-pause=" + this.state.dblocksMonitoringPause);
+ }
if (this.state._chxpoint != this.state.chxpoint) {
cmd.push("--checkpoint-interval=" + this.state.chxpoint);
requireRestart = true;
@@ -216,6 +244,28 @@ export class GlobalDatabaseConfig extends React.Component {
let import_cache_form;
let db_auto_checked = false;
let import_auto_checked = false;
+ let dblocksMonitor = "";
+
+ if (this.state.dblocksMonitoring) {
+ dblocksMonitor = <div className="ds-margin-top">
+ <Row className="ds-margin-top" title="Sets the DB lock exhaustion value in percentage (valid range is 70-95). If too many locks are acquired, the server will abort the searches while the number of locks are not decreased. It helps to avoid DB corruption and long recovery. (nsslapd-db-locks-monitoring-threshold)">
+ <Col componentClass={ControlLabel} sm={4}>
+ DB Locks Threshold Percentage
+ </Col>
+ <Col sm={8}>
+ <input className="ds-input" type="number" id="dblocksMonitoringThreshold" size="10" onChange={this.handleChange} value={this.state.dblocksMonitoringThreshold} />
+ </Col>
+ </Row>
+ <Row className="ds-margin-top" title="Sets the amount of time (milliseconds) that the monitoring thread spends waiting between checks. (nsslapd-db-locks-monitoring-pause)">
+ <Col componentClass={ControlLabel} sm={4}>
+ DB Locks Pause Milliseconds
+ </Col>
+ <Col sm={8}>
+ <input className="ds-input" type="number" id="dblocksMonitoringPause" size="10" onChange={this.handleChange} value={this.state.dblocksMonitoringPause} />
+ </Col>
+ </Row>
+ </div>;
+ }
if (this.state.db_cache_auto) {
db_cache_form = <div id="auto-cache-form" className="ds-margin-left">
@@ -422,14 +472,6 @@ export class GlobalDatabaseConfig extends React.Component {
<input id="dbhomedir" value={this.state.dbhomedir} onChange={this.handleChange} className="ds-input-auto" type="text" />
</Col>
</Row>
- <Row className="ds-margin-top" title="The number of database locks (nsslapd-db-locks).">
- <Col componentClass={ControlLabel} sm={4}>
- Database Locks
- </Col>
- <Col sm={8}>
- <input id="dblocks" value={this.state.dblocks} onChange={this.handleChange} className="ds-input-auto" type="text" />
- </Col>
- </Row>
<Row className="ds-margin-top" title="Amount of time in seconds after which the Directory Server sends a checkpoint entry to the database transaction log (nsslapd-db-checkpoint-interval).">
<Col componentClass={ControlLabel} sm={4}>
Database Checkpoint Interval
@@ -446,6 +488,36 @@ export class GlobalDatabaseConfig extends React.Component {
<input id="compactinterval" value={this.state.compactinterval} onChange={this.handleChange} className="ds-input-auto" type="text" />
</Col>
</Row>
+ <Row className="ds-margin-top" title="The number of database locks (nsslapd-db-locks).">
+ <Col componentClass={ControlLabel} sm={4}>
+ Database Locks
+ </Col>
+ <Col sm={8}>
+ <input id="dblocks" value={this.state.dblocks} onChange={this.handleChange} className="ds-input-auto" type="text" />
+ </Col>
+ </Row>
+ <Row>
+ <Col sm={12}>
+ <h5 className="ds-sub-header">DB Locks Monitoring</h5>
+ <hr />
+ </Col>
+ </Row>
+ <Row>
+ <Col sm={12}>
+ <Checkbox title="Set input to be set automatically"
+ id="dblocksMonitoring"
+ checked={this.state.dblocksMonitoring}
+ onChange={this.select_db_locks_monitoring}
+ >
+ Enable Monitoring
+ </Checkbox>
+ </Col>
+ </Row>
+ <Row>
+ <Col sm={12}>
+ {dblocksMonitor}
+ </Col>
+ </Row>
</Form>
</div>
</div>
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
index bcd7b383f..13bb27842 100644
--- a/src/lib389/lib389/backend.py
+++ b/src/lib389/lib389/backend.py
@@ -1011,6 +1011,9 @@ class DatabaseConfig(DSLdapObject):
'nsslapd-db-transaction-batch-max-wait',
'nsslapd-db-logbuf-size',
'nsslapd-db-locks',
+ 'nsslapd-db-locks-monitoring-enabled',
+ 'nsslapd-db-locks-monitoring-threshold',
+ 'nsslapd-db-locks-monitoring-pause',
'nsslapd-db-private-import-mem',
'nsslapd-import-cache-autosize',
'nsslapd-cache-autosize',
diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py
index 6bfbcb036..722764d10 100644
--- a/src/lib389/lib389/cli_conf/backend.py
+++ b/src/lib389/lib389/cli_conf/backend.py
@@ -46,6 +46,9 @@ arg_to_attr = {
'txn_batch_max': 'nsslapd-db-transaction-batch-max-wait',
'logbufsize': 'nsslapd-db-logbuf-size',
'locks': 'nsslapd-db-locks',
+ 'locks_monitoring_enabled': 'nsslapd-db-locks-monitoring-enabled',
+ 'locks_monitoring_threshold': 'nsslapd-db-locks-monitoring-threshold',
+ 'locks_monitoring_pause': 'nsslapd-db-locks-monitoring-pause',
'import_cache_autosize': 'nsslapd-import-cache-autosize',
'cache_autosize': 'nsslapd-cache-autosize',
'cache_autosize_split': 'nsslapd-cache-autosize-split',
@@ -998,6 +1001,13 @@ def create_parser(subparsers):
'the batch count (only works when txn-batch-val is set)')
set_db_config_parser.add_argument('--logbufsize', help='Specifies the transaction log information buffer size')
set_db_config_parser.add_argument('--locks', help='Sets the maximum number of database locks')
+ set_db_config_parser.add_argument('--locks-monitoring-enabled', help='Set to "on" or "off" to monitor DB locks. When it crosses the percentage value '
+ 'set with "--locks-monitoring-threshold" ("on" by default)')
+ set_db_config_parser.add_argument('--locks-monitoring-threshold', help='Sets the DB lock exhaustion value in percentage (valid range is 70-95). If too many locks are '
+ 'acquired, the server will abort the searches while the number of locks '
+ 'are not decreased. It helps to avoid DB corruption and long recovery.')
+ set_db_config_parser.add_argument('--locks-monitoring-pause', help='Sets the DB lock monitoring value in milliseconds for the amount of time '
+ 'that the monitoring thread spends waiting between checks.')
set_db_config_parser.add_argument('--import-cache-autosize', help='Set to "on" or "off" to automatically set the size of the import '
'cache to be used during the the import process of LDIF files')
set_db_config_parser.add_argument('--cache-autosize', help='Sets the percentage of free memory that is used in total for the database '
--
2.26.3

View File

@ -0,0 +1,468 @@
From 2ae2f53756b6f13e2816bb30812740cb7ad97403 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Fri, 5 Nov 2021 09:56:43 +0100
Subject: [PATCH 09/12] Issue 4972 - gecos with IA5 introduces a compatibility
issue with previous (#4981)
releases where it was DirectoryString
Bug description:
For years 'gecos' was DirectoryString (UTF8), with #50933 it was restricted to IA5 (ascii)
https://github.com/389ds/389-ds-base/commit/0683bcde1b667b6d0ca6e8d1ef605f17c51ea2f7#
IA5 definition conforms rfc2307 but is a problem for existing deployments
where entries can have 'gecos' attribute value with UTF8.
Fix description:
    Revert the definition of 'gecos' back to Directory String
Additional fix to make test_replica_backup_and_restore more
robust to CI
relates: https://github.com/389ds/389-ds-base/issues/4972
Reviewed by: William Brown, Pierre Rogier, James Chapman (Thanks !)
Platforms tested: F34
---
.../tests/suites/schema/schema_test.py | 398 +++++++++++++++++-
ldap/schema/10rfc2307compat.ldif | 6 +-
2 files changed, 400 insertions(+), 4 deletions(-)
diff --git a/dirsrvtests/tests/suites/schema/schema_test.py b/dirsrvtests/tests/suites/schema/schema_test.py
index d590624b6..5d62b8d59 100644
--- a/dirsrvtests/tests/suites/schema/schema_test.py
+++ b/dirsrvtests/tests/suites/schema/schema_test.py
@@ -18,8 +18,12 @@ import pytest
import six
from ldap.cidict import cidict
from ldap.schema import SubSchema
+from lib389.schema import SchemaLegacy
from lib389._constants import *
-from lib389.topologies import topology_st
+from lib389.topologies import topology_st, topology_m2 as topo_m2
+from lib389.idm.user import UserAccounts, UserAccount
+from lib389.replica import ReplicationManager
+from lib389.utils import ensure_bytes
pytestmark = pytest.mark.tier1
@@ -165,6 +169,398 @@ def test_schema_comparewithfiles(topology_st):
log.info('test_schema_comparewithfiles: PASSED')
+def test_gecos_directoryString(topology_st):
+ """Check that gecos supports directoryString value
+
+ :id: aee422bb-6299-4124-b5cd-d7393dac19d3
+
+ :setup: Standalone instance
+
+ :steps:
+ 1. Add a common user
+ 2. replace gecos with a directoryString value
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ """
+
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
+
+ user_properties = {
+ 'uid': 'testuser',
+ 'cn' : 'testuser',
+ 'sn' : 'user',
+ 'uidNumber' : '1000',
+ 'gidNumber' : '2000',
+ 'homeDirectory' : '/home/testuser',
+ }
+ testuser = users.create(properties=user_properties)
+
+ # Add a gecos UTF value
+ testuser.replace('gecos', 'Hélène')
+
+def test_gecos_mixed_definition_topo(topo_m2, request):
+ """Check that replication is still working if schema contains
+ definitions that does not conform with a replicated entry
+
+ :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
+ :setup: Two suppliers replication setup
+ :steps:
+ 1. Create a testuser on M1
+ 2 Stop M1 and M2
+ 3 Change gecos def on M2 to be IA5
+ 4 Update testuser with gecos directoryString value
+ 5 Check replication is still working
+ :expectedresults:
+ 1. success
+ 2. success
+ 3. success
+ 4. success
+ 5. success
+
+ """
+
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+ m1 = topo_m2.ms["supplier1"]
+ m2 = topo_m2.ms["supplier2"]
+
+
+ # create a test user
+ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
+ testuser = UserAccount(m1, testuser_dn)
+ try:
+ testuser.create(properties={
+ 'uid': 'testuser',
+ 'cn': 'testuser',
+ 'sn': 'testuser',
+ 'uidNumber' : '1000',
+ 'gidNumber' : '2000',
+ 'homeDirectory' : '/home/testuser',
+ })
+ except ldap.ALREADY_EXISTS:
+ pass
+ repl.wait_for_replication(m1, m2)
+
+ # Stop suppliers to update the schema
+ m1.stop()
+ m2.stop()
+
+ # on M1: gecos is DirectoryString (default)
+ # on M2: gecos is IA5
+ schema_filename = (m2.schemadir + "/99user.ldif")
+ try:
+ with open(schema_filename, 'w') as schema_file:
+ schema_file.write("dn: cn=schema\n")
+ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
+ "'gecos' DESC 'The GECOS field; the common name' " +
+ "EQUALITY caseIgnoreIA5Match " +
+ "SUBSTR caseIgnoreIA5SubstringsMatch " +
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
+ "SINGLE-VALUE )\n")
+ os.chmod(schema_filename, 0o777)
+ except OSError as e:
+ log.fatal("Failed to update schema file: " +
+ "{} Error: {}".format(schema_filename, str(e)))
+
+ # start the instances
+ m1.start()
+ m2.start()
+
+ # Check that gecos is IA5 on M2
+ schema = SchemaLegacy(m2)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
+
+
+ # Add a gecos UTF value on M1
+ testuser.replace('gecos', 'Hélène')
+
+ # Check replication is still working
+ testuser.replace('displayName', 'ascii value')
+ repl.wait_for_replication(m1, m2)
+ testuser_m2 = UserAccount(m2, testuser_dn)
+ assert testuser_m2.exists()
+ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
+
+ def fin():
+ m1.start()
+ m2.start()
+ testuser.delete()
+ repl.wait_for_replication(m1, m2)
+
+ # on M2 restore a default 99user.ldif
+ m2.stop()
+ os.remove(m2.schemadir + "/99user.ldif")
+ schema_filename = (m2.schemadir + "/99user.ldif")
+ try:
+ with open(schema_filename, 'w') as schema_file:
+ schema_file.write("dn: cn=schema\n")
+ os.chmod(schema_filename, 0o777)
+ except OSError as e:
+ log.fatal("Failed to update schema file: " +
+ "{} Error: {}".format(schema_filename, str(e)))
+ m2.start()
+ m1.start()
+
+ request.addfinalizer(fin)
+
+def test_gecos_directoryString_wins_M1(topo_m2, request):
+ """Check that if the initial syntaxes are IA5(M2) and DirectoryString(M1)
+ Then directoryString wins when nsSchemaCSN M1 is the greatest
+
+ :id: ad119fa5-7671-45c8-b2ef-0b28ffb68fdb
+ :setup: Two suppliers replication setup
+ :steps:
+ 1. Create a testuser on M1
+ 2 Stop M1 and M2
+ 3 Change gecos def on M2 to be IA5
+ 4 Start M1 and M2
+ 5 Update M1 schema so that M1 has greatest nsSchemaCSN
+ 6 Update testuser with gecos directoryString value
+ 7 Check replication is still working
+ 8 Check gecos is DirectoryString on M1 and M2
+ :expectedresults:
+ 1. success
+ 2. success
+ 3. success
+ 4. success
+ 5. success
+ 6. success
+ 7. success
+ 8. success
+
+ """
+
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+ m1 = topo_m2.ms["supplier1"]
+ m2 = topo_m2.ms["supplier2"]
+
+
+ # create a test user
+ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
+ testuser = UserAccount(m1, testuser_dn)
+ try:
+ testuser.create(properties={
+ 'uid': 'testuser',
+ 'cn': 'testuser',
+ 'sn': 'testuser',
+ 'uidNumber' : '1000',
+ 'gidNumber' : '2000',
+ 'homeDirectory' : '/home/testuser',
+ })
+ except ldap.ALREADY_EXISTS:
+ pass
+ repl.wait_for_replication(m1, m2)
+
+ # Stop suppliers to update the schema
+ m1.stop()
+ m2.stop()
+
+ # on M1: gecos is DirectoryString (default)
+ # on M2: gecos is IA5
+ schema_filename = (m2.schemadir + "/99user.ldif")
+ try:
+ with open(schema_filename, 'w') as schema_file:
+ schema_file.write("dn: cn=schema\n")
+ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
+ "'gecos' DESC 'The GECOS field; the common name' " +
+ "EQUALITY caseIgnoreIA5Match " +
+ "SUBSTR caseIgnoreIA5SubstringsMatch " +
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
+ "SINGLE-VALUE )\n")
+ os.chmod(schema_filename, 0o777)
+ except OSError as e:
+ log.fatal("Failed to update schema file: " +
+ "{} Error: {}".format(schema_filename, str(e)))
+
+ # start the instances
+ m1.start()
+ m2.start()
+
+ # Check that gecos is IA5 on M2
+ schema = SchemaLegacy(m2)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
+
+
+ # update M1 schema to increase its nsschemaCSN
+ new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )"
+ m1.schema.add_schema('attributetypes', ensure_bytes(new_at))
+
+ # Add a gecos UTF value on M1
+ testuser.replace('gecos', 'Hélène')
+
+ # Check replication is still working
+ testuser.replace('displayName', 'ascii value')
+ repl.wait_for_replication(m1, m2)
+ testuser_m2 = UserAccount(m2, testuser_dn)
+ assert testuser_m2.exists()
+ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
+
+ # Check that gecos is DirectoryString on M1
+ schema = SchemaLegacy(m1)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
+
+ # Check that gecos is DirectoryString on M2
+ schema = SchemaLegacy(m2)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
+
+ def fin():
+ m1.start()
+ m2.start()
+ testuser.delete()
+ m1.schema.del_schema('attributetypes', ensure_bytes(new_at))
+ repl.wait_for_replication(m1, m2)
+
+ # on M2 restore a default 99user.ldif
+ m2.stop()
+ os.remove(m2.schemadir + "/99user.ldif")
+ schema_filename = (m2.schemadir + "/99user.ldif")
+ try:
+ with open(schema_filename, 'w') as schema_file:
+ schema_file.write("dn: cn=schema\n")
+ os.chmod(schema_filename, 0o777)
+ except OSError as e:
+ log.fatal("Failed to update schema file: " +
+ "{} Error: {}".format(schema_filename, str(e)))
+ m2.start()
+ m1.start()
+
+ request.addfinalizer(fin)
+
+def test_gecos_directoryString_wins_M2(topo_m2, request):
+ """Check that if the initial syntaxes are IA5(M2) and DirectoryString(M1)
+ Then directoryString wins when nsSchemaCSN M2 is the greatest
+
+ :id: 2da7f1b1-f86d-4072-a940-ba56d4bc8348
+ :setup: Two suppliers replication setup
+ :steps:
+ 1. Create a testuser on M1
+ 2 Stop M1 and M2
+ 3 Change gecos def on M2 to be IA5
+ 4 Start M1 and M2
+ 5 Update M2 schema so that M2 has greatest nsSchemaCSN
+ 6 Update testuser on M2 and trigger replication to M1
+ 7 Update testuser on M2 with gecos directoryString value
+ 8 Check replication is still working
+ 9 Check gecos is DirectoryString on M1 and M2
+ :expectedresults:
+ 1. success
+ 2. success
+ 3. success
+ 4. success
+ 5. success
+ 6. success
+ 7. success
+ 8. success
+ 9. success
+
+ """
+
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+ m1 = topo_m2.ms["supplier1"]
+ m2 = topo_m2.ms["supplier2"]
+
+
+ # create a test user
+ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
+ testuser = UserAccount(m1, testuser_dn)
+ try:
+ testuser.create(properties={
+ 'uid': 'testuser',
+ 'cn': 'testuser',
+ 'sn': 'testuser',
+ 'uidNumber' : '1000',
+ 'gidNumber' : '2000',
+ 'homeDirectory' : '/home/testuser',
+ })
+ except ldap.ALREADY_EXISTS:
+ pass
+ testuser.replace('displayName', 'to trigger replication M1-> M2')
+ repl.wait_for_replication(m1, m2)
+
+ # Stop suppliers to update the schema
+ m1.stop()
+ m2.stop()
+
+ # on M1: gecos is DirectoryString (default)
+ # on M2: gecos is IA5
+ schema_filename = (m2.schemadir + "/99user.ldif")
+ try:
+ with open(schema_filename, 'w') as schema_file:
+ schema_file.write("dn: cn=schema\n")
+ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
+ "'gecos' DESC 'The GECOS field; the common name' " +
+ "EQUALITY caseIgnoreIA5Match " +
+ "SUBSTR caseIgnoreIA5SubstringsMatch " +
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
+ "SINGLE-VALUE )\n")
+ os.chmod(schema_filename, 0o777)
+ except OSError as e:
+ log.fatal("Failed to update schema file: " +
+ "{} Error: {}".format(schema_filename, str(e)))
+
+ # start the instances
+ m1.start()
+ m2.start()
+
+ # Check that gecos is IA5 on M2
+ schema = SchemaLegacy(m2)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
+
+ # update M2 schema to increase its nsschemaCSN
+ new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )"
+ m2.schema.add_schema('attributetypes', ensure_bytes(new_at))
+
+ # update just to trigger replication M2->M1
+ # and update of M2 schema
+ testuser_m2 = UserAccount(m2, testuser_dn)
+ testuser_m2.replace('displayName', 'to trigger replication M2-> M1')
+
+ # Add a gecos UTF value on M1
+ testuser.replace('gecos', 'Hélène')
+
+ # Check replication is still working
+ testuser.replace('displayName', 'ascii value')
+ repl.wait_for_replication(m1, m2)
+ assert testuser_m2.exists()
+ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
+
+ # Check that gecos is DirectoryString on M1
+ schema = SchemaLegacy(m1)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
+
+ # Check that gecos is DirectoryString on M2
+ schema = SchemaLegacy(m2)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
+
+ def fin():
+ m1.start()
+ m2.start()
+ testuser.delete()
+ m1.schema.del_schema('attributetypes', ensure_bytes(new_at))
+ repl.wait_for_replication(m1, m2)
+
+ # on M2 restore a default 99user.ldif
+ m2.stop()
+ os.remove(m2.schemadir + "/99user.ldif")
+ schema_filename = (m2.schemadir + "/99user.ldif")
+ try:
+ with open(schema_filename, 'w') as schema_file:
+ schema_file.write("dn: cn=schema\n")
+ os.chmod(schema_filename, 0o777)
+ except OSError as e:
+ log.fatal("Failed to update schema file: " +
+ "{} Error: {}".format(schema_filename, str(e)))
+ m2.start()
+
+ request.addfinalizer(fin)
if __name__ == '__main__':
# Run isolated
diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif
index 8ba72e1e3..998b8983b 100644
--- a/ldap/schema/10rfc2307compat.ldif
+++ b/ldap/schema/10rfc2307compat.ldif
@@ -21,9 +21,9 @@ attributeTypes: (
attributeTypes: (
1.3.6.1.1.1.1.2 NAME 'gecos'
DESC 'The GECOS field; the common name'
- EQUALITY caseIgnoreIA5Match
- SUBSTR caseIgnoreIA5SubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ EQUALITY caseIgnoreMatch
+ SUBSTR caseIgnoreSubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributeTypes: (
--
2.31.1

View File

@ -1,33 +0,0 @@
From 7573c62a2e61293a4800e67919d79341fa1a1532 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Wed, 26 May 2021 16:07:43 +0200
Subject: [PATCH 10/12] Issue 4764 - replicated operation sometime checks ACI
(#4783)
(cherry picked from commit 0cfdea7abcacfca6686a6cf84dbf7ae1167f3022)
---
ldap/servers/slapd/connection.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index c7a15e775..e0c1a52d2 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1771,6 +1771,14 @@ connection_threadmain()
}
}
+ /*
+ * Fix bz 1931820 issue (the check to set OP_FLAG_REPLICATED may be done
+ * before replication session is properly set).
+ */
+ if (replication_connection) {
+ operation_set_flag(op, OP_FLAG_REPLICATED);
+ }
+
/*
* Call the do_<operation> function to process this request.
*/
--
2.26.3

View File

@ -0,0 +1,32 @@
From 3909877f12e50556e844bc20e72870a4fa905ada Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Tue, 9 Nov 2021 12:55:28 +0000
Subject: [PATCH 10/12] Issue 4997 - Function declaration compiler error on
1.4.3
Bug description: Building the server on the 1.4.3 branch generates a
compiler error due to a typo in function declaration.
Fixes: https://github.com/389ds/389-ds-base/issues/4997
Reviewed by: @jchapman (one line commit rule)
---
ldap/servers/slapd/slapi-private.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index 570765e47..d6d74e8a7 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -273,7 +273,7 @@ void *csngen_register_callbacks(CSNGen *gen, GenCSNFn genFn, void *genArg, Abort
void csngen_unregister_callbacks(CSNGen *gen, void *cookie);
/* debugging function */
-void csngen_dump_state(const CSNGen *gen);
+void csngen_dump_state(const CSNGen *gen, int severity);
/* this function tests csn generator */
void csngen_test(void);
--
2.31.1

View File

@ -0,0 +1,32 @@
From 60d570e52465b58167301f64792f5f85cbc85e20 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 10 Nov 2021 08:53:45 -0500
Subject: [PATCH 11/12] Issue 4978 - use more portable python command for
checking containers
Description: During the installation check for containers use arguments
for subprocess.run() that work on all versions of python
relates: https://github.com/389ds/389-ds-base/issues/4978
Reviewed by: mreynolds(one line commit rule)
---
src/lib389/lib389/instance/setup.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index 7b0147cf9..b23d2deb8 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -734,7 +734,7 @@ class SetupDs(object):
# Check if we are in a container, if so don't use /dev/shm for the db home dir
# as containers typically don't allocate enough space for dev/shm and we don't
# want to unexpectedly break the server after an upgrade
- container_result = subprocess.run(["systemd-detect-virt", "-c"], capture_output=True)
+ container_result = subprocess.run(["systemd-detect-virt", "-c"], stdout=subprocess.PIPE)
if container_result.returncode == 0:
# In a container, set the db_home_dir to the db path
self.log.debug("Container detected setting db home directory to db directory.")
--
2.31.1

View File

@ -1,155 +0,0 @@
From 580880a598a8f9972994684c49593a4cf8b8969b Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Sat, 29 May 2021 13:19:53 -0400
Subject: [PATCH 12/12] Issue 4778 - RFE - Add changelog compaction task in
1.4.3
Description: In 1.4.3 the replication changelog is a separate database,
so it needs a separate "nsds5task" compaction task (COMPACT_CL5)
relates: https://github.com/389ds/389-ds-base/issues/4778
ASAN tested and approved
Reviewed by: mreynolds
---
ldap/servers/plugins/replication/cl5_api.c | 21 +++++++++----------
ldap/servers/plugins/replication/cl5_api.h | 1 +
.../replication/repl5_replica_config.c | 9 +++++++-
3 files changed, 19 insertions(+), 12 deletions(-)
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index 75a2f46f5..4c5077b48 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -266,7 +266,6 @@ static int _cl5TrimInit(void);
static void _cl5TrimCleanup(void);
static int _cl5TrimMain(void *param);
static void _cl5DoTrimming(void);
-static void _cl5CompactDBs(void);
static void _cl5PurgeRID(Object *file_obj, ReplicaId cleaned_rid);
static int _cl5PurgeGetFirstEntry(Object *file_obj, CL5Entry *entry, void **iterator, DB_TXN *txnid, int rid, DBT *key);
static int _cl5PurgeGetNextEntry(CL5Entry *entry, void *iterator, DBT *key);
@@ -3152,7 +3151,7 @@ _cl5TrimMain(void *param __attribute__((unused)))
if (slapi_current_utc_time() > compactdb_time) {
/* time to trim */
timeCompactPrev = timeNow;
- _cl5CompactDBs();
+ cl5CompactDBs();
compacting = PR_FALSE;
}
}
@@ -3250,8 +3249,8 @@ _cl5DoPurging(cleanruv_purge_data *purge_data)
}
/* clear free page files to reduce changelog */
-static void
-_cl5CompactDBs(void)
+void
+cl5CompactDBs(void)
{
int rc;
Object *fileObj = NULL;
@@ -3264,14 +3263,14 @@ _cl5CompactDBs(void)
rc = TXN_BEGIN(s_cl5Desc.dbEnv, NULL, &txnid, 0);
if (rc) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
- "_cl5CompactDBs - Failed to begin transaction; db error - %d %s\n",
+ "cl5CompactDBs - Failed to begin transaction; db error - %d %s\n",
rc, db_strerror(rc));
goto bail;
}
slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl,
- "_cl5CompactDBs - compacting replication changelogs...\n");
+ "cl5CompactDBs - compacting replication changelogs...\n");
for (fileObj = objset_first_obj(s_cl5Desc.dbFiles);
fileObj;
fileObj = objset_next_obj(s_cl5Desc.dbFiles, fileObj)) {
@@ -3284,17 +3283,17 @@ _cl5CompactDBs(void)
&c_data, DB_FREE_SPACE, NULL /*end*/);
if (rc) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
- "_cl5CompactDBs - Failed to compact %s; db error - %d %s\n",
+ "cl5CompactDBs - Failed to compact %s; db error - %d %s\n",
dbFile->replName, rc, db_strerror(rc));
goto bail;
}
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
- "_cl5CompactDBs - %s - %d pages freed\n",
+ "cl5CompactDBs - %s - %d pages freed\n",
dbFile->replName, c_data.compact_pages_free);
}
slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name_cl,
- "_cl5CompactDBs - compacting replication changelogs finished.\n");
+ "cl5CompactDBs - compacting replication changelogs finished.\n");
bail:
if (fileObj) {
object_release(fileObj);
@@ -3303,14 +3302,14 @@ bail:
rc = TXN_ABORT(txnid);
if (rc) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
- "_cl5CompactDBs - Failed to abort transaction; db error - %d %s\n",
+ "cl5CompactDBs - Failed to abort transaction; db error - %d %s\n",
rc, db_strerror(rc));
}
} else {
rc = TXN_COMMIT(txnid);
if (rc) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
- "_cl5CompactDBs - Failed to commit transaction; db error - %d %s\n",
+ "cl5CompactDBs - Failed to commit transaction; db error - %d %s\n",
rc, db_strerror(rc));
}
}
diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h
index 4b0949fb3..11db771f2 100644
--- a/ldap/servers/plugins/replication/cl5_api.h
+++ b/ldap/servers/plugins/replication/cl5_api.h
@@ -405,5 +405,6 @@ int cl5DeleteRUV(void);
void cl5CleanRUV(ReplicaId rid);
void cl5NotifyCleanup(int rid);
void trigger_cl_purging(cleanruv_purge_data *purge_data);
+void cl5CompactDBs(void);
#endif
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index a969ef82f..e708a1ccb 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -29,6 +29,8 @@
#define CLEANRUVLEN 8
#define CLEANALLRUV "CLEANALLRUV"
#define CLEANALLRUVLEN 11
+#define COMPACT_CL5 "COMPACT_CL5"
+#define COMPACT_CL5_LEN 11
#define REPLICA_RDN "cn=replica"
#define CLEANALLRUV_MAX_WAIT 7200 /* 2 hours */
@@ -1050,7 +1052,6 @@ replica_config_change_flags(Replica *r, const char *new_flags, char *returntext
static int
replica_execute_task(Replica *r, const char *task_name, char *returntext, int apply_mods)
{
-
if (strcasecmp(task_name, CL2LDIF_TASK) == 0) {
if (apply_mods) {
return replica_execute_cl2ldif_task(r, returntext);
@@ -1084,6 +1085,12 @@ replica_execute_task(Replica *r, const char *task_name, char *returntext, int ap
return replica_execute_cleanall_ruv_task(r, (ReplicaId)temprid, empty_task, "no", PR_TRUE, returntext);
} else
return LDAP_SUCCESS;
+ } else if (strncasecmp(task_name, COMPACT_CL5, COMPACT_CL5_LEN) == 0) {
+ /* compact the replication changelogs */
+ if (apply_mods) {
+ cl5CompactDBs();
+ }
+ return LDAP_SUCCESS;
} else {
PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, "Unsupported replica task - %s", task_name);
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
--
2.26.3

View File

@ -0,0 +1,31 @@
From 2c6653edef793d46815e6df607e55d68e14fe232 Mon Sep 17 00:00:00 2001
From: spike <spike@fedoraproject.org>
Date: Fri, 5 Nov 2021 13:56:41 +0100
Subject: [PATCH 12/12] Issue 4959 - Invalid /etc/hosts setup can cause
isLocalHost to fail.
Description: Use local_simple_allocate in dsctl so that isLocal is always set properly
Relates: https://github.com/389ds/389-ds-base/issues/4959
Reviewed by: @droideck (Thanks!)
---
src/lib389/cli/dsctl | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl
index b6c42b5cc..d2ea6cd29 100755
--- a/src/lib389/cli/dsctl
+++ b/src/lib389/cli/dsctl
@@ -135,7 +135,7 @@ if __name__ == '__main__':
log.error("Unable to access instance information. Are you running as the correct user? (usually dirsrv or root)")
sys.exit(1)
- inst.allocate(insts[0])
+ inst.local_simple_allocate(insts[0]['server-id'])
log.debug('Instance allocated')
try:
--
2.31.1

View File

@ -0,0 +1,105 @@
From d000349089eb15b3476ec302f4279f118336290e Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 16 Dec 2021 16:13:08 -0500
Subject: [PATCH 1/2] CVE-2021-4091 (BZ#2030367) double-free of the virtual
attribute context in persistent search
description:
A search is processed by a worker using a private pblock.
If the search is persistent, the worker spawn a thread
and kind of duplicate its private pblock so that the spawn
thread continue to process the persistent search.
Then worker ends the initial search, reinit (free) its private pblock,
and returns monitoring the wait_queue.
When the persistent search completes, it frees the duplicated
pblock.
The problem is that private pblock and duplicated pblock
are referring to a same structure (pb_vattr_context).
That lead to a double free
Fix:
When cloning the pblock (slapi_pblock_clone) make sure
to transfert the references inside the original (private)
pblock to the target (cloned) one
That includes pb_vattr_context pointer.
Reviewed by: Mark Reynolds, James Chapman, Pierre Rogier (Thanks !)
---
ldap/servers/slapd/connection.c | 8 +++++---
ldap/servers/slapd/pblock.c | 14 ++++++++++++--
2 files changed, 17 insertions(+), 5 deletions(-)
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index e0c1a52d2..fc7ed9c4a 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1823,9 +1823,11 @@ connection_threadmain()
pthread_mutex_unlock(&(conn->c_mutex));
}
/* ps_add makes a shallow copy of the pb - so we
- * can't free it or init it here - just set operation to NULL.
- * ps_send_results will call connection_remove_operation_ext to free it
- */
+ * can't free it or init it here - just set operation to NULL.
+ * ps_send_results will call connection_remove_operation_ext to free it
+ * The connection_thread private pblock ('pb') has be cloned and should only
+ * be reinit (slapi_pblock_init)
+ */
slapi_pblock_set(pb, SLAPI_OPERATION, NULL);
slapi_pblock_init(pb);
} else {
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index a64986aeb..c78d1250f 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -292,6 +292,12 @@ _pblock_assert_pb_deprecated(Slapi_PBlock *pblock)
}
}
+/* It clones the pblock
+ * the content of the source pblock is transfered
+ * to the target pblock (returned)
+ * The source pblock should not be used for any operation
+ * it needs to be reinit (slapi_pblock_init)
+ */
Slapi_PBlock *
slapi_pblock_clone(Slapi_PBlock *pb)
{
@@ -312,28 +318,32 @@ slapi_pblock_clone(Slapi_PBlock *pb)
if (pb->pb_task != NULL) {
_pblock_assert_pb_task(new_pb);
*(new_pb->pb_task) = *(pb->pb_task);
+ memset(pb->pb_task, 0, sizeof(slapi_pblock_task));
}
if (pb->pb_mr != NULL) {
_pblock_assert_pb_mr(new_pb);
*(new_pb->pb_mr) = *(pb->pb_mr);
+ memset(pb->pb_mr, 0, sizeof(slapi_pblock_matching_rule));
}
if (pb->pb_misc != NULL) {
_pblock_assert_pb_misc(new_pb);
*(new_pb->pb_misc) = *(pb->pb_misc);
+ memset(pb->pb_misc, 0, sizeof(slapi_pblock_misc));
}
if (pb->pb_intop != NULL) {
_pblock_assert_pb_intop(new_pb);
*(new_pb->pb_intop) = *(pb->pb_intop);
- /* set pwdpolicy to NULL so this clone allocates its own policy */
- new_pb->pb_intop->pwdpolicy = NULL;
+ memset(pb->pb_intop, 0, sizeof(slapi_pblock_intop));
}
if (pb->pb_intplugin != NULL) {
_pblock_assert_pb_intplugin(new_pb);
*(new_pb->pb_intplugin) = *(pb->pb_intplugin);
+ memset(pb->pb_intplugin, 0,sizeof(slapi_pblock_intplugin));
}
if (pb->pb_deprecated != NULL) {
_pblock_assert_pb_deprecated(new_pb);
*(new_pb->pb_deprecated) = *(pb->pb_deprecated);
+ memset(pb->pb_deprecated, 0, sizeof(slapi_pblock_deprecated));
}
#ifdef PBLOCK_ANALYTICS
new_pb->analytics = NULL;
--
2.31.1

View File

@ -1,52 +0,0 @@
From bc41bbb89405b2059b80e344b2d4c59ae39aabe6 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 10 Jun 2021 15:03:27 +0200
Subject: [PATCH 1/3] Issue 4797 - ACL IP ADDRESS evaluation may corrupt
c_isreplication_session connection flags (#4799)
Bug description:
The fix for ticket #3764 was broken with a missing break in a
switch. The consequence is that while setting the client IP
address in the pblock (SLAPI_CONN_CLIENTNETADDR_ACLIP), the
connection is erroneously set as replication connection.
This can lead to crash or failure of testcase
test_access_from_certain_network_only_ip.
This bug was quite hidden until the fix for #4764 is
showing it more frequently
Fix description:
Add the missing break
relates: https://github.com/389ds/389-ds-base/issues/4797
Reviewed by: Mark Reynolds
Platforms tested: F33
---
ldap/servers/slapd/pblock.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index fcac53839..a64986aeb 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -2595,7 +2595,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
pblock->pb_conn->c_authtype = slapi_ch_strdup((char *)value);
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
break;
- case SLAPI_CONN_CLIENTNETADDR_ACLIP:
+ case SLAPI_CONN_CLIENTNETADDR_ACLIP:
if (pblock->pb_conn == NULL) {
break;
}
@@ -2603,6 +2603,7 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
slapi_ch_free((void **)&pblock->pb_conn->cin_addr_aclip);
pblock->pb_conn->cin_addr_aclip = (PRNetAddr *)value;
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
+ break;
case SLAPI_CONN_IS_REPLICATION_SESSION:
if (pblock->pb_conn == NULL) {
slapi_log_err(SLAPI_LOG_ERR,
--
2.31.1

View File

@ -1,79 +0,0 @@
From b3170e39519530c39d59202413b20e6bd466224d Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 27 Jan 2021 09:56:38 +0000
Subject: [PATCH 2/3] Issue 4396 - Minor memory leak in backend (#4558) (#4572)
Bug Description: As multiple suffixes per backend were no longer used, this
functionality has been replaced with a single suffix per backend. Legacy
code remains that adds multiple suffixes to the dse internal backend,
resulting in memory allocations that are lost.
Also a minor typo is corrected in backend.c
Fix Description: Calls to be_addsuffix on the DSE backend are removed
as they are never used.
Fixes: https://github.com/389ds/389-ds-base/issues/4396
Reviewed by: mreynolds389, Firstyear, droideck (Thank you)
---
ldap/servers/slapd/backend.c | 2 +-
ldap/servers/slapd/fedse.c | 12 +++---------
2 files changed, 4 insertions(+), 10 deletions(-)
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
index bc52b4643..5707504a9 100644
--- a/ldap/servers/slapd/backend.c
+++ b/ldap/servers/slapd/backend.c
@@ -42,7 +42,7 @@ be_init(Slapi_Backend *be, const char *type, const char *name, int isprivate, in
}
be->be_monitordn = slapi_create_dn_string("cn=monitor,cn=%s,cn=%s,cn=plugins,cn=config",
name, type);
- if (NULL == be->be_configdn) {
+ if (NULL == be->be_monitordn) {
slapi_log_err(SLAPI_LOG_ERR,
"be_init", "Failed create instance monitor dn for "
"plugin %s, instance %s\n",
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
index 0d645f909..7b820b540 100644
--- a/ldap/servers/slapd/fedse.c
+++ b/ldap/servers/slapd/fedse.c
@@ -2827,7 +2827,7 @@ search_snmp(Slapi_PBlock *pb __attribute__((unused)),
}
/*
- * Called from config.c to install the internal backends
+ * Called from main.c to install the internal backends
*/
int
setup_internal_backends(char *configdir)
@@ -2846,7 +2846,6 @@ setup_internal_backends(char *configdir)
Slapi_DN counters;
Slapi_DN snmp;
Slapi_DN root;
- Slapi_Backend *be;
Slapi_DN encryption;
Slapi_DN saslmapping;
Slapi_DN plugins;
@@ -2895,16 +2894,11 @@ setup_internal_backends(char *configdir)
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &saslmapping, LDAP_SCOPE_SUBTREE, "(objectclass=nsSaslMapping)", sasl_map_config_add, NULL, NULL);
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &plugins, LDAP_SCOPE_SUBTREE, "(objectclass=nsSlapdPlugin)", check_plugin_path, NULL, NULL);
- be = be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
- be_addsuffix(be, &root);
- be_addsuffix(be, &monitor);
- be_addsuffix(be, &config);
+ be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
/*
- * Now that the be's are in place, we can
- * setup the mapping tree.
+ * Now that the be's are in place, we can setup the mapping tree.
*/
-
if (mapping_tree_init()) {
slapi_log_err(SLAPI_LOG_EMERG, "setup_internal_backends", "Failed to init mapping tree\n");
exit(1);
--
2.31.1

View File

@ -0,0 +1,102 @@
From 03ca5111a8de602ecef9ad33206ba593b242d0df Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 21 Jan 2022 10:15:35 -0500
Subject: [PATCH 1/2] Issue 5127 - run restorecon on /dev/shm at server startup
Description:
Update the systemd service file to execute a script that runs
restorecon on the DB home directory. This addresses issues with
backup/restore, reboot, and FS restore issues that can happen when
/dev/shm is missing or created outside of dscreate.
relates: https://github.com/389ds/389-ds-base/issues/5127
Reviewed by: progier & viktor (Thanks!!)
---
Makefile.am | 2 +-
rpm/389-ds-base.spec.in | 1 +
wrappers/ds_selinux_restorecon.sh.in | 33 ++++++++++++++++++++++++++++
wrappers/systemd.template.service.in | 1 +
4 files changed, 36 insertions(+), 1 deletion(-)
create mode 100644 wrappers/ds_selinux_restorecon.sh.in
diff --git a/Makefile.am b/Makefile.am
index fc5a6a7d1..d6ad273c3 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -775,7 +775,7 @@ libexec_SCRIPTS += ldap/admin/src/scripts/ds_selinux_enabled \
ldap/admin/src/scripts/ds_selinux_port_query
endif
if SYSTEMD
-libexec_SCRIPTS += wrappers/ds_systemd_ask_password_acl
+libexec_SCRIPTS += wrappers/ds_systemd_ask_password_acl wrappers/ds_selinux_restorecon.sh
endif
install-data-hook:
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index d80de8422..6c0d95abd 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -623,6 +623,7 @@ exit 0
%{_sbindir}/ns-slapd
%{_mandir}/man8/ns-slapd.8.gz
%{_libexecdir}/%{pkgname}/ds_systemd_ask_password_acl
+%{_libexecdir}/%{pkgname}/ds_selinux_restorecon.sh
%{_mandir}/man5/99user.ldif.5.gz
%{_mandir}/man5/certmap.conf.5.gz
%{_mandir}/man5/slapd-collations.conf.5.gz
diff --git a/wrappers/ds_selinux_restorecon.sh.in b/wrappers/ds_selinux_restorecon.sh.in
new file mode 100644
index 000000000..063347de3
--- /dev/null
+++ b/wrappers/ds_selinux_restorecon.sh.in
@@ -0,0 +1,33 @@
+#!/bin/sh
+# BEGIN COPYRIGHT BLOCK
+# Copyright (C) 2022 Red Hat, Inc.
+#
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# END COPYRIGHT BLOCK
+
+# Make sure we have the path to the dse.ldif
+if [ -z $1 ]
+then
+ echo "usage: ${0} /etc/dirsrv/slapd-<instance>/dse.ldif"
+ exit 0
+fi
+
+if ! command -v restorecon &> /dev/null
+then
+ # restorecon is not available
+ exit 0
+fi
+
+# Grep the db_home_dir out of the config file
+DS_HOME_DIR=`grep 'nsslapd-db-home-directory: ' $1 | awk '{print $2}'`
+if [ -z "$DS_HOME_DIR" ]
+then
+ # No DB home set, that's ok
+ exit 0
+fi
+
+# Now run restorecon
+restorecon ${DS_HOME_DIR}
diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in
index a8c21a9be..4485e0ec0 100644
--- a/wrappers/systemd.template.service.in
+++ b/wrappers/systemd.template.service.in
@@ -14,6 +14,7 @@ EnvironmentFile=-@initconfigdir@/@package_name@
EnvironmentFile=-@initconfigdir@/@package_name@-%i
PIDFile=/run/@package_name@/slapd-%i.pid
ExecStartPre=@libexecdir@/ds_systemd_ask_password_acl @instconfigdir@/slapd-%i/dse.ldif
+ExecStartPre=@libexecdir@/ds_selinux_restorecon.sh @instconfigdir@/slapd-%i/dse.ldif
ExecStart=@sbindir@/ns-slapd -D @instconfigdir@/slapd-%i -i /run/@package_name@/slapd-%i.pid
PrivateTmp=on
--
2.31.1

View File

@ -1,66 +0,0 @@
From 8d06fdf44b0d337f1e321e61ee1b22972ddea917 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Fri, 2 Apr 2021 14:05:41 +0200
Subject: [PATCH 3/3] Issue 4700 - Regression in winsync replication agreement
(#4712)
Bug description:
#4396 fixes a memory leak but did not set 'cn=config' as
DSE backend.
It had no signicant impact unless with sidgen IPA plugin
Fix description:
revert the portion of the #4364 patch that set be_suffix
in be_addsuffix, free the suffix before setting it
relates: https://github.com/389ds/389-ds-base/issues/4700
Reviewed by: Pierre Rogier (thanks !)
Platforms tested: F33
---
ldap/servers/slapd/backend.c | 3 ++-
ldap/servers/slapd/fedse.c | 6 +++++-
2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
index 5707504a9..5db706841 100644
--- a/ldap/servers/slapd/backend.c
+++ b/ldap/servers/slapd/backend.c
@@ -173,7 +173,8 @@ void
be_addsuffix(Slapi_Backend *be, const Slapi_DN *suffix)
{
if (be->be_state != BE_STATE_DELETED) {
- be->be_suffix = slapi_sdn_dup(suffix);;
+ slapi_sdn_free(&be->be_suffix);
+ be->be_suffix = slapi_sdn_dup(suffix);
}
}
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
index 7b820b540..44159c991 100644
--- a/ldap/servers/slapd/fedse.c
+++ b/ldap/servers/slapd/fedse.c
@@ -2846,6 +2846,7 @@ setup_internal_backends(char *configdir)
Slapi_DN counters;
Slapi_DN snmp;
Slapi_DN root;
+ Slapi_Backend *be;
Slapi_DN encryption;
Slapi_DN saslmapping;
Slapi_DN plugins;
@@ -2894,7 +2895,10 @@ setup_internal_backends(char *configdir)
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &saslmapping, LDAP_SCOPE_SUBTREE, "(objectclass=nsSaslMapping)", sasl_map_config_add, NULL, NULL);
dse_register_callback(pfedse, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, &plugins, LDAP_SCOPE_SUBTREE, "(objectclass=nsSlapdPlugin)", check_plugin_path, NULL, NULL);
- be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
+ be = be_new_internal(pfedse, "DSE", DSE_BACKEND, &fedse_plugin);
+ be_addsuffix(be, &root);
+ be_addsuffix(be, &monitor);
+ be_addsuffix(be, &config);
/*
* Now that the be's are in place, we can setup the mapping tree.
--
2.31.1

View File

@ -0,0 +1,35 @@
From 0ed471bae52bb0debd23336cbc5f3f1d400cbbc9 Mon Sep 17 00:00:00 2001
From: Adam Williamson <awilliam@redhat.com>
Date: Thu, 27 Jan 2022 11:07:26 -0800
Subject: [PATCH] Issue 5127 - ds_selinux_restorecon.sh: always exit 0
Description:
We don't want to error out and give up on starting the service
if the restorecon fails - it might just be that the directory
doesn't exist and doesn't need restoring. Issue identified and
fix suggested by Simon Farnsworth
relates: https://github.com/389ds/389-ds-base/issues/5127
Reviewed by: adamw & mreynolds
---
wrappers/ds_selinux_restorecon.sh.in | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/wrappers/ds_selinux_restorecon.sh.in b/wrappers/ds_selinux_restorecon.sh.in
index 063347de3..2d7386233 100644
--- a/wrappers/ds_selinux_restorecon.sh.in
+++ b/wrappers/ds_selinux_restorecon.sh.in
@@ -29,5 +29,6 @@ then
exit 0
fi
-# Now run restorecon
-restorecon ${DS_HOME_DIR}
+# Now run restorecon, but don't die if it fails (could be that the
+# directory doesn't exist)
+restorecon ${DS_HOME_DIR} || :
--
2.31.1

View File

@ -1,88 +0,0 @@
From 7345c51c68dfd90a704ccbb0e5b1e736af80f146 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Mon, 17 May 2021 16:10:22 +0200
Subject: [PATCH] Issue 4725 - Fix compiler warnings
---
ldap/servers/slapd/proto-slap.h | 2 +-
ldap/servers/slapd/pw.c | 9 ++++-----
ldap/servers/slapd/pw_retry.c | 2 --
3 files changed, 5 insertions(+), 8 deletions(-)
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 6ff178127..2768d5a1d 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1012,7 +1012,7 @@ int add_shadow_ext_password_attrs(Slapi_PBlock *pb, Slapi_Entry **e);
* pw_retry.c
*/
int update_pw_retry(Slapi_PBlock *pb);
-int update_trp_pw_usecount(Slapi_PBlock *pb, Slapi_Entry *e, int32_t use_count);
+int update_tpr_pw_usecount(Slapi_PBlock *pb, Slapi_Entry *e, int32_t use_count);
void pw_apply_mods(const Slapi_DN *sdn, Slapi_Mods *mods);
void pw_set_componentID(struct slapi_componentid *cid);
struct slapi_componentid *pw_get_componentID(void);
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
index d98422513..2a167c8f1 100644
--- a/ldap/servers/slapd/pw.c
+++ b/ldap/servers/slapd/pw.c
@@ -2622,7 +2622,6 @@ int
slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int send_result) {
passwdPolicy *pwpolicy = NULL;
char *dn = NULL;
- int tpr_maxuse;
char *value;
time_t cur_time;
char *cur_time_str = NULL;
@@ -2638,7 +2637,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
return 0;
}
- if (slapi_entry_attr_hasvalue(bind_target_entry, "pwdTPRReset", "TRUE") == NULL) {
+ if (!slapi_entry_attr_hasvalue(bind_target_entry, "pwdTPRReset", "TRUE")) {
/* the password was not reset by an admin while a TRP pwp was set, just returned */
return 0;
}
@@ -2646,7 +2645,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
/* Check entry TPR max use */
if (pwpolicy->pw_tpr_maxuse >= 0) {
uint use_count;
- value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRUseCount");
+ value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRUseCount");
if (value) {
/* max Use is enforced */
use_count = strtoull(value, 0, 0);
@@ -2681,7 +2680,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
/* Check entry TPR expiration at a specific time */
if (pwpolicy->pw_tpr_delay_expire_at >= 0) {
- value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRExpireAt");
+ value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRExpireAt");
if (value) {
/* max Use is enforced */
if (difftime(parse_genTime(cur_time_str), parse_genTime(value)) >= 0) {
@@ -2709,7 +2708,7 @@ slapi_check_tpr_limits(Slapi_PBlock *pb, Slapi_Entry *bind_target_entry, int sen
/* Check entry TPR valid after a specific time */
if (pwpolicy->pw_tpr_delay_valid_from >= 0) {
- value = slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRValidFrom");
+ value = (char *) slapi_entry_attr_get_ref(bind_target_entry, "pwdTPRValidFrom");
if (value) {
/* validity after a specific time is enforced */
if (difftime(parse_genTime(value), parse_genTime(cur_time_str)) >= 0) {
diff --git a/ldap/servers/slapd/pw_retry.c b/ldap/servers/slapd/pw_retry.c
index 5d13eb636..af54aa19d 100644
--- a/ldap/servers/slapd/pw_retry.c
+++ b/ldap/servers/slapd/pw_retry.c
@@ -163,8 +163,6 @@ set_retry_cnt_and_time(Slapi_PBlock *pb, int count, time_t cur_time)
int
set_tpr_usecount_mods(Slapi_PBlock *pb, Slapi_Mods *smods, int count)
{
- char *timestr;
- time_t unlock_time;
char retry_cnt[16] = {0}; /* 1-65535 */
const char *dn = NULL;
Slapi_DN *sdn = NULL;
--
2.31.1

View File

@ -0,0 +1,262 @@
From 93588ea455aff691bdfbf59cdef4df8fcedb69f2 Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Thu, 19 Aug 2021 10:46:00 +1000
Subject: [PATCH 1/2] Issue 4775 - Add entryuuid CLI and Fixup (#4776)
Bug Description: EntryUUID when added was missing it's CLI
and helpers for fixups.
Fix Description: Add the CLI elements.
fixes: https://github.com/389ds/389-ds-base/issues/4775
Author: William Brown <william@blackhats.net.au>
Review by: @mreynolds389 (thanks!)
---
src/lib389/lib389/cli_conf/plugin.py | 6 ++-
.../lib389/cli_conf/plugins/entryuuid.py | 39 ++++++++++++++
src/plugins/entryuuid/src/lib.rs | 54 ++++++++-----------
3 files changed, 65 insertions(+), 34 deletions(-)
create mode 100644 src/lib389/lib389/cli_conf/plugins/entryuuid.py
diff --git a/src/lib389/lib389/cli_conf/plugin.py b/src/lib389/lib389/cli_conf/plugin.py
index 560c57f9b..7c0cf2c80 100644
--- a/src/lib389/lib389/cli_conf/plugin.py
+++ b/src/lib389/lib389/cli_conf/plugin.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2022 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -27,6 +27,8 @@ from lib389.cli_conf.plugins import passthroughauth as cli_passthroughauth
from lib389.cli_conf.plugins import retrochangelog as cli_retrochangelog
from lib389.cli_conf.plugins import automember as cli_automember
from lib389.cli_conf.plugins import posix_winsync as cli_posix_winsync
+from lib389.cli_conf.plugins import contentsync as cli_contentsync
+from lib389.cli_conf.plugins import entryuuid as cli_entryuuid
SINGULAR = Plugin
MANY = Plugins
@@ -113,6 +115,8 @@ def create_parser(subparsers):
cli_passthroughauth.create_parser(subcommands)
cli_retrochangelog.create_parser(subcommands)
cli_posix_winsync.create_parser(subcommands)
+ cli_contentsync.create_parser(subcommands)
+ cli_entryuuid.create_parser(subcommands)
list_parser = subcommands.add_parser('list', help="List current configured (enabled and disabled) plugins")
list_parser.set_defaults(func=plugin_list)
diff --git a/src/lib389/lib389/cli_conf/plugins/entryuuid.py b/src/lib389/lib389/cli_conf/plugins/entryuuid.py
new file mode 100644
index 000000000..6c86bff4b
--- /dev/null
+++ b/src/lib389/lib389/cli_conf/plugins/entryuuid.py
@@ -0,0 +1,39 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2021 William Brown <william@blackhats.net.au>
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import ldap
+from lib389.plugins import EntryUUIDPlugin
+from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add
+
+def do_fixup(inst, basedn, log, args):
+ plugin = EntryUUIDPlugin(inst)
+ log.info('Attempting to add task entry...')
+ if not plugin.status():
+ log.error("'%s' is disabled. Fix up task can't be executed" % plugin.rdn)
+ return
+ fixup_task = plugin.fixup(args.DN, args.filter)
+ fixup_task.wait()
+ exitcode = fixup_task.get_exit_code()
+ if exitcode != 0:
+ log.error('EntryUUID fixup task has failed. Please, check the error log for more - %s' % exitcode)
+ else:
+ log.info('Successfully added task entry')
+
+def create_parser(subparsers):
+ referint = subparsers.add_parser('entryuuid', help='Manage and configure EntryUUID plugin')
+ subcommands = referint.add_subparsers(help='action')
+
+ add_generic_plugin_parsers(subcommands, EntryUUIDPlugin)
+
+ fixup = subcommands.add_parser('fixup', help='Run the fix-up task for EntryUUID plugin')
+ fixup.set_defaults(func=do_fixup)
+ fixup.add_argument('DN', help="Base DN that contains entries to fix up")
+ fixup.add_argument('-f', '--filter',
+ help='Filter for entries to fix up.\n If omitted, all entries under base DN'
+ 'will have their EntryUUID attribute regenerated if not present.')
+
diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
index da9f0c239..29a9f1258 100644
--- a/src/plugins/entryuuid/src/lib.rs
+++ b/src/plugins/entryuuid/src/lib.rs
@@ -33,7 +33,7 @@ fn assign_uuid(e: &mut EntryRef) {
// 🚧 safety barrier 🚧
if e.contains_attr("entryUUID") {
log_error!(
- ErrorLevel::Trace,
+ ErrorLevel::Plugin,
"assign_uuid -> entryUUID exists, skipping dn {}",
sdn.to_dn_string()
);
@@ -47,7 +47,7 @@ fn assign_uuid(e: &mut EntryRef) {
if sdn.is_below_suffix(&*config_sdn) || sdn.is_below_suffix(&*schema_sdn) {
// We don't need to assign to these suffixes.
log_error!(
- ErrorLevel::Trace,
+ ErrorLevel::Plugin,
"assign_uuid -> not assigning to {:?} as part of system suffix",
sdn.to_dn_string()
);
@@ -57,7 +57,7 @@ fn assign_uuid(e: &mut EntryRef) {
// Generate a new Uuid.
let u: Uuid = Uuid::new_v4();
log_error!(
- ErrorLevel::Trace,
+ ErrorLevel::Plugin,
"assign_uuid -> assigning {:?} to dn {}",
u,
sdn.to_dn_string()
@@ -78,13 +78,13 @@ impl SlapiPlugin3 for EntryUuid {
fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> {
if pb.get_is_replicated_operation() {
log_error!(
- ErrorLevel::Trace,
+ ErrorLevel::Plugin,
"betxn_pre_add -> replicated operation, will not change"
);
return Ok(());
}
- log_error!(ErrorLevel::Trace, "betxn_pre_add -> start");
+ log_error!(ErrorLevel::Plugin, "betxn_pre_add -> start");
let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?;
assign_uuid(&mut e);
@@ -105,7 +105,7 @@ impl SlapiPlugin3 for EntryUuid {
.first()
.ok_or_else(|| {
log_error!(
- ErrorLevel::Trace,
+ ErrorLevel::Plugin,
"task_validate basedn error -> empty value array?"
);
LDAPError::Operation
@@ -113,7 +113,7 @@ impl SlapiPlugin3 for EntryUuid {
.as_ref()
.try_into()
.map_err(|e| {
- log_error!(ErrorLevel::Trace, "task_validate basedn error -> {:?}", e);
+ log_error!(ErrorLevel::Plugin, "task_validate basedn error -> {:?}", e);
LDAPError::Operation
})?,
None => return Err(LDAPError::ObjectClassViolation),
@@ -124,7 +124,7 @@ impl SlapiPlugin3 for EntryUuid {
.first()
.ok_or_else(|| {
log_error!(
- ErrorLevel::Trace,
+ ErrorLevel::Plugin,
"task_validate filter error -> empty value array?"
);
LDAPError::Operation
@@ -132,7 +132,7 @@ impl SlapiPlugin3 for EntryUuid {
.as_ref()
.try_into()
.map_err(|e| {
- log_error!(ErrorLevel::Trace, "task_validate filter error -> {:?}", e);
+ log_error!(ErrorLevel::Plugin, "task_validate filter error -> {:?}", e);
LDAPError::Operation
})?,
None => {
@@ -144,17 +144,11 @@ impl SlapiPlugin3 for EntryUuid {
// Error if the first filter is empty?
// Now, to make things faster, we wrap the filter in a exclude term.
-
- // 2021 - #4877 because we allow entryuuid to be strings, on import these may
- // be invalid. As a result, we DO need to allow the fixup to check the entryuuid
- // value is correct, so we can not exclude these during the search.
- /*
let raw_filter = if !raw_filter.starts_with('(') && !raw_filter.ends_with('(') {
format!("(&({})(!(entryuuid=*)))", raw_filter)
} else {
format!("(&{}(!(entryuuid=*)))", raw_filter)
};
- */
Ok(FixupData { basedn, raw_filter })
}
@@ -165,7 +159,7 @@ impl SlapiPlugin3 for EntryUuid {
fn task_handler(_task: &Task, data: Self::TaskData) -> Result<Self::TaskData, PluginError> {
log_error!(
- ErrorLevel::Trace,
+ ErrorLevel::Plugin,
"task_handler -> start thread with -> {:?}",
data
);
@@ -205,12 +199,12 @@ impl SlapiPlugin3 for EntryUuid {
}
fn start(_pb: &mut PblockRef) -> Result<(), PluginError> {
- log_error!(ErrorLevel::Trace, "plugin start");
+ log_error!(ErrorLevel::Plugin, "plugin start");
Ok(())
}
fn close(_pb: &mut PblockRef) -> Result<(), PluginError> {
- log_error!(ErrorLevel::Trace, "plugin close");
+ log_error!(ErrorLevel::Plugin, "plugin close");
Ok(())
}
}
@@ -219,20 +213,14 @@ pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError
/* Supply a modification to the entry. */
let sdn = e.get_sdnref();
- /* Check that entryuuid doesn't already exist, and is valid */
- if let Some(valueset) = e.get_attr("entryUUID") {
- if valueset.iter().all(|v| {
- let u: Result<Uuid, _> = (&v).try_into();
- u.is_ok()
- }) {
- // All values were valid uuid, move on!
- log_error!(
- ErrorLevel::Plugin,
- "skipping fixup for -> {}",
- sdn.to_dn_string()
- );
- return Ok(());
- }
+ /* Sanity check that entryuuid doesn't already exist */
+ if e.contains_attr("entryUUID") {
+ log_error!(
+ ErrorLevel::Plugin,
+ "skipping fixup for -> {}",
+ sdn.to_dn_string()
+ );
+ return Ok(());
}
// Setup the modifications
@@ -248,7 +236,7 @@ pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError
match lmod.execute() {
Ok(_) => {
- log_error!(ErrorLevel::Trace, "fixed-up -> {}", sdn.to_dn_string());
+ log_error!(ErrorLevel::Plugin, "fixed-up -> {}", sdn.to_dn_string());
Ok(())
}
Err(e) => {
--
2.34.1

View File

@ -0,0 +1,42 @@
From 525f2307fa3e2d0ae55c8c922e6f7220a1e5bd1b Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 3 Feb 2022 16:51:38 -0500
Subject: [PATCH] Issue 4775 - Fix cherry-pick error
Bug Description: EntryUUID when added was missing it's CLI
and helpers for fixups.
Fix Description: Add the CLI elements.
fixes: https://github.com/389ds/389-ds-base/issues/4775
Author: William Brown <william@blackhats.net.au>
Review by: @mreynolds389 (thanks!)
---
src/lib389/lib389/cli_conf/plugin.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/src/lib389/lib389/cli_conf/plugin.py b/src/lib389/lib389/cli_conf/plugin.py
index 7c0cf2c80..fb0ef3077 100644
--- a/src/lib389/lib389/cli_conf/plugin.py
+++ b/src/lib389/lib389/cli_conf/plugin.py
@@ -27,7 +27,6 @@ from lib389.cli_conf.plugins import passthroughauth as cli_passthroughauth
from lib389.cli_conf.plugins import retrochangelog as cli_retrochangelog
from lib389.cli_conf.plugins import automember as cli_automember
from lib389.cli_conf.plugins import posix_winsync as cli_posix_winsync
-from lib389.cli_conf.plugins import contentsync as cli_contentsync
from lib389.cli_conf.plugins import entryuuid as cli_entryuuid
SINGULAR = Plugin
@@ -115,7 +114,6 @@ def create_parser(subparsers):
cli_passthroughauth.create_parser(subcommands)
cli_retrochangelog.create_parser(subcommands)
cli_posix_winsync.create_parser(subcommands)
- cli_contentsync.create_parser(subcommands)
cli_entryuuid.create_parser(subcommands)
list_parser = subcommands.add_parser('list', help="List current configured (enabled and disabled) plugins")
--
2.34.1

View File

@ -1,202 +0,0 @@
From 59266365eda8130abf6901263efae4c87586376a Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Mon, 28 Jun 2021 16:40:15 +0200
Subject: [PATCH] Issue 4814 - _cl5_get_tod_expiration may crash at startup
Bug description:
This bug exist only in 1.4.3 branch
In 1.4.3, CL open as a separated database so
compaction mechanism is started along a CL
mechanism (CL trimming).
The problem is that the configuration of the CL
compaction is done after the compaction mechanism
(is started). Depending on thread scheduling it
crashes
Fix description:
Make sure configuration of compaction thread is
taken into account (cl5ConfigSetCompaction) before
the compaction thread starts (cl5open)
relates: https://github.com/389ds/389-ds-base/issues/4814
Reviewed by: Mark Reynolds, Simon Pichugin (thanks !)
Platforms tested: 8.5
---
ldap/servers/plugins/replication/cl5_api.c | 24 ++++++++++++-------
ldap/servers/plugins/replication/cl5_api.h | 10 +++++++-
ldap/servers/plugins/replication/cl5_config.c | 8 +++++--
ldap/servers/plugins/replication/cl5_init.c | 4 +++-
ldap/servers/plugins/replication/cl5_test.c | 2 +-
.../servers/plugins/replication/repl_shared.h | 2 +-
6 files changed, 35 insertions(+), 15 deletions(-)
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index 4c5077b48..954b6b9e3 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -1016,6 +1016,20 @@ cl5GetState()
return s_cl5Desc.dbState;
}
+void
+cl5ConfigSetCompaction(int compactInterval, char *compactTime)
+{
+
+ if (compactInterval != CL5_NUM_IGNORE) {
+ s_cl5Desc.dbTrim.compactInterval = compactInterval;
+ }
+
+ if (strcmp(compactTime, CL5_STR_IGNORE) != 0) {
+ s_cl5Desc.dbTrim.compactTime = slapi_ch_strdup(compactTime);
+ }
+
+}
+
/* Name: cl5ConfigTrimming
Description: sets changelog trimming parameters; changelog must be open.
Parameters: maxEntries - maximum number of entries in the chnagelog (in all files);
@@ -1026,7 +1040,7 @@ cl5GetState()
CL5_BAD_STATE if changelog is not open
*/
int
-cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval)
+cl5ConfigTrimming(int maxEntries, const char *maxAge, int trimInterval)
{
if (s_cl5Desc.dbState == CL5_STATE_NONE) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
@@ -1058,14 +1072,6 @@ cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char
s_cl5Desc.dbTrim.maxEntries = maxEntries;
}
- if (compactInterval != CL5_NUM_IGNORE) {
- s_cl5Desc.dbTrim.compactInterval = compactInterval;
- }
-
- if (strcmp(compactTime, CL5_STR_IGNORE) != 0) {
- s_cl5Desc.dbTrim.compactTime = slapi_ch_strdup(compactTime);
- }
-
if (trimInterval != CL5_NUM_IGNORE) {
s_cl5Desc.dbTrim.trimInterval = trimInterval;
}
diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h
index 11db771f2..6aa48aec4 100644
--- a/ldap/servers/plugins/replication/cl5_api.h
+++ b/ldap/servers/plugins/replication/cl5_api.h
@@ -227,6 +227,14 @@ int cl5ImportLDIF(const char *clDir, const char *ldifFile, Replica **replicas);
int cl5GetState(void);
+/* Name: cl5ConfigSetCompaction
+ * Description: sets the database compaction parameters
+ * Parameters: compactInterval - Interval for compaction default is 30days
+ * compactTime - Compact time default is 23:59
+ * Return: void
+ */
+void cl5ConfigSetCompaction(int compactInterval, char *compactTime);
+
/* Name: cl5ConfigTrimming
Description: sets changelog trimming parameters
Parameters: maxEntries - maximum number of entries in the log;
@@ -236,7 +244,7 @@ int cl5GetState(void);
Return: CL5_SUCCESS if successful;
CL5_BAD_STATE if changelog has not been open
*/
-int cl5ConfigTrimming(int maxEntries, const char *maxAge, int compactInterval, char *compactTime, int trimInterval);
+int cl5ConfigTrimming(int maxEntries, const char *maxAge, int trimInterval);
void cl5DestroyIterator(void *iterator);
diff --git a/ldap/servers/plugins/replication/cl5_config.c b/ldap/servers/plugins/replication/cl5_config.c
index b32686788..a43534c9b 100644
--- a/ldap/servers/plugins/replication/cl5_config.c
+++ b/ldap/servers/plugins/replication/cl5_config.c
@@ -197,6 +197,8 @@ changelog5_config_add(Slapi_PBlock *pb __attribute__((unused)),
goto done;
}
+ /* Set compaction parameters */
+ cl5ConfigSetCompaction(config.compactInterval, config.compactTime);
/* start the changelog */
rc = cl5Open(config.dir, &config.dbconfig);
@@ -212,7 +214,7 @@ changelog5_config_add(Slapi_PBlock *pb __attribute__((unused)),
}
/* set trimming parameters */
- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
+ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval);
if (rc != CL5_SUCCESS) {
*returncode = 1;
if (returntext) {
@@ -548,6 +550,8 @@ changelog5_config_modify(Slapi_PBlock *pb,
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
"changelog5_config_modify - Deleted the changelog at %s\n", currentDir);
}
+ /* Set compaction parameters */
+ cl5ConfigSetCompaction(config.compactInterval, config.compactTime);
rc = cl5Open(config.dir, &config.dbconfig);
if (rc != CL5_SUCCESS) {
@@ -575,7 +579,7 @@ changelog5_config_modify(Slapi_PBlock *pb,
if (config.maxEntries != CL5_NUM_IGNORE ||
config.trimInterval != CL5_NUM_IGNORE ||
strcmp(config.maxAge, CL5_STR_IGNORE) != 0) {
- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
+ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval);
if (rc != CL5_SUCCESS) {
*returncode = 1;
if (returntext) {
diff --git a/ldap/servers/plugins/replication/cl5_init.c b/ldap/servers/plugins/replication/cl5_init.c
index 251859714..567e0274c 100644
--- a/ldap/servers/plugins/replication/cl5_init.c
+++ b/ldap/servers/plugins/replication/cl5_init.c
@@ -45,6 +45,8 @@ changelog5_init()
rc = 0; /* OK */
goto done;
}
+ /* Set compaction parameters */
+ cl5ConfigSetCompaction(config.compactInterval, config.compactTime);
/* start changelog */
rc = cl5Open(config.dir, &config.dbconfig);
@@ -57,7 +59,7 @@ changelog5_init()
}
/* set trimming parameters */
- rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.compactInterval, config.compactTime, config.trimInterval);
+ rc = cl5ConfigTrimming(config.maxEntries, config.maxAge, config.trimInterval);
if (rc != CL5_SUCCESS) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
"changelog5_init: failed to configure changelog trimming\n");
diff --git a/ldap/servers/plugins/replication/cl5_test.c b/ldap/servers/plugins/replication/cl5_test.c
index d6656653c..efb8c543a 100644
--- a/ldap/servers/plugins/replication/cl5_test.c
+++ b/ldap/servers/plugins/replication/cl5_test.c
@@ -281,7 +281,7 @@ testTrimming()
rc = populateChangelog(300, NULL);
if (rc == 0)
- rc = cl5ConfigTrimming(300, "1d", CHANGELOGDB_COMPACT_INTERVAL, CHANGELOGDB_TRIM_INTERVAL);
+ rc = cl5ConfigTrimming(300, "1d", CHANGELOGDB_TRIM_INTERVAL);
interval = PR_SecondsToInterval(300); /* 5 min is default trimming interval */
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
diff --git a/ldap/servers/plugins/replication/repl_shared.h b/ldap/servers/plugins/replication/repl_shared.h
index 6708e12f7..b59b2bd27 100644
--- a/ldap/servers/plugins/replication/repl_shared.h
+++ b/ldap/servers/plugins/replication/repl_shared.h
@@ -26,7 +26,7 @@
#define CHANGELOGDB_TRIM_INTERVAL 300 /* 5 minutes */
#define CHANGELOGDB_COMPACT_INTERVAL 2592000 /* 30 days */
-#define CHANGELOGDB_COMPACT_TIME "23:55" /* 30 days */
+#define CHANGELOGDB_COMPACT_TIME "23:59" /* around midnight */
#define CONFIG_CHANGELOG_DIR_ATTRIBUTE "nsslapd-changelogdir"
#define CONFIG_CHANGELOG_MAXENTRIES_ATTRIBUTE "nsslapd-changelogmaxentries"
--
2.31.1

View File

@ -1,51 +0,0 @@
From e7fdfe527a5f72674fe4b577a0555cabf8ec73a5 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 7 Jun 2021 11:23:35 +0200
Subject: [PATCH] Issue 4789 - Temporary password rules are not enforce with
local password policy (#4790)
Bug description:
When allocating a password policy structure (new_passwdPolicy)
it is initialized with the local policy definition or
the global one. If it exists a local policy entry, the TPR
attributes (passwordTPRMaxUse, passwordTPRDelayValidFrom and
passwordTPRDelayExpireAt) are not taken into account.
Fix description:
Take into account TPR attributes to initialize the policy
relates: https://github.com/389ds/389-ds-base/issues/4789
Reviewed by: Simon Pichugin, William Brown
Platforms tested: F34
---
ldap/servers/slapd/pw.c | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
index 2a167c8f1..7680df41d 100644
--- a/ldap/servers/slapd/pw.c
+++ b/ldap/servers/slapd/pw.c
@@ -2356,6 +2356,18 @@ new_passwdPolicy(Slapi_PBlock *pb, const char *dn)
if ((sval = attr_get_present_values(attr))) {
pwdpolicy->pw_dict_path = (char *)slapi_value_get_string(*sval);
}
+ } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_MAXUSE)) {
+ if ((sval = attr_get_present_values(attr))) {
+ pwdpolicy->pw_tpr_maxuse = slapi_value_get_int(*sval);
+ }
+ } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_DELAY_EXPIRE_AT)) {
+ if ((sval = attr_get_present_values(attr))) {
+ pwdpolicy->pw_tpr_delay_expire_at = slapi_value_get_int(*sval);
+ }
+ } else if (!strcasecmp(attr_name, CONFIG_PW_TPR_DELAY_VALID_FROM)) {
+ if ((sval = attr_get_present_values(attr))) {
+ pwdpolicy->pw_tpr_delay_valid_from = slapi_value_get_int(*sval);
+ }
}
} /* end of for() loop */
if (pw_entry) {
--
2.31.1

View File

@ -1,350 +0,0 @@
From 6a741b3ef50babf2ac2479437a38829204ffd438 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 17 Jun 2021 16:22:09 +0200
Subject: [PATCH] Issue 4788 - CLI should support Temporary Password Rules
attributes (#4793)
Bug description:
Since #4725, password policy support temporary password rules.
CLI (dsconf) does not support this RFE and only direct ldap
operation can configure global/local password policy
Fix description:
Update dsconf to support this new RFE.
To run successfully the testcase it relies on #4788
relates: #4788
Reviewed by: Simon Pichugin (thanks !!)
Platforms tested: F34
---
.../password/pwdPolicy_attribute_test.py | 172 ++++++++++++++++--
src/lib389/lib389/cli_conf/pwpolicy.py | 5 +-
src/lib389/lib389/pwpolicy.py | 5 +-
3 files changed, 165 insertions(+), 17 deletions(-)
diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
index aee3a91ad..085d0a373 100644
--- a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
@@ -34,7 +34,7 @@ log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
-def create_user(topology_st, request):
+def test_user(topology_st, request):
"""User for binding operation"""
topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on')
log.info('Adding test user {}')
@@ -56,10 +56,11 @@ def create_user(topology_st, request):
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
request.addfinalizer(fin)
+ return user
@pytest.fixture(scope="module")
-def password_policy(topology_st, create_user):
+def password_policy(topology_st, test_user):
"""Set up password policy for subtree and user"""
pwp = PwPolicyManager(topology_st.standalone)
@@ -71,7 +72,7 @@ def password_policy(topology_st, create_user):
pwp.create_user_policy(TEST_USER_DN, policy_props)
@pytest.mark.skipif(ds_is_older('1.4.3.3'), reason="Not implemented")
-def test_pwd_reset(topology_st, create_user):
+def test_pwd_reset(topology_st, test_user):
"""Test new password policy attribute "pwdReset"
:id: 03db357b-4800-411e-a36e-28a534293004
@@ -124,7 +125,7 @@ def test_pwd_reset(topology_st, create_user):
[('on', 'off', ldap.UNWILLING_TO_PERFORM),
('off', 'off', ldap.UNWILLING_TO_PERFORM),
('off', 'on', False), ('on', 'on', False)])
-def test_change_pwd(topology_st, create_user, password_policy,
+def test_change_pwd(topology_st, test_user, password_policy,
subtree_pwchange, user_pwchange, exception):
"""Verify that 'passwordChange' attr works as expected
User should have a priority over a subtree.
@@ -184,7 +185,7 @@ def test_change_pwd(topology_st, create_user, password_policy,
user.reset_password(TEST_USER_PWD)
-def test_pwd_min_age(topology_st, create_user, password_policy):
+def test_pwd_min_age(topology_st, test_user, password_policy):
"""If we set passwordMinAge to some value, for example to 10, then it
should not allow the user to change the password within 10 seconds after
his previous change.
@@ -257,7 +258,7 @@ def test_pwd_min_age(topology_st, create_user, password_policy):
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
user.reset_password(TEST_USER_PWD)
-def test_global_tpr_maxuse_1(topology_st, create_user, request):
+def test_global_tpr_maxuse_1(topology_st, test_user, request):
"""Test global TPR policy : passwordTPRMaxUse
Test that after passwordTPRMaxUse failures to bind
additional bind with valid password are failing with CONSTRAINT_VIOLATION
@@ -374,7 +375,7 @@ def test_global_tpr_maxuse_1(topology_st, create_user, request):
request.addfinalizer(fin)
-def test_global_tpr_maxuse_2(topology_st, create_user, request):
+def test_global_tpr_maxuse_2(topology_st, test_user, request):
"""Test global TPR policy : passwordTPRMaxUse
Test that after less than passwordTPRMaxUse failures to bind
additional bind with valid password are successfull
@@ -474,7 +475,7 @@ def test_global_tpr_maxuse_2(topology_st, create_user, request):
request.addfinalizer(fin)
-def test_global_tpr_maxuse_3(topology_st, create_user, request):
+def test_global_tpr_maxuse_3(topology_st, test_user, request):
"""Test global TPR policy : passwordTPRMaxUse
Test that after less than passwordTPRMaxUse failures to bind
A bind with valid password is successfull but passwordMustChange
@@ -587,7 +588,7 @@ def test_global_tpr_maxuse_3(topology_st, create_user, request):
request.addfinalizer(fin)
-def test_global_tpr_maxuse_4(topology_st, create_user, request):
+def test_global_tpr_maxuse_4(topology_st, test_user, request):
"""Test global TPR policy : passwordTPRMaxUse
Test that a TPR attribute passwordTPRMaxUse
can be updated by DM but not the by user itself
@@ -701,7 +702,148 @@ def test_global_tpr_maxuse_4(topology_st, create_user, request):
request.addfinalizer(fin)
-def test_global_tpr_delayValidFrom_1(topology_st, create_user, request):
+def test_local_tpr_maxuse_5(topology_st, test_user, request):
+ """Test TPR local policy overpass global one: passwordTPRMaxUse
+ Test that after passwordTPRMaxUse failures to bind
+ additional bind with valid password are failing with CONSTRAINT_VIOLATION
+
+ :id: c3919707-d804-445a-8754-8385b1072c42
+ :customerscenario: False
+ :setup: Standalone instance
+ :steps:
+ 1. Global password policy Enable passwordMustChange
+ 2. Global password policy Set passwordTPRMaxUse=5
+ 3. Global password policy Set passwordMaxFailure to a higher value to not disturb the test
+ 4. Local password policy Enable passwordMustChange
+ 5. Local password policy Set passwordTPRMaxUse=10 (higher than global)
+ 6. Bind with a wrong password 10 times and check INVALID_CREDENTIALS
+ 7. Check that passwordTPRUseCount got to the limit (5)
+ 8. Bind with a wrong password (CONSTRAINT_VIOLATION)
+ and check passwordTPRUseCount overpass the limit by 1 (11)
+ 9. Bind with a valid password 10 times and check CONSTRAINT_VIOLATION
+ and check passwordTPRUseCount increases
+ 10. Reset password policy configuration and remove local password from user
+ :expected results:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ 10. Success
+ """
+
+ global_tpr_maxuse = 5
+ # Set global password policy config, passwordMaxFailure being higher than
+ # passwordTPRMaxUse so that TPR is enforced first
+ topology_st.standalone.config.replace('passwordMustChange', 'on')
+ topology_st.standalone.config.replace('passwordMaxFailure', str(global_tpr_maxuse + 20))
+ topology_st.standalone.config.replace('passwordTPRMaxUse', str(global_tpr_maxuse))
+ time.sleep(.5)
+
+ local_tpr_maxuse = global_tpr_maxuse + 5
+ # Reset user's password with a local password policy
+ # that has passwordTPRMaxUse higher than global
+ #our_user = UserAccount(topology_st.standalone, TEST_USER_DN)
+ subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(),
+ 'slapd-standalone1',
+ 'localpwp',
+ 'adduser',
+ test_user.dn])
+ subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(),
+ 'slapd-standalone1',
+ 'localpwp',
+ 'set',
+ '--pwptprmaxuse',
+ str(local_tpr_maxuse),
+ '--pwdmustchange',
+ 'on',
+ test_user.dn])
+ test_user.replace('userpassword', PASSWORD)
+ time.sleep(.5)
+
+ # look up to passwordTPRMaxUse with failing
+ # bind to check that the limits of TPR are enforced
+ for i in range(local_tpr_maxuse):
+ # Bind as user with a wrong password
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ test_user.rebind('wrong password')
+ time.sleep(.5)
+
+ # Check that pwdReset is TRUE
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ #assert test_user.get_attr_val_utf8('pwdReset') == 'TRUE'
+
+ # Check that pwdTPRReset is TRUE
+ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
+ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1)
+ log.info("%dth failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (i+1, i+1))
+
+
+ # Now the #failures reached passwordTPRMaxUse
+ # Check that pwdReset is TRUE
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+
+ # Check that pwdTPRReset is TRUE
+ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
+ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse)
+ log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (local_tpr_maxuse))
+
+ # Bind as user with wrong password --> ldap.CONSTRAINT_VIOLATION
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+ test_user.rebind("wrong password")
+ time.sleep(.5)
+
+ # Check that pwdReset is TRUE
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+
+ # Check that pwdTPRReset is TRUE
+ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
+ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + 1)
+ log.info("failing bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (local_tpr_maxuse + i))
+
+ # Now check that all next attempts with correct password are all in LDAP_CONSTRAINT_VIOLATION
+ # and passwordTPRRetryCount remains unchanged
+ # account is now similar to locked
+ for i in range(10):
+ # Bind as user with valid password
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+ test_user.rebind(PASSWORD)
+ time.sleep(.5)
+
+ # Check that pwdReset is TRUE
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+
+ # Check that pwdTPRReset is TRUE
+ # pwdTPRUseCount keeps increasing
+ assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE'
+ assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + i + 2)
+ log.info("Rejected bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (local_tpr_maxuse + i + 2))
+
+
+ def fin():
+ topology_st.standalone.restart()
+ # Reset password policy config
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.config.replace('passwordMustChange', 'off')
+
+ # Remove local password policy from that entry
+ subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(),
+ 'slapd-standalone1',
+ 'localpwp',
+ 'remove',
+ test_user.dn])
+
+ # Reset user's password
+ test_user.replace('userpassword', TEST_USER_PWD)
+
+
+ request.addfinalizer(fin)
+
+def test_global_tpr_delayValidFrom_1(topology_st, test_user, request):
"""Test global TPR policy : passwordTPRDelayValidFrom
Test that a TPR password is not valid before reset time +
passwordTPRDelayValidFrom
@@ -766,7 +908,7 @@ def test_global_tpr_delayValidFrom_1(topology_st, create_user, request):
request.addfinalizer(fin)
-def test_global_tpr_delayValidFrom_2(topology_st, create_user, request):
+def test_global_tpr_delayValidFrom_2(topology_st, test_user, request):
"""Test global TPR policy : passwordTPRDelayValidFrom
Test that a TPR password is valid after reset time +
passwordTPRDelayValidFrom
@@ -838,7 +980,7 @@ def test_global_tpr_delayValidFrom_2(topology_st, create_user, request):
request.addfinalizer(fin)
-def test_global_tpr_delayValidFrom_3(topology_st, create_user, request):
+def test_global_tpr_delayValidFrom_3(topology_st, test_user, request):
"""Test global TPR policy : passwordTPRDelayValidFrom
Test that a TPR attribute passwordTPRDelayValidFrom
can be updated by DM but not the by user itself
@@ -940,7 +1082,7 @@ def test_global_tpr_delayValidFrom_3(topology_st, create_user, request):
request.addfinalizer(fin)
-def test_global_tpr_delayExpireAt_1(topology_st, create_user, request):
+def test_global_tpr_delayExpireAt_1(topology_st, test_user, request):
"""Test global TPR policy : passwordTPRDelayExpireAt
Test that a TPR password is not valid after reset time +
passwordTPRDelayExpireAt
@@ -1010,7 +1152,7 @@ def test_global_tpr_delayExpireAt_1(topology_st, create_user, request):
request.addfinalizer(fin)
-def test_global_tpr_delayExpireAt_2(topology_st, create_user, request):
+def test_global_tpr_delayExpireAt_2(topology_st, test_user, request):
"""Test global TPR policy : passwordTPRDelayExpireAt
Test that a TPR password is valid before reset time +
passwordTPRDelayExpireAt
@@ -1082,7 +1224,7 @@ def test_global_tpr_delayExpireAt_2(topology_st, create_user, request):
request.addfinalizer(fin)
-def test_global_tpr_delayExpireAt_3(topology_st, create_user, request):
+def test_global_tpr_delayExpireAt_3(topology_st, test_user, request):
"""Test global TPR policy : passwordTPRDelayExpireAt
Test that a TPR attribute passwordTPRDelayExpireAt
can be updated by DM but not the by user itself
diff --git a/src/lib389/lib389/cli_conf/pwpolicy.py b/src/lib389/lib389/cli_conf/pwpolicy.py
index 2838afcb8..26af6e7ec 100644
--- a/src/lib389/lib389/cli_conf/pwpolicy.py
+++ b/src/lib389/lib389/cli_conf/pwpolicy.py
@@ -255,6 +255,9 @@ def create_parser(subparsers):
set_parser.add_argument('--pwpinheritglobal', help="Set to \"on\" to allow local policies to inherit the global policy")
set_parser.add_argument('--pwddictcheck', help="Set to \"on\" to enforce CrackLib dictionary checking")
set_parser.add_argument('--pwddictpath', help="Filesystem path to specific/custom CrackLib dictionary files")
+ set_parser.add_argument('--pwptprmaxuse', help="Number of times a reset password can be used for authentication")
+ set_parser.add_argument('--pwptprdelayexpireat', help="Number of seconds after which a reset password expires")
+ set_parser.add_argument('--pwptprdelayvalidfrom', help="Number of seconds to wait before using a reset password to authenticated")
# delete local password policy
del_parser = local_subcommands.add_parser('remove', help='Remove a local password policy')
del_parser.set_defaults(func=del_local_policy)
@@ -291,4 +294,4 @@ def create_parser(subparsers):
#############################################
set_parser.add_argument('DN', nargs=1, help='Set the local policy for this entry DN')
add_subtree_parser.add_argument('DN', nargs=1, help='Add/replace the subtree policy for this entry DN')
- add_user_parser.add_argument('DN', nargs=1, help='Add/replace the local password policy for this entry DN')
\ No newline at end of file
+ add_user_parser.add_argument('DN', nargs=1, help='Add/replace the local password policy for this entry DN')
diff --git a/src/lib389/lib389/pwpolicy.py b/src/lib389/lib389/pwpolicy.py
index 8653cb195..d2427933b 100644
--- a/src/lib389/lib389/pwpolicy.py
+++ b/src/lib389/lib389/pwpolicy.py
@@ -65,7 +65,10 @@ class PwPolicyManager(object):
'pwddictcheck': 'passworddictcheck',
'pwddictpath': 'passworddictpath',
'pwdallowhash': 'nsslapd-allow-hashed-passwords',
- 'pwpinheritglobal': 'nsslapd-pwpolicy-inherit-global'
+ 'pwpinheritglobal': 'nsslapd-pwpolicy-inherit-global',
+ 'pwptprmaxuse': 'passwordTPRMaxUse',
+ 'pwptprdelayexpireat': 'passwordTPRDelayExpireAt',
+ 'pwptprdelayvalidfrom': 'passwordTPRDelayValidFrom'
}
def is_subtree_policy(self, dn):
--
2.31.1

View File

@ -1,179 +0,0 @@
From 7b7217538908ae58df864ef5cd82e1d3303c189f Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 7 Jun 2021 12:58:42 -0400
Subject: [PATCH] Issue 4447 - Crash when the Referential Integrity log is
manually edited
Bug Description: If the referint log is manually edited with a string
that is not a DN the server will crash when processing
the log.
Fix Description: Check for NULL pointers when strtoking the file line.
relates: https://github.com/389ds/389-ds-base/issues/4447
Reviewed by: firstyear(Thanks!)
---
.../tests/suites/plugins/referint_test.py | 72 +++++++++++++++----
ldap/servers/plugins/referint/referint.c | 7 ++
src/lib389/lib389/plugins.py | 15 ++++
3 files changed, 80 insertions(+), 14 deletions(-)
diff --git a/dirsrvtests/tests/suites/plugins/referint_test.py b/dirsrvtests/tests/suites/plugins/referint_test.py
index 02b985767..fda602545 100644
--- a/dirsrvtests/tests/suites/plugins/referint_test.py
+++ b/dirsrvtests/tests/suites/plugins/referint_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2016 Red Hat, Inc.
+# Copyright (C) 2021 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -12,13 +12,11 @@ Created on Dec 12, 2019
@author: tbordaz
'''
import logging
-import subprocess
import pytest
from lib389 import Entry
-from lib389.utils import *
-from lib389.plugins import *
-from lib389._constants import *
-from lib389.idm.user import UserAccounts, UserAccount
+from lib389.plugins import ReferentialIntegrityPlugin
+from lib389._constants import DEFAULT_SUFFIX
+from lib389.idm.user import UserAccounts
from lib389.idm.group import Groups
from lib389.topologies import topology_st as topo
@@ -29,21 +27,27 @@ log = logging.getLogger(__name__)
ESCAPED_RDN_BASE = "foo\\,oo"
def _user_get_dn(no):
uid = '%s%d' % (ESCAPED_RDN_BASE, no)
- dn = 'uid=%s,%s' % (uid, SUFFIX)
+ dn = 'uid=%s,%s' % (uid, DEFAULT_SUFFIX)
return (uid, dn)
def add_escaped_user(server, no):
(uid, dn) = _user_get_dn(no)
log.fatal('Adding user (%s): ' % dn)
- server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'],
- 'uid': [uid],
- 'sn' : [uid],
- 'cn' : [uid]})))
+ users = UserAccounts(server, DEFAULT_SUFFIX, None)
+ user_properties = {
+ 'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson', 'posixAccount'],
+ 'uid': uid,
+ 'cn' : uid,
+ 'sn' : uid,
+ 'uidNumber' : '1000',
+ 'gidNumber' : '2000',
+ 'homeDirectory' : '/home/testuser',
+ }
+ users.create(properties=user_properties)
return dn
-@pytest.mark.ds50020
def test_referential_false_failure(topo):
- """On MODRDN referential integrity can erronously fail
+ """On MODRDN referential integrity can erroneously fail
:id: f77aeb80-c4c4-471b-8c1b-4733b714778b
:setup: Standalone Instance
@@ -100,6 +104,46 @@ def test_referential_false_failure(topo):
inst.restart()
# Here if the bug is fixed, referential is able to update the member value
- inst.rename_s(user1.dn, 'uid=new_test_user_1001', newsuperior=SUFFIX, delold=0)
+ user1.rename('uid=new_test_user_1001', newsuperior=DEFAULT_SUFFIX, deloldrdn=False)
+def test_invalid_referint_log(topo):
+ """If there is an invalid log line in the referint log, make sure the server
+ does not crash at startup
+
+ :id: 34807b5a-ab17-4281-ae48-4e3513e19145
+ :setup: Standalone Instance
+ :steps:
+ 1. Set the referint log delay
+ 2. Create invalid log
+ 3. Start the server (no crash)
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+
+ inst = topo.standalone
+
+ # Set delay - required for log parsing at server startup
+ plugin = ReferentialIntegrityPlugin(inst)
+ plugin.enable()
+ plugin.set_update_delay('2')
+ logfile = plugin.get_log_file()
+ inst.restart()
+
+ # Create invalid log
+ inst.stop()
+ with open(logfile, 'w') as log_fh:
+ log_fh.write("CRASH\n")
+
+ # Start the instance
+ inst.start()
+ assert inst.status()
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
index fd5356d72..28240c1f6 100644
--- a/ldap/servers/plugins/referint/referint.c
+++ b/ldap/servers/plugins/referint/referint.c
@@ -1447,6 +1447,13 @@ referint_thread_func(void *arg __attribute__((unused)))
sdn = slapi_sdn_new_normdn_byref(ptoken);
ptoken = ldap_utf8strtok_r(NULL, delimiter, &iter);
+ if (ptoken == NULL) {
+ /* Invalid line in referint log, skip it */
+ slapi_log_err(SLAPI_LOG_ERR, REFERINT_PLUGIN_SUBSYSTEM,
+ "Skipping invalid referint log line: (%s)\n", thisline);
+ slapi_sdn_free(&sdn);
+ continue;
+ }
if (!strcasecmp(ptoken, "NULL")) {
tmprdn = NULL;
} else {
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
index 2d88e60bd..b07e80022 100644
--- a/src/lib389/lib389/plugins.py
+++ b/src/lib389/lib389/plugins.py
@@ -518,6 +518,21 @@ class ReferentialIntegrityPlugin(Plugin):
self.set('referint-update-delay', str(value))
+ def get_log_file(self):
+ """Get referint log file"""
+
+ return self.get_attr_val_utf8('referint-logfile')
+
+ def get_log_file_formatted(self):
+ """Get referint log file"""
+
+ return self.display_attr('referint-logfile')
+
+ def set_log_file(self, value):
+ """Set referint log file"""
+
+ self.set('referint-logfile', value)
+
def get_membership_attr(self, formatted=False):
"""Get referint-membership-attr attribute"""
--
2.31.1

View File

@ -1,114 +0,0 @@
From 964a153b420b26140e0bbddfbebb4a51aaa0e4ea Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Thu, 3 Jun 2021 15:16:22 +0000
Subject: [PATCH 1/7] Issue 4791 - Missing dependency for RetroCL RFE
Description: The RetroCL exclude attribute RFE is dependent on functionality of the
EntryUUID bug fix, that didn't make into the latest build. This breaks the
RetroCL exclude attr feature so we need to provide a workaround.
Fixes: https://github.com/389ds/389-ds-base/issues/4791
Relates: https://github.com/389ds/389-ds-base/pull/4723
Relates: https://github.com/389ds/389-ds-base/issues/4224
Reviewed by: tbordaz, droideck (Thank you)
---
.../tests/suites/retrocl/basic_test.py | 6 ++--
.../lib389/cli_conf/plugins/retrochangelog.py | 35 +++++++++++++++++--
2 files changed, 36 insertions(+), 5 deletions(-)
diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
index 112c73cb9..f3bc50f29 100644
--- a/dirsrvtests/tests/suites/retrocl/basic_test.py
+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
@@ -17,7 +17,7 @@ from lib389.utils import *
from lib389.tasks import *
from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
from lib389.cli_base.dsrc import dsrc_arg_concat
-from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add
+from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add_attr
from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
pytestmark = pytest.mark.tier1
@@ -122,7 +122,7 @@ def test_retrocl_exclude_attr_add(topology_st):
args.bindpw = None
args.prompt = False
args.exclude_attrs = ATTR_HOMEPHONE
- args.func = retrochangelog_add
+ args.func = retrochangelog_add_attr
dsrc_inst = dsrc_arg_concat(args, None)
inst = connect_instance(dsrc_inst, False, args)
result = args.func(inst, None, log, args)
@@ -255,7 +255,7 @@ def test_retrocl_exclude_attr_mod(topology_st):
args.bindpw = None
args.prompt = False
args.exclude_attrs = ATTR_CARLICENSE
- args.func = retrochangelog_add
+ args.func = retrochangelog_add_attr
dsrc_inst = dsrc_arg_concat(args, None)
inst = connect_instance(dsrc_inst, False, args)
result = args.func(inst, None, log, args)
diff --git a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py
index 9940c6532..160fbb82d 100644
--- a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py
+++ b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py
@@ -6,8 +6,13 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
+# JC Work around for missing dependency on https://github.com/389ds/389-ds-base/pull/4344
+import ldap
+
from lib389.plugins import RetroChangelogPlugin
-from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit
+# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344
+# from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add_attr
+from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, _args_to_attrs
arg_to_attr = {
'is_replicated': 'isReplicated',
@@ -18,12 +23,38 @@ arg_to_attr = {
'exclude_attrs': 'nsslapd-exclude-attrs'
}
-
def retrochangelog_edit(inst, basedn, log, args):
log = log.getChild('retrochangelog_edit')
plugin = RetroChangelogPlugin(inst)
generic_object_edit(plugin, log, args, arg_to_attr)
+# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344
+def retrochangelog_add_attr(inst, basedn, log, args):
+ log = log.getChild('retrochangelog_add_attr')
+ plugin = RetroChangelogPlugin(inst)
+ generic_object_add_attr(plugin, log, args, arg_to_attr)
+
+# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344
+def generic_object_add_attr(dsldap_object, log, args, arg_to_attr):
+ """Add an attribute to the entry. This differs to 'edit' as edit uses replace,
+ and this allows multivalues to be added.
+
+ dsldap_object should be a single instance of DSLdapObject with a set dn
+ """
+ log = log.getChild('generic_object_add_attr')
+ # Gather the attributes
+ attrs = _args_to_attrs(args, arg_to_attr)
+
+ modlist = []
+ for attr, value in attrs.items():
+ if not isinstance(value, list):
+ value = [value]
+ modlist.append((ldap.MOD_ADD, attr, value))
+ if len(modlist) > 0:
+ dsldap_object.apply_mods(modlist)
+ log.info("Successfully changed the %s", dsldap_object.dn)
+ else:
+ raise ValueError("There is nothing to set in the %s plugin entry" % dsldap_object.dn)
def _add_parser_args(parser):
parser.add_argument('--is-replicated', choices=['TRUE', 'FALSE'], type=str.upper,
--
2.31.1

View File

@ -1,642 +0,0 @@
From d2ac7e98d53cfe6c74c99ddf3504b1072418f05a Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 11 Mar 2021 10:12:46 -0500
Subject: [PATCH] Issue 4656 - remove problematic language from ds-replcheck
Description: remove master from ds-replcheck and replace it with supplier
relates: https://github.com/389ds/389-ds-base/issues/4656
Reviewed by: mreynolds
e with '#' will be ignored, and an empty message aborts the commit.
---
ldap/admin/src/scripts/ds-replcheck | 202 ++++++++++++++--------------
1 file changed, 101 insertions(+), 101 deletions(-)
diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
index 169496e8f..f411f357a 100755
--- a/ldap/admin/src/scripts/ds-replcheck
+++ b/ldap/admin/src/scripts/ds-replcheck
@@ -1,7 +1,7 @@
#!/usr/bin/python3
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2020 Red Hat, Inc.
+# Copyright (C) 2021 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -63,7 +63,7 @@ def remove_entry(rentries, dn):
def get_ruv_time(ruv, rid):
"""Take a RUV element (nsds50ruv attribute) and extract the timestamp from maxcsn
:param ruv - A lsit of RUV elements
- :param rid - The rid of the master to extractthe maxcsn time from
+ :param rid - The rid of the supplier to extract the maxcsn time from
:return: The time in seconds of the maxcsn, or 0 if there is no maxcsn, or -1 if
the rid was not found
"""
@@ -213,22 +213,22 @@ def get_ruv_state(opts):
:param opts - all the script options
:return - A text description of the replicaton state
"""
- mtime = get_ruv_time(opts['master_ruv'], opts['rid'])
+ mtime = get_ruv_time(opts['supplier_ruv'], opts['rid'])
rtime = get_ruv_time(opts['replica_ruv'], opts['rid'])
if mtime == -1:
- repl_state = "Replication State: Replica ID ({}) not found in Master's RUV".format(opts['rid'])
+ repl_state = "Replication State: Replica ID ({}) not found in Supplier's RUV".format(opts['rid'])
elif rtime == -1:
repl_state = "Replication State: Replica ID ({}) not found in Replica's RUV (not initialized?)".format(opts['rid'])
elif mtime == 0:
- repl_state = "Replication State: Master has not seen any updates"
+ repl_state = "Replication State: Supplier has not seen any updates"
elif rtime == 0:
- repl_state = "Replication State: Replica has not seen any changes from the Master"
+ repl_state = "Replication State: Replica has not seen any changes from the Supplier"
elif mtime > rtime:
- repl_state = "Replication State: Replica is behind Master by: {} seconds".format(mtime - rtime)
+ repl_state = "Replication State: Replica is behind Supplier by: {} seconds".format(mtime - rtime)
elif mtime < rtime:
- repl_state = "Replication State: Replica is ahead of Master by: {} seconds".format(rtime - mtime)
+ repl_state = "Replication State: Replica is ahead of Supplier by: {} seconds".format(rtime - mtime)
else:
- repl_state = "Replication State: Master and Replica are in perfect synchronization"
+ repl_state = "Replication State: Supplier and Replica are in perfect synchronization"
return repl_state
@@ -238,11 +238,11 @@ def get_ruv_report(opts):
:param opts - all the script options
:return - A text blob to display in the report
"""
- opts['master_ruv'].sort()
+ opts['supplier_ruv'].sort()
opts['replica_ruv'].sort()
- report = "Master RUV:\n"
- for element in opts['master_ruv']:
+ report = "Supplier RUV:\n"
+ for element in opts['supplier_ruv']:
report += " %s\n" % (element)
report += "\nReplica RUV:\n"
for element in opts['replica_ruv']:
@@ -521,7 +521,7 @@ def get_ldif_ruv(LDIF, opts):
def cmp_entry(mentry, rentry, opts):
"""Compare the two entries, and return a "diff map"
- :param mentry - A Master entry
+ :param mentry - A Supplier entry
:param rentry - A Replica entry
:param opts - A Dict of the scripts options
:return - A Dict of the differences in the entry, or None
@@ -536,7 +536,7 @@ def cmp_entry(mentry, rentry, opts):
mlist = list(mentry.data.keys())
#
- # Check master
+ # Check Supplier
#
for mattr in mlist:
if mattr in opts['ignore']:
@@ -555,7 +555,7 @@ def cmp_entry(mentry, rentry, opts):
if not found:
diff['missing'].append("")
found = True
- diff['missing'].append(" - Master's State Info: %s" % (val))
+ diff['missing'].append(" - Supplier's State Info: %s" % (val))
diff['missing'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
else:
# No state info, just move on
@@ -566,18 +566,18 @@ def cmp_entry(mentry, rentry, opts):
if report_conflict(rentry, mattr, opts) and report_conflict(mentry, mattr, opts):
diff['diff'].append(" - Attribute '%s' is different:" % mattr)
if 'nscpentrywsi' in mentry.data:
- # Process Master
+ # Process Supplier
found = False
for val in mentry.data['nscpentrywsi']:
if val.lower().startswith(mattr + ';'):
if not found:
- diff['diff'].append(" Master:")
+ diff['diff'].append(" Supplier:")
diff['diff'].append(" - Value: %s" % (val.split(':')[1].lstrip()))
diff['diff'].append(" - State Info: %s" % (val))
diff['diff'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
found = True
if not found:
- diff['diff'].append(" Master: ")
+ diff['diff'].append(" Supplier: ")
for val in mentry.data[mattr]:
# This is an "origin" value which means it's never been
# updated since replication was set up. So its the
@@ -605,7 +605,7 @@ def cmp_entry(mentry, rentry, opts):
diff['diff'].append("")
else:
# no state info, report what we got
- diff['diff'].append(" Master: ")
+ diff['diff'].append(" Supplier: ")
for val in mentry.data[mattr]:
diff['diff'].append(" - %s: %s" % (mattr, val))
diff['diff'].append(" Replica: ")
@@ -622,9 +622,9 @@ def cmp_entry(mentry, rentry, opts):
continue
if rattr not in mlist:
- # Master is missing the attribute
+ # Supplier is missing the attribute
if report_conflict(rentry, rattr, opts):
- diff['missing'].append(" - Master missing attribute: \"%s\"" % (rattr))
+ diff['missing'].append(" - Supplier missing attribute: \"%s\"" % (rattr))
diff_count += 1
if 'nscpentrywsi' in rentry.data:
found = False
@@ -663,7 +663,7 @@ def do_offline_report(opts, output_file=None):
try:
MLDIF = open(opts['mldif'], "r")
except Exception as e:
- print('Failed to open Master LDIF: ' + str(e))
+ print('Failed to open Supplier LDIF: ' + str(e))
return
try:
@@ -676,10 +676,10 @@ def do_offline_report(opts, output_file=None):
# Verify LDIF Files
try:
if opts['verbose']:
- print("Validating Master ldif file ({})...".format(opts['mldif']))
+ print("Validating Supplier ldif file ({})...".format(opts['mldif']))
LDIFRecordList(MLDIF).parse()
except ValueError:
- print('Master LDIF file in invalid, aborting...')
+ print('Supplier LDIF file in invalid, aborting...')
MLDIF.close()
RLDIF.close()
return
@@ -696,34 +696,34 @@ def do_offline_report(opts, output_file=None):
# Get all the dn's, and entry counts
if opts['verbose']:
print ("Gathering all the DN's...")
- master_dns = get_dns(MLDIF, opts['mldif'], opts)
+ supplier_dns = get_dns(MLDIF, opts['mldif'], opts)
replica_dns = get_dns(RLDIF, opts['rldif'], opts)
- if master_dns is None or replica_dns is None:
+ if supplier_dns is None or replica_dns is None:
print("Aborting scan...")
MLDIF.close()
RLDIF.close()
sys.exit(1)
- m_count = len(master_dns)
+ m_count = len(supplier_dns)
r_count = len(replica_dns)
# Get DB RUV
if opts['verbose']:
print ("Gathering the database RUV's...")
- opts['master_ruv'] = get_ldif_ruv(MLDIF, opts)
+ opts['supplier_ruv'] = get_ldif_ruv(MLDIF, opts)
opts['replica_ruv'] = get_ldif_ruv(RLDIF, opts)
- """ Compare the master entries with the replica's. Take our list of dn's from
- the master ldif and get that entry( dn) from the master and replica ldif. In
+ """ Compare the Supplier entries with the replica's. Take our list of dn's from
+ the Supplier ldif and get that entry( dn) from the Supplier and replica ldif. In
this phase we keep keep track of conflict/tombstone counts, and we check for
missing entries and entry differences. We only need to do the entry diff
checking in this phase - we do not need to do it when process the replica dn's
because if the entry exists in both LDIF's then we already checked or diffs
- while processing the master dn's.
+ while processing the Supplier dn's.
"""
if opts['verbose']:
- print ("Comparing Master to Replica...")
+ print ("Comparing Supplier to Replica...")
missing = False
- for dn in master_dns:
+ for dn in supplier_dns:
mresult = ldif_search(MLDIF, dn)
if mresult['entry'] is None and mresult['conflict'] is None and not mresult['tombstone']:
# Try from the beginning
@@ -736,7 +736,7 @@ def do_offline_report(opts, output_file=None):
rresult['conflict'] is not None or rresult['tombstone']):
""" We can safely remove this DN from the replica dn list as it
does not need to be checked again. This also speeds things up
- when doing the replica vs master phase.
+ when doing the replica vs Supplier phase.
"""
replica_dns.remove(dn)
@@ -766,7 +766,7 @@ def do_offline_report(opts, output_file=None):
missing_report += (' Entries missing on Replica:\n')
missing = True
if mresult['entry'] and 'createtimestamp' in mresult['entry'].data:
- missing_report += (' - %s (Created on Master at: %s)\n' %
+ missing_report += (' - %s (Created on Supplier at: %s)\n' %
(dn, convert_timestamp(mresult['entry'].data['createtimestamp'][0])))
else:
missing_report += (' - %s\n' % dn)
@@ -791,7 +791,7 @@ def do_offline_report(opts, output_file=None):
remaining conflict & tombstone entries as well.
"""
if opts['verbose']:
- print ("Comparing Replica to Master...")
+ print ("Comparing Replica to Supplier...")
MLDIF.seek(0)
RLDIF.seek(0)
missing = False
@@ -811,7 +811,7 @@ def do_offline_report(opts, output_file=None):
if mresult['entry'] is None and mresult['glue'] is None:
MLDIF.seek(rresult['idx']) # Set the LDIF cursor/index to the last good line
if not missing:
- missing_report += (' Entries missing on Master:\n')
+ missing_report += (' Entries missing on Supplier:\n')
missing = True
if rresult['entry'] and 'createtimestamp' in rresult['entry'].data:
missing_report += (' - %s (Created on Replica at: %s)\n' %
@@ -837,12 +837,12 @@ def do_offline_report(opts, output_file=None):
final_report += get_ruv_report(opts)
final_report += ('Entry Counts\n')
final_report += ('=====================================================\n\n')
- final_report += ('Master: %d\n' % (m_count))
+ final_report += ('Supplier: %d\n' % (m_count))
final_report += ('Replica: %d\n\n' % (r_count))
final_report += ('\nTombstones\n')
final_report += ('=====================================================\n\n')
- final_report += ('Master: %d\n' % (mtombstones))
+ final_report += ('Supplier: %d\n' % (mtombstones))
final_report += ('Replica: %d\n' % (rtombstones))
final_report += get_conflict_report(mconflicts, rconflicts, opts['conflicts'])
@@ -859,9 +859,9 @@ def do_offline_report(opts, output_file=None):
final_report += ('\nResult\n')
final_report += ('=====================================================\n\n')
if missing_report == "" and len(diff_report) == 0:
- final_report += ('No replication differences between Master and Replica\n')
+ final_report += ('No replication differences between Supplier and Replica\n')
else:
- final_report += ('There are replication differences between Master and Replica\n')
+ final_report += ('There are replication differences between Supplier and Replica\n')
if output_file:
output_file.write(final_report)
@@ -871,8 +871,8 @@ def do_offline_report(opts, output_file=None):
def check_for_diffs(mentries, mglue, rentries, rglue, report, opts):
"""Online mode only - Check for diffs, return the updated report
- :param mentries - Master entries
- :param mglue - Master glue entries
+ :param mentries - Supplier entries
+ :param mglue - Supplier glue entries
:param rentries - Replica entries
:param rglue - Replica glue entries
:param report - A Dict of the entire report
@@ -947,8 +947,8 @@ def validate_suffix(ldapnode, suffix, hostname):
# Check suffix is replicated
try:
replica_filter = "(&(objectclass=nsds5replica)(nsDS5ReplicaRoot=%s))" % suffix
- master_replica = ldapnode.search_s("cn=config",ldap.SCOPE_SUBTREE,replica_filter)
- if (len(master_replica) != 1):
+ supplier_replica = ldapnode.search_s("cn=config",ldap.SCOPE_SUBTREE,replica_filter)
+ if (len(supplier_replica) != 1):
print("Error: Failed to validate suffix in {}. {} is not replicated.".format(hostname, suffix))
return False
except ldap.LDAPError as e:
@@ -969,7 +969,7 @@ def connect_to_replicas(opts):
muri = "%s://%s" % (opts['mprotocol'], opts['mhost'].replace("/", "%2f"))
else:
muri = "%s://%s:%s/" % (opts['mprotocol'], opts['mhost'], opts['mport'])
- master = SimpleLDAPObject(muri)
+ supplier = SimpleLDAPObject(muri)
if opts['rprotocol'].lower() == 'ldapi':
ruri = "%s://%s" % (opts['rprotocol'], opts['rhost'].replace("/", "%2f"))
@@ -978,23 +978,23 @@ def connect_to_replicas(opts):
replica = SimpleLDAPObject(ruri)
# Set timeouts
- master.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
- master.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
+ supplier.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
+ supplier.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
replica.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
replica.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
# Setup Secure Connection
if opts['certdir'] is not None:
- # Setup Master
+ # Setup Supplier
if opts['mprotocol'] != LDAPI:
- master.set_option(ldap.OPT_X_TLS_CACERTDIR, opts['certdir'])
- master.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD)
+ supplier.set_option(ldap.OPT_X_TLS_CACERTDIR, opts['certdir'])
+ supplier.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD)
if opts['mprotocol'] == LDAP:
# Do StartTLS
try:
- master.start_tls_s()
+ supplier.start_tls_s()
except ldap.LDAPError as e:
- print('TLS negotiation failed on Master: {}'.format(str(e)))
+ print('TLS negotiation failed on Supplier: {}'.format(str(e)))
exit(1)
# Setup Replica
@@ -1006,17 +1006,17 @@ def connect_to_replicas(opts):
try:
replica.start_tls_s()
except ldap.LDAPError as e:
- print('TLS negotiation failed on Master: {}'.format(str(e)))
+ print('TLS negotiation failed on Supplier: {}'.format(str(e)))
exit(1)
- # Open connection to master
+ # Open connection to Supplier
try:
- master.simple_bind_s(opts['binddn'], opts['bindpw'])
+ supplier.simple_bind_s(opts['binddn'], opts['bindpw'])
except ldap.SERVER_DOWN as e:
print(f"Cannot connect to {muri} ({str(e)})")
sys.exit(1)
except ldap.LDAPError as e:
- print("Error: Failed to authenticate to Master: ({}). "
+ print("Error: Failed to authenticate to Supplier: ({}). "
"Please check your credentials and LDAP urls are correct.".format(str(e)))
sys.exit(1)
@@ -1034,7 +1034,7 @@ def connect_to_replicas(opts):
# Validate suffix
if opts['verbose']:
print ("Validating suffix ...")
- if not validate_suffix(master, opts['suffix'], opts['mhost']):
+ if not validate_suffix(supplier, opts['suffix'], opts['mhost']):
sys.exit(1)
if not validate_suffix(replica,opts['suffix'], opts['rhost']):
@@ -1042,16 +1042,16 @@ def connect_to_replicas(opts):
# Get the RUVs
if opts['verbose']:
- print ("Gathering Master's RUV...")
+ print ("Gathering Supplier's RUV...")
try:
- master_ruv = master.search_s(opts['suffix'], ldap.SCOPE_SUBTREE, RUV_FILTER, ['nsds50ruv'])
- if len(master_ruv) > 0:
- opts['master_ruv'] = ensure_list_str(master_ruv[0][1]['nsds50ruv'])
+ supplier_ruv = supplier.search_s(opts['suffix'], ldap.SCOPE_SUBTREE, RUV_FILTER, ['nsds50ruv'])
+ if len(supplier_ruv) > 0:
+ opts['supplier_ruv'] = ensure_list_str(supplier_ruv[0][1]['nsds50ruv'])
else:
- print("Error: Master does not have an RUV entry")
+ print("Error: Supplier does not have an RUV entry")
sys.exit(1)
except ldap.LDAPError as e:
- print("Error: Failed to get Master RUV entry: {}".format(str(e)))
+ print("Error: Failed to get Supplier RUV entry: {}".format(str(e)))
sys.exit(1)
if opts['verbose']:
@@ -1067,12 +1067,12 @@ def connect_to_replicas(opts):
print("Error: Failed to get Replica RUV entry: {}".format(str(e)))
sys.exit(1)
- # Get the master RID
+ # Get the Supplier RID
if opts['verbose']:
- print("Getting Master's replica ID")
+ print("Getting Supplier's replica ID")
try:
search_filter = "(&(objectclass=nsds5Replica)(nsDS5ReplicaRoot={})(nsDS5ReplicaId=*))".format(opts['suffix'])
- replica_entry = master.search_s("cn=config", ldap.SCOPE_SUBTREE, search_filter)
+ replica_entry = supplier.search_s("cn=config", ldap.SCOPE_SUBTREE, search_filter)
if len(replica_entry) > 0:
opts['rid'] = ensure_int(replica_entry[0][1]['nsDS5ReplicaId'][0])
else:
@@ -1081,7 +1081,7 @@ def connect_to_replicas(opts):
print("Error: Failed to get Replica entry: {}".format(str(e)))
sys.exit(1)
- return (master, replica, opts)
+ return (supplier, replica, opts)
def print_online_report(report, opts, output_file):
@@ -1104,11 +1104,11 @@ def print_online_report(report, opts, output_file):
final_report += get_ruv_report(opts)
final_report += ('Entry Counts\n')
final_report += ('=====================================================\n\n')
- final_report += ('Master: %d\n' % (report['m_count']))
+ final_report += ('Supplier: %d\n' % (report['m_count']))
final_report += ('Replica: %d\n\n' % (report['r_count']))
final_report += ('\nTombstones\n')
final_report += ('=====================================================\n\n')
- final_report += ('Master: %d\n' % (report['mtombstones']))
+ final_report += ('Supplier: %d\n' % (report['mtombstones']))
final_report += ('Replica: %d\n' % (report['rtombstones']))
final_report += report['conflict']
missing = False
@@ -1121,7 +1121,7 @@ def print_online_report(report, opts, output_file):
final_report += (' Entries missing on Replica:\n')
for entry in report['r_missing']:
if 'createtimestamp' in entry.data:
- final_report += (' - %s (Created on Master at: %s)\n' %
+ final_report += (' - %s (Created on Supplier at: %s)\n' %
(entry.dn, convert_timestamp(entry.data['createtimestamp'][0])))
else:
final_report += (' - %s\n' % (entry.dn))
@@ -1129,7 +1129,7 @@ def print_online_report(report, opts, output_file):
if m_missing > 0:
if r_missing > 0:
final_report += ('\n')
- final_report += (' Entries missing on Master:\n')
+ final_report += (' Entries missing on Supplier:\n')
for entry in report['m_missing']:
if 'createtimestamp' in entry.data:
final_report += (' - %s (Created on Replica at: %s)\n' %
@@ -1146,9 +1146,9 @@ def print_online_report(report, opts, output_file):
final_report += ('\nResult\n')
final_report += ('=====================================================\n\n')
if not missing and len(report['diff']) == 0:
- final_report += ('No replication differences between Master and Replica\n')
+ final_report += ('No replication differences between Supplier and Replica\n')
else:
- final_report += ('There are replication differences between Master and Replica\n')
+ final_report += ('There are replication differences between Supplier and Replica\n')
if output_file:
output_file.write(final_report)
@@ -1170,7 +1170,7 @@ def remove_state_info(entry):
def get_conflict_report(mentries, rentries, verbose):
"""Gather the conflict entry dn's for each replica
- :param mentries - Master entries
+ :param mentries - Supplier entries
:param rentries - Replica entries
:param verbose - verbose logging
:return - A text blob to dispaly in the report
@@ -1197,7 +1197,7 @@ def get_conflict_report(mentries, rentries, verbose):
report = "\n\nConflict Entries\n"
report += "=====================================================\n\n"
if len(m_conflicts) > 0:
- report += ('Master Conflict Entries: %d\n' % (len(m_conflicts)))
+ report += ('Supplier Conflict Entries: %d\n' % (len(m_conflicts)))
if verbose:
for entry in m_conflicts:
report += ('\n - %s\n' % (entry['dn']))
@@ -1239,8 +1239,8 @@ def do_online_report(opts, output_file=None):
rconflicts = []
mconflicts = []
- # Fire off paged searches on Master and Replica
- master, replica, opts = connect_to_replicas(opts)
+ # Fire off paged searches on Supplier and Replica
+ supplier, replica, opts = connect_to_replicas(opts)
if opts['verbose']:
print('Start searching and comparing...')
@@ -1248,12 +1248,12 @@ def do_online_report(opts, output_file=None):
controls = [paged_ctrl]
req_pr_ctrl = controls[0]
try:
- master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
- "(|(objectclass=*)(objectclass=ldapsubentry)(objectclass=nstombstone))",
- ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'],
- serverctrls=controls)
+ supplier_msgid = supplier.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+ "(|(objectclass=*)(objectclass=ldapsubentry)(objectclass=nstombstone))",
+ ['*', 'createtimestamp', 'nscpentrywsi', 'nsds5replconflict'],
+ serverctrls=controls)
except ldap.LDAPError as e:
- print("Error: Failed to get Master entries: %s", str(e))
+ print("Error: Failed to get Supplier entries: %s", str(e))
sys.exit(1)
try:
replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
@@ -1268,11 +1268,11 @@ def do_online_report(opts, output_file=None):
while not m_done or not r_done:
try:
if not m_done:
- m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid)
+ m_rtype, m_rdata, m_rmsgid, m_rctrls = supplier.result3(supplier_msgid)
elif not r_done:
m_rdata = []
except ldap.LDAPError as e:
- print("Error: Problem getting the results from the master: %s", str(e))
+ print("Error: Problem getting the results from the Supplier: %s", str(e))
sys.exit(1)
try:
if not r_done:
@@ -1299,7 +1299,7 @@ def do_online_report(opts, output_file=None):
report, opts)
if not m_done:
- # Master
+ # Supplier
m_pctrls = [
c
for c in m_rctrls
@@ -1310,11 +1310,11 @@ def do_online_report(opts, output_file=None):
try:
# Copy cookie from response control to request control
req_pr_ctrl.cookie = m_pctrls[0].cookie
- master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+ supplier_msgid = supplier.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
"(|(objectclass=*)(objectclass=ldapsubentry))",
['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
except ldap.LDAPError as e:
- print("Error: Problem searching the master: %s", str(e))
+ print("Error: Problem searching the Supplier: %s", str(e))
sys.exit(1)
else:
m_done = True # No more pages available
@@ -1354,7 +1354,7 @@ def do_online_report(opts, output_file=None):
print_online_report(report, opts, output_file)
# unbind
- master.unbind_s()
+ supplier.unbind_s()
replica.unbind_s()
@@ -1367,18 +1367,18 @@ def init_online_params(args):
# Make sure the URLs are different
if args.murl == args.rurl:
- print("Master and Replica LDAP URLs are the same, they must be different")
+ print("Supplier and Replica LDAP URLs are the same, they must be different")
sys.exit(1)
- # Parse Master url
+ # Parse Supplier url
if not ldapurl.isLDAPUrl(args.murl):
- print("Master LDAP URL is invalid")
+ print("Supplier LDAP URL is invalid")
sys.exit(1)
murl = ldapurl.LDAPUrl(args.murl)
if murl.urlscheme in VALID_PROTOCOLS:
opts['mprotocol'] = murl.urlscheme
else:
- print('Unsupported ldap url protocol (%s) for Master, please use "ldaps" or "ldap"' %
+ print('Unsupported ldap url protocol (%s) for Supplier, please use "ldaps" or "ldap"' %
murl.urlscheme)
sys.exit(1)
@@ -1520,7 +1520,7 @@ def offline_report(args):
print ("LDIF file ({}) is empty".format(ldif_dir))
sys.exit(1)
if opts['mldif'] == opts['rldif']:
- print("The Master and Replica LDIF files must be different")
+ print("The Supplier and Replica LDIF files must be different")
sys.exit(1)
OUTPUT_FILE = None
@@ -1547,7 +1547,7 @@ def get_state(args):
"""Just do the RUV comparision
"""
opts = init_online_params(args)
- master, replica, opts = connect_to_replicas(opts)
+ supplier, replica, opts = connect_to_replicas(opts)
print(get_ruv_state(opts))
@@ -1569,10 +1569,10 @@ def main():
# Get state
state_parser = subparsers.add_parser('state', help="Get the current replicaton state between two replicas")
state_parser.set_defaults(func=get_state)
- state_parser.add_argument('-m', '--master-url', help='The LDAP URL for the Master server',
- dest='murl', default=None, required=True)
+ state_parser.add_argument('-m', '--supplier-url', help='The LDAP URL for the Supplier server',
+ dest='murl', default=None, required=True)
state_parser.add_argument('-r', '--replica-url', help='The LDAP URL for the Replica server',
- dest='rurl', required=True, default=None)
+ dest='rurl', required=True, default=None)
state_parser.add_argument('-b', '--suffix', help='Replicated suffix', dest='suffix', required=True)
state_parser.add_argument('-D', '--bind-dn', help='The Bind DN', required=True, dest='binddn', default=None)
state_parser.add_argument('-w', '--bind-pw', help='The Bind password', dest='bindpw', default=None)
@@ -1586,7 +1586,7 @@ def main():
# Online mode
online_parser = subparsers.add_parser('online', help="Compare two online replicas for differences")
online_parser.set_defaults(func=online_report)
- online_parser.add_argument('-m', '--master-url', help='The LDAP URL for the Master server (REQUIRED)',
+ online_parser.add_argument('-m', '--supplier-url', help='The LDAP URL for the Supplier server (REQUIRED)',
dest='murl', default=None, required=True)
online_parser.add_argument('-r', '--replica-url', help='The LDAP URL for the Replica server (REQUIRED)',
dest='rurl', required=True, default=None)
@@ -1612,12 +1612,12 @@ def main():
# Offline LDIF mode
offline_parser = subparsers.add_parser('offline', help="Compare two replication LDIF files for differences (LDIF file generated by 'db2ldif -r')")
offline_parser.set_defaults(func=offline_report)
- offline_parser.add_argument('-m', '--master-ldif', help='Master LDIF file',
+ offline_parser.add_argument('-m', '--supplier-ldif', help='Supplier LDIF file',
dest='mldif', default=None, required=True)
offline_parser.add_argument('-r', '--replica-ldif', help='Replica LDIF file',
dest='rldif', default=None, required=True)
offline_parser.add_argument('--rid', dest='rid', default=None, required=True,
- help='The Replica Identifer (rid) for the "Master" server')
+ help='The Replica Identifier (rid) for the "Supplier" server')
offline_parser.add_argument('-b', '--suffix', help='Replicated suffix', dest='suffix', required=True)
offline_parser.add_argument('-c', '--conflicts', help='Display verbose conflict information', action='store_true',
dest='conflicts', default=False)
--
2.31.1

View File

@ -1,373 +0,0 @@
From 55a47c1bfe1ce1c27e470384c4f1d50895db25f7 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 13 Jul 2021 14:18:03 -0400
Subject: [PATCH] Issue 4443 - Internal unindexed searches in syncrepl/retro
changelog
Bug Description:
When a non-system index is added to a backend it is
disabled until the database is initialized or reindexed.
So in the case of the retro changelog the changenumber index
is alway disabled by default since it is never initialized.
This leads to unexpected unindexed searches of the retro
changelog.
Fix Description:
If an index has "nsSystemIndex" set to "true" then enable it
immediately.
relates: https://github.com/389ds/389-ds-base/issues/4443
Reviewed by: spichugi & tbordaz(Thanks!!)
---
.../tests/suites/retrocl/basic_test.py | 53 ++++++++-------
.../suites/retrocl/retrocl_indexing_test.py | 68 +++++++++++++++++++
ldap/servers/plugins/retrocl/retrocl_create.c | 2 +-
.../slapd/back-ldbm/ldbm_index_config.c | 25 +++++--
src/lib389/lib389/_mapped_object.py | 13 ++++
5 files changed, 130 insertions(+), 31 deletions(-)
create mode 100644 dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py
diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py
index f3bc50f29..84d513829 100644
--- a/dirsrvtests/tests/suites/retrocl/basic_test.py
+++ b/dirsrvtests/tests/suites/retrocl/basic_test.py
@@ -8,7 +8,6 @@
import logging
import ldap
-import time
import pytest
from lib389.topologies import topology_st
from lib389.plugins import RetroChangelogPlugin
@@ -18,7 +17,8 @@ from lib389.tasks import *
from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance
from lib389.cli_base.dsrc import dsrc_arg_concat
from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add_attr
-from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts
+from lib389.idm.user import UserAccount, UserAccounts
+from lib389._mapped_object import DSLdapObjects
pytestmark = pytest.mark.tier1
@@ -82,7 +82,7 @@ def test_retrocl_exclude_attr_add(topology_st):
log.info('Adding user1')
try:
- user1 = users.create(properties={
+ users.create(properties={
'sn': '1',
'cn': 'user 1',
'uid': 'user1',
@@ -97,17 +97,18 @@ def test_retrocl_exclude_attr_add(topology_st):
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
- log.error("Failed to add user1")
+ log.error("Failed to add user1: " + str(e))
log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
try:
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+ retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX)
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
except ldap.LDAPError as e:
- log.fatal("Changelog search failed, error: " +str(e))
+ log.fatal("Changelog search failed, error: " + str(e))
assert False
assert len(cllist) > 0
- if cllist[0].hasAttr('changes'):
- clstr = (cllist[0].getValue('changes')).decode()
+ if cllist[0].present('changes'):
+ clstr = str(cllist[0].get_attr_vals_utf8('changes'))
assert ATTR_HOMEPHONE in clstr
assert ATTR_CARLICENSE in clstr
@@ -134,7 +135,7 @@ def test_retrocl_exclude_attr_add(topology_st):
log.info('Adding user2')
try:
- user2 = users.create(properties={
+ users.create(properties={
'sn': '2',
'cn': 'user 2',
'uid': 'user2',
@@ -149,18 +150,18 @@ def test_retrocl_exclude_attr_add(topology_st):
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
- log.error("Failed to add user2")
+ log.error("Failed to add user2: " + str(e))
log.info('Verify homePhone attr is not in the changelog changestring')
try:
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN)
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER2_DN})')
assert len(cllist) > 0
- if cllist[0].hasAttr('changes'):
- clstr = (cllist[0].getValue('changes')).decode()
+ if cllist[0].present('changes'):
+ clstr = str(cllist[0].get_attr_vals_utf8('changes'))
assert ATTR_HOMEPHONE not in clstr
assert ATTR_CARLICENSE in clstr
except ldap.LDAPError as e:
- log.fatal("Changelog search failed, error: " +str(e))
+ log.fatal("Changelog search failed, error: " + str(e))
assert False
def test_retrocl_exclude_attr_mod(topology_st):
@@ -228,19 +229,20 @@ def test_retrocl_exclude_attr_mod(topology_st):
'homeDirectory': '/home/user1',
'userpassword': USER_PW})
except ldap.ALREADY_EXISTS:
- pass
+ user1 = UserAccount(st, dn=USER1_DN)
except ldap.LDAPError as e:
- log.error("Failed to add user1")
+ log.error("Failed to add user1: " + str(e))
log.info('Verify homePhone and carLicense attrs are in the changelog changestring')
try:
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+ retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX)
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
except ldap.LDAPError as e:
- log.fatal("Changelog search failed, error: " +str(e))
+ log.fatal("Changelog search failed, error: " + str(e))
assert False
assert len(cllist) > 0
- if cllist[0].hasAttr('changes'):
- clstr = (cllist[0].getValue('changes')).decode()
+ if cllist[0].present('changes'):
+ clstr = str(cllist[0].get_attr_vals_utf8('changes'))
assert ATTR_HOMEPHONE in clstr
assert ATTR_CARLICENSE in clstr
@@ -267,24 +269,25 @@ def test_retrocl_exclude_attr_mod(topology_st):
log.info('Modify user1 carLicense attribute')
try:
- st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")])
+ user1.replace(ATTR_CARLICENSE, "123WX321")
except ldap.LDAPError as e:
log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc'])
assert False
log.info('Verify carLicense attr is not in the changelog changestring')
try:
- cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN)
+ cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})')
assert len(cllist) > 0
# There will be 2 entries in the changelog for this user, we are only
#interested in the second one, the modify operation.
- if cllist[1].hasAttr('changes'):
- clstr = (cllist[1].getValue('changes')).decode()
+ if cllist[1].present('changes'):
+ clstr = str(cllist[1].get_attr_vals_utf8('changes'))
assert ATTR_CARLICENSE not in clstr
except ldap.LDAPError as e:
- log.fatal("Changelog search failed, error: " +str(e))
+ log.fatal("Changelog search failed, error: " + str(e))
assert False
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py
new file mode 100644
index 000000000..b1dfe962c
--- /dev/null
+++ b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py
@@ -0,0 +1,68 @@
+import logging
+import pytest
+import os
+from lib389._constants import RETROCL_SUFFIX, DEFAULT_SUFFIX
+from lib389.topologies import topology_st as topo
+from lib389.plugins import RetroChangelogPlugin
+from lib389.idm.user import UserAccounts
+from lib389._mapped_object import DSLdapObjects
+log = logging.getLogger(__name__)
+
+
+def test_indexing_is_online(topo):
+ """Test that the changenmumber index is online right after enabling the plugin
+
+ :id: 16f4c001-9e0c-4448-a2b3-08ac1e85d40f
+ :setup: Standalone Instance
+ :steps:
+ 1. Enable retro cl
+ 2. Perform some updates
+ 3. Search for "(changenumber>=-1)", and it is not partially unindexed
+ 4. Search for "(&(changenumber>=-1)(targetuniqueid=*))", and it is not partially unindexed
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ # Enable plugin
+ topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off')
+ plugin = RetroChangelogPlugin(topo.standalone)
+ plugin.enable()
+ topo.standalone.restart()
+
+ # Do a bunch of updates
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
+ user_entry = users.create(properties={
+ 'sn': '1',
+ 'cn': 'user 1',
+ 'uid': 'user1',
+ 'uidNumber': '11',
+ 'gidNumber': '111',
+ 'givenname': 'user1',
+ 'homePhone': '0861234567',
+ 'carLicense': '131D16674',
+ 'mail': 'user1@whereever.com',
+ 'homeDirectory': '/home'
+ })
+ for count in range(0, 10):
+ user_entry.replace('mail', f'test{count}@test.com')
+
+ # Search the retro cl, and check for error messages
+ filter_simple = '(changenumber>=-1)'
+ filter_compound = '(&(changenumber>=-1)(targetuniqueid=*))'
+ retro_changelog_suffix = DSLdapObjects(topo.standalone, basedn=RETROCL_SUFFIX)
+ retro_changelog_suffix.filter(filter_simple)
+ assert not topo.standalone.searchAccessLog('Partially Unindexed Filter')
+
+ # Search the retro cl again with compound filter
+ retro_changelog_suffix.filter(filter_compound)
+ assert not topo.standalone.searchAccessLog('Partially Unindexed Filter')
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main(["-s", CURRENT_FILE])
diff --git a/ldap/servers/plugins/retrocl/retrocl_create.c b/ldap/servers/plugins/retrocl/retrocl_create.c
index 571e6899f..5bfde7831 100644
--- a/ldap/servers/plugins/retrocl/retrocl_create.c
+++ b/ldap/servers/plugins/retrocl/retrocl_create.c
@@ -133,7 +133,7 @@ retrocl_create_be(const char *bedir)
val.bv_len = strlen(val.bv_val);
slapi_entry_add_values(e, "cn", vals);
- val.bv_val = "false";
+ val.bv_val = "true"; /* enables the index */
val.bv_len = strlen(val.bv_val);
slapi_entry_add_values(e, "nssystemindex", vals);
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
index 9722d0ce7..38e7368e1 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
@@ -25,7 +25,7 @@ int ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, Slapi_Entry *en
#define INDEXTYPE_NONE 1
static int
-ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, char *err_buf)
+ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, PRBool *is_system_index, char *err_buf)
{
Slapi_Attr *attr;
const struct berval *attrValue;
@@ -78,6 +78,15 @@ ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_st
}
}
+ *is_system_index = PR_FALSE;
+ if (0 == slapi_entry_attr_find(e, "nsSystemIndex", &attr)) {
+ slapi_attr_first_value(attr, &sval);
+ attrValue = slapi_value_get_berval(sval);
+ if (strcasecmp(attrValue->bv_val, "true") == 0) {
+ *is_system_index = PR_TRUE;
+ }
+ }
+
/* ok the entry is good to process, pass it to attr_index_config */
if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0, err_buf)) {
slapi_ch_free_string(index_name);
@@ -101,9 +110,10 @@ ldbm_index_init_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
void *arg)
{
ldbm_instance *inst = (ldbm_instance *)arg;
+ PRBool is_system_index = PR_FALSE;
returntext[0] = '\0';
- *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, NULL);
+ *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, &is_system_index /* not used */, NULL);
if (*returncode == LDAP_SUCCESS) {
return SLAPI_DSE_CALLBACK_OK;
} else {
@@ -126,17 +136,21 @@ ldbm_instance_index_config_add_callback(Slapi_PBlock *pb __attribute__((unused))
{
ldbm_instance *inst = (ldbm_instance *)arg;
char *index_name = NULL;
+ PRBool is_system_index = PR_FALSE;
returntext[0] = '\0';
- *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, returntext);
+ *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, &is_system_index, returntext);
if (*returncode == LDAP_SUCCESS) {
struct attrinfo *ai = NULL;
/* if the index is a "system" index, we assume it's being added by
* by the server, and it's okay for the index to go online immediately.
* if not, we set the index "offline" so it won't actually be used
* until someone runs db2index on it.
+ * If caller wants to add an index that they want to be online
+ * immediately they can also set "nsSystemIndex" to "true" in the
+ * index config entry (e.g. is_system_index).
*/
- if (!ldbm_attribute_always_indexed(index_name)) {
+ if (!is_system_index && !ldbm_attribute_always_indexed(index_name)) {
ainfo_get(inst->inst_be, index_name, &ai);
PR_ASSERT(ai != NULL);
ai->ai_indexmask |= INDEX_OFFLINE;
@@ -386,13 +400,14 @@ ldbm_instance_index_config_enable_index(ldbm_instance *inst, Slapi_Entry *e)
char *index_name = NULL;
int rc = LDAP_SUCCESS;
struct attrinfo *ai = NULL;
+ PRBool is_system_index = PR_FALSE;
index_name = slapi_entry_attr_get_charptr(e, "cn");
if (index_name) {
ainfo_get(inst->inst_be, index_name, &ai);
}
if (!ai) {
- rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, NULL);
+ rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, &is_system_index /* not used */, NULL);
}
if (rc == LDAP_SUCCESS) {
/* Assume the caller knows if it is OK to go online immediately */
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
index b6d778b01..fe610d175 100644
--- a/src/lib389/lib389/_mapped_object.py
+++ b/src/lib389/lib389/_mapped_object.py
@@ -148,6 +148,19 @@ class DSLdapObject(DSLogging, DSLint):
return True
+ def search(self, scope="subtree", filter='objectclass=*'):
+ search_scope = ldap.SCOPE_SUBTREE
+ if scope == 'base':
+ search_scope = ldap.SCOPE_BASE
+ elif scope == 'one':
+ search_scope = ldap.SCOPE_ONE
+ elif scope == 'subtree':
+ search_scope = ldap.SCOPE_SUBTREE
+ return self._instance.search_ext_s(self._dn, search_scope, filter,
+ serverctrls=self._server_controls,
+ clientctrls=self._client_controls,
+ escapehatch='i am sure')
+
def display(self, attrlist=['*']):
"""Get an entry but represent it as a string LDIF
--
2.31.1

View File

@ -1,121 +0,0 @@
From 2f0218f91d35c83a2aaecb71849a54b2481390ab Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Fri, 9 Jul 2021 11:53:35 +1000
Subject: [PATCH] Issue 4817 - BUG - locked crypt accounts on import may allow
all passwords (#4819)
Bug Description: Due to mishanding of short dbpwd hashes, the
crypt_r algorithm was misused and was only comparing salts
in some cases, rather than checking the actual content
of the password.
Fix Description: Stricter checks on dbpwd lengths to ensure
that content passed to crypt_r has at least 2 salt bytes and
1 hash byte, as well as stricter checks on ct_memcmp to ensure
that compared values are the same length, rather than potentially
allowing overruns/short comparisons.
fixes: https://github.com/389ds/389-ds-base/issues/4817
Author: William Brown <william@blackhats.net.au>
Review by: @mreynolds389
---
.../password/pwd_crypt_asterisk_test.py | 50 +++++++++++++++++++
ldap/servers/plugins/pwdstorage/crypt_pwd.c | 20 +++++---
2 files changed, 64 insertions(+), 6 deletions(-)
create mode 100644 dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py
diff --git a/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py b/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py
new file mode 100644
index 000000000..d76614db1
--- /dev/null
+++ b/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py
@@ -0,0 +1,50 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2021 William Brown <william@blackhats.net.au>
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import ldap
+import pytest
+from lib389.topologies import topology_st
+from lib389.idm.user import UserAccounts
+from lib389._constants import (DEFAULT_SUFFIX, PASSWORD)
+
+pytestmark = pytest.mark.tier1
+
+def test_password_crypt_asterisk_is_rejected(topology_st):
+ """It was reported that {CRYPT}* was allowing all passwords to be
+ valid in the bind process. This checks that we should be rejecting
+ these as they should represent locked accounts. Similar, {CRYPT}!
+
+ :id: 0b8f1a6a-f3eb-4443-985e-da14d0939dc3
+ :setup: Single instance
+ :steps: 1. Set a password hash in with CRYPT and the content *
+ 2. Test a bind
+ 3. Set a password hash in with CRYPT and the content !
+ 4. Test a bind
+ :expectedresults:
+ 1. Successfully set the values
+ 2. The bind fails
+ 3. Successfully set the values
+ 4. The bind fails
+ """
+ topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on')
+ topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'off')
+
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
+ user = users.create_test_user()
+
+ user.set('userPassword', "{CRYPT}*")
+
+ # Attempt to bind with incorrect password.
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ badconn = user.bind('badpassword')
+
+ user.set('userPassword', "{CRYPT}!")
+ # Attempt to bind with incorrect password.
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ badconn = user.bind('badpassword')
+
diff --git a/ldap/servers/plugins/pwdstorage/crypt_pwd.c b/ldap/servers/plugins/pwdstorage/crypt_pwd.c
index 9031b2199..1b37d41ed 100644
--- a/ldap/servers/plugins/pwdstorage/crypt_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/crypt_pwd.c
@@ -48,15 +48,23 @@ static unsigned char itoa64[] = /* 0 ... 63 => ascii - 64 */
int
crypt_pw_cmp(const char *userpwd, const char *dbpwd)
{
- int rc;
- char *cp;
+ int rc = -1;
+ char *cp = NULL;
+ size_t dbpwd_len = strlen(dbpwd);
struct crypt_data data;
data.initialized = 0;
- /* we use salt (first 2 chars) of encoded password in call to crypt_r() */
- cp = crypt_r(userpwd, dbpwd, &data);
- if (cp) {
- rc = slapi_ct_memcmp(dbpwd, cp, strlen(dbpwd));
+ /*
+ * there MUST be at least 2 chars of salt and some pw bytes, else this is INVALID and will
+ * allow any password to bind as we then only compare SALTS.
+ */
+ if (dbpwd_len >= 3) {
+ /* we use salt (first 2 chars) of encoded password in call to crypt_r() */
+ cp = crypt_r(userpwd, dbpwd, &data);
+ }
+ /* If these are not the same length, we can not proceed safely with memcmp. */
+ if (cp && dbpwd_len == strlen(cp)) {
+ rc = slapi_ct_memcmp(dbpwd, cp, dbpwd_len);
} else {
rc = -1;
}
--
2.31.1

View File

@ -1,39 +0,0 @@
From 31d53e7da585723e66b838dcf34b77ea7c9968c6 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 21 Jul 2021 09:16:30 +0200
Subject: [PATCH] Issue 4837 - persistent search returns entries even when an
error is returned by content-sync-plugin (#4838)
Bug description:
When a ldap client sends a sync request control, the server response may contain a sync state control.
If the server fails to create the control the search should fail.
Fix description:
In case the server fails to create the response control
logs the failure of the pre_search
relates: https://github.com/389ds/389-ds-base/issues/4837
Reviewed by: Simon Pichugin
Platforms tested: RH8.4
---
ldap/servers/plugins/sync/sync_refresh.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ldap/servers/plugins/sync/sync_refresh.c b/ldap/servers/plugins/sync/sync_refresh.c
index 646ff760b..4cbb6a949 100644
--- a/ldap/servers/plugins/sync/sync_refresh.c
+++ b/ldap/servers/plugins/sync/sync_refresh.c
@@ -213,7 +213,7 @@ sync_srch_refresh_pre_entry(Slapi_PBlock *pb)
Slapi_Entry *e;
slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_ENTRY, &e);
LDAPControl **ctrl = (LDAPControl **)slapi_ch_calloc(2, sizeof(LDAPControl *));
- sync_create_state_control(e, &ctrl[0], LDAP_SYNC_ADD, NULL);
+ rc = sync_create_state_control(e, &ctrl[0], LDAP_SYNC_ADD, NULL);
slapi_pblock_set(pb, SLAPI_SEARCH_CTRLS, ctrl);
}
return (rc);
--
2.31.1

View File

@ -1,49 +0,0 @@
From 616dc9964a4675dea2ab2c2efb9bd31c3903e29d Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 26 Jul 2021 15:22:08 -0400
Subject: [PATCH] Hardcode gost crypt passsword storage scheme
---
.../plugins/pwdstorage/gost_yescrypt.c | 22 -------------------
1 file changed, 22 deletions(-)
diff --git a/ldap/servers/plugins/pwdstorage/gost_yescrypt.c b/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
index 67b39395e..7b0d1653c 100644
--- a/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
+++ b/ldap/servers/plugins/pwdstorage/gost_yescrypt.c
@@ -11,7 +11,6 @@
#include <crypt.h>
-#ifdef XCRYPT_VERSION_STR
#include <errno.h>
int
gost_yescrypt_pw_cmp(const char *userpwd, const char *dbpwd)
@@ -64,24 +63,3 @@ gost_yescrypt_pw_enc(const char *pwd)
return enc;
}
-#else
-
-/*
- * We do not have xcrypt, so always fail all checks.
- */
-int
-gost_yescrypt_pw_cmp(const char *userpwd __attribute__((unused)), const char *dbpwd __attribute__((unused)))
-{
- slapi_log_err(SLAPI_LOG_ERR, GOST_YESCRYPT_SCHEME_NAME,
- "Unable to use gost_yescrypt_pw_cmp, xcrypt is not available.\n");
- return 1;
-}
-
-char *
-gost_yescrypt_pw_enc(const char *pwd __attribute__((unused)))
-{
- slapi_log_err(SLAPI_LOG_ERR, GOST_YESCRYPT_SCHEME_NAME,
- "Unable to use gost_yescrypt_pw_enc, xcrypt is not available.\n");
- return NULL;
-}
-#endif
--
2.31.1

View File

@ -1,39 +0,0 @@
From a2a51130b2f95316237b85da099a8be734969e54 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Sat, 24 Apr 2021 21:37:54 +0100
Subject: [PATCH] Issue 4734 - import of entry with no parent warning (#4735)
Description: Online import of ldif file that contains an entry with
no parent doesnt generate a task warning.
Fixes: https://github.com/389ds/389-ds-base/issues/4734
Author: vashirov@redhat.com (Thanks)
Reviewed by: mreynolds, jchapma
---
ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
index 905a84e74..35183ed59 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
@@ -2767,8 +2767,14 @@ import_foreman(void *param)
if (job->flags & FLAG_ABORT) {
goto error;
}
+
+ /* capture skipped entry warnings for this task */
+ if((job) && (job->skipped)) {
+ slapi_task_set_warning(job->task, WARN_SKIPPED_IMPORT_ENTRY);
+ }
}
+
slapi_pblock_destroy(pb);
info->state = FINISHED;
return;
--
2.31.1

View File

@ -1,37 +0,0 @@
From f9bc249b2baa11a8ac0eb54e4077eb706d137e38 Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Thu, 19 Aug 2021 11:06:06 +1000
Subject: [PATCH] Issue 4872 - BUG - entryuuid enabled by default causes
replication issues (#4876)
Bug Description: Due to older servers missing the syntax
plugin this breaks schema replication and causes cascading
errors.
Fix Description: This changes the syntax to be a case
insensitive string, while leaving the plugins in place
for other usage.
fixes: https://github.com/389ds/389-ds-base/issues/4872
Author: William Brown <william@blackhats.net.au>
Review by: @mreynolds389 @progier389
---
ldap/schema/03entryuuid.ldif | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/ldap/schema/03entryuuid.ldif b/ldap/schema/03entryuuid.ldif
index cbde981fe..f7a7f40d5 100644
--- a/ldap/schema/03entryuuid.ldif
+++ b/ldap/schema/03entryuuid.ldif
@@ -13,4 +13,5 @@ dn: cn=schema
#
# attributes
#
-attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
+# attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
+attributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation )
--
2.31.1

View File

@ -1,125 +0,0 @@
From 120511d35095a48d60abbb7cb2367d0c30fbc757 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 25 Aug 2021 13:20:56 -0400
Subject: [PATCH] Remove GOST-YESCRYPT password sotrage scheme
---
.../tests/suites/password/pwd_algo_test.py | 1 -
ldap/ldif/template-dse-minimal.ldif.in | 9 ---------
ldap/ldif/template-dse.ldif.in | 9 ---------
ldap/servers/plugins/pwdstorage/pwd_init.c | 18 ------------------
ldap/servers/slapd/fedse.c | 13 -------------
5 files changed, 50 deletions(-)
diff --git a/dirsrvtests/tests/suites/password/pwd_algo_test.py b/dirsrvtests/tests/suites/password/pwd_algo_test.py
index 66bda420e..88f8e40b7 100644
--- a/dirsrvtests/tests/suites/password/pwd_algo_test.py
+++ b/dirsrvtests/tests/suites/password/pwd_algo_test.py
@@ -124,7 +124,6 @@ def _test_algo_for_pbkdf2(inst, algo_name):
('CLEAR', 'CRYPT', 'CRYPT-MD5', 'CRYPT-SHA256', 'CRYPT-SHA512',
'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA',
'SSHA256', 'SSHA384', 'SSHA512', 'PBKDF2_SHA256', 'DEFAULT',
- 'GOST_YESCRYPT',
))
def test_pwd_algo_test(topology_st, algo):
"""Assert that all of our password algorithms correctly PASS and FAIL varying
diff --git a/ldap/ldif/template-dse-minimal.ldif.in b/ldap/ldif/template-dse-minimal.ldif.in
index 2eccae9b2..1a05f4a67 100644
--- a/ldap/ldif/template-dse-minimal.ldif.in
+++ b/ldap/ldif/template-dse-minimal.ldif.in
@@ -194,15 +194,6 @@ nsslapd-pluginarg1: nsds5ReplicaCredentials
nsslapd-pluginid: aes-storage-scheme
nsslapd-pluginprecedence: 1
-dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config
-objectclass: top
-objectclass: nsSlapdPlugin
-cn: GOST_YESCRYPT
-nsslapd-pluginpath: libpwdstorage-plugin
-nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init
-nsslapd-plugintype: pwdstoragescheme
-nsslapd-pluginenabled: on
-
dn: cn=Syntax Validation Task,cn=plugins,cn=config
objectclass: top
objectclass: nsSlapdPlugin
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index 7e7480cba..f30531bec 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -242,15 +242,6 @@ nsslapd-pluginarg2: nsds5ReplicaBootstrapCredentials
nsslapd-pluginid: aes-storage-scheme
nsslapd-pluginprecedence: 1
-dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config
-objectclass: top
-objectclass: nsSlapdPlugin
-cn: GOST_YESCRYPT
-nsslapd-pluginpath: libpwdstorage-plugin
-nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init
-nsslapd-plugintype: pwdstoragescheme
-nsslapd-pluginenabled: on
-
dn: cn=Syntax Validation Task,cn=plugins,cn=config
objectclass: top
objectclass: nsSlapdPlugin
diff --git a/ldap/servers/plugins/pwdstorage/pwd_init.c b/ldap/servers/plugins/pwdstorage/pwd_init.c
index 606e63404..59cfc4684 100644
--- a/ldap/servers/plugins/pwdstorage/pwd_init.c
+++ b/ldap/servers/plugins/pwdstorage/pwd_init.c
@@ -52,8 +52,6 @@ static Slapi_PluginDesc smd5_pdesc = {"smd5-password-storage-scheme", VENDOR, DS
static Slapi_PluginDesc pbkdf2_sha256_pdesc = {"pbkdf2-sha256-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Salted PBKDF2 SHA256 hash algorithm (PBKDF2_SHA256)"};
-static Slapi_PluginDesc gost_yescrypt_pdesc = {"gost-yescrypt-password-storage-scheme", VENDOR, DS_PACKAGE_VERSION, "Yescrypt KDF algorithm (Streebog256)"};
-
static char *plugin_name = "NSPwdStoragePlugin";
int
@@ -431,19 +429,3 @@ pbkdf2_sha256_pwd_storage_scheme_init(Slapi_PBlock *pb)
return rc;
}
-int
-gost_yescrypt_pwd_storage_scheme_init(Slapi_PBlock *pb)
-{
- int rc;
-
- slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "=> gost_yescrypt_pwd_storage_scheme_init\n");
-
- rc = slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, (void *)SLAPI_PLUGIN_VERSION_01);
- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&gost_yescrypt_pdesc);
- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_ENC_FN, (void *)gost_yescrypt_pw_enc);
- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *)gost_yescrypt_pw_cmp);
- rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, GOST_YESCRYPT_SCHEME_NAME);
-
- slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "<= gost_yescrypt_pwd_storage_scheme_init %d\n", rc);
- return rc;
-}
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
index 44159c991..24b7ed11c 100644
--- a/ldap/servers/slapd/fedse.c
+++ b/ldap/servers/slapd/fedse.c
@@ -203,19 +203,6 @@ static const char *internal_entries[] =
"nsslapd-pluginVersion: none\n"
"nsslapd-pluginVendor: 389 Project\n"
"nsslapd-pluginDescription: CRYPT-SHA512\n",
-
- "dn: cn=GOST_YESCRYPT,cn=Password Storage Schemes,cn=plugins,cn=config\n"
- "objectclass: top\n"
- "objectclass: nsSlapdPlugin\n"
- "cn: GOST_YESCRYPT\n"
- "nsslapd-pluginpath: libpwdstorage-plugin\n"
- "nsslapd-plugininitfunc: gost_yescrypt_pwd_storage_scheme_init\n"
- "nsslapd-plugintype: pwdstoragescheme\n"
- "nsslapd-pluginenabled: on\n"
- "nsslapd-pluginId: GOST_YESCRYPT\n"
- "nsslapd-pluginVersion: none\n"
- "nsslapd-pluginVendor: 389 Project\n"
- "nsslapd-pluginDescription: GOST_YESCRYPT\n",
};
static int NUM_INTERNAL_ENTRIES = sizeof(internal_entries) / sizeof(internal_entries[0]);
--
2.31.1

View File

@ -1,44 +0,0 @@
From df0ccce06259b9ef06d522e61da4e3ffcbbf5016 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 25 Aug 2021 16:54:57 -0400
Subject: [PATCH] Issue 4884 - server crashes when dnaInterval attribute is set
to zero
Bug Description:
A division by zero crash occurs if the dnaInterval is set to zero
Fix Description:
Validate the config value of dnaInterval and adjust it to the
default/safe value of "1" if needed.
relates: https://github.com/389ds/389-ds-base/issues/4884
Reviewed by: tbordaz(Thanks!)
---
ldap/servers/plugins/dna/dna.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index 928a3f54a..c983ebdd0 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -1025,7 +1025,14 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
value = slapi_entry_attr_get_charptr(e, DNA_INTERVAL);
if (value) {
+ errno = 0;
entry->interval = strtoull(value, 0, 0);
+ if (entry->interval == 0 || errno == ERANGE) {
+ slapi_log_err(SLAPI_LOG_WARNING, DNA_PLUGIN_SUBSYSTEM,
+ "dna_parse_config_entry - Invalid value for dnaInterval (%s), "
+ "Using default value of 1\n", value);
+ entry->interval = 1;
+ }
slapi_ch_free_string(&value);
}
--
2.31.1

108
SOURCES/Cargo.lock generated
View File

@ -36,9 +36,9 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
[[package]]
name = "bitflags"
version = "1.2.1"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "byteorder"
@ -65,9 +65,9 @@ dependencies = [
[[package]]
name = "cc"
version = "1.0.68"
version = "1.0.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787"
checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd"
dependencies = [
"jobserver",
]
@ -156,24 +156,24 @@ dependencies = [
[[package]]
name = "hermit-abi"
version = "0.1.18"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
"libc",
]
[[package]]
name = "itoa"
version = "0.4.7"
version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"
checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
[[package]]
name = "jobserver"
version = "0.1.22"
version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd"
checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa"
dependencies = [
"libc",
]
@ -186,9 +186,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.95"
version = "0.2.104"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36"
checksum = "7b2f96d100e1cf1929e7719b7edb3b90ab5298072638fccd77be9ce942ecdfce"
[[package]]
name = "librnsslapd"
@ -219,15 +219,15 @@ dependencies = [
[[package]]
name = "once_cell"
version = "1.7.2"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3"
checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56"
[[package]]
name = "openssl"
version = "0.10.34"
version = "0.10.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8"
checksum = "8d9facdb76fec0b73c406f125d44d86fdad818d66fef0531eec9233ca425ff4a"
dependencies = [
"bitflags",
"cfg-if",
@ -239,9 +239,9 @@ dependencies = [
[[package]]
name = "openssl-sys"
version = "0.9.63"
version = "0.9.67"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98"
checksum = "69df2d8dfc6ce3aaf44b40dec6f487d5a886516cf6879c49e98e0710f310a058"
dependencies = [
"autocfg",
"cc",
@ -271,15 +271,15 @@ dependencies = [
[[package]]
name = "pkg-config"
version = "0.3.19"
version = "0.3.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
checksum = "7c9b1041b4387893b91ee6746cddfc28516aff326a3519fb2adf820932c5e6cb"
[[package]]
name = "ppv-lite86"
version = "0.2.10"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
checksum = "c3ca011bd0129ff4ae15cd04c4eef202cadf6c51c21e47aba319b4e0501db741"
[[package]]
name = "proc-macro-hack"
@ -289,27 +289,27 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
[[package]]
name = "proc-macro2"
version = "1.0.27"
version = "1.0.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
checksum = "edc3358ebc67bc8b7fa0c007f945b0b18226f78437d61bec735a9eb96b61ee70"
dependencies = [
"unicode-xid",
]
[[package]]
name = "quote"
version = "1.0.9"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rand"
version = "0.8.3"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
dependencies = [
"libc",
"rand_chacha",
@ -319,9 +319,9 @@ dependencies = [
[[package]]
name = "rand_chacha"
version = "0.3.0"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
@ -329,27 +329,27 @@ dependencies = [
[[package]]
name = "rand_core"
version = "0.6.2"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"
checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
dependencies = [
"getrandom",
]
[[package]]
name = "rand_hc"
version = "0.3.0"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
dependencies = [
"rand_core",
]
[[package]]
name = "redox_syscall"
version = "0.2.8"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc"
checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff"
dependencies = [
"bitflags",
]
@ -375,18 +375,18 @@ checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
[[package]]
name = "serde"
version = "1.0.126"
version = "1.0.130"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03"
checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.126"
version = "1.0.130"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43"
checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b"
dependencies = [
"proc-macro2",
"quote",
@ -395,9 +395,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.64"
version = "1.0.68"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79"
checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8"
dependencies = [
"itoa",
"ryu",
@ -429,9 +429,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
[[package]]
name = "syn"
version = "1.0.72"
version = "1.0.80"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82"
checksum = "d010a1623fbd906d51d650a9916aaefc05ffa0e4053ff7fe601167f3e715d194"
dependencies = [
"proc-macro2",
"quote",
@ -440,9 +440,9 @@ dependencies = [
[[package]]
name = "synstructure"
version = "0.12.4"
version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f"
dependencies = [
"proc-macro2",
"quote",
@ -484,9 +484,9 @@ dependencies = [
[[package]]
name = "unicode-width"
version = "0.1.8"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
[[package]]
name = "unicode-xid"
@ -505,9 +505,9 @@ dependencies = [
[[package]]
name = "vcpkg"
version = "0.2.13"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "025ce40a007e1907e58d5bc1a594def78e5573bb0b1160bc389634e8f12e4faa"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
[[package]]
name = "vec_map"
@ -545,18 +545,18 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "zeroize"
version = "1.3.0"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd"
checksum = "bf68b08513768deaa790264a7fac27a58cbf2705cfcdc9448362229217d7e970"
dependencies = [
"zeroize_derive",
]
[[package]]
name = "zeroize_derive"
version = "1.1.0"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1"
checksum = "bdff2024a851a322b08f179173ae2ba620445aef1e838f0c196820eade4ae0c7"
dependencies = [
"proc-macro2",
"quote",

View File

@ -47,8 +47,8 @@ ExcludeArch: i686
Summary: 389 Directory Server (base)
Name: 389-ds-base
Version: 1.4.3.23
Release: %{?relprefix}10%{?prerel}%{?dist}
Version: 1.4.3.28
Release: %{?relprefix}6%{?prerel}%{?dist}
License: GPLv3+
URL: https://www.port389.org
Group: System Environment/Daemons
@ -61,56 +61,67 @@ Provides: ldif2ldbm >= 0
Provides: bundled(crate(ansi_term)) = 0.11.0
Provides: bundled(crate(atty)) = 0.2.14
Provides: bundled(crate(autocfg)) = 1.0.1
Provides: bundled(crate(base64)) = 0.10.1
Provides: bundled(crate(bitflags)) = 1.2.1
Provides: bundled(crate(byteorder)) = 1.4.2
Provides: bundled(crate(base64)) = 0.13.0
Provides: bundled(crate(bitflags)) = 1.3.2
Provides: bundled(crate(byteorder)) = 1.4.3
Provides: bundled(crate(cbindgen)) = 0.9.1
Provides: bundled(crate(cc)) = 1.0.66
Provides: bundled(crate(cfg-if)) = 0.1.10
Provides: bundled(crate(cc)) = 1.0.71
Provides: bundled(crate(cfg-if)) = 1.0.0
Provides: bundled(crate(clap)) = 2.33.3
Provides: bundled(crate(fernet)) = 0.1.3
Provides: bundled(crate(entryuuid)) = 0.1.0
Provides: bundled(crate(entryuuid_syntax)) = 0.1.0
Provides: bundled(crate(fernet)) = 0.1.4
Provides: bundled(crate(foreign-types)) = 0.3.2
Provides: bundled(crate(foreign-types-shared)) = 0.1.1
Provides: bundled(crate(getrandom)) = 0.1.16
Provides: bundled(crate(hermit-abi)) = 0.1.17
Provides: bundled(crate(itoa)) = 0.4.7
Provides: bundled(crate(getrandom)) = 0.2.3
Provides: bundled(crate(hermit-abi)) = 0.1.19
Provides: bundled(crate(itoa)) = 0.4.8
Provides: bundled(crate(jobserver)) = 0.1.24
Provides: bundled(crate(lazy_static)) = 1.4.0
Provides: bundled(crate(libc)) = 0.2.82
Provides: bundled(crate(libc)) = 0.2.104
Provides: bundled(crate(librnsslapd)) = 0.1.0
Provides: bundled(crate(librslapd)) = 0.1.0
Provides: bundled(crate(log)) = 0.4.11
Provides: bundled(crate(openssl)) = 0.10.32
Provides: bundled(crate(openssl-sys)) = 0.9.60
Provides: bundled(crate(pkg-config)) = 0.3.19
Provides: bundled(crate(ppv-lite86)) = 0.2.10
Provides: bundled(crate(proc-macro2)) = 1.0.24
Provides: bundled(crate(quote)) = 1.0.8
Provides: bundled(crate(rand)) = 0.7.3
Provides: bundled(crate(rand_chacha)) = 0.2.2
Provides: bundled(crate(rand_core)) = 0.5.1
Provides: bundled(crate(rand_hc)) = 0.2.0
Provides: bundled(crate(redox_syscall)) = 0.1.57
Provides: bundled(crate(log)) = 0.4.14
Provides: bundled(crate(once_cell)) = 1.8.0
Provides: bundled(crate(openssl)) = 0.10.36
Provides: bundled(crate(openssl-sys)) = 0.9.67
Provides: bundled(crate(paste)) = 0.1.18
Provides: bundled(crate(paste-impl)) = 0.1.18
Provides: bundled(crate(pkg-config)) = 0.3.20
Provides: bundled(crate(ppv-lite86)) = 0.2.14
Provides: bundled(crate(proc-macro-hack)) = 0.5.19
Provides: bundled(crate(proc-macro2)) = 1.0.30
Provides: bundled(crate(quote)) = 1.0.10
Provides: bundled(crate(rand)) = 0.8.4
Provides: bundled(crate(rand_chacha)) = 0.3.1
Provides: bundled(crate(rand_core)) = 0.6.3
Provides: bundled(crate(rand_hc)) = 0.3.1
Provides: bundled(crate(redox_syscall)) = 0.2.10
Provides: bundled(crate(remove_dir_all)) = 0.5.3
Provides: bundled(crate(rsds)) = 0.1.0
Provides: bundled(crate(ryu)) = 1.0.5
Provides: bundled(crate(serde)) = 1.0.118
Provides: bundled(crate(serde_derive)) = 1.0.118
Provides: bundled(crate(serde_json)) = 1.0.61
Provides: bundled(crate(serde)) = 1.0.130
Provides: bundled(crate(serde_derive)) = 1.0.130
Provides: bundled(crate(serde_json)) = 1.0.68
Provides: bundled(crate(slapd)) = 0.1.0
Provides: bundled(crate(slapi_r_plugin)) = 0.1.0
Provides: bundled(crate(strsim)) = 0.8.0
Provides: bundled(crate(syn)) = 1.0.58
Provides: bundled(crate(tempfile)) = 3.1.0
Provides: bundled(crate(syn)) = 1.0.80
Provides: bundled(crate(synstructure)) = 0.12.6
Provides: bundled(crate(tempfile)) = 3.2.0
Provides: bundled(crate(textwrap)) = 0.11.0
Provides: bundled(crate(toml)) = 0.5.8
Provides: bundled(crate(unicode-width)) = 0.1.8
Provides: bundled(crate(unicode-xid)) = 0.2.1
Provides: bundled(crate(vcpkg)) = 0.2.11
Provides: bundled(crate(unicode-width)) = 0.1.9
Provides: bundled(crate(unicode-xid)) = 0.2.2
Provides: bundled(crate(uuid)) = 0.8.2
Provides: bundled(crate(vcpkg)) = 0.2.15
Provides: bundled(crate(vec_map)) = 0.8.2
Provides: bundled(crate(wasi)) = 0.9.0+wasi_snapshot_preview1
Provides: bundled(crate(wasi)) = 0.10.2+wasi_snapshot_preview1
Provides: bundled(crate(winapi)) = 0.3.9
Provides: bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0
Provides: bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0
Provides: bundled(crate(zeroize)) = 1.4.2
Provides: bundled(crate(zeroize_derive)) = 1.2.0
##### Bundled cargo crates list - END #####
BuildRequires: nspr-devel
@ -234,40 +245,27 @@ Source2: %{name}-devel.README
Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2
%endif
%if %{use_rust}
Source4: vendor-%{version}-2.tar.gz
Source4: vendor-%{version}-1.tar.gz
Source5: Cargo.lock
%endif
Patch01: 0001-Issue-4747-Remove-unstable-unstatus-tests-from-PRCI-.patch
Patch02: 0002-Issue-4701-RFE-Exclude-attributes-from-retro-changel.patch
Patch03: 0003-Ticket-137-Implement-EntryUUID-plugin.patch
Patch04: 0004-Ticket-4326-entryuuid-fixup-did-not-work-correctly-4.patch
Patch05: 0005-Issue-4498-BUG-entryuuid-replication-may-not-work-45.patch
Patch06: 0006-Issue-4421-Unable-to-build-with-Rust-enabled-in-clos.patch
Patch07: 0007-Ticket-51175-resolve-plugin-name-leaking.patch
Patch08: 0008-Issue-4773-Enable-interval-feature-of-DNA-plugin.patch
Patch09: 0009-Issue-4623-RFE-Monitor-the-current-DB-locks-4762.patch
Patch10: 0010-Issue-4764-replicated-operation-sometime-checks-ACI-.patch
Patch11: 0011-Issue-4778-RFE-Allow-setting-TOD-for-db-compaction-a.patch
Patch12: 0012-Issue-4778-RFE-Add-changelog-compaction-task-in-1.4..patch
Patch13: 0013-Issue-4797-ACL-IP-ADDRESS-evaluation-may-corrupt-c_i.patch
Patch14: 0014-Issue-4396-Minor-memory-leak-in-backend-4558-4572.patch
Patch15: 0015-Issue-4700-Regression-in-winsync-replication-agreeme.patch
Patch16: 0016-Issue-4725-Fix-compiler-warnings.patch
Patch17: 0017-Issue-4814-_cl5_get_tod_expiration-may-crash-at-star.patch
Patch18: 0018-Issue-4789-Temporary-password-rules-are-not-enforce-.patch
Patch19: 0019-Issue-4788-CLI-should-support-Temporary-Password-Rul.patch
Patch20: 0020-Issue-4447-Crash-when-the-Referential-Integrity-log-.patch
Patch21: 0021-Issue-4791-Missing-dependency-for-RetroCL-RFE.patch
Patch22: 0022-Issue-4656-remove-problematic-language-from-ds-replc.patch
Patch23: 0023-Issue-4443-Internal-unindexed-searches-in-syncrepl-r.patch
Patch24: 0024-Issue-4817-BUG-locked-crypt-accounts-on-import-may-a.patch
Patch25: 0025-Issue-4837-persistent-search-returns-entries-even-wh.patch
Patch26: 0026-Hardcode-gost-crypt-passsword-storage-scheme.patch
Patch27: 0027-Issue-4734-import-of-entry-with-no-parent-warning-47.patch
Patch28: 0028-Issue-4872-BUG-entryuuid-enabled-by-default-causes-r.patch
Patch29: 0029-Remove-GOST-YESCRYPT-password-sotrage-scheme.patch
Patch30: 0030-Issue-4884-server-crashes-when-dnaInterval-attribute.patch
Patch01: 0001-Issue-4678-RFE-automatique-disable-of-virtual-attrib.patch
Patch02: 0002-Issue-4943-Fix-csn-generator-to-limit-time-skew-drif.patch
Patch03: 0003-Issue-3584-Fix-PBKDF2_SHA256-hashing-in-FIPS-mode-49.patch
Patch04: 0004-Issue-4956-Automember-allows-invalid-regex-and-does-.patch
Patch05: 0005-Issue-4092-systemd-tmpfiles-warnings.patch
Patch06: 0006-Issue-4973-installer-changes-permissions-on-run.patch
Patch07: 0007-Issue-4973-update-snmp-to-use-run-dirsrv-for-PID-fil.patch
Patch08: 0008-Issue-4978-make-installer-robust.patch
Patch09: 0009-Issue-4972-gecos-with-IA5-introduces-a-compatibility.patch
Patch10: 0010-Issue-4997-Function-declaration-compiler-error-on-1..patch
Patch11: 0011-Issue-4978-use-more-portable-python-command-for-chec.patch
Patch12: 0012-Issue-4959-Invalid-etc-hosts-setup-can-cause-isLocal.patch
Patch13: 0013-CVE-2021-4091-BZ-2030367-double-free-of-the-virtual-.patch
Patch14: 0014-Issue-5127-run-restorecon-on-dev-shm-at-server-start.patch
Patch15: 0015-Issue-5127-ds_selinux_restorecon.sh-always-exit-0.patch
Patch16: 0016-Issue-4775-Add-entryuuid-CLI-and-Fixup-4776.patch
Patch17: 0017-Issue-4775-Fix-cherry-pick-error.patch
%description
389 Directory Server is an LDAPv3 compliant server. The base package includes
@ -695,6 +693,7 @@ exit 0
%{_sbindir}/ns-slapd
%{_mandir}/man8/ns-slapd.8.gz
%{_libexecdir}/%{pkgname}/ds_systemd_ask_password_acl
%{_libexecdir}/%{pkgname}/ds_selinux_restorecon.sh
%{_mandir}/man5/99user.ldif.5.gz
%{_mandir}/man5/certmap.conf.5.gz
%{_mandir}/man5/slapd-collations.conf.5.gz
@ -886,63 +885,42 @@ exit 0
%doc README.md
%changelog
* Thu Aug 26 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-10
- Bump version to 1.4.3.23-10
- Resolves: Bug 1997138 - LDAP server crashes when dnaInterval attribute is set to 0
* Thu Feb 3 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-6
- Bump version to 1.4.3.28-6
- Resolves: Bug 2047171 - Based on 1944494 (RFC 4530 entryUUID attribute) - plugin entryuuid failing
* Wed Aug 25 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-9
- Bump version to 1.4.3.23-9
- Resolves: Bug 1947044 - remove unsupported GOST password storage scheme
* Fri Jan 28 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-5
- Bump version to 1.4.3.28-5
- Resolves: Bug 2045223 - ipa-restore command is failing when restore after uninstalling the server (aprt 2)
* Thu Aug 19 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-8
- Bump version to 1.4.3.23-8
- Resolves: Bug 1947044 - add missing patch for import result code
- Resolves: Bug 1944494 - support for RFC 4530 entryUUID attribute
* Tue Jan 25 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-4
- Bump version to 1.4.3.28-4
- Resolves: Bug 2045223 - ipa-restore command is failing when restore after uninstalling the server
* Mon Jul 26 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-7
- Bump version to 1.4.3.23-7
- Resolves: Bug 1983921 - persistent search returns entries even when an error is returned by content-sync-plugin
* Thu Nov 18 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-3
- Bump version to 1.4.3.28-3
- Resolves: Bug 2030367 - EMBARGOED CVE-2021-4091 389-ds:1.4/389-ds-base: double-free of the virtual attribute context in persistent search
- Resolves: Bug 2033398 - PBKDF2 hashing does not work in FIPS mode
* Fri Jul 16 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-6
- Bump version to 1.4.3.23-6
- Resolves: Bug 1982787 - CRYPT password hash with asterisk allows any bind attempt to succeed
* Thu Nov 18 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-2
- Bump version to 1.4.3.28-2
- Resolves: Bug 2024695 - DB corruption "_entryrdn_insert_key - Same DN (dn: nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff,<SUFFIX>) is already in the entryrdn file"
- Resolves: Bug 1859210 - systemd-tmpfiles warnings
- Resolves: Bug 1913199 - IPA server (389ds) is very slow in execution of some searches (`&(memberOf=...)(objectClass=ipaHost)` in particular)
- Resolves: Bug 1974236 - automatique disable of virtual attribute checking
- Resolves: Bug 1976882 - logconv.pl -j: Use of uninitialized value $first in numeric gt (>)
- Resolves: Bug 1981281 - ipa user-add fails with "gecos: value invalid per syntax: Invalid syntax"
- Resolves: Bug 2015998 - Log the Auto Member invalid regex rules in the LDAP errors log
* Thu Jul 15 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-5
- Bump version to 1.4.3.23-5
- Resolves: Bug 1951020 - Internal unindexed searches in syncrepl
- Resolves: Bug 1978279 - ds-replcheck state output message has 'Master' instead of 'Supplier'
* Thu Oct 21 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-1
- Bump version to 1.4.3.28-1
- Resolves: Bug 2016014 - rebase RHEL 8.6 with 389-ds-base-1.4.3
- Resolves: Bug 1990002 - monitor displays wrong date for connection
- Resolves: Bug 1950335 - upgrade password hash on bind also causes passwordExpirationtime to be updated
- Resolves: Bug 1916292 - Indexing a single backend actually processes all configured backends
- Resolves: Bug 1780842 - [RFE] set db home directory to /dev/shm by default
- Resolves: Bug 2000975 - Retro Changelog does not trim changes
* Tue Jun 29 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-4
- Bump version to 1.4.3.23-4
- Resolves: Bug 1976906 - Instance crash at restart after changelog configuration
- Resolves: Bug 1480323 - ns-slapd crash at startup - Segmentation fault in strcmpi_fast() when the Referential Integrity log is manually edited
- Resolves: Bug 1967596 - Temporary password - add CLI and fix compiler errors
* Thu Jun 17 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-3
- Bump version to 1.4.3.23-3
- Resolves: Bug 1944494 - support for RFC 4530 entryUUID attribute
- Resolves: Bug 1967839 - ACIs are being evaluated against the Replication Manager account in a replication context
- Resolves: Bug 1970259 - A connection can be erroneously flagged as replication conn during evaluation of an aci with ip bind rule
- Resolves: Bug 1972590 - Large updates can reset the CLcache to the beginning of the changelog
- Resolves: Bug 1903221 - Memory leak in 389ds backend (Minor)
* Sun May 30 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-2
- Bump version to 1.4.3.23-2
- Resolves: Bug 1812286 - RFE - Monitor the current DB locks ( nsslapd-db-current-locks )
- Resolves: Bug 1748441 - RFE - Schedule execution of "compactdb" at specific date/time
- Resolves: Bug 1938239 - RFE - Extend DNA plugin to support intervals sizes for subuids
* Fri May 14 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.23-1
- Bump version to 1.4.3.23-1
- Resolves: Bug 1947044 - Rebase 389 DS with 389-ds-base-1.4.3.23 for RHEL 8.5
- Resolves: Bug 1850664 - RFE - Add an option for the Retro Changelog to ignore some attributes
- Resolves: Bug 1903221 - Memory leak in 389ds backend (Minor)
- Resolves: Bug 1898541 - Changelog cache can upload updates from a wrong starting point (CSN)
- Resolves: Bug 1889562 - client psearch with multiple threads hangs if nsslapd-maxthreadsperconn is under sized
- Resolves: Bug 1924848 - Negative wtime on ldapcompare
- Resolves: Bug 1895460 - RFE - Log an additional message if the server certificate nickname doesn't match nsSSLPersonalitySSL value
- Resolves: Bug 1897614 - Performance search rate: change entry cache monitor to recursive pthread mutex
- Resolves: Bug 1939607 - hang because of incorrect accounting of readers in vattr rwlock
- Resolves: Bug 1626633 - [RFE] DS - Update the password policy to support a Temporary Password with expiration
- Resolves: Bug 1952804 - CVE-2021-3514 389-ds:1.4/389-ds-base: sync_repl NULL pointer dereference in sync_create_state_control()