commit 981d5878de3fd5dbf9ec4a63ff7c9b441e45098c Author: James Antill Date: Mon Aug 8 12:26:52 2022 -0400 Import rpm: 51b5b9f9be1e8d5a4e51af73c7b7180c00163a86 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..5367d88 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +SOURCES/389-ds-base-1.4.3.28.tar.bz2 +SOURCES/jemalloc-5.2.1.tar.bz2 +SOURCES/vendor-1.4.3.28-1.tar.gz diff --git a/0001-Issue-4678-RFE-automatique-disable-of-virtual-attrib.patch b/0001-Issue-4678-RFE-automatique-disable-of-virtual-attrib.patch new file mode 100644 index 0000000..5990610 --- /dev/null +++ b/0001-Issue-4678-RFE-automatique-disable-of-virtual-attrib.patch @@ -0,0 +1,738 @@ +From 67e19da62a9e8958458de54173dcd9bcaf53164d Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Thu, 30 Sep 2021 15:59:40 +0200 +Subject: [PATCH 01/12] Issue 4678 - RFE automatique disable of virtual + attribute checking (#4918) + +Bug description: + Virtual attributes are configured via Roles or COS definitions + and registered during initialization of those plugins. + Virtual attributes are processed during search evaluation of + filter and returned attributes. This processing is expensive + and prone to create contention between searches. + Use of virtual attribute is not frequent. So many of the + deployement process virtual attribute even if there is none. + +Fix description: + The fix configure the server to ignore virtual attribute by + default (nsslapd-ignore-virtual-attrs: on). + At startup, if a new virtual attribute is registered or + it exists Roles/COS definitions, then the server is + configured to process the virtual attributes + (nsslapd-ignore-virtual-attrs: off) + design: https://www.port389.org/docs/389ds/design/vattr-automatic-toggle.html + +relates: https://github.com/389ds/389-ds-base/issues/4678 + +Reviewed by: William Brown, Simon Pichugin, Mark Reynolds (Thanks !!) 
+ +Platforms tested: F34 +--- + .../tests/suites/config/config_test.py | 40 +++- + dirsrvtests/tests/suites/cos/cos_test.py | 94 ++++++-- + dirsrvtests/tests/suites/roles/basic_test.py | 200 +++++++++++++++++- + ldap/servers/plugins/roles/roles_cache.c | 9 + + ldap/servers/slapd/libglobs.c | 2 +- + ldap/servers/slapd/main.c | 2 + + ldap/servers/slapd/proto-slap.h | 1 + + ldap/servers/slapd/vattr.c | 127 +++++++++++ + src/lib389/lib389/idm/role.py | 4 + + 9 files changed, 455 insertions(+), 24 deletions(-) + +diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py +index 2ecff8f98..19232c87d 100644 +--- a/dirsrvtests/tests/suites/config/config_test.py ++++ b/dirsrvtests/tests/suites/config/config_test.py +@@ -351,7 +351,7 @@ def test_ignore_virtual_attrs(topo): + :setup: Standalone instance + :steps: + 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config +- 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF ++ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON + 3. Set the valid values i.e. on/ON and off/OFF for nsslapd-ignore-virtual-attrs + 4. Set invalid value for attribute nsslapd-ignore-virtual-attrs + 5. Set nsslapd-ignore-virtual-attrs=off +@@ -374,8 +374,8 @@ def test_ignore_virtual_attrs(topo): + log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') + +- log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") +- assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "off" ++ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") ++ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" + + log.info("Set the valid values i.e. 
on/ON and off/OFF for nsslapd-ignore-virtual-attrs") + for attribute_value in ['on', 'off', 'ON', 'OFF']: +@@ -415,6 +415,40 @@ def test_ignore_virtual_attrs(topo): + log.info("Test if virtual attribute i.e. postal code not shown while nsslapd-ignore-virtual-attrs: on") + assert not test_user.present('postalcode', '117') + ++def test_ignore_virtual_attrs_after_restart(topo): ++ """Test nsslapd-ignore-virtual-attrs configuration attribute ++ The attribute is ON by default. If it set to OFF, it keeps ++ its value on restart ++ ++ :id: ac368649-4fda-473c-9ef8-e0c728b162af ++ :setup: Standalone instance ++ :steps: ++ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config ++ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON ++ 3. Set nsslapd-ignore-virtual-attrs=off ++ 4. restart the instance ++ 5. Check the attribute nsslapd-ignore-virtual-attrs is OFF ++ :expectedresults: ++ 1. This should be successful ++ 2. This should be successful ++ 3. This should be successful ++ 4. This should be successful ++ 5. 
This should be successful ++ """ ++ ++ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") ++ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') ++ ++ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") ++ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" ++ ++ log.info("Set nsslapd-ignore-virtual-attrs = off") ++ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'off') ++ ++ topo.standalone.restart() ++ ++ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") ++ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') + + @pytest.mark.bz918694 + @pytest.mark.ds408 +diff --git a/dirsrvtests/tests/suites/cos/cos_test.py b/dirsrvtests/tests/suites/cos/cos_test.py +index d6a498c73..d1f99f96f 100644 +--- a/dirsrvtests/tests/suites/cos/cos_test.py ++++ b/dirsrvtests/tests/suites/cos/cos_test.py +@@ -6,6 +6,8 @@ + # See LICENSE for details. + # --- END COPYRIGHT BLOCK --- + ++import logging ++import time + import pytest, os, ldap + from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate + from lib389._constants import DEFAULT_SUFFIX +@@ -14,26 +16,37 @@ from lib389.idm.role import FilteredRoles + from lib389.idm.nscontainer import nsContainer + from lib389.idm.user import UserAccount + ++logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ + pytestmark = pytest.mark.tier1 ++@pytest.fixture(scope="function") ++def reset_ignore_vattr(topo, request): ++ default_ignore_vattr_value = topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') ++ def fin(): ++ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', default_ignore_vattr_value) + +-def test_positive(topo): +- """ +- :id: a5a74235-597f-4fe8-8c38-826860927472 +- :setup: server +- :steps: +- 1. Add filter role entry +- 2. Add ns container +- 3. 
Add cos template +- 4. Add CosClassic Definition +- 5. Cos entries should be added and searchable +- 6. employeeType attribute should be there in user entry as per the cos plugin property +- :expectedresults: +- 1. Operation should success +- 2. Operation should success +- 3. Operation should success +- 4. Operation should success +- 5. Operation should success +- 6. Operation should success ++ request.addfinalizer(fin) ++ ++def test_positive(topo, reset_ignore_vattr): ++ """CoS positive tests ++ ++ :id: a5a74235-597f-4fe8-8c38-826860927472 ++ :setup: server ++ :steps: ++ 1. Add filter role entry ++ 2. Add ns container ++ 3. Add cos template ++ 4. Add CosClassic Definition ++ 5. Cos entries should be added and searchable ++ 6. employeeType attribute should be there in user entry as per the cos plugin property ++ :expectedresults: ++ 1. Operation should success ++ 2. Operation should success ++ 3. Operation should success ++ 4. Operation should success ++ 5. Operation should success ++ 6. Operation should success + """ + # Adding ns filter role + roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) +@@ -77,7 +90,52 @@ def test_positive(topo): + + # CoS definition entry's cosSpecifier attribute specifies the employeeType attribute + assert user.present('employeeType') ++ cosdef.delete() ++ ++def test_vattr_on_cos_definition(topo, reset_ignore_vattr): ++ """Test nsslapd-ignore-virtual-attrs configuration attribute ++ The attribute is ON by default. If a cos definition is ++ added it is moved to OFF ++ ++ :id: e7ef5254-386f-4362-bbb4-9409f3f51b08 ++ :setup: Standalone instance ++ :steps: ++ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config ++ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON ++ 3. Create a cos definition for employeeType ++ 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF (with a delay for postop processing) ++ 5. 
Check a message "slapi_vattrspi_regattr - Because employeeType,.." in error logs ++ :expectedresults: ++ 1. This should be successful ++ 2. This should be successful ++ 3. This should be successful ++ 4. This should be successful ++ 5. This should be successful ++ """ ++ ++ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") ++ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') ++ ++ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") ++ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" ++ ++ # creating CosClassicDefinition ++ log.info("Create a cos definition") ++ properties = {'cosTemplateDn': 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX), ++ 'cosAttribute': 'employeeType', ++ 'cosSpecifier': 'nsrole', ++ 'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'} ++ cosdef = CosClassicDefinition(topo.standalone,'cn=cosClassicGenerateEmployeeTypeUsingnsrole,{}'.format(DEFAULT_SUFFIX))\ ++ .create(properties=properties) ++ ++ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") ++ time.sleep(2) ++ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') + ++ topo.standalone.stop() ++ assert topo.standalone.searchErrorsLog("slapi_vattrspi_regattr - Because employeeType is a new registered virtual attribute , nsslapd-ignore-virtual-attrs was set to \'off\'") ++ topo.standalone.start() ++ cosdef.delete() + + if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) +diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py +index 47a531794..bec3aedfc 100644 +--- a/dirsrvtests/tests/suites/roles/basic_test.py ++++ b/dirsrvtests/tests/suites/roles/basic_test.py +@@ -11,6 +11,8 @@ + Importing necessary Modules. 
+ """ + ++import logging ++import time + import os + import pytest + +@@ -22,6 +24,9 @@ from lib389.topologies import topology_st as topo + from lib389.idm.role import FilteredRoles, ManagedRoles, NestedRoles + from lib389.idm.domain import Domain + ++logging.getLogger(__name__).setLevel(logging.INFO) ++log = logging.getLogger(__name__) ++ + pytestmark = pytest.mark.tier1 + + DNBASE = "o=acivattr,{}".format(DEFAULT_SUFFIX) +@@ -35,7 +40,7 @@ FILTERROLESALESROLE = "cn=FILTERROLESALESROLE,{}".format(DNBASE) + FILTERROLEENGROLE = "cn=FILTERROLEENGROLE,{}".format(DNBASE) + + +-def test_filterrole(topo): ++def test_filterrole(topo, request): + """Test Filter Role + + :id: 8ada4064-786b-11e8-8634-8c16451d917b +@@ -136,8 +141,20 @@ def test_filterrole(topo): + SALES_OU, DNBASE]: + UserAccount(topo.standalone, dn_dn).delete() + ++ def fin(): ++ topo.standalone.restart() ++ try: ++ filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) ++ for i in filtered_roles.list(): ++ i.delete() ++ except: ++ pass ++ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') ++ ++ request.addfinalizer(fin) ++ + +-def test_managedrole(topo): ++def test_managedrole(topo, request): + """Test Managed Role + + :id: d52a9c00-3bf6-11e9-9b7b-8c16451d917b +@@ -209,6 +226,16 @@ def test_managedrole(topo): + for i in roles.list(): + i.delete() + ++ def fin(): ++ topo.standalone.restart() ++ try: ++ role = ManagedRoles(topo.standalone, DEFAULT_SUFFIX).get('ROLE1') ++ role.delete() ++ except: ++ pass ++ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') ++ ++ request.addfinalizer(fin) + + @pytest.fixture(scope="function") + def _final(request, topo): +@@ -220,6 +247,7 @@ def _final(request, topo): + def finofaci(): + """ + Removes and Restores ACIs and other users after the test. 
++ And restore nsslapd-ignore-virtual-attrs to default + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.remove_all('aci') +@@ -234,6 +262,8 @@ def _final(request, topo): + for i in aci_list: + domain.add("aci", i) + ++ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') ++ + request.addfinalizer(finofaci) + + +@@ -296,6 +326,172 @@ def test_nestedrole(topo, _final): + conn = users.get('test_user_3').bind(PW_DM) + assert UserAccounts(conn, DEFAULT_SUFFIX).list() + ++def test_vattr_on_filtered_role(topo, request): ++ """Test nsslapd-ignore-virtual-attrs configuration attribute ++ The attribute is ON by default. If a filtered role is ++ added it is moved to OFF ++ ++ :id: 88b3ad3c-f39a-4eb7-a8c9-07c685f11908 ++ :setup: Standalone instance ++ :steps: ++ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config ++ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON ++ 3. Create a filtered role ++ 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF ++ 5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs ++ :expectedresults: ++ 1. This should be successful ++ 2. This should be successful ++ 3. This should be successful ++ 4. This should be successful ++ 5. 
This should be successful ++ """ ++ ++ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") ++ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') ++ ++ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") ++ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" ++ ++ log.info("Create a filtered role") ++ try: ++ Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX) ++ except: ++ pass ++ roles = FilteredRoles(topo.standalone, DNBASE) ++ roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'}) ++ ++ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") ++ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') ++ ++ topo.standalone.stop() ++ assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'") ++ ++ def fin(): ++ topo.standalone.restart() ++ try: ++ filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) ++ for i in filtered_roles.list(): ++ i.delete() ++ except: ++ pass ++ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') ++ ++ request.addfinalizer(fin) ++ ++def test_vattr_on_filtered_role_restart(topo, request): ++ """Test nsslapd-ignore-virtual-attrs configuration attribute ++ If it exists a filtered role definition at restart then ++ nsslapd-ignore-virtual-attrs should be set to 'off' ++ ++ :id: 972183f7-d18f-40e0-94ab-580e7b7d78d0 ++ :setup: Standalone instance ++ :steps: ++ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config ++ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON ++ 3. Create a filtered role ++ 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF ++ 5. restart the instance ++ 6. 
Check the presence of virtual attribute is detected ++ 7. Check the value of nsslapd-ignore-virtual-attrs should be OFF ++ :expectedresults: ++ 1. This should be successful ++ 2. This should be successful ++ 3. This should be successful ++ 4. This should be successful ++ 5. This should be successful ++ 6. This should be successful ++ 7. This should be successful ++ """ ++ ++ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") ++ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') ++ ++ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") ++ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" ++ ++ log.info("Create a filtered role") ++ try: ++ Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX) ++ except: ++ pass ++ roles = FilteredRoles(topo.standalone, DNBASE) ++ roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'}) ++ ++ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") ++ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') ++ ++ ++ log.info("Check the virtual attribute definition is found (after a required delay)") ++ topo.standalone.restart() ++ time.sleep(5) ++ assert topo.standalone.searchErrorsLog("Found a role/cos definition in") ++ assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'") ++ ++ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") ++ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') ++ ++ def fin(): ++ topo.standalone.restart() ++ try: ++ filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) ++ for i in filtered_roles.list(): ++ i.delete() ++ except: ++ pass ++ 
topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') ++ ++ request.addfinalizer(fin) ++ ++ ++def test_vattr_on_managed_role(topo, request): ++ """Test nsslapd-ignore-virtual-attrs configuration attribute ++ The attribute is ON by default. If a managed role is ++ added it is moved to OFF ++ ++ :id: 664b722d-c1ea-41e4-8f6c-f9c87a212346 ++ :setup: Standalone instance ++ :steps: ++ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config ++ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON ++ 3. Create a managed role ++ 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF ++ 5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs ++ :expectedresults: ++ 1. This should be successful ++ 2. This should be successful ++ 3. This should be successful ++ 4. This should be successful ++ 5. This should be successful ++ """ ++ ++ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") ++ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') ++ ++ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") ++ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" ++ ++ log.info("Create a managed role") ++ roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) ++ role = roles.create(properties={"cn": 'ROLE1'}) ++ ++ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") ++ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') ++ ++ topo.standalone.stop() ++ assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'") ++ ++ def fin(): ++ topo.standalone.restart() ++ try: ++ filtered_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) ++ for i in filtered_roles.list(): ++ i.delete() ++ 
except: ++ pass ++ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') ++ ++ request.addfinalizer(fin) + + if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) +diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c +index 3d076a4cb..cd00e0aba 100644 +--- a/ldap/servers/plugins/roles/roles_cache.c ++++ b/ldap/servers/plugins/roles/roles_cache.c +@@ -530,6 +530,15 @@ roles_cache_trigger_update_role(char *dn, Slapi_Entry *roles_entry, Slapi_DN *be + } + + slapi_rwlock_unlock(global_lock); ++ { ++ /* A role definition has been updated, enable vattr handling */ ++ char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE]; ++ errorbuf[0] = '\0'; ++ config_set_ignore_vattrs(CONFIG_IGNORE_VATTRS, "off", errorbuf, 1); ++ slapi_log_err(SLAPI_LOG_INFO, ++ "roles_cache_trigger_update_role", ++ "Because of virtual attribute definition (role), %s was set to 'off'\n", CONFIG_IGNORE_VATTRS); ++ } + + slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, "<-- roles_cache_trigger_update_role: %p \n", roles_list); + } +diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c +index 2ea4cd760..f6dacce30 100644 +--- a/ldap/servers/slapd/libglobs.c ++++ b/ldap/servers/slapd/libglobs.c +@@ -1803,7 +1803,7 @@ FrontendConfig_init(void) + init_ndn_cache_enabled = cfg->ndn_cache_enabled = LDAP_ON; + cfg->ndn_cache_max_size = SLAPD_DEFAULT_NDN_SIZE; + init_sasl_mapping_fallback = cfg->sasl_mapping_fallback = LDAP_OFF; +- init_ignore_vattrs = cfg->ignore_vattrs = LDAP_OFF; ++ init_ignore_vattrs = cfg->ignore_vattrs = LDAP_ON; + cfg->sasl_max_bufsize = SLAPD_DEFAULT_SASL_MAXBUFSIZE; + cfg->unhashed_pw_switch = SLAPD_DEFAULT_UNHASHED_PW_SWITCH; + init_return_orig_type = cfg->return_orig_type = LDAP_OFF; +diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c +index 4931a4ca4..61ed40b7d 100644 +--- a/ldap/servers/slapd/main.c ++++ b/ldap/servers/slapd/main.c +@@ -1042,6 +1042,8 @@ main(int argc, char **argv) 
+ eq_start(); /* must be done after plugins started - DEPRECATED */ + eq_start_rel(); /* must be done after plugins started */ + ++ vattr_check(); /* Check if it exists virtual attribute definitions */ ++ + #ifdef HPUX10 + /* HPUX linker voodoo */ + if (collation_init == NULL) { +diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h +index c143f3772..442a621aa 100644 +--- a/ldap/servers/slapd/proto-slap.h ++++ b/ldap/servers/slapd/proto-slap.h +@@ -1462,6 +1462,7 @@ void subentry_create_filter(Slapi_Filter **filter); + */ + void vattr_init(void); + void vattr_cleanup(void); ++void vattr_check(void); + + /* + * slapd_plhash.c - supplement to NSPR plhash +diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c +index 09dab6ecf..24750a57c 100644 +--- a/ldap/servers/slapd/vattr.c ++++ b/ldap/servers/slapd/vattr.c +@@ -64,6 +64,10 @@ + #define SOURCEFILE "vattr.c" + static char *sourcefile = SOURCEFILE; + ++/* stolen from roles_cache.h, must remain in sync */ ++#define NSROLEATTR "nsRole" ++static Slapi_Eq_Context vattr_check_ctx = {0}; ++ + /* Define only for module test code */ + /* #define VATTR_TEST_CODE */ + +@@ -130,6 +134,112 @@ vattr_cleanup() + { + /* We need to free and remove anything that was inserted first */ + vattr_map_destroy(); ++ slapi_eq_cancel_rel(vattr_check_ctx); ++} ++ ++static void ++vattr_check_thread(void *arg) ++{ ++ Slapi_Backend *be = NULL; ++ char *cookie = NULL; ++ Slapi_DN *base_sdn = NULL; ++ Slapi_PBlock *search_pb = NULL; ++ Slapi_Entry **entries = NULL; ++ int32_t rc; ++ int32_t check_suffix; /* used to skip suffixes in ignored_backend */ ++ PRBool exist_vattr_definition = PR_FALSE; ++ char *ignored_backend[5] = {"cn=config", "cn=schema", "cn=monitor", "cn=changelog", NULL}; /* suffixes to ignore */ ++ char *suffix; ++ int ignore_vattrs; ++ ++ ignore_vattrs = config_get_ignore_vattrs(); ++ ++ if (!ignore_vattrs) { ++ /* Nothing to do more, we are already evaluating virtual attribute */ ++ return; 
++ } ++ ++ search_pb = slapi_pblock_new(); ++ be = slapi_get_first_backend(&cookie); ++ while (be && !exist_vattr_definition && !slapi_is_shutting_down()) { ++ base_sdn = (Slapi_DN *) slapi_be_getsuffix(be, 0); ++ suffix = (char *) slapi_sdn_get_dn(base_sdn); ++ ++ if (suffix) { ++ /* First check that we need to check that suffix */ ++ check_suffix = 1; ++ for (size_t i = 0; ignored_backend[i]; i++) { ++ if (strcasecmp(suffix, ignored_backend[i]) == 0) { ++ check_suffix = 0; ++ break; ++ } ++ } ++ ++ /* search for a role or cos definition */ ++ if (check_suffix) { ++ slapi_search_internal_set_pb(search_pb, slapi_sdn_get_dn(base_sdn), ++ LDAP_SCOPE_SUBTREE, "(&(objectclass=ldapsubentry)(|(objectclass=nsRoleDefinition)(objectclass=cosSuperDefinition)))", ++ NULL, 0, NULL, NULL, (void *) plugin_get_default_component_id(), 0); ++ slapi_search_internal_pb(search_pb); ++ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); ++ ++ if (rc == LDAP_SUCCESS) { ++ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries); ++ if (entries && entries[0]) { ++ /* it exists at least a cos or role definition */ ++ exist_vattr_definition = PR_TRUE; ++ slapi_log_err(SLAPI_LOG_INFO, ++ "vattr_check_thread", ++ "Found a role/cos definition in %s\n", slapi_entry_get_dn(entries[0])); ++ } else { ++ slapi_log_err(SLAPI_LOG_INFO, ++ "vattr_check_thread", ++ "No role/cos definition in %s\n", slapi_sdn_get_dn(base_sdn)); ++ } ++ } ++ slapi_free_search_results_internal(search_pb); ++ } /* check_suffix */ ++ } /* suffix */ ++ be = (backend *) slapi_get_next_backend(cookie); ++ } ++ slapi_pblock_destroy(search_pb); ++ slapi_ch_free_string(&cookie); ++ ++ /* Now if a virtual attribute is defined, then CONFIG_IGNORE_VATTRS -> off */ ++ if (exist_vattr_definition) { ++ char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE]; ++ errorbuf[0] = '\0'; ++ config_set_ignore_vattrs(CONFIG_IGNORE_VATTRS, "off", errorbuf, 1); ++ slapi_log_err(SLAPI_LOG_INFO, ++ "vattr_check_thread", ++ "Because of 
virtual attribute definition, %s was set to 'off'\n", CONFIG_IGNORE_VATTRS); ++ } ++} ++static void ++vattr_check_schedule_once(time_t when __attribute__((unused)), void *arg) ++{ ++ if (PR_CreateThread(PR_USER_THREAD, ++ vattr_check_thread, ++ (void *) arg, ++ PR_PRIORITY_NORMAL, ++ PR_GLOBAL_THREAD, ++ PR_UNJOINABLE_THREAD, ++ SLAPD_DEFAULT_THREAD_STACKSIZE) == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, ++ "vattr_check_schedule_once", ++ "Fails to check if %s needs to be toggled to FALSE\n", CONFIG_IGNORE_VATTRS); ++ } ++} ++#define VATTR_CHECK_DELAY 3 ++void ++vattr_check() ++{ ++ /* Schedule running a callback that will create a thread ++ * but make sure it is called a first thing when event loop is created */ ++ time_t now; ++ ++ now = slapi_current_rel_time_t(); ++ vattr_check_ctx = slapi_eq_once_rel(vattr_check_schedule_once, NULL, now + VATTR_CHECK_DELAY); + } + + /* The public interface functions start here */ +@@ -1631,6 +1741,9 @@ slapi_vattrspi_regattr(vattr_sp_handle *h, char *type_name_to_register, char *DN + char *type_to_add; + int free_type_to_add = 0; + Slapi_DN original_dn; ++ int ignore_vattrs; ++ ++ ignore_vattrs = config_get_ignore_vattrs(); + + slapi_sdn_init(&original_dn); + +@@ -1676,6 +1789,20 @@ slapi_vattrspi_regattr(vattr_sp_handle *h, char *type_name_to_register, char *DN + if (free_type_to_add) { + slapi_ch_free((void **)&type_to_add); + } ++ if (ignore_vattrs && strcasecmp(type_name_to_register, NSROLEATTR)) { ++ /* A new virtual attribute is registered. 
++ * This new vattr being *different* than the default roles vattr 'nsRole' ++ * It is time to allow vattr lookup ++ */ ++ char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE]; ++ errorbuf[0] = '\0'; ++ config_set_ignore_vattrs(CONFIG_IGNORE_VATTRS, "off", errorbuf, 1); ++ slapi_log_err(SLAPI_LOG_INFO, ++ "slapi_vattrspi_regattr", ++ "Because %s is a new registered virtual attribute , %s was set to 'off'\n", ++ type_name_to_register, ++ CONFIG_IGNORE_VATTRS); ++ } + + return ret; + } +diff --git a/src/lib389/lib389/idm/role.py b/src/lib389/lib389/idm/role.py +index fe91aab6f..9a2bff3d6 100644 +--- a/src/lib389/lib389/idm/role.py ++++ b/src/lib389/lib389/idm/role.py +@@ -252,6 +252,8 @@ class FilteredRole(Role): + self._rdn_attribute = 'cn' + self._create_objectclasses = ['nsComplexRoleDefinition', 'nsFilteredRoleDefinition'] + ++ self._protected = False ++ + + + class FilteredRoles(Roles): +@@ -285,6 +287,7 @@ class ManagedRole(Role): + self._rdn_attribute = 'cn' + self._create_objectclasses = ['nsSimpleRoleDefinition', 'nsManagedRoleDefinition'] + ++ self._protected = False + + class ManagedRoles(Roles): + """DSLdapObjects that represents all Managed Roles entries +@@ -320,6 +323,7 @@ class NestedRole(Role): + self._rdn_attribute = 'cn' + self._create_objectclasses = ['nsComplexRoleDefinition', 'nsNestedRoleDefinition'] + ++ self._protected = False + + class NestedRoles(Roles): + """DSLdapObjects that represents all NestedRoles entries in suffix. 
+-- +2.31.1 + diff --git a/0002-Issue-4943-Fix-csn-generator-to-limit-time-skew-drif.patch b/0002-Issue-4943-Fix-csn-generator-to-limit-time-skew-drif.patch new file mode 100644 index 0000000..51f8170 --- /dev/null +++ b/0002-Issue-4943-Fix-csn-generator-to-limit-time-skew-drif.patch @@ -0,0 +1,621 @@ +From 968ad6b5039d839bfbc61da755c252cc7598415b Mon Sep 17 00:00:00 2001 +From: progier389 +Date: Mon, 25 Oct 2021 17:09:57 +0200 +Subject: [PATCH 02/12] Issue 4943 - Fix csn generator to limit time skew drift + - PR 4946 + +--- + ldap/servers/slapd/csngen.c | 433 +++++++++++++++++------------- + ldap/servers/slapd/slapi-plugin.h | 9 + + 2 files changed, 255 insertions(+), 187 deletions(-) + +diff --git a/ldap/servers/slapd/csngen.c b/ldap/servers/slapd/csngen.c +index fcd88b4cc..c7c5c2ba8 100644 +--- a/ldap/servers/slapd/csngen.c ++++ b/ldap/servers/slapd/csngen.c +@@ -18,8 +18,9 @@ + #include "prcountr.h" + #include "slap.h" + ++ + #define CSN_MAX_SEQNUM 0xffff /* largest sequence number */ +-#define CSN_MAX_TIME_ADJUST 24 * 60 * 60 /* maximum allowed time adjustment (in seconds) = 1 day */ ++#define CSN_MAX_TIME_ADJUST _SEC_PER_DAY /* maximum allowed time adjustment (in seconds) = 1 day */ + #define ATTR_CSN_GENERATOR_STATE "nsState" /* attribute that stores csn state information */ + #define STATE_FORMAT "%8x%8x%8x%4hx%4hx" + #define STATE_LENGTH 32 +@@ -27,6 +28,8 @@ + #define CSN_CALC_TSTAMP(gen) ((gen)->state.sampled_time + \ + (gen)->state.local_offset + \ + (gen)->state.remote_offset) ++#define TIME_DIFF_WARNING_DELAY (30*_SEC_PER_DAY) /* log an info message when difference ++ between clock is greater than this delay */ + + /* + * ************************************************************************** +@@ -63,6 +66,7 @@ typedef struct csngen_state + struct csngen + { + csngen_state state; /* persistent state of the generator */ ++ int32_t (*gettime)(struct timespec *tp); /* Get local time */ + callback_list callbacks; /* list of callbacks registered with 
the generator */ + Slapi_RWLock *lock; /* concurrency control */ + }; +@@ -78,7 +82,7 @@ static int _csngen_init_callbacks(CSNGen *gen); + static void _csngen_call_callbacks(const CSNGen *gen, const CSN *csn, PRBool abort); + static int _csngen_cmp_callbacks(const void *el1, const void *el2); + static void _csngen_free_callbacks(CSNGen *gen); +-static int _csngen_adjust_local_time(CSNGen *gen, time_t cur_time); ++static int _csngen_adjust_local_time(CSNGen *gen); + + /* + * ************************************************************************** +@@ -121,6 +125,7 @@ csngen_new(ReplicaId rid, Slapi_Attr *state) + _csngen_init_callbacks(gen); + + gen->state.rid = rid; ++ gen->gettime = slapi_clock_utc_gettime; + + if (state) { + rc = _csngen_parse_state(gen, state); +@@ -164,10 +169,7 @@ csngen_free(CSNGen **gen) + int + csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify) + { +- struct timespec now = {0}; + int rc = CSN_SUCCESS; +- time_t cur_time; +- int delta; + + if (gen == NULL || csn == NULL) { + slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn", "Invalid argument\n"); +@@ -180,39 +182,13 @@ csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify) + return CSN_MEMORY_ERROR; + } + +- if ((rc = slapi_clock_gettime(&now)) != 0) { +- /* Failed to get system time, we must abort */ +- slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn", +- "Failed to get system time (%s)\n", +- slapd_system_strerror(rc)); +- return CSN_TIME_ERROR; +- } +- cur_time = now.tv_sec; +- + slapi_rwlock_wrlock(gen->lock); + +- /* check if the time should be adjusted */ +- delta = cur_time - gen->state.sampled_time; +- if (delta > _SEC_PER_DAY || delta < (-1 * _SEC_PER_DAY)) { +- /* We had a jump larger than a day */ +- slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn", +- "Detected large jump in CSN time. 
Delta: %d (current time: %ld vs previous time: %ld)\n", +- delta, cur_time, gen->state.sampled_time); +- } +- if (delta > 0) { +- rc = _csngen_adjust_local_time(gen, cur_time); +- if (rc != CSN_SUCCESS) { +- slapi_rwlock_unlock(gen->lock); +- return rc; +- } ++ rc = _csngen_adjust_local_time(gen); ++ if (rc != CSN_SUCCESS) { ++ slapi_rwlock_unlock(gen->lock); ++ return rc; + } +- /* if (delta < 0) this means the local system time was set back +- * the new csn will be generated based on sampled time, which is +- * ahead of system time and previously generated csns. +- * the time stamp of the csn will not change until system time +- * catches up or is corrected by remote csns. +- * But we need to ensure that the seq_num does not overflow. +- */ + + if (gen->state.seq_num == CSN_MAX_SEQNUM) { + slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn", "Sequence rollover; " +@@ -261,13 +237,36 @@ csngen_rewrite_rid(CSNGen *gen, ReplicaId rid) + } + + /* this function should be called when a remote CSN for the same part of +- the dit becomes known to the server (for instance, as part of RUV during +- replication session. In response, the generator would adjust its notion +- of time so that it does not generate smaller csns */ ++ * the dit becomes known to the server (for instance, as part of RUV during ++ * replication session. In response, the generator would adjust its notion ++ * of time so that it does not generate smaller csns ++ * ++ * The following counters are updated ++ * - when a new csn is generated ++ * - when csngen is adjusted (beginning of a incoming (extop) or outgoing ++ * (inc_protocol) session) ++ * ++ * sampled_time: It takes the value of current system time. ++ * ++ * remote offset: it is updated when 'csn' argument is ahead of the next csn ++ * that the csn generator will generate. It is the MAX jump ahead, it is not ++ * cumulative counter (e.g. if remote_offset=7 and 'csn' is 5sec ahead ++ * remote_offset stays the same. 
The jump ahead (5s) pour into the local offset. ++ * It is not clear of the interest of this counter. It gives an indication of ++ * the maximum jump ahead but not much. ++ * ++ * local offset: it is increased if ++ * - system time is going backward (compare sampled_time) ++ * - if 'csn' argument is ahead of csn that the csn generator would generate ++ * AND diff('csn', csngen.new_csn) < remote_offset ++ * then the diff "pour" into local_offset ++ * It is decreased as the clock is ticking, local offset is "consumed" as ++ * sampled_time progresses. ++ */ + int + csngen_adjust_time(CSNGen *gen, const CSN *csn) + { +- time_t remote_time, remote_offset, cur_time; ++ time_t remote_time, remote_offset, cur_time, old_time, new_time; + PRUint16 remote_seqnum; + int rc; + extern int config_get_ignore_time_skew(void); +@@ -281,6 +280,11 @@ csngen_adjust_time(CSNGen *gen, const CSN *csn) + + slapi_rwlock_wrlock(gen->lock); + ++ /* Get last local csn time */ ++ old_time = CSN_CALC_TSTAMP(gen); ++ /* update local offset and sample_time */ ++ rc = _csngen_adjust_local_time(gen); ++ + if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { + cur_time = CSN_CALC_TSTAMP(gen); + slapi_log_err(SLAPI_LOG_REPL, "csngen_adjust_time", +@@ -290,79 +294,60 @@ csngen_adjust_time(CSNGen *gen, const CSN *csn) + gen->state.local_offset, + gen->state.remote_offset); + } +- /* make sure we have the current time */ +- cur_time = slapi_current_utc_time(); +- +- /* make sure sampled_time is current */ +- /* must only call adjust_local_time if the current time is greater than +- the generator state time */ +- if ((cur_time > gen->state.sampled_time) && +- (CSN_SUCCESS != (rc = _csngen_adjust_local_time(gen, cur_time)))) { ++ if (rc != CSN_SUCCESS) { + /* _csngen_adjust_local_time will log error */ + slapi_rwlock_unlock(gen->lock); +- csngen_dump_state(gen); ++ csngen_dump_state(gen, SLAPI_LOG_DEBUG); + return rc; + } + +- cur_time = CSN_CALC_TSTAMP(gen); +- if (remote_time >= cur_time) { +- time_t new_time = 
0; +- +- if (remote_seqnum > gen->state.seq_num) { +- if (remote_seqnum < CSN_MAX_SEQNUM) { +- gen->state.seq_num = remote_seqnum + 1; +- } else { +- remote_time++; +- } +- } +- +- remote_offset = remote_time - cur_time; +- if (remote_offset > gen->state.remote_offset) { +- if (ignore_time_skew || (remote_offset <= CSN_MAX_TIME_ADJUST)) { +- gen->state.remote_offset = remote_offset; +- } else /* remote_offset > CSN_MAX_TIME_ADJUST */ +- { +- slapi_log_err(SLAPI_LOG_ERR, "csngen_adjust_time", +- "Adjustment limit exceeded; value - %ld, limit - %ld\n", +- remote_offset, (long)CSN_MAX_TIME_ADJUST); +- slapi_rwlock_unlock(gen->lock); +- csngen_dump_state(gen); +- return CSN_LIMIT_EXCEEDED; +- } +- } else if (remote_offset > 0) { /* still need to account for this */ +- gen->state.local_offset += remote_offset; ++ remote_offset = remote_time - CSN_CALC_TSTAMP(gen); ++ if (remote_offset > 0) { ++ if (!ignore_time_skew && (gen->state.remote_offset + remote_offset > CSN_MAX_TIME_ADJUST)) { ++ slapi_log_err(SLAPI_LOG_ERR, "csngen_adjust_time", ++ "Adjustment limit exceeded; value - %ld, limit - %ld\n", ++ remote_offset, (long)CSN_MAX_TIME_ADJUST); ++ slapi_rwlock_unlock(gen->lock); ++ csngen_dump_state(gen, SLAPI_LOG_DEBUG); ++ return CSN_LIMIT_EXCEEDED; + } +- +- new_time = CSN_CALC_TSTAMP(gen); +- /* let's revisit the seq num - if the new time is > the old +- tiem, we should reset the seq number to remote + 1 if +- this won't cause a wrap around */ +- if (new_time >= cur_time) { +- /* just set seq_num regardless of whether the current one +- is < or > than the remote one - the goal of this function +- is to make sure we generate CSNs > the remote CSN - if +- we have increased the time, we can decrease the seqnum +- and still guarantee that any new CSNs generated will be +- > any current CSNs we have generated */ +- if (remote_seqnum < gen->state.seq_num) { +- gen->state.seq_num ++; +- } else { +- gen->state.seq_num = remote_seqnum + 1; +- } ++ gen->state.remote_offset += 
remote_offset; ++ /* To avoid beat phenomena between suppliers let put 1 second in local_offset ++ * it will be eaten at next clock tick rather than increasing remote offset ++ * If we do not do that we will have a time skew drift of 1 second per 2 seconds ++ * if suppliers are desynchronized by 0.5 second ++ */ ++ if (gen->state.local_offset == 0) { ++ gen->state.local_offset++; ++ gen->state.remote_offset--; + } +- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { +- slapi_log_err(SLAPI_LOG_REPL, "csngen_adjust_time", +- "gen state after %08lx%04x:%ld:%ld:%ld\n", +- new_time, gen->state.seq_num, +- gen->state.sampled_time, +- gen->state.local_offset, +- gen->state.remote_offset); ++ } ++ /* Time to compute seqnum so that ++ * new csn >= remote csn and new csn >= old local csn ++ */ ++ new_time = CSN_CALC_TSTAMP(gen); ++ PR_ASSERT(new_time >= old_time); ++ PR_ASSERT(new_time >= remote_time); ++ if (new_time > old_time) { ++ /* Can reset (local) seqnum */ ++ gen->state.seq_num = 0; ++ } ++ if (new_time == remote_time && remote_seqnum >= gen->state.seq_num) { ++ if (remote_seqnum >= CSN_MAX_SEQNUM) { ++ gen->state.seq_num = 0; ++ gen->state.local_offset++; ++ } else { ++ gen->state.seq_num = remote_seqnum + 1; + } +- } else if (gen->state.remote_offset > 0) { +- /* decrease remote offset? */ +- /* how to decrease remote offset but ensure that we don't +- generate a duplicate CSN, or a CSN smaller than one we've already +- generated? 
*/ ++ } ++ ++ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { ++ slapi_log_err(SLAPI_LOG_REPL, "csngen_adjust_time", ++ "gen state after %08lx%04x:%ld:%ld:%ld\n", ++ new_time, gen->state.seq_num, ++ gen->state.sampled_time, ++ gen->state.local_offset, ++ gen->state.remote_offset); + } + + slapi_rwlock_unlock(gen->lock); +@@ -435,16 +420,16 @@ csngen_unregister_callbacks(CSNGen *gen, void *cookie) + + /* debugging function */ + void +-csngen_dump_state(const CSNGen *gen) ++csngen_dump_state(const CSNGen *gen, int severity) + { + if (gen) { + slapi_rwlock_rdlock(gen->lock); +- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "CSN generator's state:\n"); +- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\treplica id: %d\n", gen->state.rid); +- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tsampled time: %ld\n", gen->state.sampled_time); +- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tlocal offset: %ld\n", gen->state.local_offset); +- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tremote offset: %ld\n", gen->state.remote_offset); +- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tsequence number: %d\n", gen->state.seq_num); ++ slapi_log_err(severity, "csngen_dump_state", "CSN generator's state:\n"); ++ slapi_log_err(severity, "csngen_dump_state", "\treplica id: %d\n", gen->state.rid); ++ slapi_log_err(severity, "csngen_dump_state", "\tsampled time: %ld\n", gen->state.sampled_time); ++ slapi_log_err(severity, "csngen_dump_state", "\tlocal offset: %ld\n", gen->state.local_offset); ++ slapi_log_err(severity, "csngen_dump_state", "\tremote offset: %ld\n", gen->state.remote_offset); ++ slapi_log_err(severity, "csngen_dump_state", "\tsequence number: %d\n", gen->state.seq_num); + slapi_rwlock_unlock(gen->lock); + } + } +@@ -459,7 +444,7 @@ csngen_test() + CSNGen *gen = csngen_new(255, NULL); + + slapi_log_err(SLAPI_LOG_DEBUG, "csngen_test", "staring csn generator test ..."); +- csngen_dump_state(gen); ++ csngen_dump_state(gen, 
SLAPI_LOG_INFO); + + rc = _csngen_start_test_threads(gen); + if (rc == 0) { +@@ -469,7 +454,7 @@ csngen_test() + } + + _csngen_stop_test_threads(); +- csngen_dump_state(gen); ++ csngen_dump_state(gen, SLAPI_LOG_INFO); + slapi_log_err(SLAPI_LOG_DEBUG, "csngen_test", "csn generator test is complete..."); + } + +@@ -574,94 +559,93 @@ _csngen_cmp_callbacks(const void *el1, const void *el2) + return 1; + } + ++/* Get time and adjust local offset */ + static int +-_csngen_adjust_local_time(CSNGen *gen, time_t cur_time) ++_csngen_adjust_local_time(CSNGen *gen) + { + extern int config_get_ignore_time_skew(void); + int ignore_time_skew = config_get_ignore_time_skew(); +- time_t time_diff = cur_time - gen->state.sampled_time; ++ struct timespec now = {0}; ++ time_t time_diff; ++ time_t cur_time; ++ int rc; + ++ ++ if ((rc = gen->gettime(&now)) != 0) { ++ /* Failed to get system time, we must abort */ ++ slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn", ++ "Failed to get system time (%s)\n", ++ slapd_system_strerror(rc)); ++ return CSN_TIME_ERROR; ++ } ++ cur_time = now.tv_sec; ++ time_diff = cur_time - gen->state.sampled_time; ++ ++ /* check if the time should be adjusted */ + if (time_diff == 0) { + /* This is a no op - _csngen_adjust_local_time should never be called + in this case, because there is nothing to adjust - but just return + here to protect ourselves + */ + return CSN_SUCCESS; +- } else if (time_diff > 0) { +- time_t ts_before = CSN_CALC_TSTAMP(gen); +- time_t ts_after = 0; +- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { +- time_t new_time = CSN_CALC_TSTAMP(gen); +- slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time", +- "gen state before %08lx%04x:%ld:%ld:%ld\n", +- new_time, gen->state.seq_num, +- gen->state.sampled_time, +- gen->state.local_offset, +- gen->state.remote_offset); +- } +- +- gen->state.sampled_time = cur_time; +- if (time_diff > gen->state.local_offset) +- gen->state.local_offset = 0; +- else +- gen->state.local_offset = 
gen->state.local_offset - time_diff; +- +- /* only reset the seq_num if the new timestamp part of the CSN +- is going to be greater than the old one - if they are the +- same after the above adjustment (which can happen if +- csngen_adjust_time has to store the offset in the +- local_offset field) we must not allow the CSN to regress or +- generate duplicate numbers */ +- ts_after = CSN_CALC_TSTAMP(gen); +- if (ts_after > ts_before) { +- gen->state.seq_num = 0; /* only reset if new time > old time */ +- } +- +- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { +- time_t new_time = CSN_CALC_TSTAMP(gen); +- slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time", +- "gen state after %08lx%04x:%ld:%ld:%ld\n", +- new_time, gen->state.seq_num, +- gen->state.sampled_time, +- gen->state.local_offset, +- gen->state.remote_offset); +- } +- return CSN_SUCCESS; +- } else /* time was turned back */ +- { +- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { +- time_t new_time = CSN_CALC_TSTAMP(gen); +- slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time", +- "gen state back before %08lx%04x:%ld:%ld:%ld\n", +- new_time, gen->state.seq_num, +- gen->state.sampled_time, +- gen->state.local_offset, +- gen->state.remote_offset); +- } ++ } ++ if (labs(time_diff) > TIME_DIFF_WARNING_DELAY) { ++ /* We had a jump larger than a day */ ++ slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn", ++ "Detected large jump in CSN time. 
Delta: %ld (current time: %ld vs previous time: %ld)\n", ++ time_diff, cur_time, gen->state.sampled_time); ++ } ++ if (!ignore_time_skew && (gen->state.local_offset - time_diff > CSN_MAX_TIME_ADJUST)) { ++ slapi_log_err(SLAPI_LOG_ERR, "_csngen_adjust_local_time", ++ "Adjustment limit exceeded; value - %ld, limit - %d\n", ++ gen->state.local_offset - time_diff, CSN_MAX_TIME_ADJUST); ++ return CSN_LIMIT_EXCEEDED; ++ } + +- if (!ignore_time_skew && (labs(time_diff) > CSN_MAX_TIME_ADJUST)) { +- slapi_log_err(SLAPI_LOG_ERR, "_csngen_adjust_local_time", +- "Adjustment limit exceeded; value - %ld, limit - %d\n", +- labs(time_diff), CSN_MAX_TIME_ADJUST); +- return CSN_LIMIT_EXCEEDED; +- } ++ time_t ts_before = CSN_CALC_TSTAMP(gen); ++ time_t ts_after = 0; ++ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { ++ time_t new_time = CSN_CALC_TSTAMP(gen); ++ slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time", ++ "gen state before %08lx%04x:%ld:%ld:%ld\n", ++ new_time, gen->state.seq_num, ++ gen->state.sampled_time, ++ gen->state.local_offset, ++ gen->state.remote_offset); ++ } + +- gen->state.sampled_time = cur_time; +- gen->state.local_offset = MAX_VAL(gen->state.local_offset, labs(time_diff)); +- gen->state.seq_num = 0; ++ gen->state.sampled_time = cur_time; ++ gen->state.local_offset = MAX_VAL(0, gen->state.local_offset - time_diff); ++ /* new local_offset = MAX_VAL(0, old sample_time + old local_offset - cur_time) ++ * ==> new local_offset >= 0 and ++ * new local_offset + cur_time >= old sample_time + old local_offset ++ * ==> new local_offset + cur_time + remote_offset >= ++ * sample_time + old local_offset + remote_offset ++ * ==> CSN_CALC_TSTAMP(new gen) >= CSN_CALC_TSTAMP(old gen) ++ */ + +- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { +- time_t new_time = CSN_CALC_TSTAMP(gen); +- slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time", +- "gen state back after %08lx%04x:%ld:%ld:%ld\n", +- new_time, gen->state.seq_num, +- gen->state.sampled_time, +- 
gen->state.local_offset, +- gen->state.remote_offset); +- } ++ /* only reset the seq_num if the new timestamp part of the CSN ++ is going to be greater than the old one - if they are the ++ same after the above adjustment (which can happen if ++ csngen_adjust_time has to store the offset in the ++ local_offset field) we must not allow the CSN to regress or ++ generate duplicate numbers */ ++ ts_after = CSN_CALC_TSTAMP(gen); ++ PR_ASSERT(ts_after >= ts_before); ++ if (ts_after > ts_before) { ++ gen->state.seq_num = 0; /* only reset if new time > old time */ ++ } + +- return CSN_SUCCESS; ++ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) { ++ time_t new_time = CSN_CALC_TSTAMP(gen); ++ slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time", ++ "gen state after %08lx%04x:%ld:%ld:%ld\n", ++ new_time, gen->state.seq_num, ++ gen->state.sampled_time, ++ gen->state.local_offset, ++ gen->state.remote_offset); + } ++ return CSN_SUCCESS; + } + + /* +@@ -799,7 +783,7 @@ _csngen_remote_tester_main(void *data) + "Failed to adjust generator's time; csn error - %d\n", rc); + } + +- csngen_dump_state(gen); ++ csngen_dump_state(gen, SLAPI_LOG_INFO); + } + csn_free(&csn); + +@@ -825,8 +809,83 @@ _csngen_local_tester_main(void *data) + /* + * g_sampled_time -= slapi_rand () % 100; + */ +- csngen_dump_state(gen); ++ csngen_dump_state(gen, SLAPI_LOG_INFO); + } + + PR_AtomicDecrement(&s_thread_count); + } ++ ++int _csngen_tester_state; ++int _csngen_tester_state_rid; ++ ++static int ++_mynoise(int time, int len, double height) ++{ ++ if (((time/len) % 2) == 0) { ++ return -height + 2 * height * ( time % len ) / (len-1); ++ } else { ++ return height - 2 * height * ( time % len ) / (len-1); ++ } ++} ++ ++ ++int32_t _csngen_tester_gettime(struct timespec *tp) ++{ ++ int vtime = _csngen_tester_state ; ++ tp->tv_sec = 0x1000000 + vtime + 2 * _csngen_tester_state_rid; ++ if (_csngen_tester_state_rid == 3) { ++ /* tp->tv_sec += _mynoise(vtime, 10, 1.5); */ ++ tp->tv_sec += _mynoise(vtime, 30, 15); 
++ } ++ return 0; ++} ++ ++/* Mimic a fully meshed multi suplier topology */ ++void csngen_multi_suppliers_test(void) ++{ ++#define NB_TEST_MASTERS 6 ++#define NB_TEST_STATES 500 ++ CSNGen *gen[NB_TEST_MASTERS]; ++ struct timespec now = {0}; ++ CSN *last_csn = NULL; ++ CSN *csn = NULL; ++ int i,j,rc; ++ ++ _csngen_tester_gettime(&now); ++ ++ for (i=0; i< NB_TEST_MASTERS; i++) { ++ gen[i] = csngen_new(i+1, NULL); ++ gen[i]->gettime = _csngen_tester_gettime; ++ gen[i]->state.sampled_time = now.tv_sec; ++ } ++ ++ for (_csngen_tester_state=0; _csngen_tester_state < NB_TEST_STATES; _csngen_tester_state++) { ++ for (i=0; i< NB_TEST_MASTERS; i++) { ++ _csngen_tester_state_rid = i+1; ++ rc = csngen_new_csn(gen[i], &csn, PR_FALSE); ++ if (rc) { ++ continue; ++ } ++ csngen_dump_state(gen[i], SLAPI_LOG_INFO); ++ ++ if (csn_compare(csn, last_csn) <= 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "csngen_multi_suppliers_test", ++ "CSN generated in disorder state=%d rid=%d\n", _csngen_tester_state, _csngen_tester_state_rid); ++ _csngen_tester_state = NB_TEST_STATES; ++ break; ++ } ++ last_csn = csn; ++ ++ for (j=0; j< NB_TEST_MASTERS; j++) { ++ if (i==j) { ++ continue; ++ } ++ _csngen_tester_state_rid = j+1; ++ rc = csngen_adjust_time(gen[j], csn); ++ if (rc) { ++ continue; ++ } ++ } ++ } ++ } ++} +diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h +index 56765fdfb..59c5ec9ab 100644 +--- a/ldap/servers/slapd/slapi-plugin.h ++++ b/ldap/servers/slapd/slapi-plugin.h +@@ -6762,8 +6762,17 @@ time_t slapi_current_time(void) __attribute__((deprecated)); + * + * \param tp - a timespec struct where the system time is set + * \return result code, upon success tp is set to the system time ++ * as a clock in UTC timezone. This clock adjusts with ntp steps, ++ * and should NOT be used for timer information. 
+ */ + int32_t slapi_clock_gettime(struct timespec *tp); ++/* ++ * slapi_clock_gettime should have better been called ++ * slapi_clock_utc_gettime but sice the function pre-existed ++ * we are just adding an alias (to avoid risking to break ++ * some custom plugins) ++ */ ++#define slapi_clock_utc_gettime slapi_clock_gettime + + /** + * Returns the current system time as a hr clock relative to uptime +-- +2.31.1 + diff --git a/0003-Issue-3584-Fix-PBKDF2_SHA256-hashing-in-FIPS-mode-49.patch b/0003-Issue-3584-Fix-PBKDF2_SHA256-hashing-in-FIPS-mode-49.patch new file mode 100644 index 0000000..59c4435 --- /dev/null +++ b/0003-Issue-3584-Fix-PBKDF2_SHA256-hashing-in-FIPS-mode-49.patch @@ -0,0 +1,240 @@ +From 957ffd53b041c19d27753a028e6f514dcc75dfbd Mon Sep 17 00:00:00 2001 +From: Simon Pichugin +Date: Tue, 26 Oct 2021 15:51:24 -0700 +Subject: [PATCH 03/12] Issue 3584 - Fix PBKDF2_SHA256 hashing in FIPS mode + (#4949) + +Issue Description: Use PK11_Decrypt function to get hash data +because PK11_ExtractKeyValue function is forbidden in FIPS mode. +We can't extract keys while in FIPS mode. But we use PK11_ExtractKeyValue +for hashes, and it's not forbidden. + +We can't use OpenSSL's PBKDF2-SHA256 implementation right now because +we need to support an upgrade procedure while in FIPS mode (update +hash on bind). For that, we should fix existing PBKDF2 usage, and we can +switch to OpenSSL's PBKDF2-SHA256 in the following versions. + +Fix Description: Use PK11_Decrypt function to get the data. + +Enable TLS on all CI test topologies while in FIPS because without +that we don't set up the NSS database correctly. + +Add PBKDF2-SHA256 (OpenSSL) to ldif templates, so the password scheme is +discoverable by internal functions. + +https://github.com/389ds/389-ds-base/issues/3584 + +Reviewed by: @progier389, @mreynolds389, @Firstyear, @tbordaz (Thanks!!) 
+--- + .../healthcheck/health_security_test.py | 10 --- + ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c | 62 ++++++++++++++++--- + ldap/servers/slapd/main.c | 12 ++++ + src/lib389/lib389/__init__.py | 4 ++ + src/lib389/lib389/topologies.py | 6 +- + src/lib389/lib389/utils.py | 13 ++++ + 6 files changed, 86 insertions(+), 21 deletions(-) + +diff --git a/dirsrvtests/tests/suites/healthcheck/health_security_test.py b/dirsrvtests/tests/suites/healthcheck/health_security_test.py +index 6c0d27aaa..c1dc7938c 100644 +--- a/dirsrvtests/tests/suites/healthcheck/health_security_test.py ++++ b/dirsrvtests/tests/suites/healthcheck/health_security_test.py +@@ -40,16 +40,6 @@ else: + log = logging.getLogger(__name__) + + +-def is_fips(): +- if os.path.exists('/proc/sys/crypto/fips_enabled'): +- with open('/proc/sys/crypto/fips_enabled', 'r') as f: +- state = f.readline().strip() +- if state == '1': +- return True +- else: +- return False +- +- + def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None): + args = FakeArgs() + args.instance = instance.serverid +diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c +index d310dc792..dcac4fcdd 100644 +--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c ++++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c +@@ -91,10 +91,11 @@ pbkdf2_sha256_extract(char *hash_in, SECItem *salt, uint32_t *iterations) + SECStatus + pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *salt, uint32_t iterations) + { +- SECItem *result = NULL; + SECAlgorithmID *algid = NULL; + PK11SlotInfo *slot = NULL; + PK11SymKey *symkey = NULL; ++ SECItem *wrapKeyData = NULL; ++ SECStatus rv = SECFailure; + + /* We assume that NSS is already started. 
*/ + algid = PK11_CreatePBEV2AlgorithmID(SEC_OID_PKCS5_PBKDF2, SEC_OID_HMAC_SHA256, SEC_OID_HMAC_SHA256, hash_out_len, iterations, salt); +@@ -104,7 +105,6 @@ pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *s + slot = PK11_GetBestSlotMultiple(mechanism_array, 2, NULL); + if (slot != NULL) { + symkey = PK11_PBEKeyGen(slot, algid, pwd, PR_FALSE, NULL); +- PK11_FreeSlot(slot); + if (symkey == NULL) { + /* We try to get the Error here but NSS has two or more error interfaces, and sometimes it uses none of them. */ + int32_t status = PORT_GetError(); +@@ -123,18 +123,60 @@ pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *s + return SECFailure; + } + +- if (PK11_ExtractKeyValue(symkey) == SECSuccess) { +- result = PK11_GetKeyData(symkey); +- if (result != NULL && result->len <= hash_out_len) { +- memcpy(hash_out, result->data, result->len); +- PK11_FreeSymKey(symkey); ++ /* ++ * First, we need to generate a wrapped key for PK11_Decrypt call: ++ * slot is the same slot we used in PK11_PBEKeyGen() ++ * 256 bits / 8 bit per byte ++ */ ++ PK11SymKey *wrapKey = PK11_KeyGen(slot, CKM_AES_ECB, NULL, 256/8, NULL); ++ PK11_FreeSlot(slot); ++ if (wrapKey == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to generate a wrapped key.\n"); ++ return SECFailure; ++ } ++ ++ wrapKeyData = (SECItem *)PORT_Alloc(sizeof(SECItem)); ++ /* Align the wrapped key with 32 bytes. 
*/ ++ wrapKeyData->len = (PK11_GetKeyLength(symkey) + 31) & ~31; ++ /* Allocate the aligned space for pkc5PBE key plus AESKey block */ ++ wrapKeyData->data = (unsigned char *)slapi_ch_calloc(wrapKeyData->len, sizeof(unsigned char)); ++ ++ /* Get symkey wrapped with wrapKey - required for PK11_Decrypt call */ ++ rv = PK11_WrapSymKey(CKM_AES_ECB, NULL, wrapKey, symkey, wrapKeyData); ++ if (rv != SECSuccess) { ++ PK11_FreeSymKey(symkey); ++ PK11_FreeSymKey(wrapKey); ++ SECITEM_FreeItem(wrapKeyData, PR_TRUE); ++ slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to wrap the symkey. (%d)\n", rv); ++ return SECFailure; ++ } ++ ++ /* Allocate the space for our result */ ++ void *result = (char *)slapi_ch_calloc(wrapKeyData->len, sizeof(char)); ++ unsigned int result_len = 0; ++ ++ /* User wrapKey to decrypt the wrapped contents. ++ * result is the hash that we need; ++ * result_len is the actual lengh of the data; ++ * has_out_len is the maximum (the space we allocted for hash_out) ++ */ ++ rv = PK11_Decrypt(wrapKey, CKM_AES_ECB, NULL, result, &result_len, hash_out_len, wrapKeyData->data, wrapKeyData->len); ++ PK11_FreeSymKey(symkey); ++ PK11_FreeSymKey(wrapKey); ++ SECITEM_FreeItem(wrapKeyData, PR_TRUE); ++ ++ if (rv == SECSuccess) { ++ if (result != NULL && result_len <= hash_out_len) { ++ memcpy(hash_out, result, result_len); ++ slapi_ch_free((void **)&result); + } else { +- PK11_FreeSymKey(symkey); +- slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to retrieve (get) hash output.\n"); ++ slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to retrieve (get) hash output.\n"); ++ slapi_ch_free((void **)&result); + return SECFailure; + } + } else { +- slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to extract hash output.\n"); ++ slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to extract hash output. 
(%d)\n", rv); ++ slapi_ch_free((void **)&result); + return SECFailure; + } + +diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c +index 61ed40b7d..04d0494f8 100644 +--- a/ldap/servers/slapd/main.c ++++ b/ldap/servers/slapd/main.c +@@ -2895,9 +2895,21 @@ slapd_do_all_nss_ssl_init(int slapd_exemode, int importexport_encrypt, int s_por + * is enabled or not. We use NSS for random number generation and + * other things even if we are not going to accept SSL connections. + * We also need NSS for attribute encryption/decryption on import and export. ++ * ++ * It's important to remember that while in FIPS mode the administrator should always enable ++ * the security, otherwise we don't call slapd_pk11_authenticate which is a requirement for FIPS mode + */ ++ PRBool isFIPS = slapd_pk11_isFIPS(); + int init_ssl = config_get_security(); + ++ if (isFIPS && !init_ssl) { ++ slapi_log_err(SLAPI_LOG_WARNING, "slapd_do_all_nss_ssl_init", ++ "ERROR: TLS is not enabled, and the machine is in FIPS mode. " ++ "Some functionality won't work correctly (for example, " ++ "users with PBKDF2_SHA256 password scheme won't be able to log in). " ++ "It's highly advisable to enable TLS on this instance.\n"); ++ } ++ + if (slapd_exemode == SLAPD_EXEMODE_SLAPD) { + init_ssl = init_ssl && (0 != s_port) && (s_port <= LDAP_PORT_MAX); + } else { +diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py +index 29ee5245a..e0299c5b4 100644 +--- a/src/lib389/lib389/__init__.py ++++ b/src/lib389/lib389/__init__.py +@@ -1588,6 +1588,10 @@ class DirSrv(SimpleLDAPObject, object): + :param post_open: Open the server connection after restart. + :type post_open: bool + """ ++ if self.config.get_attr_val_utf8_l("nsslapd-security") == 'on': ++ self.restart(post_open=post_open) ++ return ++ + # If it doesn't exist, create a cadb. 
+ ssca = NssSsl(dbpath=self.get_ssca_dir()) + if not ssca._db_exists(): +diff --git a/src/lib389/lib389/topologies.py b/src/lib389/lib389/topologies.py +index e9969f524..e7d56582d 100644 +--- a/src/lib389/lib389/topologies.py ++++ b/src/lib389/lib389/topologies.py +@@ -15,7 +15,7 @@ import socket + import pytest + + from lib389 import DirSrv +-from lib389.utils import generate_ds_params ++from lib389.utils import generate_ds_params, is_fips + from lib389.mit_krb5 import MitKrb5 + from lib389.saslmap import SaslMappings + from lib389.replica import ReplicationManager, Replicas +@@ -108,6 +108,10 @@ def _create_instances(topo_dict, suffix): + if role == ReplicaRole.HUB: + hs[instance.serverid] = instance + instances.update(hs) ++ # We should always enable TLS while in FIPS mode because otherwise NSS database won't be ++ # configured in a FIPS compliant way ++ if is_fips(): ++ instance.enable_tls() + log.info("Instance with parameters {} was created.".format(args_instance)) + + if "standalone1" in instances and len(instances) == 1: +diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py +index b270784ce..5ba0c6676 100644 +--- a/src/lib389/lib389/utils.py ++++ b/src/lib389/lib389/utils.py +@@ -1430,3 +1430,16 @@ def is_valid_hostname(hostname): + hostname = hostname[:-1] # strip exactly one dot from the right, if present + allowed = re.compile("(?!-)[A-Z\d-]{1,63}(? +Date: Wed, 20 Oct 2021 10:04:06 -0400 +Subject: [PATCH 04/12] Issue 4956 - Automember allows invalid regex, and does + not log proper error + +Bug Description: The server was detecting an invalid automember + regex, but it did not reject it, and it did not + log which regex rule was invalid. + +Fix Description: By properly rejecting the invalid regex will also + trigger the proper error logging to occur. + +relates: https://github.com/389ds/389-ds-base/issues/4956 + +Reviewed by: tbordaz & spichugi(Thanks!!) 
+--- + .../automember_plugin/configuration_test.py | 49 +++++++++++++++++-- + ldap/servers/plugins/automember/automember.c | 1 + + 2 files changed, 46 insertions(+), 4 deletions(-) + +diff --git a/dirsrvtests/tests/suites/automember_plugin/configuration_test.py b/dirsrvtests/tests/suites/automember_plugin/configuration_test.py +index 0f9cc49dc..4a6b596db 100644 +--- a/dirsrvtests/tests/suites/automember_plugin/configuration_test.py ++++ b/dirsrvtests/tests/suites/automember_plugin/configuration_test.py +@@ -1,21 +1,20 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2019 Red Hat, Inc. ++# Copyright (C) 2021 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). + # See LICENSE for details. + # --- END COPYRIGHT BLOCK --- + ++import ldap + import os + import pytest +- + from lib389.topologies import topology_st as topo + from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions, MemberOfPlugin +-import ldap ++from lib389._constants import DEFAULT_SUFFIX + + pytestmark = pytest.mark.tier1 + +- + @pytest.mark.bz834056 + def test_configuration(topo): + """ +@@ -52,6 +51,48 @@ def test_configuration(topo): + '"cn=SuffDef1,ou=autouserGroups,cn=config" ' + 'can not be a child of the plugin config area "cn=config"') + ++def test_invalid_regex(topo): ++ """Test invalid regex is properly reportedin the error log ++ ++ :id: a6d89f84-ec76-4871-be96-411d051800b1 ++ :setup: Standalone Instance ++ :steps: ++ 1. Setup automember ++ 2. Add invalid regex ++ 3. Error log reports useful message ++ :expectedresults: ++ 1. Success ++ 2. Success ++ 3. 
Success ++ """ ++ REGEX_DN = "cn=regex1,cn=testregex,cn=auto membership plugin,cn=plugins,cn=config" ++ REGEX_VALUE = "cn=*invalid*" ++ REGEX_ESC_VALUE = "cn=\\*invalid\\*" ++ GROUP_DN = "cn=demo_group,ou=groups," + DEFAULT_SUFFIX ++ ++ AutoMembershipPlugin(topo.standalone).remove_all("nsslapd-pluginConfigArea") ++ automemberplugin = AutoMembershipPlugin(topo.standalone) ++ ++ automember_prop = { ++ 'cn': 'testRegex', ++ 'autoMemberScope': 'ou=People,' + DEFAULT_SUFFIX, ++ 'autoMemberFilter': 'objectclass=*', ++ 'autoMemberDefaultGroup': GROUP_DN, ++ 'autoMemberGroupingAttr': 'member:dn', ++ } ++ automember_defs = AutoMembershipDefinitions(topo.standalone, "cn=Auto Membership Plugin,cn=plugins,cn=config") ++ automember_def = automember_defs.create(properties=automember_prop) ++ automember_def.add_regex_rule("regex1", GROUP_DN, include_regex=[REGEX_VALUE]) ++ ++ automemberplugin.enable() ++ topo.standalone.restart() ++ ++ # Check errors log for invalid message ++ ERR_STR1 = "automember_parse_regex_rule - Unable to parse regex rule" ++ ERR_STR2 = f"Skipping invalid inclusive regex rule in rule entry \"{REGEX_DN}\" \\(rule = \"{REGEX_ESC_VALUE}\"\\)" ++ assert topo.standalone.searchErrorsLog(ERR_STR1) ++ assert topo.standalone.searchErrorsLog(ERR_STR2) ++ + + if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) +diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c +index 39350ad53..b92b89bd5 100644 +--- a/ldap/servers/plugins/automember/automember.c ++++ b/ldap/servers/plugins/automember/automember.c +@@ -1217,6 +1217,7 @@ automember_parse_regex_rule(char *rule_string) + "automember_parse_regex_rule - Unable to parse " + "regex rule (invalid regex). Error \"%s\".\n", + recomp_result ? recomp_result : "unknown"); ++ goto bail; + } + + /* Validation has passed, so create the regex rule struct and fill it in. 
+-- +2.31.1 + diff --git a/0005-Issue-4092-systemd-tmpfiles-warnings.patch b/0005-Issue-4092-systemd-tmpfiles-warnings.patch new file mode 100644 index 0000000..8a54d97 --- /dev/null +++ b/0005-Issue-4092-systemd-tmpfiles-warnings.patch @@ -0,0 +1,245 @@ +From 9c08a053938eb28821fad7d0850c046ef2ed44c4 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 9 Dec 2020 16:16:30 -0500 +Subject: [PATCH 05/12] Issue 4092 - systemd-tmpfiles warnings + +Bug Description: + +systemd-tmpfiles warns about legacy paths in our tmpfiles configs. +Using /var/run also introduces a race condition, see the following +issue https://pagure.io/389-ds-base/issue/47429 + +Fix Description: + +Instead of using @localstatedir@/run use @localrundir@ which was +introduced in #850. + +Relates: https://github.com/389ds/389-ds-base/issues/766 +Fixes: https://github.com/389ds/389-ds-base/issues/4092 + +Reviewed by: vashirov & firstyear(Thanks!) +--- + Makefile.am | 4 ++-- + configure.ac | 10 ++++++++-- + dirsrvtests/tests/suites/basic/basic_test.py | 3 ++- + ldap/admin/src/defaults.inf.in | 8 ++++---- + ldap/servers/snmp/main.c | 8 ++++---- + src/lib389/lib389/__init__.py | 3 +++ + src/lib389/lib389/instance/options.py | 7 ++++++- + src/lib389/lib389/instance/remove.py | 13 ++++++++----- + src/lib389/lib389/instance/setup.py | 10 ++++++++-- + 9 files changed, 45 insertions(+), 21 deletions(-) + +diff --git a/Makefile.am b/Makefile.am +index 36434cf17..fc5a6a7d1 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -141,8 +141,8 @@ PATH_DEFINES = -DLOCALSTATEDIR="\"$(localstatedir)\"" -DSYSCONFDIR="\"$(sysconfd + -DLIBDIR="\"$(libdir)\"" -DBINDIR="\"$(bindir)\"" \ + -DDATADIR="\"$(datadir)\"" -DDOCDIR="\"$(docdir)\"" \ + -DSBINDIR="\"$(sbindir)\"" -DPLUGINDIR="\"$(serverplugindir)\"" \ +- -DTEMPLATEDIR="\"$(sampledatadir)\"" -DSYSTEMSCHEMADIR="\"$(systemschemadir)\"" +- ++ -DTEMPLATEDIR="\"$(sampledatadir)\"" -DSYSTEMSCHEMADIR="\"$(systemschemadir)\"" \ ++ -DLOCALRUNDIR="\"$(localrundir)\"" + # Now 
that we have all our defines in place, setup the CPPFLAGS + + # These flags are the "must have" for all components +diff --git a/configure.ac b/configure.ac +index 61bf35e4a..9845beb7d 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -418,7 +418,14 @@ fi + + m4_include(m4/fhs.m4) + +-localrundir='/run' ++# /run directory path ++AC_ARG_WITH([localrundir], ++ AS_HELP_STRING([--with-localrundir=DIR], ++ [Runtime data directory]), ++ [localrundir=$with_localrundir], ++ [localrundir="/run"]) ++AC_SUBST([localrundir]) ++ + cockpitdir=/389-console + + # installation paths - by default, we store everything +@@ -899,7 +906,6 @@ AC_SUBST(ldaplib_defs) + AC_SUBST(ldaptool_bindir) + AC_SUBST(ldaptool_opts) + AC_SUBST(plainldif_opts) +-AC_SUBST(localrundir) + + AC_SUBST(brand) + AC_SUBST(capbrand) +diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py +index 41726f073..7e80c443b 100644 +--- a/dirsrvtests/tests/suites/basic/basic_test.py ++++ b/dirsrvtests/tests/suites/basic/basic_test.py +@@ -901,7 +901,8 @@ def test_basic_ldapagent(topology_st, import_example_ldif): + # Remember, this is *forking* + check_output([os.path.join(topology_st.standalone.get_sbin_dir(), 'ldap-agent'), config_file]) + # First kill any previous agents .... 
+- pidpath = os.path.join(var_dir, 'run/ldap-agent.pid') ++ run_dir = topology_st.standalone.get_run_dir() ++ pidpath = os.path.join(run_dir, 'ldap-agent.pid') + pid = None + with open(pidpath, 'r') as pf: + pid = pf.readlines()[0].strip() +diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in +index d5f504591..e02248b89 100644 +--- a/ldap/admin/src/defaults.inf.in ++++ b/ldap/admin/src/defaults.inf.in +@@ -35,12 +35,12 @@ sysconf_dir = @sysconfdir@ + initconfig_dir = @initconfigdir@ + config_dir = @instconfigdir@/slapd-{instance_name} + local_state_dir = @localstatedir@ +-run_dir = @localstatedir@/run/dirsrv ++run_dir = @localrundir@ + # This is the expected location of ldapi. +-ldapi = @localstatedir@/run/slapd-{instance_name}.socket ++ldapi = @localrundir@/slapd-{instance_name}.socket ++pid_file = @localrundir@/slapd-{instance_name}.pid + ldapi_listen = on + ldapi_autobind = on +-pid_file = @localstatedir@/run/dirsrv/slapd-{instance_name}.pid + inst_dir = @serverdir@/slapd-{instance_name} + plugin_dir = @serverplugindir@ + system_schema_dir = @systemschemadir@ +@@ -54,7 +54,7 @@ root_dn = cn=Directory Manager + schema_dir = @instconfigdir@/slapd-{instance_name}/schema + cert_dir = @instconfigdir@/slapd-{instance_name} + +-lock_dir = @localstatedir@/lock/dirsrv/slapd-{instance_name} ++lock_dir = @localrundir@/lock/dirsrv/slapd-{instance_name} + log_dir = @localstatedir@/log/dirsrv/slapd-{instance_name} + access_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/access + audit_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/audit +diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c +index 88a4d532a..e6271a8a9 100644 +--- a/ldap/servers/snmp/main.c ++++ b/ldap/servers/snmp/main.c +@@ -287,14 +287,14 @@ load_config(char *conf_path) + } + + /* set pidfile path */ +- if ((pidfile = malloc(strlen(LOCALSTATEDIR) + strlen("/run/") + ++ if ((pidfile = malloc(strlen(LOCALRUNDIR) + strlen("/") + + strlen(LDAP_AGENT_PIDFILE) + 
1)) != NULL) { +- strncpy(pidfile, LOCALSTATEDIR, strlen(LOCALSTATEDIR) + 1); ++ strncpy(pidfile, LOCALRUNDIR, strlen(LOCALRUNDIR) + 1); + /* The above will likely not be NULL terminated, but we need to + * be sure that we're properly NULL terminated for the below + * strcat() to work properly. */ +- pidfile[strlen(LOCALSTATEDIR)] = (char)0; +- strcat(pidfile, "/run/"); ++ pidfile[strlen(LOCALRUNDIR)] = (char)0; ++ strcat(pidfile, "/"); + strcat(pidfile, LDAP_AGENT_PIDFILE); + } else { + printf("ldap-agent: malloc error processing config file\n"); +diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py +index e0299c5b4..2a0b83913 100644 +--- a/src/lib389/lib389/__init__.py ++++ b/src/lib389/lib389/__init__.py +@@ -1709,6 +1709,9 @@ class DirSrv(SimpleLDAPObject, object): + def get_bin_dir(self): + return self.ds_paths.bin_dir + ++ def get_run_dir(self): ++ return self.ds_paths.run_dir ++ + def get_plugin_dir(self): + return self.ds_paths.plugin_dir + +diff --git a/src/lib389/lib389/instance/options.py b/src/lib389/lib389/instance/options.py +index 4e083618c..d5b95e6df 100644 +--- a/src/lib389/lib389/instance/options.py ++++ b/src/lib389/lib389/instance/options.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2019 Red Hat, Inc. ++# Copyright (C) 2021 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). +@@ -32,6 +32,7 @@ format_keys = [ + 'backup_dir', + 'db_dir', + 'db_home_dir', ++ 'ldapi', + 'ldif_dir', + 'lock_dir', + 'log_dir', +@@ -233,6 +234,10 @@ class Slapd2Base(Options2): + self._helptext['local_state_dir'] = "Sets the location of Directory Server variable data. Only set this parameter in a development environment." + self._advanced['local_state_dir'] = True + ++ self._options['ldapi'] = ds_paths.ldapi ++ self._type['ldapi'] = str ++ self._helptext['ldapi'] = "Sets the location of socket interface of the Directory Server." 
++ + self._options['lib_dir'] = ds_paths.lib_dir + self._type['lib_dir'] = str + self._helptext['lib_dir'] = "Sets the location of Directory Server shared libraries. Only set this parameter in a development environment." +diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py +index d7bb48ce0..1a35ddc07 100644 +--- a/src/lib389/lib389/instance/remove.py ++++ b/src/lib389/lib389/instance/remove.py +@@ -78,13 +78,16 @@ def remove_ds_instance(dirsrv, force=False): + + _log.debug("Found instance marker at %s! Proceeding to remove ..." % dse_ldif_path) + +- # Stop the instance (if running) and now we know it really does exist +- # and hopefully have permission to access it ... +- _log.debug("Stopping instance %s" % dirsrv.serverid) +- dirsrv.stop() +- + ### ANY NEW REMOVAL ACTION MUST BE BELOW THIS LINE!!! + ++ # Remove LDAPI socket file ++ ldapi_path = os.path.join(dirsrv.ds_paths.run_dir, "slapd-%s.socket" % dirsrv.serverid) ++ if os.path.exists(ldapi_path): ++ try: ++ os.remove(ldapi_path) ++ except OSError as e: ++ _log.debug(f"Failed to remove LDAPI socket ({ldapi_path}) Error: {str(e)}") ++ + # Remove these paths: + # for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', + # 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'): +diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py +index ab7a2da85..57e7a9fd4 100644 +--- a/src/lib389/lib389/instance/setup.py ++++ b/src/lib389/lib389/instance/setup.py +@@ -732,7 +732,10 @@ class SetupDs(object): + dse += line.replace('%', '{', 1).replace('%', '}', 1) + + with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse: +- ldapi_path = os.path.join(slapd['local_state_dir'], "run/slapd-%s.socket" % slapd['instance_name']) ++ if os.path.exists(os.path.dirname(slapd['ldapi'])): ++ ldapi_path = slapd['ldapi'] ++ else: ++ ldapi_path = os.path.join(slapd['run_dir'], "slapd-%s.socket" % slapd['instance_name']) + dse_fmt = dse.format( + 
schema_dir=slapd['schema_dir'], + lock_dir=slapd['lock_dir'], +@@ -902,10 +905,13 @@ class SetupDs(object): + self.log.info("Perform SELinux labeling ...") + selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', + 'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir', +- 'run_dir', 'schema_dir', 'tmp_dir') ++ 'schema_dir', 'tmp_dir') + for path in selinux_paths: + selinux_restorecon(slapd[path]) + ++ # Don't run restorecon on the entire /run directory ++ selinux_restorecon(slapd['run_dir'] + '/dirsrv') ++ + selinux_label_port(slapd['port']) + + # Start the server +-- +2.31.1 + diff --git a/0006-Issue-4973-installer-changes-permissions-on-run.patch b/0006-Issue-4973-installer-changes-permissions-on-run.patch new file mode 100644 index 0000000..5088fb8 --- /dev/null +++ b/0006-Issue-4973-installer-changes-permissions-on-run.patch @@ -0,0 +1,113 @@ +From b4a3b88faeafa6aa197d88ee84e4b2dbadd37ace Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 1 Nov 2021 10:42:27 -0400 +Subject: [PATCH 06/12] Issue 4973 - installer changes permissions on /run + +Description: There was a regression when we switched over to using /run + that caused the installer to try and create /run which + caused the ownership to change. Fixed this by changing + the "run_dir" to /run/dirsrv + +relates: https://github.com/389ds/389-ds-base/issues/4973 + +Reviewed by: jchapman(Thanks!) 
+--- + ldap/admin/src/defaults.inf.in | 2 +- + src/lib389/lib389/instance/remove.py | 10 +--------- + src/lib389/lib389/instance/setup.py | 13 +++---------- + 3 files changed, 5 insertions(+), 20 deletions(-) + +diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in +index e02248b89..92b93d695 100644 +--- a/ldap/admin/src/defaults.inf.in ++++ b/ldap/admin/src/defaults.inf.in +@@ -35,7 +35,7 @@ sysconf_dir = @sysconfdir@ + initconfig_dir = @initconfigdir@ + config_dir = @instconfigdir@/slapd-{instance_name} + local_state_dir = @localstatedir@ +-run_dir = @localrundir@ ++run_dir = @localrundir@/dirsrv + # This is the expected location of ldapi. + ldapi = @localrundir@/slapd-{instance_name}.socket + pid_file = @localrundir@/slapd-{instance_name}.pid +diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py +index 1a35ddc07..e96db3896 100644 +--- a/src/lib389/lib389/instance/remove.py ++++ b/src/lib389/lib389/instance/remove.py +@@ -52,9 +52,9 @@ def remove_ds_instance(dirsrv, force=False): + remove_paths['ldif_dir'] = dirsrv.ds_paths.ldif_dir + remove_paths['lock_dir'] = dirsrv.ds_paths.lock_dir + remove_paths['log_dir'] = dirsrv.ds_paths.log_dir +- # remove_paths['run_dir'] = dirsrv.ds_paths.run_dir + remove_paths['inst_dir'] = dirsrv.ds_paths.inst_dir + remove_paths['etc_sysconfig'] = "%s/sysconfig/dirsrv-%s" % (dirsrv.ds_paths.sysconf_dir, dirsrv.serverid) ++ remove_paths['ldapi'] = dirsrv.ds_paths.ldapi + + tmpfiles_d_path = dirsrv.ds_paths.tmpfiles_d + "/dirsrv-" + dirsrv.serverid + ".conf" + +@@ -80,14 +80,6 @@ def remove_ds_instance(dirsrv, force=False): + + ### ANY NEW REMOVAL ACTION MUST BE BELOW THIS LINE!!! 
+ +- # Remove LDAPI socket file +- ldapi_path = os.path.join(dirsrv.ds_paths.run_dir, "slapd-%s.socket" % dirsrv.serverid) +- if os.path.exists(ldapi_path): +- try: +- os.remove(ldapi_path) +- except OSError as e: +- _log.debug(f"Failed to remove LDAPI socket ({ldapi_path}) Error: {str(e)}") +- + # Remove these paths: + # for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', + # 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'): +diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py +index 57e7a9fd4..be6854af8 100644 +--- a/src/lib389/lib389/instance/setup.py ++++ b/src/lib389/lib389/instance/setup.py +@@ -732,10 +732,6 @@ class SetupDs(object): + dse += line.replace('%', '{', 1).replace('%', '}', 1) + + with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse: +- if os.path.exists(os.path.dirname(slapd['ldapi'])): +- ldapi_path = slapd['ldapi'] +- else: +- ldapi_path = os.path.join(slapd['run_dir'], "slapd-%s.socket" % slapd['instance_name']) + dse_fmt = dse.format( + schema_dir=slapd['schema_dir'], + lock_dir=slapd['lock_dir'], +@@ -759,7 +755,7 @@ class SetupDs(object): + db_dir=slapd['db_dir'], + db_home_dir=slapd['db_home_dir'], + ldapi_enabled="on", +- ldapi=ldapi_path, ++ ldapi=slapd['ldapi'], + ldapi_autobind="on", + ) + file_dse.write(dse_fmt) +@@ -861,7 +857,7 @@ class SetupDs(object): + SER_ROOT_PW: self._raw_secure_password, + SER_DEPLOYED_DIR: slapd['prefix'], + SER_LDAPI_ENABLED: 'on', +- SER_LDAPI_SOCKET: ldapi_path, ++ SER_LDAPI_SOCKET: slapd['ldapi'], + SER_LDAPI_AUTOBIND: 'on' + } + +@@ -905,13 +901,10 @@ class SetupDs(object): + self.log.info("Perform SELinux labeling ...") + selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', + 'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir', +- 'schema_dir', 'tmp_dir') ++ 'run_dir', 'schema_dir', 'tmp_dir') + for path in selinux_paths: + selinux_restorecon(slapd[path]) + +- # Don't run restorecon on the entire /run directory +- 
selinux_restorecon(slapd['run_dir'] + '/dirsrv') +- + selinux_label_port(slapd['port']) + + # Start the server +-- +2.31.1 + diff --git a/0007-Issue-4973-update-snmp-to-use-run-dirsrv-for-PID-fil.patch b/0007-Issue-4973-update-snmp-to-use-run-dirsrv-for-PID-fil.patch new file mode 100644 index 0000000..dc375a4 --- /dev/null +++ b/0007-Issue-4973-update-snmp-to-use-run-dirsrv-for-PID-fil.patch @@ -0,0 +1,70 @@ +From c26c463ac92682dcf01ddbdc11cc1109b183eb0a Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 1 Nov 2021 16:04:28 -0400 +Subject: [PATCH 07/12] Issue 4973 - update snmp to use /run/dirsrv for PID + file + +Description: Previously SNMP would write the agent PID file directly + under /run (or /var/run), but this broke a CI test after + updating lib389/defaults.inf to use /run/dirsrv. + + Instead of hacking the CI test, I changed the path + snmp uses to: /run/dirsrv/ Which is where it + should really be written anyway. + +relates: https://github.com/389ds/389-ds-base/issues/4973 + +Reviewed by: vashirov(Thanks!) +--- + ldap/servers/snmp/main.c | 4 ++-- + wrappers/systemd-snmp.service.in | 6 +++--- + 2 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c +index e6271a8a9..d8eb918f6 100644 +--- a/ldap/servers/snmp/main.c ++++ b/ldap/servers/snmp/main.c +@@ -287,14 +287,14 @@ load_config(char *conf_path) + } + + /* set pidfile path */ +- if ((pidfile = malloc(strlen(LOCALRUNDIR) + strlen("/") + ++ if ((pidfile = malloc(strlen(LOCALRUNDIR) + strlen("/dirsrv/") + + strlen(LDAP_AGENT_PIDFILE) + 1)) != NULL) { + strncpy(pidfile, LOCALRUNDIR, strlen(LOCALRUNDIR) + 1); + /* The above will likely not be NULL terminated, but we need to + * be sure that we're properly NULL terminated for the below + * strcat() to work properly. 
*/ + pidfile[strlen(LOCALRUNDIR)] = (char)0; +- strcat(pidfile, "/"); ++ strcat(pidfile, "/dirsrv/"); + strcat(pidfile, LDAP_AGENT_PIDFILE); + } else { + printf("ldap-agent: malloc error processing config file\n"); +diff --git a/wrappers/systemd-snmp.service.in b/wrappers/systemd-snmp.service.in +index 477bc623d..f18766cb4 100644 +--- a/wrappers/systemd-snmp.service.in ++++ b/wrappers/systemd-snmp.service.in +@@ -1,7 +1,7 @@ + # do not edit this file in /lib/systemd/system - instead do the following: + # cp /lib/systemd/system/dirsrv-snmp.service /etc/systemd/system/ + # edit /etc/systemd/system/dirsrv-snmp.service +-# systemctl daemon-reload ++# systemctl daemon-reload + # systemctl (re)start dirsrv-snmp.service + [Unit] + Description=@capbrand@ Directory Server SNMP Subagent. +@@ -9,8 +9,8 @@ After=network.target + + [Service] + Type=forking +-PIDFile=/run/ldap-agent.pid +-ExecStart=@sbindir@/ldap-agent @configdir@/ldap-agent.conf ++PIDFile=/run/dirsrv/ldap-agent.pid ++ExecStart=@sbindir@/ldap-agent @configdir@/ldap-agent.conf + + [Install] + WantedBy=multi-user.target +-- +2.31.1 + diff --git a/0008-Issue-4978-make-installer-robust.patch b/0008-Issue-4978-make-installer-robust.patch new file mode 100644 index 0000000..90704e4 --- /dev/null +++ b/0008-Issue-4978-make-installer-robust.patch @@ -0,0 +1,70 @@ +From 88d6ceb18e17c5a18bafb5092ae0c22241b212df Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Mon, 1 Nov 2021 14:01:11 -0400 +Subject: [PATCH 08/12] Issue 4978 - make installer robust + +Description: When run in a container the server can fail to start + because the installer sets the db_home_dir to /dev/shm, + but in containers the default size of /dev/shm is too + small for libdb. We should detect if we are in a + container and not set db_home_dir to /dev/shm. + + During instance removal, if an instance was not properly + created then it can not be removed either. 
Make the + uninstall more robust to accept some errors and continue + removing the instance. + +relates: https://github.com/389ds/389-ds-base/issues/4978 + +Reviewed by: firstyear & tbordaz(Thanks!) +--- + src/lib389/lib389/instance/setup.py | 9 +++++++++ + src/lib389/lib389/utils.py | 5 ++++- + 2 files changed, 13 insertions(+), 1 deletion(-) + +diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py +index be6854af8..7b0147cf9 100644 +--- a/src/lib389/lib389/instance/setup.py ++++ b/src/lib389/lib389/instance/setup.py +@@ -731,6 +731,15 @@ class SetupDs(object): + for line in template_dse.readlines(): + dse += line.replace('%', '{', 1).replace('%', '}', 1) + ++ # Check if we are in a container, if so don't use /dev/shm for the db home dir ++ # as containers typically don't allocate enough space for dev/shm and we don't ++ # want to unexpectedly break the server after an upgrade ++ container_result = subprocess.run(["systemd-detect-virt", "-c"], capture_output=True) ++ if container_result.returncode == 0: ++ # In a container, set the db_home_dir to the db path ++ self.log.debug("Container detected setting db home directory to db directory.") ++ slapd['db_home_dir'] = slapd['db_dir'] ++ + with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse: + dse_fmt = dse.format( + schema_dir=slapd['schema_dir'], +diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py +index 5ba0c6676..c63b4d0ee 100644 +--- a/src/lib389/lib389/utils.py ++++ b/src/lib389/lib389/utils.py +@@ -266,6 +266,8 @@ def selinux_label_port(port, remove_label=False): + :type remove_label: boolean + :raises: ValueError: Error message + """ ++ if port is None: ++ return + try: + import selinux + except ImportError: +@@ -662,7 +664,8 @@ def isLocalHost(host_name): + Uses gethostbyname() + """ + # first see if this is a "well known" local hostname +- if host_name == 'localhost' or \ ++ if host_name is None or \ ++ host_name == 'localhost' or \ + 
host_name == 'localhost.localdomain' or \ + host_name == socket.gethostname(): + return True +-- +2.31.1 + diff --git a/0009-Issue-4972-gecos-with-IA5-introduces-a-compatibility.patch b/0009-Issue-4972-gecos-with-IA5-introduces-a-compatibility.patch new file mode 100644 index 0000000..e895ba1 --- /dev/null +++ b/0009-Issue-4972-gecos-with-IA5-introduces-a-compatibility.patch @@ -0,0 +1,468 @@ +From 2ae2f53756b6f13e2816bb30812740cb7ad97403 Mon Sep 17 00:00:00 2001 +From: tbordaz +Date: Fri, 5 Nov 2021 09:56:43 +0100 +Subject: [PATCH 09/12] Issue 4972 - gecos with IA5 introduces a compatibility + issue with previous (#4981) + +releases where it was DirectoryString + +Bug description: + For years 'gecos' was DirectoryString (UTF8), with #50933 it was restricted to IA5 (ascii) + https://github.com/389ds/389-ds-base/commit/0683bcde1b667b6d0ca6e8d1ef605f17c51ea2f7# + + IA5 definition conforms rfc2307 but is a problem for existing deployments + where entries can have 'gecos' attribute value with UTF8. + +Fix description: + Revert the definition to of 'gecos' being Directory String + + Additional fix to make test_replica_backup_and_restore more + robust to CI + +relates: https://github.com/389ds/389-ds-base/issues/4972 + +Reviewed by: William Brown, Pierre Rogier, James Chapman (Thanks !) 
+ +Platforms tested: F34 +--- + .../tests/suites/schema/schema_test.py | 398 +++++++++++++++++- + ldap/schema/10rfc2307compat.ldif | 6 +- + 2 files changed, 400 insertions(+), 4 deletions(-) + +diff --git a/dirsrvtests/tests/suites/schema/schema_test.py b/dirsrvtests/tests/suites/schema/schema_test.py +index d590624b6..5d62b8d59 100644 +--- a/dirsrvtests/tests/suites/schema/schema_test.py ++++ b/dirsrvtests/tests/suites/schema/schema_test.py +@@ -18,8 +18,12 @@ import pytest + import six + from ldap.cidict import cidict + from ldap.schema import SubSchema ++from lib389.schema import SchemaLegacy + from lib389._constants import * +-from lib389.topologies import topology_st ++from lib389.topologies import topology_st, topology_m2 as topo_m2 ++from lib389.idm.user import UserAccounts, UserAccount ++from lib389.replica import ReplicationManager ++from lib389.utils import ensure_bytes + + pytestmark = pytest.mark.tier1 + +@@ -165,6 +169,398 @@ def test_schema_comparewithfiles(topology_st): + + log.info('test_schema_comparewithfiles: PASSED') + ++def test_gecos_directoryString(topology_st): ++ """Check that gecos supports directoryString value ++ ++ :id: aee422bb-6299-4124-b5cd-d7393dac19d3 ++ ++ :setup: Standalone instance ++ ++ :steps: ++ 1. Add a common user ++ 2. replace gecos with a direstoryString value ++ ++ :expectedresults: ++ 1. Success ++ 2. 
Success ++ """ ++ ++ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) ++ ++ user_properties = { ++ 'uid': 'testuser', ++ 'cn' : 'testuser', ++ 'sn' : 'user', ++ 'uidNumber' : '1000', ++ 'gidNumber' : '2000', ++ 'homeDirectory' : '/home/testuser', ++ } ++ testuser = users.create(properties=user_properties) ++ ++ # Add a gecos UTF value ++ testuser.replace('gecos', 'Hélène') ++ ++def test_gecos_mixed_definition_topo(topo_m2, request): ++ """Check that replication is still working if schema contains ++ definitions that does not conform with a replicated entry ++ ++ :id: d5940e71-d18a-4b71-aaf7-b9185361fffe ++ :setup: Two suppliers replication setup ++ :steps: ++ 1. Create a testuser on M1 ++ 2 Stop M1 and M2 ++ 3 Change gecos def on M2 to be IA5 ++ 4 Update testuser with gecos directoryString value ++ 5 Check replication is still working ++ :expectedresults: ++ 1. success ++ 2. success ++ 3. success ++ 4. success ++ 5. success ++ ++ """ ++ ++ repl = ReplicationManager(DEFAULT_SUFFIX) ++ m1 = topo_m2.ms["supplier1"] ++ m2 = topo_m2.ms["supplier2"] ++ ++ ++ # create a test user ++ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX) ++ testuser = UserAccount(m1, testuser_dn) ++ try: ++ testuser.create(properties={ ++ 'uid': 'testuser', ++ 'cn': 'testuser', ++ 'sn': 'testuser', ++ 'uidNumber' : '1000', ++ 'gidNumber' : '2000', ++ 'homeDirectory' : '/home/testuser', ++ }) ++ except ldap.ALREADY_EXISTS: ++ pass ++ repl.wait_for_replication(m1, m2) ++ ++ # Stop suppliers to update the schema ++ m1.stop() ++ m2.stop() ++ ++ # on M1: gecos is DirectoryString (default) ++ # on M2: gecos is IA5 ++ schema_filename = (m2.schemadir + "/99user.ldif") ++ try: ++ with open(schema_filename, 'w') as schema_file: ++ schema_file.write("dn: cn=schema\n") ++ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " + ++ "'gecos' DESC 'The GECOS field; the common name' " + ++ "EQUALITY caseIgnoreIA5Match " + ++ "SUBSTR caseIgnoreIA5SubstringsMatch " + ++ "SYNTAX 
1.3.6.1.4.1.1466.115.121.1.26 " + ++ "SINGLE-VALUE )\n") ++ os.chmod(schema_filename, 0o777) ++ except OSError as e: ++ log.fatal("Failed to update schema file: " + ++ "{} Error: {}".format(schema_filename, str(e))) ++ ++ # start the instances ++ m1.start() ++ m2.start() ++ ++ # Check that gecos is IA5 on M2 ++ schema = SchemaLegacy(m2) ++ attributetypes = schema.query_attributetype('gecos') ++ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26" ++ ++ ++ # Add a gecos UTF value on M1 ++ testuser.replace('gecos', 'Hélène') ++ ++ # Check replication is still working ++ testuser.replace('displayName', 'ascii value') ++ repl.wait_for_replication(m1, m2) ++ testuser_m2 = UserAccount(m2, testuser_dn) ++ assert testuser_m2.exists() ++ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value' ++ ++ def fin(): ++ m1.start() ++ m2.start() ++ testuser.delete() ++ repl.wait_for_replication(m1, m2) ++ ++ # on M2 restore a default 99user.ldif ++ m2.stop() ++ os.remove(m2.schemadir + "/99user.ldif") ++ schema_filename = (m2.schemadir + "/99user.ldif") ++ try: ++ with open(schema_filename, 'w') as schema_file: ++ schema_file.write("dn: cn=schema\n") ++ os.chmod(schema_filename, 0o777) ++ except OSError as e: ++ log.fatal("Failed to update schema file: " + ++ "{} Error: {}".format(schema_filename, str(e))) ++ m2.start() ++ m1.start() ++ ++ request.addfinalizer(fin) ++ ++def test_gecos_directoryString_wins_M1(topo_m2, request): ++ """Check that if inital syntax are IA5(M2) and DirectoryString(M1) ++ Then directoryString wins when nsSchemaCSN M1 is the greatest ++ ++ :id: ad119fa5-7671-45c8-b2ef-0b28ffb68fdb ++ :setup: Two suppliers replication setup ++ :steps: ++ 1. 
Create a testuser on M1 ++ 2 Stop M1 and M2 ++ 3 Change gecos def on M2 to be IA5 ++ 4 Start M1 and M2 ++ 5 Update M1 schema so that M1 has greatest nsSchemaCSN ++ 6 Update testuser with gecos directoryString value ++ 7 Check replication is still working ++ 8 Check gecos is DirectoryString on M1 and M2 ++ :expectedresults: ++ 1. success ++ 2. success ++ 3. success ++ 4. success ++ 5. success ++ 6. success ++ 7. success ++ 8. success ++ ++ """ ++ ++ repl = ReplicationManager(DEFAULT_SUFFIX) ++ m1 = topo_m2.ms["supplier1"] ++ m2 = topo_m2.ms["supplier2"] ++ ++ ++ # create a test user ++ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX) ++ testuser = UserAccount(m1, testuser_dn) ++ try: ++ testuser.create(properties={ ++ 'uid': 'testuser', ++ 'cn': 'testuser', ++ 'sn': 'testuser', ++ 'uidNumber' : '1000', ++ 'gidNumber' : '2000', ++ 'homeDirectory' : '/home/testuser', ++ }) ++ except ldap.ALREADY_EXISTS: ++ pass ++ repl.wait_for_replication(m1, m2) ++ ++ # Stop suppliers to update the schema ++ m1.stop() ++ m2.stop() ++ ++ # on M1: gecos is DirectoryString (default) ++ # on M2: gecos is IA5 ++ schema_filename = (m2.schemadir + "/99user.ldif") ++ try: ++ with open(schema_filename, 'w') as schema_file: ++ schema_file.write("dn: cn=schema\n") ++ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " + ++ "'gecos' DESC 'The GECOS field; the common name' " + ++ "EQUALITY caseIgnoreIA5Match " + ++ "SUBSTR caseIgnoreIA5SubstringsMatch " + ++ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + ++ "SINGLE-VALUE )\n") ++ os.chmod(schema_filename, 0o777) ++ except OSError as e: ++ log.fatal("Failed to update schema file: " + ++ "{} Error: {}".format(schema_filename, str(e))) ++ ++ # start the instances ++ m1.start() ++ m2.start() ++ ++ # Check that gecos is IA5 on M2 ++ schema = SchemaLegacy(m2) ++ attributetypes = schema.query_attributetype('gecos') ++ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26" ++ ++ ++ # update M1 schema to increase its 
nsschemaCSN ++ new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )" ++ m1.schema.add_schema('attributetypes', ensure_bytes(new_at)) ++ ++ # Add a gecos UTF value on M1 ++ testuser.replace('gecos', 'Hélène') ++ ++ # Check replication is still working ++ testuser.replace('displayName', 'ascii value') ++ repl.wait_for_replication(m1, m2) ++ testuser_m2 = UserAccount(m2, testuser_dn) ++ assert testuser_m2.exists() ++ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value' ++ ++ # Check that gecos is DirectoryString on M1 ++ schema = SchemaLegacy(m1) ++ attributetypes = schema.query_attributetype('gecos') ++ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15" ++ ++ # Check that gecos is DirectoryString on M2 ++ schema = SchemaLegacy(m2) ++ attributetypes = schema.query_attributetype('gecos') ++ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15" ++ ++ def fin(): ++ m1.start() ++ m2.start() ++ testuser.delete() ++ m1.schema.del_schema('attributetypes', ensure_bytes(new_at)) ++ repl.wait_for_replication(m1, m2) ++ ++ # on M2 restore a default 99user.ldif ++ m2.stop() ++ os.remove(m2.schemadir + "/99user.ldif") ++ schema_filename = (m2.schemadir + "/99user.ldif") ++ try: ++ with open(schema_filename, 'w') as schema_file: ++ schema_file.write("dn: cn=schema\n") ++ os.chmod(schema_filename, 0o777) ++ except OSError as e: ++ log.fatal("Failed to update schema file: " + ++ "{} Error: {}".format(schema_filename, str(e))) ++ m2.start() ++ m1.start() ++ ++ request.addfinalizer(fin) ++ ++def test_gecos_directoryString_wins_M2(topo_m2, request): ++ """Check that if inital syntax are IA5(M2) and DirectoryString(M1) ++ Then directoryString wins when nsSchemaCSN M2 is the greatest ++ ++ :id: 2da7f1b1-f86d-4072-a940-ba56d4bc8348 ++ :setup: Two suppliers replication setup ++ :steps: ++ 1. 
Create a testuser on M1 ++ 2 Stop M1 and M2 ++ 3 Change gecos def on M2 to be IA5 ++ 4 Start M1 and M2 ++ 5 Update M2 schema so that M2 has greatest nsSchemaCSN ++ 6 Update testuser on M2 and trigger replication to M1 ++ 7 Update testuser on M2 with gecos directoryString value ++ 8 Check replication is still working ++ 9 Check gecos is DirectoryString on M1 and M2 ++ :expectedresults: ++ 1. success ++ 2. success ++ 3. success ++ 4. success ++ 5. success ++ 6. success ++ 7. success ++ 8. success ++ 9. success ++ ++ """ ++ ++ repl = ReplicationManager(DEFAULT_SUFFIX) ++ m1 = topo_m2.ms["supplier1"] ++ m2 = topo_m2.ms["supplier2"] ++ ++ ++ # create a test user ++ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX) ++ testuser = UserAccount(m1, testuser_dn) ++ try: ++ testuser.create(properties={ ++ 'uid': 'testuser', ++ 'cn': 'testuser', ++ 'sn': 'testuser', ++ 'uidNumber' : '1000', ++ 'gidNumber' : '2000', ++ 'homeDirectory' : '/home/testuser', ++ }) ++ except ldap.ALREADY_EXISTS: ++ pass ++ testuser.replace('displayName', 'to trigger replication M1-> M2') ++ repl.wait_for_replication(m1, m2) ++ ++ # Stop suppliers to update the schema ++ m1.stop() ++ m2.stop() ++ ++ # on M1: gecos is DirectoryString (default) ++ # on M2: gecos is IA5 ++ schema_filename = (m2.schemadir + "/99user.ldif") ++ try: ++ with open(schema_filename, 'w') as schema_file: ++ schema_file.write("dn: cn=schema\n") ++ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " + ++ "'gecos' DESC 'The GECOS field; the common name' " + ++ "EQUALITY caseIgnoreIA5Match " + ++ "SUBSTR caseIgnoreIA5SubstringsMatch " + ++ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + ++ "SINGLE-VALUE )\n") ++ os.chmod(schema_filename, 0o777) ++ except OSError as e: ++ log.fatal("Failed to update schema file: " + ++ "{} Error: {}".format(schema_filename, str(e))) ++ ++ # start the instances ++ m1.start() ++ m2.start() ++ ++ # Check that gecos is IA5 on M2 ++ schema = SchemaLegacy(m2) ++ attributetypes = 
schema.query_attributetype('gecos') ++ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26" ++ ++ # update M2 schema to increase its nsschemaCSN ++ new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )" ++ m2.schema.add_schema('attributetypes', ensure_bytes(new_at)) ++ ++ # update just to trigger replication M2->M1 ++ # and update of M2 schema ++ testuser_m2 = UserAccount(m2, testuser_dn) ++ testuser_m2.replace('displayName', 'to trigger replication M2-> M1') ++ ++ # Add a gecos UTF value on M1 ++ testuser.replace('gecos', 'Hélène') ++ ++ # Check replication is still working ++ testuser.replace('displayName', 'ascii value') ++ repl.wait_for_replication(m1, m2) ++ assert testuser_m2.exists() ++ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value' ++ ++ # Check that gecos is DirectoryString on M1 ++ schema = SchemaLegacy(m1) ++ attributetypes = schema.query_attributetype('gecos') ++ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15" ++ ++ # Check that gecos is DirectoryString on M2 ++ schema = SchemaLegacy(m2) ++ attributetypes = schema.query_attributetype('gecos') ++ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15" ++ ++ def fin(): ++ m1.start() ++ m2.start() ++ testuser.delete() ++ m1.schema.del_schema('attributetypes', ensure_bytes(new_at)) ++ repl.wait_for_replication(m1, m2) ++ ++ # on M2 restore a default 99user.ldif ++ m2.stop() ++ os.remove(m2.schemadir + "/99user.ldif") ++ schema_filename = (m2.schemadir + "/99user.ldif") ++ try: ++ with open(schema_filename, 'w') as schema_file: ++ schema_file.write("dn: cn=schema\n") ++ os.chmod(schema_filename, 0o777) ++ except OSError as e: ++ log.fatal("Failed to update schema file: " + ++ "{} Error: {}".format(schema_filename, str(e))) ++ m2.start() ++ ++ request.addfinalizer(fin) + + if __name__ == '__main__': + # Run isolated +diff --git a/ldap/schema/10rfc2307compat.ldif 
b/ldap/schema/10rfc2307compat.ldif +index 8ba72e1e3..998b8983b 100644 +--- a/ldap/schema/10rfc2307compat.ldif ++++ b/ldap/schema/10rfc2307compat.ldif +@@ -21,9 +21,9 @@ attributeTypes: ( + attributeTypes: ( + 1.3.6.1.1.1.1.2 NAME 'gecos' + DESC 'The GECOS field; the common name' +- EQUALITY caseIgnoreIA5Match +- SUBSTR caseIgnoreIA5SubstringsMatch +- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ++ EQUALITY caseIgnoreMatch ++ SUBSTR caseIgnoreSubstringsMatch ++ SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 + SINGLE-VALUE + ) + attributeTypes: ( +-- +2.31.1 + diff --git a/0010-Issue-4997-Function-declaration-compiler-error-on-1..patch b/0010-Issue-4997-Function-declaration-compiler-error-on-1..patch new file mode 100644 index 0000000..df7195a --- /dev/null +++ b/0010-Issue-4997-Function-declaration-compiler-error-on-1..patch @@ -0,0 +1,32 @@ +From 3909877f12e50556e844bc20e72870a4fa905ada Mon Sep 17 00:00:00 2001 +From: James Chapman +Date: Tue, 9 Nov 2021 12:55:28 +0000 +Subject: [PATCH 10/12] Issue 4997 - Function declaration compiler error on + 1.4.3 + +Bug description: Building the server on the 1.4.3 branch generates a +compiler error due to a typo in function declaration. 
+ +Fixes: https://github.com/389ds/389-ds-base/issues/4997 + +Reviewed by: @jchapman (one line commit rule) +--- + ldap/servers/slapd/slapi-private.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h +index 570765e47..d6d74e8a7 100644 +--- a/ldap/servers/slapd/slapi-private.h ++++ b/ldap/servers/slapd/slapi-private.h +@@ -273,7 +273,7 @@ void *csngen_register_callbacks(CSNGen *gen, GenCSNFn genFn, void *genArg, Abort + void csngen_unregister_callbacks(CSNGen *gen, void *cookie); + + /* debugging function */ +-void csngen_dump_state(const CSNGen *gen); ++void csngen_dump_state(const CSNGen *gen, int severity); + + /* this function tests csn generator */ + void csngen_test(void); +-- +2.31.1 + diff --git a/0011-Issue-4978-use-more-portable-python-command-for-chec.patch b/0011-Issue-4978-use-more-portable-python-command-for-chec.patch new file mode 100644 index 0000000..1414b9d --- /dev/null +++ b/0011-Issue-4978-use-more-portable-python-command-for-chec.patch @@ -0,0 +1,32 @@ +From 60d570e52465b58167301f64792f5f85cbc85e20 Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Wed, 10 Nov 2021 08:53:45 -0500 +Subject: [PATCH 11/12] Issue 4978 - use more portable python command for + checking containers + +Description: During the installation check for containers use arguments + for subprocess.run() that work on all versions of python + +relates: https://github.com/389ds/389-ds-base/issues/4978 + +Reviewed by: mreynolds(one line commit rule) +--- + src/lib389/lib389/instance/setup.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py +index 7b0147cf9..b23d2deb8 100644 +--- a/src/lib389/lib389/instance/setup.py ++++ b/src/lib389/lib389/instance/setup.py +@@ -734,7 +734,7 @@ class SetupDs(object): + # Check if we are in a container, if so don't use /dev/shm for the db home dir + # as 
containers typically don't allocate enough space for dev/shm and we don't + # want to unexpectedly break the server after an upgrade +- container_result = subprocess.run(["systemd-detect-virt", "-c"], capture_output=True) ++ container_result = subprocess.run(["systemd-detect-virt", "-c"], stdout=subprocess.PIPE) + if container_result.returncode == 0: + # In a container, set the db_home_dir to the db path + self.log.debug("Container detected setting db home directory to db directory.") +-- +2.31.1 + diff --git a/0012-Issue-4959-Invalid-etc-hosts-setup-can-cause-isLocal.patch b/0012-Issue-4959-Invalid-etc-hosts-setup-can-cause-isLocal.patch new file mode 100644 index 0000000..5600d8c --- /dev/null +++ b/0012-Issue-4959-Invalid-etc-hosts-setup-can-cause-isLocal.patch @@ -0,0 +1,31 @@ +From 2c6653edef793d46815e6df607e55d68e14fe232 Mon Sep 17 00:00:00 2001 +From: spike +Date: Fri, 5 Nov 2021 13:56:41 +0100 +Subject: [PATCH 12/12] Issue 4959 - Invalid /etc/hosts setup can cause + isLocalHost to fail. + +Description: Use local_simple_allocate in dsctl so that isLocal is always set properly + +Relates: https://github.com/389ds/389-ds-base/issues/4959 + +Reviewed by: @droideck (Thanks!) +--- + src/lib389/cli/dsctl | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl +index b6c42b5cc..d2ea6cd29 100755 +--- a/src/lib389/cli/dsctl ++++ b/src/lib389/cli/dsctl +@@ -135,7 +135,7 @@ if __name__ == '__main__': + log.error("Unable to access instance information. Are you running as the correct user? 
(usually dirsrv or root)") + sys.exit(1) + +- inst.allocate(insts[0]) ++ inst.local_simple_allocate(insts[0]['server-id']) + log.debug('Instance allocated') + + try: +-- +2.31.1 + diff --git a/0013-CVE-2021-4091-BZ-2030367-double-free-of-the-virtual-.patch b/0013-CVE-2021-4091-BZ-2030367-double-free-of-the-virtual-.patch new file mode 100644 index 0000000..b7da4bf --- /dev/null +++ b/0013-CVE-2021-4091-BZ-2030367-double-free-of-the-virtual-.patch @@ -0,0 +1,105 @@ +From d000349089eb15b3476ec302f4279f118336290e Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 16 Dec 2021 16:13:08 -0500 +Subject: [PATCH 1/2] CVE-2021-4091 (BZ#2030367) double-free of the virtual + attribute context in persistent search + +description: + A search is processed by a worker using a private pblock. + If the search is persistent, the worker spawn a thread + and kind of duplicate its private pblock so that the spawn + thread continue to process the persistent search. + Then worker ends the initial search, reinit (free) its private pblock, + and returns monitoring the wait_queue. + When the persistent search completes, it frees the duplicated + pblock. + The problem is that private pblock and duplicated pblock + are referring to a same structure (pb_vattr_context). + That lead to a double free + +Fix: + When cloning the pblock (slapi_pblock_clone) make sure + to transfert the references inside the original (private) + pblock to the target (cloned) one + That includes pb_vattr_context pointer. + +Reviewed by: Mark Reynolds, James Chapman, Pierre Rogier (Thanks !) 
+--- + ldap/servers/slapd/connection.c | 8 +++++--- + ldap/servers/slapd/pblock.c | 14 ++++++++++++-- + 2 files changed, 17 insertions(+), 5 deletions(-) + +diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c +index e0c1a52d2..fc7ed9c4a 100644 +--- a/ldap/servers/slapd/connection.c ++++ b/ldap/servers/slapd/connection.c +@@ -1823,9 +1823,11 @@ connection_threadmain() + pthread_mutex_unlock(&(conn->c_mutex)); + } + /* ps_add makes a shallow copy of the pb - so we +- * can't free it or init it here - just set operation to NULL. +- * ps_send_results will call connection_remove_operation_ext to free it +- */ ++ * can't free it or init it here - just set operation to NULL. ++ * ps_send_results will call connection_remove_operation_ext to free it ++ * The connection_thread private pblock ('pb') has be cloned and should only ++ * be reinit (slapi_pblock_init) ++ */ + slapi_pblock_set(pb, SLAPI_OPERATION, NULL); + slapi_pblock_init(pb); + } else { +diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c +index a64986aeb..c78d1250f 100644 +--- a/ldap/servers/slapd/pblock.c ++++ b/ldap/servers/slapd/pblock.c +@@ -292,6 +292,12 @@ _pblock_assert_pb_deprecated(Slapi_PBlock *pblock) + } + } + ++/* It clones the pblock ++ * the content of the source pblock is transfered ++ * to the target pblock (returned) ++ * The source pblock should not be used for any operation ++ * it needs to be reinit (slapi_pblock_init) ++ */ + Slapi_PBlock * + slapi_pblock_clone(Slapi_PBlock *pb) + { +@@ -312,28 +318,32 @@ slapi_pblock_clone(Slapi_PBlock *pb) + if (pb->pb_task != NULL) { + _pblock_assert_pb_task(new_pb); + *(new_pb->pb_task) = *(pb->pb_task); ++ memset(pb->pb_task, 0, sizeof(slapi_pblock_task)); + } + if (pb->pb_mr != NULL) { + _pblock_assert_pb_mr(new_pb); + *(new_pb->pb_mr) = *(pb->pb_mr); ++ memset(pb->pb_mr, 0, sizeof(slapi_pblock_matching_rule)); + } + if (pb->pb_misc != NULL) { + _pblock_assert_pb_misc(new_pb); + *(new_pb->pb_misc) = 
*(pb->pb_misc); ++ memset(pb->pb_misc, 0, sizeof(slapi_pblock_misc)); + } + if (pb->pb_intop != NULL) { + _pblock_assert_pb_intop(new_pb); + *(new_pb->pb_intop) = *(pb->pb_intop); +- /* set pwdpolicy to NULL so this clone allocates its own policy */ +- new_pb->pb_intop->pwdpolicy = NULL; ++ memset(pb->pb_intop, 0, sizeof(slapi_pblock_intop)); + } + if (pb->pb_intplugin != NULL) { + _pblock_assert_pb_intplugin(new_pb); + *(new_pb->pb_intplugin) = *(pb->pb_intplugin); ++ memset(pb->pb_intplugin, 0,sizeof(slapi_pblock_intplugin)); + } + if (pb->pb_deprecated != NULL) { + _pblock_assert_pb_deprecated(new_pb); + *(new_pb->pb_deprecated) = *(pb->pb_deprecated); ++ memset(pb->pb_deprecated, 0, sizeof(slapi_pblock_deprecated)); + } + #ifdef PBLOCK_ANALYTICS + new_pb->analytics = NULL; +-- +2.31.1 + diff --git a/0014-Issue-5127-run-restorecon-on-dev-shm-at-server-start.patch b/0014-Issue-5127-run-restorecon-on-dev-shm-at-server-start.patch new file mode 100644 index 0000000..7097431 --- /dev/null +++ b/0014-Issue-5127-run-restorecon-on-dev-shm-at-server-start.patch @@ -0,0 +1,102 @@ +From 03ca5111a8de602ecef9ad33206ba593b242d0df Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Fri, 21 Jan 2022 10:15:35 -0500 +Subject: [PATCH 1/2] Issue 5127 - run restorecon on /dev/shm at server startup + +Description: + +Update the systemd service file to execute a script that runs +restorecon on the DB home directory. This addresses issues with +backup/restore, reboot, and FS restore issues that can happen when +/dev/shm is missing or created outside of dscreate. + +relates: https://github.com/389ds/389-ds-base/issues/5127 + +Reviewed by: progier & viktor (Thanks!!) 
+--- + Makefile.am | 2 +- + rpm/389-ds-base.spec.in | 1 + + wrappers/ds_selinux_restorecon.sh.in | 33 ++++++++++++++++++++++++++++ + wrappers/systemd.template.service.in | 1 + + 4 files changed, 36 insertions(+), 1 deletion(-) + create mode 100644 wrappers/ds_selinux_restorecon.sh.in + +diff --git a/Makefile.am b/Makefile.am +index fc5a6a7d1..d6ad273c3 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -775,7 +775,7 @@ libexec_SCRIPTS += ldap/admin/src/scripts/ds_selinux_enabled \ + ldap/admin/src/scripts/ds_selinux_port_query + endif + if SYSTEMD +-libexec_SCRIPTS += wrappers/ds_systemd_ask_password_acl ++libexec_SCRIPTS += wrappers/ds_systemd_ask_password_acl wrappers/ds_selinux_restorecon.sh + endif + + install-data-hook: +diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in +index d80de8422..6c0d95abd 100644 +--- a/rpm/389-ds-base.spec.in ++++ b/rpm/389-ds-base.spec.in +@@ -623,6 +623,7 @@ exit 0 + %{_sbindir}/ns-slapd + %{_mandir}/man8/ns-slapd.8.gz + %{_libexecdir}/%{pkgname}/ds_systemd_ask_password_acl ++%{_libexecdir}/%{pkgname}/ds_selinux_restorecon.sh + %{_mandir}/man5/99user.ldif.5.gz + %{_mandir}/man5/certmap.conf.5.gz + %{_mandir}/man5/slapd-collations.conf.5.gz +diff --git a/wrappers/ds_selinux_restorecon.sh.in b/wrappers/ds_selinux_restorecon.sh.in +new file mode 100644 +index 000000000..063347de3 +--- /dev/null ++++ b/wrappers/ds_selinux_restorecon.sh.in +@@ -0,0 +1,33 @@ ++#!/bin/sh ++# BEGIN COPYRIGHT BLOCK ++# Copyright (C) 2022 Red Hat, Inc. ++# ++# All rights reserved. ++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# END COPYRIGHT BLOCK ++ ++# Make sure we have the path to the dse.ldif ++if [ -z $1 ] ++then ++ echo "usage: ${0} /etc/dirsrv/slapd-/dse.ldif" ++ exit 0 ++fi ++ ++if ! 
command -v restorecon &> /dev/null ++then ++ # restorecon is not available ++ exit 0 ++fi ++ ++# Grep the db_home_dir out of the config file ++DS_HOME_DIR=`grep 'nsslapd-db-home-directory: ' $1 | awk '{print $2}'` ++if [ -z "$DS_HOME_DIR" ] ++then ++ # No DB home set, that's ok ++ exit 0 ++fi ++ ++# Now run restorecon ++restorecon ${DS_HOME_DIR} +diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in +index a8c21a9be..4485e0ec0 100644 +--- a/wrappers/systemd.template.service.in ++++ b/wrappers/systemd.template.service.in +@@ -14,6 +14,7 @@ EnvironmentFile=-@initconfigdir@/@package_name@ + EnvironmentFile=-@initconfigdir@/@package_name@-%i + PIDFile=/run/@package_name@/slapd-%i.pid + ExecStartPre=@libexecdir@/ds_systemd_ask_password_acl @instconfigdir@/slapd-%i/dse.ldif ++ExecStartPre=@libexecdir@/ds_selinux_restorecon.sh @instconfigdir@/slapd-%i/dse.ldif + ExecStart=@sbindir@/ns-slapd -D @instconfigdir@/slapd-%i -i /run/@package_name@/slapd-%i.pid + PrivateTmp=on + +-- +2.31.1 + diff --git a/0015-Issue-5127-ds_selinux_restorecon.sh-always-exit-0.patch b/0015-Issue-5127-ds_selinux_restorecon.sh-always-exit-0.patch new file mode 100644 index 0000000..566d0ea --- /dev/null +++ b/0015-Issue-5127-ds_selinux_restorecon.sh-always-exit-0.patch @@ -0,0 +1,35 @@ +From 0ed471bae52bb0debd23336cbc5f3f1d400cbbc9 Mon Sep 17 00:00:00 2001 +From: Adam Williamson +Date: Thu, 27 Jan 2022 11:07:26 -0800 +Subject: [PATCH] Issue 5127 - ds_selinux_restorecon.sh: always exit 0 + +Description: + +We don't want to error out and give up on starting the service +if the restorecon fails - it might just be that the directory +doesn't exist and doesn't need restoring. 
Issue identified and +fix suggested by Simon Farnsworth + +relates: https://github.com/389ds/389-ds-base/issues/5127 + +Reviewed by: adamw & mreynolds +--- + wrappers/ds_selinux_restorecon.sh.in | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/wrappers/ds_selinux_restorecon.sh.in b/wrappers/ds_selinux_restorecon.sh.in +index 063347de3..2d7386233 100644 +--- a/wrappers/ds_selinux_restorecon.sh.in ++++ b/wrappers/ds_selinux_restorecon.sh.in +@@ -29,5 +29,6 @@ then + exit 0 + fi + +-# Now run restorecon +-restorecon ${DS_HOME_DIR} ++# Now run restorecon, but don't die if it fails (could be that the ++# directory doesn't exist) ++restorecon ${DS_HOME_DIR} || : +-- +2.31.1 + diff --git a/0016-Issue-4775-Add-entryuuid-CLI-and-Fixup-4776.patch b/0016-Issue-4775-Add-entryuuid-CLI-and-Fixup-4776.patch new file mode 100644 index 0000000..05d7f36 --- /dev/null +++ b/0016-Issue-4775-Add-entryuuid-CLI-and-Fixup-4776.patch @@ -0,0 +1,262 @@ +From 93588ea455aff691bdfbf59cdef4df8fcedb69f2 Mon Sep 17 00:00:00 2001 +From: Firstyear +Date: Thu, 19 Aug 2021 10:46:00 +1000 +Subject: [PATCH 1/2] Issue 4775 - Add entryuuid CLI and Fixup (#4776) + +Bug Description: EntryUUID when added was missing it's CLI +and helpers for fixups. + +Fix Description: Add the CLI elements. + +fixes: https://github.com/389ds/389-ds-base/issues/4775 + +Author: William Brown + +Review by: @mreynolds389 (thanks!) 
+--- + src/lib389/lib389/cli_conf/plugin.py | 6 ++- + .../lib389/cli_conf/plugins/entryuuid.py | 39 ++++++++++++++ + src/plugins/entryuuid/src/lib.rs | 54 ++++++++----------- + 3 files changed, 65 insertions(+), 34 deletions(-) + create mode 100644 src/lib389/lib389/cli_conf/plugins/entryuuid.py + +diff --git a/src/lib389/lib389/cli_conf/plugin.py b/src/lib389/lib389/cli_conf/plugin.py +index 560c57f9b..7c0cf2c80 100644 +--- a/src/lib389/lib389/cli_conf/plugin.py ++++ b/src/lib389/lib389/cli_conf/plugin.py +@@ -1,5 +1,5 @@ + # --- BEGIN COPYRIGHT BLOCK --- +-# Copyright (C) 2018 Red Hat, Inc. ++# Copyright (C) 2022 Red Hat, Inc. + # All rights reserved. + # + # License: GPL (version 3 or any later version). +@@ -27,6 +27,8 @@ from lib389.cli_conf.plugins import passthroughauth as cli_passthroughauth + from lib389.cli_conf.plugins import retrochangelog as cli_retrochangelog + from lib389.cli_conf.plugins import automember as cli_automember + from lib389.cli_conf.plugins import posix_winsync as cli_posix_winsync ++from lib389.cli_conf.plugins import contentsync as cli_contentsync ++from lib389.cli_conf.plugins import entryuuid as cli_entryuuid + + SINGULAR = Plugin + MANY = Plugins +@@ -113,6 +115,8 @@ def create_parser(subparsers): + cli_passthroughauth.create_parser(subcommands) + cli_retrochangelog.create_parser(subcommands) + cli_posix_winsync.create_parser(subcommands) ++ cli_contentsync.create_parser(subcommands) ++ cli_entryuuid.create_parser(subcommands) + + list_parser = subcommands.add_parser('list', help="List current configured (enabled and disabled) plugins") + list_parser.set_defaults(func=plugin_list) +diff --git a/src/lib389/lib389/cli_conf/plugins/entryuuid.py b/src/lib389/lib389/cli_conf/plugins/entryuuid.py +new file mode 100644 +index 000000000..6c86bff4b +--- /dev/null ++++ b/src/lib389/lib389/cli_conf/plugins/entryuuid.py +@@ -0,0 +1,39 @@ ++# --- BEGIN COPYRIGHT BLOCK --- ++# Copyright (C) 2021 William Brown ++# All rights reserved. 
++# ++# License: GPL (version 3 or any later version). ++# See LICENSE for details. ++# --- END COPYRIGHT BLOCK --- ++ ++import ldap ++from lib389.plugins import EntryUUIDPlugin ++from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add ++ ++def do_fixup(inst, basedn, log, args): ++ plugin = EntryUUIDPlugin(inst) ++ log.info('Attempting to add task entry...') ++ if not plugin.status(): ++ log.error("'%s' is disabled. Fix up task can't be executed" % plugin.rdn) ++ return ++ fixup_task = plugin.fixup(args.DN, args.filter) ++ fixup_task.wait() ++ exitcode = fixup_task.get_exit_code() ++ if exitcode != 0: ++ log.error('EntryUUID fixup task has failed. Please, check the error log for more - %s' % exitcode) ++ else: ++ log.info('Successfully added task entry') ++ ++def create_parser(subparsers): ++ referint = subparsers.add_parser('entryuuid', help='Manage and configure EntryUUID plugin') ++ subcommands = referint.add_subparsers(help='action') ++ ++ add_generic_plugin_parsers(subcommands, EntryUUIDPlugin) ++ ++ fixup = subcommands.add_parser('fixup', help='Run the fix-up task for EntryUUID plugin') ++ fixup.set_defaults(func=do_fixup) ++ fixup.add_argument('DN', help="Base DN that contains entries to fix up") ++ fixup.add_argument('-f', '--filter', ++ help='Filter for entries to fix up.\n If omitted, all entries under base DN' ++ 'will have their EntryUUID attribute regenerated if not present.') ++ +diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs +index da9f0c239..29a9f1258 100644 +--- a/src/plugins/entryuuid/src/lib.rs ++++ b/src/plugins/entryuuid/src/lib.rs +@@ -33,7 +33,7 @@ fn assign_uuid(e: &mut EntryRef) { + // 🚧 safety barrier 🚧 + if e.contains_attr("entryUUID") { + log_error!( +- ErrorLevel::Trace, ++ ErrorLevel::Plugin, + "assign_uuid -> entryUUID exists, skipping dn {}", + sdn.to_dn_string() + ); +@@ -47,7 +47,7 @@ fn assign_uuid(e: &mut EntryRef) { + if sdn.is_below_suffix(&*config_sdn) 
|| sdn.is_below_suffix(&*schema_sdn) { + // We don't need to assign to these suffixes. + log_error!( +- ErrorLevel::Trace, ++ ErrorLevel::Plugin, + "assign_uuid -> not assigning to {:?} as part of system suffix", + sdn.to_dn_string() + ); +@@ -57,7 +57,7 @@ fn assign_uuid(e: &mut EntryRef) { + // Generate a new Uuid. + let u: Uuid = Uuid::new_v4(); + log_error!( +- ErrorLevel::Trace, ++ ErrorLevel::Plugin, + "assign_uuid -> assigning {:?} to dn {}", + u, + sdn.to_dn_string() +@@ -78,13 +78,13 @@ impl SlapiPlugin3 for EntryUuid { + fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> { + if pb.get_is_replicated_operation() { + log_error!( +- ErrorLevel::Trace, ++ ErrorLevel::Plugin, + "betxn_pre_add -> replicated operation, will not change" + ); + return Ok(()); + } + +- log_error!(ErrorLevel::Trace, "betxn_pre_add -> start"); ++ log_error!(ErrorLevel::Plugin, "betxn_pre_add -> start"); + + let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?; + assign_uuid(&mut e); +@@ -105,7 +105,7 @@ impl SlapiPlugin3 for EntryUuid { + .first() + .ok_or_else(|| { + log_error!( +- ErrorLevel::Trace, ++ ErrorLevel::Plugin, + "task_validate basedn error -> empty value array?" + ); + LDAPError::Operation +@@ -113,7 +113,7 @@ impl SlapiPlugin3 for EntryUuid { + .as_ref() + .try_into() + .map_err(|e| { +- log_error!(ErrorLevel::Trace, "task_validate basedn error -> {:?}", e); ++ log_error!(ErrorLevel::Plugin, "task_validate basedn error -> {:?}", e); + LDAPError::Operation + })?, + None => return Err(LDAPError::ObjectClassViolation), +@@ -124,7 +124,7 @@ impl SlapiPlugin3 for EntryUuid { + .first() + .ok_or_else(|| { + log_error!( +- ErrorLevel::Trace, ++ ErrorLevel::Plugin, + "task_validate filter error -> empty value array?" 
+ ); + LDAPError::Operation +@@ -132,7 +132,7 @@ impl SlapiPlugin3 for EntryUuid { + .as_ref() + .try_into() + .map_err(|e| { +- log_error!(ErrorLevel::Trace, "task_validate filter error -> {:?}", e); ++ log_error!(ErrorLevel::Plugin, "task_validate filter error -> {:?}", e); + LDAPError::Operation + })?, + None => { +@@ -144,17 +144,11 @@ impl SlapiPlugin3 for EntryUuid { + // Error if the first filter is empty? + + // Now, to make things faster, we wrap the filter in a exclude term. +- +- // 2021 - #4877 because we allow entryuuid to be strings, on import these may +- // be invalid. As a result, we DO need to allow the fixup to check the entryuuid +- // value is correct, so we can not exclude these during the search. +- /* + let raw_filter = if !raw_filter.starts_with('(') && !raw_filter.ends_with('(') { + format!("(&({})(!(entryuuid=*)))", raw_filter) + } else { + format!("(&{}(!(entryuuid=*)))", raw_filter) + }; +- */ + + Ok(FixupData { basedn, raw_filter }) + } +@@ -165,7 +159,7 @@ impl SlapiPlugin3 for EntryUuid { + + fn task_handler(_task: &Task, data: Self::TaskData) -> Result { + log_error!( +- ErrorLevel::Trace, ++ ErrorLevel::Plugin, + "task_handler -> start thread with -> {:?}", + data + ); +@@ -205,12 +199,12 @@ impl SlapiPlugin3 for EntryUuid { + } + + fn start(_pb: &mut PblockRef) -> Result<(), PluginError> { +- log_error!(ErrorLevel::Trace, "plugin start"); ++ log_error!(ErrorLevel::Plugin, "plugin start"); + Ok(()) + } + + fn close(_pb: &mut PblockRef) -> Result<(), PluginError> { +- log_error!(ErrorLevel::Trace, "plugin close"); ++ log_error!(ErrorLevel::Plugin, "plugin close"); + Ok(()) + } + } +@@ -219,20 +213,14 @@ pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError + /* Supply a modification to the entry. 
*/ + let sdn = e.get_sdnref(); + +- /* Check that entryuuid doesn't already exist, and is valid */ +- if let Some(valueset) = e.get_attr("entryUUID") { +- if valueset.iter().all(|v| { +- let u: Result = (&v).try_into(); +- u.is_ok() +- }) { +- // All values were valid uuid, move on! +- log_error!( +- ErrorLevel::Plugin, +- "skipping fixup for -> {}", +- sdn.to_dn_string() +- ); +- return Ok(()); +- } ++ /* Sanity check that entryuuid doesn't already exist */ ++ if e.contains_attr("entryUUID") { ++ log_error!( ++ ErrorLevel::Plugin, ++ "skipping fixup for -> {}", ++ sdn.to_dn_string() ++ ); ++ return Ok(()); + } + + // Setup the modifications +@@ -248,7 +236,7 @@ pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError + + match lmod.execute() { + Ok(_) => { +- log_error!(ErrorLevel::Trace, "fixed-up -> {}", sdn.to_dn_string()); ++ log_error!(ErrorLevel::Plugin, "fixed-up -> {}", sdn.to_dn_string()); + Ok(()) + } + Err(e) => { +-- +2.34.1 + diff --git a/0017-Issue-4775-Fix-cherry-pick-error.patch b/0017-Issue-4775-Fix-cherry-pick-error.patch new file mode 100644 index 0000000..f9e5b2c --- /dev/null +++ b/0017-Issue-4775-Fix-cherry-pick-error.patch @@ -0,0 +1,42 @@ +From 525f2307fa3e2d0ae55c8c922e6f7220a1e5bd1b Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 3 Feb 2022 16:51:38 -0500 +Subject: [PATCH] Issue 4775 - Fix cherry-pick error + +Bug Description: EntryUUID when added was missing it's CLI +and helpers for fixups. + +Fix Description: Add the CLI elements. + +fixes: https://github.com/389ds/389-ds-base/issues/4775 + +Author: William Brown + +Review by: @mreynolds389 (thanks!) 
+--- + src/lib389/lib389/cli_conf/plugin.py | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/src/lib389/lib389/cli_conf/plugin.py b/src/lib389/lib389/cli_conf/plugin.py +index 7c0cf2c80..fb0ef3077 100644 +--- a/src/lib389/lib389/cli_conf/plugin.py ++++ b/src/lib389/lib389/cli_conf/plugin.py +@@ -27,7 +27,6 @@ from lib389.cli_conf.plugins import passthroughauth as cli_passthroughauth + from lib389.cli_conf.plugins import retrochangelog as cli_retrochangelog + from lib389.cli_conf.plugins import automember as cli_automember + from lib389.cli_conf.plugins import posix_winsync as cli_posix_winsync +-from lib389.cli_conf.plugins import contentsync as cli_contentsync + from lib389.cli_conf.plugins import entryuuid as cli_entryuuid + + SINGULAR = Plugin +@@ -115,7 +114,6 @@ def create_parser(subparsers): + cli_passthroughauth.create_parser(subcommands) + cli_retrochangelog.create_parser(subcommands) + cli_posix_winsync.create_parser(subcommands) +- cli_contentsync.create_parser(subcommands) + cli_entryuuid.create_parser(subcommands) + + list_parser = subcommands.add_parser('list', help="List current configured (enabled and disabled) plugins") +-- +2.34.1 + diff --git a/389-ds-base-devel.README b/389-ds-base-devel.README new file mode 100644 index 0000000..190c874 --- /dev/null +++ b/389-ds-base-devel.README @@ -0,0 +1,4 @@ +For detailed information on developing plugins for +389 Directory Server visit. + +http://port389/wiki/Plugins diff --git a/389-ds-base-git-local.sh b/389-ds-base-git-local.sh new file mode 100644 index 0000000..bc809cb --- /dev/null +++ b/389-ds-base-git-local.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +DATE=`date +%Y%m%d` +# use a real tag name here +VERSION=1.3.5.14 +PKGNAME=389-ds-base +TAG=${TAG:-$PKGNAME-$VERSION} +#SRCNAME=$PKGNAME-$VERSION-$DATE +SRCNAME=$PKGNAME-$VERSION + +test -d .git || { + echo you must be in the ds git repo to use this + echo bye + exit 1 +} + +if [ -z "$1" ] ; then + dir=. 
+else + dir="$1" +fi + +git archive --prefix=$SRCNAME/ $TAG | bzip2 > $dir/$SRCNAME.tar.bz2 diff --git a/389-ds-base-git.sh b/389-ds-base-git.sh new file mode 100644 index 0000000..0043901 --- /dev/null +++ b/389-ds-base-git.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +DATE=`date +%Y%m%d` +# use a real tag name here +VERSION=1.3.5.14 +PKGNAME=389-ds-base +TAG=${TAG:-$PKGNAME-$VERSION} +URL="https://git.fedorahosted.org/git/?p=389/ds.git;a=snapshot;h=$TAG;sf=tgz" +SRCNAME=$PKGNAME-$VERSION + +wget -O $SRCNAME.tar.gz "$URL" + +echo convert tgz format to tar.bz2 format + +gunzip $PKGNAME-$VERSION.tar.gz +bzip2 $PKGNAME-$VERSION.tar diff --git a/389-ds-base.spec b/389-ds-base.spec new file mode 100644 index 0000000..3180aed --- /dev/null +++ b/389-ds-base.spec @@ -0,0 +1,926 @@ + +%global pkgname dirsrv +%global srcname 389-ds-base + +# Exclude i686 bit arches +ExcludeArch: i686 + +# for a pre-release, define the prerel field e.g. .a1 .rc2 - comment out for official release +# also remove the space between % and global - this space is needed because +# fedpkg verrel stupidly ignores comment lines +#% global prerel .rc3 +# also need the relprefix field for a pre-release e.g. .0 - also comment out for official release +#% global relprefix 0. + +# If perl-Socket-2.000 or newer is available, set 0 to use_Socket6. 
+%global use_Socket6 0 + +%global use_asan 0 +%global use_rust 1 +%global use_legacy 1 +%global bundle_jemalloc 1 +%if %{use_asan} +%global bundle_jemalloc 0 +%endif + +%if %{bundle_jemalloc} +%global jemalloc_name jemalloc +%global jemalloc_ver 5.2.1 +%global __provides_exclude ^libjemalloc\\.so.*$ +%endif + +# Use Clang instead of GCC +%global use_clang 0 + +# fedora 15 and later uses tmpfiles.d +# otherwise, comment this out +%{!?with_tmpfiles_d: %global with_tmpfiles_d %{_sysconfdir}/tmpfiles.d} + +# systemd support +%global groupname %{pkgname}.target + +# set PIE flag +%global _hardened_build 1 + +# Filter argparse-manpage from autogenerated package Requires +%global __requires_exclude ^python.*argparse-manpage + +Summary: 389 Directory Server (base) +Name: 389-ds-base +Version: 1.4.3.28 +Release: %{?relprefix}6%{?prerel}%{?dist} +License: GPLv3+ +URL: https://www.port389.org +Group: System Environment/Daemons +Conflicts: selinux-policy-base < 3.9.8 +Conflicts: freeipa-server < 4.0.3 +Obsoletes: %{name} <= 1.4.0.9 +Provides: ldif2ldbm >= 0 + +##### Bundled cargo crates list - START ##### +Provides: bundled(crate(ansi_term)) = 0.11.0 +Provides: bundled(crate(atty)) = 0.2.14 +Provides: bundled(crate(autocfg)) = 1.0.1 +Provides: bundled(crate(base64)) = 0.13.0 +Provides: bundled(crate(bitflags)) = 1.3.2 +Provides: bundled(crate(byteorder)) = 1.4.3 +Provides: bundled(crate(cbindgen)) = 0.9.1 +Provides: bundled(crate(cc)) = 1.0.71 +Provides: bundled(crate(cfg-if)) = 1.0.0 +Provides: bundled(crate(clap)) = 2.33.3 +Provides: bundled(crate(entryuuid)) = 0.1.0 +Provides: bundled(crate(entryuuid_syntax)) = 0.1.0 +Provides: bundled(crate(fernet)) = 0.1.4 +Provides: bundled(crate(foreign-types)) = 0.3.2 +Provides: bundled(crate(foreign-types-shared)) = 0.1.1 +Provides: bundled(crate(getrandom)) = 0.2.3 +Provides: bundled(crate(hermit-abi)) = 0.1.19 +Provides: bundled(crate(itoa)) = 0.4.8 +Provides: bundled(crate(jobserver)) = 0.1.24 +Provides: bundled(crate(lazy_static)) 
= 1.4.0 +Provides: bundled(crate(libc)) = 0.2.104 +Provides: bundled(crate(librnsslapd)) = 0.1.0 +Provides: bundled(crate(librslapd)) = 0.1.0 +Provides: bundled(crate(log)) = 0.4.14 +Provides: bundled(crate(once_cell)) = 1.8.0 +Provides: bundled(crate(openssl)) = 0.10.36 +Provides: bundled(crate(openssl-sys)) = 0.9.67 +Provides: bundled(crate(paste)) = 0.1.18 +Provides: bundled(crate(paste-impl)) = 0.1.18 +Provides: bundled(crate(pkg-config)) = 0.3.20 +Provides: bundled(crate(ppv-lite86)) = 0.2.14 +Provides: bundled(crate(proc-macro-hack)) = 0.5.19 +Provides: bundled(crate(proc-macro2)) = 1.0.30 +Provides: bundled(crate(quote)) = 1.0.10 +Provides: bundled(crate(rand)) = 0.8.4 +Provides: bundled(crate(rand_chacha)) = 0.3.1 +Provides: bundled(crate(rand_core)) = 0.6.3 +Provides: bundled(crate(rand_hc)) = 0.3.1 +Provides: bundled(crate(redox_syscall)) = 0.2.10 +Provides: bundled(crate(remove_dir_all)) = 0.5.3 +Provides: bundled(crate(rsds)) = 0.1.0 +Provides: bundled(crate(ryu)) = 1.0.5 +Provides: bundled(crate(serde)) = 1.0.130 +Provides: bundled(crate(serde_derive)) = 1.0.130 +Provides: bundled(crate(serde_json)) = 1.0.68 +Provides: bundled(crate(slapd)) = 0.1.0 +Provides: bundled(crate(slapi_r_plugin)) = 0.1.0 +Provides: bundled(crate(strsim)) = 0.8.0 +Provides: bundled(crate(syn)) = 1.0.80 +Provides: bundled(crate(synstructure)) = 0.12.6 +Provides: bundled(crate(tempfile)) = 3.2.0 +Provides: bundled(crate(textwrap)) = 0.11.0 +Provides: bundled(crate(toml)) = 0.5.8 +Provides: bundled(crate(unicode-width)) = 0.1.9 +Provides: bundled(crate(unicode-xid)) = 0.2.2 +Provides: bundled(crate(uuid)) = 0.8.2 +Provides: bundled(crate(vcpkg)) = 0.2.15 +Provides: bundled(crate(vec_map)) = 0.8.2 +Provides: bundled(crate(wasi)) = 0.10.2+wasi_snapshot_preview1 +Provides: bundled(crate(winapi)) = 0.3.9 +Provides: bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0 +Provides: bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0 +Provides: bundled(crate(zeroize)) = 1.4.2 +Provides: 
bundled(crate(zeroize_derive)) = 1.2.0 +##### Bundled cargo crates list - END ##### + +BuildRequires: nspr-devel +BuildRequires: nss-devel >= 3.34 +BuildRequires: perl-generators +BuildRequires: openldap-devel +BuildRequires: libdb-devel +BuildRequires: cyrus-sasl-devel +BuildRequires: icu +BuildRequires: libicu-devel +BuildRequires: pcre-devel +BuildRequires: cracklib-devel +%if %{use_clang} +BuildRequires: libatomic +BuildRequires: clang +%else +BuildRequires: gcc +BuildRequires: gcc-c++ +%endif +# The following are needed to build the snmp ldap-agent +BuildRequires: net-snmp-devel +BuildRequires: lm_sensors-devel +BuildRequires: bzip2-devel +BuildRequires: zlib-devel +BuildRequires: openssl-devel +# the following is for the pam passthru auth plug-in +BuildRequires: pam-devel +BuildRequires: systemd-units +BuildRequires: systemd-devel +%if %{use_asan} +BuildRequires: libasan +%endif +# If rust is enabled +%if %{use_rust} +BuildRequires: cargo +BuildRequires: rust +%endif +BuildRequires: pkgconfig +BuildRequires: pkgconfig(systemd) +BuildRequires: pkgconfig(krb5) + +# Needed to support regeneration of the autotool artifacts. +BuildRequires: autoconf +BuildRequires: automake +BuildRequires: libtool +# For our documentation +BuildRequires: doxygen +# For tests! 
+BuildRequires: libcmocka-devel +BuildRequires: libevent-devel +# For lib389 and related components +BuildRequires: python%{python3_pkgversion} +BuildRequires: python%{python3_pkgversion}-devel +BuildRequires: python%{python3_pkgversion}-setuptools +BuildRequires: python%{python3_pkgversion}-ldap +BuildRequires: python%{python3_pkgversion}-six +BuildRequires: python%{python3_pkgversion}-pyasn1 +BuildRequires: python%{python3_pkgversion}-pyasn1-modules +BuildRequires: python%{python3_pkgversion}-dateutil +BuildRequires: python%{python3_pkgversion}-argcomplete +BuildRequires: python%{python3_pkgversion}-argparse-manpage +BuildRequires: python%{python3_pkgversion}-policycoreutils +BuildRequires: python%{python3_pkgversion}-libselinux + +# For cockpit +BuildRequires: rsync + +Requires: %{name}-libs = %{version}-%{release} +Requires: python%{python3_pkgversion}-lib389 = %{version}-%{release} + +# this is needed for using semanage from our setup scripts +Requires: policycoreutils-python-utils +Requires: /usr/sbin/semanage +Requires: libsemanage-python%{python3_pkgversion} + +Requires: selinux-policy >= 3.14.1-29 + +# the following are needed for some of our scripts +Requires: openldap-clients +Requires: openssl-perl +Requires: python%{python3_pkgversion}-ldap + +# this is needed to setup SSL if you are not using the +# administration server package +Requires: nss-tools +Requires: nss >= 3.34 + +# these are not found by the auto-dependency method +# they are required to support the mandatory LDAP SASL mechs +Requires: cyrus-sasl-gssapi +Requires: cyrus-sasl-md5 +Requires: cyrus-sasl-plain + +# this is needed for verify-db.pl +Requires: libdb-utils + +# Needed for password dictionary checks +Requires: cracklib-dicts + +# This picks up libperl.so as a Requires, so we add this versioned one +Requires: perl(:MODULE_COMPAT_%(eval "`%{__perl} -V:version`"; echo $version)) +Requires: perl-Errno >= 1.23-360 + +# Needed by logconv.pl +Requires: perl-DB_File +Requires: 
perl-Archive-Tar + +# Needed for password dictionary checks +Requires: cracklib-dicts + +# Picks up our systemd deps. +%{?systemd_requires} + +Obsoletes: %{name} <= 1.3.5.4 + +Source0: https://releases.pagure.org/389-ds-base/%{name}-%{version}.tar.bz2 +# 389-ds-git.sh should be used to generate the source tarball from git +Source1: %{name}-git.sh +Source2: %{name}-devel.README +%if %{bundle_jemalloc} +Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2 +%endif +%if %{use_rust} +Source4: vendor-%{version}-1.tar.gz +Source5: Cargo.lock +%endif + +Patch01: 0001-Issue-4678-RFE-automatique-disable-of-virtual-attrib.patch +Patch02: 0002-Issue-4943-Fix-csn-generator-to-limit-time-skew-drif.patch +Patch03: 0003-Issue-3584-Fix-PBKDF2_SHA256-hashing-in-FIPS-mode-49.patch +Patch04: 0004-Issue-4956-Automember-allows-invalid-regex-and-does-.patch +Patch05: 0005-Issue-4092-systemd-tmpfiles-warnings.patch +Patch06: 0006-Issue-4973-installer-changes-permissions-on-run.patch +Patch07: 0007-Issue-4973-update-snmp-to-use-run-dirsrv-for-PID-fil.patch +Patch08: 0008-Issue-4978-make-installer-robust.patch +Patch09: 0009-Issue-4972-gecos-with-IA5-introduces-a-compatibility.patch +Patch10: 0010-Issue-4997-Function-declaration-compiler-error-on-1..patch +Patch11: 0011-Issue-4978-use-more-portable-python-command-for-chec.patch +Patch12: 0012-Issue-4959-Invalid-etc-hosts-setup-can-cause-isLocal.patch +Patch13: 0013-CVE-2021-4091-BZ-2030367-double-free-of-the-virtual-.patch +Patch14: 0014-Issue-5127-run-restorecon-on-dev-shm-at-server-start.patch +Patch15: 0015-Issue-5127-ds_selinux_restorecon.sh-always-exit-0.patch +Patch16: 0016-Issue-4775-Add-entryuuid-CLI-and-Fixup-4776.patch +Patch17: 0017-Issue-4775-Fix-cherry-pick-error.patch + +%description +389 Directory Server is an LDAPv3 compliant server. The base package includes +the LDAP server and command line utilities for server administration. 
+%if %{use_asan} +WARNING! This build is linked to Address Sanitisation libraries. This probably +isn't what you want. Please contact support immediately. +Please see http://seclists.org/oss-sec/2016/q1/363 for more information. +%endif + +%package libs +Summary: Core libraries for 389 Directory Server +Group: System Environment/Daemons +BuildRequires: nspr-devel +BuildRequires: nss-devel >= 3.34 +BuildRequires: openldap-devel +BuildRequires: libdb-devel +BuildRequires: cyrus-sasl-devel +BuildRequires: libicu-devel +BuildRequires: pcre-devel +BuildRequires: libtalloc-devel +BuildRequires: libevent-devel +BuildRequires: libtevent-devel +Requires: krb5-libs +Requires: libevent +BuildRequires: systemd-devel +Provides: svrcore = 4.1.4 +Conflicts: svrcore +Obsoletes: svrcore <= 4.1.3 + +%description libs +Core libraries for the 389 Directory Server base package. These libraries +are used by the main package and the -devel package. This allows the -devel +package to be installed with just the -libs package and without the main package. + +%if %{use_legacy} +%package legacy-tools +Summary: Legacy utilities for 389 Directory Server +Group: System Environment/Daemons +Obsoletes: %{name} <= 1.4.0.9 +Requires: %{name}-libs = %{version}-%{release} +# for setup-ds.pl to support ipv6 +%if %{use_Socket6} +Requires: perl-Socket6 +%else +Requires: perl-Socket +%endif +Requires: perl-NetAddr-IP +# use_openldap assumes perl-Mozilla-LDAP is built with openldap support +Requires: perl-Mozilla-LDAP +# for setup-ds.pl +Requires: bind-utils +%global __provides_exclude_from %{_libdir}/%{pkgname}/perl +%global __requires_exclude perl\\((DSCreate|DSMigration|DSUpdate|DSUtil|Dialog|DialogManager|FileConn|Inf|Migration|Resource|Setup|SetupLog) +%{?perl_default_filter} + +%description legacy-tools +Legacy (and deprecated) utilities for 389 Directory Server. This includes +the old account management and task scripts. These are deprecated in favour of +the dscreate, dsctl, dsconf and dsidm tools. 
+%endif + +%package devel +Summary: Development libraries for 389 Directory Server +Group: Development/Libraries +Requires: %{name}-libs = %{version}-%{release} +Requires: pkgconfig +Requires: nspr-devel +Requires: nss-devel >= 3.34 +Requires: openldap-devel +Requires: libtalloc +Requires: libevent +Requires: libtevent +Requires: systemd-libs +Provides: svrcore-devel = 4.1.4 +Conflicts: svrcore-devel +Obsoletes: svrcore-devel <= 4.1.3 + +%description devel +Development Libraries and headers for the 389 Directory Server base package. + +%package snmp +Summary: SNMP Agent for 389 Directory Server +Group: System Environment/Daemons +Requires: %{name} = %{version}-%{release} + +Obsoletes: %{name} <= 1.4.0.0 + +%description snmp +SNMP Agent for the 389 Directory Server base package. + +%package -n python%{python3_pkgversion}-lib389 +Summary: A library for accessing, testing, and configuring the 389 Directory Server +BuildArch: noarch +Group: Development/Libraries +Requires: openssl +Requires: iproute +Requires: platform-python +Recommends: bash-completion +Requires: python%{python3_pkgversion}-ldap +Requires: python%{python3_pkgversion}-six +Requires: python%{python3_pkgversion}-pyasn1 +Requires: python%{python3_pkgversion}-pyasn1-modules +Requires: python%{python3_pkgversion}-dateutil +Requires: python%{python3_pkgversion}-argcomplete +Requires: python%{python3_pkgversion}-libselinux +Requires: python%{python3_pkgversion}-setuptools +Requires: python%{python3_pkgversion}-distro +%{?python_provide:%python_provide python%{python3_pkgversion}-lib389} + +%description -n python%{python3_pkgversion}-lib389 +This module contains tools and libraries for accessing, testing, + and configuring the 389 Directory Server. 
+ +%package -n cockpit-389-ds +Summary: Cockpit UI Plugin for configuring and administering the 389 Directory Server +BuildArch: noarch +Requires: cockpit +Requires: platform-python +Requires: python%{python3_pkgversion}-lib389 + +%description -n cockpit-389-ds +A cockpit UI Plugin for configuring and administering the 389 Directory Server + +%prep +%autosetup -p1 -v -n %{name}-%{version}%{?prerel} +%if %{use_rust} +tar xvzf %{SOURCE4} +cp %{SOURCE5} src/ +%endif +%if %{bundle_jemalloc} +%setup -q -n %{name}-%{version}%{?prerel} -T -D -b 3 +%endif +cp %{SOURCE2} README.devel + +%build + +OPENLDAP_FLAG="--with-openldap" +%{?with_tmpfiles_d: TMPFILES_FLAG="--with-tmpfiles-d=%{with_tmpfiles_d}"} +# hack hack hack https://bugzilla.redhat.com/show_bug.cgi?id=833529 +NSSARGS="--with-nss-lib=%{_libdir} --with-nss-inc=%{_includedir}/nss3" + +%if %{use_asan} +ASAN_FLAGS="--enable-asan --enable-debug" +%endif + +%if %{use_rust} +RUST_FLAGS="--enable-rust --enable-rust-offline" +%endif + +%if %{use_legacy} +LEGACY_FLAGS="--enable-legacy --enable-perl" +%else +LEGACY_FLAGS="--disable-legacy --disable-perl" +%endif + +%if %{use_clang} +export CC=clang +export CXX=clang++ +CLANG_FLAGS="--enable-clang" +%endif + +%if %{bundle_jemalloc} +# Override page size, bz #1545539 +# 4K +%ifarch %ix86 %arm x86_64 s390x +%define lg_page --with-lg-page=12 +%endif + +# 64K +%ifarch ppc64 ppc64le aarch64 +%define lg_page --with-lg-page=16 +%endif + +# Override huge page size on aarch64 +# 2M instead of 512M +%ifarch aarch64 +%define lg_hugepage --with-lg-hugepage=21 +%endif + +# Build jemalloc +pushd ../%{jemalloc_name}-%{jemalloc_ver} +%configure \ + --libdir=%{_libdir}/%{pkgname}/lib \ + --bindir=%{_libdir}/%{pkgname}/bin \ + --enable-prof +make %{?_smp_mflags} +popd +%endif + + +# Enforce strict linking +%define _strict_symbol_defs_build 1 + +# Rebuild the autotool artifacts now. 
+autoreconf -fiv + +%configure --enable-autobind --with-selinux $OPENLDAP_FLAG $TMPFILES_FLAG \ + --with-systemd \ + --with-systemdsystemunitdir=%{_unitdir} \ + --with-systemdsystemconfdir=%{_sysconfdir}/systemd/system \ + --with-systemdgroupname=%{groupname} \ + --libexecdir=%{_libexecdir}/%{pkgname} \ + $NSSARGS $ASAN_FLAGS $RUST_FLAGS $LEGACY_FLAGS $CLANG_FLAGS \ + --enable-cmocka + +# lib389 +pushd ./src/lib389 +%py3_build +popd +# argparse-manpage dynamic man pages have hardcoded man v1 in header, +# need to change it to v8 +sed -i "1s/\"1\"/\"8\"/" %{_builddir}/%{name}-%{version}%{?prerel}/src/lib389/man/dsconf.8 +sed -i "1s/\"1\"/\"8\"/" %{_builddir}/%{name}-%{version}%{?prerel}/src/lib389/man/dsctl.8 +sed -i "1s/\"1\"/\"8\"/" %{_builddir}/%{name}-%{version}%{?prerel}/src/lib389/man/dsidm.8 +sed -i "1s/\"1\"/\"8\"/" %{_builddir}/%{name}-%{version}%{?prerel}/src/lib389/man/dscreate.8 + +# Generate symbolic info for debuggers +export XCFLAGS=$RPM_OPT_FLAGS + +#make %{?_smp_mflags} +make + +%install + +mkdir -p %{buildroot}%{_datadir}/gdb/auto-load%{_sbindir} +mkdir -p %{buildroot}%{_datadir}/cockpit +make DESTDIR="$RPM_BUILD_ROOT" install + +# Cockpit file list +find %{buildroot}%{_datadir}/cockpit/389-console -type d | sed -e "s@%{buildroot}@@" | sed -e 's/^/\%dir /' > cockpit.list +find %{buildroot}%{_datadir}/cockpit/389-console -type f | sed -e "s@%{buildroot}@@" >> cockpit.list + +# Copy in our docs from doxygen. 
+cp -r %{_builddir}/%{name}-%{version}%{?prerel}/man/man3 $RPM_BUILD_ROOT/%{_mandir}/man3 + +# lib389 +pushd src/lib389 +%py3_install +popd + +mkdir -p $RPM_BUILD_ROOT/var/log/%{pkgname} +mkdir -p $RPM_BUILD_ROOT/var/lib/%{pkgname} +mkdir -p $RPM_BUILD_ROOT/var/lock/%{pkgname} + +# for systemd +mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/systemd/system/%{groupname}.wants + +#remove libtool archives and static libs +find %{buildroot} -type f -name "*.la" -delete +find %{buildroot} -type f -name "*.a" -delete + +%if %{use_legacy} +# make sure perl scripts have a proper shebang +sed -i -e 's|#{{PERL-EXEC}}|#!/usr/bin/perl|' $RPM_BUILD_ROOT%{_datadir}/%{pkgname}/script-templates/template-*.pl +%endif + +%if %{bundle_jemalloc} +pushd ../%{jemalloc_name}-%{jemalloc_ver} +make DESTDIR="$RPM_BUILD_ROOT" install_lib install_bin +cp -pa COPYING ../%{name}-%{version}%{?prerel}/COPYING.jemalloc +cp -pa README ../%{name}-%{version}%{?prerel}/README.jemalloc +popd +%endif + +%check +# This checks the code, if it fails it prints why, then re-raises the fail to shortcircuit the rpm build. +if ! make DESTDIR="$RPM_BUILD_ROOT" check; then cat ./test-suite.log && false; fi + +%clean +rm -rf $RPM_BUILD_ROOT + +%post +if [ -n "$DEBUGPOSTTRANS" ] ; then + output=$DEBUGPOSTTRANS + output2=${DEBUGPOSTTRANS}.upgrade +else + output=/dev/null + output2=/dev/null +fi + +# reload to pick up any changes to systemd files +/bin/systemctl daemon-reload >$output 2>&1 || : + +# https://fedoraproject.org/wiki/Packaging:UsersAndGroups#Soft_static_allocation +# Soft static allocation for UID and GID +USERNAME="dirsrv" +ALLOCATED_UID=389 +GROUPNAME="dirsrv" +ALLOCATED_GID=389 +HOMEDIR="/usr/share/dirsrv" + +getent group $GROUPNAME >/dev/null || /usr/sbin/groupadd -f -g $ALLOCATED_GID -r $GROUPNAME +if ! getent passwd $USERNAME >/dev/null ; then + if ! 
getent passwd $ALLOCATED_UID >/dev/null ; then + /usr/sbin/useradd -r -u $ALLOCATED_UID -g $GROUPNAME -d $HOMEDIR -s /sbin/nologin -c "user for 389-ds-base" $USERNAME + else + /usr/sbin/useradd -r -g $GROUPNAME -d $HOMEDIR -s /sbin/nologin -c "user for 389-ds-base" $USERNAME + fi +fi + +# Reload our sysctl before we restart (if we can) +sysctl --system &> $output; true + +%preun +if [ $1 -eq 0 ]; then # Final removal + # remove instance specific service files/links + rm -rf %{_sysconfdir}/systemd/system/%{groupname}.wants/* > /dev/null 2>&1 || : +fi + +%postun +if [ $1 = 0 ]; then # Final removal + rm -rf /var/run/%{pkgname} +fi + +%post snmp +%systemd_post %{pkgname}-snmp.service + +%preun snmp +%systemd_preun %{pkgname}-snmp.service %{groupname} + +%postun snmp +%systemd_postun_with_restart %{pkgname}-snmp.service + +%if %{use_legacy} +%post legacy-tools + +# START UPGRADE SCRIPT + +if [ -n "$DEBUGPOSTTRANS" ] ; then + output=$DEBUGPOSTTRANS + output2=${DEBUGPOSTTRANS}.upgrade +else + output=/dev/null + output2=/dev/null +fi + +# find all instances +instances="" # instances that require a restart after upgrade +ninst=0 # number of instances found in total + +echo looking for instances in %{_sysconfdir}/%{pkgname} > $output 2>&1 || : +instbase="%{_sysconfdir}/%{pkgname}" +for dir in $instbase/slapd-* ; do + echo dir = $dir >> $output 2>&1 || : + if [ ! 
-d "$dir" ] ; then continue ; fi + case "$dir" in *.removed) continue ;; esac + basename=`basename $dir` + inst="%{pkgname}@`echo $basename | sed -e 's/slapd-//g'`" + echo found instance $inst - getting status >> $output 2>&1 || : + if /bin/systemctl -q is-active $inst ; then + echo instance $inst is running >> $output 2>&1 || : + instances="$instances $inst" + else + echo instance $inst is not running >> $output 2>&1 || : + fi + ninst=`expr $ninst + 1` +done +if [ $ninst -eq 0 ] ; then + echo no instances to upgrade >> $output 2>&1 || : + exit 0 # have no instances to upgrade - just skip the rest +fi +# shutdown all instances +echo shutting down all instances . . . >> $output 2>&1 || : +for inst in $instances ; do + echo stopping instance $inst >> $output 2>&1 || : + /bin/systemctl stop $inst >> $output 2>&1 || : +done +echo remove pid files . . . >> $output 2>&1 || : +/bin/rm -f /var/run/%{pkgname}*.pid /var/run/%{pkgname}*.startpid +# do the upgrade +echo upgrading instances . . . >> $output 2>&1 || : +DEBUGPOSTSETUPOPT=`/usr/bin/echo $DEBUGPOSTSETUP | /usr/bin/sed -e "s/[^d]//g"` +if [ -n "$DEBUGPOSTSETUPOPT" ] ; then + %{_sbindir}/setup-ds.pl -$DEBUGPOSTSETUPOPT -u -s General.UpdateMode=offline >> $output 2>&1 || : +else + %{_sbindir}/setup-ds.pl -u -s General.UpdateMode=offline >> $output 2>&1 || : +fi + +# restart instances that require it +for inst in $instances ; do + echo restarting instance $inst >> $output 2>&1 || : + /bin/systemctl start $inst >> $output 2>&1 || : +done +#END UPGRADE +%endif + +exit 0 + + +%files +%if %{bundle_jemalloc} +%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.jemalloc +%license COPYING.jemalloc +%else +%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl +%endif +%dir %{_sysconfdir}/%{pkgname} +%dir %{_sysconfdir}/%{pkgname}/schema +%config(noreplace)%{_sysconfdir}/%{pkgname}/schema/*.ldif +%dir %{_sysconfdir}/%{pkgname}/config +%dir %{_sysconfdir}/systemd/system/%{groupname}.wants 
+%config(noreplace)%{_sysconfdir}/%{pkgname}/config/slapd-collations.conf +%config(noreplace)%{_sysconfdir}/%{pkgname}/config/certmap.conf +%{_datadir}/%{pkgname} +%{_datadir}/gdb/auto-load/* +%{_unitdir} +%{_bindir}/dbscan +%{_mandir}/man1/dbscan.1.gz +%{_bindir}/ds-replcheck +%{_mandir}/man1/ds-replcheck.1.gz +%{_bindir}/ds-logpipe.py +%{_mandir}/man1/ds-logpipe.py.1.gz +%{_bindir}/ldclt +%{_mandir}/man1/ldclt.1.gz +%{_sbindir}/ldif2ldap +%{_mandir}/man8/ldif2ldap.8.gz +%{_bindir}/logconv.pl +%{_mandir}/man1/logconv.pl.1.gz +%{_bindir}/pwdhash +%{_mandir}/man1/pwdhash.1.gz +%{_bindir}/readnsstate +%{_mandir}/man1/readnsstate.1.gz +# Remove for now: %caps(CAP_NET_BIND_SERVICE=pe) {_sbindir}/ns-slapd +%{_sbindir}/ns-slapd +%{_mandir}/man8/ns-slapd.8.gz +%{_libexecdir}/%{pkgname}/ds_systemd_ask_password_acl +%{_libexecdir}/%{pkgname}/ds_selinux_restorecon.sh +%{_mandir}/man5/99user.ldif.5.gz +%{_mandir}/man5/certmap.conf.5.gz +%{_mandir}/man5/slapd-collations.conf.5.gz +%{_mandir}/man5/dirsrv.5.gz +%{_mandir}/man5/dirsrv.systemd.5.gz +%{_libdir}/%{pkgname}/python +%dir %{_libdir}/%{pkgname}/plugins +%{_libdir}/%{pkgname}/plugins/*.so +# This has to be hardcoded to /lib - $libdir changes between lib/lib64, but +# sysctl.d is always in /lib. 
+%{_prefix}/lib/sysctl.d/* +%dir %{_localstatedir}/lib/%{pkgname} +%dir %{_localstatedir}/log/%{pkgname} +%ghost %dir %{_localstatedir}/lock/%{pkgname} +%exclude %{_sbindir}/ldap-agent* +%exclude %{_mandir}/man1/ldap-agent.1.gz +%exclude %{_unitdir}/%{pkgname}-snmp.service +%if %{bundle_jemalloc} +%{_libdir}/%{pkgname}/lib/ +%{_libdir}/%{pkgname}/bin/ +%exclude %{_libdir}/%{pkgname}/bin/jemalloc-config +%exclude %{_libdir}/%{pkgname}/bin/jemalloc.sh +%exclude %{_libdir}/%{pkgname}/lib/libjemalloc.a +%exclude %{_libdir}/%{pkgname}/lib/libjemalloc.so +%exclude %{_libdir}/%{pkgname}/lib/libjemalloc_pic.a +%exclude %{_libdir}/%{pkgname}/lib/pkgconfig +%endif + +%files devel +%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.devel +%{_mandir}/man3/* +%{_includedir}/svrcore.h +%{_includedir}/%{pkgname} +%{_libdir}/libsvrcore.so +%{_libdir}/%{pkgname}/libslapd.so +%{_libdir}/%{pkgname}/libns-dshttpd.so +%{_libdir}/%{pkgname}/libsds.so +%{_libdir}/%{pkgname}/libldaputil.so +%{_libdir}/pkgconfig/svrcore.pc +%{_libdir}/pkgconfig/dirsrv.pc +%{_libdir}/pkgconfig/libsds.pc + +%files libs +%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.devel +%dir %{_libdir}/%{pkgname} +%{_libdir}/libsvrcore.so.* +%{_libdir}/%{pkgname}/libslapd.so.* +%{_libdir}/%{pkgname}/libns-dshttpd-*.so +%{_libdir}/%{pkgname}/libsds.so.* +%{_libdir}/%{pkgname}/libldaputil.so.* +%{_libdir}/%{pkgname}/librewriters.so* +%if %{bundle_jemalloc} +%{_libdir}/%{pkgname}/lib/libjemalloc.so.2 +%endif + +%if %{use_legacy} +%files legacy-tools +%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.devel +%{_bindir}/infadd +%{_mandir}/man1/infadd.1.gz +%{_bindir}/ldif +%{_mandir}/man1/ldif.1.gz +%{_bindir}/migratecred +%{_mandir}/man1/migratecred.1.gz +%{_bindir}/mmldif +%{_mandir}/man1/mmldif.1.gz +%{_bindir}/rsearch +%{_mandir}/man1/rsearch.1.gz +%{_libexecdir}/%{pkgname}/ds_selinux_enabled +%{_libexecdir}/%{pkgname}/ds_selinux_port_query +%config(noreplace)%{_sysconfdir}/%{pkgname}/config/template-initconfig 
+%{_mandir}/man5/template-initconfig.5.gz +%{_datadir}/%{pkgname}/properties/*.res +%{_datadir}/%{pkgname}/script-templates +%{_datadir}/%{pkgname}/updates +%{_sbindir}/ldif2ldap +%{_mandir}/man8/ldif2ldap.8.gz +%{_sbindir}/bak2db +%{_mandir}/man8/bak2db.8.gz +%{_sbindir}/db2bak +%{_mandir}/man8/db2bak.8.gz +%{_sbindir}/db2index +%{_mandir}/man8/db2index.8.gz +%{_sbindir}/db2ldif +%{_mandir}/man8/db2ldif.8.gz +%{_sbindir}/dbverify +%{_mandir}/man8/dbverify.8.gz +%{_sbindir}/ldif2db +%{_mandir}/man8/ldif2db.8.gz +%{_sbindir}/restart-dirsrv +%{_mandir}/man8/restart-dirsrv.8.gz +%{_sbindir}/start-dirsrv +%{_mandir}/man8/start-dirsrv.8.gz +%{_sbindir}/status-dirsrv +%{_mandir}/man8/status-dirsrv.8.gz +%{_sbindir}/stop-dirsrv +%{_mandir}/man8/stop-dirsrv.8.gz +%{_sbindir}/upgradedb +%{_mandir}/man8/upgradedb.8.gz +%{_sbindir}/vlvindex +%{_mandir}/man8/vlvindex.8.gz +%{_sbindir}/monitor +%{_mandir}/man8/monitor.8.gz +%{_sbindir}/dbmon.sh +%{_mandir}/man8/dbmon.sh.8.gz +%{_sbindir}/dn2rdn +%{_mandir}/man8/dn2rdn.8.gz +%{_sbindir}/restoreconfig +%{_mandir}/man8/restoreconfig.8.gz +%{_sbindir}/saveconfig +%{_mandir}/man8/saveconfig.8.gz +%{_sbindir}/suffix2instance +%{_mandir}/man8/suffix2instance.8.gz +%{_sbindir}/upgradednformat +%{_mandir}/man8/upgradednformat.8.gz +%{_mandir}/man1/dbgen.pl.1.gz +%{_bindir}/repl-monitor +%{_mandir}/man1/repl-monitor.1.gz +%{_bindir}/repl-monitor.pl +%{_mandir}/man1/repl-monitor.pl.1.gz +%{_bindir}/cl-dump +%{_mandir}/man1/cl-dump.1.gz +%{_bindir}/cl-dump.pl +%{_mandir}/man1/cl-dump.pl.1.gz +%{_bindir}/dbgen.pl +%{_mandir}/man8/bak2db.pl.8.gz +%{_sbindir}/bak2db.pl +%{_sbindir}/cleanallruv.pl +%{_mandir}/man8/cleanallruv.pl.8.gz +%{_sbindir}/db2bak.pl +%{_mandir}/man8/db2bak.pl.8.gz +%{_sbindir}/db2index.pl +%{_mandir}/man8/db2index.pl.8.gz +%{_sbindir}/db2ldif.pl +%{_mandir}/man8/db2ldif.pl.8.gz +%{_sbindir}/fixup-linkedattrs.pl +%{_mandir}/man8/fixup-linkedattrs.pl.8.gz +%{_sbindir}/fixup-memberof.pl 
+%{_mandir}/man8/fixup-memberof.pl.8.gz +%{_sbindir}/ldif2db.pl +%{_mandir}/man8/ldif2db.pl.8.gz +%{_sbindir}/migrate-ds.pl +%{_mandir}/man8/migrate-ds.pl.8.gz +%{_sbindir}/ns-accountstatus.pl +%{_mandir}/man8/ns-accountstatus.pl.8.gz +%{_sbindir}/ns-activate.pl +%{_mandir}/man8/ns-activate.pl.8.gz +%{_sbindir}/ns-inactivate.pl +%{_mandir}/man8/ns-inactivate.pl.8.gz +%{_sbindir}/ns-newpwpolicy.pl +%{_mandir}/man8/ns-newpwpolicy.pl.8.gz +%{_sbindir}/remove-ds.pl +%{_mandir}/man8/remove-ds.pl.8.gz +%{_sbindir}/schema-reload.pl +%{_mandir}/man8/schema-reload.pl.8.gz +%{_sbindir}/setup-ds.pl +%{_mandir}/man8/setup-ds.pl.8.gz +%{_sbindir}/syntax-validate.pl +%{_mandir}/man8/syntax-validate.pl.8.gz +%{_sbindir}/usn-tombstone-cleanup.pl +%{_mandir}/man8/usn-tombstone-cleanup.pl.8.gz +%{_sbindir}/verify-db.pl +%{_mandir}/man8/verify-db.pl.8.gz +%{_libdir}/%{pkgname}/perl +%endif + +%files snmp +%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.devel +%config(noreplace)%{_sysconfdir}/%{pkgname}/config/ldap-agent.conf +%{_sbindir}/ldap-agent* +%{_mandir}/man1/ldap-agent.1.gz +%{_unitdir}/%{pkgname}-snmp.service + +%files -n python%{python3_pkgversion}-lib389 +%doc LICENSE LICENSE.GPLv3+ +%{python3_sitelib}/lib389* +%{_sbindir}/dsconf +%{_mandir}/man8/dsconf.8.gz +%{_sbindir}/dscreate +%{_mandir}/man8/dscreate.8.gz +%{_sbindir}/dsctl +%{_mandir}/man8/dsctl.8.gz +%{_sbindir}/dsidm +%{_mandir}/man8/dsidm.8.gz +%{_libexecdir}/%{pkgname}/dscontainer + +%files -n cockpit-389-ds -f cockpit.list +%{_datarootdir}/metainfo/389-console/org.port389.cockpit_console.metainfo.xml +%doc README.md + +%changelog +* Thu Feb 3 2022 Mark Reynolds - 1.4.3.28-6 +- Bump version to 1.4.3.28-6 +- Resolves: Bug 2047171 - Based on 1944494 (RFC 4530 entryUUID attribute) - plugin entryuuid failing + +* Fri Jan 28 2022 Mark Reynolds - 1.4.3.28-5 +- Bump version to 1.4.3.28-5 +- Resolves: Bug 2045223 - ipa-restore command is failing when restore after uninstalling the server (aprt 2) + +* Tue Jan 25 2022 
Mark Reynolds - 1.4.3.28-4 +- Bump version to 1.4.3.28-4 +- Resolves: Bug 2045223 - ipa-restore command is failing when restore after uninstalling the server + +* Thu Nov 18 2021 Mark Reynolds - 1.4.3.28-3 +- Bump version to 1.4.3.28-3 +- Resolves: Bug 2030367 - EMBARGOED CVE-2021-4091 389-ds:1.4/389-ds-base: double-free of the virtual attribute context in persistent search +- Resolves: Bug 2033398 - PBKDF2 hashing does not work in FIPS mode + +* Thu Nov 18 2021 Mark Reynolds - 1.4.3.28-2 +- Bump version to 1.4.3.28-2 +- Resolves: Bug 2024695 - DB corruption "_entryrdn_insert_key - Same DN (dn: nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff,) is already in the entryrdn file" +- Resolves: Bug 1859210 - systemd-tmpfiles warnings +- Resolves: Bug 1913199 - IPA server (389ds) is very slow in execution of some searches (`&(memberOf=...)(objectClass=ipaHost)` in particular) +- Resolves: Bug 1974236 - automatique disable of virtual attribute checking +- Resolves: Bug 1976882 - logconv.pl -j: Use of uninitialized value $first in numeric gt (>) +- Resolves: Bug 1981281 - ipa user-add fails with "gecos: value invalid per syntax: Invalid syntax" +- Resolves: Bug 2015998 - Log the Auto Member invalid regex rules in the LDAP errors log + +* Thu Oct 21 2021 Mark Reynolds - 1.4.3.28-1 +- Bump version to 1.4.3.28-1 +- Resolves: Bug 2016014 - rebase RHEL 8.6 with 389-ds-base-1.4.3 +- Resolves: Bug 1990002 - monitor displays wrong date for connection +- Resolves: Bug 1950335 - upgrade password hash on bind also causes passwordExpirationtime to be updated +- Resolves: Bug 1916292 - Indexing a single backend actually processes all configured backends +- Resolves: Bug 1780842 - [RFE] set db home directory to /dev/shm by default +- Resolves: Bug 2000975 - Retro Changelog does not trim changes + + + + diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..4c77f19 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,565 @@ +# This file is automatically @generated by Cargo. 
+# It is not intended for manual editing. +version = 3 + +[[package]] +name = "ansi_term" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +dependencies = [ + "winapi", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + +[[package]] +name = "base64" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + +[[package]] +name = "cbindgen" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd" +dependencies = [ + "clap", + "log", + "proc-macro2", + "quote", + "serde", + "serde_json", + "syn", + "tempfile", + "toml", +] + +[[package]] +name = "cc" +version = "1.0.71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd" +dependencies = [ + "jobserver", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "clap" +version = "2.33.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" +dependencies = [ + "ansi_term", + "atty", + "bitflags", + "strsim", + "textwrap", + "unicode-width", + "vec_map", +] + +[[package]] +name = "entryuuid" +version = "0.1.0" +dependencies = [ + "cc", + "libc", + "paste", + "slapi_r_plugin", + "uuid", +] + +[[package]] +name = "entryuuid_syntax" +version = "0.1.0" +dependencies = [ + "cc", + "libc", + "paste", + "slapi_r_plugin", + "uuid", +] + +[[package]] +name = "fernet" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f" +dependencies = [ + "base64", + "byteorder", + "getrandom", + "openssl", + "zeroize", +] + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "getrandom" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "itoa" +version = 
"0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" + +[[package]] +name = "jobserver" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" +dependencies = [ + "libc", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2f96d100e1cf1929e7719b7edb3b90ab5298072638fccd77be9ce942ecdfce" + +[[package]] +name = "librnsslapd" +version = "0.1.0" +dependencies = [ + "cbindgen", + "libc", + "slapd", +] + +[[package]] +name = "librslapd" +version = "0.1.0" +dependencies = [ + "cbindgen", + "libc", + "slapd", +] + +[[package]] +name = "log" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "once_cell" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" + +[[package]] +name = "openssl" +version = "0.10.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9facdb76fec0b73c406f125d44d86fdad818d66fef0531eec9233ca425ff4a" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-sys", +] + +[[package]] +name = "openssl-sys" +version = "0.9.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69df2d8dfc6ce3aaf44b40dec6f487d5a886516cf6879c49e98e0710f310a058" +dependencies = [ + "autocfg", + "cc", + "libc", + 
"pkg-config", + "vcpkg", +] + +[[package]] +name = "paste" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" +dependencies = [ + "paste-impl", + "proc-macro-hack", +] + +[[package]] +name = "paste-impl" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" +dependencies = [ + "proc-macro-hack", +] + +[[package]] +name = "pkg-config" +version = "0.3.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c9b1041b4387893b91ee6746cddfc28516aff326a3519fb2adf820932c5e6cb" + +[[package]] +name = "ppv-lite86" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ca011bd0129ff4ae15cd04c4eef202cadf6c51c21e47aba319b4e0501db741" + +[[package]] +name = "proc-macro-hack" +version = "0.5.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" + +[[package]] +name = "proc-macro2" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edc3358ebc67bc8b7fa0c007f945b0b18226f78437d61bec735a9eb96b61ee70" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", + "rand_hc", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_hc" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" +dependencies = [ + "rand_core", +] + +[[package]] +name = "redox_syscall" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +dependencies = [ + "bitflags", +] + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "rsds" +version = "0.1.0" + +[[package]] +name = "ryu" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" + +[[package]] +name = "serde" +version = "1.0.130" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.130" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "slapd" +version = "0.1.0" +dependencies = [ + "fernet", +] + +[[package]] +name = "slapi_r_plugin" +version = "0.1.0" +dependencies = [ + "lazy_static", + "libc", + "paste", + "uuid", +] + +[[package]] +name = "strsim" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + +[[package]] +name = "syn" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d010a1623fbd906d51d650a9916aaefc05ffa0e4053ff7fe601167f3e715d194" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "synstructure" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "unicode-xid", +] + +[[package]] +name = "tempfile" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +dependencies = [ + "cfg-if", + "libc", + "rand", + "redox_syscall", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "toml" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +dependencies = [ + "serde", +] + +[[package]] +name = "unicode-width" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" + +[[package]] +name = "unicode-xid" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" + +[[package]] +name = "uuid" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +dependencies = [ + "getrandom", +] + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "vec_map" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + +[[package]] +name = "wasi" +version = "0.10.2+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "zeroize" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bf68b08513768deaa790264a7fac27a58cbf2705cfcdc9448362229217d7e970" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdff2024a851a322b08f179173ae2ba620445aef1e838f0c196820eade4ae0c7" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] diff --git a/gating.yaml b/gating.yaml new file mode 100644 index 0000000..91f6f0f --- /dev/null +++ b/gating.yaml @@ -0,0 +1,6 @@ +--- !Policy +product_versions: + - rhel-9 +decision_context: osci_compose_gate +rules: + - !PassingTestCaseRule {test_case_name: osci.brew-build.tier0.functional} diff --git a/rpminspect.yaml b/rpminspect.yaml new file mode 100644 index 0000000..873d73f --- /dev/null +++ b/rpminspect.yaml @@ -0,0 +1,7 @@ +--- +specname: + match: suffix +runpath: + allowed_paths: + - /usr/lib64/dirsrv + - /usr/lib64/dirsrv/plugins diff --git a/sources b/sources new file mode 100644 index 0000000..42b4bf9 --- /dev/null +++ b/sources @@ -0,0 +1,3 @@ +SHA1 (389-ds-base-1.4.3.28.tar.bz2) = 9274c7088190993255749ea90bbb770c5c5e0f5c +SHA1 (jemalloc-5.2.1.tar.bz2) = 9e06b5cc57fd185379d007696da153893cf73e30 +SHA1 (vendor-1.4.3.28-1.tar.gz) = c6875530163f0e217ed2e0e5b768506db3d07447 diff --git a/tests/tests.yml b/tests/tests.yml new file mode 100644 index 0000000..a2570d1 --- /dev/null +++ b/tests/tests.yml @@ -0,0 +1,58 @@ +--- +- hosts: localhost + remote_user: root + vars: + ds_repo_url: https://github.com/389ds/389-ds-base.git + ds_repo_dir: ds + ds_repo_version: 389-ds-base-2.1 + ds_tests: "{{ ds_repo_dir }}/dirsrvtests/tests" + pytest: "py.test-3" + pytest_args: "-v" + pytest_tier0_tests: "-m tier0" + pytest_tier1_tests: "-m 'tier1 and not tier2'" + pytest_run_command: "PYTHONPATH=../../src/lib389 {{ pytest }} {{ pytest_args }}" + artifacts: ./artifacts + pre_tasks: + - name: Install policycoreutils + action: > + {{ ansible_pkg_mgr }} name=policycoreutils-python-utils 
state=present + tags: always + ignore_errors: yes + - name: Prelabel non-secure ports + tags: always + shell: "semanage port -a -t ldap_port_t -p tcp 38900-39299" + ignore_errors: yes + - name: Prelabel secure ports + tags: always + shell: "semanage port -a -t ldap_port_t -p tcp 63600-63999" + ignore_errors: yes + - name: Install pip + action: > + {{ ansible_pkg_mgr }} name=python3-pip state=present + tags: always + ignore_errors: yes + - name: Install slugify + tags: always + shell: "pip3 install slugify" + ignore_errors: yes + roles: + - role: standard-test-basic + tags: + - classic + repositories: + - repo: "{{ ds_repo_url }}" + dest: "{{ ds_repo_dir }}" + version: "{{ ds_repo_version }}" + tests: + - tier0: + dir: "{{ ds_tests }}" + run: "{{ pytest_run_command }} {{ pytest_tier0_tests }}" + - tier1: + dir: "{{ ds_tests }}" + run: "{{ pytest_run_command }} {{ pytest_tier1_tests }}" + required_packages: + - python3-pytest + - python3-distro + - 389-ds-base + - 389-ds-base-snmp + - cracklib-dicts