import 389-ds-base-1.4.3.30-6.module+el8.7.0+16373+1a59bba2

parent 0f16213f1f
commit a6550cf152
@@ -1,3 +1,3 @@
-9274c7088190993255749ea90bbb770c5c5e0f5c SOURCES/389-ds-base-1.4.3.28.tar.bz2
-9e06b5cc57fd185379d007696da153893cf73e30 SOURCES/jemalloc-5.2.1.tar.bz2
-c6875530163f0e217ed2e0e5b768506db3d07447 SOURCES/vendor-1.4.3.28-1.tar.gz
+672f63948af9d242034f689340f772b8e148ee3c SOURCES/389-ds-base-1.4.3.30.tar.bz2
+1c8f2d0dfbf39fa8cd86363bf3314351ab21f8d4 SOURCES/jemalloc-5.3.0.tar.bz2
+dc0d2e81e54cc7e4098a829b8202d59ec471b34f SOURCES/vendor-1.4.3.30-1.tar.gz

.gitignore (vendored)
@@ -1,3 +1,3 @@
-SOURCES/389-ds-base-1.4.3.28.tar.bz2
-SOURCES/jemalloc-5.2.1.tar.bz2
-SOURCES/vendor-1.4.3.28-1.tar.gz
+SOURCES/389-ds-base-1.4.3.30.tar.bz2
+SOURCES/jemalloc-5.3.0.tar.bz2
+SOURCES/vendor-1.4.3.30-1.tar.gz
@@ -1,738 +0,0 @@
From 67e19da62a9e8958458de54173dcd9bcaf53164d Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 30 Sep 2021 15:59:40 +0200
Subject: [PATCH 01/12] Issue 4678 - RFE automatic disable of virtual
 attribute checking (#4918)

Bug description:
Virtual attributes are configured via Roles or COS definitions
and registered during initialization of those plugins.
Virtual attributes are processed during search evaluation of
filters and returned attributes. This processing is expensive
and prone to create contention between searches.
Use of virtual attributes is not frequent, yet many deployments
process virtual attributes even when none are defined.

Fix description:
The fix configures the server to ignore virtual attributes by
default (nsslapd-ignore-virtual-attrs: on).
At startup, if a new virtual attribute is registered or
Roles/COS definitions exist, the server is reconfigured to
process virtual attributes (nsslapd-ignore-virtual-attrs: off).
design: https://www.port389.org/docs/389ds/design/vattr-automatic-toggle.html

relates: https://github.com/389ds/389-ds-base/issues/4678

Reviewed by: William Brown, Simon Pichugin, Mark Reynolds (Thanks !!)

Platforms tested: F34
---
 .../tests/suites/config/config_test.py       |  40 +++-
 dirsrvtests/tests/suites/cos/cos_test.py     |  94 ++++++--
 dirsrvtests/tests/suites/roles/basic_test.py | 200 +++++++++++++++++-
 ldap/servers/plugins/roles/roles_cache.c     |   9 +
 ldap/servers/slapd/libglobs.c                |   2 +-
 ldap/servers/slapd/main.c                    |   2 +
 ldap/servers/slapd/proto-slap.h              |   1 +
 ldap/servers/slapd/vattr.c                   | 127 +++++++++++
 src/lib389/lib389/idm/role.py                |   4 +
 9 files changed, 455 insertions(+), 24 deletions(-)

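For reference, the toggle described in this patch can be exercised with the same lib389 config calls the tests below rely on. A minimal sketch, not part of the patch, assuming a connected lib389 DirSrv instance named `standalone`:

    # Sketch only: check the new default and force virtual attribute processing back on
    default = standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs')
    assert default == "on"                                        # new default introduced here
    standalone.config.set('nsslapd-ignore-virtual-attrs', 'off')  # explicitly enable vattr processing
    standalone.restart()
    assert standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')  # the explicit value survives restart
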
diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
index 2ecff8f98..19232c87d 100644
--- a/dirsrvtests/tests/suites/config/config_test.py
+++ b/dirsrvtests/tests/suites/config/config_test.py
@@ -351,7 +351,7 @@ def test_ignore_virtual_attrs(topo):
     :setup: Standalone instance
     :steps:
         1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
-        2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF
+        2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
        3. Set the valid values i.e. on/ON and off/OFF for nsslapd-ignore-virtual-attrs
        4. Set invalid value for attribute nsslapd-ignore-virtual-attrs
        5. Set nsslapd-ignore-virtual-attrs=off
@@ -374,8 +374,8 @@ def test_ignore_virtual_attrs(topo):
    log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')

-    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
-    assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "off"
+    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
+    assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"

    log.info("Set the valid values i.e. on/ON and off/OFF for nsslapd-ignore-virtual-attrs")
    for attribute_value in ['on', 'off', 'ON', 'OFF']:
@@ -415,6 +415,40 @@ def test_ignore_virtual_attrs(topo):
    log.info("Test if virtual attribute i.e. postal code not shown while nsslapd-ignore-virtual-attrs: on")
    assert not test_user.present('postalcode', '117')

+def test_ignore_virtual_attrs_after_restart(topo):
+    """Test nsslapd-ignore-virtual-attrs configuration attribute
+       The attribute is ON by default. If it set to OFF, it keeps
+       its value on restart
+
+    :id: ac368649-4fda-473c-9ef8-e0c728b162af
+    :setup: Standalone instance
+    :steps:
+         1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
+         2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
+         3. Set nsslapd-ignore-virtual-attrs=off
+         4. restart the instance
+         5. Check the attribute nsslapd-ignore-virtual-attrs is OFF
+    :expectedresults:
+         1. This should be successful
+         2. This should be successful
+         3. This should be successful
+         4. This should be successful
+         5. This should be successful
+    """
+
+    log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
+    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
+
+    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
+    assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
+
+    log.info("Set nsslapd-ignore-virtual-attrs = off")
+    topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'off')
+
+    topo.standalone.restart()
+
+    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
+    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')

 @pytest.mark.bz918694
 @pytest.mark.ds408
diff --git a/dirsrvtests/tests/suites/cos/cos_test.py b/dirsrvtests/tests/suites/cos/cos_test.py
index d6a498c73..d1f99f96f 100644
--- a/dirsrvtests/tests/suites/cos/cos_test.py
+++ b/dirsrvtests/tests/suites/cos/cos_test.py
@@ -6,6 +6,8 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---

+import logging
+import time
 import pytest, os, ldap
 from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate
 from lib389._constants import DEFAULT_SUFFIX
@@ -14,26 +16,37 @@ from lib389.idm.role import FilteredRoles
 from lib389.idm.nscontainer import nsContainer
 from lib389.idm.user import UserAccount

+logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
 pytestmark = pytest.mark.tier1
+@pytest.fixture(scope="function")
+def reset_ignore_vattr(topo, request):
+    default_ignore_vattr_value = topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs')
+    def fin():
+        topo.standalone.config.set('nsslapd-ignore-virtual-attrs', default_ignore_vattr_value)

-def test_positive(topo):
-    """
-    :id: a5a74235-597f-4fe8-8c38-826860927472
-    :setup: server
-    :steps:
-        1. Add filter role entry
-        2. Add ns container
-        3. Add cos template
-        4. Add CosClassic Definition
-        5. Cos entries should be added and searchable
-        6. employeeType attribute should be there in user entry as per the cos plugin property
-    :expectedresults:
-        1. Operation should success
-        2. Operation should success
-        3. Operation should success
-        4. Operation should success
-        5. Operation should success
-        6. Operation should success
+    request.addfinalizer(fin)
+
+def test_positive(topo, reset_ignore_vattr):
+    """CoS positive tests
+
+    :id: a5a74235-597f-4fe8-8c38-826860927472
+    :setup: server
+    :steps:
+        1. Add filter role entry
+        2. Add ns container
+        3. Add cos template
+        4. Add CosClassic Definition
+        5. Cos entries should be added and searchable
+        6. employeeType attribute should be there in user entry as per the cos plugin property
+    :expectedresults:
+        1. Operation should success
+        2. Operation should success
+        3. Operation should success
+        4. Operation should success
+        5. Operation should success
+        6. Operation should success
     """
     # Adding ns filter role
     roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
@@ -77,7 +90,52 @@ def test_positive(topo):

     # CoS definition entry's cosSpecifier attribute specifies the employeeType attribute
     assert user.present('employeeType')
+    cosdef.delete()
+
+def test_vattr_on_cos_definition(topo, reset_ignore_vattr):
+    """Test nsslapd-ignore-virtual-attrs configuration attribute
+       The attribute is ON by default. If a cos definition is
+       added it is moved to OFF
+
+    :id: e7ef5254-386f-4362-bbb4-9409f3f51b08
+    :setup: Standalone instance
+    :steps:
+         1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
+         2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
+         3. Create a cos definition for employeeType
+         4. Check the value of nsslapd-ignore-virtual-attrs should be OFF (with a delay for postop processing)
+         5. Check a message "slapi_vattrspi_regattr - Because employeeType,.." in error logs
+    :expectedresults:
+         1. This should be successful
+         2. This should be successful
+         3. This should be successful
+         4. This should be successful
+         5. This should be successful
+    """
+
+    log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
+    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
+
+    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
+    assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
+
+    # creating CosClassicDefinition
+    log.info("Create a cos definition")
+    properties = {'cosTemplateDn': 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX),
+                  'cosAttribute': 'employeeType',
+                  'cosSpecifier': 'nsrole',
+                  'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'}
+    cosdef = CosClassicDefinition(topo.standalone,'cn=cosClassicGenerateEmployeeTypeUsingnsrole,{}'.format(DEFAULT_SUFFIX))\
+        .create(properties=properties)
+
+    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
+    time.sleep(2)
+    assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')

+    topo.standalone.stop()
+    assert topo.standalone.searchErrorsLog("slapi_vattrspi_regattr - Because employeeType is a new registered virtual attribute , nsslapd-ignore-virtual-attrs was set to \'off\'")
+    topo.standalone.start()
+    cosdef.delete()

 if __name__ == "__main__":
     CURRENT_FILE = os.path.realpath(__file__)
diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
index 47a531794..bec3aedfc 100644
|
||||
--- a/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
@@ -11,6 +11,8 @@
|
||||
Importing necessary Modules.
|
||||
"""
|
||||
|
||||
+import logging
|
||||
+import time
|
||||
import os
|
||||
import pytest
|
||||
|
||||
@@ -22,6 +24,9 @@ from lib389.topologies import topology_st as topo
|
||||
from lib389.idm.role import FilteredRoles, ManagedRoles, NestedRoles
|
||||
from lib389.idm.domain import Domain
|
||||
|
||||
+logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
DNBASE = "o=acivattr,{}".format(DEFAULT_SUFFIX)
|
||||
@@ -35,7 +40,7 @@ FILTERROLESALESROLE = "cn=FILTERROLESALESROLE,{}".format(DNBASE)
|
||||
FILTERROLEENGROLE = "cn=FILTERROLEENGROLE,{}".format(DNBASE)
|
||||
|
||||
|
||||
-def test_filterrole(topo):
|
||||
+def test_filterrole(topo, request):
|
||||
"""Test Filter Role
|
||||
|
||||
:id: 8ada4064-786b-11e8-8634-8c16451d917b
|
||||
@@ -136,8 +141,20 @@ def test_filterrole(topo):
|
||||
SALES_OU, DNBASE]:
|
||||
UserAccount(topo.standalone, dn_dn).delete()
|
||||
|
||||
+ def fin():
|
||||
+ topo.standalone.restart()
|
||||
+ try:
|
||||
+ filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
|
||||
+ for i in filtered_roles.list():
|
||||
+ i.delete()
|
||||
+ except:
|
||||
+ pass
|
||||
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
|
||||
-def test_managedrole(topo):
|
||||
+def test_managedrole(topo, request):
|
||||
"""Test Managed Role
|
||||
|
||||
:id: d52a9c00-3bf6-11e9-9b7b-8c16451d917b
|
||||
@@ -209,6 +226,16 @@ def test_managedrole(topo):
|
||||
for i in roles.list():
|
||||
i.delete()
|
||||
|
||||
+ def fin():
|
||||
+ topo.standalone.restart()
|
||||
+ try:
|
||||
+ role = ManagedRoles(topo.standalone, DEFAULT_SUFFIX).get('ROLE1')
|
||||
+ role.delete()
|
||||
+ except:
|
||||
+ pass
|
||||
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def _final(request, topo):
|
||||
@@ -220,6 +247,7 @@ def _final(request, topo):
|
||||
def finofaci():
|
||||
"""
|
||||
Removes and Restores ACIs and other users after the test.
|
||||
+ And restore nsslapd-ignore-virtual-attrs to default
|
||||
"""
|
||||
domain = Domain(topo.standalone, DEFAULT_SUFFIX)
|
||||
domain.remove_all('aci')
|
||||
@@ -234,6 +262,8 @@ def _final(request, topo):
|
||||
for i in aci_list:
|
||||
domain.add("aci", i)
|
||||
|
||||
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
|
||||
+
|
||||
request.addfinalizer(finofaci)
|
||||
|
||||
|
||||
@@ -296,6 +326,172 @@ def test_nestedrole(topo, _final):
|
||||
conn = users.get('test_user_3').bind(PW_DM)
|
||||
assert UserAccounts(conn, DEFAULT_SUFFIX).list()
|
||||
|
||||
+def test_vattr_on_filtered_role(topo, request):
|
||||
+ """Test nsslapd-ignore-virtual-attrs configuration attribute
|
||||
+ The attribute is ON by default. If a filtered role is
|
||||
+ added it is moved to OFF
|
||||
+
|
||||
+ :id: 88b3ad3c-f39a-4eb7-a8c9-07c685f11908
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
|
||||
+ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
|
||||
+ 3. Create a filtered role
|
||||
+ 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF
|
||||
+ 5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs
|
||||
+ :expectedresults:
|
||||
+ 1. This should be successful
|
||||
+ 2. This should be successful
|
||||
+ 3. This should be successful
|
||||
+ 4. This should be successful
|
||||
+ 5. This should be successful
|
||||
+ """
|
||||
+
|
||||
+ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
|
||||
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
|
||||
+
|
||||
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
|
||||
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
|
||||
+
|
||||
+ log.info("Create a filtered role")
|
||||
+ try:
|
||||
+ Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX)
|
||||
+ except:
|
||||
+ pass
|
||||
+ roles = FilteredRoles(topo.standalone, DNBASE)
|
||||
+ roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'})
|
||||
+
|
||||
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
|
||||
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
|
||||
+
|
||||
+ topo.standalone.stop()
|
||||
+ assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'")
|
||||
+
|
||||
+ def fin():
|
||||
+ topo.standalone.restart()
|
||||
+ try:
|
||||
+ filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
|
||||
+ for i in filtered_roles.list():
|
||||
+ i.delete()
|
||||
+ except:
|
||||
+ pass
|
||||
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+def test_vattr_on_filtered_role_restart(topo, request):
|
||||
+ """Test nsslapd-ignore-virtual-attrs configuration attribute
|
||||
+ If it exists a filtered role definition at restart then
|
||||
+ nsslapd-ignore-virtual-attrs should be set to 'off'
|
||||
+
|
||||
+ :id: 972183f7-d18f-40e0-94ab-580e7b7d78d0
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
|
||||
+ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
|
||||
+ 3. Create a filtered role
|
||||
+ 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF
|
||||
+ 5. restart the instance
|
||||
+ 6. Check the presence of virtual attribute is detected
|
||||
+ 7. Check the value of nsslapd-ignore-virtual-attrs should be OFF
|
||||
+ :expectedresults:
|
||||
+ 1. This should be successful
|
||||
+ 2. This should be successful
|
||||
+ 3. This should be successful
|
||||
+ 4. This should be successful
|
||||
+ 5. This should be successful
|
||||
+ 6. This should be successful
|
||||
+ 7. This should be successful
|
||||
+ """
|
||||
+
|
||||
+ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
|
||||
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
|
||||
+
|
||||
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
|
||||
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
|
||||
+
|
||||
+ log.info("Create a filtered role")
|
||||
+ try:
|
||||
+ Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX)
|
||||
+ except:
|
||||
+ pass
|
||||
+ roles = FilteredRoles(topo.standalone, DNBASE)
|
||||
+ roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'})
|
||||
+
|
||||
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
|
||||
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
|
||||
+
|
||||
+
|
||||
+ log.info("Check the virtual attribute definition is found (after a required delay)")
|
||||
+ topo.standalone.restart()
|
||||
+ time.sleep(5)
|
||||
+ assert topo.standalone.searchErrorsLog("Found a role/cos definition in")
|
||||
+ assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'")
|
||||
+
|
||||
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
|
||||
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
|
||||
+
|
||||
+ def fin():
|
||||
+ topo.standalone.restart()
|
||||
+ try:
|
||||
+ filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
|
||||
+ for i in filtered_roles.list():
|
||||
+ i.delete()
|
||||
+ except:
|
||||
+ pass
|
||||
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+
|
||||
+def test_vattr_on_managed_role(topo, request):
|
||||
+ """Test nsslapd-ignore-virtual-attrs configuration attribute
|
||||
+ The attribute is ON by default. If a managed role is
|
||||
+ added it is moved to OFF
|
||||
+
|
||||
+ :id: 664b722d-c1ea-41e4-8f6c-f9c87a212346
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
|
||||
+ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
|
||||
+ 3. Create a managed role
|
||||
+ 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF
|
||||
+ 5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs
|
||||
+ :expectedresults:
|
||||
+ 1. This should be successful
|
||||
+ 2. This should be successful
|
||||
+ 3. This should be successful
|
||||
+ 4. This should be successful
|
||||
+ 5. This should be successful
|
||||
+ """
|
||||
+
|
||||
+ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
|
||||
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
|
||||
+
|
||||
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
|
||||
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
|
||||
+
|
||||
+ log.info("Create a managed role")
|
||||
+ roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX)
|
||||
+ role = roles.create(properties={"cn": 'ROLE1'})
|
||||
+
|
||||
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
|
||||
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
|
||||
+
|
||||
+ topo.standalone.stop()
|
||||
+ assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'")
|
||||
+
|
||||
+ def fin():
|
||||
+ topo.standalone.restart()
|
||||
+ try:
|
||||
+ filtered_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX)
|
||||
+ for i in filtered_roles.list():
|
||||
+ i.delete()
|
||||
+ except:
|
||||
+ pass
|
||||
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
|
||||
if __name__ == "__main__":
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c
index 3d076a4cb..cd00e0aba 100644
--- a/ldap/servers/plugins/roles/roles_cache.c
+++ b/ldap/servers/plugins/roles/roles_cache.c
@@ -530,6 +530,15 @@ roles_cache_trigger_update_role(char *dn, Slapi_Entry *roles_entry, Slapi_DN *be
     }

     slapi_rwlock_unlock(global_lock);
+    {
+        /* A role definition has been updated, enable vattr handling */
+        char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
+        errorbuf[0] = '\0';
+        config_set_ignore_vattrs(CONFIG_IGNORE_VATTRS, "off", errorbuf, 1);
+        slapi_log_err(SLAPI_LOG_INFO,
+                      "roles_cache_trigger_update_role",
+                      "Because of virtual attribute definition (role), %s was set to 'off'\n", CONFIG_IGNORE_VATTRS);
+    }

     slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, "<-- roles_cache_trigger_update_role: %p \n", roles_list);
 }
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 2ea4cd760..f6dacce30 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -1803,7 +1803,7 @@ FrontendConfig_init(void)
     init_ndn_cache_enabled = cfg->ndn_cache_enabled = LDAP_ON;
     cfg->ndn_cache_max_size = SLAPD_DEFAULT_NDN_SIZE;
     init_sasl_mapping_fallback = cfg->sasl_mapping_fallback = LDAP_OFF;
-    init_ignore_vattrs = cfg->ignore_vattrs = LDAP_OFF;
+    init_ignore_vattrs = cfg->ignore_vattrs = LDAP_ON;
     cfg->sasl_max_bufsize = SLAPD_DEFAULT_SASL_MAXBUFSIZE;
     cfg->unhashed_pw_switch = SLAPD_DEFAULT_UNHASHED_PW_SWITCH;
     init_return_orig_type = cfg->return_orig_type = LDAP_OFF;
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 4931a4ca4..61ed40b7d 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -1042,6 +1042,8 @@ main(int argc, char **argv)
     eq_start(); /* must be done after plugins started - DEPRECATED */
     eq_start_rel(); /* must be done after plugins started */

+    vattr_check(); /* Check if it exists virtual attribute definitions */
+
 #ifdef HPUX10
     /* HPUX linker voodoo */
     if (collation_init == NULL) {
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index c143f3772..442a621aa 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1462,6 +1462,7 @@ void subentry_create_filter(Slapi_Filter **filter);
 */
 void vattr_init(void);
 void vattr_cleanup(void);
+void vattr_check(void);

 /*
  * slapd_plhash.c - supplement to NSPR plhash
diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c
index 09dab6ecf..24750a57c 100644
--- a/ldap/servers/slapd/vattr.c
+++ b/ldap/servers/slapd/vattr.c
@@ -64,6 +64,10 @@
 #define SOURCEFILE "vattr.c"
 static char *sourcefile = SOURCEFILE;

+/* stolen from roles_cache.h, must remain in sync */
+#define NSROLEATTR "nsRole"
+static Slapi_Eq_Context vattr_check_ctx = {0};
+
 /* Define only for module test code */
 /* #define VATTR_TEST_CODE */

@@ -130,6 +134,112 @@ vattr_cleanup()
 {
     /* We need to free and remove anything that was inserted first */
     vattr_map_destroy();
+    slapi_eq_cancel_rel(vattr_check_ctx);
+}
+
+static void
+vattr_check_thread(void *arg)
+{
+    Slapi_Backend *be = NULL;
+    char *cookie = NULL;
+    Slapi_DN *base_sdn = NULL;
+    Slapi_PBlock *search_pb = NULL;
+    Slapi_Entry **entries = NULL;
+    int32_t rc;
+    int32_t check_suffix; /* used to skip suffixes in ignored_backend */
+    PRBool exist_vattr_definition = PR_FALSE;
+    char *ignored_backend[5] = {"cn=config", "cn=schema", "cn=monitor", "cn=changelog", NULL}; /* suffixes to ignore */
+    char *suffix;
+    int ignore_vattrs;
+
+    ignore_vattrs = config_get_ignore_vattrs();
+
+    if (!ignore_vattrs) {
+        /* Nothing to do more, we are already evaluating virtual attribute */
+        return;
+    }
+
+    search_pb = slapi_pblock_new();
+    be = slapi_get_first_backend(&cookie);
+    while (be && !exist_vattr_definition && !slapi_is_shutting_down()) {
+        base_sdn = (Slapi_DN *) slapi_be_getsuffix(be, 0);
+        suffix = (char *) slapi_sdn_get_dn(base_sdn);
+
+        if (suffix) {
+            /* First check that we need to check that suffix */
+            check_suffix = 1;
+            for (size_t i = 0; ignored_backend[i]; i++) {
+                if (strcasecmp(suffix, ignored_backend[i]) == 0) {
+                    check_suffix = 0;
+                    break;
+                }
+            }
+
+            /* search for a role or cos definition */
+            if (check_suffix) {
+                slapi_search_internal_set_pb(search_pb, slapi_sdn_get_dn(base_sdn),
+                        LDAP_SCOPE_SUBTREE, "(&(objectclass=ldapsubentry)(|(objectclass=nsRoleDefinition)(objectclass=cosSuperDefinition)))",
+                        NULL, 0, NULL, NULL, (void *) plugin_get_default_component_id(), 0);
+                slapi_search_internal_pb(search_pb);
+                slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
+
+                if (rc == LDAP_SUCCESS) {
+                    slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
+                    if (entries && entries[0]) {
+                        /* it exists at least a cos or role definition */
+                        exist_vattr_definition = PR_TRUE;
+                        slapi_log_err(SLAPI_LOG_INFO,
+                                      "vattr_check_thread",
+                                      "Found a role/cos definition in %s\n", slapi_entry_get_dn(entries[0]));
+                    } else {
+                        slapi_log_err(SLAPI_LOG_INFO,
+                                      "vattr_check_thread",
+                                      "No role/cos definition in %s\n", slapi_sdn_get_dn(base_sdn));
+                    }
+                }
+                slapi_free_search_results_internal(search_pb);
+            } /* check_suffix */
+        } /* suffix */
+        be = (backend *) slapi_get_next_backend(cookie);
+    }
+    slapi_pblock_destroy(search_pb);
+    slapi_ch_free_string(&cookie);
+
+    /* Now if a virtual attribute is defined, then CONFIG_IGNORE_VATTRS -> off */
+    if (exist_vattr_definition) {
+        char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
+        errorbuf[0] = '\0';
+        config_set_ignore_vattrs(CONFIG_IGNORE_VATTRS, "off", errorbuf, 1);
+        slapi_log_err(SLAPI_LOG_INFO,
+                      "vattr_check_thread",
+                      "Because of virtual attribute definition, %s was set to 'off'\n", CONFIG_IGNORE_VATTRS);
+    }
+}
+static void
+vattr_check_schedule_once(time_t when __attribute__((unused)), void *arg)
+{
+    if (PR_CreateThread(PR_USER_THREAD,
+                        vattr_check_thread,
+                        (void *) arg,
+                        PR_PRIORITY_NORMAL,
+                        PR_GLOBAL_THREAD,
+                        PR_UNJOINABLE_THREAD,
+                        SLAPD_DEFAULT_THREAD_STACKSIZE) == NULL) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "vattr_check_schedule_once",
+                      "Fails to check if %s needs to be toggled to FALSE\n", CONFIG_IGNORE_VATTRS);
+    }
+}
+#define VATTR_CHECK_DELAY 3
+void
+vattr_check()
+{
+    /* Schedule running a callback that will create a thread
+     * but make sure it is called a first thing when event loop is created */
+    time_t now;
+
+    now = slapi_current_rel_time_t();
+    vattr_check_ctx = slapi_eq_once_rel(vattr_check_schedule_once, NULL, now + VATTR_CHECK_DELAY);
 }

 /* The public interface functions start here */
@@ -1631,6 +1741,9 @@ slapi_vattrspi_regattr(vattr_sp_handle *h, char *type_name_to_register, char *DN
     char *type_to_add;
     int free_type_to_add = 0;
     Slapi_DN original_dn;
+    int ignore_vattrs;
+
+    ignore_vattrs = config_get_ignore_vattrs();

     slapi_sdn_init(&original_dn);

@@ -1676,6 +1789,20 @@ slapi_vattrspi_regattr(vattr_sp_handle *h, char *type_name_to_register, char *DN
     if (free_type_to_add) {
         slapi_ch_free((void **)&type_to_add);
     }
+    if (ignore_vattrs && strcasecmp(type_name_to_register, NSROLEATTR)) {
+        /* A new virtual attribute is registered.
+         * This new vattr being *different* than the default roles vattr 'nsRole'
+         * It is time to allow vattr lookup
+         */
+        char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
+        errorbuf[0] = '\0';
+        config_set_ignore_vattrs(CONFIG_IGNORE_VATTRS, "off", errorbuf, 1);
+        slapi_log_err(SLAPI_LOG_INFO,
+                      "slapi_vattrspi_regattr",
+                      "Because %s is a new registered virtual attribute , %s was set to 'off'\n",
+                      type_name_to_register,
+                      CONFIG_IGNORE_VATTRS);
+    }

     return ret;
 }
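The startup detection in vattr_check_thread above is just an internal subtree search per user suffix. The same check can be reproduced from a client for troubleshooting; a minimal sketch, assuming a connected lib389 DirSrv instance named `standalone` (not the server-side code path):

    import ldap
    from lib389._constants import DEFAULT_SUFFIX

    # Same filter the patch uses to look for Role/CoS definitions (client-side equivalent, sketch only)
    VATTR_DEF_FILTER = "(&(objectclass=ldapsubentry)(|(objectclass=nsRoleDefinition)(objectclass=cosSuperDefinition)))"
    entries = standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, VATTR_DEF_FILTER, ['cn'])
    print("role/cos definitions found: %d" % len(entries))
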
diff --git a/src/lib389/lib389/idm/role.py b/src/lib389/lib389/idm/role.py
index fe91aab6f..9a2bff3d6 100644
--- a/src/lib389/lib389/idm/role.py
+++ b/src/lib389/lib389/idm/role.py
@@ -252,6 +252,8 @@ class FilteredRole(Role):
         self._rdn_attribute = 'cn'
         self._create_objectclasses = ['nsComplexRoleDefinition', 'nsFilteredRoleDefinition']

+        self._protected = False
+


 class FilteredRoles(Roles):
@@ -285,6 +287,7 @@ class ManagedRole(Role):
         self._rdn_attribute = 'cn'
         self._create_objectclasses = ['nsSimpleRoleDefinition', 'nsManagedRoleDefinition']

+        self._protected = False

 class ManagedRoles(Roles):
     """DSLdapObjects that represents all Managed Roles entries
@@ -320,6 +323,7 @@ class NestedRole(Role):
         self._rdn_attribute = 'cn'
         self._create_objectclasses = ['nsComplexRoleDefinition', 'nsNestedRoleDefinition']

+        self._protected = False

 class NestedRoles(Roles):
     """DSLdapObjects that represents all NestedRoles entries in suffix.
--
2.31.1
@@ -0,0 +1,86 @@
From 36da9be6b82c96a656fa6dd1f99e5a7c41c7652a Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Mon, 23 May 2022 16:53:41 +0200
Subject: [PATCH] Revert 4866 - cl trimming not applicable in 1.4.3

---
 .../suites/healthcheck/health_repl_test.py      |  2 +-
 .../tests/suites/replication/acceptance_test.py | 17 +----------------
 src/lib389/lib389/replica.py                    | 13 -------------
 3 files changed, 2 insertions(+), 30 deletions(-)

diff --git a/dirsrvtests/tests/suites/healthcheck/health_repl_test.py b/dirsrvtests/tests/suites/healthcheck/health_repl_test.py
index 9e1af2ff8..238d25290 100644
--- a/dirsrvtests/tests/suites/healthcheck/health_repl_test.py
+++ b/dirsrvtests/tests/suites/healthcheck/health_repl_test.py
@@ -74,7 +74,7 @@ def set_changelog_trimming(instance):
     inst_changelog = Changelog5(instance)

     log.info('Set nsslapd-changelogmaxage to 30d')
-    inst_changelog.set_max_age('30d')
+    inst_changelog.add('nsslapd-changelogmaxage', '30')


 @pytest.mark.ds50873
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
index 8b96df7a4..a5f0c4c6b 100644
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
@@ -15,7 +15,7 @@ from lib389.topologies import topology_m4 as topo_m4
 from lib389.topologies import topology_m2 as topo_m2
 from . import get_repl_entries
 from lib389.idm.user import UserAccount
-from lib389.replica import ReplicationManager, Changelog
+from lib389.replica import ReplicationManager
 from lib389._constants import *

 pytestmark = pytest.mark.tier0
@@ -645,21 +645,6 @@ def test_csngen_task(topo_m2):
     assert m1.searchErrorsLog("_csngen_gen_tester_main")


-def test_default_cl_trimming_enabled(topo_m2):
-    """Check that changelog trimming was enabled by default
-
-    :id: c37b9a28-f961-4867-b8a1-e81edd7f9bf3
-    :setup: Supplier Instance
-    :steps:
-        1. Check changelog has trimming set up by default
-    :expectedresults:
-        1. Success
-    """
-
-    # Set up changelog trimming by default
-    cl = Changelog(topo_m2.ms["supplier1"], DEFAULT_SUFFIX)
-    assert cl.get_attr_val_utf8("nsslapd-changelogmaxage") == "7d"
-

 if __name__ == '__main__':
     # Run isolated
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index c7328605b..90905dbf1 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -1667,19 +1667,6 @@ class Replicas(DSLdapObjects):
         self._childobject = Replica
         self._basedn = DN_MAPPING_TREE

-    def create(self, rdn=None, properties=None):
-        replica = super(Replicas, self).create(rdn, properties)
-
-        # Set up changelog trimming by default
-        if properties is not None:
-            for attr, val in properties.items():
-                if attr.lower() == 'nsds5replicaroot':
-                    cl = Changelog(self._instance, val[0])
-                    cl.set_max_age("7d")
-                    break
-
-        return replica
-
     def get(self, selector=[], dn=None):
         """Get a child entry (DSLdapObject, Replica, etc.) with dn or selector
         using a base DN and objectClasses of our object (DSLdapObjects, Replicas, etc.)
--
2.31.1
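With the Replicas.create() hook removed by this revert, changelog trimming on the 1.4.3 branch is configured explicitly, as the healthcheck test above does with Changelog5. A minimal sketch, assuming an existing connected supplier instance named `inst` (names are placeholders, not part of the patch):

    from lib389.replica import Changelog5

    cl = Changelog5(inst)
    # Set a trimming max age; use replace() instead of add() if the attribute is already present
    cl.add('nsslapd-changelogmaxage', '30d')
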
@@ -0,0 +1,77 @@
From 6c8906559cd049b14b08e4d3158338f6611f04e4 Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Fri, 20 Aug 2021 09:18:50 +1000
Subject: [PATCH] Issue 4877 - RFE - EntryUUID to validate UUIDs on fixup
 (#4878)

Bug Description: Due to changing the syntax of EntryUUID's
to string, we may have invalid EntryUUID's imported into
the database.

Fix Description: To resolve this during a fixup we validate
that Uuid's have a valid syntax. If they do not, we regenerate
them.

fixes: https://github.com/389ds/389-ds-base/issues/4877

Author: William Brown <william@blackhats.net.au>

Review by: @mreynolds389
---
 src/plugins/entryuuid/src/lib.rs | 28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)

diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
index 29a9f1258..ad3faef4b 100644
--- a/src/plugins/entryuuid/src/lib.rs
+++ b/src/plugins/entryuuid/src/lib.rs
@@ -144,11 +144,17 @@ impl SlapiPlugin3 for EntryUuid {
         // Error if the first filter is empty?

         // Now, to make things faster, we wrap the filter in a exclude term.
+
+        // 2021 - #4877 because we allow entryuuid to be strings, on import these may
+        // be invalid. As a result, we DO need to allow the fixup to check the entryuuid
+        // value is correct, so we can not exclude these during the search.
+        /*
         let raw_filter = if !raw_filter.starts_with('(') && !raw_filter.ends_with('(') {
             format!("(&({})(!(entryuuid=*)))", raw_filter)
         } else {
             format!("(&{}(!(entryuuid=*)))", raw_filter)
         };
+        */

         Ok(FixupData { basedn, raw_filter })
     }
@@ -213,14 +219,20 @@ pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError
     /* Supply a modification to the entry. */
     let sdn = e.get_sdnref();

-    /* Sanity check that entryuuid doesn't already exist */
-    if e.contains_attr("entryUUID") {
-        log_error!(
-            ErrorLevel::Plugin,
-            "skipping fixup for -> {}",
-            sdn.to_dn_string()
-        );
-        return Ok(());
+    /* Check that entryuuid doesn't already exist, and is valid */
+    if let Some(valueset) = e.get_attr("entryUUID") {
+        if valueset.iter().all(|v| {
+            let u: Result<Uuid, _> = (&v).try_into();
+            u.is_ok()
+        }) {
+            // All values were valid uuid, move on!
+            log_error!(
+                ErrorLevel::Plugin,
+                "skipping fixup for -> {}",
+                sdn.to_dn_string()
+            );
+            return Ok(());
+        }
     }

     // Setup the modifications
--
2.31.1
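The rule this fixup applies is simply "keep the existing entryUUID value only if it parses as a UUID; otherwise regenerate it". A client-side Python sketch of the same validation idea (illustration only, not the plugin's Rust code; the sample values are made up):

    import uuid

    def is_valid_entryuuid(value: str) -> bool:
        # Keep the value only if it parses as a UUID, as the fixup does
        try:
            uuid.UUID(value)
            return True
        except ValueError:
            return False

    assert is_valid_entryuuid("e2ce6442-9d0e-4fe9-8a60-8cc9c7a182b5")
    assert not is_valid_entryuuid("not-a-uuid")
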
@ -1,621 +0,0 @@
|
||||
From 968ad6b5039d839bfbc61da755c252cc7598415b Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Mon, 25 Oct 2021 17:09:57 +0200
|
||||
Subject: [PATCH 02/12] Issue 4943 - Fix csn generator to limit time skew drift
|
||||
- PR 4946
|
||||
|
||||
---
|
||||
ldap/servers/slapd/csngen.c | 433 +++++++++++++++++-------------
|
||||
ldap/servers/slapd/slapi-plugin.h | 9 +
|
||||
2 files changed, 255 insertions(+), 187 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/csngen.c b/ldap/servers/slapd/csngen.c
|
||||
index fcd88b4cc..c7c5c2ba8 100644
|
||||
--- a/ldap/servers/slapd/csngen.c
|
||||
+++ b/ldap/servers/slapd/csngen.c
|
||||
@@ -18,8 +18,9 @@
|
||||
#include "prcountr.h"
|
||||
#include "slap.h"
|
||||
|
||||
+
|
||||
#define CSN_MAX_SEQNUM 0xffff /* largest sequence number */
|
||||
-#define CSN_MAX_TIME_ADJUST 24 * 60 * 60 /* maximum allowed time adjustment (in seconds) = 1 day */
|
||||
+#define CSN_MAX_TIME_ADJUST _SEC_PER_DAY /* maximum allowed time adjustment (in seconds) = 1 day */
|
||||
#define ATTR_CSN_GENERATOR_STATE "nsState" /* attribute that stores csn state information */
|
||||
#define STATE_FORMAT "%8x%8x%8x%4hx%4hx"
|
||||
#define STATE_LENGTH 32
|
||||
@@ -27,6 +28,8 @@
|
||||
#define CSN_CALC_TSTAMP(gen) ((gen)->state.sampled_time + \
|
||||
(gen)->state.local_offset + \
|
||||
(gen)->state.remote_offset)
|
||||
+#define TIME_DIFF_WARNING_DELAY (30*_SEC_PER_DAY) /* log an info message when difference
|
||||
+ between clock is greater than this delay */
|
||||
|
||||
/*
|
||||
* **************************************************************************
|
||||
@@ -63,6 +66,7 @@ typedef struct csngen_state
|
||||
struct csngen
|
||||
{
|
||||
csngen_state state; /* persistent state of the generator */
|
||||
+ int32_t (*gettime)(struct timespec *tp); /* Get local time */
|
||||
callback_list callbacks; /* list of callbacks registered with the generator */
|
||||
Slapi_RWLock *lock; /* concurrency control */
|
||||
};
|
||||
@@ -78,7 +82,7 @@ static int _csngen_init_callbacks(CSNGen *gen);
|
||||
static void _csngen_call_callbacks(const CSNGen *gen, const CSN *csn, PRBool abort);
|
||||
static int _csngen_cmp_callbacks(const void *el1, const void *el2);
|
||||
static void _csngen_free_callbacks(CSNGen *gen);
|
||||
-static int _csngen_adjust_local_time(CSNGen *gen, time_t cur_time);
|
||||
+static int _csngen_adjust_local_time(CSNGen *gen);
|
||||
|
||||
/*
|
||||
* **************************************************************************
|
||||
@@ -121,6 +125,7 @@ csngen_new(ReplicaId rid, Slapi_Attr *state)
|
||||
_csngen_init_callbacks(gen);
|
||||
|
||||
gen->state.rid = rid;
|
||||
+ gen->gettime = slapi_clock_utc_gettime;
|
||||
|
||||
if (state) {
|
||||
rc = _csngen_parse_state(gen, state);
|
||||
@@ -164,10 +169,7 @@ csngen_free(CSNGen **gen)
|
||||
int
|
||||
csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
|
||||
{
|
||||
- struct timespec now = {0};
|
||||
int rc = CSN_SUCCESS;
|
||||
- time_t cur_time;
|
||||
- int delta;
|
||||
|
||||
if (gen == NULL || csn == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn", "Invalid argument\n");
|
||||
@@ -180,39 +182,13 @@ csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
|
||||
return CSN_MEMORY_ERROR;
|
||||
}
|
||||
|
||||
- if ((rc = slapi_clock_gettime(&now)) != 0) {
|
||||
- /* Failed to get system time, we must abort */
|
||||
- slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn",
|
||||
- "Failed to get system time (%s)\n",
|
||||
- slapd_system_strerror(rc));
|
||||
- return CSN_TIME_ERROR;
|
||||
- }
|
||||
- cur_time = now.tv_sec;
|
||||
-
|
||||
slapi_rwlock_wrlock(gen->lock);
|
||||
|
||||
- /* check if the time should be adjusted */
|
||||
- delta = cur_time - gen->state.sampled_time;
|
||||
- if (delta > _SEC_PER_DAY || delta < (-1 * _SEC_PER_DAY)) {
|
||||
- /* We had a jump larger than a day */
|
||||
- slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn",
|
||||
- "Detected large jump in CSN time. Delta: %d (current time: %ld vs previous time: %ld)\n",
|
||||
- delta, cur_time, gen->state.sampled_time);
|
||||
- }
|
||||
- if (delta > 0) {
|
||||
- rc = _csngen_adjust_local_time(gen, cur_time);
|
||||
- if (rc != CSN_SUCCESS) {
|
||||
- slapi_rwlock_unlock(gen->lock);
|
||||
- return rc;
|
||||
- }
|
||||
+ rc = _csngen_adjust_local_time(gen);
|
||||
+ if (rc != CSN_SUCCESS) {
|
||||
+ slapi_rwlock_unlock(gen->lock);
|
||||
+ return rc;
|
||||
}
|
||||
- /* if (delta < 0) this means the local system time was set back
|
||||
- * the new csn will be generated based on sampled time, which is
|
||||
- * ahead of system time and previously generated csns.
|
||||
- * the time stamp of the csn will not change until system time
|
||||
- * catches up or is corrected by remote csns.
|
||||
- * But we need to ensure that the seq_num does not overflow.
|
||||
- */
|
||||
|
||||
if (gen->state.seq_num == CSN_MAX_SEQNUM) {
|
||||
slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn", "Sequence rollover; "
|
||||
@@ -261,13 +237,36 @@ csngen_rewrite_rid(CSNGen *gen, ReplicaId rid)
|
||||
}
|
||||
|
||||
/* this function should be called when a remote CSN for the same part of
|
||||
- the dit becomes known to the server (for instance, as part of RUV during
|
||||
- replication session. In response, the generator would adjust its notion
|
||||
- of time so that it does not generate smaller csns */
|
||||
+ * the dit becomes known to the server (for instance, as part of RUV during
|
||||
+ * replication session. In response, the generator would adjust its notion
|
||||
+ * of time so that it does not generate smaller csns
|
||||
+ *
|
||||
+ * The following counters are updated
|
||||
+ * - when a new csn is generated
|
||||
+ * - when csngen is adjusted (beginning of a incoming (extop) or outgoing
|
||||
+ * (inc_protocol) session)
|
||||
+ *
|
||||
+ * sampled_time: It takes the value of current system time.
|
||||
+ *
|
||||
+ * remote offset: it is updated when 'csn' argument is ahead of the next csn
|
||||
+ * that the csn generator will generate. It is the MAX jump ahead, it is not
|
||||
+ * cumulative counter (e.g. if remote_offset=7 and 'csn' is 5sec ahead
|
||||
+ * remote_offset stays the same. The jump ahead (5s) pour into the local offset.
|
||||
+ * It is not clear of the interest of this counter. It gives an indication of
|
||||
+ * the maximum jump ahead but not much.
|
||||
+ *
|
||||
+ * local offset: it is increased if
|
||||
+ * - system time is going backward (compare sampled_time)
|
||||
+ * - if 'csn' argument is ahead of csn that the csn generator would generate
|
||||
+ * AND diff('csn', csngen.new_csn) < remote_offset
|
||||
+ * then the diff "pour" into local_offset
|
||||
+ * It is decreased as the clock is ticking, local offset is "consumed" as
|
||||
+ * sampled_time progresses.
|
||||
+ */
|
||||
int
|
||||
csngen_adjust_time(CSNGen *gen, const CSN *csn)
|
||||
{
|
||||
- time_t remote_time, remote_offset, cur_time;
|
||||
+ time_t remote_time, remote_offset, cur_time, old_time, new_time;
|
||||
PRUint16 remote_seqnum;
|
||||
int rc;
|
||||
extern int config_get_ignore_time_skew(void);
|
||||
@@ -281,6 +280,11 @@ csngen_adjust_time(CSNGen *gen, const CSN *csn)
|
||||
|
||||
slapi_rwlock_wrlock(gen->lock);
|
||||
|
||||
+ /* Get last local csn time */
|
||||
+ old_time = CSN_CALC_TSTAMP(gen);
|
||||
+ /* update local offset and sample_time */
|
||||
+ rc = _csngen_adjust_local_time(gen);
|
||||
+
|
||||
if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
cur_time = CSN_CALC_TSTAMP(gen);
|
||||
slapi_log_err(SLAPI_LOG_REPL, "csngen_adjust_time",
|
||||
@@ -290,79 +294,60 @@ csngen_adjust_time(CSNGen *gen, const CSN *csn)
|
||||
gen->state.local_offset,
|
||||
gen->state.remote_offset);
|
||||
}
|
||||
- /* make sure we have the current time */
|
||||
- cur_time = slapi_current_utc_time();
|
||||
-
|
||||
- /* make sure sampled_time is current */
|
||||
- /* must only call adjust_local_time if the current time is greater than
|
||||
- the generator state time */
|
||||
- if ((cur_time > gen->state.sampled_time) &&
|
||||
- (CSN_SUCCESS != (rc = _csngen_adjust_local_time(gen, cur_time)))) {
|
||||
+ if (rc != CSN_SUCCESS) {
|
||||
/* _csngen_adjust_local_time will log error */
|
||||
slapi_rwlock_unlock(gen->lock);
|
||||
- csngen_dump_state(gen);
|
||||
+ csngen_dump_state(gen, SLAPI_LOG_DEBUG);
|
||||
return rc;
|
||||
}
|
||||
|
||||
- cur_time = CSN_CALC_TSTAMP(gen);
|
||||
- if (remote_time >= cur_time) {
|
||||
- time_t new_time = 0;
|
||||
-
|
||||
- if (remote_seqnum > gen->state.seq_num) {
|
||||
- if (remote_seqnum < CSN_MAX_SEQNUM) {
|
||||
- gen->state.seq_num = remote_seqnum + 1;
|
||||
- } else {
|
||||
- remote_time++;
|
||||
- }
|
||||
- }
|
||||
-
|
||||
- remote_offset = remote_time - cur_time;
|
||||
- if (remote_offset > gen->state.remote_offset) {
|
||||
- if (ignore_time_skew || (remote_offset <= CSN_MAX_TIME_ADJUST)) {
|
||||
- gen->state.remote_offset = remote_offset;
|
||||
- } else /* remote_offset > CSN_MAX_TIME_ADJUST */
|
||||
- {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, "csngen_adjust_time",
|
||||
- "Adjustment limit exceeded; value - %ld, limit - %ld\n",
|
||||
- remote_offset, (long)CSN_MAX_TIME_ADJUST);
|
||||
- slapi_rwlock_unlock(gen->lock);
|
||||
- csngen_dump_state(gen);
|
||||
- return CSN_LIMIT_EXCEEDED;
|
||||
- }
|
||||
- } else if (remote_offset > 0) { /* still need to account for this */
|
||||
- gen->state.local_offset += remote_offset;
|
||||
+ remote_offset = remote_time - CSN_CALC_TSTAMP(gen);
|
||||
+ if (remote_offset > 0) {
|
||||
+ if (!ignore_time_skew && (gen->state.remote_offset + remote_offset > CSN_MAX_TIME_ADJUST)) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "csngen_adjust_time",
|
||||
+ "Adjustment limit exceeded; value - %ld, limit - %ld\n",
|
||||
+ remote_offset, (long)CSN_MAX_TIME_ADJUST);
|
||||
+ slapi_rwlock_unlock(gen->lock);
|
||||
+ csngen_dump_state(gen, SLAPI_LOG_DEBUG);
|
||||
+ return CSN_LIMIT_EXCEEDED;
|
||||
}
|
||||
-
|
||||
- new_time = CSN_CALC_TSTAMP(gen);
|
||||
- /* let's revisit the seq num - if the new time is > the old
|
||||
- tiem, we should reset the seq number to remote + 1 if
|
||||
- this won't cause a wrap around */
|
||||
- if (new_time >= cur_time) {
|
||||
- /* just set seq_num regardless of whether the current one
|
||||
- is < or > than the remote one - the goal of this function
|
||||
- is to make sure we generate CSNs > the remote CSN - if
|
||||
- we have increased the time, we can decrease the seqnum
|
||||
- and still guarantee that any new CSNs generated will be
|
||||
- > any current CSNs we have generated */
|
||||
- if (remote_seqnum < gen->state.seq_num) {
|
||||
- gen->state.seq_num ++;
|
||||
- } else {
|
||||
- gen->state.seq_num = remote_seqnum + 1;
|
||||
- }
|
||||
+ gen->state.remote_offset += remote_offset;
|
||||
+ /* To avoid beat phenomena between suppliers let put 1 second in local_offset
|
||||
+ * it will be eaten at next clock tick rather than increasing remote offset
|
||||
+ * If we do not do that we will have a time skew drift of 1 second per 2 seconds
|
||||
+ * if suppliers are desynchronized by 0.5 second
|
||||
+ */
|
||||
+ if (gen->state.local_offset == 0) {
|
||||
+ gen->state.local_offset++;
|
||||
+ gen->state.remote_offset--;
|
||||
}
|
||||
- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
- slapi_log_err(SLAPI_LOG_REPL, "csngen_adjust_time",
|
||||
- "gen state after %08lx%04x:%ld:%ld:%ld\n",
|
||||
- new_time, gen->state.seq_num,
|
||||
- gen->state.sampled_time,
|
||||
- gen->state.local_offset,
|
||||
- gen->state.remote_offset);
|
||||
+ }
|
||||
+ /* Time to compute seqnum so that
|
||||
+ * new csn >= remote csn and new csn >= old local csn
|
||||
+ */
|
||||
+ new_time = CSN_CALC_TSTAMP(gen);
|
||||
+ PR_ASSERT(new_time >= old_time);
|
||||
+ PR_ASSERT(new_time >= remote_time);
|
||||
+ if (new_time > old_time) {
|
||||
+ /* Can reset (local) seqnum */
|
||||
+ gen->state.seq_num = 0;
|
||||
+ }
|
||||
+ if (new_time == remote_time && remote_seqnum >= gen->state.seq_num) {
|
||||
+ if (remote_seqnum >= CSN_MAX_SEQNUM) {
|
||||
+ gen->state.seq_num = 0;
|
||||
+ gen->state.local_offset++;
|
||||
+ } else {
|
||||
+ gen->state.seq_num = remote_seqnum + 1;
|
||||
}
|
||||
- } else if (gen->state.remote_offset > 0) {
|
||||
- /* decrease remote offset? */
|
||||
- /* how to decrease remote offset but ensure that we don't
|
||||
- generate a duplicate CSN, or a CSN smaller than one we've already
|
||||
- generated? */
|
||||
+ }
|
||||
+
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, "csngen_adjust_time",
|
||||
+ "gen state after %08lx%04x:%ld:%ld:%ld\n",
|
||||
+ new_time, gen->state.seq_num,
|
||||
+ gen->state.sampled_time,
|
||||
+ gen->state.local_offset,
|
||||
+ gen->state.remote_offset);
|
||||
}
|
||||
|
||||
slapi_rwlock_unlock(gen->lock);
|
||||
@@ -435,16 +420,16 @@ csngen_unregister_callbacks(CSNGen *gen, void *cookie)
|
||||
|
||||
/* debugging function */
|
||||
void
|
||||
-csngen_dump_state(const CSNGen *gen)
|
||||
+csngen_dump_state(const CSNGen *gen, int severity)
|
||||
{
|
||||
if (gen) {
|
||||
slapi_rwlock_rdlock(gen->lock);
|
||||
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "CSN generator's state:\n");
|
||||
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\treplica id: %d\n", gen->state.rid);
|
||||
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tsampled time: %ld\n", gen->state.sampled_time);
|
||||
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tlocal offset: %ld\n", gen->state.local_offset);
|
||||
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tremote offset: %ld\n", gen->state.remote_offset);
|
||||
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tsequence number: %d\n", gen->state.seq_num);
|
||||
+ slapi_log_err(severity, "csngen_dump_state", "CSN generator's state:\n");
|
||||
+ slapi_log_err(severity, "csngen_dump_state", "\treplica id: %d\n", gen->state.rid);
|
||||
+ slapi_log_err(severity, "csngen_dump_state", "\tsampled time: %ld\n", gen->state.sampled_time);
|
||||
+ slapi_log_err(severity, "csngen_dump_state", "\tlocal offset: %ld\n", gen->state.local_offset);
|
||||
+ slapi_log_err(severity, "csngen_dump_state", "\tremote offset: %ld\n", gen->state.remote_offset);
|
||||
+ slapi_log_err(severity, "csngen_dump_state", "\tsequence number: %d\n", gen->state.seq_num);
|
||||
slapi_rwlock_unlock(gen->lock);
|
||||
}
|
||||
}
|
||||
@@ -459,7 +444,7 @@ csngen_test()
|
||||
CSNGen *gen = csngen_new(255, NULL);
|
||||
|
||||
slapi_log_err(SLAPI_LOG_DEBUG, "csngen_test", "staring csn generator test ...");
|
||||
- csngen_dump_state(gen);
|
||||
+ csngen_dump_state(gen, SLAPI_LOG_INFO);
|
||||
|
||||
rc = _csngen_start_test_threads(gen);
|
||||
if (rc == 0) {
|
||||
@@ -469,7 +454,7 @@ csngen_test()
|
||||
}
|
||||
|
||||
_csngen_stop_test_threads();
|
||||
- csngen_dump_state(gen);
|
||||
+ csngen_dump_state(gen, SLAPI_LOG_INFO);
|
||||
slapi_log_err(SLAPI_LOG_DEBUG, "csngen_test", "csn generator test is complete...");
|
||||
}
|
||||
|
||||
@@ -574,94 +559,93 @@ _csngen_cmp_callbacks(const void *el1, const void *el2)
|
||||
return 1;
|
||||
}
|
||||
|
||||
+/* Get time and adjust local offset */
|
||||
static int
|
||||
-_csngen_adjust_local_time(CSNGen *gen, time_t cur_time)
|
||||
+_csngen_adjust_local_time(CSNGen *gen)
|
||||
{
|
||||
extern int config_get_ignore_time_skew(void);
|
||||
int ignore_time_skew = config_get_ignore_time_skew();
|
||||
- time_t time_diff = cur_time - gen->state.sampled_time;
|
||||
+ struct timespec now = {0};
|
||||
+ time_t time_diff;
|
||||
+ time_t cur_time;
|
||||
+ int rc;
|
||||
|
||||
+
|
||||
+ if ((rc = gen->gettime(&now)) != 0) {
|
||||
+ /* Failed to get system time, we must abort */
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn",
|
||||
+ "Failed to get system time (%s)\n",
|
||||
+ slapd_system_strerror(rc));
|
||||
+ return CSN_TIME_ERROR;
|
||||
+ }
|
||||
+ cur_time = now.tv_sec;
|
||||
+ time_diff = cur_time - gen->state.sampled_time;
|
||||
+
|
||||
+ /* check if the time should be adjusted */
|
||||
if (time_diff == 0) {
|
||||
/* This is a no op - _csngen_adjust_local_time should never be called
|
||||
in this case, because there is nothing to adjust - but just return
|
||||
here to protect ourselves
|
||||
*/
|
||||
return CSN_SUCCESS;
|
||||
- } else if (time_diff > 0) {
|
||||
- time_t ts_before = CSN_CALC_TSTAMP(gen);
|
||||
- time_t ts_after = 0;
|
||||
- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
- time_t new_time = CSN_CALC_TSTAMP(gen);
|
||||
- slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
|
||||
- "gen state before %08lx%04x:%ld:%ld:%ld\n",
|
||||
- new_time, gen->state.seq_num,
|
||||
- gen->state.sampled_time,
|
||||
- gen->state.local_offset,
|
||||
- gen->state.remote_offset);
|
||||
- }
|
||||
-
|
||||
- gen->state.sampled_time = cur_time;
|
||||
- if (time_diff > gen->state.local_offset)
|
||||
- gen->state.local_offset = 0;
|
||||
- else
|
||||
- gen->state.local_offset = gen->state.local_offset - time_diff;
|
||||
-
|
||||
- /* only reset the seq_num if the new timestamp part of the CSN
|
||||
- is going to be greater than the old one - if they are the
|
||||
- same after the above adjustment (which can happen if
|
||||
- csngen_adjust_time has to store the offset in the
|
||||
- local_offset field) we must not allow the CSN to regress or
|
||||
- generate duplicate numbers */
|
||||
- ts_after = CSN_CALC_TSTAMP(gen);
|
||||
- if (ts_after > ts_before) {
|
||||
- gen->state.seq_num = 0; /* only reset if new time > old time */
|
||||
- }
|
||||
-
|
||||
- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
- time_t new_time = CSN_CALC_TSTAMP(gen);
|
||||
- slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
|
||||
- "gen state after %08lx%04x:%ld:%ld:%ld\n",
|
||||
- new_time, gen->state.seq_num,
|
||||
- gen->state.sampled_time,
|
||||
- gen->state.local_offset,
|
||||
- gen->state.remote_offset);
|
||||
- }
|
||||
- return CSN_SUCCESS;
|
||||
- } else /* time was turned back */
|
||||
- {
|
||||
- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
- time_t new_time = CSN_CALC_TSTAMP(gen);
|
||||
- slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
|
||||
- "gen state back before %08lx%04x:%ld:%ld:%ld\n",
|
||||
- new_time, gen->state.seq_num,
|
||||
- gen->state.sampled_time,
|
||||
- gen->state.local_offset,
|
||||
- gen->state.remote_offset);
|
||||
- }
|
||||
+ }
|
||||
+ if (labs(time_diff) > TIME_DIFF_WARNING_DELAY) {
|
||||
+ /* We had a jump larger than a day */
|
||||
+ slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn",
|
||||
+ "Detected large jump in CSN time. Delta: %ld (current time: %ld vs previous time: %ld)\n",
|
||||
+ time_diff, cur_time, gen->state.sampled_time);
|
||||
+ }
|
||||
+ if (!ignore_time_skew && (gen->state.local_offset - time_diff > CSN_MAX_TIME_ADJUST)) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "_csngen_adjust_local_time",
|
||||
+ "Adjustment limit exceeded; value - %ld, limit - %d\n",
|
||||
+ gen->state.local_offset - time_diff, CSN_MAX_TIME_ADJUST);
|
||||
+ return CSN_LIMIT_EXCEEDED;
|
||||
+ }
|
||||
|
||||
- if (!ignore_time_skew && (labs(time_diff) > CSN_MAX_TIME_ADJUST)) {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, "_csngen_adjust_local_time",
|
||||
- "Adjustment limit exceeded; value - %ld, limit - %d\n",
|
||||
- labs(time_diff), CSN_MAX_TIME_ADJUST);
|
||||
- return CSN_LIMIT_EXCEEDED;
|
||||
- }
|
||||
+ time_t ts_before = CSN_CALC_TSTAMP(gen);
|
||||
+ time_t ts_after = 0;
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ time_t new_time = CSN_CALC_TSTAMP(gen);
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
|
||||
+ "gen state before %08lx%04x:%ld:%ld:%ld\n",
|
||||
+ new_time, gen->state.seq_num,
|
||||
+ gen->state.sampled_time,
|
||||
+ gen->state.local_offset,
|
||||
+ gen->state.remote_offset);
|
||||
+ }
|
||||
|
||||
- gen->state.sampled_time = cur_time;
|
||||
- gen->state.local_offset = MAX_VAL(gen->state.local_offset, labs(time_diff));
|
||||
- gen->state.seq_num = 0;
|
||||
+ gen->state.sampled_time = cur_time;
|
||||
+ gen->state.local_offset = MAX_VAL(0, gen->state.local_offset - time_diff);
|
||||
+ /* new local_offset = MAX_VAL(0, old sample_time + old local_offset - cur_time)
|
||||
+ * ==> new local_offset >= 0 and
|
||||
+ * new local_offset + cur_time >= old sample_time + old local_offset
|
||||
+ * ==> new local_offset + cur_time + remote_offset >=
|
||||
+ * sample_time + old local_offset + remote_offset
|
||||
+ * ==> CSN_CALC_TSTAMP(new gen) >= CSN_CALC_TSTAMP(old gen)
|
||||
+ */
|
||||
|
||||
- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
- time_t new_time = CSN_CALC_TSTAMP(gen);
|
||||
- slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
|
||||
- "gen state back after %08lx%04x:%ld:%ld:%ld\n",
|
||||
- new_time, gen->state.seq_num,
|
||||
- gen->state.sampled_time,
|
||||
- gen->state.local_offset,
|
||||
- gen->state.remote_offset);
|
||||
- }
|
||||
+ /* only reset the seq_num if the new timestamp part of the CSN
|
||||
+ is going to be greater than the old one - if they are the
|
||||
+ same after the above adjustment (which can happen if
|
||||
+ csngen_adjust_time has to store the offset in the
|
||||
+ local_offset field) we must not allow the CSN to regress or
|
||||
+ generate duplicate numbers */
|
||||
+ ts_after = CSN_CALC_TSTAMP(gen);
|
||||
+ PR_ASSERT(ts_after >= ts_before);
|
||||
+ if (ts_after > ts_before) {
|
||||
+ gen->state.seq_num = 0; /* only reset if new time > old time */
|
||||
+ }
|
||||
|
||||
- return CSN_SUCCESS;
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ time_t new_time = CSN_CALC_TSTAMP(gen);
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
|
||||
+ "gen state after %08lx%04x:%ld:%ld:%ld\n",
|
||||
+ new_time, gen->state.seq_num,
|
||||
+ gen->state.sampled_time,
|
||||
+ gen->state.local_offset,
|
||||
+ gen->state.remote_offset);
|
||||
}
|
||||
+ return CSN_SUCCESS;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -799,7 +783,7 @@ _csngen_remote_tester_main(void *data)
|
||||
"Failed to adjust generator's time; csn error - %d\n", rc);
|
||||
}
|
||||
|
||||
- csngen_dump_state(gen);
|
||||
+ csngen_dump_state(gen, SLAPI_LOG_INFO);
|
||||
}
|
||||
csn_free(&csn);
|
||||
|
||||
@@ -825,8 +809,83 @@ _csngen_local_tester_main(void *data)
|
||||
/*
|
||||
* g_sampled_time -= slapi_rand () % 100;
|
||||
*/
|
||||
- csngen_dump_state(gen);
|
||||
+ csngen_dump_state(gen, SLAPI_LOG_INFO);
|
||||
}
|
||||
|
||||
PR_AtomicDecrement(&s_thread_count);
|
||||
}
|
||||
+
|
||||
+int _csngen_tester_state;
|
||||
+int _csngen_tester_state_rid;
|
||||
+
|
||||
+static int
|
||||
+_mynoise(int time, int len, double height)
|
||||
+{
|
||||
+ if (((time/len) % 2) == 0) {
|
||||
+ return -height + 2 * height * ( time % len ) / (len-1);
|
||||
+ } else {
|
||||
+ return height - 2 * height * ( time % len ) / (len-1);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+
|
||||
+int32_t _csngen_tester_gettime(struct timespec *tp)
|
||||
+{
|
||||
+ int vtime = _csngen_tester_state ;
|
||||
+ tp->tv_sec = 0x1000000 + vtime + 2 * _csngen_tester_state_rid;
|
||||
+ if (_csngen_tester_state_rid == 3) {
|
||||
+ /* tp->tv_sec += _mynoise(vtime, 10, 1.5); */
|
||||
+ tp->tv_sec += _mynoise(vtime, 30, 15);
|
||||
+ }
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+/* Mimic a fully meshed multi supplier topology */
|
||||
+void csngen_multi_suppliers_test(void)
|
||||
+{
|
||||
+#define NB_TEST_MASTERS 6
|
||||
+#define NB_TEST_STATES 500
|
||||
+ CSNGen *gen[NB_TEST_MASTERS];
|
||||
+ struct timespec now = {0};
|
||||
+ CSN *last_csn = NULL;
|
||||
+ CSN *csn = NULL;
|
||||
+ int i,j,rc;
|
||||
+
|
||||
+ _csngen_tester_gettime(&now);
|
||||
+
|
||||
+ for (i=0; i< NB_TEST_MASTERS; i++) {
|
||||
+ gen[i] = csngen_new(i+1, NULL);
|
||||
+ gen[i]->gettime = _csngen_tester_gettime;
|
||||
+ gen[i]->state.sampled_time = now.tv_sec;
|
||||
+ }
|
||||
+
|
||||
+ for (_csngen_tester_state=0; _csngen_tester_state < NB_TEST_STATES; _csngen_tester_state++) {
|
||||
+ for (i=0; i< NB_TEST_MASTERS; i++) {
|
||||
+ _csngen_tester_state_rid = i+1;
|
||||
+ rc = csngen_new_csn(gen[i], &csn, PR_FALSE);
|
||||
+ if (rc) {
|
||||
+ continue;
|
||||
+ }
|
||||
+ csngen_dump_state(gen[i], SLAPI_LOG_INFO);
|
||||
+
|
||||
+ if (csn_compare(csn, last_csn) <= 0) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "csngen_multi_suppliers_test",
|
||||
+ "CSN generated in disorder state=%d rid=%d\n", _csngen_tester_state, _csngen_tester_state_rid);
|
||||
+ _csngen_tester_state = NB_TEST_STATES;
|
||||
+ break;
|
||||
+ }
|
||||
+ last_csn = csn;
|
||||
+
|
||||
+ for (j=0; j< NB_TEST_MASTERS; j++) {
|
||||
+ if (i==j) {
|
||||
+ continue;
|
||||
+ }
|
||||
+ _csngen_tester_state_rid = j+1;
|
||||
+ rc = csngen_adjust_time(gen[j], csn);
|
||||
+ if (rc) {
|
||||
+ continue;
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+}
|
||||
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
|
||||
index 56765fdfb..59c5ec9ab 100644
|
||||
--- a/ldap/servers/slapd/slapi-plugin.h
|
||||
+++ b/ldap/servers/slapd/slapi-plugin.h
|
||||
@@ -6762,8 +6762,17 @@ time_t slapi_current_time(void) __attribute__((deprecated));
|
||||
*
|
||||
* \param tp - a timespec struct where the system time is set
|
||||
* \return result code, upon success tp is set to the system time
|
||||
+ * as a clock in UTC timezone. This clock adjusts with ntp steps,
|
||||
+ * and should NOT be used for timer information.
|
||||
*/
|
||||
int32_t slapi_clock_gettime(struct timespec *tp);
|
||||
+/*
|
||||
+ * slapi_clock_gettime would have been better named
|
||||
+ * slapi_clock_utc_gettime, but since the function pre-existed
|
||||
+ * we are just adding an alias (to avoid risking breakage of
|
||||
+ * some custom plugins)
|
||||
+ */
|
||||
+#define slapi_clock_utc_gettime slapi_clock_gettime
|
||||
|
||||
/**
|
||||
* Returns the current system time as a hr clock relative to uptime
|
||||
--
|
||||
2.31.1
|
||||
|
@ -1,240 +0,0 @@
|
||||
From 957ffd53b041c19d27753a028e6f514dcc75dfbd Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Tue, 26 Oct 2021 15:51:24 -0700
|
||||
Subject: [PATCH 03/12] Issue 3584 - Fix PBKDF2_SHA256 hashing in FIPS mode
|
||||
(#4949)
|
||||
|
||||
Issue Description: Use PK11_Decrypt function to get hash data
|
||||
because PK11_ExtractKeyValue function is forbidden in FIPS mode.
|
||||
We can't extract keys while in FIPS mode. But we use PK11_ExtractKeyValue
|
||||
for hashes, and it's not forbidden.
|
||||
|
||||
We can't use OpenSSL's PBKDF2-SHA256 implementation right now because
|
||||
we need to support an upgrade procedure while in FIPS mode (update
|
||||
hash on bind). For that, we should fix existing PBKDF2 usage, and we can
|
||||
switch to OpenSSL's PBKDF2-SHA256 in the following versions.
|
||||
|
||||
Fix Description: Use PK11_Decrypt function to get the data.
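
For illustration only (not part of the patch itself): the FIPS-safe extraction added below boils down to the condensed C sketch that follows. The variable names (slot, symkey, wrapKeyData, result, result_len, hash_out, hash_out_len) are the ones used in pbkdf2_pwd.c further down; allocation, cleanup and error handling are omitted here.

    /* Sketch: instead of PK11_ExtractKeyValue(), wrap the PBKDF2-derived symkey
     * with a throwaway AES key and read the bytes back via PK11_Decrypt(),
     * which is permitted in FIPS mode. */
    PK11SymKey *wrapKey = PK11_KeyGen(slot, CKM_AES_ECB, NULL, 256 / 8, NULL);
    PK11_WrapSymKey(CKM_AES_ECB, NULL, wrapKey, symkey, wrapKeyData);
    PK11_Decrypt(wrapKey, CKM_AES_ECB, NULL, result, &result_len,
                 hash_out_len, wrapKeyData->data, wrapKeyData->len);
    memcpy(hash_out, result, result_len); /* result now holds the PBKDF2-SHA256 hash */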
|
||||
|
||||
Enable TLS on all CI test topologies while in FIPS because without
|
||||
that we don't set up the NSS database correctly.
|
||||
|
||||
Add PBKDF2-SHA256 (OpenSSL) to ldif templates, so the password scheme is
|
||||
discoverable by internal functions.
|
||||
|
||||
https://github.com/389ds/389-ds-base/issues/3584
|
||||
|
||||
Reviewed by: @progier389, @mreynolds389, @Firstyear, @tbordaz (Thanks!!)
|
||||
---
|
||||
.../healthcheck/health_security_test.py | 10 ---
|
||||
ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c | 62 ++++++++++++++++---
|
||||
ldap/servers/slapd/main.c | 12 ++++
|
||||
src/lib389/lib389/__init__.py | 4 ++
|
||||
src/lib389/lib389/topologies.py | 6 +-
|
||||
src/lib389/lib389/utils.py | 13 ++++
|
||||
6 files changed, 86 insertions(+), 21 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/healthcheck/health_security_test.py b/dirsrvtests/tests/suites/healthcheck/health_security_test.py
|
||||
index 6c0d27aaa..c1dc7938c 100644
|
||||
--- a/dirsrvtests/tests/suites/healthcheck/health_security_test.py
|
||||
+++ b/dirsrvtests/tests/suites/healthcheck/health_security_test.py
|
||||
@@ -40,16 +40,6 @@ else:
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
-def is_fips():
|
||||
- if os.path.exists('/proc/sys/crypto/fips_enabled'):
|
||||
- with open('/proc/sys/crypto/fips_enabled', 'r') as f:
|
||||
- state = f.readline().strip()
|
||||
- if state == '1':
|
||||
- return True
|
||||
- else:
|
||||
- return False
|
||||
-
|
||||
-
|
||||
def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None):
|
||||
args = FakeArgs()
|
||||
args.instance = instance.serverid
|
||||
diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
index d310dc792..dcac4fcdd 100644
|
||||
--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
+++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
@@ -91,10 +91,11 @@ pbkdf2_sha256_extract(char *hash_in, SECItem *salt, uint32_t *iterations)
|
||||
SECStatus
|
||||
pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *salt, uint32_t iterations)
|
||||
{
|
||||
- SECItem *result = NULL;
|
||||
SECAlgorithmID *algid = NULL;
|
||||
PK11SlotInfo *slot = NULL;
|
||||
PK11SymKey *symkey = NULL;
|
||||
+ SECItem *wrapKeyData = NULL;
|
||||
+ SECStatus rv = SECFailure;
|
||||
|
||||
/* We assume that NSS is already started. */
|
||||
algid = PK11_CreatePBEV2AlgorithmID(SEC_OID_PKCS5_PBKDF2, SEC_OID_HMAC_SHA256, SEC_OID_HMAC_SHA256, hash_out_len, iterations, salt);
|
||||
@@ -104,7 +105,6 @@ pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *s
|
||||
slot = PK11_GetBestSlotMultiple(mechanism_array, 2, NULL);
|
||||
if (slot != NULL) {
|
||||
symkey = PK11_PBEKeyGen(slot, algid, pwd, PR_FALSE, NULL);
|
||||
- PK11_FreeSlot(slot);
|
||||
if (symkey == NULL) {
|
||||
/* We try to get the Error here but NSS has two or more error interfaces, and sometimes it uses none of them. */
|
||||
int32_t status = PORT_GetError();
|
||||
@@ -123,18 +123,60 @@ pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *s
|
||||
return SECFailure;
|
||||
}
|
||||
|
||||
- if (PK11_ExtractKeyValue(symkey) == SECSuccess) {
|
||||
- result = PK11_GetKeyData(symkey);
|
||||
- if (result != NULL && result->len <= hash_out_len) {
|
||||
- memcpy(hash_out, result->data, result->len);
|
||||
- PK11_FreeSymKey(symkey);
|
||||
+ /*
|
||||
+ * First, we need to generate a wrapped key for PK11_Decrypt call:
|
||||
+ * slot is the same slot we used in PK11_PBEKeyGen()
|
||||
+ * 256 bits / 8 bits per byte
|
||||
+ */
|
||||
+ PK11SymKey *wrapKey = PK11_KeyGen(slot, CKM_AES_ECB, NULL, 256/8, NULL);
|
||||
+ PK11_FreeSlot(slot);
|
||||
+ if (wrapKey == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to generate a wrapped key.\n");
|
||||
+ return SECFailure;
|
||||
+ }
|
||||
+
|
||||
+ wrapKeyData = (SECItem *)PORT_Alloc(sizeof(SECItem));
|
||||
+ /* Align the wrapped key with 32 bytes. */
|
||||
+ wrapKeyData->len = (PK11_GetKeyLength(symkey) + 31) & ~31;
|
||||
+ /* Allocate the aligned space for the PKCS5 PBE key plus the AES key block */
|
||||
+ wrapKeyData->data = (unsigned char *)slapi_ch_calloc(wrapKeyData->len, sizeof(unsigned char));
|
||||
+
|
||||
+ /* Get symkey wrapped with wrapKey - required for PK11_Decrypt call */
|
||||
+ rv = PK11_WrapSymKey(CKM_AES_ECB, NULL, wrapKey, symkey, wrapKeyData);
|
||||
+ if (rv != SECSuccess) {
|
||||
+ PK11_FreeSymKey(symkey);
|
||||
+ PK11_FreeSymKey(wrapKey);
|
||||
+ SECITEM_FreeItem(wrapKeyData, PR_TRUE);
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to wrap the symkey. (%d)\n", rv);
|
||||
+ return SECFailure;
|
||||
+ }
|
||||
+
|
||||
+ /* Allocate the space for our result */
|
||||
+ void *result = (char *)slapi_ch_calloc(wrapKeyData->len, sizeof(char));
|
||||
+ unsigned int result_len = 0;
|
||||
+
|
||||
+ /* Use wrapKey to decrypt the wrapped contents.
|
||||
+ * result is the hash that we need;
|
||||
+ * result_len is the actual length of the data;
|
||||
+ * hash_out_len is the maximum (the space we allocated for hash_out)
|
||||
+ */
|
||||
+ rv = PK11_Decrypt(wrapKey, CKM_AES_ECB, NULL, result, &result_len, hash_out_len, wrapKeyData->data, wrapKeyData->len);
|
||||
+ PK11_FreeSymKey(symkey);
|
||||
+ PK11_FreeSymKey(wrapKey);
|
||||
+ SECITEM_FreeItem(wrapKeyData, PR_TRUE);
|
||||
+
|
||||
+ if (rv == SECSuccess) {
|
||||
+ if (result != NULL && result_len <= hash_out_len) {
|
||||
+ memcpy(hash_out, result, result_len);
|
||||
+ slapi_ch_free((void **)&result);
|
||||
} else {
|
||||
- PK11_FreeSymKey(symkey);
|
||||
- slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to retrieve (get) hash output.\n");
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to retrieve (get) hash output.\n");
|
||||
+ slapi_ch_free((void **)&result);
|
||||
return SECFailure;
|
||||
}
|
||||
} else {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to extract hash output.\n");
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to extract hash output. (%d)\n", rv);
|
||||
+ slapi_ch_free((void **)&result);
|
||||
return SECFailure;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
|
||||
index 61ed40b7d..04d0494f8 100644
|
||||
--- a/ldap/servers/slapd/main.c
|
||||
+++ b/ldap/servers/slapd/main.c
|
||||
@@ -2895,9 +2895,21 @@ slapd_do_all_nss_ssl_init(int slapd_exemode, int importexport_encrypt, int s_por
|
||||
* is enabled or not. We use NSS for random number generation and
|
||||
* other things even if we are not going to accept SSL connections.
|
||||
* We also need NSS for attribute encryption/decryption on import and export.
|
||||
+ *
|
||||
+ * It's important to remember that while in FIPS mode the administrator should always enable
|
||||
+ * security, otherwise we don't call slapd_pk11_authenticate, which is a requirement in FIPS mode
|
||||
*/
|
||||
+ PRBool isFIPS = slapd_pk11_isFIPS();
|
||||
int init_ssl = config_get_security();
|
||||
|
||||
+ if (isFIPS && !init_ssl) {
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, "slapd_do_all_nss_ssl_init",
|
||||
+ "ERROR: TLS is not enabled, and the machine is in FIPS mode. "
|
||||
+ "Some functionality won't work correctly (for example, "
|
||||
+ "users with PBKDF2_SHA256 password scheme won't be able to log in). "
|
||||
+ "It's highly advisable to enable TLS on this instance.\n");
|
||||
+ }
|
||||
+
|
||||
if (slapd_exemode == SLAPD_EXEMODE_SLAPD) {
|
||||
init_ssl = init_ssl && (0 != s_port) && (s_port <= LDAP_PORT_MAX);
|
||||
} else {
|
||||
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
|
||||
index 29ee5245a..e0299c5b4 100644
|
||||
--- a/src/lib389/lib389/__init__.py
|
||||
+++ b/src/lib389/lib389/__init__.py
|
||||
@@ -1588,6 +1588,10 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
:param post_open: Open the server connection after restart.
|
||||
:type post_open: bool
|
||||
"""
|
||||
+ if self.config.get_attr_val_utf8_l("nsslapd-security") == 'on':
|
||||
+ self.restart(post_open=post_open)
|
||||
+ return
|
||||
+
|
||||
# If it doesn't exist, create a cadb.
|
||||
ssca = NssSsl(dbpath=self.get_ssca_dir())
|
||||
if not ssca._db_exists():
|
||||
diff --git a/src/lib389/lib389/topologies.py b/src/lib389/lib389/topologies.py
|
||||
index e9969f524..e7d56582d 100644
|
||||
--- a/src/lib389/lib389/topologies.py
|
||||
+++ b/src/lib389/lib389/topologies.py
|
||||
@@ -15,7 +15,7 @@ import socket
|
||||
import pytest
|
||||
|
||||
from lib389 import DirSrv
|
||||
-from lib389.utils import generate_ds_params
|
||||
+from lib389.utils import generate_ds_params, is_fips
|
||||
from lib389.mit_krb5 import MitKrb5
|
||||
from lib389.saslmap import SaslMappings
|
||||
from lib389.replica import ReplicationManager, Replicas
|
||||
@@ -108,6 +108,10 @@ def _create_instances(topo_dict, suffix):
|
||||
if role == ReplicaRole.HUB:
|
||||
hs[instance.serverid] = instance
|
||||
instances.update(hs)
|
||||
+ # We should always enable TLS while in FIPS mode because otherwise NSS database won't be
|
||||
+ # configured in a FIPS compliant way
|
||||
+ if is_fips():
|
||||
+ instance.enable_tls()
|
||||
log.info("Instance with parameters {} was created.".format(args_instance))
|
||||
|
||||
if "standalone1" in instances and len(instances) == 1:
|
||||
diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py
|
||||
index b270784ce..5ba0c6676 100644
|
||||
--- a/src/lib389/lib389/utils.py
|
||||
+++ b/src/lib389/lib389/utils.py
|
||||
@@ -1430,3 +1430,16 @@ def is_valid_hostname(hostname):
|
||||
hostname = hostname[:-1] # strip exactly one dot from the right, if present
|
||||
allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
|
||||
return all(allowed.match(x) for x in hostname.split("."))
|
||||
+
|
||||
+
|
||||
+def is_fips():
|
||||
+ if os.path.exists('/proc/sys/crypto/fips_enabled'):
|
||||
+ with open('/proc/sys/crypto/fips_enabled', 'r') as f:
|
||||
+ state = f.readline().strip()
|
||||
+ if state == '1':
|
||||
+ return True
|
||||
+ else:
|
||||
+ return False
|
||||
+ else:
|
||||
+ return False
|
||||
+
|
||||
--
|
||||
2.31.1
|
||||
|
@ -0,0 +1,780 @@
|
||||
From 63e1ceac74cdfda7cf432537a18670e9562b58df Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Mon, 2 May 2022 18:43:25 +0200
|
||||
Subject: [PATCH] Issue 5126 - Memory leak in slapi_ldap_get_lderrno (#5153)
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
* Issue 5126 - Memory leak in slapi_ldap_get_lderrno
|
||||
|
||||
The problem is that, some time ago, the libldap API replaced LDAP_OPT_ERROR_STRING (whose data should not be freed) with
|
||||
LDAP_OPT_DIAGNOSTIC_MESSAGE (whose data must be freed).
|
||||
slapi_ldap_get_lderrno was adapted to use the new option, but the callers were not modified to free the value.
|
||||
|
||||
The Solution:
|
||||
Ensure that the slapi_ldap_get_lderrno value also has to be freed when the legacy LDAP_OPT_ERROR_STRING is used (by duping the value)
|
||||
Ensure that the callers free the value.
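
For illustration only (not part of the patch itself): after this change every caller is expected to own and release the returned string, roughly as in the sketch below (the ld handle and the "example_caller" component name are placeholders).

    char *errmsg = NULL;
    int rc = slapi_ldap_get_lderrno(ld, NULL, &errmsg);
    slapi_log_err(SLAPI_LOG_ERR, "example_caller",
                  "Operation failed, error %d: %s\n", rc, errmsg ? errmsg : "");
    slapi_ch_free_string(&errmsg); /* the string is always heap-allocated now */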
|
||||
|
||||
Added test case about replication using SASL/Digest-md5 authentication
|
||||
Added test case to check this leak
|
||||
Also updated the SASL/GSSAPI test case to be compatible with the current lib389 framework, but marked it as skipped because it requires a specific configuration (this path should be tested by IPA tests)
|
||||
Fixed the lib389 valgrind functions so that they run on a prefixed installation without needing to be root.
|
||||
Lastly, I also improved the lib389 mapped object to give a better diagnostic when an LDAP operation fails (by adding the request to the exception)
|
||||
|
||||
issue: 5126 https://github.com/389ds/389-ds-base/issues/5126
|
||||
|
||||
Reviewed by: @droideck
|
||||
|
||||
(cherry picked from commit 4d89e11494233d8297896540bc752cfdbab2cc69)
|
||||
---
|
||||
.../suites/gssapi_repl/gssapi_repl_test.py | 31 ++-
|
||||
.../tests/suites/replication/sasl_m2_test.py | 185 ++++++++++++++++++
|
||||
ldap/servers/plugins/chainingdb/cb_search.c | 6 +-
|
||||
ldap/servers/plugins/passthru/ptbind.c | 2 +
|
||||
.../plugins/replication/repl5_connection.c | 4 +
|
||||
.../plugins/replication/windows_connection.c | 3 +
|
||||
ldap/servers/slapd/ldaputil.c | 6 +
|
||||
src/lib389/lib389/_mapped_object.py | 76 ++++---
|
||||
src/lib389/lib389/utils.py | 40 +++-
|
||||
9 files changed, 311 insertions(+), 42 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/replication/sasl_m2_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py b/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py
|
||||
index 41f323c06..402684aab 100644
|
||||
--- a/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py
|
||||
+++ b/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py
|
||||
@@ -9,6 +9,7 @@
|
||||
import pytest
|
||||
from lib389.tasks import *
|
||||
from lib389.utils import *
|
||||
+from lib389.agreement import *
|
||||
from lib389.topologies import topology_m2
|
||||
|
||||
pytestmark = pytest.mark.tier2
|
||||
@@ -65,10 +66,27 @@ def _allow_machine_account(inst, name):
|
||||
# First we need to get the mapping tree dn
|
||||
mt = inst.mappingtree.list(suffix=DEFAULT_SUFFIX)[0]
|
||||
inst.modify_s('cn=replica,%s' % mt.dn, [
|
||||
- (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDN', "uid=%s,ou=Machines,%s" % (name, DEFAULT_SUFFIX))
|
||||
+ (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDN', f"uid={name},ou=Machines,{DEFAULT_SUFFIX}".encode('utf-8'))
|
||||
])
|
||||
|
||||
-
|
||||
+def _verify_etc_hosts():
|
||||
+ #Check if /etc/hosts is compatible with the test
|
||||
+ NEEDED_HOSTS = ( ('ldapkdc.example.com', '127.0.0.1'),
|
||||
+ ('ldapkdc1.example.com', '127.0.1.1'),
|
||||
+ ('ldapkdc2.example.com', '127.0.2.1'))
|
||||
+ found_hosts = {}
|
||||
+ with open('/etc/hosts','r') as f:
|
||||
+ for l in f:
|
||||
+ s = l.split()
|
||||
+ if len(s) < 2:
|
||||
+ continue
|
||||
+ for nh in NEEDED_HOSTS:
|
||||
+ if (s[0] == nh[1] and s[1] == nh[0]):
|
||||
+ found_hosts[s[1]] = True
|
||||
+ return len(found_hosts) == len(NEEDED_HOSTS)
|
||||
+
|
||||
+@pytest.mark.skipif(not _verify_etc_hosts(), reason="/etc/hosts does not contain the needed hosts.")
|
||||
+@pytest.mark.skipif(True, reason="Test disabled because it requires a specific Kerberos setup (server principal, keytab, etc.)")
|
||||
def test_gssapi_repl(topology_m2):
|
||||
"""Test gssapi authenticated replication agreement of two suppliers using KDC
|
||||
|
||||
@@ -94,8 +112,6 @@ def test_gssapi_repl(topology_m2):
|
||||
6. Test User should be created on M1 and M2 both
|
||||
7. Test User should be created on M1 and M2 both
|
||||
"""
|
||||
-
|
||||
- return
|
||||
supplier1 = topology_m2.ms["supplier1"]
|
||||
supplier2 = topology_m2.ms["supplier2"]
|
||||
|
||||
@@ -121,6 +137,7 @@ def test_gssapi_repl(topology_m2):
|
||||
properties = {RA_NAME: r'meTo_$host:$port',
|
||||
RA_METHOD: 'SASL/GSSAPI',
|
||||
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
|
||||
+ supplier1.agreement.delete(suffix=SUFFIX, consumer_host=supplier2.host, consumer_port=supplier2.port)
|
||||
m1_m2_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier2.host, port=supplier2.port, properties=properties)
|
||||
if not m1_m2_agmt:
|
||||
log.fatal("Fail to create a supplier -> supplier replica agreement")
|
||||
@@ -133,6 +150,7 @@ def test_gssapi_repl(topology_m2):
|
||||
properties = {RA_NAME: r'meTo_$host:$port',
|
||||
RA_METHOD: 'SASL/GSSAPI',
|
||||
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
|
||||
+ supplier2.agreement.delete(suffix=SUFFIX, consumer_host=supplier1.host, consumer_port=supplier1.port)
|
||||
m2_m1_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier1.host, port=supplier1.port, properties=properties)
|
||||
if not m2_m1_agmt:
|
||||
log.fatal("Fail to create a supplier -> supplier replica agreement")
|
||||
@@ -145,8 +163,9 @@ def test_gssapi_repl(topology_m2):
|
||||
#
|
||||
# Initialize all the agreements
|
||||
#
|
||||
- supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2)
|
||||
- supplier1.waitForReplInit(m1_m2_agmt)
|
||||
+ agmt = Agreement(supplier1, m1_m2_agmt)
|
||||
+ agmt.begin_reinit()
|
||||
+ agmt.wait_reinit()
|
||||
|
||||
# Check replication is working...
|
||||
if supplier1.testReplication(DEFAULT_SUFFIX, supplier2):
|
||||
diff --git a/dirsrvtests/tests/suites/replication/sasl_m2_test.py b/dirsrvtests/tests/suites/replication/sasl_m2_test.py
|
||||
new file mode 100644
|
||||
index 000000000..d7406ac7e
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/replication/sasl_m2_test.py
|
||||
@@ -0,0 +1,185 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2022 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import logging
|
||||
+import os
|
||||
+import pytest
|
||||
+import ldap
|
||||
+import uuid
|
||||
+from lib389.utils import ds_is_older, valgrind_enable, valgrind_disable, valgrind_get_results_file, valgrind_check_file
|
||||
+
|
||||
+from lib389.idm.services import ServiceAccounts
|
||||
+from lib389.idm.group import Groups
|
||||
+from lib389.config import CertmapLegacy, Config
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
+from lib389.agreement import Agreements
|
||||
+from lib389._mapped_object import DSLdapObject
|
||||
+from lib389.replica import ReplicationManager, Replicas, BootstrapReplicationManager
|
||||
+from lib389.topologies import topology_m2 as topo_m2
|
||||
+
|
||||
+pytestmark = pytest.mark.tier1
|
||||
+
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
+if DEBUGGING:
|
||||
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+else:
|
||||
+ logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+def set_sasl_md5_client_auth(inst, to):
|
||||
+ # Create the certmap before we restart
|
||||
+ cm = CertmapLegacy(to)
|
||||
+ certmaps = cm.list()
|
||||
+ certmaps['default']['nsSaslMapRegexString'] = '^dn:\\(.*\\)'
|
||||
+ certmaps['default']['nsSaslMapBaseDNTemplate'] = 'cn=config'
|
||||
+ certmaps['default']['nsSaslMapFilterTemplate'] = '(objectclass=*)'
|
||||
+ cm.set(certmaps)
|
||||
+
|
||||
+ Config(to).replace("passwordStorageScheme", 'CLEAR')
|
||||
+
|
||||
+ # Create a repl manager on the replica
|
||||
+ replication_manager_pwd = 'secret12'
|
||||
+ brm = BootstrapReplicationManager(to)
|
||||
+ try:
|
||||
+ brm.delete()
|
||||
+ except ldap.NO_SUCH_OBJECT:
|
||||
+ pass
|
||||
+ brm.create(properties={
|
||||
+ 'cn': brm.common_name,
|
||||
+ 'userPassword': replication_manager_pwd
|
||||
+ })
|
||||
+ replication_manager_dn = brm.dn
|
||||
+
|
||||
+ replica = Replicas(inst).get(DEFAULT_SUFFIX)
|
||||
+ replica.set('nsDS5ReplicaBindDN', brm.dn)
|
||||
+ replica.remove_all('nsDS5ReplicaBindDNgroup')
|
||||
+ agmt = replica.get_agreements().list()[0]
|
||||
+ agmt.replace_many(
|
||||
+ ('nsDS5ReplicaBindMethod', 'SASL/DIGEST-MD5'),
|
||||
+ ('nsDS5ReplicaTransportInfo', 'LDAP'),
|
||||
+ ('nsDS5ReplicaPort', str(to.port)),
|
||||
+ ('nsDS5ReplicaBindDN', replication_manager_dn),
|
||||
+ ('nsDS5ReplicaCredentials', replication_manager_pwd),
|
||||
+ )
|
||||
+
|
||||
+
|
||||
+def gen_valgrind_wrapper(dir):
|
||||
+ name=f"{dir}/VALGRIND"
|
||||
+ with open(name, 'w') as f:
|
||||
+ f.write('#!/bin/sh\n')
|
||||
+ f.write('export SASL_PATH=foo\n')
|
||||
+ f.write(f'valgrind -q --tool=memcheck --leak-check=yes --leak-resolution=high --num-callers=50 --log-file=/var/tmp/slapd.vg.$$ {dir}/ns-slapd.original "$@"\n')
|
||||
+ os.chmod(name, 0o755)
|
||||
+ return name
|
||||
+
|
||||
+@pytest.fixture
|
||||
+def use_valgrind(topo_m2, request):
|
||||
+ """Adds entries to the supplier1"""
|
||||
+
|
||||
+ log.info("Enable valgrind")
|
||||
+ m1 = topo_m2.ms['supplier1']
|
||||
+ m2 = topo_m2.ms['supplier2']
|
||||
+ if m1.has_asan():
|
||||
+ pytest.skip('Test case using valgrind cannot run on an ASAN-enabled build')
|
||||
+ return
|
||||
+ set_sasl_md5_client_auth(m1, m2)
|
||||
+ set_sasl_md5_client_auth(m2, m1)
|
||||
+ m1.stop()
|
||||
+ m2.stop()
|
||||
+ m1.systemd_override = False
|
||||
+ m2.systemd_override = False
|
||||
+ valgrind_enable(m1.ds_paths.sbin_dir, gen_valgrind_wrapper(m1.ds_paths.sbin_dir))
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info("Disable valgrind")
|
||||
+ valgrind_disable(m1.ds_paths.sbin_dir)
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+
|
||||
+def test_repl_sasl_md5_auth(topo_m2):
|
||||
+ """Test replication with SASL digest-md5 authentication
|
||||
+
|
||||
+ :id: 922d16f8-662a-4915-a39e-0aecd7c8e6e2
|
||||
+ :setup: Two supplier replication
|
||||
+ :steps:
|
||||
+ 1. Set SASL digest-md5 authentication on both suppliers
|
||||
+ 2. Restart the instance
|
||||
+ 3. Check that replication works
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Replication works
|
||||
+ """
|
||||
+
|
||||
+ m1 = topo_m2.ms['supplier1']
|
||||
+ m2 = topo_m2.ms['supplier2']
|
||||
+
|
||||
+ set_sasl_md5_client_auth(m1, m2)
|
||||
+ set_sasl_md5_client_auth(m2, m1)
|
||||
+
|
||||
+ m1.restart()
|
||||
+ m2.restart()
|
||||
+
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+ repl.test_replication_topology(topo_m2)
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(not os.path.exists('/usr/bin/valgrind'), reason="valgrind is not installed.")
|
||||
+def test_repl_sasl_leak(topo_m2, use_valgrind):
|
||||
+ """Test replication with SASL digest-md5 authentication
|
||||
+
|
||||
+ :id: 180e088e-841c-11ec-af4f-482ae39447e5
|
||||
+ :setup: Two supplier replication, valgrind
|
||||
+ :steps:
|
||||
+ 1. Set SASL digest-md5 authentication on both suppliers
|
||||
+ 2. Break sasl by setting invalid PATH
|
||||
+ 3. Restart the instances
|
||||
+ 4. Perform a change
|
||||
+ 5. Poke replication 100 times
|
||||
+ 6. Stop server
|
||||
+ 7. Check presence of "SASL(-4): no mechanism available: No worthy mechs found" message in error log
|
||||
+ 8. Check that there is no leak in slapi_ldap_get_lderrno
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 2. Success
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ 6. Success
|
||||
+ 7. Success
|
||||
+ 8. Success
|
||||
+ """
|
||||
+
|
||||
+ m1 = topo_m2.ms['supplier1']
|
||||
+ m2 = topo_m2.ms['supplier2']
|
||||
+
|
||||
+ os.environ["SASL_PATH"] = 'foo'
|
||||
+
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+
|
||||
+ resfile=valgrind_get_results_file(m1)
|
||||
+
|
||||
+ # Perform a change
|
||||
+ from_groups = Groups(m1, basedn=DEFAULT_SUFFIX, rdn=None)
|
||||
+ from_group = from_groups.get('replication_managers')
|
||||
+ change = str(uuid.uuid4())
|
||||
+ from_group.replace('description', change)
|
||||
+
|
||||
+ # Poke replication to trigger the leak
|
||||
+ replica = Replicas(m1).get(DEFAULT_SUFFIX)
|
||||
+ agmt = Agreements(m1, replica.dn).list()[0]
|
||||
+ for i in range(0, 100):
|
||||
+ agmt.pause()
|
||||
+ agmt.resume()
|
||||
+
|
||||
+ m1.stop()
|
||||
+ assert m1.searchErrorsLog("worthy")
|
||||
+ assert not valgrind_check_file(resfile, 'slapi_ldap_get_lderrno')
|
||||
+
|
||||
diff --git a/ldap/servers/plugins/chainingdb/cb_search.c b/ldap/servers/plugins/chainingdb/cb_search.c
|
||||
index ffc8f56f8..d6f30b357 100644
|
||||
--- a/ldap/servers/plugins/chainingdb/cb_search.c
|
||||
+++ b/ldap/servers/plugins/chainingdb/cb_search.c
|
||||
@@ -348,10 +348,9 @@ chainingdb_build_candidate_list(Slapi_PBlock *pb)
|
||||
warned_rc = 1;
|
||||
}
|
||||
cb_send_ldap_result(pb, rc, NULL, ENDUSERMSG, 0, NULL);
|
||||
- /* BEWARE: matched_msg and error_msg points */
|
||||
+ /* BEWARE: matched_msg points */
|
||||
/* to ld fields. */
|
||||
matched_msg = NULL;
|
||||
- error_msg = NULL;
|
||||
rc = -1;
|
||||
}
|
||||
|
||||
@@ -695,10 +694,9 @@ chainingdb_next_search_entry(Slapi_PBlock *pb)
|
||||
}
|
||||
cb_send_ldap_result(pb, rc, matched_msg, ENDUSERMSG, 0, NULL);
|
||||
|
||||
- /* BEWARE: Don't free matched_msg && error_msg */
|
||||
+ /* BEWARE: Don't free matched_msg */
|
||||
/* Points to the ld fields */
|
||||
matched_msg = NULL;
|
||||
- error_msg = NULL;
|
||||
retcode = -1;
|
||||
} else {
|
||||
/* Add control response sent by the farm server */
|
||||
diff --git a/ldap/servers/plugins/passthru/ptbind.c b/ldap/servers/plugins/passthru/ptbind.c
|
||||
index 705ab2c3a..3e79b47f6 100644
|
||||
--- a/ldap/servers/plugins/passthru/ptbind.c
|
||||
+++ b/ldap/servers/plugins/passthru/ptbind.c
|
||||
@@ -33,6 +33,8 @@ passthru_simple_bind_once_s(PassThruServer *srvr, const char *dn, struct berval
|
||||
* are only interested in recovering silently when the remote server is up
|
||||
* but decided to close our connection, we retry without pausing between
|
||||
* attempts.
|
||||
+ *
|
||||
+ * Note that errmsgp must be freed by the caller.
|
||||
*/
|
||||
int
|
||||
passthru_simple_bind_s(Slapi_PBlock *pb, PassThruServer *srvr, int tries, const char *dn, struct berval *creds, LDAPControl **reqctrls, int *lderrnop, char **matcheddnp, char **errmsgp, struct berval ***refurlsp, LDAPControl ***resctrlsp)
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c
|
||||
index 2dd74f9e7..b6bc21c46 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_connection.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_connection.c
|
||||
@@ -244,6 +244,7 @@ conn_delete_internal(Repl_Connection *conn)
|
||||
PR_ASSERT(NULL != conn);
|
||||
close_connection_internal(conn);
|
||||
/* slapi_ch_free accepts NULL pointer */
|
||||
+ slapi_ch_free_string(&conn->last_ldap_errmsg);
|
||||
slapi_ch_free((void **)&conn->hostname);
|
||||
slapi_ch_free((void **)&conn->binddn);
|
||||
slapi_ch_free((void **)&conn->plain);
|
||||
@@ -450,6 +451,7 @@ conn_read_result_ex(Repl_Connection *conn, char **retoidp, struct berval **retda
|
||||
char *s = NULL;
|
||||
|
||||
rc = slapi_ldap_get_lderrno(conn->ld, NULL, &s);
|
||||
+ slapi_ch_free_string(&conn->last_ldap_errmsg);
|
||||
conn->last_ldap_errmsg = s;
|
||||
conn->last_ldap_error = rc;
|
||||
/* some errors will require a disconnect and retry the connection
|
||||
@@ -1937,6 +1939,7 @@ bind_and_check_pwp(Repl_Connection *conn, char *binddn, char *password)
|
||||
agmt_get_long_name(conn->agmt),
|
||||
mech ? mech : "SIMPLE", rc,
|
||||
ldap_err2string(rc), errmsg ? errmsg : "");
|
||||
+ slapi_ch_free_string(&errmsg);
|
||||
} else {
|
||||
char *errmsg = NULL;
|
||||
/* errmsg is a pointer directly into the ld structure - do not free */
|
||||
@@ -1946,6 +1949,7 @@ bind_and_check_pwp(Repl_Connection *conn, char *binddn, char *password)
|
||||
agmt_get_long_name(conn->agmt),
|
||||
mech ? mech : "SIMPLE", rc,
|
||||
ldap_err2string(rc), errmsg ? errmsg : "");
|
||||
+ slapi_ch_free_string(&errmsg);
|
||||
}
|
||||
|
||||
return (CONN_OPERATION_FAILED);
|
||||
diff --git a/ldap/servers/plugins/replication/windows_connection.c b/ldap/servers/plugins/replication/windows_connection.c
|
||||
index 5eca5fad1..d3f6a4e93 100644
|
||||
--- a/ldap/servers/plugins/replication/windows_connection.c
|
||||
+++ b/ldap/servers/plugins/replication/windows_connection.c
|
||||
@@ -331,6 +331,7 @@ windows_perform_operation(Repl_Connection *conn, int optype, const char *dn, LDA
|
||||
"windows_perform_operation - %s: Received error %d: %s for %s operation\n",
|
||||
agmt_get_long_name(conn->agmt),
|
||||
rc, s ? s : "NULL", op_string);
|
||||
+ slapi_ch_free_string(&s);
|
||||
conn->last_ldap_error = rc;
|
||||
/* some errors will require a disconnect and retry the connection
|
||||
later */
|
||||
@@ -1709,6 +1710,7 @@ bind_and_check_pwp(Repl_Connection *conn, char *binddn, char *password)
|
||||
agmt_get_long_name(conn->agmt),
|
||||
mech ? mech : "SIMPLE", rc,
|
||||
ldap_err2string(rc), errmsg);
|
||||
+ slapi_ch_free_string(&errmsg);
|
||||
} else {
|
||||
char *errmsg = NULL;
|
||||
/* errmsg is a pointer directly into the ld structure - do not free */
|
||||
@@ -1718,6 +1720,7 @@ bind_and_check_pwp(Repl_Connection *conn, char *binddn, char *password)
|
||||
agmt_get_long_name(conn->agmt),
|
||||
mech ? mech : "SIMPLE", rc,
|
||||
ldap_err2string(rc), errmsg);
|
||||
+ slapi_ch_free_string(&errmsg);
|
||||
}
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= bind_and_check_pwp - CONN_OPERATION_FAILED\n");
|
||||
diff --git a/ldap/servers/slapd/ldaputil.c b/ldap/servers/slapd/ldaputil.c
|
||||
index 336ca3912..db3300e30 100644
|
||||
--- a/ldap/servers/slapd/ldaputil.c
|
||||
+++ b/ldap/servers/slapd/ldaputil.c
|
||||
@@ -375,6 +375,8 @@ slapi_ldap_url_parse(const char *url, LDAPURLDesc **ludpp, int require_dn, int *
|
||||
|
||||
#include <sasl/sasl.h>
|
||||
|
||||
+
|
||||
+/* Warning: caller must free s (if not NULL) */
|
||||
int
|
||||
slapi_ldap_get_lderrno(LDAP *ld, char **m, char **s)
|
||||
{
|
||||
@@ -389,6 +391,9 @@ slapi_ldap_get_lderrno(LDAP *ld, char **m, char **s)
|
||||
ldap_get_option(ld, LDAP_OPT_DIAGNOSTIC_MESSAGE, s);
|
||||
#else
|
||||
ldap_get_option(ld, LDAP_OPT_ERROR_STRING, s);
|
||||
+ if (*s) {
|
||||
+ *s = slapi_ch_strdup(*s);
|
||||
+ }
|
||||
#endif
|
||||
}
|
||||
return rc;
|
||||
@@ -1517,6 +1522,7 @@ slapd_ldap_sasl_interactive_bind(
|
||||
mech ? mech : "SIMPLE",
|
||||
rc, ldap_err2string(rc), errmsg,
|
||||
errno, slapd_system_strerror(errno));
|
||||
+ slapi_ch_free_string(&errmsg);
|
||||
if (can_retry_bind(ld, mech, bindid, creds, rc, errmsg)) {
|
||||
; /* pass through to retry one time */
|
||||
} else {
|
||||
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
|
||||
index 48d3879a3..1c314322b 100644
|
||||
--- a/src/lib389/lib389/_mapped_object.py
|
||||
+++ b/src/lib389/lib389/_mapped_object.py
|
||||
@@ -67,6 +67,34 @@ def _gen_filter(attrtypes, values, extra=None):
|
||||
return filt
|
||||
|
||||
|
||||
+# Define wrappers around the ldap operation to have a clear diagnostic
|
||||
+def _ldap_op_s(inst, f, fname, *args, **kwargs):
|
||||
+ # f.__name__ says 'inner' so the wanted name is provided as argument
|
||||
+ try:
|
||||
+ return f(*args, **kwargs)
|
||||
+ except ldap.LDAPError as e:
|
||||
+ new_desc = f"{fname}({args},{kwargs}) on instance {inst.serverid}"
|
||||
+ if len(e.args) >= 1:
|
||||
+ e.args[0]['ldap_request'] = new_desc
|
||||
+ logging.getLogger().error(f"args={e.args}")
|
||||
+ raise
|
||||
+
|
||||
+def _add_ext_s(inst, *args, **kwargs):
|
||||
+ return _ldap_op_s(inst, inst.add_ext_s, 'add_ext_s', *args, **kwargs)
|
||||
+
|
||||
+def _modify_ext_s(inst, *args, **kwargs):
|
||||
+ return _ldap_op_s(inst, inst.modify_ext_s, 'modify_ext_s', *args, **kwargs)
|
||||
+
|
||||
+def _delete_ext_s(inst, *args, **kwargs):
|
||||
+ return _ldap_op_s(inst, inst.delete_ext_s, 'delete_ext_s', *args, **kwargs)
|
||||
+
|
||||
+def _search_ext_s(inst, *args, **kwargs):
|
||||
+ return _ldap_op_s(inst, inst.search_ext_s, 'search_ext_s', *args, **kwargs)
|
||||
+
|
||||
+def _search_s(inst, *args, **kwargs):
|
||||
+ return _ldap_op_s(inst, inst.search_s, 'search_s', *args, **kwargs)
|
||||
+
|
||||
+
|
||||
class DSLogging(object):
|
||||
"""The benefit of this is automatic name detection, and correct application
|
||||
of level and verbosity to the object.
|
||||
@@ -129,7 +157,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
:returns: Entry object
|
||||
"""
|
||||
|
||||
- return self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=["*"],
|
||||
+ return _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=["*"],
|
||||
serverctrls=self._server_controls, clientctrls=self._client_controls,
|
||||
escapehatch='i am sure')[0]
|
||||
|
||||
@@ -140,7 +168,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
"""
|
||||
|
||||
try:
|
||||
- self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrsonly=1,
|
||||
+ _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, attrsonly=1,
|
||||
serverctrls=self._server_controls, clientctrls=self._client_controls,
|
||||
escapehatch='i am sure')
|
||||
except ldap.NO_SUCH_OBJECT:
|
||||
@@ -156,7 +184,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
search_scope = ldap.SCOPE_ONE
|
||||
elif scope == 'subtree':
|
||||
search_scope = ldap.SCOPE_SUBTREE
|
||||
- return self._instance.search_ext_s(self._dn, search_scope, filter,
|
||||
+ return _search_ext_s(self._instance,self._dn, search_scope, filter,
|
||||
serverctrls=self._server_controls,
|
||||
clientctrls=self._client_controls,
|
||||
escapehatch='i am sure')
|
||||
@@ -166,7 +194,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
|
||||
:returns: LDIF formatted string
|
||||
"""
|
||||
- e = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=attrlist,
|
||||
+ e = _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=attrlist,
|
||||
serverctrls=self._server_controls, clientctrls=self._client_controls,
|
||||
escapehatch='i am sure')[0]
|
||||
return e.__repr__()
|
||||
@@ -258,7 +286,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
raise ValueError("Invalid state. Cannot get presence on instance that is not ONLINE")
|
||||
self._log.debug("%s present(%r) %s" % (self._dn, attr, value))
|
||||
|
||||
- self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[attr, ],
|
||||
+ _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[attr, ],
|
||||
serverctrls=self._server_controls, clientctrls=self._client_controls,
|
||||
escapehatch='i am sure')[0]
|
||||
values = self.get_attr_vals_bytes(attr)
|
||||
@@ -313,7 +341,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
else:
|
||||
value = [ensure_bytes(arg[1])]
|
||||
mods.append((ldap.MOD_REPLACE, ensure_str(arg[0]), value))
|
||||
- return self._instance.modify_ext_s(self._dn, mods, serverctrls=self._server_controls,
|
||||
+ return _modify_ext_s(self._instance,self._dn, mods, serverctrls=self._server_controls,
|
||||
clientctrls=self._client_controls, escapehatch='i am sure')
|
||||
|
||||
# This needs to work on key + val, and key
|
||||
@@ -457,7 +485,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
elif value is not None:
|
||||
value = [ensure_bytes(value)]
|
||||
|
||||
- return self._instance.modify_ext_s(self._dn, [(action, key, value)],
|
||||
+ return _modify_ext_s(self._instance,self._dn, [(action, key, value)],
|
||||
serverctrls=self._server_controls, clientctrls=self._client_controls,
|
||||
escapehatch='i am sure')
|
||||
|
||||
@@ -497,7 +525,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
else:
|
||||
# Error too many items
|
||||
raise ValueError('Too many arguments in the mod op')
|
||||
- return self._instance.modify_ext_s(self._dn, mod_list, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')
|
||||
+ return _modify_ext_s(self._instance,self._dn, mod_list, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')
|
||||
|
||||
def _unsafe_compare_attribute(self, other):
|
||||
"""Compare two attributes from two objects. This is currently marked unsafe as it's
|
||||
@@ -593,7 +621,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE")
|
||||
else:
|
||||
# retrieving real(*) and operational attributes(+)
|
||||
- attrs_entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter,
|
||||
+ attrs_entry = _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter,
|
||||
attrlist=["*", "+"], serverctrls=self._server_controls,
|
||||
clientctrls=self._client_controls, escapehatch='i am sure')[0]
|
||||
# getting dict from 'entry' object
|
||||
@@ -613,7 +641,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE")
|
||||
else:
|
||||
# retrieving real(*) and operational attributes(+)
|
||||
- attrs_entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter,
|
||||
+ attrs_entry = _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter,
|
||||
attrlist=["*", "+"], serverctrls=self._server_controls,
|
||||
clientctrls=self._client_controls, escapehatch='i am sure')[0]
|
||||
# getting dict from 'entry' object
|
||||
@@ -627,7 +655,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
if self._instance.state != DIRSRV_STATE_ONLINE:
|
||||
raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE")
|
||||
else:
|
||||
- entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter,
|
||||
+ entry = _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter,
|
||||
attrlist=keys, serverctrls=self._server_controls,
|
||||
clientctrls=self._client_controls, escapehatch='i am sure')[0]
|
||||
return entry.getValuesSet(keys)
|
||||
@@ -636,7 +664,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
self._log.debug("%s get_attrs_vals_utf8(%r)" % (self._dn, keys))
|
||||
if self._instance.state != DIRSRV_STATE_ONLINE:
|
||||
raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE")
|
||||
- entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=keys,
|
||||
+ entry = _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=keys,
|
||||
serverctrls=self._server_controls, clientctrls=self._client_controls,
|
||||
escapehatch='i am sure')[0]
|
||||
vset = entry.getValuesSet(keys)
|
||||
@@ -655,7 +683,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
else:
|
||||
# It would be good to prevent the entry code intercepting this ....
|
||||
# We have to do this in this method, because else we ignore the scope base.
|
||||
- entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter,
|
||||
+ entry = _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter,
|
||||
attrlist=[key], serverctrls=self._server_controls,
|
||||
clientctrls=self._client_controls, escapehatch='i am sure')[0]
|
||||
vals = entry.getValues(key)
|
||||
@@ -675,7 +703,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
# In the future, I plan to add a mode where if local == true, we
|
||||
# can use get on dse.ldif to get values offline.
|
||||
else:
|
||||
- entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter,
|
||||
+ entry = _search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter,
|
||||
attrlist=[key], serverctrls=self._server_controls,
|
||||
clientctrls=self._client_controls, escapehatch='i am sure')[0]
|
||||
return entry.getValue(key)
|
||||
@@ -831,11 +859,11 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
# Is there a way to mark this as offline and kill it
|
||||
if recursive:
|
||||
filterstr = "(|(objectclass=*)(objectclass=ldapsubentry))"
|
||||
- ents = self._instance.search_s(self._dn, ldap.SCOPE_SUBTREE, filterstr, escapehatch='i am sure')
|
||||
+ ents = _search_s(self._instance, self._dn, ldap.SCOPE_SUBTREE, filterstr, escapehatch='i am sure')
|
||||
for ent in sorted(ents, key=lambda e: len(e.dn), reverse=True):
|
||||
- self._instance.delete_ext_s(ent.dn, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')
|
||||
+ _delete_ext_s(self._instance, ent.dn, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')
|
||||
else:
|
||||
- self._instance.delete_ext_s(self._dn, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')
|
||||
+ _delete_ext_s(self._instance, self._dn, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')
|
||||
|
||||
def _validate(self, rdn, properties, basedn):
|
||||
"""Used to validate a create request.
|
||||
@@ -933,7 +961,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
# If we are running in stateful ensure mode, we need to check if the object exists, and
|
||||
# we can see the state that it is in.
|
||||
try:
|
||||
- self._instance.search_ext_s(dn, ldap.SCOPE_BASE, self._object_filter, attrsonly=1, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')
|
||||
+ _search_ext_s(self._instance,dn, ldap.SCOPE_BASE, self._object_filter, attrsonly=1, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')
|
||||
exists = True
|
||||
except ldap.NO_SUCH_OBJECT:
|
||||
pass
|
||||
@@ -946,7 +974,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
mods = []
|
||||
for k, v in list(valid_props.items()):
|
||||
mods.append((ldap.MOD_REPLACE, k, v))
|
||||
- self._instance.modify_ext_s(self._dn, mods, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')
|
||||
+ _modify_ext_s(self._instance,self._dn, mods, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')
|
||||
elif not exists:
|
||||
# This case is reached in two cases. One is we are in ensure mode, and we KNOW the entry
|
||||
# doesn't exist.
|
||||
@@ -957,7 +985,7 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
e.update({'objectclass': ensure_list_bytes(self._create_objectclasses)})
|
||||
e.update(valid_props)
|
||||
# We rely on exceptions here to indicate failure to the parent.
|
||||
- self._instance.add_ext_s(e, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')
|
||||
+ _add_ext_s(self._instance, e, serverctrls=self._server_controls, clientctrls=self._client_controls, escapehatch='i am sure')
|
||||
self._log.debug('Created entry %s : %s' % (dn, display_log_data(e.data)))
|
||||
# If it worked, we need to fix our instance dn for the object's self reference. Because
|
||||
# we may not have a self reference yet (just created), it may have changed (someone
|
||||
@@ -1104,7 +1132,7 @@ class DSLdapObjects(DSLogging, DSLints):
|
||||
else:
|
||||
# If not paged
|
||||
try:
|
||||
- results = self._instance.search_ext_s(
|
||||
+ results = _search_ext_s(self._instance,
|
||||
base=self._basedn,
|
||||
scope=self._scope,
|
||||
filterstr=filterstr,
|
||||
@@ -1172,7 +1200,7 @@ class DSLdapObjects(DSLogging, DSLints):
|
||||
filterstr = self._get_objectclass_filter()
|
||||
self._log.debug('_gen_dn filter = %s' % filterstr)
|
||||
self._log.debug('_gen_dn dn = %s' % dn)
|
||||
- return self._instance.search_ext_s(
|
||||
+ return _search_ext_s(self._instance,
|
||||
base=dn,
|
||||
scope=ldap.SCOPE_BASE,
|
||||
filterstr=filterstr,
|
||||
@@ -1187,7 +1215,7 @@ class DSLdapObjects(DSLogging, DSLints):
|
||||
# This will yield and & filter for objectClass with as many terms as needed.
|
||||
filterstr = self._get_selector_filter(selector)
|
||||
self._log.debug('_gen_selector filter = %s' % filterstr)
|
||||
- return self._instance.search_ext_s(
|
||||
+ return _search_ext_s(self._instance,
|
||||
base=self._basedn,
|
||||
scope=self._scope,
|
||||
filterstr=filterstr,
|
||||
@@ -1261,7 +1289,7 @@ class DSLdapObjects(DSLogging, DSLints):
|
||||
self._list_attrlist = attrlist
|
||||
self._log.debug(f'list filter = {search_filter} with scope {scope} and attribute list {attrlist}')
|
||||
try:
|
||||
- results = self._instance.search_ext_s(
|
||||
+ results = _search_ext_s(self._instance,
|
||||
base=self._basedn,
|
||||
scope=scope,
|
||||
filterstr=search_filter,
|
||||
diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py
|
||||
index 6eba2d7b9..da966ed97 100644
|
||||
--- a/src/lib389/lib389/utils.py
|
||||
+++ b/src/lib389/lib389/utils.py
|
||||
@@ -52,7 +52,7 @@ from ldapurl import LDAPUrl
|
||||
from contextlib import closing
|
||||
|
||||
import lib389
|
||||
-from lib389.paths import Paths
|
||||
+from lib389.paths import ( Paths, DEFAULTS_PATH )
|
||||
from lib389.dseldif import DSEldif
|
||||
from lib389._constants import (
|
||||
DEFAULT_USER, VALGRIND_WRAPPER, DN_CONFIG, CFGSUFFIX, LOCALHOST,
|
||||
@@ -495,8 +495,10 @@ def valgrind_enable(sbin_dir, wrapper=None):
|
||||
:raise EnvironmentError: If script is not run as 'root'
|
||||
'''
|
||||
|
||||
- if os.geteuid() != 0:
|
||||
- log.error('This script must be run as root to use valgrind')
|
||||
+ if not os.access(sbin_dir, os.W_OK):
|
||||
+ # Note: valgrind itself has no such limitation, but ns-slapd must be replaced
|
||||
+ # This check allows a non-root user to use a custom install prefix
|
||||
+ log.error(f'This script must be run as root to use valgrind (should at least be able to write in {sbin_dir})')
|
||||
raise EnvironmentError
|
||||
|
||||
if not wrapper:
|
||||
@@ -542,7 +544,20 @@ def valgrind_enable(sbin_dir, wrapper=None):
|
||||
e.strerror)
|
||||
|
||||
# Disable selinux
|
||||
- os.system('setenforce 0')
|
||||
+ if os.geteuid() == 0:
|
||||
+ os.system('setenforce 0')
|
||||
+
|
||||
+ # Disable systemd by turning off with_system in .inf file
|
||||
+ old_path = Paths()._get_defaults_loc(DEFAULTS_PATH)
|
||||
+ new_path = f'{old_path}.orig'
|
||||
+ os.rename(old_path, new_path)
|
||||
+ with open(new_path, 'rt') as fin:
|
||||
+ with open(old_path, 'wt') as fout:
|
||||
+ for line in fin:
|
||||
+ if line.startswith('with_systemd'):
|
||||
+ fout.write('with_systemd = 0\n')
|
||||
+ else:
|
||||
+ fout.write(line)
|
||||
|
||||
log.info('Valgrind is now enabled.')
|
||||
|
||||
@@ -559,8 +574,10 @@ def valgrind_disable(sbin_dir):
|
||||
:raise EnvironmentError: If script is not run as 'root'
|
||||
'''
|
||||
|
||||
- if os.geteuid() != 0:
|
||||
- log.error('This script must be run as root to use valgrind')
|
||||
+ if not os.access(sbin_dir, os.W_OK):
|
||||
+ # Note: valgrind itself has no such limitation, but ns-slapd must be replaced
|
||||
+ # This check allows a non-root user to use a custom install prefix
|
||||
+ log.error(f'This script must be run as root to use valgrind (should at least be able to write in {sbin_dir})')
|
||||
raise EnvironmentError
|
||||
|
||||
nsslapd_orig = '%s/ns-slapd' % sbin_dir
|
||||
@@ -584,7 +601,14 @@ def valgrind_disable(sbin_dir):
|
||||
e.strerror)
|
||||
|
||||
# Enable selinux
|
||||
- os.system('setenforce 1')
|
||||
+ if os.geteuid() == 0:
|
||||
+ os.system('setenforce 1')
|
||||
+
|
||||
+ # Restore .inf file (for systemd)
|
||||
+ new_path = Paths()._get_defaults_loc(DEFAULTS_PATH)
|
||||
+ old_path = f'{new_path}.orig'
|
||||
+ if os.path.exists(old_path):
|
||||
+ os.replace(old_path, new_path)
|
||||
|
||||
log.info('Valgrind is now disabled.')
|
||||
|
||||
@@ -610,7 +634,7 @@ def valgrind_get_results_file(dirsrv_inst):
|
||||
|
||||
# Run the command and grab the output
|
||||
p = os.popen(cmd)
|
||||
- results_file = p.readline()
|
||||
+ results_file = p.readline().strip()
|
||||
p.close()
|
||||
|
||||
return results_file
|
||||
--
|
||||
2.31.1
|
||||
|
@ -1,114 +0,0 @@
|
||||
From d037688c072c4cb84fbf9b2a6cb24927f7950605 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 20 Oct 2021 10:04:06 -0400
|
||||
Subject: [PATCH 04/12] Issue 4956 - Automember allows invalid regex, and does
|
||||
not log proper error
|
||||
|
||||
Bug Description: The server was detecting an invalid automember
|
||||
regex, but it did not reject it, and it did not
|
||||
log which regex rule was invalid.
|
||||
|
||||
Fix Description: Properly rejecting the invalid regex now also
|
||||
triggers the proper error logging.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4956
|
||||
|
||||
Reviewed by: tbordaz & spichugi(Thanks!!)
|
||||
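The effect of the fix can be sketched with a short, hypothetical Python helper (the plugin itself does this in C against its own regex support; the names below are illustrative): compile the regex part of a rule up front and reject the whole rule when compilation fails, which is also what produces the error-log lines the new test greps for.

import re

def validate_regex_rule(rule):
    # Split "attr=regex" the way an automember regex rule is written,
    # then compile-check the regex part so a bad rule is rejected
    # instead of being silently accepted.
    attr, sep, pattern = rule.partition('=')
    if not sep or not attr or not pattern:
        return False
    try:
        re.compile(pattern)
        return True
    except re.error as err:
        print(f'Skipping invalid regex rule "{rule}": {err}')
        return False

# "cn=*invalid*" is a glob, not a regex: "*" has nothing to repeat,
# so compilation fails and the rule is rejected up front.
assert validate_regex_rule('cn=*invalid*') is False
assert validate_regex_rule('cn=^user_.*$') is True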
---
|
||||
.../automember_plugin/configuration_test.py | 49 +++++++++++++++++--
|
||||
ldap/servers/plugins/automember/automember.c | 1 +
|
||||
2 files changed, 46 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/automember_plugin/configuration_test.py b/dirsrvtests/tests/suites/automember_plugin/configuration_test.py
|
||||
index 0f9cc49dc..4a6b596db 100644
|
||||
--- a/dirsrvtests/tests/suites/automember_plugin/configuration_test.py
|
||||
+++ b/dirsrvtests/tests/suites/automember_plugin/configuration_test.py
|
||||
@@ -1,21 +1,20 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2019 Red Hat, Inc.
|
||||
+# Copyright (C) 2021 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
# See LICENSE for details.
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
|
||||
+import ldap
|
||||
import os
|
||||
import pytest
|
||||
-
|
||||
from lib389.topologies import topology_st as topo
|
||||
from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions, MemberOfPlugin
|
||||
-import ldap
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
-
|
||||
@pytest.mark.bz834056
|
||||
def test_configuration(topo):
|
||||
"""
|
||||
@@ -52,6 +51,48 @@ def test_configuration(topo):
|
||||
'"cn=SuffDef1,ou=autouserGroups,cn=config" '
|
||||
'can not be a child of the plugin config area "cn=config"')
|
||||
|
||||
+def test_invalid_regex(topo):
|
||||
+ """Test invalid regex is properly reportedin the error log
|
||||
+
|
||||
+ :id: a6d89f84-ec76-4871-be96-411d051800b1
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Setup automember
|
||||
+ 2. Add invalid regex
|
||||
+ 3. Error log reports useful message
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ """
|
||||
+ REGEX_DN = "cn=regex1,cn=testregex,cn=auto membership plugin,cn=plugins,cn=config"
|
||||
+ REGEX_VALUE = "cn=*invalid*"
|
||||
+ REGEX_ESC_VALUE = "cn=\\*invalid\\*"
|
||||
+ GROUP_DN = "cn=demo_group,ou=groups," + DEFAULT_SUFFIX
|
||||
+
|
||||
+ AutoMembershipPlugin(topo.standalone).remove_all("nsslapd-pluginConfigArea")
|
||||
+ automemberplugin = AutoMembershipPlugin(topo.standalone)
|
||||
+
|
||||
+ automember_prop = {
|
||||
+ 'cn': 'testRegex',
|
||||
+ 'autoMemberScope': 'ou=People,' + DEFAULT_SUFFIX,
|
||||
+ 'autoMemberFilter': 'objectclass=*',
|
||||
+ 'autoMemberDefaultGroup': GROUP_DN,
|
||||
+ 'autoMemberGroupingAttr': 'member:dn',
|
||||
+ }
|
||||
+ automember_defs = AutoMembershipDefinitions(topo.standalone, "cn=Auto Membership Plugin,cn=plugins,cn=config")
|
||||
+ automember_def = automember_defs.create(properties=automember_prop)
|
||||
+ automember_def.add_regex_rule("regex1", GROUP_DN, include_regex=[REGEX_VALUE])
|
||||
+
|
||||
+ automemberplugin.enable()
|
||||
+ topo.standalone.restart()
|
||||
+
|
||||
+ # Check errors log for invalid message
|
||||
+ ERR_STR1 = "automember_parse_regex_rule - Unable to parse regex rule"
|
||||
+ ERR_STR2 = f"Skipping invalid inclusive regex rule in rule entry \"{REGEX_DN}\" \\(rule = \"{REGEX_ESC_VALUE}\"\\)"
|
||||
+ assert topo.standalone.searchErrorsLog(ERR_STR1)
|
||||
+ assert topo.standalone.searchErrorsLog(ERR_STR2)
|
||||
+
|
||||
|
||||
if __name__ == "__main__":
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
|
||||
index 39350ad53..b92b89bd5 100644
|
||||
--- a/ldap/servers/plugins/automember/automember.c
|
||||
+++ b/ldap/servers/plugins/automember/automember.c
|
||||
@@ -1217,6 +1217,7 @@ automember_parse_regex_rule(char *rule_string)
|
||||
"automember_parse_regex_rule - Unable to parse "
|
||||
"regex rule (invalid regex). Error \"%s\".\n",
|
||||
recomp_result ? recomp_result : "unknown");
|
||||
+ goto bail;
|
||||
}
|
||||
|
||||
/* Validation has passed, so create the regex rule struct and fill it in.
|
||||
--
|
||||
2.31.1
|
||||
|
@ -0,0 +1,34 @@
|
||||
From 16536e5d306727761ffd10403f4762956f177147 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Wed, 5 Jan 2022 12:09:27 +0100
|
||||
Subject: [PATCH] Issue 5085 - Race condition about snmp collator at startup
|
||||
(#5086)
|
||||
|
||||
---
|
||||
ldap/servers/slapd/snmp_collator.c | 3 +--
|
||||
1 file changed, 1 insertion(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c
|
||||
index 10a99475d..ed34d2ac4 100644
|
||||
--- a/ldap/servers/slapd/snmp_collator.c
|
||||
+++ b/ldap/servers/slapd/snmp_collator.c
|
||||
@@ -201,7 +201,7 @@ set_snmp_interaction_row(char *host, int port, int error)
|
||||
|
||||
/* The interactions table is using the default (first) snmp_vars*/
|
||||
snmp_vars = g_get_first_thread_snmp_vars(&cookie);
|
||||
- if (snmp_vars == NULL)
|
||||
+ if (snmp_vars == NULL || interaction_table_mutex == NULL)
|
||||
return;
|
||||
|
||||
/* stevross: our servers don't have a concept of dsName as a distinguished name
|
||||
@@ -856,7 +856,6 @@ snmp_update_cache_stats(void)
|
||||
|
||||
if (search_result == 0) {
|
||||
int cookie;
|
||||
- uint64_t total;
|
||||
struct snmp_vars_t *snmp_vars;
|
||||
slapi_pblock_get(search_result_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES,
|
||||
&search_entries);
|
||||
--
|
||||
2.31.1
|
||||
|
@ -1,245 +0,0 @@
|
||||
From 9c08a053938eb28821fad7d0850c046ef2ed44c4 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 9 Dec 2020 16:16:30 -0500
|
||||
Subject: [PATCH 05/12] Issue 4092 - systemd-tmpfiles warnings
|
||||
|
||||
Bug Description:
|
||||
|
||||
systemd-tmpfiles warns about legacy paths in our tmpfiles configs.
|
||||
Using /var/run also introduces a race condition; see the following
|
||||
issue https://pagure.io/389-ds-base/issue/47429
|
||||
|
||||
Fix Description:
|
||||
|
||||
Instead of using @localstatedir@/run, use @localrundir@, which was
|
||||
introduced in #850.
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/766
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/4092
|
||||
|
||||
Reviewed by: vashirov & firstyear(Thanks!)
|
||||
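As an illustration of what the @localrundir@ substitution buys, here is a small hypothetical sketch (the sample text and helper are illustrative, not the real defaults.inf or lib389 code) of how the per-instance runtime paths resolve once the template keys point at /run:

from configparser import ConfigParser

# Trimmed, hypothetical defaults.inf content after the change, with
# @localrundir@ substituted as /run at build time.
SAMPLE_DEFAULTS = """
[slapd]
run_dir = /run
ldapi = /run/slapd-{instance_name}.socket
pid_file = /run/slapd-{instance_name}.pid
"""

def runtime_paths(instance_name, defaults_text=SAMPLE_DEFAULTS):
    # Expand the per-instance placeholders the way lib389 conceptually does.
    cp = ConfigParser()
    cp.read_string(defaults_text)
    slapd = cp['slapd']
    return {key: slapd[key].format(instance_name=instance_name)
            for key in ('run_dir', 'ldapi', 'pid_file')}

print(runtime_paths('standalone1'))
# {'run_dir': '/run',
#  'ldapi': '/run/slapd-standalone1.socket',
#  'pid_file': '/run/slapd-standalone1.pid'}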
---
|
||||
Makefile.am | 4 ++--
|
||||
configure.ac | 10 ++++++++--
|
||||
dirsrvtests/tests/suites/basic/basic_test.py | 3 ++-
|
||||
ldap/admin/src/defaults.inf.in | 8 ++++----
|
||||
ldap/servers/snmp/main.c | 8 ++++----
|
||||
src/lib389/lib389/__init__.py | 3 +++
|
||||
src/lib389/lib389/instance/options.py | 7 ++++++-
|
||||
src/lib389/lib389/instance/remove.py | 13 ++++++++-----
|
||||
src/lib389/lib389/instance/setup.py | 10 ++++++++--
|
||||
9 files changed, 45 insertions(+), 21 deletions(-)
|
||||
|
||||
diff --git a/Makefile.am b/Makefile.am
|
||||
index 36434cf17..fc5a6a7d1 100644
|
||||
--- a/Makefile.am
|
||||
+++ b/Makefile.am
|
||||
@@ -141,8 +141,8 @@ PATH_DEFINES = -DLOCALSTATEDIR="\"$(localstatedir)\"" -DSYSCONFDIR="\"$(sysconfd
|
||||
-DLIBDIR="\"$(libdir)\"" -DBINDIR="\"$(bindir)\"" \
|
||||
-DDATADIR="\"$(datadir)\"" -DDOCDIR="\"$(docdir)\"" \
|
||||
-DSBINDIR="\"$(sbindir)\"" -DPLUGINDIR="\"$(serverplugindir)\"" \
|
||||
- -DTEMPLATEDIR="\"$(sampledatadir)\"" -DSYSTEMSCHEMADIR="\"$(systemschemadir)\""
|
||||
-
|
||||
+ -DTEMPLATEDIR="\"$(sampledatadir)\"" -DSYSTEMSCHEMADIR="\"$(systemschemadir)\"" \
|
||||
+ -DLOCALRUNDIR="\"$(localrundir)\""
|
||||
# Now that we have all our defines in place, setup the CPPFLAGS
|
||||
|
||||
# These flags are the "must have" for all components
|
||||
diff --git a/configure.ac b/configure.ac
|
||||
index 61bf35e4a..9845beb7d 100644
|
||||
--- a/configure.ac
|
||||
+++ b/configure.ac
|
||||
@@ -418,7 +418,14 @@ fi
|
||||
|
||||
m4_include(m4/fhs.m4)
|
||||
|
||||
-localrundir='/run'
|
||||
+# /run directory path
|
||||
+AC_ARG_WITH([localrundir],
|
||||
+ AS_HELP_STRING([--with-localrundir=DIR],
|
||||
+ [Runtime data directory]),
|
||||
+ [localrundir=$with_localrundir],
|
||||
+ [localrundir="/run"])
|
||||
+AC_SUBST([localrundir])
|
||||
+
|
||||
cockpitdir=/389-console
|
||||
|
||||
# installation paths - by default, we store everything
|
||||
@@ -899,7 +906,6 @@ AC_SUBST(ldaplib_defs)
|
||||
AC_SUBST(ldaptool_bindir)
|
||||
AC_SUBST(ldaptool_opts)
|
||||
AC_SUBST(plainldif_opts)
|
||||
-AC_SUBST(localrundir)
|
||||
|
||||
AC_SUBST(brand)
|
||||
AC_SUBST(capbrand)
|
||||
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
|
||||
index 41726f073..7e80c443b 100644
|
||||
--- a/dirsrvtests/tests/suites/basic/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
|
||||
@@ -901,7 +901,8 @@ def test_basic_ldapagent(topology_st, import_example_ldif):
|
||||
# Remember, this is *forking*
|
||||
check_output([os.path.join(topology_st.standalone.get_sbin_dir(), 'ldap-agent'), config_file])
|
||||
# First kill any previous agents ....
|
||||
- pidpath = os.path.join(var_dir, 'run/ldap-agent.pid')
|
||||
+ run_dir = topology_st.standalone.get_run_dir()
|
||||
+ pidpath = os.path.join(run_dir, 'ldap-agent.pid')
|
||||
pid = None
|
||||
with open(pidpath, 'r') as pf:
|
||||
pid = pf.readlines()[0].strip()
|
||||
diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in
|
||||
index d5f504591..e02248b89 100644
|
||||
--- a/ldap/admin/src/defaults.inf.in
|
||||
+++ b/ldap/admin/src/defaults.inf.in
|
||||
@@ -35,12 +35,12 @@ sysconf_dir = @sysconfdir@
|
||||
initconfig_dir = @initconfigdir@
|
||||
config_dir = @instconfigdir@/slapd-{instance_name}
|
||||
local_state_dir = @localstatedir@
|
||||
-run_dir = @localstatedir@/run/dirsrv
|
||||
+run_dir = @localrundir@
|
||||
# This is the expected location of ldapi.
|
||||
-ldapi = @localstatedir@/run/slapd-{instance_name}.socket
|
||||
+ldapi = @localrundir@/slapd-{instance_name}.socket
|
||||
+pid_file = @localrundir@/slapd-{instance_name}.pid
|
||||
ldapi_listen = on
|
||||
ldapi_autobind = on
|
||||
-pid_file = @localstatedir@/run/dirsrv/slapd-{instance_name}.pid
|
||||
inst_dir = @serverdir@/slapd-{instance_name}
|
||||
plugin_dir = @serverplugindir@
|
||||
system_schema_dir = @systemschemadir@
|
||||
@@ -54,7 +54,7 @@ root_dn = cn=Directory Manager
|
||||
schema_dir = @instconfigdir@/slapd-{instance_name}/schema
|
||||
cert_dir = @instconfigdir@/slapd-{instance_name}
|
||||
|
||||
-lock_dir = @localstatedir@/lock/dirsrv/slapd-{instance_name}
|
||||
+lock_dir = @localrundir@/lock/dirsrv/slapd-{instance_name}
|
||||
log_dir = @localstatedir@/log/dirsrv/slapd-{instance_name}
|
||||
access_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/access
|
||||
audit_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/audit
|
||||
diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c
|
||||
index 88a4d532a..e6271a8a9 100644
|
||||
--- a/ldap/servers/snmp/main.c
|
||||
+++ b/ldap/servers/snmp/main.c
|
||||
@@ -287,14 +287,14 @@ load_config(char *conf_path)
|
||||
}
|
||||
|
||||
/* set pidfile path */
|
||||
- if ((pidfile = malloc(strlen(LOCALSTATEDIR) + strlen("/run/") +
|
||||
+ if ((pidfile = malloc(strlen(LOCALRUNDIR) + strlen("/") +
|
||||
strlen(LDAP_AGENT_PIDFILE) + 1)) != NULL) {
|
||||
- strncpy(pidfile, LOCALSTATEDIR, strlen(LOCALSTATEDIR) + 1);
|
||||
+ strncpy(pidfile, LOCALRUNDIR, strlen(LOCALRUNDIR) + 1);
|
||||
/* The above will likely not be NULL terminated, but we need to
|
||||
* be sure that we're properly NULL terminated for the below
|
||||
* strcat() to work properly. */
|
||||
- pidfile[strlen(LOCALSTATEDIR)] = (char)0;
|
||||
- strcat(pidfile, "/run/");
|
||||
+ pidfile[strlen(LOCALRUNDIR)] = (char)0;
|
||||
+ strcat(pidfile, "/");
|
||||
strcat(pidfile, LDAP_AGENT_PIDFILE);
|
||||
} else {
|
||||
printf("ldap-agent: malloc error processing config file\n");
|
||||
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
|
||||
index e0299c5b4..2a0b83913 100644
|
||||
--- a/src/lib389/lib389/__init__.py
|
||||
+++ b/src/lib389/lib389/__init__.py
|
||||
@@ -1709,6 +1709,9 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
def get_bin_dir(self):
|
||||
return self.ds_paths.bin_dir
|
||||
|
||||
+ def get_run_dir(self):
|
||||
+ return self.ds_paths.run_dir
|
||||
+
|
||||
def get_plugin_dir(self):
|
||||
return self.ds_paths.plugin_dir
|
||||
|
||||
diff --git a/src/lib389/lib389/instance/options.py b/src/lib389/lib389/instance/options.py
|
||||
index 4e083618c..d5b95e6df 100644
|
||||
--- a/src/lib389/lib389/instance/options.py
|
||||
+++ b/src/lib389/lib389/instance/options.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2019 Red Hat, Inc.
|
||||
+# Copyright (C) 2021 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -32,6 +32,7 @@ format_keys = [
|
||||
'backup_dir',
|
||||
'db_dir',
|
||||
'db_home_dir',
|
||||
+ 'ldapi',
|
||||
'ldif_dir',
|
||||
'lock_dir',
|
||||
'log_dir',
|
||||
@@ -233,6 +234,10 @@ class Slapd2Base(Options2):
|
||||
self._helptext['local_state_dir'] = "Sets the location of Directory Server variable data. Only set this parameter in a development environment."
|
||||
self._advanced['local_state_dir'] = True
|
||||
|
||||
+ self._options['ldapi'] = ds_paths.ldapi
|
||||
+ self._type['ldapi'] = str
|
||||
+ self._helptext['ldapi'] = "Sets the location of socket interface of the Directory Server."
|
||||
+
|
||||
self._options['lib_dir'] = ds_paths.lib_dir
|
||||
self._type['lib_dir'] = str
|
||||
self._helptext['lib_dir'] = "Sets the location of Directory Server shared libraries. Only set this parameter in a development environment."
|
||||
diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py
|
||||
index d7bb48ce0..1a35ddc07 100644
|
||||
--- a/src/lib389/lib389/instance/remove.py
|
||||
+++ b/src/lib389/lib389/instance/remove.py
|
||||
@@ -78,13 +78,16 @@ def remove_ds_instance(dirsrv, force=False):
|
||||
|
||||
_log.debug("Found instance marker at %s! Proceeding to remove ..." % dse_ldif_path)
|
||||
|
||||
- # Stop the instance (if running) and now we know it really does exist
|
||||
- # and hopefully have permission to access it ...
|
||||
- _log.debug("Stopping instance %s" % dirsrv.serverid)
|
||||
- dirsrv.stop()
|
||||
-
|
||||
### ANY NEW REMOVAL ACTION MUST BE BELOW THIS LINE!!!
|
||||
|
||||
+ # Remove LDAPI socket file
|
||||
+ ldapi_path = os.path.join(dirsrv.ds_paths.run_dir, "slapd-%s.socket" % dirsrv.serverid)
|
||||
+ if os.path.exists(ldapi_path):
|
||||
+ try:
|
||||
+ os.remove(ldapi_path)
|
||||
+ except OSError as e:
|
||||
+ _log.debug(f"Failed to remove LDAPI socket ({ldapi_path}) Error: {str(e)}")
|
||||
+
|
||||
# Remove these paths:
|
||||
# for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
|
||||
# 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
|
||||
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
|
||||
index ab7a2da85..57e7a9fd4 100644
|
||||
--- a/src/lib389/lib389/instance/setup.py
|
||||
+++ b/src/lib389/lib389/instance/setup.py
|
||||
@@ -732,7 +732,10 @@ class SetupDs(object):
|
||||
dse += line.replace('%', '{', 1).replace('%', '}', 1)
|
||||
|
||||
with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
|
||||
- ldapi_path = os.path.join(slapd['local_state_dir'], "run/slapd-%s.socket" % slapd['instance_name'])
|
||||
+ if os.path.exists(os.path.dirname(slapd['ldapi'])):
|
||||
+ ldapi_path = slapd['ldapi']
|
||||
+ else:
|
||||
+ ldapi_path = os.path.join(slapd['run_dir'], "slapd-%s.socket" % slapd['instance_name'])
|
||||
dse_fmt = dse.format(
|
||||
schema_dir=slapd['schema_dir'],
|
||||
lock_dir=slapd['lock_dir'],
|
||||
@@ -902,10 +905,13 @@ class SetupDs(object):
|
||||
self.log.info("Perform SELinux labeling ...")
|
||||
selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
|
||||
'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir',
|
||||
- 'run_dir', 'schema_dir', 'tmp_dir')
|
||||
+ 'schema_dir', 'tmp_dir')
|
||||
for path in selinux_paths:
|
||||
selinux_restorecon(slapd[path])
|
||||
|
||||
+ # Don't run restorecon on the entire /run directory
|
||||
+ selinux_restorecon(slapd['run_dir'] + '/dirsrv')
|
||||
+
|
||||
selinux_label_port(slapd['port'])
|
||||
|
||||
# Start the server
|
||||
--
|
||||
2.31.1
|
||||
|
@ -0,0 +1,41 @@
|
||||
From 2ee8d9d2ce8bf252287089d18e15b519f15e9538 Mon Sep 17 00:00:00 2001
|
||||
From: Firstyear <william@blackhats.net.au>
|
||||
Date: Thu, 6 Jan 2022 09:49:30 +1000
|
||||
Subject: [PATCH 1/5] Issue 5079 - BUG - multiple ways to specific primary
|
||||
(#5087)
|
||||
|
||||
Bug Description: In a winsync environment, we can only sync
|
||||
changes to a primary replica. There are, however, multiple
|
||||
ways to specify which server is a primary for a replication
|
||||
agreement, and I only accounted for one of them.
|
||||
|
||||
Fix Description: Improve the check to account for the
|
||||
other primary replica flags.
|
||||
|
||||
fixes: https://github.com/389ds/389-ds-base/issues/5079
|
||||
|
||||
Author: William Brown <william@blackhats.net.au>
|
||||
|
||||
Review by: @droideck
|
||||
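A tiny, hypothetical Python model of the widened check (the constants stand in for the C enums and flags from repl5.h and are not their real values): a replica may drive a winsync agreement if it is a primary, or an updatable replica that also logs changes and therefore acts as a supplier.

# Illustrative stand-ins for the C enums/flags; values are made up.
REPLICA_TYPE_PRIMARY = 2
REPLICA_TYPE_UPDATABLE = 3
REPLICA_LOG_CHANGES = 0x1

def can_drive_winsync(replica_type, flags):
    # Primary replicas always qualify; updatable replicas qualify only
    # when they also log changes (the other way of flagging a primary).
    if replica_type == REPLICA_TYPE_PRIMARY:
        return True
    return replica_type == REPLICA_TYPE_UPDATABLE and bool(flags & REPLICA_LOG_CHANGES)

assert can_drive_winsync(REPLICA_TYPE_PRIMARY, 0)
assert can_drive_winsync(REPLICA_TYPE_UPDATABLE, REPLICA_LOG_CHANGES)
assert not can_drive_winsync(REPLICA_TYPE_UPDATABLE, 0)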
---
|
||||
ldap/servers/plugins/replication/repl5_agmt.c | 4 +++-
|
||||
1 file changed, 3 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
index 82efdcd15..a71343dec 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
@@ -482,7 +482,9 @@ agmt_new_from_entry(Slapi_Entry *e)
|
||||
|
||||
/* DBDB: review this code */
|
||||
if (slapi_entry_attr_hasvalue(e, "objectclass", "nsDSWindowsReplicationAgreement")) {
|
||||
- if (replica && replica_get_type(replica) == REPLICA_TYPE_PRIMARY) {
|
||||
+ if (replica_get_type(replica) == REPLICA_TYPE_PRIMARY
|
||||
+ || (replica_get_type(replica) == REPLICA_TYPE_UPDATABLE && replica_is_flag_set(replica, REPLICA_LOG_CHANGES))
|
||||
+ ) {
|
||||
ra->agreement_type = REPLICA_TYPE_WINDOWS;
|
||||
windows_init_agreement_from_entry(ra, e);
|
||||
} else {
|
||||
--
|
||||
2.37.1
|
||||
|
@ -0,0 +1,873 @@
|
||||
From e65d6225398901c3319e72a460bc58e5d50df67c Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 3 Aug 2022 16:27:15 -0400
|
||||
Subject: [PATCH 2/5] Issue 3903 - Supplier should do periodic updates
|
||||
|
||||
Description:
|
||||
|
||||
On suppliers, update the keep alive entry periodically to keep the RUV up
|
||||
to date in case a replica is neglected for a long time. This prevents
|
||||
very long changelog scans when finally processing updates.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/3903
|
||||
|
||||
Reviewed by: firstyear & tbordaz(Thanks!)
|
||||
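Condensed into a short lib389 sketch (assuming a running supplier DirSrv instance; this mirrors what the reworked regression test below does, it is not new server API):

import time
import ldap
from lib389._constants import DEFAULT_SUFFIX
from lib389.replica import Replicas

def keepalive_timestamp(inst):
    # Read keepalivetimestamp from the first "cn=repl keep alive ..." subentry.
    entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_ONELEVEL,
                            '(&(objectclass=ldapsubentry)(cn=repl keep alive*))',
                            ['keepalivetimestamp'])
    return str(entries[0].data['keepalivetimestamp'])

def check_keepalive_refresh(supplier):
    # Lower nsds5ReplicaKeepAliveUpdateInterval to its 60 second floor,
    # wait one full period, and confirm the timestamp moved forward.
    replica = Replicas(supplier).get(DEFAULT_SUFFIX)
    replica.replace('nsds5ReplicaKeepAliveUpdateInterval', '60')
    supplier.restart()
    before = keepalive_timestamp(supplier)
    time.sleep(61)
    assert keepalive_timestamp(supplier) != before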
---
|
||||
.../suites/replication/regression_m2_test.py | 96 +++++--------
|
||||
.../suites/replication/replica_config_test.py | 6 +-
|
||||
ldap/schema/01core389.ldif | 3 +-
|
||||
ldap/servers/plugins/replication/repl5.h | 11 +-
|
||||
.../plugins/replication/repl5_inc_protocol.c | 44 +-----
|
||||
.../plugins/replication/repl5_replica.c | 127 +++++++++++++-----
|
||||
.../replication/repl5_replica_config.c | 12 ++
|
||||
.../plugins/replication/repl5_tot_protocol.c | 4 +-
|
||||
ldap/servers/plugins/replication/repl_extop.c | 2 +-
|
||||
.../plugins/replication/repl_globals.c | 1 +
|
||||
.../src/lib/replication/replConfig.jsx | 32 ++++-
|
||||
src/cockpit/389-console/src/replication.jsx | 6 +
|
||||
src/lib389/lib389/cli_conf/replication.py | 6 +-
|
||||
13 files changed, 202 insertions(+), 148 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
index 466e3c2c0..7dd0f2984 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
@@ -14,6 +14,7 @@ import ldif
|
||||
import ldap
|
||||
import pytest
|
||||
import subprocess
|
||||
+import time
|
||||
from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts
|
||||
from lib389.pwpolicy import PwPolicyManager
|
||||
from lib389.utils import *
|
||||
@@ -204,12 +205,12 @@ def rename_entry(server, idx, ou_name, new_parent):
|
||||
def add_ldapsubentry(server, parent):
|
||||
pwp = PwPolicyManager(server)
|
||||
policy_props = {'passwordStorageScheme': 'ssha',
|
||||
- 'passwordCheckSyntax': 'on',
|
||||
- 'passwordInHistory': '6',
|
||||
- 'passwordChange': 'on',
|
||||
- 'passwordMinAge': '0',
|
||||
- 'passwordExp': 'off',
|
||||
- 'passwordMustChange': 'off',}
|
||||
+ 'passwordCheckSyntax': 'on',
|
||||
+ 'passwordInHistory': '6',
|
||||
+ 'passwordChange': 'on',
|
||||
+ 'passwordMinAge': '0',
|
||||
+ 'passwordExp': 'off',
|
||||
+ 'passwordMustChange': 'off',}
|
||||
log.info('Create password policy for subtree {}'.format(parent))
|
||||
pwp.create_subtree_policy(parent, policy_props)
|
||||
|
||||
@@ -742,7 +743,7 @@ def get_keepalive_entries(instance, replica):
|
||||
try:
|
||||
entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL,
|
||||
"(&(objectclass=ldapsubentry)(cn=repl keep alive*))",
|
||||
- ['cn', 'nsUniqueId', 'modifierTimestamp'])
|
||||
+ ['cn', 'keepalivetimestamp', 'nsUniqueId', 'modifierTimestamp'])
|
||||
except ldap.LDAPError as e:
|
||||
log.fatal('Failed to retrieve keepalive entry (%s) on instance %s: error %s' % (dn, instance, str(e)))
|
||||
assert False
|
||||
@@ -761,6 +762,7 @@ def verify_keepalive_entries(topo, expected):
|
||||
# (for example after: db2ldif / demote a supplier / ldif2db / init other suppliers)
|
||||
# ==> if the function is somehow pushed in lib389, a check better than simply counting the entries
|
||||
# should be done.
|
||||
+ entries = []
|
||||
for supplierId in topo.ms:
|
||||
supplier = topo.ms[supplierId]
|
||||
for replica in Replicas(supplier).list():
|
||||
@@ -771,6 +773,7 @@ def verify_keepalive_entries(topo, expected):
|
||||
keepaliveEntries = get_keepalive_entries(supplier, replica);
|
||||
expectedCount = len(topo.ms) if expected else 0
|
||||
foundCount = len(keepaliveEntries)
|
||||
+ entries += keepaliveEntries
|
||||
if (foundCount == expectedCount):
|
||||
log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.')
|
||||
else:
|
||||
@@ -778,70 +781,45 @@ def verify_keepalive_entries(topo, expected):
|
||||
f'while {expectedCount} were expected on {replica_info}.')
|
||||
assert False
|
||||
|
||||
+ return entries
|
||||
+
|
||||
|
||||
-def test_online_init_should_create_keepalive_entries(topo_m2):
|
||||
- """Check that keep alive entries are created when initializinf a supplier from another one
|
||||
+def test_keepalive_entries(topo_m2):
|
||||
+ """Check that keep alive entries are created
|
||||
|
||||
:id: d5940e71-d18a-4b71-aaf7-b9185361fffe
|
||||
:setup: Two suppliers replication setup
|
||||
:steps:
|
||||
- 1. Generate ldif without replication data
|
||||
- 2 Init both suppliers from that ldif
|
||||
- 3 Check that keep alive entries does not exists
|
||||
- 4 Perform on line init of supplier2 from supplier1
|
||||
- 5 Check that keep alive entries exists
|
||||
+ 1. Keep alive entries are present
|
||||
+ 2. Keep alive entries are updated every 60 seconds
|
||||
:expectedresults:
|
||||
- 1. No error while generating ldif
|
||||
- 2. No error while importing the ldif file
|
||||
- 3. No keepalive entrie should exists on any suppliers
|
||||
- 4. No error while initializing supplier2
|
||||
- 5. All keepalive entries should exist on every suppliers
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
|
||||
"""
|
||||
|
||||
- repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
- m1 = topo_m2.ms["supplier1"]
|
||||
- m2 = topo_m2.ms["supplier2"]
|
||||
- # Step 1: Generate ldif without replication data
|
||||
- m1.stop()
|
||||
- m2.stop()
|
||||
- ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
|
||||
- m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
|
||||
- excludeSuffixes=None, repl_data=False,
|
||||
- outputfile=ldif_file, encrypt=False)
|
||||
- # Remove replication metadata that are still in the ldif
|
||||
- _remove_replication_data(ldif_file)
|
||||
-
|
||||
- # Step 2: Init both suppliers from that ldif
|
||||
- m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
|
||||
- m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
|
||||
- m1.start()
|
||||
- m2.start()
|
||||
-
|
||||
- """ Replica state is now as if CLI setup has been done using:
|
||||
- dsconf supplier1 replication enable --suffix "${SUFFIX}" --role supplier
|
||||
- dsconf supplier2 replication enable --suffix "${SUFFIX}" --role supplier
|
||||
- dsconf supplier1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
|
||||
- dsconf supplier2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
|
||||
- dsconf supplier1 repl-agmt create --suffix "${SUFFIX}"
|
||||
- dsconf supplier2 repl-agmt create --suffix "${SUFFIX}"
|
||||
- """
|
||||
+ # default interval is 1 hour, too long for test, set it to the minimum of
|
||||
+ # 60 seconds
|
||||
+ for supplierId in topo_m2.ms:
|
||||
+ supplier = topo_m2.ms[supplierId]
|
||||
+ replica = Replicas(supplier).get(DEFAULT_SUFFIX)
|
||||
+ replica.replace('nsds5ReplicaKeepAliveUpdateInterval', '60')
|
||||
+ supplier.restart()
|
||||
|
||||
- # Step 3: No keepalive entrie should exists on any suppliers
|
||||
- verify_keepalive_entries(topo_m2, False)
|
||||
+ # verify entries exist
|
||||
+ entries = verify_keepalive_entries(topo_m2, True);
|
||||
|
||||
- # Step 4: Perform on line init of supplier2 from supplier1
|
||||
- agmt = Agreements(m1).list()[0]
|
||||
- agmt.begin_reinit()
|
||||
- (done, error) = agmt.wait_reinit()
|
||||
- assert done is True
|
||||
- assert error is False
|
||||
+ # Get the current keepalivetimestamp from each keep alive entry
|
||||
+ keep_alive_s1 = str(entries[0].data['keepalivetimestamp'])
|
||||
+ keep_alive_s2 = str(entries[1].data['keepalivetimestamp'])
|
||||
+
|
||||
+ # Wait for event interval (60 secs) to pass
|
||||
+ time.sleep(61)
|
||||
|
||||
- # Step 5: All keepalive entries should exists on every suppliers
|
||||
- # Verify the keep alive entry once replication is in sync
|
||||
- # (that is the step that fails when bug is not fixed)
|
||||
- repl.wait_for_ruv(m2,m1)
|
||||
- verify_keepalive_entries(topo_m2, True);
|
||||
+ # Check keep alive entries have been updated
|
||||
+ entries = verify_keepalive_entries(topo_m2, True);
|
||||
+ assert keep_alive_s1 != str(entries[0].data['keepalivetimestamp'])
|
||||
+ assert keep_alive_s2 != str(entries[1].data['keepalivetimestamp'])
|
||||
|
||||
|
||||
@pytest.mark.ds49915
|
||||
diff --git a/dirsrvtests/tests/suites/replication/replica_config_test.py b/dirsrvtests/tests/suites/replication/replica_config_test.py
|
||||
index c2140a2ac..06ae5afcf 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/replica_config_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/replica_config_test.py
|
||||
@@ -50,7 +50,8 @@ repl_add_attrs = [('nsDS5ReplicaType', '-1', '4', overflow, notnum, '1'),
|
||||
('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '1'),
|
||||
('nsds5ReplicaReleaseTimeout', '-1', too_big, overflow, notnum, '1'),
|
||||
('nsds5ReplicaBackoffMin', '0', too_big, overflow, notnum, '3'),
|
||||
- ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6')]
|
||||
+ ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6'),
|
||||
+ ('nsds5ReplicaKeepAliveUpdateInterval', '59', too_big, overflow, notnum, '60'),]
|
||||
|
||||
repl_mod_attrs = [('nsDS5Flags', '-1', '2', overflow, notnum, '1'),
|
||||
('nsds5ReplicaPurgeDelay', '-2', too_big, overflow, notnum, '1'),
|
||||
@@ -59,7 +60,8 @@ repl_mod_attrs = [('nsDS5Flags', '-1', '2', overflow, notnum, '1'),
|
||||
('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '1'),
|
||||
('nsds5ReplicaReleaseTimeout', '-1', too_big, overflow, notnum, '1'),
|
||||
('nsds5ReplicaBackoffMin', '0', too_big, overflow, notnum, '3'),
|
||||
- ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6')]
|
||||
+ ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6'),
|
||||
+ ('nsds5ReplicaKeepAliveUpdateInterval', '59', too_big, overflow, notnum, '60'),]
|
||||
|
||||
agmt_attrs = [
|
||||
('nsds5ReplicaPort', '0', '65535', overflow, notnum, '389'),
|
||||
diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif
|
||||
index 0c73e5114..7a9598730 100644
|
||||
--- a/ldap/schema/01core389.ldif
|
||||
+++ b/ldap/schema/01core389.ldif
|
||||
@@ -327,6 +327,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2371 NAME 'nsDS5ReplicaBootstrapBindDN'
|
||||
attributeTypes: ( 2.16.840.1.113730.3.1.2372 NAME 'nsDS5ReplicaBootstrapCredentials' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
|
||||
attributeTypes: ( 2.16.840.1.113730.3.1.2373 NAME 'nsDS5ReplicaBootstrapBindMethod' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
|
||||
attributeTypes: ( 2.16.840.1.113730.3.1.2374 NAME 'nsDS5ReplicaBootstrapTransportInfo' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
|
||||
+attributeTypes: ( 2.16.840.1.113730.3.1.2390 NAME 'nsds5ReplicaKeepAliveUpdateInterval' DESC '389 defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
|
||||
#
|
||||
# objectclasses
|
||||
#
|
||||
@@ -336,7 +337,7 @@ objectClasses: ( 2.16.840.1.113730.3.2.44 NAME 'nsIndex' DESC 'Netscape defined
|
||||
objectClasses: ( 2.16.840.1.113730.3.2.109 NAME 'nsBackendInstance' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' )
|
||||
objectClasses: ( 2.16.840.1.113730.3.2.110 NAME 'nsMappingTree' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' )
|
||||
objectClasses: ( 2.16.840.1.113730.3.2.104 NAME 'nsContainer' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' )
|
||||
-objectClasses: ( 2.16.840.1.113730.3.2.108 NAME 'nsDS5Replica' DESC 'Replication configuration objectclass' SUP top MUST ( nsDS5ReplicaRoot $ nsDS5ReplicaId ) MAY (cn $ nsds5ReplicaPreciseTombstonePurging $ nsds5ReplicaCleanRUV $ nsds5ReplicaAbortCleanRUV $ nsDS5ReplicaType $ nsDS5ReplicaBindDN $ nsDS5ReplicaBindDNGroup $ nsState $ nsDS5ReplicaName $ nsDS5Flags $ nsDS5Task $ nsDS5ReplicaReferral $ nsDS5ReplicaAutoReferral $ nsds5ReplicaPurgeDelay $ nsds5ReplicaTombstonePurgeInterval $ nsds5ReplicaChangeCount $ nsds5ReplicaLegacyConsumer $ nsds5ReplicaProtocolTimeout $ nsds5ReplicaBackoffMin $ nsds5ReplicaBackoffMax $ nsds5ReplicaReleaseTimeout $ nsDS5ReplicaBindDnGroupCheckInterval ) X-ORIGIN 'Netscape Directory Server' )
|
||||
+objectClasses: ( 2.16.840.1.113730.3.2.108 NAME 'nsDS5Replica' DESC 'Replication configuration objectclass' SUP top MUST ( nsDS5ReplicaRoot $ nsDS5ReplicaId ) MAY (cn $ nsds5ReplicaPreciseTombstonePurging $ nsds5ReplicaCleanRUV $ nsds5ReplicaAbortCleanRUV $ nsDS5ReplicaType $ nsDS5ReplicaBindDN $ nsDS5ReplicaBindDNGroup $ nsState $ nsDS5ReplicaName $ nsDS5Flags $ nsDS5Task $ nsDS5ReplicaReferral $ nsDS5ReplicaAutoReferral $ nsds5ReplicaPurgeDelay $ nsds5ReplicaTombstonePurgeInterval $ nsds5ReplicaChangeCount $ nsds5ReplicaLegacyConsumer $ nsds5ReplicaProtocolTimeout $ nsds5ReplicaBackoffMin $ nsds5ReplicaBackoffMax $ nsds5ReplicaReleaseTimeout $ nsDS5ReplicaBindDnGroupCheckInterval $ nsds5ReplicaKeepAliveUpdateInterval ) X-ORIGIN 'Netscape Directory Server' )
|
||||
objectClasses: ( 2.16.840.1.113730.3.2.113 NAME 'nsTombstone' DESC 'Netscape defined objectclass' SUP top MAY ( nstombstonecsn $ nsParentUniqueId $ nscpEntryDN ) X-ORIGIN 'Netscape Directory Server' )
|
||||
objectClasses: ( 2.16.840.1.113730.3.2.103 NAME 'nsDS5ReplicationAgreement' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsds5ReplicaCleanRUVNotified $ nsDS5ReplicaHost $ nsDS5ReplicaPort $ nsDS5ReplicaTransportInfo $ nsDS5ReplicaBindDN $ nsDS5ReplicaCredentials $ nsDS5ReplicaBindMethod $ nsDS5ReplicaRoot $ nsDS5ReplicatedAttributeList $ nsDS5ReplicatedAttributeListTotal $ nsDS5ReplicaUpdateSchedule $ nsds5BeginReplicaRefresh $ description $ nsds50ruv $ nsruvReplicaLastModified $ nsds5ReplicaTimeout $ nsds5replicaChangesSentSinceStartup $ nsds5replicaLastUpdateEnd $ nsds5replicaLastUpdateStart $ nsds5replicaLastUpdateStatus $ nsds5replicaUpdateInProgress $ nsds5replicaLastInitEnd $ nsds5ReplicaEnabled $ nsds5replicaLastInitStart $ nsds5replicaLastInitStatus $ nsds5debugreplicatimeout $ nsds5replicaBusyWaitTime $ nsds5ReplicaStripAttrs $ nsds5replicaSessionPauseTime $ nsds5ReplicaProtocolTimeout $ nsds5ReplicaFlowControlWindow $ nsds5ReplicaFlowControlPause $ nsDS5ReplicaWaitForAsyncResults $ nsds5ReplicaIgnoreMissingChange $ nsDS5ReplicaBootstrapBindDN $ nsDS5ReplicaBootstrapCredentials $ nsDS5ReplicaBootstrapBindMethod $ nsDS5ReplicaBootstrapTransportInfo ) X-ORIGIN 'Netscape Directory Server' )
|
||||
objectClasses: ( 2.16.840.1.113730.3.2.39 NAME 'nsslapdConfig' DESC 'Netscape defined objectclass' SUP top MAY ( cn ) X-ORIGIN 'Netscape Directory Server' )
|
||||
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
|
||||
index 06e747811..c2fbff8c0 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5.h
|
||||
+++ b/ldap/servers/plugins/replication/repl5.h
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2020 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -120,6 +120,8 @@
|
||||
#define PROTOCOL_STATUS_TOTAL_SENDING_DATA 711
|
||||
|
||||
#define DEFAULT_PROTOCOL_TIMEOUT 120
|
||||
+#define DEFAULT_REPLICA_KEEPALIVE_UPDATE_INTERVAL 3600
|
||||
+#define REPLICA_KEEPALIVE_UPDATE_INTERVAL_MIN 60
|
||||
|
||||
/* To Allow Consumer Initialization when adding an agreement - */
|
||||
#define STATE_PERFORMING_TOTAL_UPDATE 501
|
||||
@@ -162,6 +164,7 @@ extern const char *type_nsds5ReplicaBootstrapBindDN;
|
||||
extern const char *type_nsds5ReplicaBootstrapCredentials;
|
||||
extern const char *type_nsds5ReplicaBootstrapBindMethod;
|
||||
extern const char *type_nsds5ReplicaBootstrapTransportInfo;
|
||||
+extern const char *type_replicaKeepAliveUpdateInterval;
|
||||
|
||||
/* Attribute names for windows replication agreements */
|
||||
extern const char *type_nsds7WindowsReplicaArea;
|
||||
@@ -677,8 +680,8 @@ Replica *windows_replica_new(const Slapi_DN *root);
|
||||
during addition of the replica over LDAP */
|
||||
int replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, Replica **r);
|
||||
void replica_destroy(void **arg);
|
||||
-int replica_subentry_update(Slapi_DN *repl_root, ReplicaId rid);
|
||||
-int replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid);
|
||||
+void replica_subentry_update(time_t when, void *arg);
|
||||
+int replica_subentry_check(const char *repl_root, ReplicaId rid);
|
||||
PRBool replica_get_exclusive_access(Replica *r, PRBool *isInc, uint64_t connid, int opid, const char *locking_purl, char **current_purl);
|
||||
void replica_relinquish_exclusive_access(Replica *r, uint64_t connid, int opid);
|
||||
PRBool replica_get_tombstone_reap_active(const Replica *r);
|
||||
@@ -739,6 +742,8 @@ void consumer5_set_mapping_tree_state_for_replica(const Replica *r, RUV *supplie
|
||||
Replica *replica_get_for_backend(const char *be_name);
|
||||
void replica_set_purge_delay(Replica *r, uint32_t purge_delay);
|
||||
void replica_set_tombstone_reap_interval(Replica *r, long interval);
|
||||
+void replica_set_keepalive_update_interval(Replica *r, int64_t interval);
|
||||
+int64_t replica_get_keepalive_update_interval(Replica *r);
|
||||
void replica_update_ruv_consumer(Replica *r, RUV *supplier_ruv);
|
||||
Slapi_Entry *get_in_memory_ruv(Slapi_DN *suffix_sdn);
|
||||
int replica_write_ruv(Replica *r);
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
|
||||
index 4bb384882..846951b9e 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2020 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -1677,13 +1677,9 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
|
||||
} else {
|
||||
ConnResult replay_crc;
|
||||
Replica *replica = prp->replica;
|
||||
- PRBool subentry_update_needed = PR_FALSE;
|
||||
PRUint64 release_timeout = replica_get_release_timeout(replica);
|
||||
char csn_str[CSN_STRSIZE];
|
||||
- int skipped_updates = 0;
|
||||
- int fractional_repl;
|
||||
int finished = 0;
|
||||
-#define FRACTIONAL_SKIPPED_THRESHOLD 100
|
||||
|
||||
/* Start the results reading thread */
|
||||
rd = repl5_inc_rd_new(prp);
|
||||
@@ -1700,7 +1696,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
|
||||
|
||||
memset((void *)&op, 0, sizeof(op));
|
||||
entry.op = &op;
|
||||
- fractional_repl = agmt_is_fractional(prp->agmt);
|
||||
do {
|
||||
cl5_operation_parameters_done(entry.op);
|
||||
memset((void *)entry.op, 0, sizeof(op));
|
||||
@@ -1781,14 +1776,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
|
||||
replica_id = csn_get_replicaid(entry.op->csn);
|
||||
uniqueid = entry.op->target_address.uniqueid;
|
||||
|
||||
- if (fractional_repl && message_id) {
|
||||
- /* This update was sent no need to update the subentry
|
||||
- * and restart counting the skipped updates
|
||||
- */
|
||||
- subentry_update_needed = PR_FALSE;
|
||||
- skipped_updates = 0;
|
||||
- }
|
||||
-
|
||||
if (prp->repl50consumer && message_id) {
|
||||
int operation, error = 0;
|
||||
|
||||
@@ -1816,15 +1803,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
|
||||
agmt_get_long_name(prp->agmt),
|
||||
entry.op->target_address.uniqueid, csn_str);
|
||||
agmt_inc_last_update_changecount(prp->agmt, csn_get_replicaid(entry.op->csn), 1 /*skipped*/);
|
||||
- if (fractional_repl) {
|
||||
- skipped_updates++;
|
||||
- if (skipped_updates > FRACTIONAL_SKIPPED_THRESHOLD) {
|
||||
- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
- "send_updates - %s: skipped updates is too high (%d) if no other update is sent we will update the subentry\n",
|
||||
- agmt_get_long_name(prp->agmt), skipped_updates);
|
||||
- subentry_update_needed = PR_TRUE;
|
||||
- }
|
||||
- }
|
||||
}
|
||||
}
|
||||
break;
|
||||
@@ -1906,26 +1884,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
|
||||
PR_Unlock(rd->lock);
|
||||
} while (!finished);
|
||||
|
||||
- if (fractional_repl && subentry_update_needed) {
|
||||
- ReplicaId rid = -1; /* Used to create the replica keep alive subentry */
|
||||
- Slapi_DN *replarea_sdn = NULL;
|
||||
-
|
||||
- if (replica) {
|
||||
- rid = replica_get_rid(replica);
|
||||
- }
|
||||
- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
- "send_updates - %s: skipped updates was definitely too high (%d) update the subentry now\n",
|
||||
- agmt_get_long_name(prp->agmt), skipped_updates);
|
||||
- replarea_sdn = agmt_get_replarea(prp->agmt);
|
||||
- if (!replarea_sdn) {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
|
||||
- "send_updates - Unknown replication area due to agreement not found.");
|
||||
- agmt_set_last_update_status(prp->agmt, 0, -1, "Agreement is corrupted: missing suffix");
|
||||
- return_value = UPDATE_FATAL_ERROR;
|
||||
- } else {
|
||||
- replica_subentry_update(replarea_sdn, rid);
|
||||
- }
|
||||
- }
|
||||
/* Terminate the results reading thread */
|
||||
if (!prp->repl50consumer) {
|
||||
/* We need to ensure that we wait until all the responses have been received from our operations */
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
|
||||
index 3bd57647f..ded4cf754 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_replica.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_replica.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -22,7 +22,6 @@
|
||||
#include "slap.h"
|
||||
|
||||
#define RUV_SAVE_INTERVAL (30 * 1000) /* 30 seconds */
|
||||
-
|
||||
#define REPLICA_RDN "cn=replica"
|
||||
|
||||
/*
|
||||
@@ -48,6 +47,7 @@ struct replica
|
||||
PRMonitor *repl_lock; /* protects entire structure */
|
||||
Slapi_Eq_Context repl_eqcxt_rs; /* context to cancel event that saves ruv */
|
||||
Slapi_Eq_Context repl_eqcxt_tr; /* context to cancel event that reaps tombstones */
|
||||
+ Slapi_Eq_Context repl_eqcxt_ka_update; /* keep-alive entry update event */
|
||||
Object *repl_csngen; /* CSN generator for this replica */
|
||||
PRBool repl_csn_assigned; /* Flag set when new csn is assigned. */
|
||||
int64_t repl_purge_delay; /* When purgeable, CSNs are held on to for this many extra seconds */
|
||||
@@ -66,6 +66,7 @@ struct replica
|
||||
uint64_t agmt_count; /* Number of agmts */
|
||||
Slapi_Counter *release_timeout; /* The amount of time to wait before releasing active replica */
|
||||
uint64_t abort_session; /* Abort the current replica session */
|
||||
+ int64_t keepalive_update_interval; /* interval to do dummy update to keep RUV fresh */)
|
||||
};
|
||||
|
||||
|
||||
@@ -133,8 +134,8 @@ replica_new(const Slapi_DN *root)
|
||||
&r);
|
||||
|
||||
if (NULL == r) {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_new - "
|
||||
- "Unable to configure replica %s: %s\n",
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
|
||||
+ "replica_new - Unable to configure replica %s: %s\n",
|
||||
slapi_sdn_get_dn(root), errorbuf);
|
||||
}
|
||||
slapi_entry_free(e);
|
||||
@@ -232,7 +233,15 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation,
|
||||
In that case the updated would fail but nothing bad would happen. The next
|
||||
scheduled update would save the state */
|
||||
r->repl_eqcxt_rs = slapi_eq_repeat_rel(replica_update_state, r->repl_name,
|
||||
- slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL);
|
||||
+ slapi_current_rel_time_t() + START_UPDATE_DELAY,
|
||||
+ RUV_SAVE_INTERVAL);
|
||||
+
|
||||
+ /* create supplier update event */
|
||||
+ if (r->repl_eqcxt_ka_update == NULL && replica_get_type(r) == REPLICA_TYPE_UPDATABLE) {
|
||||
+ r->repl_eqcxt_ka_update = slapi_eq_repeat_rel(replica_subentry_update, r,
|
||||
+ slapi_current_rel_time_t() + START_UPDATE_DELAY,
|
||||
+ replica_get_keepalive_update_interval(r));
|
||||
+ }
|
||||
|
||||
if (r->tombstone_reap_interval > 0) {
|
||||
/*
|
||||
@@ -302,6 +311,11 @@ replica_destroy(void **arg)
|
||||
* and ruv updates.
|
||||
*/
|
||||
|
||||
+ if (r->repl_eqcxt_ka_update) {
|
||||
+ slapi_eq_cancel_rel(r->repl_eqcxt_ka_update);
|
||||
+ r->repl_eqcxt_ka_update = NULL;
|
||||
+ }
|
||||
+
|
||||
if (r->repl_eqcxt_rs) {
|
||||
slapi_eq_cancel_rel(r->repl_eqcxt_rs);
|
||||
r->repl_eqcxt_rs = NULL;
|
||||
@@ -393,7 +407,7 @@ replica_destroy(void **arg)
|
||||
|
||||
|
||||
static int
|
||||
-replica_subentry_create(Slapi_DN *repl_root, ReplicaId rid)
|
||||
+replica_subentry_create(const char *repl_root, ReplicaId rid)
|
||||
{
|
||||
char *entry_string = NULL;
|
||||
Slapi_Entry *e = NULL;
|
||||
@@ -402,7 +416,7 @@ replica_subentry_create(Slapi_DN *repl_root, ReplicaId rid)
|
||||
int rc = 0;
|
||||
|
||||
entry_string = slapi_ch_smprintf("dn: cn=%s %d,%s\nobjectclass: top\nobjectclass: ldapsubentry\nobjectclass: extensibleObject\ncn: %s %d",
|
||||
- KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root), KEEP_ALIVE_ENTRY, rid);
|
||||
+ KEEP_ALIVE_ENTRY, rid, repl_root, KEEP_ALIVE_ENTRY, rid);
|
||||
if (entry_string == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
|
||||
"replica_subentry_create - Failed in slapi_ch_smprintf\n");
|
||||
@@ -441,7 +455,7 @@ done:
|
||||
}
|
||||
|
||||
int
|
||||
-replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid)
|
||||
+replica_subentry_check(const char *repl_root, ReplicaId rid)
|
||||
{
|
||||
Slapi_PBlock *pb;
|
||||
char *filter = NULL;
|
||||
@@ -451,7 +465,7 @@ replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid)
|
||||
|
||||
pb = slapi_pblock_new();
|
||||
filter = slapi_ch_smprintf("(&(objectclass=ldapsubentry)(cn=%s %d))", KEEP_ALIVE_ENTRY, rid);
|
||||
- slapi_search_internal_set_pb(pb, slapi_sdn_get_dn(repl_root), LDAP_SCOPE_ONELEVEL,
|
||||
+ slapi_search_internal_set_pb(pb, repl_root, LDAP_SCOPE_ONELEVEL,
|
||||
filter, NULL, 0, NULL, NULL,
|
||||
repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), 0);
|
||||
slapi_search_internal_pb(pb);
|
||||
@@ -460,17 +474,19 @@ replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid)
|
||||
slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
|
||||
if (entries && (entries[0] == NULL)) {
|
||||
slapi_log_err(SLAPI_LOG_NOTICE, repl_plugin_name,
|
||||
- "replica_subentry_check - Need to create replication keep alive entry <cn=%s %d,%s>\n", KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root));
|
||||
+ "replica_subentry_check - Need to create replication keep alive entry <cn=%s %d,%s>\n",
|
||||
+ KEEP_ALIVE_ENTRY, rid, repl_root);
|
||||
rc = replica_subentry_create(repl_root, rid);
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
- "replica_subentry_check - replication keep alive entry <cn=%s %d,%s> already exists\n", KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root));
|
||||
+ "replica_subentry_check - replication keep alive entry <cn=%s %d,%s> already exists\n",
|
||||
+ KEEP_ALIVE_ENTRY, rid, repl_root);
|
||||
rc = 0;
|
||||
}
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
|
||||
"replica_subentry_check - Error accessing replication keep alive entry <cn=%s %d,%s> res=%d\n",
|
||||
- KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root), res);
|
||||
+ KEEP_ALIVE_ENTRY, rid, repl_root, res);
|
||||
/* The status of the entry is not clear, do not attempt to create it */
|
||||
rc = 1;
|
||||
}
|
||||
@@ -481,60 +497,59 @@ replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid)
|
||||
return rc;
|
||||
}
|
||||
|
||||
-int
|
||||
-replica_subentry_update(Slapi_DN *repl_root, ReplicaId rid)
|
||||
+void
|
||||
+replica_subentry_update(time_t when __attribute__((unused)), void *arg)
|
||||
{
|
||||
- int ldrc;
|
||||
- int rc = LDAP_SUCCESS; /* Optimistic default */
|
||||
+ Slapi_PBlock *modpb = NULL;
|
||||
+ Replica *replica = (Replica *)arg;
|
||||
+ ReplicaId rid;
|
||||
LDAPMod *mods[2];
|
||||
LDAPMod mod;
|
||||
struct berval *vals[2];
|
||||
- char buf[SLAPI_TIMESTAMP_BUFSIZE];
|
||||
struct berval val;
|
||||
- Slapi_PBlock *modpb = NULL;
|
||||
- char *dn;
|
||||
+ const char *repl_root = NULL;
|
||||
+ char buf[SLAPI_TIMESTAMP_BUFSIZE];
|
||||
+ char *dn = NULL;
|
||||
+ int ldrc = 0;
|
||||
|
||||
+ rid = replica_get_rid(replica);
|
||||
+ repl_root = slapi_ch_strdup(slapi_sdn_get_dn(replica_get_root(replica)));
|
||||
replica_subentry_check(repl_root, rid);
|
||||
|
||||
slapi_timestamp_utc_hr(buf, SLAPI_TIMESTAMP_BUFSIZE);
|
||||
-
|
||||
- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "subentry_update called at %s\n", buf);
|
||||
-
|
||||
-
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "replica_subentry_update called at %s\n", buf);
|
||||
val.bv_val = buf;
|
||||
val.bv_len = strlen(val.bv_val);
|
||||
-
|
||||
vals[0] = &val;
|
||||
vals[1] = NULL;
|
||||
|
||||
mod.mod_op = LDAP_MOD_REPLACE | LDAP_MOD_BVALUES;
|
||||
mod.mod_type = KEEP_ALIVE_ATTR;
|
||||
mod.mod_bvalues = vals;
|
||||
-
|
||||
mods[0] = &mod;
|
||||
mods[1] = NULL;
|
||||
|
||||
modpb = slapi_pblock_new();
|
||||
- dn = slapi_ch_smprintf(KEEP_ALIVE_DN_FORMAT, KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root));
|
||||
-
|
||||
+ dn = slapi_ch_smprintf(KEEP_ALIVE_DN_FORMAT, KEEP_ALIVE_ENTRY, rid, repl_root);
|
||||
slapi_modify_internal_set_pb(modpb, dn, mods, NULL, NULL,
|
||||
repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), 0);
|
||||
slapi_modify_internal_pb(modpb);
|
||||
-
|
||||
slapi_pblock_get(modpb, SLAPI_PLUGIN_INTOP_RESULT, &ldrc);
|
||||
-
|
||||
if (ldrc != LDAP_SUCCESS) {
|
||||
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
- "Failure (%d) to update replication keep alive entry \"%s: %s\"\n", ldrc, KEEP_ALIVE_ATTR, buf);
|
||||
- rc = ldrc;
|
||||
+ "replica_subentry_update - "
|
||||
+ "Failure (%d) to update replication keep alive entry \"%s: %s\"\n",
|
||||
+ ldrc, KEEP_ALIVE_ATTR, buf);
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, repl_plugin_name,
|
||||
- "Successful update of replication keep alive entry \"%s: %s\"\n", KEEP_ALIVE_ATTR, buf);
|
||||
+ "replica_subentry_update - "
|
||||
+ "Successful update of replication keep alive entry \"%s: %s\"\n",
|
||||
+ KEEP_ALIVE_ATTR, buf);
|
||||
}
|
||||
|
||||
slapi_pblock_destroy(modpb);
|
||||
+ slapi_ch_free_string((char **)&repl_root);
|
||||
slapi_ch_free_string(&dn);
|
||||
- return rc;
|
||||
}
|
||||
/*
|
||||
* Attempt to obtain exclusive access to replica (advisory only)
|
||||
@@ -1512,7 +1527,15 @@ replica_set_enabled(Replica *r, PRBool enable)
|
||||
if (r->repl_eqcxt_rs == NULL) /* event is not already registered */
|
||||
{
|
||||
r->repl_eqcxt_rs = slapi_eq_repeat_rel(replica_update_state, r->repl_name,
|
||||
- slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL);
|
||||
+ slapi_current_rel_time_t() + START_UPDATE_DELAY,
|
||||
+ RUV_SAVE_INTERVAL);
|
||||
+
|
||||
+ }
|
||||
+ /* create supplier update event */
|
||||
+ if (r->repl_eqcxt_ka_update == NULL && replica_get_type(r) == REPLICA_TYPE_UPDATABLE) {
|
||||
+ r->repl_eqcxt_ka_update = slapi_eq_repeat_rel(replica_subentry_update, r,
|
||||
+ slapi_current_rel_time_t() + START_UPDATE_DELAY,
|
||||
+ replica_get_keepalive_update_interval(r));
|
||||
}
|
||||
} else /* disable */
|
||||
{
|
||||
@@ -1521,6 +1544,11 @@ replica_set_enabled(Replica *r, PRBool enable)
|
||||
slapi_eq_cancel_rel(r->repl_eqcxt_rs);
|
||||
r->repl_eqcxt_rs = NULL;
|
||||
}
|
||||
+ /* Remove supplier update event */
|
||||
+ if (replica_get_type(r) == REPLICA_TYPE_PRIMARY) {
|
||||
+ slapi_eq_cancel_rel(r->repl_eqcxt_ka_update);
|
||||
+ r->repl_eqcxt_ka_update = NULL;
|
||||
+ }
|
||||
}
|
||||
|
||||
replica_unlock(r->repl_lock);
|
||||
@@ -2119,6 +2147,17 @@ _replica_init_from_config(Replica *r, Slapi_Entry *e, char *errortext)
|
||||
r->tombstone_reap_interval = 3600 * 24; /* One week, in seconds */
|
||||
}
|
||||
|
||||
+ if ((val = (char*)slapi_entry_attr_get_ref(e, type_replicaKeepAliveUpdateInterval))) {
|
||||
+ if (repl_config_valid_num(type_replicaKeepAliveUpdateInterval, val, REPLICA_KEEPALIVE_UPDATE_INTERVAL_MIN,
|
||||
+ INT_MAX, &rc, errormsg, &interval) != 0)
|
||||
+ {
|
||||
+ return LDAP_UNWILLING_TO_PERFORM;
|
||||
+ }
|
||||
+ r->keepalive_update_interval = interval;
|
||||
+ } else {
|
||||
+ r->keepalive_update_interval = DEFAULT_REPLICA_KEEPALIVE_UPDATE_INTERVAL;
|
||||
+ }
|
||||
+
|
||||
r->tombstone_reap_stop = r->tombstone_reap_active = PR_FALSE;
|
||||
|
||||
/* No supplier holding the replica */
|
||||
@@ -3646,6 +3685,26 @@ replica_set_tombstone_reap_interval(Replica *r, long interval)
|
||||
replica_unlock(r->repl_lock);
|
||||
}
|
||||
|
||||
+void
|
||||
+replica_set_keepalive_update_interval(Replica *r, int64_t interval)
|
||||
+{
|
||||
+ replica_lock(r->repl_lock);
|
||||
+ r->keepalive_update_interval = interval;
|
||||
+ replica_unlock(r->repl_lock);
|
||||
+}
|
||||
+
|
||||
+int64_t
|
||||
+replica_get_keepalive_update_interval(Replica *r)
|
||||
+{
|
||||
+ int64_t interval = DEFAULT_REPLICA_KEEPALIVE_UPDATE_INTERVAL;
|
||||
+
|
||||
+ replica_lock(r->repl_lock);
|
||||
+ interval = r->keepalive_update_interval;
|
||||
+ replica_unlock(r->repl_lock);
|
||||
+
|
||||
+ return interval;
|
||||
+}
|
||||
+
|
||||
static void
|
||||
replica_strip_cleaned_rids(Replica *r)
|
||||
{
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
|
||||
index 2c6d74b13..aea2cf506 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
|
||||
@@ -438,6 +438,9 @@ replica_config_modify(Slapi_PBlock *pb,
|
||||
} else if (strcasecmp(config_attr, type_replicaBackoffMax) == 0) {
|
||||
if (apply_mods)
|
||||
replica_set_backoff_max(r, PROTOCOL_BACKOFF_MAXIMUM);
|
||||
+ } else if (strcasecmp(config_attr, type_replicaKeepAliveUpdateInterval) == 0) {
|
||||
+ if (apply_mods)
|
||||
+ replica_set_keepalive_update_interval(r, DEFAULT_REPLICA_KEEPALIVE_UPDATE_INTERVAL);
|
||||
} else if (strcasecmp(config_attr, type_replicaPrecisePurge) == 0) {
|
||||
if (apply_mods)
|
||||
replica_set_precise_purging(r, 0);
|
||||
@@ -472,6 +475,15 @@ replica_config_modify(Slapi_PBlock *pb,
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
+ } else if (strcasecmp(config_attr, type_replicaKeepAliveUpdateInterval) == 0) {
|
||||
+ int64_t interval = DEFAULT_REPLICA_KEEPALIVE_UPDATE_INTERVAL;
|
||||
+ if (repl_config_valid_num(config_attr, config_attr_value, REPLICA_KEEPALIVE_UPDATE_INTERVAL_MIN,
|
||||
+ INT_MAX, returncode, errortext, &interval) == 0)
|
||||
+ {
|
||||
+ replica_set_keepalive_update_interval(r, interval);
|
||||
+ } else {
|
||||
+ break;
|
||||
+ }
|
||||
} else if (strcasecmp(config_attr, attr_replicaType) == 0) {
|
||||
int64_t rtype;
|
||||
slapi_ch_free_string(&new_repl_type);
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
|
||||
index f67263c3e..4b2064912 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
|
||||
@@ -510,7 +510,7 @@ retry:
|
||||
if (prp->replica) {
|
||||
rid = replica_get_rid(prp->replica);
|
||||
}
|
||||
- replica_subentry_check(area_sdn, rid);
|
||||
+ replica_subentry_check(slapi_sdn_get_dn(area_sdn), rid);
|
||||
|
||||
/* Send the subtree of the suffix in the order of parentid index plus ldapsubentry and nstombstone. */
|
||||
check_suffix_entryID(be, suffix);
|
||||
@@ -531,7 +531,7 @@ retry:
|
||||
if (prp->replica) {
|
||||
rid = replica_get_rid(prp->replica);
|
||||
}
|
||||
- replica_subentry_check(area_sdn, rid);
|
||||
+ replica_subentry_check(slapi_sdn_get_dn(area_sdn), rid);
|
||||
|
||||
slapi_search_internal_set_pb(pb, slapi_sdn_get_dn(area_sdn),
|
||||
LDAP_SCOPE_SUBTREE, "(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))", NULL, 0, ctrls, NULL,
|
||||
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
|
||||
index ef2025dd9..8b178610b 100644
|
||||
--- a/ldap/servers/plugins/replication/repl_extop.c
|
||||
+++ b/ldap/servers/plugins/replication/repl_extop.c
|
||||
@@ -1176,7 +1176,7 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
|
||||
/* now that the changelog is open and started, we can alos cretae the
|
||||
* keep alive entry without risk that db and cl will not match
|
||||
*/
|
||||
- replica_subentry_check((Slapi_DN *)replica_get_root(r), replica_get_rid(r));
|
||||
+ replica_subentry_check(slapi_sdn_get_dn(replica_get_root(r)), replica_get_rid(r));
|
||||
}
|
||||
|
||||
/* ONREPL code that dealt with new RUV, etc was moved into the code
|
||||
diff --git a/ldap/servers/plugins/replication/repl_globals.c b/ldap/servers/plugins/replication/repl_globals.c
|
||||
index 000777fdd..797ca957f 100644
|
||||
--- a/ldap/servers/plugins/replication/repl_globals.c
|
||||
+++ b/ldap/servers/plugins/replication/repl_globals.c
|
||||
@@ -89,6 +89,7 @@ const char *type_replicaReleaseTimeout = "nsds5ReplicaReleaseTimeout";
|
||||
const char *type_replicaBackoffMin = "nsds5ReplicaBackoffMin";
|
||||
const char *type_replicaBackoffMax = "nsds5ReplicaBackoffMax";
|
||||
const char *type_replicaPrecisePurge = "nsds5ReplicaPreciseTombstonePurging";
|
||||
+const char *type_replicaKeepAliveUpdateInterval = "nsds5ReplicaKeepAliveUpdateInterval";
|
||||
|
||||
/* Attribute names for replication agreement attributes */
|
||||
const char *type_nsds5ReplicaHost = "nsds5ReplicaHost";
|
||||
diff --git a/src/cockpit/389-console/src/lib/replication/replConfig.jsx b/src/cockpit/389-console/src/lib/replication/replConfig.jsx
|
||||
index 1f0dc3ec5..3dffb8f1a 100644
|
||||
--- a/src/cockpit/389-console/src/lib/replication/replConfig.jsx
|
||||
+++ b/src/cockpit/389-console/src/lib/replication/replConfig.jsx
|
||||
@@ -48,6 +48,7 @@ export class ReplConfig extends React.Component {
|
||||
nsds5replicaprotocoltimeout: Number(this.props.data.nsds5replicaprotocoltimeout) == 0 ? 120 : Number(this.props.data.nsds5replicaprotocoltimeout),
|
||||
nsds5replicabackoffmin: Number(this.props.data.nsds5replicabackoffmin) == 0 ? 3 : Number(this.props.data.nsds5replicabackoffmin),
|
||||
nsds5replicabackoffmax: Number(this.props.data.nsds5replicabackoffmax) == 0 ? 300 : Number(this.props.data.nsds5replicabackoffmax),
|
||||
+ nsds5replicakeepaliveupdateinterval: Number(this.props.data.nsds5replicakeepaliveupdateinterval) == 0 ? 3600 : Number(this.props.data.nsds5replicakeepaliveupdateinterval),
|
||||
// Original settings
|
||||
_nsds5replicabinddn: this.props.data.nsds5replicabinddn,
|
||||
_nsds5replicabinddngroup: this.props.data.nsds5replicabinddngroup,
|
||||
@@ -59,6 +60,7 @@ export class ReplConfig extends React.Component {
|
||||
_nsds5replicaprotocoltimeout: Number(this.props.data.nsds5replicaprotocoltimeout) == 0 ? 120 : Number(this.props.data.nsds5replicaprotocoltimeout),
|
||||
_nsds5replicabackoffmin: Number(this.props.data.nsds5replicabackoffmin) == 0 ? 3 : Number(this.props.data.nsds5replicabackoffmin),
|
||||
_nsds5replicabackoffmax: Number(this.props.data.nsds5replicabackoffmax) == 0 ? 300 : Number(this.props.data.nsds5replicabackoffmax),
|
||||
+ _nsds5replicakeepaliveupdateinterval: Number(this.props.data.nsds5replicakeepaliveupdateinterval) == 0 ? 3600 : Number(this.props.data.nsds5replicakeepaliveupdateinterval),
|
||||
};
|
||||
|
||||
this.onToggle = (isExpanded) => {
|
||||
@@ -275,7 +277,7 @@ export class ReplConfig extends React.Component {
|
||||
'nsds5replicapurgedelay', 'nsds5replicatombstonepurgeinterval',
|
||||
'nsds5replicareleasetimeout', 'nsds5replicaprotocoltimeout',
|
||||
'nsds5replicabackoffmin', 'nsds5replicabackoffmax',
|
||||
- 'nsds5replicaprecisetombstonepurging'
|
||||
+ 'nsds5replicaprecisetombstonepurging', 'nsds5replicakeepaliveupdateinterval',
|
||||
];
|
||||
// Check if a setting was changed, if so enable the save button
|
||||
for (const config_attr of config_attrs) {
|
||||
@@ -301,7 +303,7 @@ export class ReplConfig extends React.Component {
|
||||
'nsds5replicapurgedelay', 'nsds5replicatombstonepurgeinterval',
|
||||
'nsds5replicareleasetimeout', 'nsds5replicaprotocoltimeout',
|
||||
'nsds5replicabackoffmin', 'nsds5replicabackoffmax',
|
||||
- 'nsds5replicaprecisetombstonepurging'
|
||||
+ 'nsds5replicaprecisetombstonepurging', 'nsds5replicakeepaliveupdateinterval',
|
||||
];
|
||||
// Check if a setting was changed, if so enable the save button
|
||||
for (const config_attr of config_attrs) {
|
||||
@@ -451,6 +453,9 @@ export class ReplConfig extends React.Component {
|
||||
if (this.state.nsds5replicabinddngroupcheckinterval != this.state._nsds5replicabinddngroupcheckinterval) {
|
||||
cmd.push("--repl-bind-group-interval=" + this.state.nsds5replicabinddngroupcheckinterval);
|
||||
}
|
||||
+ if (this.state.nsds5replicakeepaliveupdateinterval != this.state._nsds5replicakeepaliveupdateinterval) {
|
||||
+ cmd.push("--repl-keepalive-update-interval=" + this.state.nsds5replicakeepaliveupdateinterval);
|
||||
+ }
|
||||
if (this.state.nsds5replicareleasetimeout != this.state._nsds5replicareleasetimeout) {
|
||||
cmd.push("--repl-release-timeout=" + this.state.nsds5replicareleasetimeout);
|
||||
}
|
||||
@@ -786,6 +791,29 @@ export class ReplConfig extends React.Component {
|
||||
/>
|
||||
</GridItem>
|
||||
</Grid>
|
||||
+ <Grid
|
||||
+ title="The interval in seconds that the server will apply an internal update to get the RUV from getting stale. (nsds5replicakeepaliveupdateinterval)."
|
||||
+ className="ds-margin-top"
|
||||
+ >
|
||||
+ <GridItem className="ds-label" span={3}>
|
||||
+ Refresh RUV Interval
|
||||
+ </GridItem>
|
||||
+ <GridItem span={9}>
|
||||
+ <NumberInput
|
||||
+ value={this.state.nsds5replicakeepaliveupdateinterval}
|
||||
+ min={60}
|
||||
+ max={this.maxValue}
|
||||
+ onMinus={() => { this.onMinusConfig("nsds5replicakeepaliveupdateinterval") }}
|
||||
+ onChange={(e) => { this.onConfigChange(e, "nsds5replicakeepaliveupdateinterval", 60) }}
|
||||
+ onPlus={() => { this.onPlusConfig("nsds5replicakeepaliveupdateinterval") }}
|
||||
+ inputName="input"
|
||||
+ inputAriaLabel="number input"
|
||||
+ minusBtnAriaLabel="minus"
|
||||
+ plusBtnAriaLabel="plus"
|
||||
+ widthChars={8}
|
||||
+ />
|
||||
+ </GridItem>
|
||||
+ </Grid>
|
||||
<Grid
|
||||
title="Enables faster tombstone purging (nsds5replicaprecisetombstonepurging)."
|
||||
className="ds-margin-top"
|
||||
diff --git a/src/cockpit/389-console/src/replication.jsx b/src/cockpit/389-console/src/replication.jsx
|
||||
index 28364156a..db9d030db 100644
|
||||
--- a/src/cockpit/389-console/src/replication.jsx
|
||||
+++ b/src/cockpit/389-console/src/replication.jsx
|
||||
@@ -553,6 +553,7 @@ export class Replication extends React.Component {
|
||||
nsds5replicaprotocoltimeout: 'nsds5replicaprotocoltimeout' in config.attrs ? config.attrs.nsds5replicaprotocoltimeout[0] : "",
|
||||
nsds5replicabackoffmin: 'nsds5replicabackoffmin' in config.attrs ? config.attrs.nsds5replicabackoffmin[0] : "",
|
||||
nsds5replicabackoffmax: 'nsds5replicabackoffmax' in config.attrs ? config.attrs.nsds5replicabackoffmax[0] : "",
|
||||
+ nsds5replicakeepaliveupdateinterval: 'nsds5replicakeepaliveupdateinterval' in config.attrs ? config.attrs.nsds5replicakeepaliveupdateinterval[0] : "3600",
|
||||
},
|
||||
suffixSpinning: false,
|
||||
disabled: false,
|
||||
@@ -695,6 +696,11 @@ export class Replication extends React.Component {
|
||||
nsds5replicaprotocoltimeout: 'nsds5replicaprotocoltimeout' in config.attrs ? config.attrs.nsds5replicaprotocoltimeout[0] : "",
|
||||
nsds5replicabackoffmin: 'nsds5replicabackoffmin' in config.attrs ? config.attrs.nsds5replicabackoffmin[0] : "",
|
||||
nsds5replicabackoffmax: 'nsds5replicabackoffmax' in config.attrs ? config.attrs.nsds5replicabackoffmax[0] : "",
|
||||
+ nsds5replicakeepaliveupdateinterval: 'nsds5replicakeepaliveupdateinterval' in config.attrs ? config.attrs.nsds5replicakeepaliveupdateinterval[0] : "3600",
|
||||
+ clMaxEntries: "",
|
||||
+ clMaxAge: "",
|
||||
+ clTrimInt: "",
|
||||
+ clEncrypt: false,
|
||||
}
|
||||
}, this.loadLDIFs);
|
||||
|
||||
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
|
||||
index 0048cd09b..450246b3d 100644
|
||||
--- a/src/lib389/lib389/cli_conf/replication.py
|
||||
+++ b/src/lib389/lib389/cli_conf/replication.py
|
||||
@@ -33,6 +33,7 @@ arg_to_attr = {
|
||||
'repl_backoff_min': 'nsds5replicabackoffmin',
|
||||
'repl_backoff_max': 'nsds5replicabackoffmax',
|
||||
'repl_release_timeout': 'nsds5replicareleasetimeout',
|
||||
+ 'repl_keepalive_update_interval': 'nsds5replicakeepaliveupdateinterval',
|
||||
# Changelog
|
||||
'cl_dir': 'nsslapd-changelogdir',
|
||||
'max_entries': 'nsslapd-changelogmaxentries',
|
||||
@@ -1278,6 +1279,9 @@ def create_parser(subparsers):
|
||||
"while waiting to acquire the consumer. Default is 3 seconds")
|
||||
repl_set_parser.add_argument('--repl-release-timeout', help="A timeout in seconds a replication supplier should send "
|
||||
"updates before it yields its replication session")
|
||||
+ repl_set_parser.add_argument('--repl-keepalive-update-interval', help="Interval in seconds for how often the server will apply "
|
||||
+ "an internal update to keep the RUV from getting stale. "
|
||||
+ "The default is 1 hour (3600 seconds)")
|
||||
|
||||
repl_monitor_parser = repl_subcommands.add_parser('monitor', help='Display the full replication topology report')
|
||||
repl_monitor_parser.set_defaults(func=get_repl_monitor_info)
|
||||
@@ -1289,7 +1293,7 @@ def create_parser(subparsers):
|
||||
repl_monitor_parser.add_argument('-a', '--aliases', nargs="*",
|
||||
help="Enables displaying an alias instead of host:port, if an alias is "
|
||||
"assigned to a host:port combination. The format: alias=host:port")
|
||||
-#
|
||||
+
|
||||
############################################
|
||||
# Replication Agmts
|
||||
############################################
|
||||
--
|
||||
2.37.1
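
Editor's note: the patch above adds both the nsds5ReplicaKeepAliveUpdateInterval attribute and the --repl-keepalive-update-interval option to "dsconf replication set". A minimal lib389 sketch of exercising the new setting follows; it is not part of the imported patch, and the instance name and suffix are placeholders.

# Hypothetical usage sketch: tune the new keep-alive update interval with lib389.
# 'supplier1' and 'dc=example,dc=com' are assumptions for illustration only.
from lib389 import DirSrv
from lib389.replica import Replicas

inst = DirSrv(verbose=False)
inst.local_simple_allocate('supplier1')
inst.open()

replica = Replicas(inst).get('dc=example,dc=com')
# Values below 60 seconds are rejected by the server-side range check added above.
replica.replace('nsds5ReplicaKeepAliveUpdateInterval', '7200')
print(replica.get_attr_val_utf8('nsds5ReplicaKeepAliveUpdateInterval'))
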
|
||||
|
@ -1,113 +0,0 @@
|
||||
From b4a3b88faeafa6aa197d88ee84e4b2dbadd37ace Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 1 Nov 2021 10:42:27 -0400
|
||||
Subject: [PATCH 06/12] Issue 4973 - installer changes permissions on /run
|
||||
|
||||
Description: There was a regression when we switched over to using /run
|
||||
                     that caused the installer to try to create /run, which

|
||||
caused the ownership to change. Fixed this by changing
|
||||
the "run_dir" to /run/dirsrv
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4973
|
||||
|
||||
Reviewed by: jchapman(Thanks!)
|
||||
---
|
||||
ldap/admin/src/defaults.inf.in | 2 +-
|
||||
src/lib389/lib389/instance/remove.py | 10 +---------
|
||||
src/lib389/lib389/instance/setup.py | 13 +++----------
|
||||
3 files changed, 5 insertions(+), 20 deletions(-)
|
||||
|
||||
diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in
|
||||
index e02248b89..92b93d695 100644
|
||||
--- a/ldap/admin/src/defaults.inf.in
|
||||
+++ b/ldap/admin/src/defaults.inf.in
|
||||
@@ -35,7 +35,7 @@ sysconf_dir = @sysconfdir@
|
||||
initconfig_dir = @initconfigdir@
|
||||
config_dir = @instconfigdir@/slapd-{instance_name}
|
||||
local_state_dir = @localstatedir@
|
||||
-run_dir = @localrundir@
|
||||
+run_dir = @localrundir@/dirsrv
|
||||
# This is the expected location of ldapi.
|
||||
ldapi = @localrundir@/slapd-{instance_name}.socket
|
||||
pid_file = @localrundir@/slapd-{instance_name}.pid
|
||||
diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py
|
||||
index 1a35ddc07..e96db3896 100644
|
||||
--- a/src/lib389/lib389/instance/remove.py
|
||||
+++ b/src/lib389/lib389/instance/remove.py
|
||||
@@ -52,9 +52,9 @@ def remove_ds_instance(dirsrv, force=False):
|
||||
remove_paths['ldif_dir'] = dirsrv.ds_paths.ldif_dir
|
||||
remove_paths['lock_dir'] = dirsrv.ds_paths.lock_dir
|
||||
remove_paths['log_dir'] = dirsrv.ds_paths.log_dir
|
||||
- # remove_paths['run_dir'] = dirsrv.ds_paths.run_dir
|
||||
remove_paths['inst_dir'] = dirsrv.ds_paths.inst_dir
|
||||
remove_paths['etc_sysconfig'] = "%s/sysconfig/dirsrv-%s" % (dirsrv.ds_paths.sysconf_dir, dirsrv.serverid)
|
||||
+ remove_paths['ldapi'] = dirsrv.ds_paths.ldapi
|
||||
|
||||
tmpfiles_d_path = dirsrv.ds_paths.tmpfiles_d + "/dirsrv-" + dirsrv.serverid + ".conf"
|
||||
|
||||
@@ -80,14 +80,6 @@ def remove_ds_instance(dirsrv, force=False):
|
||||
|
||||
### ANY NEW REMOVAL ACTION MUST BE BELOW THIS LINE!!!
|
||||
|
||||
- # Remove LDAPI socket file
|
||||
- ldapi_path = os.path.join(dirsrv.ds_paths.run_dir, "slapd-%s.socket" % dirsrv.serverid)
|
||||
- if os.path.exists(ldapi_path):
|
||||
- try:
|
||||
- os.remove(ldapi_path)
|
||||
- except OSError as e:
|
||||
- _log.debug(f"Failed to remove LDAPI socket ({ldapi_path}) Error: {str(e)}")
|
||||
-
|
||||
# Remove these paths:
|
||||
# for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
|
||||
# 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
|
||||
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
|
||||
index 57e7a9fd4..be6854af8 100644
|
||||
--- a/src/lib389/lib389/instance/setup.py
|
||||
+++ b/src/lib389/lib389/instance/setup.py
|
||||
@@ -732,10 +732,6 @@ class SetupDs(object):
|
||||
dse += line.replace('%', '{', 1).replace('%', '}', 1)
|
||||
|
||||
with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
|
||||
- if os.path.exists(os.path.dirname(slapd['ldapi'])):
|
||||
- ldapi_path = slapd['ldapi']
|
||||
- else:
|
||||
- ldapi_path = os.path.join(slapd['run_dir'], "slapd-%s.socket" % slapd['instance_name'])
|
||||
dse_fmt = dse.format(
|
||||
schema_dir=slapd['schema_dir'],
|
||||
lock_dir=slapd['lock_dir'],
|
||||
@@ -759,7 +755,7 @@ class SetupDs(object):
|
||||
db_dir=slapd['db_dir'],
|
||||
db_home_dir=slapd['db_home_dir'],
|
||||
ldapi_enabled="on",
|
||||
- ldapi=ldapi_path,
|
||||
+ ldapi=slapd['ldapi'],
|
||||
ldapi_autobind="on",
|
||||
)
|
||||
file_dse.write(dse_fmt)
|
||||
@@ -861,7 +857,7 @@ class SetupDs(object):
|
||||
SER_ROOT_PW: self._raw_secure_password,
|
||||
SER_DEPLOYED_DIR: slapd['prefix'],
|
||||
SER_LDAPI_ENABLED: 'on',
|
||||
- SER_LDAPI_SOCKET: ldapi_path,
|
||||
+ SER_LDAPI_SOCKET: slapd['ldapi'],
|
||||
SER_LDAPI_AUTOBIND: 'on'
|
||||
}
|
||||
|
||||
@@ -905,13 +901,10 @@ class SetupDs(object):
|
||||
self.log.info("Perform SELinux labeling ...")
|
||||
selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
|
||||
'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir',
|
||||
- 'schema_dir', 'tmp_dir')
|
||||
+ 'run_dir', 'schema_dir', 'tmp_dir')
|
||||
for path in selinux_paths:
|
||||
selinux_restorecon(slapd[path])
|
||||
|
||||
- # Don't run restorecon on the entire /run directory
|
||||
- selinux_restorecon(slapd['run_dir'] + '/dirsrv')
|
||||
-
|
||||
selinux_label_port(slapd['port'])
|
||||
|
||||
# Start the server
|
||||
--
|
||||
2.31.1
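
Editor's note: the run_dir change above can be sanity-checked from lib389. A minimal sketch, assuming an instance named 'localhost' already exists locally; the exact paths shown in the comments are examples, not guaranteed values.

# Sketch only: show where run_dir and the LDAPI socket resolve after this change.
from lib389 import DirSrv

inst = DirSrv(verbose=False)
inst.local_simple_allocate('localhost')
print(inst.ds_paths.run_dir)   # expected to end in /dirsrv, e.g. /run/dirsrv
print(inst.ds_paths.ldapi)     # e.g. /run/slapd-localhost.socket
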
|
||||
|
@ -1,70 +0,0 @@
|
||||
From c26c463ac92682dcf01ddbdc11cc1109b183eb0a Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 1 Nov 2021 16:04:28 -0400
|
||||
Subject: [PATCH 07/12] Issue 4973 - update snmp to use /run/dirsrv for PID
|
||||
file
|
||||
|
||||
Description: Previously SNMP would write the agent PID file directly
|
||||
under /run (or /var/run), but this broke a CI test after
|
||||
updating lib389/defaults.inf to use /run/dirsrv.
|
||||
|
||||
Instead of hacking the CI test, I changed the path
|
||||
                     snmp uses to /run/dirsrv/, which is where it
|
||||
should really be written anyway.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4973
|
||||
|
||||
Reviewed by: vashirov(Thanks!)
|
||||
---
|
||||
ldap/servers/snmp/main.c | 4 ++--
|
||||
wrappers/systemd-snmp.service.in | 6 +++---
|
||||
2 files changed, 5 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c
|
||||
index e6271a8a9..d8eb918f6 100644
|
||||
--- a/ldap/servers/snmp/main.c
|
||||
+++ b/ldap/servers/snmp/main.c
|
||||
@@ -287,14 +287,14 @@ load_config(char *conf_path)
|
||||
}
|
||||
|
||||
/* set pidfile path */
|
||||
- if ((pidfile = malloc(strlen(LOCALRUNDIR) + strlen("/") +
|
||||
+ if ((pidfile = malloc(strlen(LOCALRUNDIR) + strlen("/dirsrv/") +
|
||||
strlen(LDAP_AGENT_PIDFILE) + 1)) != NULL) {
|
||||
strncpy(pidfile, LOCALRUNDIR, strlen(LOCALRUNDIR) + 1);
|
||||
/* The above will likely not be NULL terminated, but we need to
|
||||
* be sure that we're properly NULL terminated for the below
|
||||
* strcat() to work properly. */
|
||||
pidfile[strlen(LOCALRUNDIR)] = (char)0;
|
||||
- strcat(pidfile, "/");
|
||||
+ strcat(pidfile, "/dirsrv/");
|
||||
strcat(pidfile, LDAP_AGENT_PIDFILE);
|
||||
} else {
|
||||
printf("ldap-agent: malloc error processing config file\n");
|
||||
diff --git a/wrappers/systemd-snmp.service.in b/wrappers/systemd-snmp.service.in
|
||||
index 477bc623d..f18766cb4 100644
|
||||
--- a/wrappers/systemd-snmp.service.in
|
||||
+++ b/wrappers/systemd-snmp.service.in
|
||||
@@ -1,7 +1,7 @@
|
||||
# do not edit this file in /lib/systemd/system - instead do the following:
|
||||
# cp /lib/systemd/system/dirsrv-snmp.service /etc/systemd/system/
|
||||
# edit /etc/systemd/system/dirsrv-snmp.service
|
||||
-# systemctl daemon-reload
|
||||
+# systemctl daemon-reload
|
||||
# systemctl (re)start dirsrv-snmp.service
|
||||
[Unit]
|
||||
Description=@capbrand@ Directory Server SNMP Subagent.
|
||||
@@ -9,8 +9,8 @@ After=network.target
|
||||
|
||||
[Service]
|
||||
Type=forking
|
||||
-PIDFile=/run/ldap-agent.pid
|
||||
-ExecStart=@sbindir@/ldap-agent @configdir@/ldap-agent.conf
|
||||
+PIDFile=/run/dirsrv/ldap-agent.pid
|
||||
+ExecStart=@sbindir@/ldap-agent @configdir@/ldap-agent.conf
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
--
|
||||
2.31.1
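
Editor's note: the string handling in the C hunk above amounts to the following path construction. This is only a readable restatement; the @localrundir@ build substitution is assumed to be /run, and the PID file name is taken from the PIDFile line in the unit file above.

# Readable restatement (sketch) of the pidfile path built in load_config() above.
import os

LOCALRUNDIR = "/run"                    # assumed value of @localrundir@
LDAP_AGENT_PIDFILE = "ldap-agent.pid"   # implied by PIDFile=/run/dirsrv/ldap-agent.pid

pidfile = os.path.join(LOCALRUNDIR, "dirsrv", LDAP_AGENT_PIDFILE)
print(pidfile)                          # /run/dirsrv/ldap-agent.pid
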
|
||||
|
@ -0,0 +1,79 @@
|
||||
From 108dd02791da19915beb29c872516c52a74fc637 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Fri, 5 Aug 2022 10:08:45 -0700
|
||||
Subject: [PATCH 3/5] Issue 5399 - UI - LDAP Editor is not updated when we
|
||||
switch instances (#5400)
|
||||
|
||||
Description: We don't refresh LDAP Editor when we switch instances.
|
||||
It may lead to unpleasant errors.
|
||||
|
||||
Add componentDidUpdate function with the appropriate processing and
|
||||
properties.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/5399
|
||||
|
||||
Reviewed by: @mreynolds389 (Thanks!)
|
||||
---
|
||||
src/cockpit/389-console/src/LDAPEditor.jsx | 19 +++++++++++++++++++
|
||||
src/cockpit/389-console/src/ds.jsx | 1 +
|
||||
2 files changed, 20 insertions(+)
|
||||
|
||||
diff --git a/src/cockpit/389-console/src/LDAPEditor.jsx b/src/cockpit/389-console/src/LDAPEditor.jsx
|
||||
index 70324be39..04fc97d41 100644
|
||||
--- a/src/cockpit/389-console/src/LDAPEditor.jsx
|
||||
+++ b/src/cockpit/389-console/src/LDAPEditor.jsx
|
||||
@@ -60,6 +60,7 @@ export class LDAPEditor extends React.Component {
|
||||
|
||||
this.state = {
|
||||
activeTabKey: 0,
|
||||
+ firstLoad: true,
|
||||
keyIndex: 0,
|
||||
suffixList: [],
|
||||
changeLayout: false,
|
||||
@@ -249,6 +250,12 @@ export class LDAPEditor extends React.Component {
|
||||
baseDn: this.state.baseDN
|
||||
};
|
||||
|
||||
+ if (this.state.firstLoad) {
|
||||
+ this.setState({
|
||||
+ firstLoad: false
|
||||
+ });
|
||||
+ }
|
||||
+
|
||||
this.setState({
|
||||
searching: true,
|
||||
loading: refresh
|
||||
@@ -361,6 +368,18 @@ export class LDAPEditor extends React.Component {
|
||||
});
|
||||
}
|
||||
|
||||
+ componentDidUpdate(prevProps) {
|
||||
+ if (this.props.wasActiveList.includes(7)) {
|
||||
+ if (this.state.firstLoad) {
|
||||
+ this.handleReload(true);
|
||||
+ } else {
|
||||
+ if (this.props.serverId !== prevProps.serverId) {
|
||||
+ this.handleReload(true);
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
getPageData (page, perPage) {
|
||||
if (page === 1) {
|
||||
const pagedRows = this.state.rows.slice(0, 2 * perPage); // Each parent has a single child.
|
||||
diff --git a/src/cockpit/389-console/src/ds.jsx b/src/cockpit/389-console/src/ds.jsx
|
||||
index e88915e41..de4385292 100644
|
||||
--- a/src/cockpit/389-console/src/ds.jsx
|
||||
+++ b/src/cockpit/389-console/src/ds.jsx
|
||||
@@ -764,6 +764,7 @@ export class DSInstance extends React.Component {
|
||||
key="ldap-editor"
|
||||
addNotification={this.addNotification}
|
||||
serverId={this.state.serverId}
|
||||
+ wasActiveList={this.state.wasActiveList}
|
||||
setPageSectionVariant={this.setPageSectionVariant}
|
||||
/>
|
||||
</Tab>
|
||||
--
|
||||
2.37.1
|
||||
|
@ -1,70 +0,0 @@
|
||||
From 88d6ceb18e17c5a18bafb5092ae0c22241b212df Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 1 Nov 2021 14:01:11 -0400
|
||||
Subject: [PATCH 08/12] Issue 4978 - make installer robust
|
||||
|
||||
Description: When run in a container the server can fail to start
|
||||
because the installer sets the db_home_dir to /dev/shm,
|
||||
but in containers the default size of /dev/shm is too
|
||||
small for libdb. We should detect if we are in a
|
||||
container and not set db_home_dir to /dev/shm.
|
||||
|
||||
During instance removal, if an instance was not properly
|
||||
                     created then it cannot be removed either. Make the
|
||||
uninstall more robust to accept some errors and continue
|
||||
removing the instance.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4978
|
||||
|
||||
Reviewed by: firstyear & tbordaz(Thanks!)
|
||||
---
|
||||
src/lib389/lib389/instance/setup.py | 9 +++++++++
|
||||
src/lib389/lib389/utils.py | 5 ++++-
|
||||
2 files changed, 13 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
|
||||
index be6854af8..7b0147cf9 100644
|
||||
--- a/src/lib389/lib389/instance/setup.py
|
||||
+++ b/src/lib389/lib389/instance/setup.py
|
||||
@@ -731,6 +731,15 @@ class SetupDs(object):
|
||||
for line in template_dse.readlines():
|
||||
dse += line.replace('%', '{', 1).replace('%', '}', 1)
|
||||
|
||||
+ # Check if we are in a container, if so don't use /dev/shm for the db home dir
|
||||
+ # as containers typically don't allocate enough space for dev/shm and we don't
|
||||
+ # want to unexpectedly break the server after an upgrade
|
||||
+ container_result = subprocess.run(["systemd-detect-virt", "-c"], capture_output=True)
|
||||
+ if container_result.returncode == 0:
|
||||
+ # In a container, set the db_home_dir to the db path
|
||||
+ self.log.debug("Container detected setting db home directory to db directory.")
|
||||
+ slapd['db_home_dir'] = slapd['db_dir']
|
||||
+
|
||||
with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
|
||||
dse_fmt = dse.format(
|
||||
schema_dir=slapd['schema_dir'],
|
||||
diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py
|
||||
index 5ba0c6676..c63b4d0ee 100644
|
||||
--- a/src/lib389/lib389/utils.py
|
||||
+++ b/src/lib389/lib389/utils.py
|
||||
@@ -266,6 +266,8 @@ def selinux_label_port(port, remove_label=False):
|
||||
:type remove_label: boolean
|
||||
:raises: ValueError: Error message
|
||||
"""
|
||||
+ if port is None:
|
||||
+ return
|
||||
try:
|
||||
import selinux
|
||||
except ImportError:
|
||||
@@ -662,7 +664,8 @@ def isLocalHost(host_name):
|
||||
Uses gethostbyname()
|
||||
"""
|
||||
# first see if this is a "well known" local hostname
|
||||
- if host_name == 'localhost' or \
|
||||
+ if host_name is None or \
|
||||
+ host_name == 'localhost' or \
|
||||
host_name == 'localhost.localdomain' or \
|
||||
host_name == socket.gethostname():
|
||||
return True
|
||||
--
|
||||
2.31.1
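
Editor's note: the container check added to setup.py above boils down to the small standalone sketch below. It uses the stdout=subprocess.PIPE form that a later patch in this series switches to for portability; it is an illustration, not part of the patch.

# Standalone sketch of the container detection used above:
# systemd-detect-virt -c exits 0 when running inside a container.
import subprocess

def in_container():
    result = subprocess.run(["systemd-detect-virt", "-c"],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.DEVNULL)
    return result.returncode == 0

if in_container():
    # Mirror the patch: keep db_home_dir on the db directory instead of /dev/shm.
    print("container detected")
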
|
||||
|
49
SOURCES/0008-Issue-5397-Fix-various-memory-leaks.patch
Normal file
@ -0,0 +1,49 @@
|
||||
From 877df07df2e41988a797778b132935b7d8acfd87 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Fri, 5 Aug 2022 14:07:18 -0400
|
||||
Subject: [PATCH 4/5] Issue 5397 - Fix various memory leaks
|
||||
|
||||
Description:
|
||||
|
||||
Fixed memory leaks in:
|
||||
|
||||
- Filter optimizer introduced sr_norm_filter_intent which dupped a filter
|
||||
but never freed it.
|
||||
- Replication connections would leak the replication manager's
|
||||
credentials.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/5397
|
||||
|
||||
Reviewed by: progier & jchapman (Thanks!!)
|
||||
---
|
||||
ldap/servers/plugins/replication/repl5_connection.c | 1 +
|
||||
ldap/servers/slapd/back-ldbm/ldbm_search.c | 1 +
|
||||
2 files changed, 2 insertions(+)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c
|
||||
index b6bc21c46..be8bba08e 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_connection.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_connection.c
|
||||
@@ -247,6 +247,7 @@ conn_delete_internal(Repl_Connection *conn)
|
||||
slapi_ch_free_string(&conn->last_ldap_errmsg);
|
||||
slapi_ch_free((void **)&conn->hostname);
|
||||
slapi_ch_free((void **)&conn->binddn);
|
||||
+ slapi_ch_free((void **)&conn->creds);
|
||||
slapi_ch_free((void **)&conn->plain);
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
|
||||
index d0f52b6f7..771c35a33 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
|
||||
@@ -1930,6 +1930,7 @@ delete_search_result_set(Slapi_PBlock *pb, back_search_result_set **sr)
|
||||
rc, filt_errs);
|
||||
}
|
||||
slapi_filter_free((*sr)->sr_norm_filter, 1);
|
||||
+ slapi_filter_free((*sr)->sr_norm_filter_intent, 1);
|
||||
memset(*sr, 0, sizeof(back_search_result_set));
|
||||
slapi_ch_free((void **)sr);
|
||||
return;
|
||||
--
|
||||
2.37.1
|
||||
|
@ -0,0 +1,60 @@
|
||||
From 27f0c60a54514773e3ffaa09cfbb71c350f44143 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Sat, 6 Aug 2022 14:03:16 -0400
|
||||
Subject: [PATCH 5/5] Issue 3903 - keep alive update event starts too soon
|
||||
|
||||
Description: The keep alive update needs a little more time before it starts, to
|
||||
             allow the changelog and other replication protocols to start up
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/3903
|
||||
|
||||
Reviewed by: tbordaz (Thanks!)
|
||||
---
|
||||
dirsrvtests/tests/suites/replication/regression_m2_test.py | 5 +++++
|
||||
ldap/servers/plugins/replication/repl5_replica.c | 7 ++++---
|
||||
2 files changed, 9 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
index 7dd0f2984..bbf9c8486 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
@@ -821,6 +821,11 @@ def test_keepalive_entries(topo_m2):
|
||||
assert keep_alive_s1 != str(entries[0].data['keepalivetimestamp'])
|
||||
assert keep_alive_s2 != str(entries[1].data['keepalivetimestamp'])
|
||||
|
||||
+ # Test replication
|
||||
+ supplier = topo_m2.ms['supplier1']
|
||||
+ replica = Replicas(supplier).get(DEFAULT_SUFFIX)
|
||||
+ assert replica.test_replication([topo_m2.ms['supplier2']])
|
||||
+
|
||||
|
||||
@pytest.mark.ds49915
|
||||
@pytest.mark.bz1626375
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
|
||||
index ded4cf754..fa6419262 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_replica.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_replica.c
|
||||
@@ -239,7 +239,7 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation,
|
||||
/* create supplier update event */
|
||||
if (r->repl_eqcxt_ka_update == NULL && replica_get_type(r) == REPLICA_TYPE_UPDATABLE) {
|
||||
r->repl_eqcxt_ka_update = slapi_eq_repeat_rel(replica_subentry_update, r,
|
||||
- slapi_current_rel_time_t() + START_UPDATE_DELAY,
|
||||
+ slapi_current_rel_time_t() + 30,
|
||||
replica_get_keepalive_update_interval(r));
|
||||
}
|
||||
|
||||
@@ -415,8 +415,9 @@ replica_subentry_create(const char *repl_root, ReplicaId rid)
|
||||
int return_value;
|
||||
int rc = 0;
|
||||
|
||||
- entry_string = slapi_ch_smprintf("dn: cn=%s %d,%s\nobjectclass: top\nobjectclass: ldapsubentry\nobjectclass: extensibleObject\ncn: %s %d",
|
||||
- KEEP_ALIVE_ENTRY, rid, repl_root, KEEP_ALIVE_ENTRY, rid);
|
||||
+ entry_string = slapi_ch_smprintf("dn: cn=%s %d,%s\nobjectclass: top\nobjectclass: ldapsubentry\n"
|
||||
+ "objectclass: extensibleObject\n%s: 0\ncn: %s %d",
|
||||
+ KEEP_ALIVE_ENTRY, rid, repl_root, KEEP_ALIVE_ATTR, KEEP_ALIVE_ENTRY, rid);
|
||||
if (entry_string == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
|
||||
"replica_subentry_create - Failed in slapi_ch_smprintf\n");
|
||||
--
|
||||
2.37.1
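
Editor's note: the keep alive entries touched by this patch can be inspected directly over LDAP. A hedged sketch follows; the instance name and suffix are placeholders, and the cn filter assumes the "repl keep alive <rid>" naming used for these subentries.

# Sketch: list the keep alive entries and their timestamp attribute.
import ldap
from lib389 import DirSrv

inst = DirSrv(verbose=False)
inst.local_simple_allocate('supplier1')
inst.open()

results = inst.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                        '(&(objectclass=ldapsubentry)(cn=repl keep alive*))')
for dn, attrs in results:
    # Attribute name case may vary in the result set.
    print(dn, attrs.get('keepalivetimestamp', attrs.get('keepAliveTimestamp')))
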
|
||||
|
@ -1,468 +0,0 @@
|
||||
From 2ae2f53756b6f13e2816bb30812740cb7ad97403 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Fri, 5 Nov 2021 09:56:43 +0100
|
||||
Subject: [PATCH 09/12] Issue 4972 - gecos with IA5 introduces a compatibility
|
||||
issue with previous (#4981)
|
||||
|
||||
releases where it was DirectoryString
|
||||
|
||||
Bug description:
|
||||
For years 'gecos' was DirectoryString (UTF8), with #50933 it was restricted to IA5 (ascii)
|
||||
https://github.com/389ds/389-ds-base/commit/0683bcde1b667b6d0ca6e8d1ef605f17c51ea2f7#
|
||||
|
||||
	The IA5 definition conforms to RFC 2307 but is a problem for existing deployments
|
||||
where entries can have 'gecos' attribute value with UTF8.
|
||||
|
||||
Fix description:
|
||||
	Revert the definition of 'gecos' to Directory String
|
||||
|
||||
Additional fix to make test_replica_backup_and_restore more
|
||||
robust to CI
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4972
|
||||
|
||||
Reviewed by: William Brown, Pierre Rogier, James Chapman (Thanks !)
|
||||
|
||||
Platforms tested: F34
|
||||
---
|
||||
.../tests/suites/schema/schema_test.py | 398 +++++++++++++++++-
|
||||
ldap/schema/10rfc2307compat.ldif | 6 +-
|
||||
2 files changed, 400 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/schema/schema_test.py b/dirsrvtests/tests/suites/schema/schema_test.py
|
||||
index d590624b6..5d62b8d59 100644
|
||||
--- a/dirsrvtests/tests/suites/schema/schema_test.py
|
||||
+++ b/dirsrvtests/tests/suites/schema/schema_test.py
|
||||
@@ -18,8 +18,12 @@ import pytest
|
||||
import six
|
||||
from ldap.cidict import cidict
|
||||
from ldap.schema import SubSchema
|
||||
+from lib389.schema import SchemaLegacy
|
||||
from lib389._constants import *
|
||||
-from lib389.topologies import topology_st
|
||||
+from lib389.topologies import topology_st, topology_m2 as topo_m2
|
||||
+from lib389.idm.user import UserAccounts, UserAccount
|
||||
+from lib389.replica import ReplicationManager
|
||||
+from lib389.utils import ensure_bytes
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
@@ -165,6 +169,398 @@ def test_schema_comparewithfiles(topology_st):
|
||||
|
||||
log.info('test_schema_comparewithfiles: PASSED')
|
||||
|
||||
+def test_gecos_directoryString(topology_st):
|
||||
+ """Check that gecos supports directoryString value
|
||||
+
|
||||
+ :id: aee422bb-6299-4124-b5cd-d7393dac19d3
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Add a common user
|
||||
+ 2. replace gecos with a direstoryString value
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ """
|
||||
+
|
||||
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
+
|
||||
+ user_properties = {
|
||||
+ 'uid': 'testuser',
|
||||
+ 'cn' : 'testuser',
|
||||
+ 'sn' : 'user',
|
||||
+ 'uidNumber' : '1000',
|
||||
+ 'gidNumber' : '2000',
|
||||
+ 'homeDirectory' : '/home/testuser',
|
||||
+ }
|
||||
+ testuser = users.create(properties=user_properties)
|
||||
+
|
||||
+ # Add a gecos UTF value
|
||||
+ testuser.replace('gecos', 'Hélène')
|
||||
+
|
||||
+def test_gecos_mixed_definition_topo(topo_m2, request):
|
||||
+ """Check that replication is still working if schema contains
|
||||
+ definitions that does not conform with a replicated entry
|
||||
+
|
||||
+ :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
|
||||
+ :setup: Two suppliers replication setup
|
||||
+ :steps:
|
||||
+ 1. Create a testuser on M1
|
||||
+ 2 Stop M1 and M2
|
||||
+ 3 Change gecos def on M2 to be IA5
|
||||
+ 4 Update testuser with gecos directoryString value
|
||||
+ 5 Check replication is still working
|
||||
+ :expectedresults:
|
||||
+ 1. success
|
||||
+ 2. success
|
||||
+ 3. success
|
||||
+ 4. success
|
||||
+ 5. success
|
||||
+
|
||||
+ """
|
||||
+
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+ m1 = topo_m2.ms["supplier1"]
|
||||
+ m2 = topo_m2.ms["supplier2"]
|
||||
+
|
||||
+
|
||||
+ # create a test user
|
||||
+ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
|
||||
+ testuser = UserAccount(m1, testuser_dn)
|
||||
+ try:
|
||||
+ testuser.create(properties={
|
||||
+ 'uid': 'testuser',
|
||||
+ 'cn': 'testuser',
|
||||
+ 'sn': 'testuser',
|
||||
+ 'uidNumber' : '1000',
|
||||
+ 'gidNumber' : '2000',
|
||||
+ 'homeDirectory' : '/home/testuser',
|
||||
+ })
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ pass
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # Stop suppliers to update the schema
|
||||
+ m1.stop()
|
||||
+ m2.stop()
|
||||
+
|
||||
+ # on M1: gecos is DirectoryString (default)
|
||||
+ # on M2: gecos is IA5
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
|
||||
+ "'gecos' DESC 'The GECOS field; the common name' " +
|
||||
+ "EQUALITY caseIgnoreIA5Match " +
|
||||
+ "SUBSTR caseIgnoreIA5SubstringsMatch " +
|
||||
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
|
||||
+ "SINGLE-VALUE )\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+
|
||||
+ # start the instances
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+
|
||||
+ # Check that gecos is IA5 on M2
|
||||
+ schema = SchemaLegacy(m2)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
|
||||
+
|
||||
+
|
||||
+ # Add a gecos UTF value on M1
|
||||
+ testuser.replace('gecos', 'Hélène')
|
||||
+
|
||||
+ # Check replication is still working
|
||||
+ testuser.replace('displayName', 'ascii value')
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+ testuser_m2 = UserAccount(m2, testuser_dn)
|
||||
+ assert testuser_m2.exists()
|
||||
+ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
|
||||
+
|
||||
+ def fin():
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+ testuser.delete()
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # on M2 restore a default 99user.ldif
|
||||
+ m2.stop()
|
||||
+ os.remove(m2.schemadir + "/99user.ldif")
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+ m2.start()
|
||||
+ m1.start()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+def test_gecos_directoryString_wins_M1(topo_m2, request):
|
||||
+ """Check that if inital syntax are IA5(M2) and DirectoryString(M1)
|
||||
+ Then directoryString wins when nsSchemaCSN M1 is the greatest
|
||||
+
|
||||
+ :id: ad119fa5-7671-45c8-b2ef-0b28ffb68fdb
|
||||
+ :setup: Two suppliers replication setup
|
||||
+ :steps:
|
||||
+ 1. Create a testuser on M1
|
||||
+ 2 Stop M1 and M2
|
||||
+ 3 Change gecos def on M2 to be IA5
|
||||
+ 4 Start M1 and M2
|
||||
+ 5 Update M1 schema so that M1 has greatest nsSchemaCSN
|
||||
+ 6 Update testuser with gecos directoryString value
|
||||
+ 7 Check replication is still working
|
||||
+ 8 Check gecos is DirectoryString on M1 and M2
|
||||
+ :expectedresults:
|
||||
+ 1. success
|
||||
+ 2. success
|
||||
+ 3. success
|
||||
+ 4. success
|
||||
+ 5. success
|
||||
+ 6. success
|
||||
+ 7. success
|
||||
+ 8. success
|
||||
+
|
||||
+ """
|
||||
+
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+ m1 = topo_m2.ms["supplier1"]
|
||||
+ m2 = topo_m2.ms["supplier2"]
|
||||
+
|
||||
+
|
||||
+ # create a test user
|
||||
+ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
|
||||
+ testuser = UserAccount(m1, testuser_dn)
|
||||
+ try:
|
||||
+ testuser.create(properties={
|
||||
+ 'uid': 'testuser',
|
||||
+ 'cn': 'testuser',
|
||||
+ 'sn': 'testuser',
|
||||
+ 'uidNumber' : '1000',
|
||||
+ 'gidNumber' : '2000',
|
||||
+ 'homeDirectory' : '/home/testuser',
|
||||
+ })
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ pass
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # Stop suppliers to update the schema
|
||||
+ m1.stop()
|
||||
+ m2.stop()
|
||||
+
|
||||
+ # on M1: gecos is DirectoryString (default)
|
||||
+ # on M2: gecos is IA5
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
|
||||
+ "'gecos' DESC 'The GECOS field; the common name' " +
|
||||
+ "EQUALITY caseIgnoreIA5Match " +
|
||||
+ "SUBSTR caseIgnoreIA5SubstringsMatch " +
|
||||
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
|
||||
+ "SINGLE-VALUE )\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+
|
||||
+ # start the instances
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+
|
||||
+ # Check that gecos is IA5 on M2
|
||||
+ schema = SchemaLegacy(m2)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
|
||||
+
|
||||
+
|
||||
+ # update M1 schema to increase its nsschemaCSN
|
||||
+ new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )"
|
||||
+ m1.schema.add_schema('attributetypes', ensure_bytes(new_at))
|
||||
+
|
||||
+ # Add a gecos UTF value on M1
|
||||
+ testuser.replace('gecos', 'Hélène')
|
||||
+
|
||||
+ # Check replication is still working
|
||||
+ testuser.replace('displayName', 'ascii value')
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+ testuser_m2 = UserAccount(m2, testuser_dn)
|
||||
+ assert testuser_m2.exists()
|
||||
+ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
|
||||
+
|
||||
+ # Check that gecos is DirectoryString on M1
|
||||
+ schema = SchemaLegacy(m1)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
|
||||
+
|
||||
+ # Check that gecos is DirectoryString on M2
|
||||
+ schema = SchemaLegacy(m2)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
|
||||
+
|
||||
+ def fin():
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+ testuser.delete()
|
||||
+ m1.schema.del_schema('attributetypes', ensure_bytes(new_at))
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # on M2 restore a default 99user.ldif
|
||||
+ m2.stop()
|
||||
+ os.remove(m2.schemadir + "/99user.ldif")
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+ m2.start()
|
||||
+ m1.start()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+def test_gecos_directoryString_wins_M2(topo_m2, request):
|
||||
+ """Check that if inital syntax are IA5(M2) and DirectoryString(M1)
|
||||
+ Then directoryString wins when nsSchemaCSN M2 is the greatest
|
||||
+
|
||||
+ :id: 2da7f1b1-f86d-4072-a940-ba56d4bc8348
|
||||
+ :setup: Two suppliers replication setup
|
||||
+ :steps:
|
||||
+ 1. Create a testuser on M1
|
||||
+ 2 Stop M1 and M2
|
||||
+ 3 Change gecos def on M2 to be IA5
|
||||
+ 4 Start M1 and M2
|
||||
+ 5 Update M2 schema so that M2 has greatest nsSchemaCSN
|
||||
+ 6 Update testuser on M2 and trigger replication to M1
|
||||
+ 7 Update testuser on M2 with gecos directoryString value
|
||||
+ 8 Check replication is still working
|
||||
+ 9 Check gecos is DirectoryString on M1 and M2
|
||||
+ :expectedresults:
|
||||
+ 1. success
|
||||
+ 2. success
|
||||
+ 3. success
|
||||
+ 4. success
|
||||
+ 5. success
|
||||
+ 6. success
|
||||
+ 7. success
|
||||
+ 8. success
|
||||
+ 9. success
|
||||
+
|
||||
+ """
|
||||
+
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+ m1 = topo_m2.ms["supplier1"]
|
||||
+ m2 = topo_m2.ms["supplier2"]
|
||||
+
|
||||
+
|
||||
+ # create a test user
|
||||
+ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
|
||||
+ testuser = UserAccount(m1, testuser_dn)
|
||||
+ try:
|
||||
+ testuser.create(properties={
|
||||
+ 'uid': 'testuser',
|
||||
+ 'cn': 'testuser',
|
||||
+ 'sn': 'testuser',
|
||||
+ 'uidNumber' : '1000',
|
||||
+ 'gidNumber' : '2000',
|
||||
+ 'homeDirectory' : '/home/testuser',
|
||||
+ })
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ pass
|
||||
+ testuser.replace('displayName', 'to trigger replication M1-> M2')
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # Stop suppliers to update the schema
|
||||
+ m1.stop()
|
||||
+ m2.stop()
|
||||
+
|
||||
+ # on M1: gecos is DirectoryString (default)
|
||||
+ # on M2: gecos is IA5
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
|
||||
+ "'gecos' DESC 'The GECOS field; the common name' " +
|
||||
+ "EQUALITY caseIgnoreIA5Match " +
|
||||
+ "SUBSTR caseIgnoreIA5SubstringsMatch " +
|
||||
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
|
||||
+ "SINGLE-VALUE )\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+
|
||||
+ # start the instances
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+
|
||||
+ # Check that gecos is IA5 on M2
|
||||
+ schema = SchemaLegacy(m2)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
|
||||
+
|
||||
+ # update M2 schema to increase its nsschemaCSN
|
||||
+ new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )"
|
||||
+ m2.schema.add_schema('attributetypes', ensure_bytes(new_at))
|
||||
+
|
||||
+ # update just to trigger replication M2->M1
|
||||
+ # and update of M2 schema
|
||||
+ testuser_m2 = UserAccount(m2, testuser_dn)
|
||||
+ testuser_m2.replace('displayName', 'to trigger replication M2-> M1')
|
||||
+
|
||||
+ # Add a gecos UTF value on M1
|
||||
+ testuser.replace('gecos', 'Hélène')
|
||||
+
|
||||
+ # Check replication is still working
|
||||
+ testuser.replace('displayName', 'ascii value')
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+ assert testuser_m2.exists()
|
||||
+ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
|
||||
+
|
||||
+ # Check that gecos is DirectoryString on M1
|
||||
+ schema = SchemaLegacy(m1)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
|
||||
+
|
||||
+ # Check that gecos is DirectoryString on M2
|
||||
+ schema = SchemaLegacy(m2)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
|
||||
+
|
||||
+ def fin():
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+ testuser.delete()
|
||||
+ m1.schema.del_schema('attributetypes', ensure_bytes(new_at))
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # on M2 restore a default 99user.ldif
|
||||
+ m2.stop()
|
||||
+ os.remove(m2.schemadir + "/99user.ldif")
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+ m2.start()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif
|
||||
index 8ba72e1e3..998b8983b 100644
|
||||
--- a/ldap/schema/10rfc2307compat.ldif
|
||||
+++ b/ldap/schema/10rfc2307compat.ldif
|
||||
@@ -21,9 +21,9 @@ attributeTypes: (
|
||||
attributeTypes: (
|
||||
1.3.6.1.1.1.1.2 NAME 'gecos'
|
||||
DESC 'The GECOS field; the common name'
|
||||
- EQUALITY caseIgnoreIA5Match
|
||||
- SUBSTR caseIgnoreIA5SubstringsMatch
|
||||
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
|
||||
+ EQUALITY caseIgnoreMatch
|
||||
+ SUBSTR caseIgnoreSubstringsMatch
|
||||
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
|
||||
SINGLE-VALUE
|
||||
)
|
||||
attributeTypes: (
|
||||
--
|
||||
2.31.1
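
Editor's note: a quick way to confirm which definition an instance ended up with is the same SchemaLegacy query the new tests above use. A minimal sketch, with the instance name assumed.

# Sketch: verify that 'gecos' is advertised as DirectoryString (syntax ...1.15),
# not IA5 (syntax ...1.26), on a given instance.
from lib389 import DirSrv
from lib389.schema import SchemaLegacy

inst = DirSrv(verbose=False)
inst.local_simple_allocate('standalone1')
inst.open()

schema = SchemaLegacy(inst)
attributetypes = schema.query_attributetype('gecos')
assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
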
|
||||
|
@ -1,32 +0,0 @@
|
||||
From 3909877f12e50556e844bc20e72870a4fa905ada Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Tue, 9 Nov 2021 12:55:28 +0000
|
||||
Subject: [PATCH 10/12] Issue 4997 - Function declaration compiler error on
|
||||
1.4.3
|
||||
|
||||
Bug description: Building the server on the 1.4.3 branch generates a
|
||||
compiler error due to a typo in function declaration.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/4997
|
||||
|
||||
Reviewed by: @jchapman (one line commit rule)
|
||||
---
|
||||
ldap/servers/slapd/slapi-private.h | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
|
||||
index 570765e47..d6d74e8a7 100644
|
||||
--- a/ldap/servers/slapd/slapi-private.h
|
||||
+++ b/ldap/servers/slapd/slapi-private.h
|
||||
@@ -273,7 +273,7 @@ void *csngen_register_callbacks(CSNGen *gen, GenCSNFn genFn, void *genArg, Abort
|
||||
void csngen_unregister_callbacks(CSNGen *gen, void *cookie);
|
||||
|
||||
/* debugging function */
|
||||
-void csngen_dump_state(const CSNGen *gen);
|
||||
+void csngen_dump_state(const CSNGen *gen, int severity);
|
||||
|
||||
/* this function tests csn generator */
|
||||
void csngen_test(void);
|
||||
--
|
||||
2.31.1
|
||||
|
28
SOURCES/0010-Issue-5397-Fix-check-pick-error.patch
Normal file
@ -0,0 +1,28 @@
|
||||
From 1b2cc62c0802af650f80eebcc716b5d5db87030e Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 8 Aug 2022 13:56:49 -0400
|
||||
Subject: [PATCH] Issue 5397 - Fix check pick error
|
||||
|
||||
Description:
|
||||
|
||||
Original commit included a free for a new filter, but that filter was
|
||||
not implemented in 1.4.3
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/ldbm_search.c | 1 -
|
||||
1 file changed, 1 deletion(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
|
||||
index 771c35a33..d0f52b6f7 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
|
||||
@@ -1930,7 +1930,6 @@ delete_search_result_set(Slapi_PBlock *pb, back_search_result_set **sr)
|
||||
rc, filt_errs);
|
||||
}
|
||||
slapi_filter_free((*sr)->sr_norm_filter, 1);
|
||||
- slapi_filter_free((*sr)->sr_norm_filter_intent, 1);
|
||||
memset(*sr, 0, sizeof(back_search_result_set));
|
||||
slapi_ch_free((void **)sr);
|
||||
return;
|
||||
--
|
||||
2.37.1
|
||||
|
@ -1,32 +0,0 @@
|
||||
From 60d570e52465b58167301f64792f5f85cbc85e20 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 10 Nov 2021 08:53:45 -0500
|
||||
Subject: [PATCH 11/12] Issue 4978 - use more portable python command for
|
||||
checking containers
|
||||
|
||||
Description: During the installation check for containers, use arguments
|
||||
             for subprocess.run() that work on all versions of Python
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4978
|
||||
|
||||
Reviewed by: mreynolds(one line commit rule)
|
||||
---
|
||||
src/lib389/lib389/instance/setup.py | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
|
||||
index 7b0147cf9..b23d2deb8 100644
|
||||
--- a/src/lib389/lib389/instance/setup.py
|
||||
+++ b/src/lib389/lib389/instance/setup.py
|
||||
@@ -734,7 +734,7 @@ class SetupDs(object):
|
||||
# Check if we are in a container, if so don't use /dev/shm for the db home dir
|
||||
# as containers typically don't allocate enough space for dev/shm and we don't
|
||||
# want to unexpectedly break the server after an upgrade
|
||||
- container_result = subprocess.run(["systemd-detect-virt", "-c"], capture_output=True)
|
||||
+ container_result = subprocess.run(["systemd-detect-virt", "-c"], stdout=subprocess.PIPE)
|
||||
if container_result.returncode == 0:
|
||||
# In a container, set the db_home_dir to the db path
|
||||
self.log.debug("Container detected setting db home directory to db directory.")
|
||||
--
|
||||
2.31.1
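
Editor's note: the portability point here is that subprocess.run(..., capture_output=True) only exists on Python 3.7 and later, while stdout=subprocess.PIPE behaves the same way on older interpreters such as Python 3.6. A small sketch of the equivalence (capture_output also pipes stderr, which the patch does not need):

# Sketch: both spellings capture stdout; capture_output=True is shorthand
# (Python 3.7+) for passing PIPE for stdout and stderr.
import subprocess

new_style = subprocess.run(["systemd-detect-virt", "-c"], capture_output=True)
old_style = subprocess.run(["systemd-detect-virt", "-c"],
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(new_style.returncode == old_style.returncode)
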
|
||||
|
28
SOURCES/0011-Issue-5397-Fix-check-pick-error-2.patch
Normal file
@ -0,0 +1,28 @@
|
||||
From 1203808f59614f3bace1631cc713dcaa89026dde Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 8 Aug 2022 14:19:36 -0400
|
||||
Subject: [PATCH] Issue 5397 - Fix check pick error #2
|
||||
|
||||
Description:
|
||||
|
||||
Original commit included a free for repl conn creds which does not exist
|
||||
in 1.4.3
|
||||
---
|
||||
ldap/servers/plugins/replication/repl5_connection.c | 1 -
|
||||
1 file changed, 1 deletion(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c
|
||||
index be8bba08e..b6bc21c46 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_connection.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_connection.c
|
||||
@@ -247,7 +247,6 @@ conn_delete_internal(Repl_Connection *conn)
|
||||
slapi_ch_free_string(&conn->last_ldap_errmsg);
|
||||
slapi_ch_free((void **)&conn->hostname);
|
||||
slapi_ch_free((void **)&conn->binddn);
|
||||
- slapi_ch_free((void **)&conn->creds);
|
||||
slapi_ch_free((void **)&conn->plain);
|
||||
}
|
||||
|
||||
--
|
||||
2.37.1
|
||||
|
27
SOURCES/0012-Issue-3903-Fix-another-cherry-pick-error.patch
Normal file
@ -0,0 +1,27 @@
|
||||
From 4e712bcb7ce7bd972515d996b5659fc607e09e2f Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 8 Aug 2022 14:41:47 -0400
|
||||
Subject: [PATCH] Issue 3903 - Fix another cherry-pick error
|
||||
|
||||
Description: erroneous ")" was added to the replica struct which broke
|
||||
the build
|
||||
---
|
||||
ldap/servers/plugins/replication/repl5_replica.c | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
|
||||
index fa6419262..5dab57de4 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_replica.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_replica.c
|
||||
@@ -66,7 +66,7 @@ struct replica
|
||||
uint64_t agmt_count; /* Number of agmts */
|
||||
Slapi_Counter *release_timeout; /* The amount of time to wait before releasing active replica */
|
||||
uint64_t abort_session; /* Abort the current replica session */
|
||||
- int64_t keepalive_update_interval; /* interval to do dummy update to keep RUV fresh */)
|
||||
+ int64_t keepalive_update_interval; /* interval to do dummy update to keep RUV fresh */
|
||||
};
|
||||
|
||||
|
||||
--
|
||||
2.37.1
|
||||
|
@ -1,31 +0,0 @@
|
||||
From 2c6653edef793d46815e6df607e55d68e14fe232 Mon Sep 17 00:00:00 2001
|
||||
From: spike <spike@fedoraproject.org>
|
||||
Date: Fri, 5 Nov 2021 13:56:41 +0100
|
||||
Subject: [PATCH 12/12] Issue 4959 - Invalid /etc/hosts setup can cause
|
||||
isLocalHost to fail.
|
||||
|
||||
Description: Use local_simple_allocate in dsctl so that isLocal is always set properly
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/4959
|
||||
|
||||
Reviewed by: @droideck (Thanks!)
|
||||
---
|
||||
src/lib389/cli/dsctl | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl
|
||||
index b6c42b5cc..d2ea6cd29 100755
|
||||
--- a/src/lib389/cli/dsctl
|
||||
+++ b/src/lib389/cli/dsctl
|
||||
@@ -135,7 +135,7 @@ if __name__ == '__main__':
|
||||
log.error("Unable to access instance information. Are you running as the correct user? (usually dirsrv or root)")
|
||||
sys.exit(1)
|
||||
|
||||
- inst.allocate(insts[0])
|
||||
+ inst.local_simple_allocate(insts[0]['server-id'])
|
||||
log.debug('Instance allocated')
|
||||
|
||||
try:
|
||||
--
|
||||
2.31.1
|
||||
|
@ -1,105 +0,0 @@
|
||||
From d000349089eb15b3476ec302f4279f118336290e Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Thu, 16 Dec 2021 16:13:08 -0500
|
||||
Subject: [PATCH 1/2] CVE-2021-4091 (BZ#2030367) double-free of the virtual
|
||||
attribute context in persistent search
|
||||
|
||||
description:
|
||||
A search is processed by a worker using a private pblock.
|
||||
	If the search is persistent, the worker spawns a thread
|
||||
	and duplicates its private pblock so that the spawned
|
||||
	thread continues to process the persistent search.
|
||||
	The worker then ends the initial search, reinits (frees) its private pblock,
|
||||
	and returns to monitoring the wait_queue.
|
||||
When the persistent search completes, it frees the duplicated
|
||||
pblock.
|
||||
The problem is that private pblock and duplicated pblock
|
||||
	refer to the same structure (pb_vattr_context).
|
||||
	That leads to a double free.
|
||||
|
||||
Fix:
|
||||
	When cloning the pblock (slapi_pblock_clone), make sure
|
||||
	to transfer the references inside the original (private)
|
||||
	pblock to the target (cloned) one.
|
||||
	That includes the pb_vattr_context pointer.
|
||||
|
||||
Reviewed by: Mark Reynolds, James Chapman, Pierre Rogier (Thanks !)
|
||||
---
|
||||
ldap/servers/slapd/connection.c | 8 +++++---
|
||||
ldap/servers/slapd/pblock.c | 14 ++++++++++++--
|
||||
2 files changed, 17 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
|
||||
index e0c1a52d2..fc7ed9c4a 100644
|
||||
--- a/ldap/servers/slapd/connection.c
|
||||
+++ b/ldap/servers/slapd/connection.c
|
||||
@@ -1823,9 +1823,11 @@ connection_threadmain()
|
||||
pthread_mutex_unlock(&(conn->c_mutex));
|
||||
}
|
||||
/* ps_add makes a shallow copy of the pb - so we
|
||||
- * can't free it or init it here - just set operation to NULL.
|
||||
- * ps_send_results will call connection_remove_operation_ext to free it
|
||||
- */
|
||||
+ * can't free it or init it here - just set operation to NULL.
|
||||
+ * ps_send_results will call connection_remove_operation_ext to free it
|
||||
+ * The connection_thread private pblock ('pb') has be cloned and should only
|
||||
+ * be reinit (slapi_pblock_init)
|
||||
+ */
|
||||
slapi_pblock_set(pb, SLAPI_OPERATION, NULL);
|
||||
slapi_pblock_init(pb);
|
||||
} else {
|
||||
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
|
||||
index a64986aeb..c78d1250f 100644
|
||||
--- a/ldap/servers/slapd/pblock.c
|
||||
+++ b/ldap/servers/slapd/pblock.c
|
||||
@@ -292,6 +292,12 @@ _pblock_assert_pb_deprecated(Slapi_PBlock *pblock)
|
||||
}
|
||||
}
|
||||
|
||||
+/* It clones the pblock
|
||||
+ * the content of the source pblock is transfered
|
||||
+ * to the target pblock (returned)
|
||||
+ * The source pblock should not be used for any operation
|
||||
+ * it needs to be reinit (slapi_pblock_init)
|
||||
+ */
|
||||
Slapi_PBlock *
|
||||
slapi_pblock_clone(Slapi_PBlock *pb)
|
||||
{
|
||||
@@ -312,28 +318,32 @@ slapi_pblock_clone(Slapi_PBlock *pb)
|
||||
if (pb->pb_task != NULL) {
|
||||
_pblock_assert_pb_task(new_pb);
|
||||
*(new_pb->pb_task) = *(pb->pb_task);
|
||||
+ memset(pb->pb_task, 0, sizeof(slapi_pblock_task));
|
||||
}
|
||||
if (pb->pb_mr != NULL) {
|
||||
_pblock_assert_pb_mr(new_pb);
|
||||
*(new_pb->pb_mr) = *(pb->pb_mr);
|
||||
+ memset(pb->pb_mr, 0, sizeof(slapi_pblock_matching_rule));
|
||||
}
|
||||
if (pb->pb_misc != NULL) {
|
||||
_pblock_assert_pb_misc(new_pb);
|
||||
*(new_pb->pb_misc) = *(pb->pb_misc);
|
||||
+ memset(pb->pb_misc, 0, sizeof(slapi_pblock_misc));
|
||||
}
|
||||
if (pb->pb_intop != NULL) {
|
||||
_pblock_assert_pb_intop(new_pb);
|
||||
*(new_pb->pb_intop) = *(pb->pb_intop);
|
||||
- /* set pwdpolicy to NULL so this clone allocates its own policy */
|
||||
- new_pb->pb_intop->pwdpolicy = NULL;
|
||||
+ memset(pb->pb_intop, 0, sizeof(slapi_pblock_intop));
|
||||
}
|
||||
if (pb->pb_intplugin != NULL) {
|
||||
_pblock_assert_pb_intplugin(new_pb);
|
||||
*(new_pb->pb_intplugin) = *(pb->pb_intplugin);
|
||||
+ memset(pb->pb_intplugin, 0,sizeof(slapi_pblock_intplugin));
|
||||
}
|
||||
if (pb->pb_deprecated != NULL) {
|
||||
_pblock_assert_pb_deprecated(new_pb);
|
||||
*(new_pb->pb_deprecated) = *(pb->pb_deprecated);
|
||||
+ memset(pb->pb_deprecated, 0, sizeof(slapi_pblock_deprecated));
|
||||
}
|
||||
#ifdef PBLOCK_ANALYTICS
|
||||
new_pb->analytics = NULL;
|
||||
--
|
||||
2.31.1
|
||||
|
@ -0,0 +1,508 @@
|
||||
From 508a6dd02986024b03eeef62d135f7e16b0c85e9 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 2 Jun 2022 16:57:07 -0400
Subject: [PATCH 1/4] Issue 5329 - Improve replication extended op logging

Description:

We need logging around parsing of the extended operation payload; right
now, when it fails, we have no idea why.

relates: https://github.com/389ds/389-ds-base/issues/5329

Reviewed by: progier, firstyear, and spichugi (Thanks!!!)
---
 ldap/servers/plugins/replication/repl_extop.c | 207 +++++++++++++++++-
 ldap/servers/slapd/slapi2runtime.c            |   1 -
 2 files changed, 197 insertions(+), 11 deletions(-)

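The logging pattern this patch applies — always report a decode failure with enough context to see why, and gate the more verbose success-path tracing on the replication log level — can be sketched generically in C. The logger below is hypothetical; the real code uses slapi_log_err() and slapi_is_loglevel_set(SLAPI_LOG_REPL).

/* Hypothetical sketch of level-gated logging; not the slapd logging API. */
#include <stdarg.h>
#include <stdio.h>

enum { LOG_ERR = 0, LOG_REPL = 1 };
static int g_loglevel = LOG_REPL; /* assumed runtime configuration */

static int
loglevel_is_set(int level)
{
    return level <= g_loglevel;
}

static void
log_msg(int level, const char *func, const char *fmt, ...)
{
    if (!loglevel_is_set(level)) {
        return;
    }
    va_list ap;
    va_start(ap, fmt);
    fprintf(stderr, "[%s] ", func);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

static int
decode_payload(const char *payload)
{
    if (payload == NULL) {
        /* Failures are always reported, with context explaining why. */
        log_msg(LOG_ERR, "decode_payload", "decoding failed: empty payload\n");
        return -1;
    }
    /* Verbose success-path tracing is only formatted when the level is on. */
    if (loglevel_is_set(LOG_REPL)) {
        log_msg(LOG_REPL, "decode_payload", "decoded payload: %s\n", payload);
    }
    return 0;
}

int
main(void)
{
    decode_payload(NULL);
    decode_payload("start-replication-request");
    return 0;
}
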
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
|
||||
index 8b178610b..70c45ec50 100644
|
||||
--- a/ldap/servers/plugins/replication/repl_extop.c
|
||||
+++ b/ldap/servers/plugins/replication/repl_extop.c
|
||||
@@ -73,6 +73,18 @@ done:
|
||||
return rc;
|
||||
}
|
||||
|
||||
+static void
|
||||
+ruv_dump_to_log(const RUV *ruv, char *log_name)
|
||||
+{
|
||||
+ if (!ruv) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "%s: RUV: None\n", log_name);
|
||||
+ } else {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "%s: RUV:\n", log_name);
|
||||
+ ruv_dump(ruv, log_name, NULL);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+
|
||||
/* The data_guid and data parameters should only be set if we
|
||||
* are talking with a 9.0 replica. */
|
||||
static struct berval *
|
||||
@@ -95,33 +107,60 @@ create_ReplicationExtopPayload(const char *protocol_oid,
|
||||
PR_ASSERT(protocol_oid != NULL || send_end);
|
||||
PR_ASSERT(repl_root != NULL);
|
||||
|
||||
- /* Create the request data */
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "create_ReplicationExtopPayload - "
|
||||
+ "encoding '%s' payload...\n",
|
||||
+ send_end ? "End Replication" : "Start Replication");
|
||||
+ }
|
||||
|
||||
+ /* Create the request data */
|
||||
if ((tmp_bere = der_alloc()) == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload",
|
||||
+ "encoding failed: der_alloc failed\n");
|
||||
rc = LDAP_ENCODING_ERROR;
|
||||
goto loser;
|
||||
}
|
||||
if (!send_end) {
|
||||
if (ber_printf(tmp_bere, "{ss", protocol_oid, repl_root) == -1) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload",
|
||||
+ "encoding failed: ber_printf failed - protocol_oid (%s) repl_root (%s)\n",
|
||||
+ protocol_oid, repl_root);
|
||||
rc = LDAP_ENCODING_ERROR;
|
||||
goto loser;
|
||||
}
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "create_ReplicationExtopPayload - "
|
||||
+ "encoding protocol_oid: %s\n", protocol_oid);
|
||||
+ }
|
||||
} else {
|
||||
if (ber_printf(tmp_bere, "{s", repl_root) == -1) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload",
|
||||
+ "encoding failed: ber_printf failed - repl_root (%s)\n",
|
||||
+ repl_root);
|
||||
rc = LDAP_ENCODING_ERROR;
|
||||
goto loser;
|
||||
}
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "create_ReplicationExtopPayload - "
|
||||
+ "encoding repl_root: %s\n", repl_root);
|
||||
+ }
|
||||
}
|
||||
|
||||
sdn = slapi_sdn_new_dn_byref(repl_root);
|
||||
repl = replica_get_replica_from_dn(sdn);
|
||||
if (repl == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload",
|
||||
+ "encoding failed: failed to get replica from dn (%s)\n",
|
||||
+ slapi_sdn_get_dn(sdn));
|
||||
rc = LDAP_OPERATIONS_ERROR;
|
||||
goto loser;
|
||||
}
|
||||
|
||||
ruv_obj = replica_get_ruv(repl);
|
||||
if (ruv_obj == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload",
|
||||
+ "encoding failed: failed to get ruv from replica suffix (%s)\n",
|
||||
+ slapi_sdn_get_dn(sdn));
|
||||
rc = LDAP_OPERATIONS_ERROR;
|
||||
goto loser;
|
||||
}
|
||||
@@ -134,8 +173,14 @@ create_ReplicationExtopPayload(const char *protocol_oid,
|
||||
/* We need to encode and send each time the local ruv in case we have changed it */
|
||||
rc = encode_ruv(tmp_bere, ruv);
|
||||
if (rc != 0) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload",
|
||||
+ "encoding failed: encode_ruv failed for replica suffix (%s)\n",
|
||||
+ slapi_sdn_get_dn(sdn));
|
||||
goto loser;
|
||||
}
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ ruv_dump_to_log(ruv, "create_ReplicationExtopPayload");
|
||||
+ }
|
||||
|
||||
if (!send_end) {
|
||||
char s[CSN_STRSIZE];
|
||||
@@ -157,36 +202,67 @@ create_ReplicationExtopPayload(const char *protocol_oid,
|
||||
charray_merge(&referrals_to_send, local_replica_referral, 0);
|
||||
if (NULL != referrals_to_send) {
|
||||
if (ber_printf(tmp_bere, "[v]", referrals_to_send) == -1) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload",
|
||||
+ "encoding failed: ber_printf (referrals_to_send)\n");
|
||||
rc = LDAP_ENCODING_ERROR;
|
||||
goto loser;
|
||||
}
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ for (size_t i = 0; referrals_to_send[i]; i++) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "create_ReplicationExtopPayload - "
|
||||
+ "encoding ref: %s\n", referrals_to_send[i]);
|
||||
+ }
|
||||
+ }
|
||||
slapi_ch_free((void **)&referrals_to_send);
|
||||
}
|
||||
/* Add the CSN */
|
||||
PR_ASSERT(NULL != csn);
|
||||
if (ber_printf(tmp_bere, "s", csn_as_string(csn, PR_FALSE, s)) == -1) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload",
|
||||
+ "encoding failed: ber_printf (csnstr)\n");
|
||||
rc = LDAP_ENCODING_ERROR;
|
||||
goto loser;
|
||||
}
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "create_ReplicationExtopPayload - "
|
||||
+ "encoding csn: %s\n", csn_as_string(csn, PR_FALSE, s));
|
||||
+ }
|
||||
}
|
||||
|
||||
/* If we have data to send to a 9.0 style replica, set it here. */
|
||||
if (data_guid && data) {
|
||||
if (ber_printf(tmp_bere, "sO", data_guid, data) == -1) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload",
|
||||
+ "encoding failed: ber_printf (data_guid, data)\n");
|
||||
rc = LDAP_ENCODING_ERROR;
|
||||
goto loser;
|
||||
}
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "create_ReplicationExtopPayload - "
|
||||
+ "encoding data_guid (%s) data (%s:%ld)\n",
|
||||
+ data_guid, data->bv_val, data->bv_len);
|
||||
+ }
|
||||
}
|
||||
|
||||
+
|
||||
if (ber_printf(tmp_bere, "}") == -1) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload",
|
||||
+ "encoding failed: ber_printf\n");
|
||||
rc = LDAP_ENCODING_ERROR;
|
||||
goto loser;
|
||||
}
|
||||
|
||||
if (ber_flatten(tmp_bere, &req_data) == -1) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "create_ReplicationExtopPayload",
|
||||
+ "encoding failed: ber_flatten failed\n");
|
||||
rc = LDAP_LOCAL_ERROR;
|
||||
goto loser;
|
||||
}
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "create_ReplicationExtopPayload - "
|
||||
+ "Encoding finished\n");
|
||||
+ }
|
||||
+
|
||||
/* Success */
|
||||
goto done;
|
||||
|
||||
@@ -293,8 +369,14 @@ decode_startrepl_extop(Slapi_PBlock *pb, char **protocol_oid, char **repl_root,
|
||||
if ((NULL == extop_oid) ||
|
||||
((strcmp(extop_oid, REPL_START_NSDS50_REPLICATION_REQUEST_OID) != 0) &&
|
||||
(strcmp(extop_oid, REPL_START_NSDS90_REPLICATION_REQUEST_OID) != 0)) ||
|
||||
- !BV_HAS_DATA(extop_value)) {
|
||||
+ !BV_HAS_DATA(extop_value))
|
||||
+ {
|
||||
/* bogus */
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop",
|
||||
+ "decoding failed: extop_oid (%s) (%s) extop_value (%s)\n",
|
||||
+ NULL == extop_oid ? "NULL" : "Ok",
|
||||
+ extop_oid ? extop_oid : "",
|
||||
+ extop_value ? !BV_HAS_DATA(extop_value) ? "No data" : "Ok" : "No data");
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -307,25 +389,36 @@ decode_startrepl_extop(Slapi_PBlock *pb, char **protocol_oid, char **repl_root,
|
||||
}
|
||||
|
||||
if ((tmp_bere = ber_init(extop_value)) == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop",
|
||||
+ "decoding failed: ber_init for extop_value (%s:%lu)\n",
|
||||
+ extop_value->bv_val, extop_value->bv_len);
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
if (ber_scanf(tmp_bere, "{") == LBER_ERROR) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop",
|
||||
+ "decoding failed: ber_scanf 1\n");
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
/* Get the required protocol OID and root of replicated subtree */
|
||||
if (ber_get_stringa(tmp_bere, protocol_oid) == LBER_DEFAULT) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop",
|
||||
+ "decoding failed: ber_get_stringa (protocol_oid)\n");
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
if (ber_get_stringa(tmp_bere, repl_root) == LBER_DEFAULT) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop",
|
||||
+ "decoding failed: ber_get_stringa (repl_root)\n");
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
|
||||
/* get supplier's ruv */
|
||||
if (decode_ruv(tmp_bere, supplier_ruv) == -1) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop",
|
||||
+ "decoding failed: decode_ruv (supplier_ruv)\n");
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -333,33 +426,45 @@ decode_startrepl_extop(Slapi_PBlock *pb, char **protocol_oid, char **repl_root,
|
||||
/* Get the optional set of referral URLs */
|
||||
if (ber_peek_tag(tmp_bere, &len) == LBER_SET) {
|
||||
if (ber_scanf(tmp_bere, "[v]", extra_referrals) == LBER_ERROR) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop",
|
||||
+ "decoding failed: ber_scanf (extra_referrals)\n");
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
}
|
||||
/* Get the CSN */
|
||||
if (ber_get_stringa(tmp_bere, csnstr) == LBER_ERROR) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop",
|
||||
+ "decoding failed: ber_get_stringa (csnstr)\n");
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
/* Get the optional replication session callback data. */
|
||||
if (ber_peek_tag(tmp_bere, &len) == LBER_OCTETSTRING) {
|
||||
if (ber_get_stringa(tmp_bere, data_guid) == LBER_ERROR) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop",
|
||||
+ "decoding failed: ber_get_stringa (data_guid)\n");
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
/* If a data_guid was specified, data must be specified as well. */
|
||||
if (ber_peek_tag(tmp_bere, &len) == LBER_OCTETSTRING) {
|
||||
if (ber_get_stringal(tmp_bere, data) == LBER_ERROR) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop",
|
||||
+ "decoding failed: ber_get_stringal (data)\n");
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
} else {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop",
|
||||
+ "decoding failed: ber_peek_tag\n");
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
}
|
||||
if (ber_scanf(tmp_bere, "}") == LBER_ERROR) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_startrepl_extop",
|
||||
+ "decoding failed: ber_scanf 2\n");
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -378,6 +483,22 @@ free_and_return:
|
||||
if (*supplier_ruv) {
|
||||
ruv_destroy(supplier_ruv);
|
||||
}
|
||||
+ } else if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
+ "decode_startrepl_extop - decoding payload...\n");
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
+ "decode_startrepl_extop - decoded protocol_oid: %s\n", *protocol_oid);
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
+ "decode_startrepl_extop - decoded repl_root: %s\n", *repl_root);
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
+ "decode_startrepl_extop - decoded csn: %s\n", *csnstr);
|
||||
+ ruv_dump_to_log(*supplier_ruv, "decode_startrepl_extop");
|
||||
+ for (size_t i = 0; *extra_referrals && *extra_referrals[i]; i++) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "decode_startrepl_extop - "
|
||||
+ "decoded referral: %s\n", *extra_referrals[i]);
|
||||
+ }
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
+ "decode_startrepl_extop - Finshed decoding payload.\n");
|
||||
}
|
||||
if (NULL != tmp_bere) {
|
||||
ber_free(tmp_bere, 1);
|
||||
@@ -406,30 +527,54 @@ decode_endrepl_extop(Slapi_PBlock *pb, char **repl_root)
|
||||
|
||||
if ((NULL == extop_oid) ||
|
||||
(strcmp(extop_oid, REPL_END_NSDS50_REPLICATION_REQUEST_OID) != 0) ||
|
||||
- !BV_HAS_DATA(extop_value)) {
|
||||
+ !BV_HAS_DATA(extop_value))
|
||||
+ {
|
||||
/* bogus */
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_endrepl_extop",
|
||||
+ "decoding failed: extop_oid (%s) correct oid (%s) extop_value data (%s)\n",
|
||||
+ extop_oid ? extop_oid : "NULL",
|
||||
+ extop_oid ? strcmp(extop_oid, REPL_END_NSDS50_REPLICATION_REQUEST_OID) != 0 ? "wrong oid" : "correct oid" : "NULL",
|
||||
+ !BV_HAS_DATA(extop_value) ? "No data" : "Has data");
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
|
||||
if ((tmp_bere = ber_init(extop_value)) == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_endrepl_extop",
|
||||
+ "decoding failed: ber_init failed: extop_value (%s:%lu)\n",
|
||||
+ extop_value->bv_val, extop_value->bv_len);
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
if (ber_scanf(tmp_bere, "{") == LBER_DEFAULT) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_endrepl_extop",
|
||||
+ "decoding failed: ber_scanf failed1\n");
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
/* Get the required root of replicated subtree */
|
||||
if (ber_get_stringa(tmp_bere, repl_root) == LBER_DEFAULT) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_endrepl_extop",
|
||||
+ "decoding failed: ber_get_stringa failed\n");
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
if (ber_scanf(tmp_bere, "}") == LBER_DEFAULT) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_endrepl_extop",
|
||||
+ "decoding failed: ber_scanf2 failed\n");
|
||||
rc = -1;
|
||||
goto free_and_return;
|
||||
}
|
||||
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, "decode_endrepl_extop",
|
||||
+ "Decoding payload...\n");
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, "decode_endrepl_extop",
|
||||
+ "Decoded repl_root: %s\n", *repl_root);
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, "decode_endrepl_extop",
|
||||
+ "Finished decoding payload.\n");
|
||||
+ }
|
||||
+
|
||||
free_and_return:
|
||||
if (NULL != tmp_bere) {
|
||||
ber_free(tmp_bere, 1);
|
||||
@@ -461,27 +606,46 @@ decode_repl_ext_response(struct berval *bvdata, int *response_code, struct berva
|
||||
PR_ASSERT(NULL != ruv_bervals);
|
||||
|
||||
if ((NULL == response_code) || (NULL == ruv_bervals) ||
|
||||
- (NULL == data_guid) || (NULL == data) || !BV_HAS_DATA(bvdata)) {
|
||||
+ (NULL == data_guid) || (NULL == data) || !BV_HAS_DATA(bvdata))
|
||||
+ {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_repl_ext_response",
|
||||
+ "decoding failed: response_code (%s) ruv_bervals (%s) data_guid (%s) data (%s) bvdata (%s)\n",
|
||||
+ NULL == response_code ? "NULL" : "Ok",
|
||||
+ NULL == ruv_bervals ? "NULL" : "Ok",
|
||||
+ NULL == data_guid ? "NULL" : "Ok",
|
||||
+ NULL == data ? "NULL" : "Ok",
|
||||
+ !BV_HAS_DATA(bvdata) ? "No data" : "Ok");
|
||||
return_value = -1;
|
||||
} else {
|
||||
ber_len_t len;
|
||||
ber_int_t temp_response_code = 0;
|
||||
*ruv_bervals = NULL;
|
||||
if ((tmp_bere = ber_init(bvdata)) == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_repl_ext_response",
|
||||
+ "decoding failed: ber_init failed from bvdata (%s:%lu)\n",
|
||||
+ bvdata->bv_val, bvdata->bv_len);
|
||||
return_value = -1;
|
||||
} else if (ber_scanf(tmp_bere, "{e", &temp_response_code) == LBER_ERROR) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_repl_ext_response",
|
||||
+ "decoding failed: ber_scanf failed\n");
|
||||
return_value = -1;
|
||||
} else if (ber_peek_tag(tmp_bere, &len) == LBER_SEQUENCE) {
|
||||
if (ber_scanf(tmp_bere, "{V}", ruv_bervals) == LBER_ERROR) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_repl_ext_response",
|
||||
+ "decoding failed: ber_scanf2 failed from ruv_bervals\n");
|
||||
return_value = -1;
|
||||
}
|
||||
}
|
||||
/* Check for optional data from replication session callback */
|
||||
if (ber_peek_tag(tmp_bere, &len) == LBER_OCTETSTRING) {
|
||||
if (ber_scanf(tmp_bere, "aO}", data_guid, data) == LBER_ERROR) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_repl_ext_response",
|
||||
+ "decoding failed: ber_scanf3 failed from data_guid & data\n");
|
||||
return_value = -1;
|
||||
}
|
||||
} else if (ber_scanf(tmp_bere, "}") == LBER_ERROR) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "decode_repl_ext_response",
|
||||
+ "decoding failed: ber_scanf4 failed\n");
|
||||
return_value = -1;
|
||||
}
|
||||
|
||||
@@ -934,17 +1098,36 @@ send_response:
|
||||
/* ONREPL - not sure what we suppose to do here */
|
||||
}
|
||||
ber_printf(resp_bere, "{e", response);
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
+ "multisupplier_extop_StartNSDS50ReplicationRequest - encoded response: %d\n",
|
||||
+ response);
|
||||
+ }
|
||||
if (NULL != ruv_bervals) {
|
||||
ber_printf(resp_bere, "{V}", ruv_bervals);
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ ruv_dump_to_log(ruv, "multisupplier_extop_StartNSDS50ReplicationRequest");
|
||||
+ }
|
||||
}
|
||||
+
|
||||
/* Add extra data from replication session callback if necessary */
|
||||
if (is90 && data_guid && data) {
|
||||
ber_printf(resp_bere, "sO", data_guid, data);
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
+ "multisupplier_extop_StartNSDS50ReplicationRequest - encoded data_guid (%s) data (%s:%ld)\n",
|
||||
+ data_guid, data->bv_val, data->bv_len);
|
||||
+ }
|
||||
}
|
||||
|
||||
ber_printf(resp_bere, "}");
|
||||
ber_flatten(resp_bere, &resp_bval);
|
||||
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
+ "multisupplier_extop_StartNSDS50ReplicationRequest - Finished encoding payload\n");
|
||||
+ }
|
||||
+
|
||||
if (is90) {
|
||||
slapi_pblock_set(pb, SLAPI_EXT_OP_RET_OID, REPL_NSDS90_REPLICATION_RESPONSE_OID);
|
||||
} else {
|
||||
@@ -1005,8 +1188,8 @@ send_response:
|
||||
* sending this request).
|
||||
* The situation is confused
|
||||
*/
|
||||
- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multimaster_extop_StartNSDS50ReplicationRequest - "
|
||||
- "already acquired replica: replica not ready (%d) (replica=%s)\n",
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multisupplier_extop_StartNSDS50ReplicationRequest - "
|
||||
+ "already acquired replica: replica not ready (%d) (replica=%s)\n",
|
||||
response, replica_get_name(r) ? replica_get_name(r) : "no name");
|
||||
|
||||
/*
|
||||
@@ -1016,8 +1199,8 @@ send_response:
|
||||
if (r) {
|
||||
|
||||
r_locking_conn = replica_get_locking_conn(r);
|
||||
- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multimaster_extop_StartNSDS50ReplicationRequest - "
|
||||
- "already acquired replica: locking_conn=%" PRIu64 ", current connid=%" PRIu64 "\n",
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multisupplier_extop_StartNSDS50ReplicationRequest - "
|
||||
+ "already acquired replica: locking_conn=%" PRIu64 ", current connid=%" PRIu64 "\n",
|
||||
r_locking_conn, connid);
|
||||
|
||||
if ((r_locking_conn != ULONG_MAX) && (r_locking_conn == connid)) {
|
||||
@@ -1032,8 +1215,8 @@ send_response:
|
||||
* On the supplier, we need to close the connection so
|
||||
* that the RA will restart a new session in a clear state
|
||||
*/
|
||||
- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multimaster_extop_StartNSDS50ReplicationRequest - "
|
||||
- "already acquired replica: disconnect conn=%" PRIu64 "\n",
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "multisupplier_extop_StartNSDS50ReplicationRequest - "
|
||||
+ "already acquired replica: disconnect conn=%" PRIu64 "\n",
|
||||
connid);
|
||||
slapi_disconnect_server(conn);
|
||||
}
|
||||
@@ -1210,6 +1393,10 @@ send_response:
|
||||
if ((resp_bere = der_alloc()) == NULL) {
|
||||
goto free_and_return;
|
||||
}
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, "multisupplier_extop_EndNSDS50ReplicationRequest",
|
||||
+ "encoded response: %d\n", response);
|
||||
+ }
|
||||
ber_printf(resp_bere, "{e}", response);
|
||||
ber_flatten(resp_bere, &resp_bval);
|
||||
slapi_pblock_set(pb, SLAPI_EXT_OP_RET_OID, REPL_NSDS50_REPLICATION_RESPONSE_OID);
|
||||
diff --git a/ldap/servers/slapd/slapi2runtime.c b/ldap/servers/slapd/slapi2runtime.c
|
||||
index 53927934a..e622f1b65 100644
|
||||
--- a/ldap/servers/slapd/slapi2runtime.c
|
||||
+++ b/ldap/servers/slapd/slapi2runtime.c
|
||||
@@ -88,7 +88,6 @@ slapi_lock_mutex(Slapi_Mutex *mutex)
|
||||
inline int __attribute__((always_inline))
|
||||
slapi_unlock_mutex(Slapi_Mutex *mutex)
|
||||
{
|
||||
- PR_ASSERT(mutex != NULL);
|
||||
if (mutex == NULL || pthread_mutex_unlock((pthread_mutex_t *)mutex) != 0) {
|
||||
return (0);
|
||||
} else {
|
||||
--
|
||||
2.37.1
|
||||
|
@ -1,102 +0,0 @@
|
||||
From 03ca5111a8de602ecef9ad33206ba593b242d0df Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 21 Jan 2022 10:15:35 -0500
Subject: [PATCH 1/2] Issue 5127 - run restorecon on /dev/shm at server startup

Description:

Update the systemd service file to execute a script that runs
restorecon on the DB home directory. This addresses issues with
backup/restore, reboot, and FS restore that can happen when
/dev/shm is missing or created outside of dscreate.

relates: https://github.com/389ds/389-ds-base/issues/5127

Reviewed by: progier & viktor (Thanks!!)
---
 Makefile.am                          |  2 +-
 rpm/389-ds-base.spec.in              |  1 +
 wrappers/ds_selinux_restorecon.sh.in | 33 ++++++++++++++++++++++++++++
 wrappers/systemd.template.service.in |  1 +
 4 files changed, 36 insertions(+), 1 deletion(-)
 create mode 100644 wrappers/ds_selinux_restorecon.sh.in

diff --git a/Makefile.am b/Makefile.am
|
||||
index fc5a6a7d1..d6ad273c3 100644
|
||||
--- a/Makefile.am
|
||||
+++ b/Makefile.am
|
||||
@@ -775,7 +775,7 @@ libexec_SCRIPTS += ldap/admin/src/scripts/ds_selinux_enabled \
|
||||
ldap/admin/src/scripts/ds_selinux_port_query
|
||||
endif
|
||||
if SYSTEMD
|
||||
-libexec_SCRIPTS += wrappers/ds_systemd_ask_password_acl
|
||||
+libexec_SCRIPTS += wrappers/ds_systemd_ask_password_acl wrappers/ds_selinux_restorecon.sh
|
||||
endif
|
||||
|
||||
install-data-hook:
|
||||
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
|
||||
index d80de8422..6c0d95abd 100644
|
||||
--- a/rpm/389-ds-base.spec.in
|
||||
+++ b/rpm/389-ds-base.spec.in
|
||||
@@ -623,6 +623,7 @@ exit 0
|
||||
%{_sbindir}/ns-slapd
|
||||
%{_mandir}/man8/ns-slapd.8.gz
|
||||
%{_libexecdir}/%{pkgname}/ds_systemd_ask_password_acl
|
||||
+%{_libexecdir}/%{pkgname}/ds_selinux_restorecon.sh
|
||||
%{_mandir}/man5/99user.ldif.5.gz
|
||||
%{_mandir}/man5/certmap.conf.5.gz
|
||||
%{_mandir}/man5/slapd-collations.conf.5.gz
|
||||
diff --git a/wrappers/ds_selinux_restorecon.sh.in b/wrappers/ds_selinux_restorecon.sh.in
|
||||
new file mode 100644
|
||||
index 000000000..063347de3
|
||||
--- /dev/null
|
||||
+++ b/wrappers/ds_selinux_restorecon.sh.in
|
||||
@@ -0,0 +1,33 @@
|
||||
+#!/bin/sh
|
||||
+# BEGIN COPYRIGHT BLOCK
|
||||
+# Copyright (C) 2022 Red Hat, Inc.
|
||||
+#
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# END COPYRIGHT BLOCK
|
||||
+
|
||||
+# Make sure we have the path to the dse.ldif
|
||||
+if [ -z $1 ]
|
||||
+then
|
||||
+ echo "usage: ${0} /etc/dirsrv/slapd-<instance>/dse.ldif"
|
||||
+ exit 0
|
||||
+fi
|
||||
+
|
||||
+if ! command -v restorecon &> /dev/null
|
||||
+then
|
||||
+ # restorecon is not available
|
||||
+ exit 0
|
||||
+fi
|
||||
+
|
||||
+# Grep the db_home_dir out of the config file
|
||||
+DS_HOME_DIR=`grep 'nsslapd-db-home-directory: ' $1 | awk '{print $2}'`
|
||||
+if [ -z "$DS_HOME_DIR" ]
|
||||
+then
|
||||
+ # No DB home set, that's ok
|
||||
+ exit 0
|
||||
+fi
|
||||
+
|
||||
+# Now run restorecon
|
||||
+restorecon ${DS_HOME_DIR}
|
||||
diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in
|
||||
index a8c21a9be..4485e0ec0 100644
|
||||
--- a/wrappers/systemd.template.service.in
|
||||
+++ b/wrappers/systemd.template.service.in
|
||||
@@ -14,6 +14,7 @@ EnvironmentFile=-@initconfigdir@/@package_name@
|
||||
EnvironmentFile=-@initconfigdir@/@package_name@-%i
|
||||
PIDFile=/run/@package_name@/slapd-%i.pid
|
||||
ExecStartPre=@libexecdir@/ds_systemd_ask_password_acl @instconfigdir@/slapd-%i/dse.ldif
|
||||
+ExecStartPre=@libexecdir@/ds_selinux_restorecon.sh @instconfigdir@/slapd-%i/dse.ldif
|
||||
ExecStart=@sbindir@/ns-slapd -D @instconfigdir@/slapd-%i -i /run/@package_name@/slapd-%i.pid
|
||||
PrivateTmp=on
|
||||
|
||||
--
|
||||
2.31.1
|
||||
|
@ -0,0 +1,88 @@
|
||||
From 6fd4fd082424838f7d06e0de8683d28f04ec0d43 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 10 Aug 2022 08:59:15 -0400
Subject: [PATCH 2/4] Issue 5412 - lib389 - do not set backend name to
 lowercase

Description:

There is no reason to force a new suffix to lowercase. The server
handles the case correctly, and some customers, especially during
migrations, want the base suffix to keep a specific case.

relates: https://github.com/389ds/389-ds-base/issues/5412

Reviewed by: spichugi (Thanks!)
---
 dirsrvtests/tests/suites/basic/basic_test.py | 33 ++++++++++++++++++--
 src/lib389/lib389/backend.py                 |  3 +-
 2 files changed, 32 insertions(+), 4 deletions(-)

diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
|
||||
index 003cd8f28..6fa4dea25 100644
|
||||
--- a/dirsrvtests/tests/suites/basic/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
|
||||
@@ -22,6 +22,8 @@ from lib389.idm.directorymanager import DirectoryManager
|
||||
from lib389.config import LDBMConfig
|
||||
from lib389.dseldif import DSEldif
|
||||
from lib389.rootdse import RootDSE
|
||||
+from lib389.backend import Backends
|
||||
+from lib389.idm.domain import Domain
|
||||
|
||||
|
||||
pytestmark = pytest.mark.tier0
|
||||
@@ -1410,8 +1412,35 @@ def test_ldbm_modification_audit_log(topology_st):
|
||||
assert conn.searchAuditLog('%s: %s' % (attr, VALUE))
|
||||
|
||||
|
||||
-@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.0.0'),
|
||||
- reason="This test is only required if perl is enabled, and requires root.")
|
||||
+def test_suffix_case(topology_st):
|
||||
+ """Test that the suffix case is preserved when creating a new backend
|
||||
+
|
||||
+ :id: 4eff15be-6cde-4312-b492-c88941876bda
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Create backend with uppercase characters
|
||||
+ 2. Create root node entry
|
||||
+ 3. Search should return suffix with upper case characters
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ """
|
||||
+
|
||||
+ # Start with a clean slate
|
||||
+ topology_st.standalone.restart()
|
||||
+
|
||||
+ TEST_SUFFIX = 'dc=UPPER_CASE'
|
||||
+
|
||||
+ backends = Backends(topology_st.standalone)
|
||||
+ backends.create(properties={'nsslapd-suffix': TEST_SUFFIX,
|
||||
+ 'name': 'upperCaseRoot',
|
||||
+ 'sample_entries': '001004002'})
|
||||
+
|
||||
+ domain = Domain(topology_st.standalone, TEST_SUFFIX)
|
||||
+ assert domain.dn == TEST_SUFFIX
|
||||
+
|
||||
+
|
||||
def test_dscreate(request):
|
||||
"""Test that dscreate works, we need this for now until setup-ds.pl is
|
||||
fully discontinued.
|
||||
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
|
||||
index cbd2810e9..24613655d 100644
|
||||
--- a/src/lib389/lib389/backend.py
|
||||
+++ b/src/lib389/lib389/backend.py
|
||||
@@ -608,8 +608,7 @@ class Backend(DSLdapObject):
|
||||
dn = ",".join(dn_comps)
|
||||
|
||||
if properties is not None:
|
||||
- suffix_dn = properties['nsslapd-suffix'].lower()
|
||||
- dn_comps = ldap.dn.explode_dn(suffix_dn)
|
||||
+ dn_comps = ldap.dn.explode_dn(properties['nsslapd-suffix'])
|
||||
ndn = ",".join(dn_comps)
|
||||
properties['nsslapd-suffix'] = ndn
|
||||
sample_entries = properties.pop(BACKEND_SAMPLE_ENTRIES, False)
|
||||
--
|
||||
2.37.1
|
||||
|
@ -1,35 +0,0 @@
|
||||
From 0ed471bae52bb0debd23336cbc5f3f1d400cbbc9 Mon Sep 17 00:00:00 2001
From: Adam Williamson <awilliam@redhat.com>
Date: Thu, 27 Jan 2022 11:07:26 -0800
Subject: [PATCH] Issue 5127 - ds_selinux_restorecon.sh: always exit 0

Description:

We don't want to error out and give up on starting the service
if the restorecon fails - it might just be that the directory
doesn't exist and doesn't need restoring. Issue identified and
fix suggested by Simon Farnsworth.

relates: https://github.com/389ds/389-ds-base/issues/5127

Reviewed by: adamw & mreynolds
---
 wrappers/ds_selinux_restorecon.sh.in | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/wrappers/ds_selinux_restorecon.sh.in b/wrappers/ds_selinux_restorecon.sh.in
|
||||
index 063347de3..2d7386233 100644
|
||||
--- a/wrappers/ds_selinux_restorecon.sh.in
|
||||
+++ b/wrappers/ds_selinux_restorecon.sh.in
|
||||
@@ -29,5 +29,6 @@ then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
-# Now run restorecon
|
||||
-restorecon ${DS_HOME_DIR}
|
||||
+# Now run restorecon, but don't die if it fails (could be that the
|
||||
+# directory doesn't exist)
|
||||
+restorecon ${DS_HOME_DIR} || :
|
||||
--
|
||||
2.31.1
|
||||
|
@ -0,0 +1,110 @@
|
||||
From 48ef747b731b5debfefc20757f3b3775828504c2 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 18 Aug 2022 11:17:30 +0200
Subject: [PATCH 3/4] Issue 5418 - Sync_repl may crash while managing invalid
 cookie (#5420)

Bug description:
If the server receives an invalid cookie without the '#' separator,
it parses it into an empty cookie (Sync_Cookie) instead of a NULL
cookie (failure).
The server later crashes (SIGSEGV) when using the empty cookie.

Fix description:
If the parsing fails, return NULL.

relates: #5418

Reviewed by: Viktor Ashirov, Mark Reynolds, William Brown, Simon
Pichugin (thanks !)
---
 .../suites/syncrepl_plugin/basic_test.py | 76 +++++++++++++++++++
 1 file changed, 76 insertions(+)

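The "reject, don't half-construct" rule from the fix description can be sketched as follows. The cookie layout and field names here are assumptions for illustration only (three '#'-separated fields ending in a numeric change number); the real Sync_Cookie parser validates each field far more strictly, which is why even a well-formed-looking string can still be rejected by the server. The key point is that any parse failure yields NULL rather than an empty cookie.

/* Hypothetical strict cookie parser; not the content-sync plugin code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Assumed cookie shape: "<server-id>#<client-id>#<change-number>" */
typedef struct sync_cookie {
    char *server_id;
    char *client_id;
    long  change_nr;
} sync_cookie;

/* Return a fully populated cookie, or NULL on any malformed input, so
 * callers can rely on "non-NULL means safe to use". */
static sync_cookie *
cookie_parse(const char *raw)
{
    if (raw == NULL) {
        return NULL;
    }
    char *copy = strdup(raw);
    if (copy == NULL) {
        return NULL;
    }
    char *save = NULL;
    char *f1 = strtok_r(copy, "#", &save);
    char *f2 = strtok_r(NULL, "#", &save);
    char *f3 = strtok_r(NULL, "#", &save);

    if (f1 == NULL || f2 == NULL || f3 == NULL ||
        strtok_r(NULL, "#", &save) != NULL) {  /* require exactly three fields */
        free(copy);
        return NULL;
    }
    char *end = NULL;
    long nr = strtol(f3, &end, 10);
    if (end == f3 || *end != '\0') {           /* third field must be numeric */
        free(copy);
        return NULL;
    }

    sync_cookie *c = calloc(1, sizeof(*c));
    if (c != NULL) {
        c->server_id = strdup(f1);
        c->client_id = strdup(f2);
        c->change_nr = nr;
    }
    free(copy);
    return c;
}

int
main(void)
{
    const char *tests[] = { "foo", "a#b", "a#b#c", "a#b#42" };
    for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
        sync_cookie *c = cookie_parse(tests[i]);
        printf("%-7s -> %s\n", tests[i], c ? "parsed" : "rejected (NULL)");
        if (c) {
            free(c->server_id);
            free(c->client_id);
            free(c);
        }
    }
    return 0;
}
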
diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
|
||||
index 533460e8f..375517693 100644
|
||||
--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
|
||||
@@ -594,3 +594,79 @@ def test_sync_repl_cenotaph(topo_m2, request):
|
||||
pass
|
||||
|
||||
request.addfinalizer(fin)
|
||||
+
|
||||
+def test_sync_repl_invalid_cookie(topology, request):
|
||||
+ """Test sync_repl with invalid cookie
|
||||
+
|
||||
+ :id: 8fa4a8f8-acf4-42a5-90f1-6ba1d8080e46
|
||||
+ :setup: install a standalone instance
|
||||
+ :steps:
|
||||
+ 1. reset instance to standard (no retroCL, no sync_repl, no dynamic plugin)
|
||||
+ 2. Enable retroCL/content_sync
|
||||
+ 3. Establish a sync_repl connection
|
||||
+ 4. Tests servers results to search with invalid cookie
|
||||
+ 5. Add/delete an user entry to check the server is up and running
|
||||
+ :expectedresults:
|
||||
+ 1. Should succeeds
|
||||
+ 2. Should succeeds
|
||||
+ 3. Should succeeds
|
||||
+ 4. Should succeeds
|
||||
+ 5. Should succeeds
|
||||
+ """
|
||||
+
|
||||
+ # Reset the instance in a default config
|
||||
+ # Disable content sync plugin
|
||||
+ topology.standalone.restart()
|
||||
+ topology.standalone.plugins.disable(name=PLUGIN_REPL_SYNC)
|
||||
+
|
||||
+ # Disable retro changelog
|
||||
+ topology.standalone.plugins.disable(name=PLUGIN_RETRO_CHANGELOG)
|
||||
+
|
||||
+ # Disable dynamic plugins
|
||||
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'off')])
|
||||
+ topology.standalone.restart()
|
||||
+
|
||||
+ # Enable retro changelog
|
||||
+ topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
|
||||
+
|
||||
+ # Enbale content sync plugin
|
||||
+ topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)
|
||||
+ topology.standalone.restart()
|
||||
+
|
||||
+ # Setup the syncer
|
||||
+ sync = ISyncRepl(topology.standalone)
|
||||
+
|
||||
+ # Test invalid cookies
|
||||
+ cookies = ('#', '##', 'a#a#a', 'a#a#1', 'foo')
|
||||
+ for invalid_cookie in cookies:
|
||||
+ log.info('Testing cookie: %s' % invalid_cookie)
|
||||
+ try:
|
||||
+ ldap_search = sync.syncrepl_search(base=DEFAULT_SUFFIX,
|
||||
+ scope=ldap.SCOPE_SUBTREE,
|
||||
+ attrlist=['objectclass', 'cn', 'homedirectory', 'sn','uid'],
|
||||
+ filterstr='(|(objectClass=groupofnames)(objectClass=person))',
|
||||
+ mode='refreshOnly',
|
||||
+ cookie=invalid_cookie)
|
||||
+ poll_result = sync.syncrepl_poll(all=1)
|
||||
+
|
||||
+ log.fatal('Invalid cookie accepted!')
|
||||
+ assert False
|
||||
+ except Exception as e:
|
||||
+ log.info('Invalid cookie correctly rejected: {}'.format(e.args[0]['info']))
|
||||
+ pass
|
||||
+
|
||||
+ # check that the server is still up and running
|
||||
+ users = UserAccounts(topology.standalone, DEFAULT_SUFFIX)
|
||||
+ user = users.create_test_user(uid=1000)
|
||||
+
|
||||
+ # Success
|
||||
+ log.info('Test complete')
|
||||
+
|
||||
+ def fin():
|
||||
+ topology.standalone.restart()
|
||||
+ try:
|
||||
+ user.delete()
|
||||
+ except:
|
||||
+ pass
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
--
|
||||
2.37.1
|
||||
|
2510
SOURCES/0016-Issue-3903-fix-repl-keep-alive-event-interval.patch
Normal file
File diff suppressed because it is too large
@ -1,262 +0,0 @@
|
||||
From 93588ea455aff691bdfbf59cdef4df8fcedb69f2 Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Thu, 19 Aug 2021 10:46:00 +1000
Subject: [PATCH 1/2] Issue 4775 - Add entryuuid CLI and Fixup (#4776)

Bug Description: When EntryUUID was added, it was missing its CLI
and helpers for fixups.

Fix Description: Add the CLI elements.

fixes: https://github.com/389ds/389-ds-base/issues/4775

Author: William Brown <william@blackhats.net.au>

Review by: @mreynolds389 (thanks!)
---
 src/lib389/lib389/cli_conf/plugin.py         |  6 ++-
 .../lib389/cli_conf/plugins/entryuuid.py     | 39 ++++++++++++++
 src/plugins/entryuuid/src/lib.rs             | 54 ++++++++-----------
 3 files changed, 65 insertions(+), 34 deletions(-)
 create mode 100644 src/lib389/lib389/cli_conf/plugins/entryuuid.py

diff --git a/src/lib389/lib389/cli_conf/plugin.py b/src/lib389/lib389/cli_conf/plugin.py
|
||||
index 560c57f9b..7c0cf2c80 100644
|
||||
--- a/src/lib389/lib389/cli_conf/plugin.py
|
||||
+++ b/src/lib389/lib389/cli_conf/plugin.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2018 Red Hat, Inc.
|
||||
+# Copyright (C) 2022 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -27,6 +27,8 @@ from lib389.cli_conf.plugins import passthroughauth as cli_passthroughauth
|
||||
from lib389.cli_conf.plugins import retrochangelog as cli_retrochangelog
|
||||
from lib389.cli_conf.plugins import automember as cli_automember
|
||||
from lib389.cli_conf.plugins import posix_winsync as cli_posix_winsync
|
||||
+from lib389.cli_conf.plugins import contentsync as cli_contentsync
|
||||
+from lib389.cli_conf.plugins import entryuuid as cli_entryuuid
|
||||
|
||||
SINGULAR = Plugin
|
||||
MANY = Plugins
|
||||
@@ -113,6 +115,8 @@ def create_parser(subparsers):
|
||||
cli_passthroughauth.create_parser(subcommands)
|
||||
cli_retrochangelog.create_parser(subcommands)
|
||||
cli_posix_winsync.create_parser(subcommands)
|
||||
+ cli_contentsync.create_parser(subcommands)
|
||||
+ cli_entryuuid.create_parser(subcommands)
|
||||
|
||||
list_parser = subcommands.add_parser('list', help="List current configured (enabled and disabled) plugins")
|
||||
list_parser.set_defaults(func=plugin_list)
|
||||
diff --git a/src/lib389/lib389/cli_conf/plugins/entryuuid.py b/src/lib389/lib389/cli_conf/plugins/entryuuid.py
|
||||
new file mode 100644
|
||||
index 000000000..6c86bff4b
|
||||
--- /dev/null
|
||||
+++ b/src/lib389/lib389/cli_conf/plugins/entryuuid.py
|
||||
@@ -0,0 +1,39 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2021 William Brown <william@blackhats.net.au>
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+
|
||||
+import ldap
|
||||
+from lib389.plugins import EntryUUIDPlugin
|
||||
+from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add
|
||||
+
|
||||
+def do_fixup(inst, basedn, log, args):
|
||||
+ plugin = EntryUUIDPlugin(inst)
|
||||
+ log.info('Attempting to add task entry...')
|
||||
+ if not plugin.status():
|
||||
+ log.error("'%s' is disabled. Fix up task can't be executed" % plugin.rdn)
|
||||
+ return
|
||||
+ fixup_task = plugin.fixup(args.DN, args.filter)
|
||||
+ fixup_task.wait()
|
||||
+ exitcode = fixup_task.get_exit_code()
|
||||
+ if exitcode != 0:
|
||||
+ log.error('EntryUUID fixup task has failed. Please, check the error log for more - %s' % exitcode)
|
||||
+ else:
|
||||
+ log.info('Successfully added task entry')
|
||||
+
|
||||
+def create_parser(subparsers):
|
||||
+ referint = subparsers.add_parser('entryuuid', help='Manage and configure EntryUUID plugin')
|
||||
+ subcommands = referint.add_subparsers(help='action')
|
||||
+
|
||||
+ add_generic_plugin_parsers(subcommands, EntryUUIDPlugin)
|
||||
+
|
||||
+ fixup = subcommands.add_parser('fixup', help='Run the fix-up task for EntryUUID plugin')
|
||||
+ fixup.set_defaults(func=do_fixup)
|
||||
+ fixup.add_argument('DN', help="Base DN that contains entries to fix up")
|
||||
+ fixup.add_argument('-f', '--filter',
|
||||
+ help='Filter for entries to fix up.\n If omitted, all entries under base DN'
|
||||
+ 'will have their EntryUUID attribute regenerated if not present.')
|
||||
+
|
||||
diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
|
||||
index da9f0c239..29a9f1258 100644
|
||||
--- a/src/plugins/entryuuid/src/lib.rs
|
||||
+++ b/src/plugins/entryuuid/src/lib.rs
|
||||
@@ -33,7 +33,7 @@ fn assign_uuid(e: &mut EntryRef) {
|
||||
// 🚧 safety barrier 🚧
|
||||
if e.contains_attr("entryUUID") {
|
||||
log_error!(
|
||||
- ErrorLevel::Trace,
|
||||
+ ErrorLevel::Plugin,
|
||||
"assign_uuid -> entryUUID exists, skipping dn {}",
|
||||
sdn.to_dn_string()
|
||||
);
|
||||
@@ -47,7 +47,7 @@ fn assign_uuid(e: &mut EntryRef) {
|
||||
if sdn.is_below_suffix(&*config_sdn) || sdn.is_below_suffix(&*schema_sdn) {
|
||||
// We don't need to assign to these suffixes.
|
||||
log_error!(
|
||||
- ErrorLevel::Trace,
|
||||
+ ErrorLevel::Plugin,
|
||||
"assign_uuid -> not assigning to {:?} as part of system suffix",
|
||||
sdn.to_dn_string()
|
||||
);
|
||||
@@ -57,7 +57,7 @@ fn assign_uuid(e: &mut EntryRef) {
|
||||
// Generate a new Uuid.
|
||||
let u: Uuid = Uuid::new_v4();
|
||||
log_error!(
|
||||
- ErrorLevel::Trace,
|
||||
+ ErrorLevel::Plugin,
|
||||
"assign_uuid -> assigning {:?} to dn {}",
|
||||
u,
|
||||
sdn.to_dn_string()
|
||||
@@ -78,13 +78,13 @@ impl SlapiPlugin3 for EntryUuid {
|
||||
fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> {
|
||||
if pb.get_is_replicated_operation() {
|
||||
log_error!(
|
||||
- ErrorLevel::Trace,
|
||||
+ ErrorLevel::Plugin,
|
||||
"betxn_pre_add -> replicated operation, will not change"
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
- log_error!(ErrorLevel::Trace, "betxn_pre_add -> start");
|
||||
+ log_error!(ErrorLevel::Plugin, "betxn_pre_add -> start");
|
||||
|
||||
let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?;
|
||||
assign_uuid(&mut e);
|
||||
@@ -105,7 +105,7 @@ impl SlapiPlugin3 for EntryUuid {
|
||||
.first()
|
||||
.ok_or_else(|| {
|
||||
log_error!(
|
||||
- ErrorLevel::Trace,
|
||||
+ ErrorLevel::Plugin,
|
||||
"task_validate basedn error -> empty value array?"
|
||||
);
|
||||
LDAPError::Operation
|
||||
@@ -113,7 +113,7 @@ impl SlapiPlugin3 for EntryUuid {
|
||||
.as_ref()
|
||||
.try_into()
|
||||
.map_err(|e| {
|
||||
- log_error!(ErrorLevel::Trace, "task_validate basedn error -> {:?}", e);
|
||||
+ log_error!(ErrorLevel::Plugin, "task_validate basedn error -> {:?}", e);
|
||||
LDAPError::Operation
|
||||
})?,
|
||||
None => return Err(LDAPError::ObjectClassViolation),
|
||||
@@ -124,7 +124,7 @@ impl SlapiPlugin3 for EntryUuid {
|
||||
.first()
|
||||
.ok_or_else(|| {
|
||||
log_error!(
|
||||
- ErrorLevel::Trace,
|
||||
+ ErrorLevel::Plugin,
|
||||
"task_validate filter error -> empty value array?"
|
||||
);
|
||||
LDAPError::Operation
|
||||
@@ -132,7 +132,7 @@ impl SlapiPlugin3 for EntryUuid {
|
||||
.as_ref()
|
||||
.try_into()
|
||||
.map_err(|e| {
|
||||
- log_error!(ErrorLevel::Trace, "task_validate filter error -> {:?}", e);
|
||||
+ log_error!(ErrorLevel::Plugin, "task_validate filter error -> {:?}", e);
|
||||
LDAPError::Operation
|
||||
})?,
|
||||
None => {
|
||||
@@ -144,17 +144,11 @@ impl SlapiPlugin3 for EntryUuid {
|
||||
// Error if the first filter is empty?
|
||||
|
||||
// Now, to make things faster, we wrap the filter in a exclude term.
|
||||
-
|
||||
- // 2021 - #4877 because we allow entryuuid to be strings, on import these may
|
||||
- // be invalid. As a result, we DO need to allow the fixup to check the entryuuid
|
||||
- // value is correct, so we can not exclude these during the search.
|
||||
- /*
|
||||
let raw_filter = if !raw_filter.starts_with('(') && !raw_filter.ends_with('(') {
|
||||
format!("(&({})(!(entryuuid=*)))", raw_filter)
|
||||
} else {
|
||||
format!("(&{}(!(entryuuid=*)))", raw_filter)
|
||||
};
|
||||
- */
|
||||
|
||||
Ok(FixupData { basedn, raw_filter })
|
||||
}
|
||||
@@ -165,7 +159,7 @@ impl SlapiPlugin3 for EntryUuid {
|
||||
|
||||
fn task_handler(_task: &Task, data: Self::TaskData) -> Result<Self::TaskData, PluginError> {
|
||||
log_error!(
|
||||
- ErrorLevel::Trace,
|
||||
+ ErrorLevel::Plugin,
|
||||
"task_handler -> start thread with -> {:?}",
|
||||
data
|
||||
);
|
||||
@@ -205,12 +199,12 @@ impl SlapiPlugin3 for EntryUuid {
|
||||
}
|
||||
|
||||
fn start(_pb: &mut PblockRef) -> Result<(), PluginError> {
|
||||
- log_error!(ErrorLevel::Trace, "plugin start");
|
||||
+ log_error!(ErrorLevel::Plugin, "plugin start");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn close(_pb: &mut PblockRef) -> Result<(), PluginError> {
|
||||
- log_error!(ErrorLevel::Trace, "plugin close");
|
||||
+ log_error!(ErrorLevel::Plugin, "plugin close");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -219,20 +213,14 @@ pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError
|
||||
/* Supply a modification to the entry. */
|
||||
let sdn = e.get_sdnref();
|
||||
|
||||
- /* Check that entryuuid doesn't already exist, and is valid */
|
||||
- if let Some(valueset) = e.get_attr("entryUUID") {
|
||||
- if valueset.iter().all(|v| {
|
||||
- let u: Result<Uuid, _> = (&v).try_into();
|
||||
- u.is_ok()
|
||||
- }) {
|
||||
- // All values were valid uuid, move on!
|
||||
- log_error!(
|
||||
- ErrorLevel::Plugin,
|
||||
- "skipping fixup for -> {}",
|
||||
- sdn.to_dn_string()
|
||||
- );
|
||||
- return Ok(());
|
||||
- }
|
||||
+ /* Sanity check that entryuuid doesn't already exist */
|
||||
+ if e.contains_attr("entryUUID") {
|
||||
+ log_error!(
|
||||
+ ErrorLevel::Plugin,
|
||||
+ "skipping fixup for -> {}",
|
||||
+ sdn.to_dn_string()
|
||||
+ );
|
||||
+ return Ok(());
|
||||
}
|
||||
|
||||
// Setup the modifications
|
||||
@@ -248,7 +236,7 @@ pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError
|
||||
|
||||
match lmod.execute() {
|
||||
Ok(_) => {
|
||||
- log_error!(ErrorLevel::Trace, "fixed-up -> {}", sdn.to_dn_string());
|
||||
+ log_error!(ErrorLevel::Plugin, "fixed-up -> {}", sdn.to_dn_string());
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
--
|
||||
2.34.1
|
||||
|
@ -1,42 +0,0 @@
|
||||
From 525f2307fa3e2d0ae55c8c922e6f7220a1e5bd1b Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 3 Feb 2022 16:51:38 -0500
Subject: [PATCH] Issue 4775 - Fix cherry-pick error

Bug Description: When EntryUUID was added, it was missing its CLI
and helpers for fixups.

Fix Description: Add the CLI elements.

fixes: https://github.com/389ds/389-ds-base/issues/4775

Author: William Brown <william@blackhats.net.au>

Review by: @mreynolds389 (thanks!)
---
 src/lib389/lib389/cli_conf/plugin.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/lib389/lib389/cli_conf/plugin.py b/src/lib389/lib389/cli_conf/plugin.py
|
||||
index 7c0cf2c80..fb0ef3077 100644
|
||||
--- a/src/lib389/lib389/cli_conf/plugin.py
|
||||
+++ b/src/lib389/lib389/cli_conf/plugin.py
|
||||
@@ -27,7 +27,6 @@ from lib389.cli_conf.plugins import passthroughauth as cli_passthroughauth
|
||||
from lib389.cli_conf.plugins import retrochangelog as cli_retrochangelog
|
||||
from lib389.cli_conf.plugins import automember as cli_automember
|
||||
from lib389.cli_conf.plugins import posix_winsync as cli_posix_winsync
|
||||
-from lib389.cli_conf.plugins import contentsync as cli_contentsync
|
||||
from lib389.cli_conf.plugins import entryuuid as cli_entryuuid
|
||||
|
||||
SINGULAR = Plugin
|
||||
@@ -115,7 +114,6 @@ def create_parser(subparsers):
|
||||
cli_passthroughauth.create_parser(subcommands)
|
||||
cli_retrochangelog.create_parser(subcommands)
|
||||
cli_posix_winsync.create_parser(subcommands)
|
||||
- cli_contentsync.create_parser(subcommands)
|
||||
cli_entryuuid.create_parser(subcommands)
|
||||
|
||||
list_parser = subcommands.add_parser('list', help="List current configured (enabled and disabled) plugins")
|
||||
--
|
||||
2.34.1
|
||||
|
182
SOURCES/Cargo.lock
generated
@ -4,9 +4,9 @@ version = 3
|
||||
|
||||
[[package]]
|
||||
name = "ansi_term"
|
||||
version = "0.11.0"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
|
||||
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
@ -24,9 +24,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.0.1"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
|
||||
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
|
||||
|
||||
[[package]]
|
||||
name = "base64"
|
||||
@ -65,9 +65,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.0.71"
|
||||
version = "1.0.73"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd"
|
||||
checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11"
|
||||
dependencies = [
|
||||
"jobserver",
|
||||
]
|
||||
@ -80,9 +80,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "2.33.3"
|
||||
version = "2.34.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
|
||||
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
|
||||
dependencies = [
|
||||
"ansi_term",
|
||||
"atty",
|
||||
@ -115,6 +115,15 @@ dependencies = [
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fastrand"
|
||||
version = "1.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf"
|
||||
dependencies = [
|
||||
"instant",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fernet"
|
||||
version = "0.1.4"
|
||||
@ -145,9 +154,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
|
||||
|
||||
[[package]]
|
||||
name = "getrandom"
|
||||
version = "0.2.3"
|
||||
version = "0.2.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
|
||||
checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
@ -164,10 +173,19 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "itoa"
|
||||
version = "0.4.8"
|
||||
name = "instant"
|
||||
version = "0.1.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
|
||||
checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "itoa"
|
||||
version = "1.0.1"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35"

[[package]]
name = "jobserver"
@@ -186,9 +204,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

[[package]]
name = "libc"
version = "0.2.104"
version = "0.2.125"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b2f96d100e1cf1929e7719b7edb3b90ab5298072638fccd77be9ce942ecdfce"
checksum = "5916d2ae698f6de9bfb891ad7a8d65c09d232dc58cc4ac433c7da3b2fd84bc2b"

[[package]]
name = "librnsslapd"
@@ -210,38 +228,50 @@ dependencies = [

[[package]]
name = "log"
version = "0.4.14"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
 "cfg-if",
]

[[package]]
name = "once_cell"
version = "1.8.0"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56"
checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9"

[[package]]
name = "openssl"
version = "0.10.36"
version = "0.10.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d9facdb76fec0b73c406f125d44d86fdad818d66fef0531eec9233ca425ff4a"
checksum = "fb81a6430ac911acb25fe5ac8f1d2af1b4ea8a4fdfda0f1ee4292af2e2d8eb0e"
dependencies = [
 "bitflags",
 "cfg-if",
 "foreign-types",
 "libc",
 "once_cell",
 "openssl-macros",
 "openssl-sys",
]

[[package]]
name = "openssl-sys"
version = "0.9.67"
name = "openssl-macros"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69df2d8dfc6ce3aaf44b40dec6f487d5a886516cf6879c49e98e0710f310a058"
checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "openssl-sys"
version = "0.9.73"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d5fd19fb3e0a8191c1e34935718976a3e70c112ab9a24af6d7cadccd9d90bc0"
dependencies = [
 "autocfg",
 "cc",
@@ -271,15 +301,9 @@ dependencies = [

[[package]]
name = "pkg-config"
version = "0.3.20"
version = "0.3.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c9b1041b4387893b91ee6746cddfc28516aff326a3519fb2adf820932c5e6cb"

[[package]]
name = "ppv-lite86"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3ca011bd0129ff4ae15cd04c4eef202cadf6c51c21e47aba319b4e0501db741"
checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae"

[[package]]
name = "proc-macro-hack"
@@ -289,67 +313,27 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"

[[package]]
name = "proc-macro2"
version = "1.0.30"
version = "1.0.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edc3358ebc67bc8b7fa0c007f945b0b18226f78437d61bec735a9eb96b61ee70"
checksum = "9027b48e9d4c9175fa2218adf3557f91c1137021739951d4932f5f8268ac48aa"
dependencies = [
 "unicode-xid",
]

[[package]]
name = "quote"
version = "1.0.10"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05"
checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1"
dependencies = [
 "proc-macro2",
]

[[package]]
name = "rand"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
dependencies = [
 "libc",
 "rand_chacha",
 "rand_core",
 "rand_hc",
]

[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
 "ppv-lite86",
 "rand_core",
]

[[package]]
name = "rand_core"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
dependencies = [
 "getrandom",
]

[[package]]
name = "rand_hc"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
dependencies = [
 "rand_core",
]

[[package]]
name = "redox_syscall"
version = "0.2.10"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff"
checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42"
dependencies = [
 "bitflags",
]
@@ -369,24 +353,24 @@ version = "0.1.0"

[[package]]
name = "ryu"
version = "1.0.5"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f"

[[package]]
name = "serde"
version = "1.0.130"
version = "1.0.137"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913"
checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.130"
version = "1.0.137"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b"
checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be"
dependencies = [
 "proc-macro2",
 "quote",
@@ -395,9 +379,9 @@ dependencies = [

[[package]]
name = "serde_json"
version = "1.0.68"
version = "1.0.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8"
checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c"
dependencies = [
 "itoa",
 "ryu",
@@ -429,9 +413,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"

[[package]]
name = "syn"
version = "1.0.80"
version = "1.0.94"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d010a1623fbd906d51d650a9916aaefc05ffa0e4053ff7fe601167f3e715d194"
checksum = "a07e33e919ebcd69113d5be0e4d70c5707004ff45188910106854f38b960df4a"
dependencies = [
 "proc-macro2",
 "quote",
@@ -452,13 +436,13 @@ dependencies = [

[[package]]
name = "tempfile"
version = "3.2.0"
version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4"
dependencies = [
 "cfg-if",
 "fastrand",
 "libc",
 "rand",
 "redox_syscall",
 "remove_dir_all",
 "winapi",
@@ -475,9 +459,9 @@ dependencies = [

[[package]]
name = "toml"
version = "0.5.8"
version = "0.5.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa"
checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7"
dependencies = [
 "serde",
]
@@ -490,9 +474,9 @@ checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"

[[package]]
name = "unicode-xid"
version = "0.2.2"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04"

[[package]]
name = "uuid"
@@ -545,18 +529,18 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

[[package]]
name = "zeroize"
version = "1.4.2"
version = "1.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf68b08513768deaa790264a7fac27a58cbf2705cfcdc9448362229217d7e970"
checksum = "94693807d016b2f2d2e14420eb3bfcca689311ff775dcf113d74ea624b7cdf07"
dependencies = [
 "zeroize_derive",
]

[[package]]
name = "zeroize_derive"
version = "1.2.0"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bdff2024a851a322b08f179173ae2ba620445aef1e838f0c196820eade4ae0c7"
checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17"
dependencies = [
 "proc-macro2",
 "quote",
SPECS/389-ds-base.spec

@@ -25,7 +25,7 @@ ExcludeArch: i686

%if %{bundle_jemalloc}
%global jemalloc_name jemalloc
%global jemalloc_ver 5.2.1
%global jemalloc_ver 5.3.0
%global __provides_exclude ^libjemalloc\\.so.*$
%endif

@@ -47,9 +47,9 @@ ExcludeArch: i686

Summary: 389 Directory Server (base)
Name: 389-ds-base
Version: 1.4.3.28
Version: 1.4.3.30
Release: %{?relprefix}6%{?prerel}%{?dist}
License: GPLv3+
License: GPLv3+ and MIT and (ASL 2.0 or MIT) and (ASL 2.0 or Boost) and MPLv2.0 and ASL 2.0 and BSD and (ASL 2.0 with exceptions or ASL 2.0 or MIT) and (Unlicense or MIT)
URL: https://www.port389.org
Group: System Environment/Daemons
Conflicts: selinux-policy-base < 3.9.8
@@ -58,61 +58,59 @@ Obsoletes: %{name} <= 1.4.0.9
Provides: ldif2ldbm >= 0

##### Bundled cargo crates list - START #####
Provides: bundled(crate(ansi_term)) = 0.11.0
Provides: bundled(crate(ansi_term)) = 0.12.1
Provides: bundled(crate(atty)) = 0.2.14
Provides: bundled(crate(autocfg)) = 1.0.1
Provides: bundled(crate(autocfg)) = 1.1.0
Provides: bundled(crate(base64)) = 0.13.0
Provides: bundled(crate(bitflags)) = 1.3.2
Provides: bundled(crate(byteorder)) = 1.4.3
Provides: bundled(crate(cbindgen)) = 0.9.1
Provides: bundled(crate(cc)) = 1.0.71
Provides: bundled(crate(cc)) = 1.0.73
Provides: bundled(crate(cfg-if)) = 1.0.0
Provides: bundled(crate(clap)) = 2.33.3
Provides: bundled(crate(clap)) = 2.34.0
Provides: bundled(crate(entryuuid)) = 0.1.0
Provides: bundled(crate(entryuuid_syntax)) = 0.1.0
Provides: bundled(crate(fastrand)) = 1.7.0
Provides: bundled(crate(fernet)) = 0.1.4
Provides: bundled(crate(foreign-types)) = 0.3.2
Provides: bundled(crate(foreign-types-shared)) = 0.1.1
Provides: bundled(crate(getrandom)) = 0.2.3
Provides: bundled(crate(getrandom)) = 0.2.6
Provides: bundled(crate(hermit-abi)) = 0.1.19
Provides: bundled(crate(itoa)) = 0.4.8
Provides: bundled(crate(instant)) = 0.1.12
Provides: bundled(crate(itoa)) = 1.0.1
Provides: bundled(crate(jobserver)) = 0.1.24
Provides: bundled(crate(lazy_static)) = 1.4.0
Provides: bundled(crate(libc)) = 0.2.104
Provides: bundled(crate(libc)) = 0.2.125
Provides: bundled(crate(librnsslapd)) = 0.1.0
Provides: bundled(crate(librslapd)) = 0.1.0
Provides: bundled(crate(log)) = 0.4.14
Provides: bundled(crate(once_cell)) = 1.8.0
Provides: bundled(crate(openssl)) = 0.10.36
Provides: bundled(crate(openssl-sys)) = 0.9.67
Provides: bundled(crate(log)) = 0.4.17
Provides: bundled(crate(once_cell)) = 1.10.0
Provides: bundled(crate(openssl)) = 0.10.40
Provides: bundled(crate(openssl-macros)) = 0.1.0
Provides: bundled(crate(openssl-sys)) = 0.9.73
Provides: bundled(crate(paste)) = 0.1.18
Provides: bundled(crate(paste-impl)) = 0.1.18
Provides: bundled(crate(pkg-config)) = 0.3.20
Provides: bundled(crate(ppv-lite86)) = 0.2.14
Provides: bundled(crate(pkg-config)) = 0.3.25
Provides: bundled(crate(proc-macro-hack)) = 0.5.19
Provides: bundled(crate(proc-macro2)) = 1.0.30
Provides: bundled(crate(quote)) = 1.0.10
Provides: bundled(crate(rand)) = 0.8.4
Provides: bundled(crate(rand_chacha)) = 0.3.1
Provides: bundled(crate(rand_core)) = 0.6.3
Provides: bundled(crate(rand_hc)) = 0.3.1
Provides: bundled(crate(redox_syscall)) = 0.2.10
Provides: bundled(crate(proc-macro2)) = 1.0.38
Provides: bundled(crate(quote)) = 1.0.18
Provides: bundled(crate(redox_syscall)) = 0.2.13
Provides: bundled(crate(remove_dir_all)) = 0.5.3
Provides: bundled(crate(rsds)) = 0.1.0
Provides: bundled(crate(ryu)) = 1.0.5
Provides: bundled(crate(serde)) = 1.0.130
Provides: bundled(crate(serde_derive)) = 1.0.130
Provides: bundled(crate(serde_json)) = 1.0.68
Provides: bundled(crate(ryu)) = 1.0.9
Provides: bundled(crate(serde)) = 1.0.137
Provides: bundled(crate(serde_derive)) = 1.0.137
Provides: bundled(crate(serde_json)) = 1.0.81
Provides: bundled(crate(slapd)) = 0.1.0
Provides: bundled(crate(slapi_r_plugin)) = 0.1.0
Provides: bundled(crate(strsim)) = 0.8.0
Provides: bundled(crate(syn)) = 1.0.80
Provides: bundled(crate(syn)) = 1.0.94
Provides: bundled(crate(synstructure)) = 0.12.6
Provides: bundled(crate(tempfile)) = 3.2.0
Provides: bundled(crate(tempfile)) = 3.3.0
Provides: bundled(crate(textwrap)) = 0.11.0
Provides: bundled(crate(toml)) = 0.5.8
Provides: bundled(crate(toml)) = 0.5.9
Provides: bundled(crate(unicode-width)) = 0.1.9
Provides: bundled(crate(unicode-xid)) = 0.2.2
Provides: bundled(crate(unicode-xid)) = 0.2.3
Provides: bundled(crate(uuid)) = 0.8.2
Provides: bundled(crate(vcpkg)) = 0.2.15
Provides: bundled(crate(vec_map)) = 0.8.2
@@ -120,12 +118,12 @@ Provides: bundled(crate(wasi)) = 0.10.2+wasi_snapshot_preview1
Provides: bundled(crate(winapi)) = 0.3.9
Provides: bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0
Provides: bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0
Provides: bundled(crate(zeroize)) = 1.4.2
Provides: bundled(crate(zeroize_derive)) = 1.2.0
Provides: bundled(crate(zeroize)) = 1.5.5
Provides: bundled(crate(zeroize_derive)) = 1.3.2
##### Bundled cargo crates list - END #####

BuildRequires: nspr-devel
BuildRequires: nss-devel >= 3.34
BuildRequires: nspr-devel >= 4.32
BuildRequires: nss-devel >= 3.67.0-7
BuildRequires: perl-generators
BuildRequires: openldap-devel
BuildRequires: libdb-devel
@@ -207,7 +205,8 @@ Requires: python%{python3_pkgversion}-ldap
# this is needed to setup SSL if you are not using the
# administration server package
Requires: nss-tools
Requires: nss >= 3.34
Requires: nspr >= 4.32
Requires: nss >= 3.67.0-7

# these are not found by the auto-dependency method
# they are required to support the mandatory LDAP SASL mechs
@@ -249,23 +248,23 @@ Source4: vendor-%{version}-1.tar.gz
Source5: Cargo.lock
%endif

Patch01: 0001-Issue-4678-RFE-automatique-disable-of-virtual-attrib.patch
Patch02: 0002-Issue-4943-Fix-csn-generator-to-limit-time-skew-drif.patch
Patch03: 0003-Issue-3584-Fix-PBKDF2_SHA256-hashing-in-FIPS-mode-49.patch
Patch04: 0004-Issue-4956-Automember-allows-invalid-regex-and-does-.patch
Patch05: 0005-Issue-4092-systemd-tmpfiles-warnings.patch
Patch06: 0006-Issue-4973-installer-changes-permissions-on-run.patch
Patch07: 0007-Issue-4973-update-snmp-to-use-run-dirsrv-for-PID-fil.patch
Patch08: 0008-Issue-4978-make-installer-robust.patch
Patch09: 0009-Issue-4972-gecos-with-IA5-introduces-a-compatibility.patch
Patch10: 0010-Issue-4997-Function-declaration-compiler-error-on-1..patch
Patch11: 0011-Issue-4978-use-more-portable-python-command-for-chec.patch
Patch12: 0012-Issue-4959-Invalid-etc-hosts-setup-can-cause-isLocal.patch
Patch13: 0013-CVE-2021-4091-BZ-2030367-double-free-of-the-virtual-.patch
Patch14: 0014-Issue-5127-run-restorecon-on-dev-shm-at-server-start.patch
Patch15: 0015-Issue-5127-ds_selinux_restorecon.sh-always-exit-0.patch
Patch16: 0016-Issue-4775-Add-entryuuid-CLI-and-Fixup-4776.patch
Patch17: 0017-Issue-4775-Fix-cherry-pick-error.patch
Patch01: 0001-Revert-4866-cl-trimming-not-applicable-in-1.4.3.patch
Patch02: 0002-Issue-4877-RFE-EntryUUID-to-validate-UUIDs-on-fixup-.patch
Patch03: 0003-Issue-5126-Memory-leak-in-slapi_ldap_get_lderrno-515.patch
Patch04: 0004-Issue-5085-Race-condition-about-snmp-collator-at-sta.patch
Patch05: 0005-Issue-5079-BUG-multiple-ways-to-specific-primary-508.patch
Patch06: 0006-Issue-3903-Supplier-should-do-periodic-updates.patch
Patch07: 0007-Issue-5399-UI-LDAP-Editor-is-not-updated-when-we-swi.patch
Patch08: 0008-Issue-5397-Fix-various-memory-leaks.patch
Patch09: 0009-Issue-3903-keep-alive-update-event-starts-too-soon.patch
Patch10: 0010-Issue-5397-Fix-check-pick-error.patch
Patch11: 0011-Issue-5397-Fix-check-pick-error-2.patch
Patch12: 0012-Issue-3903-Fix-another-cherry-pick-error.patch
Patch13: 0013-Issue-5329-Improve-replication-extended-op-logging.patch
Patch14: 0014-Issue-5412-lib389-do-not-set-backend-name-to-lowerca.patch
Patch15: 0015-Issue-5418-Sync_repl-may-crash-while-managing-invali.patch
Patch16: 0016-Issue-3903-fix-repl-keep-alive-event-interval.patch


%description
389 Directory Server is an LDAPv3 compliant server. The base package includes
@@ -279,8 +278,8 @@ Please see http://seclists.org/oss-sec/2016/q1/363 for more information.
%package libs
Summary: Core libraries for 389 Directory Server
Group: System Environment/Daemons
BuildRequires: nspr-devel
BuildRequires: nss-devel >= 3.34
BuildRequires: nspr-devel >= 4.32
BuildRequires: nss-devel >= 3.67.0-7
BuildRequires: openldap-devel
BuildRequires: libdb-devel
BuildRequires: cyrus-sasl-devel
@@ -333,8 +332,8 @@ Summary: Development libraries for 389 Directory Server
Group: Development/Libraries
Requires: %{name}-libs = %{version}-%{release}
Requires: pkgconfig
Requires: nspr-devel
Requires: nss-devel >= 3.34
Requires: nspr-devel >= 4.32
Requires: nss-devel >= 3.67.0-7
Requires: openldap-devel
Requires: libtalloc
Requires: libevent
@@ -361,6 +360,7 @@ SNMP Agent for the 389 Directory Server base package.
Summary: A library for accessing, testing, and configuring the 389 Directory Server
BuildArch: noarch
Group: Development/Libraries
Requires: 389-ds-base
Requires: openssl
Requires: iproute
Requires: platform-python
@@ -885,42 +885,20 @@ exit 0
%doc README.md

%changelog
* Thu Feb 3 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-6
- Bump version to 1.4.3.28-6
- Resolves: Bug 2047171 - Based on 1944494 (RFC 4530 entryUUID attribute) - plugin entryuuid failing

* Fri Jan 28 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-5
- Bump version to 1.4.3.28-5
- Resolves: Bug 2045223 - ipa-restore command is failing when restore after uninstalling the server (part 2)

* Tue Jan 25 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-4
- Bump version to 1.4.3.28-4
- Resolves: Bug 2045223 - ipa-restore command is failing when restore after uninstalling the server

* Thu Nov 18 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-3
- Bump version to 1.4.3.28-3
- Resolves: Bug 2030367 - EMBARGOED CVE-2021-4091 389-ds:1.4/389-ds-base: double-free of the virtual attribute context in persistent search
- Resolves: Bug 2033398 - PBKDF2 hashing does not work in FIPS mode

* Thu Nov 18 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-2
- Bump version to 1.4.3.28-2
- Resolves: Bug 2024695 - DB corruption "_entryrdn_insert_key - Same DN (dn: nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff,<SUFFIX>) is already in the entryrdn file"
- Resolves: Bug 1859210 - systemd-tmpfiles warnings
- Resolves: Bug 1913199 - IPA server (389ds) is very slow in execution of some searches (`&(memberOf=...)(objectClass=ipaHost)` in particular)
- Resolves: Bug 1974236 - automatique disable of virtual attribute checking
- Resolves: Bug 1976882 - logconv.pl -j: Use of uninitialized value $first in numeric gt (>)
- Resolves: Bug 1981281 - ipa user-add fails with "gecos: value invalid per syntax: Invalid syntax"
- Resolves: Bug 2015998 - Log the Auto Member invalid regex rules in the LDAP errors log

* Thu Oct 21 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-1
- Bump version to 1.4.3.28-1
- Resolves: Bug 2016014 - rebase RHEL 8.6 with 389-ds-base-1.4.3
- Resolves: Bug 1990002 - monitor displays wrong date for connection
- Resolves: Bug 1950335 - upgrade password hash on bind also causes passwordExpirationtime to be updated
- Resolves: Bug 1916292 - Indexing a single backend actually processes all configured backends
- Resolves: Bug 1780842 - [RFE] set db home directory to /dev/shm by default
- Resolves: Bug 2000975 - Retro Changelog does not trim changes
* Thu Aug 18 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.30-6
- Bump version to 1.4.3.30-6
- Resolves: Bug 2113002 - ipa-replica-manage --connect --winsync fails with traceback
- Resolves: Bug 2118763 - SIGSEGV in sync_repl

* Mon Aug 8 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.30-5
- Bump version to 1.4.3.30-5
- Resolves: Bug 2113002 - ipa-replica-manage --connect --winsync fails with traceback

* Thu Jul 28 2022 Thierry Bordaz <tbordaz@redhat.com> - 1.4.3.30-4
- Bump version to 1.4.3.30-4
- Resolves: Bug 2085562 - Rebase 389-ds-base in 8.7

* Fri Jun 10 2022 Thierry Bordaz <tbordaz@redhat.com> - 1.4.3.30-3
- Bump version to 1.4.3.30-3
- Resolves: Bug 2085562 - Rebase 389-ds-base in 8.7