Compare commits
No commits in common. "c8-stream-1.4" and "stream-1.4-rhel-8.6.0" have entirely different histories.
(deleted file, 3 lines)
@@ -1,3 +0,0 @@
-bd9aab32d9cbf9231058d585479813f3420dc872 SOURCES/389-ds-base-1.4.3.39.tar.bz2
-1c8f2d0dfbf39fa8cd86363bf3314351ab21f8d4 SOURCES/jemalloc-5.3.0.tar.bz2
-8d3275209f2f8e1a69053340930ad1fb037d61fb SOURCES/vendor-1.4.3.39-3.tar.gz
.gitignore (vendored, 6 changes)
@@ -1,3 +1,3 @@
-SOURCES/389-ds-base-1.4.3.39.tar.bz2
-SOURCES/jemalloc-5.3.0.tar.bz2
-SOURCES/vendor-1.4.3.39-3.tar.gz
+SOURCES/389-ds-base-1.4.3.28.tar.bz2
+SOURCES/jemalloc-5.2.1.tar.bz2
+SOURCES/vendor-1.4.3.28-1.tar.gz
0001-Issue-4678-RFE-automatique-disable-of-virtual-attrib.patch (new file, 738 lines)
From 67e19da62a9e8958458de54173dcd9bcaf53164d Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 30 Sep 2021 15:59:40 +0200
Subject: [PATCH 01/12] Issue 4678 - RFE automatique disable of virtual
 attribute checking (#4918)

Bug description:
Virtual attributes are configured via Roles or COS definitions
and registered during initialization of those plugins.
Virtual attributes are processed during search evaluation of
filter and returned attributes. This processing is expensive
and prone to create contention between searches.
Use of virtual attribute is not frequent. So many of the
deployement process virtual attribute even if there is none.

Fix description:
The fix configure the server to ignore virtual attribute by
default (nsslapd-ignore-virtual-attrs: on).
At startup, if a new virtual attribute is registered or
it exists Roles/COS definitions, then the server is
configured to process the virtual attributes
(nsslapd-ignore-virtual-attrs: off)
design: https://www.port389.org/docs/389ds/design/vattr-automatic-toggle.html

relates: https://github.com/389ds/389-ds-base/issues/4678

Reviewed by: William Brown, Simon Pichugin, Mark Reynolds (Thanks !!)

Platforms tested: F34
---
 .../tests/suites/config/config_test.py | 40 +++-
 dirsrvtests/tests/suites/cos/cos_test.py | 94 ++++++--
 dirsrvtests/tests/suites/roles/basic_test.py | 200 +++++++++++++++++-
 ldap/servers/plugins/roles/roles_cache.c | 9 +
 ldap/servers/slapd/libglobs.c | 2 +-
 ldap/servers/slapd/main.c | 2 +
 ldap/servers/slapd/proto-slap.h | 1 +
 ldap/servers/slapd/vattr.c | 127 +++++++++++
 src/lib389/lib389/idm/role.py | 4 +
 9 files changed, 455 insertions(+), 24 deletions(-)

diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
index 2ecff8f98..19232c87d 100644
--- a/dirsrvtests/tests/suites/config/config_test.py
+++ b/dirsrvtests/tests/suites/config/config_test.py
@@ -351,7 +351,7 @@ def test_ignore_virtual_attrs(topo):
:setup: Standalone instance
:steps:
1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
- 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF
+ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
3. Set the valid values i.e. on/ON and off/OFF for nsslapd-ignore-virtual-attrs
4. Set invalid value for attribute nsslapd-ignore-virtual-attrs
5. Set nsslapd-ignore-virtual-attrs=off
@@ -374,8 +374,8 @@ def test_ignore_virtual_attrs(topo):
log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')

- log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
- assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "off"
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"

log.info("Set the valid values i.e. on/ON and off/OFF for nsslapd-ignore-virtual-attrs")
for attribute_value in ['on', 'off', 'ON', 'OFF']:
@@ -415,6 +415,40 @@ def test_ignore_virtual_attrs(topo):
log.info("Test if virtual attribute i.e. postal code not shown while nsslapd-ignore-virtual-attrs: on")
assert not test_user.present('postalcode', '117')

+def test_ignore_virtual_attrs_after_restart(topo):
+ """Test nsslapd-ignore-virtual-attrs configuration attribute
+ The attribute is ON by default. If it set to OFF, it keeps
+ its value on restart
+
+ :id: ac368649-4fda-473c-9ef8-e0c728b162af
+ :setup: Standalone instance
+ :steps:
+ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
+ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
+ 3. Set nsslapd-ignore-virtual-attrs=off
+ 4. restart the instance
+ 5. Check the attribute nsslapd-ignore-virtual-attrs is OFF
+ :expectedresults:
+ 1. This should be successful
+ 2. This should be successful
+ 3. This should be successful
+ 4. This should be successful
+ 5. This should be successful
+ """
+
+ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
+
+ log.info("Set nsslapd-ignore-virtual-attrs = off")
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'off')
+
+ topo.standalone.restart()
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')

@pytest.mark.bz918694
@pytest.mark.ds408
diff --git a/dirsrvtests/tests/suites/cos/cos_test.py b/dirsrvtests/tests/suites/cos/cos_test.py
index d6a498c73..d1f99f96f 100644
--- a/dirsrvtests/tests/suites/cos/cos_test.py
+++ b/dirsrvtests/tests/suites/cos/cos_test.py
@@ -6,6 +6,8 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---

+import logging
+import time
import pytest, os, ldap
from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate
from lib389._constants import DEFAULT_SUFFIX
@@ -14,26 +16,37 @@ from lib389.idm.role import FilteredRoles
from lib389.idm.nscontainer import nsContainer
from lib389.idm.user import UserAccount

+logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
pytestmark = pytest.mark.tier1
+@pytest.fixture(scope="function")
+def reset_ignore_vattr(topo, request):
+ default_ignore_vattr_value = topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs')
+ def fin():
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', default_ignore_vattr_value)

-def test_positive(topo):
- """
- :id: a5a74235-597f-4fe8-8c38-826860927472
- :setup: server
- :steps:
- 1. Add filter role entry
- 2. Add ns container
- 3. Add cos template
- 4. Add CosClassic Definition
- 5. Cos entries should be added and searchable
- 6. employeeType attribute should be there in user entry as per the cos plugin property
- :expectedresults:
- 1. Operation should success
- 2. Operation should success
- 3. Operation should success
- 4. Operation should success
- 5. Operation should success
- 6. Operation should success
+ request.addfinalizer(fin)
+
+def test_positive(topo, reset_ignore_vattr):
+ """CoS positive tests
+
+ :id: a5a74235-597f-4fe8-8c38-826860927472
+ :setup: server
+ :steps:
+ 1. Add filter role entry
+ 2. Add ns container
+ 3. Add cos template
+ 4. Add CosClassic Definition
+ 5. Cos entries should be added and searchable
+ 6. employeeType attribute should be there in user entry as per the cos plugin property
+ :expectedresults:
+ 1. Operation should success
+ 2. Operation should success
+ 3. Operation should success
+ 4. Operation should success
+ 5. Operation should success
+ 6. Operation should success
"""
# Adding ns filter role
roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
@@ -77,7 +90,52 @@ def test_positive(topo):

# CoS definition entry's cosSpecifier attribute specifies the employeeType attribute
assert user.present('employeeType')
+ cosdef.delete()
+
+def test_vattr_on_cos_definition(topo, reset_ignore_vattr):
+ """Test nsslapd-ignore-virtual-attrs configuration attribute
+ The attribute is ON by default. If a cos definition is
+ added it is moved to OFF
+
+ :id: e7ef5254-386f-4362-bbb4-9409f3f51b08
+ :setup: Standalone instance
+ :steps:
+ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
+ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
+ 3. Create a cos definition for employeeType
+ 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF (with a delay for postop processing)
+ 5. Check a message "slapi_vattrspi_regattr - Because employeeType,.." in error logs
+ :expectedresults:
+ 1. This should be successful
+ 2. This should be successful
+ 3. This should be successful
+ 4. This should be successful
+ 5. This should be successful
+ """
+
+ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
+
+ # creating CosClassicDefinition
+ log.info("Create a cos definition")
+ properties = {'cosTemplateDn': 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX),
+ 'cosAttribute': 'employeeType',
+ 'cosSpecifier': 'nsrole',
+ 'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'}
+ cosdef = CosClassicDefinition(topo.standalone,'cn=cosClassicGenerateEmployeeTypeUsingnsrole,{}'.format(DEFAULT_SUFFIX))\
+ .create(properties=properties)
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
+ time.sleep(2)
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')

+ topo.standalone.stop()
+ assert topo.standalone.searchErrorsLog("slapi_vattrspi_regattr - Because employeeType is a new registered virtual attribute , nsslapd-ignore-virtual-attrs was set to \'off\'")
+ topo.standalone.start()
+ cosdef.delete()

if __name__ == "__main__":
CURRENT_FILE = os.path.realpath(__file__)
diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py
index 47a531794..bec3aedfc 100644
--- a/dirsrvtests/tests/suites/roles/basic_test.py
+++ b/dirsrvtests/tests/suites/roles/basic_test.py
@@ -11,6 +11,8 @@
Importing necessary Modules.
"""

+import logging
+import time
import os
import pytest

@@ -22,6 +24,9 @@ from lib389.topologies import topology_st as topo
from lib389.idm.role import FilteredRoles, ManagedRoles, NestedRoles
from lib389.idm.domain import Domain

+logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
pytestmark = pytest.mark.tier1

DNBASE = "o=acivattr,{}".format(DEFAULT_SUFFIX)
@@ -35,7 +40,7 @@ FILTERROLESALESROLE = "cn=FILTERROLESALESROLE,{}".format(DNBASE)
FILTERROLEENGROLE = "cn=FILTERROLEENGROLE,{}".format(DNBASE)


-def test_filterrole(topo):
+def test_filterrole(topo, request):
"""Test Filter Role

:id: 8ada4064-786b-11e8-8634-8c16451d917b
@@ -136,8 +141,20 @@ def test_filterrole(topo):
SALES_OU, DNBASE]:
UserAccount(topo.standalone, dn_dn).delete()

+ def fin():
+ topo.standalone.restart()
+ try:
+ filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
+ for i in filtered_roles.list():
+ i.delete()
+ except:
+ pass
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
+
+ request.addfinalizer(fin)
+

-def test_managedrole(topo):
+def test_managedrole(topo, request):
"""Test Managed Role

:id: d52a9c00-3bf6-11e9-9b7b-8c16451d917b
@@ -209,6 +226,16 @@ def test_managedrole(topo):
for i in roles.list():
i.delete()

+ def fin():
+ topo.standalone.restart()
+ try:
+ role = ManagedRoles(topo.standalone, DEFAULT_SUFFIX).get('ROLE1')
+ role.delete()
+ except:
+ pass
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
+
+ request.addfinalizer(fin)

@pytest.fixture(scope="function")
def _final(request, topo):
@@ -220,6 +247,7 @@ def _final(request, topo):
def finofaci():
"""
Removes and Restores ACIs and other users after the test.
+ And restore nsslapd-ignore-virtual-attrs to default
"""
domain = Domain(topo.standalone, DEFAULT_SUFFIX)
domain.remove_all('aci')
@@ -234,6 +262,8 @@ def _final(request, topo):
for i in aci_list:
domain.add("aci", i)

+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
+
request.addfinalizer(finofaci)


@@ -296,6 +326,172 @@ def test_nestedrole(topo, _final):
conn = users.get('test_user_3').bind(PW_DM)
assert UserAccounts(conn, DEFAULT_SUFFIX).list()

+def test_vattr_on_filtered_role(topo, request):
+ """Test nsslapd-ignore-virtual-attrs configuration attribute
+ The attribute is ON by default. If a filtered role is
+ added it is moved to OFF
+
+ :id: 88b3ad3c-f39a-4eb7-a8c9-07c685f11908
+ :setup: Standalone instance
+ :steps:
+ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
+ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
+ 3. Create a filtered role
+ 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF
+ 5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs
+ :expectedresults:
+ 1. This should be successful
+ 2. This should be successful
+ 3. This should be successful
+ 4. This should be successful
+ 5. This should be successful
+ """
+
+ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
+
+ log.info("Create a filtered role")
+ try:
+ Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX)
+ except:
+ pass
+ roles = FilteredRoles(topo.standalone, DNBASE)
+ roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'})
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
+
+ topo.standalone.stop()
+ assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'")
+
+ def fin():
+ topo.standalone.restart()
+ try:
+ filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
+ for i in filtered_roles.list():
+ i.delete()
+ except:
+ pass
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
+
+ request.addfinalizer(fin)
+
+def test_vattr_on_filtered_role_restart(topo, request):
+ """Test nsslapd-ignore-virtual-attrs configuration attribute
+ If it exists a filtered role definition at restart then
+ nsslapd-ignore-virtual-attrs should be set to 'off'
+
+ :id: 972183f7-d18f-40e0-94ab-580e7b7d78d0
+ :setup: Standalone instance
+ :steps:
+ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
+ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
+ 3. Create a filtered role
+ 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF
+ 5. restart the instance
+ 6. Check the presence of virtual attribute is detected
+ 7. Check the value of nsslapd-ignore-virtual-attrs should be OFF
+ :expectedresults:
+ 1. This should be successful
+ 2. This should be successful
+ 3. This should be successful
+ 4. This should be successful
+ 5. This should be successful
+ 6. This should be successful
+ 7. This should be successful
+ """
+
+ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
+
+ log.info("Create a filtered role")
+ try:
+ Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX)
+ except:
+ pass
+ roles = FilteredRoles(topo.standalone, DNBASE)
+ roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'})
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
+
+
+ log.info("Check the virtual attribute definition is found (after a required delay)")
+ topo.standalone.restart()
+ time.sleep(5)
+ assert topo.standalone.searchErrorsLog("Found a role/cos definition in")
+ assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'")
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
+
+ def fin():
+ topo.standalone.restart()
+ try:
+ filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
+ for i in filtered_roles.list():
+ i.delete()
+ except:
+ pass
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
+
+ request.addfinalizer(fin)
+
+
+def test_vattr_on_managed_role(topo, request):
+ """Test nsslapd-ignore-virtual-attrs configuration attribute
+ The attribute is ON by default. If a managed role is
+ added it is moved to OFF
+
+ :id: 664b722d-c1ea-41e4-8f6c-f9c87a212346
+ :setup: Standalone instance
+ :steps:
+ 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config
+ 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON
+ 3. Create a managed role
+ 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF
+ 5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs
+ :expectedresults:
+ 1. This should be successful
+ 2. This should be successful
+ 3. This should be successful
+ 4. This should be successful
+ 5. This should be successful
+ """
+
+ log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs')
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
+ assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
+
+ log.info("Create a managed role")
+ roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX)
+ role = roles.create(properties={"cn": 'ROLE1'})
+
+ log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
+ assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
+
+ topo.standalone.stop()
+ assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'")
+
+ def fin():
+ topo.standalone.restart()
+ try:
+ filtered_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX)
+ for i in filtered_roles.list():
+ i.delete()
+ except:
+ pass
+ topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on')
+
+ request.addfinalizer(fin)

if __name__ == "__main__":
CURRENT_FILE = os.path.realpath(__file__)
diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c
index 3d076a4cb..cd00e0aba 100644
--- a/ldap/servers/plugins/roles/roles_cache.c
+++ b/ldap/servers/plugins/roles/roles_cache.c
@@ -530,6 +530,15 @@ roles_cache_trigger_update_role(char *dn, Slapi_Entry *roles_entry, Slapi_DN *be
}

slapi_rwlock_unlock(global_lock);
+ {
+ /* A role definition has been updated, enable vattr handling */
+ char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
+ errorbuf[0] = '\0';
+ config_set_ignore_vattrs(CONFIG_IGNORE_VATTRS, "off", errorbuf, 1);
+ slapi_log_err(SLAPI_LOG_INFO,
+ "roles_cache_trigger_update_role",
+ "Because of virtual attribute definition (role), %s was set to 'off'\n", CONFIG_IGNORE_VATTRS);
+ }

slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, "<-- roles_cache_trigger_update_role: %p \n", roles_list);
}
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 2ea4cd760..f6dacce30 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -1803,7 +1803,7 @@ FrontendConfig_init(void)
init_ndn_cache_enabled = cfg->ndn_cache_enabled = LDAP_ON;
cfg->ndn_cache_max_size = SLAPD_DEFAULT_NDN_SIZE;
init_sasl_mapping_fallback = cfg->sasl_mapping_fallback = LDAP_OFF;
- init_ignore_vattrs = cfg->ignore_vattrs = LDAP_OFF;
+ init_ignore_vattrs = cfg->ignore_vattrs = LDAP_ON;
cfg->sasl_max_bufsize = SLAPD_DEFAULT_SASL_MAXBUFSIZE;
cfg->unhashed_pw_switch = SLAPD_DEFAULT_UNHASHED_PW_SWITCH;
init_return_orig_type = cfg->return_orig_type = LDAP_OFF;
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 4931a4ca4..61ed40b7d 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -1042,6 +1042,8 @@ main(int argc, char **argv)
eq_start(); /* must be done after plugins started - DEPRECATED */
eq_start_rel(); /* must be done after plugins started */

+ vattr_check(); /* Check if it exists virtual attribute definitions */
+
#ifdef HPUX10
/* HPUX linker voodoo */
if (collation_init == NULL) {
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index c143f3772..442a621aa 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1462,6 +1462,7 @@ void subentry_create_filter(Slapi_Filter **filter);
*/
void vattr_init(void);
void vattr_cleanup(void);
+void vattr_check(void);

/*
* slapd_plhash.c - supplement to NSPR plhash
diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c
index 09dab6ecf..24750a57c 100644
--- a/ldap/servers/slapd/vattr.c
+++ b/ldap/servers/slapd/vattr.c
@@ -64,6 +64,10 @@
#define SOURCEFILE "vattr.c"
static char *sourcefile = SOURCEFILE;

+/* stolen from roles_cache.h, must remain in sync */
+#define NSROLEATTR "nsRole"
+static Slapi_Eq_Context vattr_check_ctx = {0};
+
/* Define only for module test code */
/* #define VATTR_TEST_CODE */

@@ -130,6 +134,112 @@ vattr_cleanup()
{
/* We need to free and remove anything that was inserted first */
vattr_map_destroy();
+ slapi_eq_cancel_rel(vattr_check_ctx);
+}
+
+static void
+vattr_check_thread(void *arg)
+{
+ Slapi_Backend *be = NULL;
+ char *cookie = NULL;
+ Slapi_DN *base_sdn = NULL;
+ Slapi_PBlock *search_pb = NULL;
+ Slapi_Entry **entries = NULL;
+ int32_t rc;
+ int32_t check_suffix; /* used to skip suffixes in ignored_backend */
+ PRBool exist_vattr_definition = PR_FALSE;
+ char *ignored_backend[5] = {"cn=config", "cn=schema", "cn=monitor", "cn=changelog", NULL}; /* suffixes to ignore */
+ char *suffix;
+ int ignore_vattrs;
+
+ ignore_vattrs = config_get_ignore_vattrs();
+
+ if (!ignore_vattrs) {
+ /* Nothing to do more, we are already evaluating virtual attribute */
+ return;
+ }
+
+ search_pb = slapi_pblock_new();
+ be = slapi_get_first_backend(&cookie);
+ while (be && !exist_vattr_definition && !slapi_is_shutting_down()) {
+ base_sdn = (Slapi_DN *) slapi_be_getsuffix(be, 0);
+ suffix = (char *) slapi_sdn_get_dn(base_sdn);
+
+ if (suffix) {
+ /* First check that we need to check that suffix */
+ check_suffix = 1;
+ for (size_t i = 0; ignored_backend[i]; i++) {
+ if (strcasecmp(suffix, ignored_backend[i]) == 0) {
+ check_suffix = 0;
+ break;
+ }
+ }
+
+ /* search for a role or cos definition */
+ if (check_suffix) {
+ slapi_search_internal_set_pb(search_pb, slapi_sdn_get_dn(base_sdn),
+ LDAP_SCOPE_SUBTREE, "(&(objectclass=ldapsubentry)(|(objectclass=nsRoleDefinition)(objectclass=cosSuperDefinition)))",
+ NULL, 0, NULL, NULL, (void *) plugin_get_default_component_id(), 0);
+ slapi_search_internal_pb(search_pb);
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
+
+ if (rc == LDAP_SUCCESS) {
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
+ if (entries && entries[0]) {
+ /* it exists at least a cos or role definition */
+ exist_vattr_definition = PR_TRUE;
+ slapi_log_err(SLAPI_LOG_INFO,
+ "vattr_check_thread",
+ "Found a role/cos definition in %s\n", slapi_entry_get_dn(entries[0]));
+ } else {
+ slapi_log_err(SLAPI_LOG_INFO,
+ "vattr_check_thread",
+ "No role/cos definition in %s\n", slapi_sdn_get_dn(base_sdn));
+ }
+ }
+ slapi_free_search_results_internal(search_pb);
+ } /* check_suffix */
+ } /* suffix */
+ be = (backend *) slapi_get_next_backend(cookie);
+ }
+ slapi_pblock_destroy(search_pb);
+ slapi_ch_free_string(&cookie);
+
+ /* Now if a virtual attribute is defined, then CONFIG_IGNORE_VATTRS -> off */
+ if (exist_vattr_definition) {
+ char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
+ errorbuf[0] = '\0';
+ config_set_ignore_vattrs(CONFIG_IGNORE_VATTRS, "off", errorbuf, 1);
+ slapi_log_err(SLAPI_LOG_INFO,
+ "vattr_check_thread",
+ "Because of virtual attribute definition, %s was set to 'off'\n", CONFIG_IGNORE_VATTRS);
+ }
+}
+static void
+vattr_check_schedule_once(time_t when __attribute__((unused)), void *arg)
+{
+ if (PR_CreateThread(PR_USER_THREAD,
+ vattr_check_thread,
+ (void *) arg,
+ PR_PRIORITY_NORMAL,
+ PR_GLOBAL_THREAD,
+ PR_UNJOINABLE_THREAD,
+ SLAPD_DEFAULT_THREAD_STACKSIZE) == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR,
+ "vattr_check_schedule_once",
+ "Fails to check if %s needs to be toggled to FALSE\n", CONFIG_IGNORE_VATTRS);
+ }
+}
+#define VATTR_CHECK_DELAY 3
+void
+vattr_check()
+{
+ /* Schedule running a callback that will create a thread
+ * but make sure it is called a first thing when event loop is created */
+ time_t now;
+
+ now = slapi_current_rel_time_t();
+ vattr_check_ctx = slapi_eq_once_rel(vattr_check_schedule_once, NULL, now + VATTR_CHECK_DELAY);
}

/* The public interface functions start here */
@@ -1631,6 +1741,9 @@ slapi_vattrspi_regattr(vattr_sp_handle *h, char *type_name_to_register, char *DN
char *type_to_add;
int free_type_to_add = 0;
Slapi_DN original_dn;
+ int ignore_vattrs;
+
+ ignore_vattrs = config_get_ignore_vattrs();

slapi_sdn_init(&original_dn);

@@ -1676,6 +1789,20 @@ slapi_vattrspi_regattr(vattr_sp_handle *h, char *type_name_to_register, char *DN
if (free_type_to_add) {
slapi_ch_free((void **)&type_to_add);
}
+ if (ignore_vattrs && strcasecmp(type_name_to_register, NSROLEATTR)) {
+ /* A new virtual attribute is registered.
+ * This new vattr being *different* than the default roles vattr 'nsRole'
+ * It is time to allow vattr lookup
+ */
+ char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
+ errorbuf[0] = '\0';
+ config_set_ignore_vattrs(CONFIG_IGNORE_VATTRS, "off", errorbuf, 1);
+ slapi_log_err(SLAPI_LOG_INFO,
+ "slapi_vattrspi_regattr",
+ "Because %s is a new registered virtual attribute , %s was set to 'off'\n",
+ type_name_to_register,
+ CONFIG_IGNORE_VATTRS);
+ }

return ret;
}
diff --git a/src/lib389/lib389/idm/role.py b/src/lib389/lib389/idm/role.py
index fe91aab6f..9a2bff3d6 100644
--- a/src/lib389/lib389/idm/role.py
+++ b/src/lib389/lib389/idm/role.py
@@ -252,6 +252,8 @@ class FilteredRole(Role):
self._rdn_attribute = 'cn'
self._create_objectclasses = ['nsComplexRoleDefinition', 'nsFilteredRoleDefinition']

+ self._protected = False
+


class FilteredRoles(Roles):
@@ -285,6 +287,7 @@ class ManagedRole(Role):
self._rdn_attribute = 'cn'
self._create_objectclasses = ['nsSimpleRoleDefinition', 'nsManagedRoleDefinition']

+ self._protected = False

class ManagedRoles(Roles):
"""DSLdapObjects that represents all Managed Roles entries
@@ -320,6 +323,7 @@ class NestedRole(Role):
self._rdn_attribute = 'cn'
self._create_objectclasses = ['nsComplexRoleDefinition', 'nsNestedRoleDefinition']

+ self._protected = False

class NestedRoles(Roles):
"""DSLdapObjects that represents all NestedRoles entries in suffix.
--
2.31.1
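The auto-toggle that patch 0001 adds can be exercised directly from the lib389 API used in its tests. A minimal sketch, assuming a running standalone instance held in a hypothetical DirSrv object named inst; only calls that appear in the patch itself (config.get_attr_val_utf8, config.set, config.present, restart) are used:

    # Sketch: observe the nsslapd-ignore-virtual-attrs behavior (inst is an assumed lib389 DirSrv)
    assert inst.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"  # new default after this patch
    inst.config.set('nsslapd-ignore-virtual-attrs', 'off')  # an explicit setting...
    inst.restart()
    assert inst.config.present('nsslapd-ignore-virtual-attrs', 'off')  # ...survives restart

Registering a role or COS definition flips the flag to "off" automatically, as the roles_cache.c and vattr.c hunks above show.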
0002-Issue-4943-Fix-csn-generator-to-limit-time-skew-drif.patch (new file, 621 lines)
|
||||
From 968ad6b5039d839bfbc61da755c252cc7598415b Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Mon, 25 Oct 2021 17:09:57 +0200
|
||||
Subject: [PATCH 02/12] Issue 4943 - Fix csn generator to limit time skew drift
|
||||
- PR 4946
|
||||
|
||||
---
|
||||
ldap/servers/slapd/csngen.c | 433 +++++++++++++++++-------------
|
||||
ldap/servers/slapd/slapi-plugin.h | 9 +
|
||||
2 files changed, 255 insertions(+), 187 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/csngen.c b/ldap/servers/slapd/csngen.c
|
||||
index fcd88b4cc..c7c5c2ba8 100644
|
||||
--- a/ldap/servers/slapd/csngen.c
|
||||
+++ b/ldap/servers/slapd/csngen.c
|
||||
@@ -18,8 +18,9 @@
|
||||
#include "prcountr.h"
|
||||
#include "slap.h"
|
||||
|
||||
+
|
||||
#define CSN_MAX_SEQNUM 0xffff /* largest sequence number */
|
||||
-#define CSN_MAX_TIME_ADJUST 24 * 60 * 60 /* maximum allowed time adjustment (in seconds) = 1 day */
|
||||
+#define CSN_MAX_TIME_ADJUST _SEC_PER_DAY /* maximum allowed time adjustment (in seconds) = 1 day */
|
||||
#define ATTR_CSN_GENERATOR_STATE "nsState" /* attribute that stores csn state information */
|
||||
#define STATE_FORMAT "%8x%8x%8x%4hx%4hx"
|
||||
#define STATE_LENGTH 32
|
||||
@@ -27,6 +28,8 @@
|
||||
#define CSN_CALC_TSTAMP(gen) ((gen)->state.sampled_time + \
|
||||
(gen)->state.local_offset + \
|
||||
(gen)->state.remote_offset)
|
||||
+#define TIME_DIFF_WARNING_DELAY (30*_SEC_PER_DAY) /* log an info message when difference
|
||||
+ between clock is greater than this delay */
|
||||
|
||||
/*
|
||||
* **************************************************************************
|
||||
@@ -63,6 +66,7 @@ typedef struct csngen_state
|
||||
struct csngen
|
||||
{
|
||||
csngen_state state; /* persistent state of the generator */
|
||||
+ int32_t (*gettime)(struct timespec *tp); /* Get local time */
|
||||
callback_list callbacks; /* list of callbacks registered with the generator */
|
||||
Slapi_RWLock *lock; /* concurrency control */
|
||||
};
|
||||
@@ -78,7 +82,7 @@ static int _csngen_init_callbacks(CSNGen *gen);
|
||||
static void _csngen_call_callbacks(const CSNGen *gen, const CSN *csn, PRBool abort);
|
||||
static int _csngen_cmp_callbacks(const void *el1, const void *el2);
|
||||
static void _csngen_free_callbacks(CSNGen *gen);
|
||||
-static int _csngen_adjust_local_time(CSNGen *gen, time_t cur_time);
|
||||
+static int _csngen_adjust_local_time(CSNGen *gen);
|
||||
|
||||
/*
|
||||
* **************************************************************************
|
||||
@@ -121,6 +125,7 @@ csngen_new(ReplicaId rid, Slapi_Attr *state)
|
||||
_csngen_init_callbacks(gen);
|
||||
|
||||
gen->state.rid = rid;
|
||||
+ gen->gettime = slapi_clock_utc_gettime;
|
||||
|
||||
if (state) {
|
||||
rc = _csngen_parse_state(gen, state);
|
||||
@@ -164,10 +169,7 @@ csngen_free(CSNGen **gen)
|
||||
int
|
||||
csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
|
||||
{
|
||||
- struct timespec now = {0};
|
||||
int rc = CSN_SUCCESS;
|
||||
- time_t cur_time;
|
||||
- int delta;
|
||||
|
||||
if (gen == NULL || csn == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn", "Invalid argument\n");
|
||||
@@ -180,39 +182,13 @@ csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
|
||||
return CSN_MEMORY_ERROR;
|
||||
}
|
||||
|
||||
- if ((rc = slapi_clock_gettime(&now)) != 0) {
|
||||
- /* Failed to get system time, we must abort */
|
||||
- slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn",
|
||||
- "Failed to get system time (%s)\n",
|
||||
- slapd_system_strerror(rc));
|
||||
- return CSN_TIME_ERROR;
|
||||
- }
|
||||
- cur_time = now.tv_sec;
|
||||
-
|
||||
slapi_rwlock_wrlock(gen->lock);
|
||||
|
||||
- /* check if the time should be adjusted */
|
||||
- delta = cur_time - gen->state.sampled_time;
|
||||
- if (delta > _SEC_PER_DAY || delta < (-1 * _SEC_PER_DAY)) {
|
||||
- /* We had a jump larger than a day */
|
||||
- slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn",
|
||||
- "Detected large jump in CSN time. Delta: %d (current time: %ld vs previous time: %ld)\n",
|
||||
- delta, cur_time, gen->state.sampled_time);
|
||||
- }
|
||||
- if (delta > 0) {
|
||||
- rc = _csngen_adjust_local_time(gen, cur_time);
|
||||
- if (rc != CSN_SUCCESS) {
|
||||
- slapi_rwlock_unlock(gen->lock);
|
||||
- return rc;
|
||||
- }
|
||||
+ rc = _csngen_adjust_local_time(gen);
|
||||
+ if (rc != CSN_SUCCESS) {
|
||||
+ slapi_rwlock_unlock(gen->lock);
|
||||
+ return rc;
|
||||
}
|
||||
- /* if (delta < 0) this means the local system time was set back
|
||||
- * the new csn will be generated based on sampled time, which is
|
||||
- * ahead of system time and previously generated csns.
|
||||
- * the time stamp of the csn will not change until system time
|
||||
- * catches up or is corrected by remote csns.
|
||||
- * But we need to ensure that the seq_num does not overflow.
|
||||
- */
|
||||
|
||||
if (gen->state.seq_num == CSN_MAX_SEQNUM) {
|
||||
slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn", "Sequence rollover; "
|
||||
@@ -261,13 +237,36 @@ csngen_rewrite_rid(CSNGen *gen, ReplicaId rid)
|
||||
}
|
||||
|
||||
/* this function should be called when a remote CSN for the same part of
|
||||
- the dit becomes known to the server (for instance, as part of RUV during
|
||||
- replication session. In response, the generator would adjust its notion
|
||||
- of time so that it does not generate smaller csns */
|
||||
+ * the dit becomes known to the server (for instance, as part of RUV during
|
||||
+ * replication session. In response, the generator would adjust its notion
|
||||
+ * of time so that it does not generate smaller csns
|
||||
+ *
|
||||
+ * The following counters are updated
|
||||
+ * - when a new csn is generated
|
||||
+ * - when csngen is adjusted (beginning of a incoming (extop) or outgoing
|
||||
+ * (inc_protocol) session)
|
||||
+ *
|
||||
+ * sampled_time: It takes the value of current system time.
|
||||
+ *
|
||||
+ * remote offset: it is updated when 'csn' argument is ahead of the next csn
|
||||
+ * that the csn generator will generate. It is the MAX jump ahead, it is not
|
||||
+ * cumulative counter (e.g. if remote_offset=7 and 'csn' is 5sec ahead
|
||||
+ * remote_offset stays the same. The jump ahead (5s) pour into the local offset.
|
||||
+ * It is not clear of the interest of this counter. It gives an indication of
|
||||
+ * the maximum jump ahead but not much.
|
||||
+ *
|
||||
+ * local offset: it is increased if
|
||||
+ * - system time is going backward (compare sampled_time)
|
||||
+ * - if 'csn' argument is ahead of csn that the csn generator would generate
|
||||
+ * AND diff('csn', csngen.new_csn) < remote_offset
|
||||
+ * then the diff "pour" into local_offset
|
||||
+ * It is decreased as the clock is ticking, local offset is "consumed" as
|
||||
+ * sampled_time progresses.
|
||||
+ */
|
||||
int
|
||||
csngen_adjust_time(CSNGen *gen, const CSN *csn)
|
||||
{
|
||||
- time_t remote_time, remote_offset, cur_time;
|
||||
+ time_t remote_time, remote_offset, cur_time, old_time, new_time;
|
||||
PRUint16 remote_seqnum;
|
||||
int rc;
|
||||
extern int config_get_ignore_time_skew(void);
|
||||
@@ -281,6 +280,11 @@ csngen_adjust_time(CSNGen *gen, const CSN *csn)
|
||||
|
||||
slapi_rwlock_wrlock(gen->lock);
|
||||
|
||||
+ /* Get last local csn time */
|
||||
+ old_time = CSN_CALC_TSTAMP(gen);
|
||||
+ /* update local offset and sample_time */
|
||||
+ rc = _csngen_adjust_local_time(gen);
|
||||
+
|
||||
if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
cur_time = CSN_CALC_TSTAMP(gen);
|
||||
slapi_log_err(SLAPI_LOG_REPL, "csngen_adjust_time",
|
||||
@@ -290,79 +294,60 @@ csngen_adjust_time(CSNGen *gen, const CSN *csn)
|
||||
gen->state.local_offset,
|
||||
gen->state.remote_offset);
|
||||
}
|
||||
- /* make sure we have the current time */
|
||||
- cur_time = slapi_current_utc_time();
|
||||
-
|
||||
- /* make sure sampled_time is current */
|
||||
- /* must only call adjust_local_time if the current time is greater than
|
||||
- the generator state time */
|
||||
- if ((cur_time > gen->state.sampled_time) &&
|
||||
- (CSN_SUCCESS != (rc = _csngen_adjust_local_time(gen, cur_time)))) {
|
||||
+ if (rc != CSN_SUCCESS) {
|
||||
/* _csngen_adjust_local_time will log error */
|
||||
slapi_rwlock_unlock(gen->lock);
|
||||
- csngen_dump_state(gen);
|
||||
+ csngen_dump_state(gen, SLAPI_LOG_DEBUG);
|
||||
return rc;
|
||||
}
|
||||
|
||||
- cur_time = CSN_CALC_TSTAMP(gen);
|
||||
- if (remote_time >= cur_time) {
|
||||
- time_t new_time = 0;
|
||||
-
|
||||
- if (remote_seqnum > gen->state.seq_num) {
|
||||
- if (remote_seqnum < CSN_MAX_SEQNUM) {
|
||||
- gen->state.seq_num = remote_seqnum + 1;
|
||||
- } else {
|
||||
- remote_time++;
|
||||
- }
|
||||
- }
|
||||
-
|
||||
- remote_offset = remote_time - cur_time;
|
||||
- if (remote_offset > gen->state.remote_offset) {
|
||||
- if (ignore_time_skew || (remote_offset <= CSN_MAX_TIME_ADJUST)) {
|
||||
- gen->state.remote_offset = remote_offset;
|
||||
- } else /* remote_offset > CSN_MAX_TIME_ADJUST */
|
||||
- {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, "csngen_adjust_time",
|
||||
- "Adjustment limit exceeded; value - %ld, limit - %ld\n",
|
||||
- remote_offset, (long)CSN_MAX_TIME_ADJUST);
|
||||
- slapi_rwlock_unlock(gen->lock);
|
||||
- csngen_dump_state(gen);
|
||||
- return CSN_LIMIT_EXCEEDED;
|
||||
- }
|
||||
- } else if (remote_offset > 0) { /* still need to account for this */
|
||||
- gen->state.local_offset += remote_offset;
|
||||
+ remote_offset = remote_time - CSN_CALC_TSTAMP(gen);
|
||||
+ if (remote_offset > 0) {
|
||||
+ if (!ignore_time_skew && (gen->state.remote_offset + remote_offset > CSN_MAX_TIME_ADJUST)) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "csngen_adjust_time",
|
||||
+ "Adjustment limit exceeded; value - %ld, limit - %ld\n",
|
||||
+ remote_offset, (long)CSN_MAX_TIME_ADJUST);
|
||||
+ slapi_rwlock_unlock(gen->lock);
|
||||
+ csngen_dump_state(gen, SLAPI_LOG_DEBUG);
|
||||
+ return CSN_LIMIT_EXCEEDED;
|
||||
}
|
||||
-
|
||||
- new_time = CSN_CALC_TSTAMP(gen);
|
||||
- /* let's revisit the seq num - if the new time is > the old
|
||||
- tiem, we should reset the seq number to remote + 1 if
|
||||
- this won't cause a wrap around */
|
||||
- if (new_time >= cur_time) {
|
||||
- /* just set seq_num regardless of whether the current one
|
||||
- is < or > than the remote one - the goal of this function
|
||||
- is to make sure we generate CSNs > the remote CSN - if
|
||||
- we have increased the time, we can decrease the seqnum
|
||||
- and still guarantee that any new CSNs generated will be
|
||||
- > any current CSNs we have generated */
|
||||
- if (remote_seqnum < gen->state.seq_num) {
|
||||
- gen->state.seq_num ++;
|
||||
- } else {
|
||||
- gen->state.seq_num = remote_seqnum + 1;
|
||||
- }
|
||||
+ gen->state.remote_offset += remote_offset;
|
||||
+ /* To avoid beat phenomena between suppliers let put 1 second in local_offset
|
||||
+ * it will be eaten at next clock tick rather than increasing remote offset
|
||||
+ * If we do not do that we will have a time skew drift of 1 second per 2 seconds
|
||||
+ * if suppliers are desynchronized by 0.5 second
|
||||
+ */
|
||||
+ if (gen->state.local_offset == 0) {
|
||||
+ gen->state.local_offset++;
|
||||
+ gen->state.remote_offset--;
|
||||
}
|
||||
- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
- slapi_log_err(SLAPI_LOG_REPL, "csngen_adjust_time",
|
||||
- "gen state after %08lx%04x:%ld:%ld:%ld\n",
|
||||
- new_time, gen->state.seq_num,
|
||||
- gen->state.sampled_time,
|
||||
- gen->state.local_offset,
|
||||
- gen->state.remote_offset);
|
||||
+ }
|
||||
+ /* Time to compute seqnum so that
|
||||
+ * new csn >= remote csn and new csn >= old local csn
|
||||
+ */
|
||||
+ new_time = CSN_CALC_TSTAMP(gen);
|
||||
+ PR_ASSERT(new_time >= old_time);
|
||||
+ PR_ASSERT(new_time >= remote_time);
|
||||
+ if (new_time > old_time) {
|
||||
+ /* Can reset (local) seqnum */
|
||||
+ gen->state.seq_num = 0;
|
||||
+ }
|
||||
+ if (new_time == remote_time && remote_seqnum >= gen->state.seq_num) {
|
||||
+ if (remote_seqnum >= CSN_MAX_SEQNUM) {
|
||||
+ gen->state.seq_num = 0;
|
||||
+ gen->state.local_offset++;
|
||||
+ } else {
|
||||
+ gen->state.seq_num = remote_seqnum + 1;
|
||||
}
|
||||
- } else if (gen->state.remote_offset > 0) {
|
||||
- /* decrease remote offset? */
|
||||
- /* how to decrease remote offset but ensure that we don't
|
||||
- generate a duplicate CSN, or a CSN smaller than one we've already
|
||||
- generated? */
|
||||
+ }
|
||||
+
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, "csngen_adjust_time",
|
||||
+ "gen state after %08lx%04x:%ld:%ld:%ld\n",
|
||||
+ new_time, gen->state.seq_num,
|
||||
+ gen->state.sampled_time,
|
||||
+ gen->state.local_offset,
|
||||
+ gen->state.remote_offset);
|
||||
}
|
||||
|
||||
slapi_rwlock_unlock(gen->lock);
|
||||
@@ -435,16 +420,16 @@ csngen_unregister_callbacks(CSNGen *gen, void *cookie)
|
||||
|
||||
/* debugging function */
|
||||
void
|
||||
-csngen_dump_state(const CSNGen *gen)
|
||||
+csngen_dump_state(const CSNGen *gen, int severity)
|
||||
{
|
||||
if (gen) {
|
||||
slapi_rwlock_rdlock(gen->lock);
|
||||
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "CSN generator's state:\n");
|
||||
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\treplica id: %d\n", gen->state.rid);
|
||||
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tsampled time: %ld\n", gen->state.sampled_time);
|
||||
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tlocal offset: %ld\n", gen->state.local_offset);
|
||||
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tremote offset: %ld\n", gen->state.remote_offset);
|
||||
- slapi_log_err(SLAPI_LOG_DEBUG, "csngen_dump_state", "\tsequence number: %d\n", gen->state.seq_num);
|
||||
+ slapi_log_err(severity, "csngen_dump_state", "CSN generator's state:\n");
|
||||
+ slapi_log_err(severity, "csngen_dump_state", "\treplica id: %d\n", gen->state.rid);
|
||||
+ slapi_log_err(severity, "csngen_dump_state", "\tsampled time: %ld\n", gen->state.sampled_time);
|
||||
+ slapi_log_err(severity, "csngen_dump_state", "\tlocal offset: %ld\n", gen->state.local_offset);
|
||||
+ slapi_log_err(severity, "csngen_dump_state", "\tremote offset: %ld\n", gen->state.remote_offset);
|
||||
+ slapi_log_err(severity, "csngen_dump_state", "\tsequence number: %d\n", gen->state.seq_num);
|
||||
slapi_rwlock_unlock(gen->lock);
|
||||
}
|
||||
}
|
||||
@@ -459,7 +444,7 @@ csngen_test()
|
||||
CSNGen *gen = csngen_new(255, NULL);
|
||||
|
||||
slapi_log_err(SLAPI_LOG_DEBUG, "csngen_test", "staring csn generator test ...");
|
||||
- csngen_dump_state(gen);
|
||||
+ csngen_dump_state(gen, SLAPI_LOG_INFO);
|
||||
|
||||
rc = _csngen_start_test_threads(gen);
|
||||
if (rc == 0) {
|
||||
@@ -469,7 +454,7 @@ csngen_test()
|
||||
}
|
||||
|
||||
_csngen_stop_test_threads();
|
||||
- csngen_dump_state(gen);
|
||||
+ csngen_dump_state(gen, SLAPI_LOG_INFO);
|
||||
slapi_log_err(SLAPI_LOG_DEBUG, "csngen_test", "csn generator test is complete...");
|
||||
}
|
||||
|
||||
@@ -574,94 +559,93 @@ _csngen_cmp_callbacks(const void *el1, const void *el2)
|
||||
return 1;
|
||||
}
|
||||
|
||||
+/* Get time and adjust local offset */
|
||||
static int
|
||||
-_csngen_adjust_local_time(CSNGen *gen, time_t cur_time)
|
||||
+_csngen_adjust_local_time(CSNGen *gen)
|
||||
{
|
||||
extern int config_get_ignore_time_skew(void);
|
||||
int ignore_time_skew = config_get_ignore_time_skew();
|
||||
- time_t time_diff = cur_time - gen->state.sampled_time;
|
||||
+ struct timespec now = {0};
|
||||
+ time_t time_diff;
|
||||
+ time_t cur_time;
|
||||
+ int rc;
|
||||
|
||||
+
|
||||
+ if ((rc = gen->gettime(&now)) != 0) {
|
||||
+ /* Failed to get system time, we must abort */
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn",
|
||||
+ "Failed to get system time (%s)\n",
|
||||
+ slapd_system_strerror(rc));
|
||||
+ return CSN_TIME_ERROR;
|
||||
+ }
|
||||
+ cur_time = now.tv_sec;
|
||||
+ time_diff = cur_time - gen->state.sampled_time;
|
||||
+
|
||||
+ /* check if the time should be adjusted */
|
||||
if (time_diff == 0) {
|
||||
/* This is a no op - _csngen_adjust_local_time should never be called
|
||||
in this case, because there is nothing to adjust - but just return
|
||||
here to protect ourselves
|
||||
*/
|
||||
return CSN_SUCCESS;
|
||||
- } else if (time_diff > 0) {
|
||||
- time_t ts_before = CSN_CALC_TSTAMP(gen);
|
||||
- time_t ts_after = 0;
|
||||
- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
- time_t new_time = CSN_CALC_TSTAMP(gen);
|
||||
- slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
|
||||
- "gen state before %08lx%04x:%ld:%ld:%ld\n",
|
||||
- new_time, gen->state.seq_num,
|
||||
- gen->state.sampled_time,
|
||||
- gen->state.local_offset,
|
||||
- gen->state.remote_offset);
|
||||
- }
|
||||
-
|
||||
- gen->state.sampled_time = cur_time;
|
||||
- if (time_diff > gen->state.local_offset)
|
||||
- gen->state.local_offset = 0;
|
||||
- else
|
||||
- gen->state.local_offset = gen->state.local_offset - time_diff;
|
||||
-
|
||||
- /* only reset the seq_num if the new timestamp part of the CSN
|
||||
- is going to be greater than the old one - if they are the
|
||||
- same after the above adjustment (which can happen if
|
||||
- csngen_adjust_time has to store the offset in the
|
||||
- local_offset field) we must not allow the CSN to regress or
|
||||
- generate duplicate numbers */
|
||||
- ts_after = CSN_CALC_TSTAMP(gen);
|
||||
- if (ts_after > ts_before) {
|
||||
- gen->state.seq_num = 0; /* only reset if new time > old time */
|
||||
- }
|
||||
-
|
||||
- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
- time_t new_time = CSN_CALC_TSTAMP(gen);
|
||||
- slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
|
||||
- "gen state after %08lx%04x:%ld:%ld:%ld\n",
|
||||
- new_time, gen->state.seq_num,
|
||||
- gen->state.sampled_time,
|
||||
- gen->state.local_offset,
|
||||
- gen->state.remote_offset);
|
||||
- }
|
||||
- return CSN_SUCCESS;
|
||||
- } else /* time was turned back */
|
||||
- {
|
||||
- if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
- time_t new_time = CSN_CALC_TSTAMP(gen);
|
||||
- slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
|
||||
- "gen state back before %08lx%04x:%ld:%ld:%ld\n",
|
||||
- new_time, gen->state.seq_num,
|
||||
- gen->state.sampled_time,
|
||||
- gen->state.local_offset,
|
||||
- gen->state.remote_offset);
|
||||
- }
|
||||
+ }
|
||||
+ if (labs(time_diff) > TIME_DIFF_WARNING_DELAY) {
|
||||
+ /* We had a jump larger than a day */
|
||||
+ slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn",
|
||||
+ "Detected large jump in CSN time. Delta: %ld (current time: %ld vs previous time: %ld)\n",
|
||||
+ time_diff, cur_time, gen->state.sampled_time);
|
||||
+ }
|
||||
+ if (!ignore_time_skew && (gen->state.local_offset - time_diff > CSN_MAX_TIME_ADJUST)) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "_csngen_adjust_local_time",
|
||||
+ "Adjustment limit exceeded; value - %ld, limit - %d\n",
|
||||
+ gen->state.local_offset - time_diff, CSN_MAX_TIME_ADJUST);
|
||||
+ return CSN_LIMIT_EXCEEDED;
|
||||
+ }
|
||||
|
||||
- if (!ignore_time_skew && (labs(time_diff) > CSN_MAX_TIME_ADJUST)) {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, "_csngen_adjust_local_time",
|
||||
- "Adjustment limit exceeded; value - %ld, limit - %d\n",
|
||||
- labs(time_diff), CSN_MAX_TIME_ADJUST);
|
||||
- return CSN_LIMIT_EXCEEDED;
|
||||
- }
|
||||
+ time_t ts_before = CSN_CALC_TSTAMP(gen);
|
||||
+ time_t ts_after = 0;
|
||||
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
|
||||
+ time_t new_time = CSN_CALC_TSTAMP(gen);
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
|
||||
+ "gen state before %08lx%04x:%ld:%ld:%ld\n",
|
||||
+ new_time, gen->state.seq_num,
|
||||
+ gen->state.sampled_time,
|
||||
+ gen->state.local_offset,
|
||||
+                      gen->state.remote_offset);
+    }

-    gen->state.sampled_time = cur_time;
-    gen->state.local_offset = MAX_VAL(gen->state.local_offset, labs(time_diff));
-    gen->state.seq_num = 0;
+    gen->state.sampled_time = cur_time;
+    gen->state.local_offset = MAX_VAL(0, gen->state.local_offset - time_diff);
+    /* new local_offset = MAX_VAL(0, old sample_time + old local_offset - cur_time)
+     * ==> new local_offset >= 0 and
+     *     new local_offset + cur_time >= old sample_time + old local_offset
+     * ==> new local_offset + cur_time + remote_offset >=
+     *     sample_time + old local_offset + remote_offset
+     * ==> CSN_CALC_TSTAMP(new gen) >= CSN_CALC_TSTAMP(old gen)
+     */

-    if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
-        time_t new_time = CSN_CALC_TSTAMP(gen);
-        slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
-                      "gen state back after %08lx%04x:%ld:%ld:%ld\n",
-                      new_time, gen->state.seq_num,
-                      gen->state.sampled_time,
-                      gen->state.local_offset,
-                      gen->state.remote_offset);
-    }
+    /* only reset the seq_num if the new timestamp part of the CSN
+       is going to be greater than the old one - if they are the
+       same after the above adjustment (which can happen if
+       csngen_adjust_time has to store the offset in the
+       local_offset field) we must not allow the CSN to regress or
+       generate duplicate numbers */
+    ts_after = CSN_CALC_TSTAMP(gen);
+    PR_ASSERT(ts_after >= ts_before);
+    if (ts_after > ts_before) {
+        gen->state.seq_num = 0; /* only reset if new time > old time */
+    }

-    return CSN_SUCCESS;
+    if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
+        time_t new_time = CSN_CALC_TSTAMP(gen);
+        slapi_log_err(SLAPI_LOG_REPL, "_csngen_adjust_local_time",
+                      "gen state after %08lx%04x:%ld:%ld:%ld\n",
+                      new_time, gen->state.seq_num,
+                      gen->state.sampled_time,
+                      gen->state.local_offset,
+                      gen->state.remote_offset);
     }
+    return CSN_SUCCESS;
 }

 /*
@@ -799,7 +783,7 @@ _csngen_remote_tester_main(void *data)
                           "Failed to adjust generator's time; csn error - %d\n", rc);
         }

-        csngen_dump_state(gen);
+        csngen_dump_state(gen, SLAPI_LOG_INFO);
     }
     csn_free(&csn);

@@ -825,8 +809,83 @@ _csngen_local_tester_main(void *data)
         /*
          * g_sampled_time -= slapi_rand () % 100;
          */
-        csngen_dump_state(gen);
+        csngen_dump_state(gen, SLAPI_LOG_INFO);
     }

     PR_AtomicDecrement(&s_thread_count);
 }
+
+int _csngen_tester_state;
+int _csngen_tester_state_rid;
+
+static int
+_mynoise(int time, int len, double height)
+{
+    if (((time/len) % 2) == 0) {
+        return -height + 2 * height * ( time % len ) / (len-1);
+    } else {
+        return height - 2 * height * ( time % len ) / (len-1);
+    }
+}
+
+
+int32_t _csngen_tester_gettime(struct timespec *tp)
+{
+    int vtime = _csngen_tester_state;
+    tp->tv_sec = 0x1000000 + vtime + 2 * _csngen_tester_state_rid;
+    if (_csngen_tester_state_rid == 3) {
+        /* tp->tv_sec += _mynoise(vtime, 10, 1.5); */
+        tp->tv_sec += _mynoise(vtime, 30, 15);
+    }
+    return 0;
+}
+
+/* Mimic a fully meshed multi supplier topology */
+void csngen_multi_suppliers_test(void)
+{
+#define NB_TEST_MASTERS 6
+#define NB_TEST_STATES 500
+    CSNGen *gen[NB_TEST_MASTERS];
+    struct timespec now = {0};
+    CSN *last_csn = NULL;
+    CSN *csn = NULL;
+    int i, j, rc;
+
+    _csngen_tester_gettime(&now);
+
+    for (i = 0; i < NB_TEST_MASTERS; i++) {
+        gen[i] = csngen_new(i+1, NULL);
+        gen[i]->gettime = _csngen_tester_gettime;
+        gen[i]->state.sampled_time = now.tv_sec;
+    }
+
+    for (_csngen_tester_state = 0; _csngen_tester_state < NB_TEST_STATES; _csngen_tester_state++) {
+        for (i = 0; i < NB_TEST_MASTERS; i++) {
+            _csngen_tester_state_rid = i+1;
+            rc = csngen_new_csn(gen[i], &csn, PR_FALSE);
+            if (rc) {
+                continue;
+            }
+            csngen_dump_state(gen[i], SLAPI_LOG_INFO);
+
+            if (csn_compare(csn, last_csn) <= 0) {
+                slapi_log_err(SLAPI_LOG_ERR, "csngen_multi_suppliers_test",
+                              "CSN generated in disorder state=%d rid=%d\n", _csngen_tester_state, _csngen_tester_state_rid);
+                _csngen_tester_state = NB_TEST_STATES;
+                break;
+            }
+            last_csn = csn;
+
+            for (j = 0; j < NB_TEST_MASTERS; j++) {
+                if (i == j) {
+                    continue;
+                }
+                _csngen_tester_state_rid = j+1;
+                rc = csngen_adjust_time(gen[j], csn);
+                if (rc) {
+                    continue;
+                }
+            }
+        }
+    }
+}
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 56765fdfb..59c5ec9ab 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -6762,8 +6762,17 @@ time_t slapi_current_time(void) __attribute__((deprecated));
  *
  * \param tp - a timespec struct where the system time is set
  * \return result code, upon success tp is set to the system time
+ * as a clock in UTC timezone. This clock adjusts with ntp steps,
+ * and should NOT be used for timer information.
  */
 int32_t slapi_clock_gettime(struct timespec *tp);
+/*
+ * slapi_clock_gettime should have better been called
+ * slapi_clock_utc_gettime but since the function pre-existed
+ * we are just adding an alias (to avoid the risk of breaking
+ * some custom plugins)
+ */
+#define slapi_clock_utc_gettime slapi_clock_gettime

 /**
  * Returns the current system time as a hr clock relative to uptime
--
2.31.1
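Note on the hunk above: per the patch's own comment, CSN_CALC_TSTAMP(gen) is sampled_time + local_offset + remote_offset. A minimal Python model of the adjusted _csngen_adjust_local_time logic (illustration only, not part of the patch; the dict keys simply mirror the C state fields):

    def adjust_local_time(state, cur_time):
        # Model of the patched logic; 'state' mirrors the csngen state struct
        ts_before = state["sampled_time"] + state["local_offset"] + state["remote_offset"]
        time_diff = cur_time - state["sampled_time"]
        state["sampled_time"] = cur_time
        # Consume the local offset as real time catches up, never below zero
        state["local_offset"] = max(0, state["local_offset"] - time_diff)
        ts_after = state["sampled_time"] + state["local_offset"] + state["remote_offset"]
        assert ts_after >= ts_before  # the CSN timestamp never regresses
        if ts_after > ts_before:
            state["seq_num"] = 0      # only reset when the timestamp advanced
        return state

This is why the unconditional seq_num reset was removed: when the adjustment leaves the timestamp part unchanged, resetting seq_num to 0 would let the generator hand out duplicate CSNs.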
240
0003-Issue-3584-Fix-PBKDF2_SHA256-hashing-in-FIPS-mode-49.patch
Normal file
@@ -0,0 +1,240 @@
From 957ffd53b041c19d27753a028e6f514dcc75dfbd Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 26 Oct 2021 15:51:24 -0700
Subject: [PATCH 03/12] Issue 3584 - Fix PBKDF2_SHA256 hashing in FIPS mode
 (#4949)

Issue Description: Use PK11_Decrypt function to get hash data
because PK11_ExtractKeyValue function is forbidden in FIPS mode.
We can't extract keys while in FIPS mode. But we use PK11_ExtractKeyValue
for hashes, and it's not forbidden.

We can't use OpenSSL's PBKDF2-SHA256 implementation right now because
we need to support an upgrade procedure while in FIPS mode (update
hash on bind). For that, we should fix existing PBKDF2 usage, and we can
switch to OpenSSL's PBKDF2-SHA256 in the following versions.

Fix Description: Use PK11_Decrypt function to get the data.

Enable TLS on all CI test topologies while in FIPS because without
that we don't set up the NSS database correctly.

Add PBKDF2-SHA256 (OpenSSL) to ldif templates, so the password scheme is
discoverable by internal functions.

https://github.com/389ds/389-ds-base/issues/3584

Reviewed by: @progier389, @mreynolds389, @Firstyear, @tbordaz (Thanks!!)
---
 .../healthcheck/health_security_test.py      | 10 ---
 ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c | 62 ++++++++++++++++---
 ldap/servers/slapd/main.c                    | 12 ++++
 src/lib389/lib389/__init__.py                |  4 ++
 src/lib389/lib389/topologies.py              |  6 +-
 src/lib389/lib389/utils.py                   | 13 ++++
 6 files changed, 86 insertions(+), 21 deletions(-)

diff --git a/dirsrvtests/tests/suites/healthcheck/health_security_test.py b/dirsrvtests/tests/suites/healthcheck/health_security_test.py
index 6c0d27aaa..c1dc7938c 100644
--- a/dirsrvtests/tests/suites/healthcheck/health_security_test.py
+++ b/dirsrvtests/tests/suites/healthcheck/health_security_test.py
@@ -40,16 +40,6 @@ else:
 log = logging.getLogger(__name__)


-def is_fips():
-    if os.path.exists('/proc/sys/crypto/fips_enabled'):
-        with open('/proc/sys/crypto/fips_enabled', 'r') as f:
-            state = f.readline().strip()
-            if state == '1':
-                return True
-            else:
-                return False
-
-
 def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None):
     args = FakeArgs()
     args.instance = instance.serverid
diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
index d310dc792..dcac4fcdd 100644
--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
@@ -91,10 +91,11 @@ pbkdf2_sha256_extract(char *hash_in, SECItem *salt, uint32_t *iterations)
 SECStatus
 pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *salt, uint32_t iterations)
 {
-    SECItem *result = NULL;
     SECAlgorithmID *algid = NULL;
     PK11SlotInfo *slot = NULL;
     PK11SymKey *symkey = NULL;
+    SECItem *wrapKeyData = NULL;
+    SECStatus rv = SECFailure;

     /* We assume that NSS is already started. */
     algid = PK11_CreatePBEV2AlgorithmID(SEC_OID_PKCS5_PBKDF2, SEC_OID_HMAC_SHA256, SEC_OID_HMAC_SHA256, hash_out_len, iterations, salt);
@@ -104,7 +105,6 @@ pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *s
     slot = PK11_GetBestSlotMultiple(mechanism_array, 2, NULL);
     if (slot != NULL) {
         symkey = PK11_PBEKeyGen(slot, algid, pwd, PR_FALSE, NULL);
-        PK11_FreeSlot(slot);
         if (symkey == NULL) {
             /* We try to get the Error here but NSS has two or more error interfaces, and sometimes it uses none of them. */
             int32_t status = PORT_GetError();
@@ -123,18 +123,60 @@ pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *s
         return SECFailure;
     }

-    if (PK11_ExtractKeyValue(symkey) == SECSuccess) {
-        result = PK11_GetKeyData(symkey);
-        if (result != NULL && result->len <= hash_out_len) {
-            memcpy(hash_out, result->data, result->len);
-            PK11_FreeSymKey(symkey);
+    /*
+     * First, we need to generate a wrapped key for PK11_Decrypt call:
+     * slot is the same slot we used in PK11_PBEKeyGen()
+     * 256 bits / 8 bit per byte
+     */
+    PK11SymKey *wrapKey = PK11_KeyGen(slot, CKM_AES_ECB, NULL, 256/8, NULL);
+    PK11_FreeSlot(slot);
+    if (wrapKey == NULL) {
+        slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to generate a wrapped key.\n");
+        return SECFailure;
+    }
+
+    wrapKeyData = (SECItem *)PORT_Alloc(sizeof(SECItem));
+    /* Align the wrapped key with 32 bytes. */
+    wrapKeyData->len = (PK11_GetKeyLength(symkey) + 31) & ~31;
+    /* Allocate the aligned space for pkcs5 PBE key plus AESKey block */
+    wrapKeyData->data = (unsigned char *)slapi_ch_calloc(wrapKeyData->len, sizeof(unsigned char));
+
+    /* Get symkey wrapped with wrapKey - required for PK11_Decrypt call */
+    rv = PK11_WrapSymKey(CKM_AES_ECB, NULL, wrapKey, symkey, wrapKeyData);
+    if (rv != SECSuccess) {
+        PK11_FreeSymKey(symkey);
+        PK11_FreeSymKey(wrapKey);
+        SECITEM_FreeItem(wrapKeyData, PR_TRUE);
+        slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to wrap the symkey. (%d)\n", rv);
+        return SECFailure;
+    }
+
+    /* Allocate the space for our result */
+    void *result = (char *)slapi_ch_calloc(wrapKeyData->len, sizeof(char));
+    unsigned int result_len = 0;
+
+    /* Use wrapKey to decrypt the wrapped contents.
+     * result is the hash that we need;
+     * result_len is the actual length of the data;
+     * hash_out_len is the maximum (the space we allocated for hash_out)
+     */
+    rv = PK11_Decrypt(wrapKey, CKM_AES_ECB, NULL, result, &result_len, hash_out_len, wrapKeyData->data, wrapKeyData->len);
+    PK11_FreeSymKey(symkey);
+    PK11_FreeSymKey(wrapKey);
+    SECITEM_FreeItem(wrapKeyData, PR_TRUE);
+
+    if (rv == SECSuccess) {
+        if (result != NULL && result_len <= hash_out_len) {
+            memcpy(hash_out, result, result_len);
+            slapi_ch_free((void **)&result);
         } else {
-            PK11_FreeSymKey(symkey);
-            slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to retrieve (get) hash output.\n");
+            slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to retrieve (get) hash output.\n");
+            slapi_ch_free((void **)&result);
             return SECFailure;
         }
     } else {
-        slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to extract hash output.\n");
+        slapi_log_err(SLAPI_LOG_ERR, "pbkdf2_sha256_hash", "Unable to extract hash output. (%d)\n", rv);
+        slapi_ch_free((void **)&result);
         return SECFailure;
     }

diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 61ed40b7d..04d0494f8 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -2895,9 +2895,21 @@ slapd_do_all_nss_ssl_init(int slapd_exemode, int importexport_encrypt, int s_por
      * is enabled or not. We use NSS for random number generation and
      * other things even if we are not going to accept SSL connections.
      * We also need NSS for attribute encryption/decryption on import and export.
+     *
+     * It's important to remember that while in FIPS mode the administrator should always enable
+     * the security, otherwise we don't call slapd_pk11_authenticate which is a requirement for FIPS mode
      */
+    PRBool isFIPS = slapd_pk11_isFIPS();
     int init_ssl = config_get_security();

+    if (isFIPS && !init_ssl) {
+        slapi_log_err(SLAPI_LOG_WARNING, "slapd_do_all_nss_ssl_init",
+                      "ERROR: TLS is not enabled, and the machine is in FIPS mode. "
+                      "Some functionality won't work correctly (for example, "
+                      "users with PBKDF2_SHA256 password scheme won't be able to log in). "
+                      "It's highly advisable to enable TLS on this instance.\n");
+    }
+
     if (slapd_exemode == SLAPD_EXEMODE_SLAPD) {
         init_ssl = init_ssl && (0 != s_port) && (s_port <= LDAP_PORT_MAX);
     } else {
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 29ee5245a..e0299c5b4 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -1588,6 +1588,10 @@ class DirSrv(SimpleLDAPObject, object):
         :param post_open: Open the server connection after restart.
         :type post_open: bool
         """
+        if self.config.get_attr_val_utf8_l("nsslapd-security") == 'on':
+            self.restart(post_open=post_open)
+            return
+
         # If it doesn't exist, create a cadb.
         ssca = NssSsl(dbpath=self.get_ssca_dir())
         if not ssca._db_exists():
diff --git a/src/lib389/lib389/topologies.py b/src/lib389/lib389/topologies.py
index e9969f524..e7d56582d 100644
--- a/src/lib389/lib389/topologies.py
+++ b/src/lib389/lib389/topologies.py
@@ -15,7 +15,7 @@ import socket
 import pytest

 from lib389 import DirSrv
-from lib389.utils import generate_ds_params
+from lib389.utils import generate_ds_params, is_fips
 from lib389.mit_krb5 import MitKrb5
 from lib389.saslmap import SaslMappings
 from lib389.replica import ReplicationManager, Replicas
@@ -108,6 +108,10 @@ def _create_instances(topo_dict, suffix):
         if role == ReplicaRole.HUB:
             hs[instance.serverid] = instance
             instances.update(hs)
+        # We should always enable TLS while in FIPS mode because otherwise NSS database won't be
+        # configured in a FIPS compliant way
+        if is_fips():
+            instance.enable_tls()
         log.info("Instance with parameters {} was created.".format(args_instance))

     if "standalone1" in instances and len(instances) == 1:
diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py
index b270784ce..5ba0c6676 100644
--- a/src/lib389/lib389/utils.py
+++ b/src/lib389/lib389/utils.py
@@ -1430,3 +1430,16 @@ def is_valid_hostname(hostname):
     hostname = hostname[:-1]  # strip exactly one dot from the right, if present
     allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
     return all(allowed.match(x) for x in hostname.split("."))
+
+
+def is_fips():
+    if os.path.exists('/proc/sys/crypto/fips_enabled'):
+        with open('/proc/sys/crypto/fips_enabled', 'r') as f:
+            state = f.readline().strip()
+            if state == '1':
+                return True
+            else:
+                return False
+    else:
+        return False
+
--
2.31.1
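The wrap-and-decrypt dance in the pbkdf2_pwd.c hunk above exists only because FIPS mode forbids PK11_ExtractKeyValue: wrapping the PBKDF2-derived symkey with a throwaway AES key and then PK11_Decrypt-ing it back is an allowed way to read the raw bytes. What it ultimately computes is a standard PBKDF2-HMAC-SHA256 digest; a rough Python equivalent of the derived value (illustration only, not the server's code path; the password, salt, and iteration count are made-up inputs):

    import hashlib
    import os

    def pbkdf2_sha256(password: bytes, salt: bytes, iterations: int, length: int = 32) -> bytes:
        # Same derivation the NSS code performs via PK11_PBEKeyGen
        return hashlib.pbkdf2_hmac("sha256", password, salt, iterations, dklen=length)

    salt = os.urandom(8)
    digest = pbkdf2_sha256(b"secret", salt, 10000)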
114
0004-Issue-4956-Automember-allows-invalid-regex-and-does-.patch
Normal file
@@ -0,0 +1,114 @@
From d037688c072c4cb84fbf9b2a6cb24927f7950605 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 20 Oct 2021 10:04:06 -0400
Subject: [PATCH 04/12] Issue 4956 - Automember allows invalid regex, and does
 not log proper error

Bug Description:  The server was detecting an invalid automember
                  regex, but it did not reject it, and it did not
                  log which regex rule was invalid.

Fix Description:  Properly rejecting the invalid regex also
                  triggers the proper error logging.

relates: https://github.com/389ds/389-ds-base/issues/4956

Reviewed by: tbordaz & spichugi (Thanks!!)
---
 .../automember_plugin/configuration_test.py  | 49 +++++++++++++++++--
 ldap/servers/plugins/automember/automember.c |  1 +
 2 files changed, 46 insertions(+), 4 deletions(-)

diff --git a/dirsrvtests/tests/suites/automember_plugin/configuration_test.py b/dirsrvtests/tests/suites/automember_plugin/configuration_test.py
index 0f9cc49dc..4a6b596db 100644
--- a/dirsrvtests/tests/suites/automember_plugin/configuration_test.py
+++ b/dirsrvtests/tests/suites/automember_plugin/configuration_test.py
@@ -1,21 +1,20 @@
 # --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2021 Red Hat, Inc.
 # All rights reserved.
 #
 # License: GPL (version 3 or any later version).
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---

+import ldap
 import os
 import pytest
-
 from lib389.topologies import topology_st as topo
 from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions, MemberOfPlugin
-import ldap
+from lib389._constants import DEFAULT_SUFFIX

 pytestmark = pytest.mark.tier1

-
 @pytest.mark.bz834056
 def test_configuration(topo):
     """
@@ -52,6 +51,48 @@ def test_configuration(topo):
                '"cn=SuffDef1,ou=autouserGroups,cn=config" '
                'can not be a child of the plugin config area "cn=config"')

+def test_invalid_regex(topo):
+    """Test invalid regex is properly reported in the error log
+
+    :id: a6d89f84-ec76-4871-be96-411d051800b1
+    :setup: Standalone Instance
+    :steps:
+        1. Setup automember
+        2. Add invalid regex
+        3. Error log reports useful message
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+    """
+    REGEX_DN = "cn=regex1,cn=testregex,cn=auto membership plugin,cn=plugins,cn=config"
+    REGEX_VALUE = "cn=*invalid*"
+    REGEX_ESC_VALUE = "cn=\\*invalid\\*"
+    GROUP_DN = "cn=demo_group,ou=groups," + DEFAULT_SUFFIX
+
+    AutoMembershipPlugin(topo.standalone).remove_all("nsslapd-pluginConfigArea")
+    automemberplugin = AutoMembershipPlugin(topo.standalone)
+
+    automember_prop = {
+        'cn': 'testRegex',
+        'autoMemberScope': 'ou=People,' + DEFAULT_SUFFIX,
+        'autoMemberFilter': 'objectclass=*',
+        'autoMemberDefaultGroup': GROUP_DN,
+        'autoMemberGroupingAttr': 'member:dn',
+    }
+    automember_defs = AutoMembershipDefinitions(topo.standalone, "cn=Auto Membership Plugin,cn=plugins,cn=config")
+    automember_def = automember_defs.create(properties=automember_prop)
+    automember_def.add_regex_rule("regex1", GROUP_DN, include_regex=[REGEX_VALUE])
+
+    automemberplugin.enable()
+    topo.standalone.restart()
+
+    # Check errors log for invalid message
+    ERR_STR1 = "automember_parse_regex_rule - Unable to parse regex rule"
+    ERR_STR2 = f"Skipping invalid inclusive regex rule in rule entry \"{REGEX_DN}\" \\(rule = \"{REGEX_ESC_VALUE}\"\\)"
+    assert topo.standalone.searchErrorsLog(ERR_STR1)
+    assert topo.standalone.searchErrorsLog(ERR_STR2)
+

 if __name__ == "__main__":
     CURRENT_FILE = os.path.realpath(__file__)
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
index 39350ad53..b92b89bd5 100644
--- a/ldap/servers/plugins/automember/automember.c
+++ b/ldap/servers/plugins/automember/automember.c
@@ -1217,6 +1217,7 @@ automember_parse_regex_rule(char *rule_string)
                       "automember_parse_regex_rule - Unable to parse "
                       "regex rule (invalid regex). Error \"%s\".\n",
                       recomp_result ? recomp_result : "unknown");
+        goto bail;
     }

     /* Validation has passed, so create the regex rule struct and fill it in.
--
2.31.1
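The one-line "goto bail" above is what turns the previously logged-but-ignored warning into an actual rejection of the rule. The same compile-before-accept guard expressed in Python, for comparison (illustration only; the real rules are parsed by the C plugin via regcomp):

    import re

    def parse_regex_rule(rule: str):
        attr, _, pattern = rule.partition("=")
        try:
            return attr, re.compile(pattern)   # validate before accepting the rule
        except re.error as e:
            # e.g. "cn=*invalid*" fails to compile ("nothing to repeat")
            raise ValueError(f'Skipping invalid regex rule (rule = "{rule}"): {e}')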
245
0005-Issue-4092-systemd-tmpfiles-warnings.patch
Normal file
@@ -0,0 +1,245 @@
From 9c08a053938eb28821fad7d0850c046ef2ed44c4 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 9 Dec 2020 16:16:30 -0500
Subject: [PATCH 05/12] Issue 4092 - systemd-tmpfiles warnings

Bug Description:

systemd-tmpfiles warns about legacy paths in our tmpfiles configs.
Using /var/run also introduces a race condition, see the following
issue https://pagure.io/389-ds-base/issue/47429

Fix Description:

Instead of using @localstatedir@/run use @localrundir@ which was
introduced in #850.

Relates: https://github.com/389ds/389-ds-base/issues/766
Fixes: https://github.com/389ds/389-ds-base/issues/4092

Reviewed by: vashirov & firstyear (Thanks!)
---
 Makefile.am                                  |  4 ++--
 configure.ac                                 | 10 ++++++++--
 dirsrvtests/tests/suites/basic/basic_test.py |  3 ++-
 ldap/admin/src/defaults.inf.in               |  8 ++++----
 ldap/servers/snmp/main.c                     |  8 ++++----
 src/lib389/lib389/__init__.py                |  3 +++
 src/lib389/lib389/instance/options.py        |  7 ++++++-
 src/lib389/lib389/instance/remove.py         | 13 ++++++++-----
 src/lib389/lib389/instance/setup.py          | 10 ++++++++--
 9 files changed, 45 insertions(+), 21 deletions(-)

diff --git a/Makefile.am b/Makefile.am
index 36434cf17..fc5a6a7d1 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -141,8 +141,8 @@ PATH_DEFINES = -DLOCALSTATEDIR="\"$(localstatedir)\"" -DSYSCONFDIR="\"$(sysconfd
                -DLIBDIR="\"$(libdir)\"" -DBINDIR="\"$(bindir)\"" \
                -DDATADIR="\"$(datadir)\"" -DDOCDIR="\"$(docdir)\"" \
                -DSBINDIR="\"$(sbindir)\"" -DPLUGINDIR="\"$(serverplugindir)\"" \
-               -DTEMPLATEDIR="\"$(sampledatadir)\"" -DSYSTEMSCHEMADIR="\"$(systemschemadir)\""
-
+               -DTEMPLATEDIR="\"$(sampledatadir)\"" -DSYSTEMSCHEMADIR="\"$(systemschemadir)\"" \
+               -DLOCALRUNDIR="\"$(localrundir)\""
 # Now that we have all our defines in place, setup the CPPFLAGS

 # These flags are the "must have" for all components
diff --git a/configure.ac b/configure.ac
index 61bf35e4a..9845beb7d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -418,7 +418,14 @@ fi

 m4_include(m4/fhs.m4)

-localrundir='/run'
+# /run directory path
+AC_ARG_WITH([localrundir],
+            AS_HELP_STRING([--with-localrundir=DIR],
+                           [Runtime data directory]),
+            [localrundir=$with_localrundir],
+            [localrundir="/run"])
+AC_SUBST([localrundir])
+
 cockpitdir=/389-console

 # installation paths - by default, we store everything
@@ -899,7 +906,6 @@ AC_SUBST(ldaplib_defs)
 AC_SUBST(ldaptool_bindir)
 AC_SUBST(ldaptool_opts)
 AC_SUBST(plainldif_opts)
-AC_SUBST(localrundir)

 AC_SUBST(brand)
 AC_SUBST(capbrand)
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 41726f073..7e80c443b 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -901,7 +901,8 @@ def test_basic_ldapagent(topology_st, import_example_ldif):
     # Remember, this is *forking*
     check_output([os.path.join(topology_st.standalone.get_sbin_dir(), 'ldap-agent'), config_file])
     # First kill any previous agents ....
-    pidpath = os.path.join(var_dir, 'run/ldap-agent.pid')
+    run_dir = topology_st.standalone.get_run_dir()
+    pidpath = os.path.join(run_dir, 'ldap-agent.pid')
     pid = None
     with open(pidpath, 'r') as pf:
         pid = pf.readlines()[0].strip()
diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in
index d5f504591..e02248b89 100644
--- a/ldap/admin/src/defaults.inf.in
+++ b/ldap/admin/src/defaults.inf.in
@@ -35,12 +35,12 @@ sysconf_dir = @sysconfdir@
 initconfig_dir = @initconfigdir@
 config_dir = @instconfigdir@/slapd-{instance_name}
 local_state_dir = @localstatedir@
-run_dir = @localstatedir@/run/dirsrv
+run_dir = @localrundir@
 # This is the expected location of ldapi.
-ldapi = @localstatedir@/run/slapd-{instance_name}.socket
+ldapi = @localrundir@/slapd-{instance_name}.socket
+pid_file = @localrundir@/slapd-{instance_name}.pid
 ldapi_listen = on
 ldapi_autobind = on
-pid_file = @localstatedir@/run/dirsrv/slapd-{instance_name}.pid
 inst_dir = @serverdir@/slapd-{instance_name}
 plugin_dir = @serverplugindir@
 system_schema_dir = @systemschemadir@
@@ -54,7 +54,7 @@ root_dn = cn=Directory Manager
 schema_dir = @instconfigdir@/slapd-{instance_name}/schema
 cert_dir = @instconfigdir@/slapd-{instance_name}

-lock_dir = @localstatedir@/lock/dirsrv/slapd-{instance_name}
+lock_dir = @localrundir@/lock/dirsrv/slapd-{instance_name}
 log_dir = @localstatedir@/log/dirsrv/slapd-{instance_name}
 access_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/access
 audit_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/audit
diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c
index 88a4d532a..e6271a8a9 100644
--- a/ldap/servers/snmp/main.c
+++ b/ldap/servers/snmp/main.c
@@ -287,14 +287,14 @@ load_config(char *conf_path)
     }

     /* set pidfile path */
-    if ((pidfile = malloc(strlen(LOCALSTATEDIR) + strlen("/run/") +
+    if ((pidfile = malloc(strlen(LOCALRUNDIR) + strlen("/") +
                           strlen(LDAP_AGENT_PIDFILE) + 1)) != NULL) {
-        strncpy(pidfile, LOCALSTATEDIR, strlen(LOCALSTATEDIR) + 1);
+        strncpy(pidfile, LOCALRUNDIR, strlen(LOCALRUNDIR) + 1);
         /* The above will likely not be NULL terminated, but we need to
          * be sure that we're properly NULL terminated for the below
          * strcat() to work properly. */
-        pidfile[strlen(LOCALSTATEDIR)] = (char)0;
-        strcat(pidfile, "/run/");
+        pidfile[strlen(LOCALRUNDIR)] = (char)0;
+        strcat(pidfile, "/");
         strcat(pidfile, LDAP_AGENT_PIDFILE);
     } else {
         printf("ldap-agent: malloc error processing config file\n");
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index e0299c5b4..2a0b83913 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -1709,6 +1709,9 @@ class DirSrv(SimpleLDAPObject, object):
     def get_bin_dir(self):
         return self.ds_paths.bin_dir

+    def get_run_dir(self):
+        return self.ds_paths.run_dir
+
     def get_plugin_dir(self):
         return self.ds_paths.plugin_dir

diff --git a/src/lib389/lib389/instance/options.py b/src/lib389/lib389/instance/options.py
index 4e083618c..d5b95e6df 100644
--- a/src/lib389/lib389/instance/options.py
+++ b/src/lib389/lib389/instance/options.py
@@ -1,5 +1,5 @@
 # --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2021 Red Hat, Inc.
 # All rights reserved.
 #
 # License: GPL (version 3 or any later version).
@@ -32,6 +32,7 @@ format_keys = [
     'backup_dir',
     'db_dir',
     'db_home_dir',
+    'ldapi',
     'ldif_dir',
     'lock_dir',
     'log_dir',
@@ -233,6 +234,10 @@ class Slapd2Base(Options2):
         self._helptext['local_state_dir'] = "Sets the location of Directory Server variable data. Only set this parameter in a development environment."
         self._advanced['local_state_dir'] = True

+        self._options['ldapi'] = ds_paths.ldapi
+        self._type['ldapi'] = str
+        self._helptext['ldapi'] = "Sets the location of socket interface of the Directory Server."
+
         self._options['lib_dir'] = ds_paths.lib_dir
         self._type['lib_dir'] = str
         self._helptext['lib_dir'] = "Sets the location of Directory Server shared libraries. Only set this parameter in a development environment."
diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py
index d7bb48ce0..1a35ddc07 100644
--- a/src/lib389/lib389/instance/remove.py
+++ b/src/lib389/lib389/instance/remove.py
@@ -78,13 +78,16 @@ def remove_ds_instance(dirsrv, force=False):

     _log.debug("Found instance marker at %s! Proceeding to remove ..." % dse_ldif_path)

-    # Stop the instance (if running) and now we know it really does exist
-    # and hopefully have permission to access it ...
-    _log.debug("Stopping instance %s" % dirsrv.serverid)
-    dirsrv.stop()
-
     ### ANY NEW REMOVAL ACTION MUST BE BELOW THIS LINE!!!

+    # Remove LDAPI socket file
+    ldapi_path = os.path.join(dirsrv.ds_paths.run_dir, "slapd-%s.socket" % dirsrv.serverid)
+    if os.path.exists(ldapi_path):
+        try:
+            os.remove(ldapi_path)
+        except OSError as e:
+            _log.debug(f"Failed to remove LDAPI socket ({ldapi_path}) Error: {str(e)}")
+
     # Remove these paths:
     # for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
     #             'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index ab7a2da85..57e7a9fd4 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -732,7 +732,10 @@ class SetupDs(object):
             dse += line.replace('%', '{', 1).replace('%', '}', 1)

         with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
-            ldapi_path = os.path.join(slapd['local_state_dir'], "run/slapd-%s.socket" % slapd['instance_name'])
+            if os.path.exists(os.path.dirname(slapd['ldapi'])):
+                ldapi_path = slapd['ldapi']
+            else:
+                ldapi_path = os.path.join(slapd['run_dir'], "slapd-%s.socket" % slapd['instance_name'])
             dse_fmt = dse.format(
                 schema_dir=slapd['schema_dir'],
                 lock_dir=slapd['lock_dir'],
@@ -902,10 +905,13 @@ class SetupDs(object):
         self.log.info("Perform SELinux labeling ...")
         selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
                          'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir',
-                         'run_dir', 'schema_dir', 'tmp_dir')
+                         'schema_dir', 'tmp_dir')
         for path in selinux_paths:
             selinux_restorecon(slapd[path])

+        # Don't run restorecon on the entire /run directory
+        selinux_restorecon(slapd['run_dir'] + '/dirsrv')
+
         selinux_label_port(slapd['port'])

         # Start the server
--
2.31.1
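The setup.py hunk above picks the LDAPI socket location in two steps: prefer the configured slapd['ldapi'] path when its parent directory exists, otherwise fall back to a socket under run_dir. The selection logic in isolation (a sketch; patch 0006 below later simplifies this to always trust slapd['ldapi']):

    import os

    def pick_ldapi_path(slapd: dict) -> str:
        # Prefer the configured socket path if its directory is present
        if os.path.exists(os.path.dirname(slapd['ldapi'])):
            return slapd['ldapi']
        # Otherwise fall back to the instance socket under run_dir
        return os.path.join(slapd['run_dir'], "slapd-%s.socket" % slapd['instance_name'])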
113
0006-Issue-4973-installer-changes-permissions-on-run.patch
Normal file
@@ -0,0 +1,113 @@
From b4a3b88faeafa6aa197d88ee84e4b2dbadd37ace Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 1 Nov 2021 10:42:27 -0400
Subject: [PATCH 06/12] Issue 4973 - installer changes permissions on /run

Description:  There was a regression when we switched over to using /run
              that caused the installer to try and create /run which
              caused the ownership to change.  Fixed this by changing
              the "run_dir" to /run/dirsrv

relates: https://github.com/389ds/389-ds-base/issues/4973

Reviewed by: jchapman (Thanks!)
---
 ldap/admin/src/defaults.inf.in       |  2 +-
 src/lib389/lib389/instance/remove.py | 10 +---------
 src/lib389/lib389/instance/setup.py  | 13 +++----------
 3 files changed, 5 insertions(+), 20 deletions(-)

diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in
index e02248b89..92b93d695 100644
--- a/ldap/admin/src/defaults.inf.in
+++ b/ldap/admin/src/defaults.inf.in
@@ -35,7 +35,7 @@ sysconf_dir = @sysconfdir@
 initconfig_dir = @initconfigdir@
 config_dir = @instconfigdir@/slapd-{instance_name}
 local_state_dir = @localstatedir@
-run_dir = @localrundir@
+run_dir = @localrundir@/dirsrv
 # This is the expected location of ldapi.
 ldapi = @localrundir@/slapd-{instance_name}.socket
 pid_file = @localrundir@/slapd-{instance_name}.pid
diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py
index 1a35ddc07..e96db3896 100644
--- a/src/lib389/lib389/instance/remove.py
+++ b/src/lib389/lib389/instance/remove.py
@@ -52,9 +52,9 @@ def remove_ds_instance(dirsrv, force=False):
     remove_paths['ldif_dir'] = dirsrv.ds_paths.ldif_dir
     remove_paths['lock_dir'] = dirsrv.ds_paths.lock_dir
     remove_paths['log_dir'] = dirsrv.ds_paths.log_dir
-    # remove_paths['run_dir'] = dirsrv.ds_paths.run_dir
     remove_paths['inst_dir'] = dirsrv.ds_paths.inst_dir
     remove_paths['etc_sysconfig'] = "%s/sysconfig/dirsrv-%s" % (dirsrv.ds_paths.sysconf_dir, dirsrv.serverid)
+    remove_paths['ldapi'] = dirsrv.ds_paths.ldapi

     tmpfiles_d_path = dirsrv.ds_paths.tmpfiles_d + "/dirsrv-" + dirsrv.serverid + ".conf"

@@ -80,14 +80,6 @@ def remove_ds_instance(dirsrv, force=False):

     ### ANY NEW REMOVAL ACTION MUST BE BELOW THIS LINE!!!

-    # Remove LDAPI socket file
-    ldapi_path = os.path.join(dirsrv.ds_paths.run_dir, "slapd-%s.socket" % dirsrv.serverid)
-    if os.path.exists(ldapi_path):
-        try:
-            os.remove(ldapi_path)
-        except OSError as e:
-            _log.debug(f"Failed to remove LDAPI socket ({ldapi_path}) Error: {str(e)}")
-
     # Remove these paths:
     # for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
     #             'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index 57e7a9fd4..be6854af8 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -732,10 +732,6 @@ class SetupDs(object):
             dse += line.replace('%', '{', 1).replace('%', '}', 1)

         with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
-            if os.path.exists(os.path.dirname(slapd['ldapi'])):
-                ldapi_path = slapd['ldapi']
-            else:
-                ldapi_path = os.path.join(slapd['run_dir'], "slapd-%s.socket" % slapd['instance_name'])
             dse_fmt = dse.format(
                 schema_dir=slapd['schema_dir'],
                 lock_dir=slapd['lock_dir'],
@@ -759,7 +755,7 @@ class SetupDs(object):
                 db_dir=slapd['db_dir'],
                 db_home_dir=slapd['db_home_dir'],
                 ldapi_enabled="on",
-                ldapi=ldapi_path,
+                ldapi=slapd['ldapi'],
                 ldapi_autobind="on",
             )
             file_dse.write(dse_fmt)
@@ -861,7 +857,7 @@ class SetupDs(object):
             SER_ROOT_PW: self._raw_secure_password,
             SER_DEPLOYED_DIR: slapd['prefix'],
             SER_LDAPI_ENABLED: 'on',
-            SER_LDAPI_SOCKET: ldapi_path,
+            SER_LDAPI_SOCKET: slapd['ldapi'],
             SER_LDAPI_AUTOBIND: 'on'
         }

@@ -905,13 +901,10 @@ class SetupDs(object):
         self.log.info("Perform SELinux labeling ...")
         selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
                          'ldif_dir', 'lock_dir', 'log_dir', 'db_home_dir',
-                         'schema_dir', 'tmp_dir')
+                         'run_dir', 'schema_dir', 'tmp_dir')
         for path in selinux_paths:
             selinux_restorecon(slapd[path])

-        # Don't run restorecon on the entire /run directory
-        selinux_restorecon(slapd['run_dir'] + '/dirsrv')
-
         selinux_label_port(slapd['port'])

         # Start the server
--
2.31.1
@@ -0,0 +1,70 @@
From c26c463ac92682dcf01ddbdc11cc1109b183eb0a Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 1 Nov 2021 16:04:28 -0400
Subject: [PATCH 07/12] Issue 4973 - update snmp to use /run/dirsrv for PID
 file

Description:  Previously SNMP would write the agent PID file directly
              under /run (or /var/run), but this broke a CI test after
              updating lib389/defaults.inf to use /run/dirsrv.

              Instead of hacking the CI test, I changed the path
              snmp uses to: /run/dirsrv/  Which is where it
              should really be written anyway.

relates: https://github.com/389ds/389-ds-base/issues/4973

Reviewed by: vashirov (Thanks!)
---
 ldap/servers/snmp/main.c         | 4 ++--
 wrappers/systemd-snmp.service.in | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c
index e6271a8a9..d8eb918f6 100644
--- a/ldap/servers/snmp/main.c
+++ b/ldap/servers/snmp/main.c
@@ -287,14 +287,14 @@ load_config(char *conf_path)
     }

     /* set pidfile path */
-    if ((pidfile = malloc(strlen(LOCALRUNDIR) + strlen("/") +
+    if ((pidfile = malloc(strlen(LOCALRUNDIR) + strlen("/dirsrv/") +
                           strlen(LDAP_AGENT_PIDFILE) + 1)) != NULL) {
         strncpy(pidfile, LOCALRUNDIR, strlen(LOCALRUNDIR) + 1);
         /* The above will likely not be NULL terminated, but we need to
          * be sure that we're properly NULL terminated for the below
          * strcat() to work properly. */
         pidfile[strlen(LOCALRUNDIR)] = (char)0;
-        strcat(pidfile, "/");
+        strcat(pidfile, "/dirsrv/");
         strcat(pidfile, LDAP_AGENT_PIDFILE);
     } else {
         printf("ldap-agent: malloc error processing config file\n");
diff --git a/wrappers/systemd-snmp.service.in b/wrappers/systemd-snmp.service.in
index 477bc623d..f18766cb4 100644
--- a/wrappers/systemd-snmp.service.in
+++ b/wrappers/systemd-snmp.service.in
@@ -1,7 +1,7 @@
 # do not edit this file in /lib/systemd/system - instead do the following:
 # cp /lib/systemd/system/dirsrv-snmp.service /etc/systemd/system/
 # edit /etc/systemd/system/dirsrv-snmp.service
-# systemctl daemon-reload
+# systemctl daemon-reload
 # systemctl (re)start dirsrv-snmp.service
 [Unit]
 Description=@capbrand@ Directory Server SNMP Subagent.
@@ -9,8 +9,8 @@ After=network.target

 [Service]
 Type=forking
-PIDFile=/run/ldap-agent.pid
-ExecStart=@sbindir@/ldap-agent @configdir@/ldap-agent.conf
+PIDFile=/run/dirsrv/ldap-agent.pid
+ExecStart=@sbindir@/ldap-agent @configdir@/ldap-agent.conf
70
0008-Issue-4978-make-installer-robust.patch
Normal file
@@ -0,0 +1,70 @@
From 88d6ceb18e17c5a18bafb5092ae0c22241b212df Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 1 Nov 2021 14:01:11 -0400
Subject: [PATCH 08/12] Issue 4978 - make installer robust

Description:  When run in a container the server can fail to start
              because the installer sets the db_home_dir to /dev/shm,
              but in containers the default size of /dev/shm is too
              small for libdb.  We should detect if we are in a
              container and not set db_home_dir to /dev/shm.

              During instance removal, if an instance was not properly
              created then it can not be removed either.  Make the
              uninstall more robust to accept some errors and continue
              removing the instance.

relates: https://github.com/389ds/389-ds-base/issues/4978

Reviewed by: firstyear & tbordaz (Thanks!)
---
 src/lib389/lib389/instance/setup.py | 9 +++++++++
 src/lib389/lib389/utils.py          | 5 ++++-
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index be6854af8..7b0147cf9 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -731,6 +731,15 @@ class SetupDs(object):
         for line in template_dse.readlines():
             dse += line.replace('%', '{', 1).replace('%', '}', 1)

+        # Check if we are in a container, if so don't use /dev/shm for the db home dir
+        # as containers typically don't allocate enough space for dev/shm and we don't
+        # want to unexpectedly break the server after an upgrade
+        container_result = subprocess.run(["systemd-detect-virt", "-c"], capture_output=True)
+        if container_result.returncode == 0:
+            # In a container, set the db_home_dir to the db path
+            self.log.debug("Container detected setting db home directory to db directory.")
+            slapd['db_home_dir'] = slapd['db_dir']
+
         with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse:
             dse_fmt = dse.format(
                 schema_dir=slapd['schema_dir'],
diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py
index 5ba0c6676..c63b4d0ee 100644
--- a/src/lib389/lib389/utils.py
+++ b/src/lib389/lib389/utils.py
@@ -266,6 +266,8 @@ def selinux_label_port(port, remove_label=False):
     :type remove_label: boolean
     :raises: ValueError: Error message
     """
+    if port is None:
+        return
     try:
         import selinux
     except ImportError:
@@ -662,7 +664,8 @@ def isLocalHost(host_name):
     Uses gethostbyname()
     """
     # first see if this is a "well known" local hostname
-    if host_name == 'localhost' or \
+    if host_name is None or \
+       host_name == 'localhost' or \
        host_name == 'localhost.localdomain' or \
        host_name == socket.gethostname():
         return True
--
2.31.1
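The container check above leans on systemd-detect-virt, which with -c exits 0 only when container virtualization is detected. Extracted as a standalone sketch (the dict values are example paths, not the installer's actual state; assumes systemd-detect-virt is on PATH):

    import subprocess

    def in_container() -> bool:
        # systemd-detect-virt -c exits 0 only inside a container
        return subprocess.run(["systemd-detect-virt", "-c"],
                              capture_output=True).returncode == 0

    slapd = {"db_dir": "/var/lib/dirsrv/slapd-demo/db",
             "db_home_dir": "/dev/shm/slapd-demo"}
    if in_container():
        # /dev/shm is typically too small for libdb in containers
        slapd["db_home_dir"] = slapd["db_dir"]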
468
0009-Issue-4972-gecos-with-IA5-introduces-a-compatibility.patch
Normal file
468
0009-Issue-4972-gecos-with-IA5-introduces-a-compatibility.patch
Normal file
@ -0,0 +1,468 @@
|
||||
From 2ae2f53756b6f13e2816bb30812740cb7ad97403 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Fri, 5 Nov 2021 09:56:43 +0100
|
||||
Subject: [PATCH 09/12] Issue 4972 - gecos with IA5 introduces a compatibility
|
||||
issue with previous (#4981)
|
||||
|
||||
releases where it was DirectoryString
|
||||
|
||||
Bug description:
|
||||
For years 'gecos' was DirectoryString (UTF8), with #50933 it was restricted to IA5 (ascii)
|
||||
https://github.com/389ds/389-ds-base/commit/0683bcde1b667b6d0ca6e8d1ef605f17c51ea2f7#
|
||||
|
||||
IA5 definition conforms rfc2307 but is a problem for existing deployments
|
||||
where entries can have 'gecos' attribute value with UTF8.
|
||||
|
||||
Fix description:
|
||||
Revert the definition to of 'gecos' being Directory String
|
||||
|
||||
Additional fix to make test_replica_backup_and_restore more
|
||||
robust to CI
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/4972
|
||||
|
||||
Reviewed by: William Brown, Pierre Rogier, James Chapman (Thanks !)
|
||||
|
||||
Platforms tested: F34
|
||||
---
|
||||
.../tests/suites/schema/schema_test.py | 398 +++++++++++++++++-
|
||||
ldap/schema/10rfc2307compat.ldif | 6 +-
|
||||
2 files changed, 400 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/schema/schema_test.py b/dirsrvtests/tests/suites/schema/schema_test.py
|
||||
index d590624b6..5d62b8d59 100644
|
||||
--- a/dirsrvtests/tests/suites/schema/schema_test.py
|
||||
+++ b/dirsrvtests/tests/suites/schema/schema_test.py
|
||||
@@ -18,8 +18,12 @@ import pytest
|
||||
import six
|
||||
from ldap.cidict import cidict
|
||||
from ldap.schema import SubSchema
|
||||
+from lib389.schema import SchemaLegacy
|
||||
from lib389._constants import *
|
||||
-from lib389.topologies import topology_st
|
||||
+from lib389.topologies import topology_st, topology_m2 as topo_m2
|
||||
+from lib389.idm.user import UserAccounts, UserAccount
|
||||
+from lib389.replica import ReplicationManager
|
||||
+from lib389.utils import ensure_bytes
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
@@ -165,6 +169,398 @@ def test_schema_comparewithfiles(topology_st):
|
||||
|
||||
log.info('test_schema_comparewithfiles: PASSED')
|
||||
|
||||
+def test_gecos_directoryString(topology_st):
|
||||
+ """Check that gecos supports directoryString value
|
||||
+
|
||||
+ :id: aee422bb-6299-4124-b5cd-d7393dac19d3
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Add a common user
|
||||
+ 2. replace gecos with a direstoryString value
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ """
|
||||
+
|
||||
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
+
|
||||
+ user_properties = {
|
||||
+ 'uid': 'testuser',
|
||||
+ 'cn' : 'testuser',
|
||||
+ 'sn' : 'user',
|
||||
+ 'uidNumber' : '1000',
|
||||
+ 'gidNumber' : '2000',
|
||||
+ 'homeDirectory' : '/home/testuser',
|
||||
+ }
|
||||
+ testuser = users.create(properties=user_properties)
|
||||
+
|
||||
+ # Add a gecos UTF value
|
||||
+ testuser.replace('gecos', 'Hélène')
|
||||
+
|
||||
+def test_gecos_mixed_definition_topo(topo_m2, request):
|
||||
+ """Check that replication is still working if schema contains
|
||||
+ definitions that does not conform with a replicated entry
|
||||
+
|
||||
+ :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
|
||||
+ :setup: Two suppliers replication setup
|
||||
+ :steps:
|
||||
+ 1. Create a testuser on M1
|
||||
+ 2 Stop M1 and M2
|
||||
+ 3 Change gecos def on M2 to be IA5
|
||||
+ 4 Update testuser with gecos directoryString value
|
||||
+ 5 Check replication is still working
|
||||
+ :expectedresults:
|
||||
+ 1. success
|
||||
+ 2. success
|
||||
+ 3. success
|
||||
+ 4. success
|
||||
+ 5. success
|
||||
+
|
||||
+ """
|
||||
+
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+ m1 = topo_m2.ms["supplier1"]
|
||||
+ m2 = topo_m2.ms["supplier2"]
|
||||
+
|
||||
+
|
||||
+ # create a test user
|
||||
+ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
|
||||
+ testuser = UserAccount(m1, testuser_dn)
|
||||
+ try:
|
||||
+ testuser.create(properties={
|
||||
+ 'uid': 'testuser',
|
||||
+ 'cn': 'testuser',
|
||||
+ 'sn': 'testuser',
|
||||
+ 'uidNumber' : '1000',
|
||||
+ 'gidNumber' : '2000',
|
||||
+ 'homeDirectory' : '/home/testuser',
|
||||
+ })
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ pass
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # Stop suppliers to update the schema
|
||||
+ m1.stop()
|
||||
+ m2.stop()
|
||||
+
|
||||
+ # on M1: gecos is DirectoryString (default)
|
||||
+ # on M2: gecos is IA5
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
|
||||
+ "'gecos' DESC 'The GECOS field; the common name' " +
|
||||
+ "EQUALITY caseIgnoreIA5Match " +
|
||||
+ "SUBSTR caseIgnoreIA5SubstringsMatch " +
|
||||
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
|
||||
+ "SINGLE-VALUE )\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+
|
||||
+ # start the instances
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+
|
||||
+ # Check that gecos is IA5 on M2
|
||||
+ schema = SchemaLegacy(m2)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
|
||||
+
|
||||
+
|
||||
+ # Add a gecos UTF value on M1
|
||||
+ testuser.replace('gecos', 'Hélène')
|
||||
+
|
||||
+ # Check replication is still working
|
||||
+ testuser.replace('displayName', 'ascii value')
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+ testuser_m2 = UserAccount(m2, testuser_dn)
|
||||
+ assert testuser_m2.exists()
|
||||
+ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
|
||||
+
|
||||
+ def fin():
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+ testuser.delete()
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # on M2 restore a default 99user.ldif
|
||||
+ m2.stop()
|
||||
+ os.remove(m2.schemadir + "/99user.ldif")
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+ m2.start()
|
||||
+ m1.start()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+def test_gecos_directoryString_wins_M1(topo_m2, request):
|
||||
+ """Check that if inital syntax are IA5(M2) and DirectoryString(M1)
|
||||
+ Then directoryString wins when nsSchemaCSN M1 is the greatest
|
||||
+
|
||||
+ :id: ad119fa5-7671-45c8-b2ef-0b28ffb68fdb
|
||||
+ :setup: Two suppliers replication setup
|
||||
+ :steps:
|
||||
+ 1. Create a testuser on M1
|
||||
+ 2 Stop M1 and M2
|
||||
+ 3 Change gecos def on M2 to be IA5
|
||||
+ 4 Start M1 and M2
|
||||
+ 5 Update M1 schema so that M1 has greatest nsSchemaCSN
|
||||
+ 6 Update testuser with gecos directoryString value
|
||||
+ 7 Check replication is still working
|
||||
+ 8 Check gecos is DirectoryString on M1 and M2
|
||||
+ :expectedresults:
|
||||
+ 1. success
|
||||
+ 2. success
|
||||
+ 3. success
|
||||
+ 4. success
|
||||
+ 5. success
|
||||
+ 6. success
|
||||
+ 7. success
|
||||
+ 8. success
|
||||
+
|
||||
+ """
|
||||
+
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+ m1 = topo_m2.ms["supplier1"]
|
||||
+ m2 = topo_m2.ms["supplier2"]
|
||||
+
|
||||
+
|
||||
+ # create a test user
|
||||
+ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
|
||||
+ testuser = UserAccount(m1, testuser_dn)
|
||||
+ try:
|
||||
+ testuser.create(properties={
|
||||
+ 'uid': 'testuser',
|
||||
+ 'cn': 'testuser',
|
||||
+ 'sn': 'testuser',
|
||||
+ 'uidNumber' : '1000',
|
||||
+ 'gidNumber' : '2000',
|
||||
+ 'homeDirectory' : '/home/testuser',
|
||||
+ })
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ pass
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # Stop suppliers to update the schema
|
||||
+ m1.stop()
|
||||
+ m2.stop()
|
||||
+
|
||||
+ # on M1: gecos is DirectoryString (default)
|
||||
+ # on M2: gecos is IA5
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
|
||||
+ "'gecos' DESC 'The GECOS field; the common name' " +
|
||||
+ "EQUALITY caseIgnoreIA5Match " +
|
||||
+ "SUBSTR caseIgnoreIA5SubstringsMatch " +
|
||||
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
|
||||
+ "SINGLE-VALUE )\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+
|
||||
+ # start the instances
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+
|
||||
+ # Check that gecos is IA5 on M2
|
||||
+ schema = SchemaLegacy(m2)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
|
||||
+
|
||||
+
|
||||
+ # update M1 schema to increase its nsschemaCSN
|
||||
+ new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )"
|
||||
+ m1.schema.add_schema('attributetypes', ensure_bytes(new_at))
|
||||
+
|
||||
+ # Add a gecos UTF value on M1
|
||||
+ testuser.replace('gecos', 'Hélène')
|
||||
+
|
||||
+ # Check replication is still working
|
||||
+ testuser.replace('displayName', 'ascii value')
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+ testuser_m2 = UserAccount(m2, testuser_dn)
|
||||
+ assert testuser_m2.exists()
|
||||
+ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
|
||||
+
|
||||
+ # Check that gecos is DirectoryString on M1
|
||||
+ schema = SchemaLegacy(m1)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
|
||||
+
|
||||
+ # Check that gecos is DirectoryString on M2
|
||||
+ schema = SchemaLegacy(m2)
|
||||
+ attributetypes = schema.query_attributetype('gecos')
|
||||
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
|
||||
+
|
||||
+ def fin():
|
||||
+ m1.start()
|
||||
+ m2.start()
|
||||
+ testuser.delete()
|
||||
+ m1.schema.del_schema('attributetypes', ensure_bytes(new_at))
|
||||
+ repl.wait_for_replication(m1, m2)
|
||||
+
|
||||
+ # on M2 restore a default 99user.ldif
|
||||
+ m2.stop()
|
||||
+ os.remove(m2.schemadir + "/99user.ldif")
|
||||
+ schema_filename = (m2.schemadir + "/99user.ldif")
|
||||
+ try:
|
||||
+ with open(schema_filename, 'w') as schema_file:
|
||||
+ schema_file.write("dn: cn=schema\n")
|
||||
+ os.chmod(schema_filename, 0o777)
|
||||
+ except OSError as e:
|
||||
+ log.fatal("Failed to update schema file: " +
|
||||
+ "{} Error: {}".format(schema_filename, str(e)))
|
||||
+ m2.start()
|
||||
+ m1.start()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+def test_gecos_directoryString_wins_M2(topo_m2, request):
|
||||
+ """Check that if inital syntax are IA5(M2) and DirectoryString(M1)
|
||||
+ Then directoryString wins when nsSchemaCSN M2 is the greatest
|
||||
+
|
||||
+ :id: 2da7f1b1-f86d-4072-a940-ba56d4bc8348
|
||||
+ :setup: Two suppliers replication setup
+ :steps:
+ 1. Create a testuser on M1
+ 2. Stop M1 and M2
+ 3. Change gecos def on M2 to be IA5
+ 4. Start M1 and M2
+ 5. Update M2 schema so that M2 has greatest nsSchemaCSN
+ 6. Update testuser on M2 and trigger replication to M1
+ 7. Update testuser on M2 with gecos directoryString value
+ 8. Check replication is still working
+ 9. Check gecos is DirectoryString on M1 and M2
+ :expectedresults:
+ 1. success
+ 2. success
+ 3. success
+ 4. success
+ 5. success
+ 6. success
+ 7. success
+ 8. success
+ 9. success
+
+ """
+
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+ m1 = topo_m2.ms["supplier1"]
+ m2 = topo_m2.ms["supplier2"]
+
+ # create a test user
+ testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX)
+ testuser = UserAccount(m1, testuser_dn)
+ try:
+ testuser.create(properties={
+ 'uid': 'testuser',
+ 'cn': 'testuser',
+ 'sn': 'testuser',
+ 'uidNumber' : '1000',
+ 'gidNumber' : '2000',
+ 'homeDirectory' : '/home/testuser',
+ })
+ except ldap.ALREADY_EXISTS:
+ pass
+ testuser.replace('displayName', 'to trigger replication M1-> M2')
+ repl.wait_for_replication(m1, m2)
+
+ # Stop suppliers to update the schema
+ m1.stop()
+ m2.stop()
+
+ # on M1: gecos is DirectoryString (default)
+ # on M2: gecos is IA5
+ schema_filename = (m2.schemadir + "/99user.ldif")
+ try:
+ with open(schema_filename, 'w') as schema_file:
+ schema_file.write("dn: cn=schema\n")
+ schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " +
+ "'gecos' DESC 'The GECOS field; the common name' " +
+ "EQUALITY caseIgnoreIA5Match " +
+ "SUBSTR caseIgnoreIA5SubstringsMatch " +
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " +
+ "SINGLE-VALUE )\n")
+ os.chmod(schema_filename, 0o777)
+ except OSError as e:
+ log.fatal("Failed to update schema file: " +
+ "{} Error: {}".format(schema_filename, str(e)))
+
+ # start the instances
+ m1.start()
+ m2.start()
+
+ # Check that gecos is IA5 on M2
+ schema = SchemaLegacy(m2)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26"
+
+ # update M2 schema to increase its nsschemaCSN
+ new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )"
+ m2.schema.add_schema('attributetypes', ensure_bytes(new_at))
+
+ # update just to trigger replication M2->M1
+ # and update of M2 schema
+ testuser_m2 = UserAccount(m2, testuser_dn)
+ testuser_m2.replace('displayName', 'to trigger replication M2-> M1')
+
+ # Add a gecos UTF value on M1
+ testuser.replace('gecos', 'Hélène')
+
+ # Check replication is still working
+ testuser.replace('displayName', 'ascii value')
+ repl.wait_for_replication(m1, m2)
+ assert testuser_m2.exists()
+ assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value'
+
+ # Check that gecos is DirectoryString on M1
+ schema = SchemaLegacy(m1)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
+
+ # Check that gecos is DirectoryString on M2
+ schema = SchemaLegacy(m2)
+ attributetypes = schema.query_attributetype('gecos')
+ assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15"
+
+ def fin():
+ m1.start()
+ m2.start()
+ testuser.delete()
+ m1.schema.del_schema('attributetypes', ensure_bytes(new_at))
+ repl.wait_for_replication(m1, m2)
+
+ # on M2 restore a default 99user.ldif
+ m2.stop()
+ os.remove(m2.schemadir + "/99user.ldif")
+ schema_filename = (m2.schemadir + "/99user.ldif")
+ try:
+ with open(schema_filename, 'w') as schema_file:
+ schema_file.write("dn: cn=schema\n")
+ os.chmod(schema_filename, 0o777)
+ except OSError as e:
+ log.fatal("Failed to update schema file: " +
+ "{} Error: {}".format(schema_filename, str(e)))
+ m2.start()
+
+ request.addfinalizer(fin)

 if __name__ == '__main__':
 # Run isolated
diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif
index 8ba72e1e3..998b8983b 100644
--- a/ldap/schema/10rfc2307compat.ldif
+++ b/ldap/schema/10rfc2307compat.ldif
@ -21,9 +21,9 @@ attributeTypes: (
 attributeTypes: (
 1.3.6.1.1.1.1.2 NAME 'gecos'
 DESC 'The GECOS field; the common name'
- EQUALITY caseIgnoreIA5Match
- SUBSTR caseIgnoreIA5SubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ EQUALITY caseIgnoreMatch
+ SUBSTR caseIgnoreSubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
 SINGLE-VALUE
 )
 attributeTypes: (
--
2.31.1
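For reference, the two OIDs the assertions above compare are the IA5String syntax (1.3.6.1.4.1.1466.115.121.1.26, ASCII only) and the DirectoryString syntax (1.3.6.1.4.1.1466.115.121.1.15, UTF-8), which is why a value like 'Hélène' only replicates cleanly once both suppliers agree on DirectoryString. A minimal sketch of the same check outside the test, assuming SchemaLegacy is importable from lib389.schema (the import path is an assumption; the calls are the ones the test itself uses):

# Hedged sketch: report which syntax OID 'gecos' carries on a given instance.
from lib389.schema import SchemaLegacy

DIRECTORY_STRING = "1.3.6.1.4.1.1466.115.121.1.15"  # UTF-8 values allowed
IA5_STRING = "1.3.6.1.4.1.1466.115.121.1.26"        # ASCII-only values

def gecos_syntax(inst):
    # query_attributetype() parses the attributeTypes definition for 'gecos'
    attributetypes = SchemaLegacy(inst).query_attributetype('gecos')
    return attributetypes[0].syntax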
@ -0,0 +1,32 @@
From 3909877f12e50556e844bc20e72870a4fa905ada Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Tue, 9 Nov 2021 12:55:28 +0000
Subject: [PATCH 10/12] Issue 4997 - Function declaration compiler error on
 1.4.3

Bug description: Building the server on the 1.4.3 branch generates a
compiler error due to a typo in a function declaration.

Fixes: https://github.com/389ds/389-ds-base/issues/4997

Reviewed by: @jchapman (one line commit rule)
---
 ldap/servers/slapd/slapi-private.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index 570765e47..d6d74e8a7 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@ -273,7 +273,7 @@ void *csngen_register_callbacks(CSNGen *gen, GenCSNFn genFn, void *genArg, Abort
 void csngen_unregister_callbacks(CSNGen *gen, void *cookie);

 /* debugging function */
-void csngen_dump_state(const CSNGen *gen);
+void csngen_dump_state(const CSNGen *gen, int severity);

 /* this function tests csn generator */
 void csngen_test(void);
--
2.31.1

@ -0,0 +1,32 @@
From 60d570e52465b58167301f64792f5f85cbc85e20 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 10 Nov 2021 08:53:45 -0500
Subject: [PATCH 11/12] Issue 4978 - use more portable python command for
 checking containers

Description: During the installation check for containers, use arguments
for subprocess.run() that work on all versions of Python.

relates: https://github.com/389ds/389-ds-base/issues/4978

Reviewed by: mreynolds (one line commit rule)
---
 src/lib389/lib389/instance/setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index 7b0147cf9..b23d2deb8 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@ -734,7 +734,7 @@ class SetupDs(object):
 # Check if we are in a container, if so don't use /dev/shm for the db home dir
 # as containers typically don't allocate enough space for dev/shm and we don't
 # want to unexpectedly break the server after an upgrade
-container_result = subprocess.run(["systemd-detect-virt", "-c"], capture_output=True)
+container_result = subprocess.run(["systemd-detect-virt", "-c"], stdout=subprocess.PIPE)
 if container_result.returncode == 0:
 # In a container, set the db_home_dir to the db path
 self.log.debug("Container detected setting db home directory to db directory.")
--
2.31.1
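The one-line change above matters because subprocess.run() only gained its capture_output flag in Python 3.7, while stdout=subprocess.PIPE has been accepted since subprocess.run() was introduced in Python 3.5; RHEL 8's platform Python is 3.6. A hedged sketch of the equivalence (the command is the one the installer uses; the rest is illustration):

# Portable form: works on Python 3.5+, captures stdout just like
# capture_output=True would on Python 3.7+ (stderr is left untouched here).
import subprocess

result = subprocess.run(["systemd-detect-virt", "-c"], stdout=subprocess.PIPE)
if result.returncode == 0:
    # systemd-detect-virt exits 0 when a container is detected
    print("container:", result.stdout.decode().strip())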
@ -0,0 +1,31 @@
From 2c6653edef793d46815e6df607e55d68e14fe232 Mon Sep 17 00:00:00 2001
From: spike <spike@fedoraproject.org>
Date: Fri, 5 Nov 2021 13:56:41 +0100
Subject: [PATCH 12/12] Issue 4959 - Invalid /etc/hosts setup can cause
 isLocalHost to fail.

Description: Use local_simple_allocate in dsctl so that isLocal is always set properly

Relates: https://github.com/389ds/389-ds-base/issues/4959

Reviewed by: @droideck (Thanks!)
---
 src/lib389/cli/dsctl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl
index b6c42b5cc..d2ea6cd29 100755
--- a/src/lib389/cli/dsctl
+++ b/src/lib389/cli/dsctl
@ -135,7 +135,7 @@ if __name__ == '__main__':
 log.error("Unable to access instance information. Are you running as the correct user? (usually dirsrv or root)")
 sys.exit(1)

-inst.allocate(insts[0])
+inst.local_simple_allocate(insts[0]['server-id'])
 log.debug('Instance allocated')

 try:
--
2.31.1
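As the subject line suggests, the dict-driven DirSrv.allocate() path ends up depending on hostname resolution to decide whether an instance is local, so a broken /etc/hosts can leave isLocal wrong, while local_simple_allocate() takes just the server id and marks the instance local directly. A hedged sketch using only the lib389 calls the patch itself shows (the instance name below is a placeholder):

# Sketch of the dsctl change: allocate by server id, no hostname lookup.
from lib389 import DirSrv

inst = DirSrv()
inst.local_simple_allocate("localhost")   # isLocal is set properly here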
105
0013-CVE-2021-4091-BZ-2030367-double-free-of-the-virtual-.patch
Normal file
@ -0,0 +1,105 @@
From d000349089eb15b3476ec302f4279f118336290e Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 16 Dec 2021 16:13:08 -0500
Subject: [PATCH 1/2] CVE-2021-4091 (BZ#2030367) double-free of the virtual
 attribute context in persistent search

description:
A search is processed by a worker using a private pblock.
If the search is persistent, the worker spawns a thread
and duplicates its private pblock so that the spawned
thread can continue to process the persistent search.
The worker then ends the initial search, reinits (frees) its
private pblock, and returns to monitoring the wait_queue.
When the persistent search completes, it frees the duplicated
pblock.
The problem is that the private pblock and the duplicated pblock
refer to the same structure (pb_vattr_context).
That leads to a double free.

Fix:
When cloning the pblock (slapi_pblock_clone), make sure
to transfer the references inside the original (private)
pblock to the target (cloned) one.
That includes the pb_vattr_context pointer.

Reviewed by: Mark Reynolds, James Chapman, Pierre Rogier (Thanks !)
---
 ldap/servers/slapd/connection.c |  8 +++++---
 ldap/servers/slapd/pblock.c     | 14 ++++++++++++--
 2 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index e0c1a52d2..fc7ed9c4a 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@ -1823,9 +1823,11 @@ connection_threadmain()
 pthread_mutex_unlock(&(conn->c_mutex));
 }
 /* ps_add makes a shallow copy of the pb - so we
- * can't free it or init it here - just set operation to NULL.
- * ps_send_results will call connection_remove_operation_ext to free it
- */
+ * can't free it or init it here - just set operation to NULL.
+ * ps_send_results will call connection_remove_operation_ext to free it
+ * The connection_thread private pblock ('pb') has be cloned and should only
+ * be reinit (slapi_pblock_init)
+ */
 slapi_pblock_set(pb, SLAPI_OPERATION, NULL);
 slapi_pblock_init(pb);
 } else {
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index a64986aeb..c78d1250f 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@ -292,6 +292,12 @@ _pblock_assert_pb_deprecated(Slapi_PBlock *pblock)
 }
 }

+/* It clones the pblock
+ * the content of the source pblock is transfered
+ * to the target pblock (returned)
+ * The source pblock should not be used for any operation
+ * it needs to be reinit (slapi_pblock_init)
+ */
 Slapi_PBlock *
 slapi_pblock_clone(Slapi_PBlock *pb)
 {
@ -312,28 +318,32 @@ slapi_pblock_clone(Slapi_PBlock *pb)
 if (pb->pb_task != NULL) {
 _pblock_assert_pb_task(new_pb);
 *(new_pb->pb_task) = *(pb->pb_task);
+ memset(pb->pb_task, 0, sizeof(slapi_pblock_task));
 }
 if (pb->pb_mr != NULL) {
 _pblock_assert_pb_mr(new_pb);
 *(new_pb->pb_mr) = *(pb->pb_mr);
+ memset(pb->pb_mr, 0, sizeof(slapi_pblock_matching_rule));
 }
 if (pb->pb_misc != NULL) {
 _pblock_assert_pb_misc(new_pb);
 *(new_pb->pb_misc) = *(pb->pb_misc);
+ memset(pb->pb_misc, 0, sizeof(slapi_pblock_misc));
 }
 if (pb->pb_intop != NULL) {
 _pblock_assert_pb_intop(new_pb);
 *(new_pb->pb_intop) = *(pb->pb_intop);
- /* set pwdpolicy to NULL so this clone allocates its own policy */
- new_pb->pb_intop->pwdpolicy = NULL;
+ memset(pb->pb_intop, 0, sizeof(slapi_pblock_intop));
 }
 if (pb->pb_intplugin != NULL) {
 _pblock_assert_pb_intplugin(new_pb);
 *(new_pb->pb_intplugin) = *(pb->pb_intplugin);
+ memset(pb->pb_intplugin, 0,sizeof(slapi_pblock_intplugin));
 }
 if (pb->pb_deprecated != NULL) {
 _pblock_assert_pb_deprecated(new_pb);
 *(new_pb->pb_deprecated) = *(pb->pb_deprecated);
+ memset(pb->pb_deprecated, 0, sizeof(slapi_pblock_deprecated));
 }
 #ifdef PBLOCK_ANALYTICS
 new_pb->analytics = NULL;
--
2.31.1
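The memset() calls in the hunk above are the whole fix: cloning must transfer, not share, any pointer that will later be freed. A conceptual Python sketch of that "clone by transfer" rule (this stands in for the C code; it is not the server implementation):

# After cloning, the source drops its reference, so exactly one owner
# ever frees the shared context (a stand-in for pb_vattr_context).
class PBlock:
    def __init__(self):
        self.vattr_context = object()

def pblock_clone(pb):
    new_pb = PBlock()
    new_pb.vattr_context = pb.vattr_context
    pb.vattr_context = None   # ownership moves to the clone
    return new_pb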
102
0014-Issue-5127-run-restorecon-on-dev-shm-at-server-start.patch
Normal file
@ -0,0 +1,102 @@
From 03ca5111a8de602ecef9ad33206ba593b242d0df Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 21 Jan 2022 10:15:35 -0500
Subject: [PATCH 1/2] Issue 5127 - run restorecon on /dev/shm at server startup

Description:

Update the systemd service file to execute a script that runs
restorecon on the DB home directory. This addresses issues with
backup/restore, reboot, and FS restore issues that can happen when
/dev/shm is missing or created outside of dscreate.

relates: https://github.com/389ds/389-ds-base/issues/5127

Reviewed by: progier & viktor (Thanks!!)
---
 Makefile.am                          |  2 +-
 rpm/389-ds-base.spec.in              |  1 +
 wrappers/ds_selinux_restorecon.sh.in | 33 ++++++++++++++++++++++++++++
 wrappers/systemd.template.service.in |  1 +
 4 files changed, 36 insertions(+), 1 deletion(-)
 create mode 100644 wrappers/ds_selinux_restorecon.sh.in

diff --git a/Makefile.am b/Makefile.am
index fc5a6a7d1..d6ad273c3 100644
--- a/Makefile.am
+++ b/Makefile.am
@ -775,7 +775,7 @@ libexec_SCRIPTS += ldap/admin/src/scripts/ds_selinux_enabled \
 ldap/admin/src/scripts/ds_selinux_port_query
 endif
 if SYSTEMD
-libexec_SCRIPTS += wrappers/ds_systemd_ask_password_acl
+libexec_SCRIPTS += wrappers/ds_systemd_ask_password_acl wrappers/ds_selinux_restorecon.sh
 endif

 install-data-hook:
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index d80de8422..6c0d95abd 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@ -623,6 +623,7 @@ exit 0
 %{_sbindir}/ns-slapd
 %{_mandir}/man8/ns-slapd.8.gz
 %{_libexecdir}/%{pkgname}/ds_systemd_ask_password_acl
+%{_libexecdir}/%{pkgname}/ds_selinux_restorecon.sh
 %{_mandir}/man5/99user.ldif.5.gz
 %{_mandir}/man5/certmap.conf.5.gz
 %{_mandir}/man5/slapd-collations.conf.5.gz
diff --git a/wrappers/ds_selinux_restorecon.sh.in b/wrappers/ds_selinux_restorecon.sh.in
new file mode 100644
index 000000000..063347de3
--- /dev/null
+++ b/wrappers/ds_selinux_restorecon.sh.in
@ -0,0 +1,33 @@
+#!/bin/sh
+# BEGIN COPYRIGHT BLOCK
+# Copyright (C) 2022 Red Hat, Inc.
+#
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# END COPYRIGHT BLOCK
+
+# Make sure we have the path to the dse.ldif
+if [ -z $1 ]
+then
+ echo "usage: ${0} /etc/dirsrv/slapd-<instance>/dse.ldif"
+ exit 0
+fi
+
+if ! command -v restorecon &> /dev/null
+then
+ # restorecon is not available
+ exit 0
+fi
+
+# Grep the db_home_dir out of the config file
+DS_HOME_DIR=`grep 'nsslapd-db-home-directory: ' $1 | awk '{print $2}'`
+if [ -z "$DS_HOME_DIR" ]
+then
+ # No DB home set, that's ok
+ exit 0
+fi
+
+# Now run restorecon
+restorecon ${DS_HOME_DIR}
diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in
index a8c21a9be..4485e0ec0 100644
--- a/wrappers/systemd.template.service.in
+++ b/wrappers/systemd.template.service.in
@ -14,6 +14,7 @@ EnvironmentFile=-@initconfigdir@/@package_name@
 EnvironmentFile=-@initconfigdir@/@package_name@-%i
 PIDFile=/run/@package_name@/slapd-%i.pid
 ExecStartPre=@libexecdir@/ds_systemd_ask_password_acl @instconfigdir@/slapd-%i/dse.ldif
+ExecStartPre=@libexecdir@/ds_selinux_restorecon.sh @instconfigdir@/slapd-%i/dse.ldif
 ExecStart=@sbindir@/ns-slapd -D @instconfigdir@/slapd-%i -i /run/@package_name@/slapd-%i.pid
 PrivateTmp=on

--
2.31.1
35
0015-Issue-5127-ds_selinux_restorecon.sh-always-exit-0.patch
Normal file
@ -0,0 +1,35 @@
From 0ed471bae52bb0debd23336cbc5f3f1d400cbbc9 Mon Sep 17 00:00:00 2001
From: Adam Williamson <awilliam@redhat.com>
Date: Thu, 27 Jan 2022 11:07:26 -0800
Subject: [PATCH] Issue 5127 - ds_selinux_restorecon.sh: always exit 0

Description:

We don't want to error out and give up on starting the service
if the restorecon fails - it might just be that the directory
doesn't exist and doesn't need restoring. Issue identified and
fix suggested by Simon Farnsworth

relates: https://github.com/389ds/389-ds-base/issues/5127

Reviewed by: adamw & mreynolds
---
 wrappers/ds_selinux_restorecon.sh.in | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/wrappers/ds_selinux_restorecon.sh.in b/wrappers/ds_selinux_restorecon.sh.in
index 063347de3..2d7386233 100644
--- a/wrappers/ds_selinux_restorecon.sh.in
+++ b/wrappers/ds_selinux_restorecon.sh.in
@ -29,5 +29,6 @@ then
 exit 0
 fi

-# Now run restorecon
-restorecon ${DS_HOME_DIR}
+# Now run restorecon, but don't die if it fails (could be that the
+# directory doesn't exist)
+restorecon ${DS_HOME_DIR} || :
--
2.31.1
262
0016-Issue-4775-Add-entryuuid-CLI-and-Fixup-4776.patch
Normal file
@ -0,0 +1,262 @@
From 93588ea455aff691bdfbf59cdef4df8fcedb69f2 Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Thu, 19 Aug 2021 10:46:00 +1000
Subject: [PATCH 1/2] Issue 4775 - Add entryuuid CLI and Fixup (#4776)

Bug Description: EntryUUID, when added, was missing its CLI
and helpers for fixups.

Fix Description: Add the CLI elements.

fixes: https://github.com/389ds/389-ds-base/issues/4775

Author: William Brown <william@blackhats.net.au>

Review by: @mreynolds389 (thanks!)
---
 src/lib389/lib389/cli_conf/plugin.py          |  6 ++-
 .../lib389/cli_conf/plugins/entryuuid.py      | 39 ++++++++++++++
 src/plugins/entryuuid/src/lib.rs              | 54 ++++++++-----------
 3 files changed, 65 insertions(+), 34 deletions(-)
 create mode 100644 src/lib389/lib389/cli_conf/plugins/entryuuid.py

diff --git a/src/lib389/lib389/cli_conf/plugin.py b/src/lib389/lib389/cli_conf/plugin.py
index 560c57f9b..7c0cf2c80 100644
--- a/src/lib389/lib389/cli_conf/plugin.py
+++ b/src/lib389/lib389/cli_conf/plugin.py
@ -1,5 +1,5 @@
 # --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2022 Red Hat, Inc.
 # All rights reserved.
 #
 # License: GPL (version 3 or any later version).
@ -27,6 +27,8 @@ from lib389.cli_conf.plugins import passthroughauth as cli_passthroughauth
 from lib389.cli_conf.plugins import retrochangelog as cli_retrochangelog
 from lib389.cli_conf.plugins import automember as cli_automember
 from lib389.cli_conf.plugins import posix_winsync as cli_posix_winsync
+from lib389.cli_conf.plugins import contentsync as cli_contentsync
+from lib389.cli_conf.plugins import entryuuid as cli_entryuuid

 SINGULAR = Plugin
 MANY = Plugins
@ -113,6 +115,8 @@ def create_parser(subparsers):
 cli_passthroughauth.create_parser(subcommands)
 cli_retrochangelog.create_parser(subcommands)
 cli_posix_winsync.create_parser(subcommands)
+ cli_contentsync.create_parser(subcommands)
+ cli_entryuuid.create_parser(subcommands)

 list_parser = subcommands.add_parser('list', help="List current configured (enabled and disabled) plugins")
 list_parser.set_defaults(func=plugin_list)
diff --git a/src/lib389/lib389/cli_conf/plugins/entryuuid.py b/src/lib389/lib389/cli_conf/plugins/entryuuid.py
new file mode 100644
index 000000000..6c86bff4b
--- /dev/null
+++ b/src/lib389/lib389/cli_conf/plugins/entryuuid.py
@ -0,0 +1,39 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2021 William Brown <william@blackhats.net.au>
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import ldap
+from lib389.plugins import EntryUUIDPlugin
+from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add
+
+def do_fixup(inst, basedn, log, args):
+ plugin = EntryUUIDPlugin(inst)
+ log.info('Attempting to add task entry...')
+ if not plugin.status():
+ log.error("'%s' is disabled. Fix up task can't be executed" % plugin.rdn)
+ return
+ fixup_task = plugin.fixup(args.DN, args.filter)
+ fixup_task.wait()
+ exitcode = fixup_task.get_exit_code()
+ if exitcode != 0:
+ log.error('EntryUUID fixup task has failed. Please, check the error log for more - %s' % exitcode)
+ else:
+ log.info('Successfully added task entry')
+
+def create_parser(subparsers):
+ referint = subparsers.add_parser('entryuuid', help='Manage and configure EntryUUID plugin')
+ subcommands = referint.add_subparsers(help='action')
+
+ add_generic_plugin_parsers(subcommands, EntryUUIDPlugin)
+
+ fixup = subcommands.add_parser('fixup', help='Run the fix-up task for EntryUUID plugin')
+ fixup.set_defaults(func=do_fixup)
+ fixup.add_argument('DN', help="Base DN that contains entries to fix up")
+ fixup.add_argument('-f', '--filter',
+ help='Filter for entries to fix up.\n If omitted, all entries under base DN'
+ 'will have their EntryUUID attribute regenerated if not present.')
+
diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs
index da9f0c239..29a9f1258 100644
--- a/src/plugins/entryuuid/src/lib.rs
+++ b/src/plugins/entryuuid/src/lib.rs
@ -33,7 +33,7 @@ fn assign_uuid(e: &mut EntryRef) {
 // 🚧 safety barrier 🚧
 if e.contains_attr("entryUUID") {
 log_error!(
- ErrorLevel::Trace,
+ ErrorLevel::Plugin,
 "assign_uuid -> entryUUID exists, skipping dn {}",
 sdn.to_dn_string()
 );
@ -47,7 +47,7 @@ fn assign_uuid(e: &mut EntryRef) {
 if sdn.is_below_suffix(&*config_sdn) || sdn.is_below_suffix(&*schema_sdn) {
 // We don't need to assign to these suffixes.
 log_error!(
- ErrorLevel::Trace,
+ ErrorLevel::Plugin,
 "assign_uuid -> not assigning to {:?} as part of system suffix",
 sdn.to_dn_string()
 );
@ -57,7 +57,7 @@ fn assign_uuid(e: &mut EntryRef) {
 // Generate a new Uuid.
 let u: Uuid = Uuid::new_v4();
 log_error!(
- ErrorLevel::Trace,
+ ErrorLevel::Plugin,
 "assign_uuid -> assigning {:?} to dn {}",
 u,
 sdn.to_dn_string()
@ -78,13 +78,13 @@ impl SlapiPlugin3 for EntryUuid {
 fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> {
 if pb.get_is_replicated_operation() {
 log_error!(
- ErrorLevel::Trace,
+ ErrorLevel::Plugin,
 "betxn_pre_add -> replicated operation, will not change"
 );
 return Ok(());
 }

- log_error!(ErrorLevel::Trace, "betxn_pre_add -> start");
+ log_error!(ErrorLevel::Plugin, "betxn_pre_add -> start");

 let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?;
 assign_uuid(&mut e);
@ -105,7 +105,7 @@ impl SlapiPlugin3 for EntryUuid {
 .first()
 .ok_or_else(|| {
 log_error!(
- ErrorLevel::Trace,
+ ErrorLevel::Plugin,
 "task_validate basedn error -> empty value array?"
 );
 LDAPError::Operation
@ -113,7 +113,7 @@ impl SlapiPlugin3 for EntryUuid {
 .as_ref()
 .try_into()
 .map_err(|e| {
- log_error!(ErrorLevel::Trace, "task_validate basedn error -> {:?}", e);
+ log_error!(ErrorLevel::Plugin, "task_validate basedn error -> {:?}", e);
 LDAPError::Operation
 })?,
 None => return Err(LDAPError::ObjectClassViolation),
@ -124,7 +124,7 @@ impl SlapiPlugin3 for EntryUuid {
 .first()
 .ok_or_else(|| {
 log_error!(
- ErrorLevel::Trace,
+ ErrorLevel::Plugin,
 "task_validate filter error -> empty value array?"
 );
 LDAPError::Operation
@ -132,7 +132,7 @@ impl SlapiPlugin3 for EntryUuid {
 .as_ref()
 .try_into()
 .map_err(|e| {
- log_error!(ErrorLevel::Trace, "task_validate filter error -> {:?}", e);
+ log_error!(ErrorLevel::Plugin, "task_validate filter error -> {:?}", e);
 LDAPError::Operation
 })?,
 None => {
@ -144,17 +144,11 @@ impl SlapiPlugin3 for EntryUuid {
 // Error if the first filter is empty?

 // Now, to make things faster, we wrap the filter in a exclude term.
-
- // 2021 - #4877 because we allow entryuuid to be strings, on import these may
- // be invalid. As a result, we DO need to allow the fixup to check the entryuuid
- // value is correct, so we can not exclude these during the search.
- /*
 let raw_filter = if !raw_filter.starts_with('(') && !raw_filter.ends_with('(') {
 format!("(&({})(!(entryuuid=*)))", raw_filter)
 } else {
 format!("(&{}(!(entryuuid=*)))", raw_filter)
 };
- */

 Ok(FixupData { basedn, raw_filter })
 }
@ -165,7 +159,7 @@ impl SlapiPlugin3 for EntryUuid {

 fn task_handler(_task: &Task, data: Self::TaskData) -> Result<Self::TaskData, PluginError> {
 log_error!(
- ErrorLevel::Trace,
+ ErrorLevel::Plugin,
 "task_handler -> start thread with -> {:?}",
 data
 );
@ -205,12 +199,12 @@ impl SlapiPlugin3 for EntryUuid {
 }

 fn start(_pb: &mut PblockRef) -> Result<(), PluginError> {
- log_error!(ErrorLevel::Trace, "plugin start");
+ log_error!(ErrorLevel::Plugin, "plugin start");
 Ok(())
 }

 fn close(_pb: &mut PblockRef) -> Result<(), PluginError> {
- log_error!(ErrorLevel::Trace, "plugin close");
+ log_error!(ErrorLevel::Plugin, "plugin close");
 Ok(())
 }
 }
@ -219,20 +213,14 @@ pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError
 /* Supply a modification to the entry. */
 let sdn = e.get_sdnref();

- /* Check that entryuuid doesn't already exist, and is valid */
- if let Some(valueset) = e.get_attr("entryUUID") {
- if valueset.iter().all(|v| {
- let u: Result<Uuid, _> = (&v).try_into();
- u.is_ok()
- }) {
- // All values were valid uuid, move on!
- log_error!(
- ErrorLevel::Plugin,
- "skipping fixup for -> {}",
- sdn.to_dn_string()
- );
- return Ok(());
- }
+ /* Sanity check that entryuuid doesn't already exist */
+ if e.contains_attr("entryUUID") {
+ log_error!(
+ ErrorLevel::Plugin,
+ "skipping fixup for -> {}",
+ sdn.to_dn_string()
+ );
+ return Ok(());
 }

 // Setup the modifications
@ -248,7 +236,7 @@ pub fn entryuuid_fixup_mapfn(e: &EntryRef, _data: &()) -> Result<(), PluginError

 match lmod.execute() {
 Ok(_) => {
- log_error!(ErrorLevel::Trace, "fixed-up -> {}", sdn.to_dn_string());
+ log_error!(ErrorLevel::Plugin, "fixed-up -> {}", sdn.to_dn_string());
 Ok(())
 }
 Err(e) => {
--
2.34.1
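Driven from the CLI, the parser added above yields an invocation of roughly the form "dsconf <instance> plugin entryuuid fixup <basedn> -f <filter>" (the surrounding dsconf syntax is an assumption; the subcommand name, positional DN, and -f/--filter flag come straight from the code). Programmatically, the same task can be run with the calls do_fixup() itself uses:

# Hedged sketch: run the EntryUUID fixup via lib389, mirroring do_fixup()
# above; 'inst' must be a connected DirSrv instance.
from lib389.plugins import EntryUUIDPlugin

def run_entryuuid_fixup(inst, basedn, filt=None):
    plugin = EntryUUIDPlugin(inst)
    task = plugin.fixup(basedn, filt)   # adds the cn=tasks entry
    task.wait()                         # block until the task finishes
    return task.get_exit_code()         # 0 on success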
42
0017-Issue-4775-Fix-cherry-pick-error.patch
Normal file
@ -0,0 +1,42 @@
From 525f2307fa3e2d0ae55c8c922e6f7220a1e5bd1b Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 3 Feb 2022 16:51:38 -0500
Subject: [PATCH] Issue 4775 - Fix cherry-pick error

Bug Description: EntryUUID, when added, was missing its CLI
and helpers for fixups.

Fix Description: Add the CLI elements.

fixes: https://github.com/389ds/389-ds-base/issues/4775

Author: William Brown <william@blackhats.net.au>

Review by: @mreynolds389 (thanks!)
---
 src/lib389/lib389/cli_conf/plugin.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/lib389/lib389/cli_conf/plugin.py b/src/lib389/lib389/cli_conf/plugin.py
index 7c0cf2c80..fb0ef3077 100644
--- a/src/lib389/lib389/cli_conf/plugin.py
+++ b/src/lib389/lib389/cli_conf/plugin.py
@ -27,7 +27,6 @@ from lib389.cli_conf.plugins import passthroughauth as cli_passthroughauth
 from lib389.cli_conf.plugins import retrochangelog as cli_retrochangelog
 from lib389.cli_conf.plugins import automember as cli_automember
 from lib389.cli_conf.plugins import posix_winsync as cli_posix_winsync
-from lib389.cli_conf.plugins import contentsync as cli_contentsync
 from lib389.cli_conf.plugins import entryuuid as cli_entryuuid

 SINGULAR = Plugin
@ -115,7 +114,6 @@ def create_parser(subparsers):
 cli_passthroughauth.create_parser(subcommands)
 cli_retrochangelog.create_parser(subcommands)
 cli_posix_winsync.create_parser(subcommands)
- cli_contentsync.create_parser(subcommands)
 cli_entryuuid.create_parser(subcommands)

 list_parser = subcommands.add_parser('list', help="List current configured (enabled and disabled) plugins")
--
2.34.1
23
389-ds-base-git-local.sh
Normal file
@ -0,0 +1,23 @@
#!/bin/bash

DATE=`date +%Y%m%d`
# use a real tag name here
VERSION=1.3.5.14
PKGNAME=389-ds-base
TAG=${TAG:-$PKGNAME-$VERSION}
#SRCNAME=$PKGNAME-$VERSION-$DATE
SRCNAME=$PKGNAME-$VERSION

test -d .git || {
 echo you must be in the ds git repo to use this
 echo bye
 exit 1
}

if [ -z "$1" ] ; then
 dir=.
else
 dir="$1"
fi

git archive --prefix=$SRCNAME/ $TAG | bzip2 > $dir/$SRCNAME.tar.bz2
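Usage follows directly from the script: run it from inside a ds git checkout, optionally overriding TAG, e.g. "TAG=389-ds-base-1.3.5.14 ./389-ds-base-git-local.sh /tmp" would write /tmp/389-ds-base-1.3.5.14.tar.bz2 (the tag and output directory here are illustrative).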
@ -25,7 +25,7 @@ ExcludeArch: i686

 %if %{bundle_jemalloc}
 %global jemalloc_name jemalloc
%global jemalloc_ver 5.3.0
%global jemalloc_ver 5.2.1
 %global __provides_exclude ^libjemalloc\\.so.*$
 %endif

@ -45,15 +45,11 @@ ExcludeArch: i686
 # Filter argparse-manpage from autogenerated package Requires
 %global __requires_exclude ^python.*argparse-manpage

# Force to require nss version greater or equal as the version available at the build time
# See bz1986327
%define dirsrv_requires_ge() %(LC_ALL="C" echo '%*' | xargs -r rpm -q --qf 'Requires: %%{name} >= %%{epoch}:%%{version}\\n' | sed -e 's/ (none):/ /' -e 's/ 0:/ /' | grep -v "is not")

 Summary: 389 Directory Server (base)
 Name: 389-ds-base
Version: 1.4.3.39
Release: %{?relprefix}14%{?prerel}%{?dist}
License: GPL-3.0-or-later WITH GPL-3.0-389-ds-base-exception AND (0BSD OR Apache-2.0 OR MIT) AND (Apache-2.0 OR Apache-2.0 WITH LLVM-exception OR MIT) AND (Apache-2.0 OR BSD-2-Clause OR MIT) AND (Apache-2.0 OR BSL-1.0) AND (Apache-2.0 OR LGPL-2.1-or-later OR MIT) AND (Apache-2.0 OR MIT OR Zlib) AND (Apache-2.0 OR MIT) AND (MIT OR Apache-2.0) AND Unicode-3.0 AND (MIT OR Unlicense) AND Apache-2.0 AND BSD-3-Clause AND MIT AND MPL-2.0
Version: 1.4.3.28
Release: %{?relprefix}6%{?prerel}%{?dist}
License: GPLv3+
 URL: https://www.port389.org
 Group: System Environment/Daemons
 Conflicts: selinux-policy-base < 3.9.8
@ -62,116 +58,75 @@ Obsoletes: %{name} <= 1.4.0.9
 Provides: ldif2ldbm >= 0

 ##### Bundled cargo crates list - START #####
Provides: bundled(crate(addr2line)) = 0.24.2
Provides: bundled(crate(adler2)) = 2.0.0
Provides: bundled(crate(ahash)) = 0.7.8
Provides: bundled(crate(ansi_term)) = 0.12.1
Provides: bundled(crate(ansi_term)) = 0.11.0
Provides: bundled(crate(atty)) = 0.2.14
Provides: bundled(crate(autocfg)) = 1.4.0
Provides: bundled(crate(backtrace)) = 0.3.75
Provides: bundled(crate(base64)) = 0.13.1
Provides: bundled(crate(bitflags)) = 2.9.1
Provides: bundled(crate(byteorder)) = 1.5.0
Provides: bundled(crate(autocfg)) = 1.0.1
Provides: bundled(crate(base64)) = 0.13.0
Provides: bundled(crate(bitflags)) = 1.3.2
Provides: bundled(crate(byteorder)) = 1.4.3
Provides: bundled(crate(cbindgen)) = 0.9.1
Provides: bundled(crate(cc)) = 1.2.25
Provides: bundled(crate(cc)) = 1.0.71
Provides: bundled(crate(cfg-if)) = 1.0.0
Provides: bundled(crate(clap)) = 2.34.0
Provides: bundled(crate(concread)) = 0.2.21
Provides: bundled(crate(crossbeam)) = 0.8.4
Provides: bundled(crate(crossbeam-channel)) = 0.5.15
Provides: bundled(crate(crossbeam-deque)) = 0.8.6
Provides: bundled(crate(crossbeam-epoch)) = 0.9.18
Provides: bundled(crate(crossbeam-queue)) = 0.3.12
Provides: bundled(crate(crossbeam-utils)) = 0.8.21
Provides: bundled(crate(errno)) = 0.3.12
Provides: bundled(crate(fastrand)) = 2.3.0
Provides: bundled(crate(clap)) = 2.33.3
Provides: bundled(crate(entryuuid)) = 0.1.0
Provides: bundled(crate(entryuuid_syntax)) = 0.1.0
Provides: bundled(crate(fernet)) = 0.1.4
Provides: bundled(crate(foreign-types)) = 0.3.2
Provides: bundled(crate(foreign-types-shared)) = 0.1.1
Provides: bundled(crate(getrandom)) = 0.3.3
Provides: bundled(crate(gimli)) = 0.31.1
Provides: bundled(crate(hashbrown)) = 0.12.3
Provides: bundled(crate(getrandom)) = 0.2.3
Provides: bundled(crate(hermit-abi)) = 0.1.19
Provides: bundled(crate(instant)) = 0.1.13
Provides: bundled(crate(itoa)) = 1.0.15
Provides: bundled(crate(jobserver)) = 0.1.33
Provides: bundled(crate(libc)) = 0.2.172
Provides: bundled(crate(linux-raw-sys)) = 0.9.4
Provides: bundled(crate(lock_api)) = 0.4.13
Provides: bundled(crate(log)) = 0.4.27
Provides: bundled(crate(lru)) = 0.7.8
Provides: bundled(crate(memchr)) = 2.7.4
Provides: bundled(crate(miniz_oxide)) = 0.8.8
Provides: bundled(crate(object)) = 0.36.7
Provides: bundled(crate(once_cell)) = 1.21.3
Provides: bundled(crate(openssl)) = 0.10.73
Provides: bundled(crate(openssl-macros)) = 0.1.1
Provides: bundled(crate(openssl-sys)) = 0.9.109
Provides: bundled(crate(parking_lot)) = 0.11.2
Provides: bundled(crate(parking_lot_core)) = 0.8.6
Provides: bundled(crate(itoa)) = 0.4.8
Provides: bundled(crate(jobserver)) = 0.1.24
Provides: bundled(crate(lazy_static)) = 1.4.0
Provides: bundled(crate(libc)) = 0.2.104
Provides: bundled(crate(librnsslapd)) = 0.1.0
Provides: bundled(crate(librslapd)) = 0.1.0
Provides: bundled(crate(log)) = 0.4.14
Provides: bundled(crate(once_cell)) = 1.8.0
Provides: bundled(crate(openssl)) = 0.10.36
Provides: bundled(crate(openssl-sys)) = 0.9.67
Provides: bundled(crate(paste)) = 0.1.18
Provides: bundled(crate(paste-impl)) = 0.1.18
Provides: bundled(crate(pin-project-lite)) = 0.2.16
Provides: bundled(crate(pkg-config)) = 0.3.32
Provides: bundled(crate(ppv-lite86)) = 0.2.21
Provides: bundled(crate(proc-macro-hack)) = 0.5.20+deprecated
Provides: bundled(crate(proc-macro2)) = 1.0.95
Provides: bundled(crate(quote)) = 1.0.40
Provides: bundled(crate(r-efi)) = 5.2.0
Provides: bundled(crate(rand)) = 0.8.5
Provides: bundled(crate(pkg-config)) = 0.3.20
Provides: bundled(crate(ppv-lite86)) = 0.2.14
Provides: bundled(crate(proc-macro-hack)) = 0.5.19
Provides: bundled(crate(proc-macro2)) = 1.0.30
Provides: bundled(crate(quote)) = 1.0.10
Provides: bundled(crate(rand)) = 0.8.4
Provides: bundled(crate(rand_chacha)) = 0.3.1
Provides: bundled(crate(rand_core)) = 0.6.4
Provides: bundled(crate(redox_syscall)) = 0.2.16
Provides: bundled(crate(rand_core)) = 0.6.3
Provides: bundled(crate(rand_hc)) = 0.3.1
Provides: bundled(crate(redox_syscall)) = 0.2.10
Provides: bundled(crate(remove_dir_all)) = 0.5.3
Provides: bundled(crate(rsds)) = 0.1.0
Provides: bundled(crate(rustc-demangle)) = 0.1.24
Provides: bundled(crate(rustix)) = 1.0.7
Provides: bundled(crate(ryu)) = 1.0.20
Provides: bundled(crate(scopeguard)) = 1.2.0
Provides: bundled(crate(serde)) = 1.0.219
Provides: bundled(crate(serde_derive)) = 1.0.219
Provides: bundled(crate(serde_json)) = 1.0.140
Provides: bundled(crate(shlex)) = 1.3.0
Provides: bundled(crate(smallvec)) = 1.15.0
Provides: bundled(crate(ryu)) = 1.0.5
Provides: bundled(crate(serde)) = 1.0.130
Provides: bundled(crate(serde_derive)) = 1.0.130
Provides: bundled(crate(serde_json)) = 1.0.68
Provides: bundled(crate(slapd)) = 0.1.0
Provides: bundled(crate(slapi_r_plugin)) = 0.1.0
Provides: bundled(crate(strsim)) = 0.8.0
Provides: bundled(crate(syn)) = 2.0.101
Provides: bundled(crate(tempfile)) = 3.20.0
Provides: bundled(crate(syn)) = 1.0.80
Provides: bundled(crate(synstructure)) = 0.12.6
Provides: bundled(crate(tempfile)) = 3.2.0
Provides: bundled(crate(textwrap)) = 0.11.0
Provides: bundled(crate(tokio)) = 1.45.1
Provides: bundled(crate(tokio-macros)) = 2.5.0
Provides: bundled(crate(toml)) = 0.5.11
Provides: bundled(crate(unicode-ident)) = 1.0.18
Provides: bundled(crate(unicode-width)) = 0.1.14
Provides: bundled(crate(toml)) = 0.5.8
Provides: bundled(crate(unicode-width)) = 0.1.9
Provides: bundled(crate(unicode-xid)) = 0.2.2
Provides: bundled(crate(uuid)) = 0.8.2
Provides: bundled(crate(vcpkg)) = 0.2.15
Provides: bundled(crate(vec_map)) = 0.8.2
Provides: bundled(crate(version_check)) = 0.9.5
Provides: bundled(crate(wasi)) = 0.14.2+wasi_0.2.4
Provides: bundled(crate(wasi)) = 0.10.2+wasi_snapshot_preview1
Provides: bundled(crate(winapi)) = 0.3.9
Provides: bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0
Provides: bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0
Provides: bundled(crate(windows-sys)) = 0.59.0
Provides: bundled(crate(windows-targets)) = 0.52.6
Provides: bundled(crate(windows_aarch64_gnullvm)) = 0.52.6
Provides: bundled(crate(windows_aarch64_msvc)) = 0.52.6
Provides: bundled(crate(windows_i686_gnu)) = 0.52.6
Provides: bundled(crate(windows_i686_gnullvm)) = 0.52.6
Provides: bundled(crate(windows_i686_msvc)) = 0.52.6
Provides: bundled(crate(windows_x86_64_gnu)) = 0.52.6
Provides: bundled(crate(windows_x86_64_gnullvm)) = 0.52.6
Provides: bundled(crate(windows_x86_64_msvc)) = 0.52.6
Provides: bundled(crate(wit-bindgen-rt)) = 0.39.0
Provides: bundled(crate(zerocopy)) = 0.8.25
Provides: bundled(crate(zerocopy-derive)) = 0.8.25
Provides: bundled(crate(zeroize)) = 1.8.1
Provides: bundled(crate(zeroize_derive)) = 1.4.2
Provides: bundled(crate(zeroize)) = 1.4.2
Provides: bundled(crate(zeroize_derive)) = 1.2.0
 ##### Bundled cargo crates list - END #####

 BuildRequires: nspr-devel
BuildRequires: nss-devel
BuildRequires: nss-devel >= 3.34
 BuildRequires: perl-generators
 BuildRequires: openldap-clients
 BuildRequires: openldap-devel
 BuildRequires: libdb-devel
 BuildRequires: cyrus-sasl-devel
@ -230,7 +185,6 @@ BuildRequires: python%{python3_pkgversion}-argcomplete
 BuildRequires: python%{python3_pkgversion}-argparse-manpage
 BuildRequires: python%{python3_pkgversion}-policycoreutils
 BuildRequires: python%{python3_pkgversion}-libselinux
BuildRequires: python%{python3_pkgversion}-cryptography

 # For cockpit
 BuildRequires: rsync
@ -253,9 +207,7 @@ Requires: python%{python3_pkgversion}-ldap
 # this is needed to setup SSL if you are not using the
 # administration server package
 Requires: nss-tools
%dirsrv_requires_ge nss
Requires: nss >= 3.67.0-7
Requires: nspr >= 4.32
Requires: nss >= 3.34

 # these are not found by the auto-dependency method
 # they are required to support the mandatory LDAP SASL mechs
@ -272,7 +224,6 @@ Requires: cracklib-dicts
 # This picks up libperl.so as a Requires, so we add this versioned one
 Requires: perl(:MODULE_COMPAT_%(eval "`%{__perl} -V:version`"; echo $version))
 Requires: perl-Errno >= 1.23-360
Requires: acl

 # Needed by logconv.pl
 Requires: perl-DB_File
@ -294,60 +245,27 @@ Source2: %{name}-devel.README
 Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2
 %endif
 %if %{use_rust}
Source4: vendor-%{version}-3.tar.gz
Source5: Cargo-%{version}-3.lock
Source4: vendor-%{version}-1.tar.gz
Source5: Cargo.lock
 %endif

Patch01: 0001-issue-5647-covscan-memory-leak-in-audit-log-when-add.patch
Patch02: 0002-Issue-5647-Fix-unused-variable-warning-from-previous.patch
Patch03: 0003-Issue-5407-sync_repl-crashes-if-enabled-while-dynami.patch
Patch04: 0004-Issue-5547-automember-plugin-improvements.patch
Patch05: 0005-Issue-3527-Support-HAProxy-and-Instance-on-the-same-.patch
Patch06: 0006-CVE-2024-2199.patch
Patch07: 0007-CVE-2024-3657.patch
Patch08: 0008-Issue-6096-Improve-connection-timeout-error-logging-.patch
Patch09: 0009-Issue-6103-New-connection-timeout-error-breaks-error.patch
Patch10: 0010-Issue-6103-New-connection-timeout-error-breaks-error.patch
Patch11: 0011-Issue-6172-RFE-improve-the-performance-of-evaluation.patch
Patch12: 0012-Security-fix-for-CVE-2024-5953.patch
Patch13: 0013-Issue-4778-Add-COMPACT_CL5-task-to-dsconf-replicatio.patch
Patch14: 0014-Issue-6417-If-an-entry-RDN-is-identical-to-the-suffi.patch
Patch15: 0015-Issue-6224-d2entry-Could-not-open-id2entry-err-0-at-.patch
Patch16: 0016-Issue-6224-Fix-merge-issue-in-389-ds-base-2.1-for-ds.patch
Patch17: 0017-Issue-6224-Remove-test_referral_subsuffix-from-ds_lo.patch
Patch18: 0018-Issue-6417-2nd-If-an-entry-RDN-is-identical-to-the-s.patch
Patch19: 0019-Issue-6417-2nd-fix-typo.patch
Patch20: 0020-Issue-6417-3rd-If-an-entry-RDN-is-identical-to-the-s.patch
Patch21: 0021-Issue-6509-Race-condition-with-Paged-Result-searches.patch
Patch22: 0022-Issue-6509-Fix-cherry-pick-issue-race-condition-in-P.patch
Patch23: 0023-Issue-6304-RFE-when-memberof-is-enabled-defer-update.patch
Patch24: 0024-Issue-6436-MOD-on-a-large-group-slow-if-substring-in.patch
Patch25: 0025-Issue-6494-Various-errors-when-using-extended-matchi.patch
Patch26: 0026-Issue-6004-idletimeout-may-be-ignored-6005.patch
Patch27: 0027-Issue-6004-2nd-idletimeout-may-be-ignored-6569.patch
Patch28: 0028-Issue-6485-Fix-double-free-in-USN-cleanup-task.patch
#Patch29: 0029-Issue-6553-Update-concread-to-0.5.4-and-refactor-sta.patch
Patch30: 0030-Issue-5841-dsconf-incorrectly-setting-up-Pass-Throug.patch
Patch31: 0031-Issue-6067-Add-hidden-v-and-j-options-to-each-CLI-su.patch
Patch32: 0032-Issue-6067-Improve-dsidm-CLI-No-Such-Entry-handling-.patch
Patch33: 0033-Issue-6067-Update-dsidm-to-prioritize-basedn-from-.d.patch
Patch34: 0034-Issue-6155-ldap-agent-fails-to-start-because-of-perm.patch
Patch35: 0035-Issue-5305-OpenLDAP-version-autodetection-doesn-t-wo.patch
Patch36: 0036-Issue-1925-Add-a-CI-test-5936.patch
Patch37: 0037-Issue-6494-2nd-Various-errors-when-using-extended-ma.patch
Patch38: 0038-Issue-6494-3rd-Various-errors-when-using-extended-ma.patch
Patch39: 0039-Issue-6494-4th-Various-errors-when-using-extended-ma.patch
Patch40: 0040-Issue-6497-lib389-Configure-replication-for-multiple.patch
Patch41: 0041-Issue-6655-fix-replication-release-replica-decoding-.patch
Patch42: 0042-Issue-6655-fix-merge-conflict.patch
Patch43: 0043-Issue-6571-Nested-group-does-not-receive-memberOf-at.patch
Patch44: 0044-Issue-6571-2nd-Nested-group-does-not-receive-memberO.patch
Patch45: 0045-Issue-6698-NPE-after-configuring-invalid-filtered-ro.patch
Patch46: 0046-Issue-6686-CLI-Re-enabling-user-accounts-that-reache.patch
Patch47: 0047-Issue-6302-Allow-to-run-replication-status-without-a.patch

#Patch100: cargo.patch
Patch01: 0001-Issue-4678-RFE-automatique-disable-of-virtual-attrib.patch
Patch02: 0002-Issue-4943-Fix-csn-generator-to-limit-time-skew-drif.patch
Patch03: 0003-Issue-3584-Fix-PBKDF2_SHA256-hashing-in-FIPS-mode-49.patch
Patch04: 0004-Issue-4956-Automember-allows-invalid-regex-and-does-.patch
Patch05: 0005-Issue-4092-systemd-tmpfiles-warnings.patch
Patch06: 0006-Issue-4973-installer-changes-permissions-on-run.patch
Patch07: 0007-Issue-4973-update-snmp-to-use-run-dirsrv-for-PID-fil.patch
Patch08: 0008-Issue-4978-make-installer-robust.patch
Patch09: 0009-Issue-4972-gecos-with-IA5-introduces-a-compatibility.patch
Patch10: 0010-Issue-4997-Function-declaration-compiler-error-on-1..patch
Patch11: 0011-Issue-4978-use-more-portable-python-command-for-chec.patch
Patch12: 0012-Issue-4959-Invalid-etc-hosts-setup-can-cause-isLocal.patch
Patch13: 0013-CVE-2021-4091-BZ-2030367-double-free-of-the-virtual-.patch
Patch14: 0014-Issue-5127-run-restorecon-on-dev-shm-at-server-start.patch
Patch15: 0015-Issue-5127-ds_selinux_restorecon.sh-always-exit-0.patch
Patch16: 0016-Issue-4775-Add-entryuuid-CLI-and-Fixup-4776.patch
Patch17: 0017-Issue-4775-Fix-cherry-pick-error.patch

 %description
 389 Directory Server is an LDAPv3 compliant server. The base package includes
@ -362,7 +280,7 @@ Please see http://seclists.org/oss-sec/2016/q1/363 for more information.
 Summary: Core libraries for 389 Directory Server
 Group: System Environment/Daemons
 BuildRequires: nspr-devel
BuildRequires: nss-devel
BuildRequires: nss-devel >= 3.34
 BuildRequires: openldap-devel
 BuildRequires: libdb-devel
 BuildRequires: cyrus-sasl-devel
@ -416,7 +334,7 @@ Group: Development/Libraries
 Requires: %{name}-libs = %{version}-%{release}
 Requires: pkgconfig
 Requires: nspr-devel
Requires: nss-devel
Requires: nss-devel >= 3.34
 Requires: openldap-devel
 Requires: libtalloc
 Requires: libevent
@ -443,7 +361,6 @@ SNMP Agent for the 389 Directory Server base package.
 Summary: A library for accessing, testing, and configuring the 389 Directory Server
 BuildArch: noarch
 Group: Development/Libraries
 Requires: %{name} = %{version}-%{release}
Requires: openssl
 Requires: iproute
 Requires: platform-python
@ -457,7 +374,6 @@ Requires: python%{python3_pkgversion}-argcomplete
 Requires: python%{python3_pkgversion}-libselinux
 Requires: python%{python3_pkgversion}-setuptools
 Requires: python%{python3_pkgversion}-distro
Requires: python%{python3_pkgversion}-cryptography
 %{?python_provide:%python_provide python%{python3_pkgversion}-lib389}

 %description -n python%{python3_pkgversion}-lib389
@ -469,18 +385,16 @@ Summary: Cockpit UI Plugin for configuring and administering the 389 Di
 BuildArch: noarch
 Requires: cockpit
 Requires: platform-python
 Requires: %{name} = %{version}-%{release}
Requires: python%{python3_pkgversion}-lib389 = %{version}-%{release}
Requires: python%{python3_pkgversion}-lib389

 %description -n cockpit-389-ds
 A cockpit UI Plugin for configuring and administering the 389 Directory Server

 %prep
%autosetup -p1 -n %{name}-%{version}%{?prerel}
%autosetup -p1 -v -n %{name}-%{version}%{?prerel}
 %if %{use_rust}
 rm -rf vendor
tar xzf %{SOURCE4}
cp %{SOURCE5} src/Cargo.lock
tar xvzf %{SOURCE4}
cp %{SOURCE5} src/
 %endif
 %if %{bundle_jemalloc}
 %setup -q -n %{name}-%{version}%{?prerel} -T -D -b 3
@ -538,7 +452,7 @@ pushd ../%{jemalloc_name}-%{jemalloc_ver}
 --libdir=%{_libdir}/%{pkgname}/lib \
 --bindir=%{_libdir}/%{pkgname}/bin \
 --enable-prof
%make_build
make %{?_smp_mflags}
 popd
 %endif

@ -572,7 +486,8 @@ sed -i "1s/\"1\"/\"8\"/" %{_builddir}/%{name}-%{version}%{?prerel}/src/lib389/m
 # Generate symbolic info for debuggers
 export XCFLAGS=$RPM_OPT_FLAGS

%make_build
#make %{?_smp_mflags}
make

 %install

@ -594,7 +509,7 @@ popd

 mkdir -p $RPM_BUILD_ROOT/var/log/%{pkgname}
 mkdir -p $RPM_BUILD_ROOT/var/lib/%{pkgname}
mkdir -p $RPM_BUILD_ROOT/var/lock/%{pkgname}
mkdir -p $RPM_BUILD_ROOT/var/3lock/%{pkgname}

 # for systemd
 mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/systemd/system/%{groupname}.wants
@ -970,107 +885,42 @@ exit 0
 %doc README.md
 %changelog
* Thu Jun 5 2025 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-14
- Reverts: RHEL-80704 - Increased memory consumption caused by NDN cache [rhel-8.10.z]
- Resolves: RHEL-95442 - ns-slapd[xxxx]: segfault at 10d7d0d0 ip 00007ff734050cdb sp 00007ff6de9f1430 error 6 in libslapd.so.0.1.0[7ff733ec0000+1b3000] [rhel-8.10.z]
* Thu Feb 3 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-6
- Bump version to 1.4.3.28-6
- Resolves: Bug 2047171 - Based on 1944494 (RFC 4530 entryUUID attribute) - plugin entryuuid failing

* Thu May 15 2025 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-13
- Resolves: RHEL-89749 - Nested group does not receive memberOf attribute [rhel-8.10.z]
- Resolves: RHEL-89758 - dsidm Error: float() argument must be a string or a number, not 'NoneType' [rhel-8.10.z]
- Resolves: RHEL-89765 - Crash in __strlen_sse2 when using the nsRole filter rewriter. [rhel-8.10.z]
- Resolves: RHEL-89778 - RHDS12.2 NSMMReplicationPlugin - release_replica Unable to parse the response [rhel-8.10.z]
* Fri Jan 28 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-5
- Bump version to 1.4.3.28-5
- Resolves: Bug 2045223 - ipa-restore command is failing when restore after uninstalling the server (part 2)

* Thu Apr 03 2025 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-12
- Resolves: RHEL-85499 - [RFE] defer memberof nested updates [rhel-8.10.z]
- Resolves: RHEL-65663 - dsconf incorrectly setting up Pass-Through Authentication
- Resolves: RHEL-80704 - Increased memory consumption caused by NDN cache [rhel-8.10.z]
- Resolves: RHEL-81127 - nsslapd-idletimeout is ignored [rhel-8.10.z]
- Resolves: RHEL-81136 - Healthcheck tool should warn admin about creating a substring index on membership attribute [rhel-8.10.z]
- Resolves: RHEL-81143 - 389DirectoryServer Process Stops When Setting up Sorted VLV Index [rhel-8.10.z]
- Resolves: RHEL-81152 - AddressSanitizer: double-free [rhel-8.10.z]
- Resolves: RHEL-81176 - Verbose option for dsctl is not shown in help of actions [rhel-8.10.z]
* Tue Jan 25 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-4
- Bump version to 1.4.3.28-4
- Resolves: Bug 2045223 - ipa-restore command is failing when restore after uninstalling the server

* Thu Jan 23 2025 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-11
- Resolves: RHEL-72487 - IPA LDAP error code T3 when no exceeded time limit from a paged search result [rhel-8.10.z]
* Thu Nov 18 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-3
- Bump version to 1.4.3.28-3
- Resolves: Bug 2030367 - EMBARGOED CVE-2021-4091 389-ds:1.4/389-ds-base: double-free of the virtual attribute context in persistent search
- Resolves: Bug 2033398 - PBKDF2 hashing does not work in FIPS mode

* Fri Jan 17 2025 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-10
- Resolves: RHEL-69822 - "Duplicated DN detected" errors when creating indexes or importing entries. [rhel-8.10.z]
- Resolves: RHEL-71215 - Sub suffix causes "id2entry - Could not open id2entry err 0" error when the Directory Server starts [rhel-8.10.z]
* Thu Nov 18 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-2
- Bump version to 1.4.3.28-2
- Resolves: Bug 2024695 - DB corruption "_entryrdn_insert_key - Same DN (dn: nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff,<SUFFIX>) is already in the entryrdn file"
- Resolves: Bug 1859210 - systemd-tmpfiles warnings
- Resolves: Bug 1913199 - IPA server (389ds) is very slow in execution of some searches (`&(memberOf=...)(objectClass=ipaHost)` in particular)
- Resolves: Bug 1974236 - automatic disable of virtual attribute checking
- Resolves: Bug 1976882 - logconv.pl -j: Use of uninitialized value $first in numeric gt (>)
- Resolves: Bug 1981281 - ipa user-add fails with "gecos: value invalid per syntax: Invalid syntax"
- Resolves: Bug 2015998 - Log the Auto Member invalid regex rules in the LDAP errors log

* Fri Nov 22 2024 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-9
- Resolves: RHEL-64360 - Cannot compact the replication changelog using dsconf. [rhel-8.10.z]

* Mon Sep 09 2024 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-8
- Bump version to 1.4.3.39-8
- Resolves: RHEL-40943 - CVE-2024-5953 389-ds:1.4/389-ds-base: Malformed userPassword hash may cause Denial of Service [rhel-8.10.z]
- Resolves: RHEL-58069 - perf search result investigation for many large static groups and members [rhel-8.10.0.z]

* Thu Jun 13 2024 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-7
- Bump version to 1.4.3.39-7
- Resolves: RHEL-16277 - LDAP connections are closed with code T2 before the IO block timeout is reached. [rhel-8.10.0.z]

* Thu Jun 13 2024 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-6
- Bump version to 1.4.3.39-6
- Resolves: RHEL-16277 - LDAP connections are closed with code T2 before the IO block timeout is reached. [rhel-8.10.0.z]

* Tue Jun 11 2024 Viktor Ashirov <vashirov@redhat.com> - 1.4.3.39-5
- Bump version to 1.4.3.39-5
- Resolves: RHEL-16277 - LDAP connections are closed with code T2 before the IO block timeout is reached. [rhel-8.10.0.z]

* Thu Jun 06 2024 James Chapman <jachapma@redhat.com> - 1.4.3.39-4
- Bump version to 1.4.3.39-4
- Resolves: RHEL-34818 - redhat-ds:11/389-ds-base: Malformed userPassword may cause crash at do_modify in slapd/modify.c
- Resolves: RHEL-34824 - redhat-ds:11/389-ds-base: potential denial of service via specially crafted kerberos AS-REQ request

* Thu Mar 14 2024 Simon Pichugin <spichugi@redhat.com> - 1.4.3.39-3
- Bump version to 1.4.3.39-3
- Resolves: RHEL-19240 - RFE Add PROXY protocol support to 389-ds-base via configuration item - similar to Postfix

* Mon Feb 05 2024 Thierry Bordaz <tbordaz@redhat.com> - 1.4.3.39-2
- Bump version to 1.4.3.39-2
- Resolves: RHEL-23209 - CVE-2024-1062 389-ds:1.4/389-ds-base: a heap overflow leading to denial-of-service while writing a value larger than 256 chars (in log_entry_attr)
- Resolves: RHEL-5390 - schema-compat-plugin expensive with automember rebuild
- Resolves: RHEL-5135 - crash in sync_update_persist_op() of content sync plugin

* Tue Jan 16 2024 Simon Pichugin <spichugi@redhat.com> - 1.4.3.39-1
- Bump version to 1.4.3.39-1
- Resolves: RHEL-19028 - Rebase 389-ds-base in RHEL 8.10 to 1.4.3.39
- Resolves: RHEL-19240 - [RFE] Add PROXY protocol support to 389-ds-base
- Resolves: RHEL-5143 - SELinux labeling for dirsrv files seen during ipa install/uninstall should be moved to DEBUG.
- Resolves: RHEL-5107 - bdb_start - Detected Disorderly Shutdown directory server is not starting
- Resolves: RHEL-16338 - ns-slapd crash in slapi_attr_basetype
- Resolves: RHEL-14025 - After an upgrade the LDAP server won't start if nsslapd-conntablesize is present in the dse.ldif file.
* Thu Oct 21 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.28-1
- Bump version to 1.4.3.28-1
- Resolves: Bug 2016014 - rebase RHEL 8.6 with 389-ds-base-1.4.3
- Resolves: Bug 1990002 - monitor displays wrong date for connection
- Resolves: Bug 1950335 - upgrade password hash on bind also causes passwordExpirationtime to be updated
- Resolves: Bug 1916292 - Indexing a single backend actually processes all configured backends
- Resolves: Bug 1780842 - [RFE] set db home directory to /dev/shm by default
- Resolves: Bug 2000975 - Retro Changelog does not trim changes

* Fri Dec 08 2023 James Chapman <jachapma@redhat.com> - 1.4.3.38-1
- Bump version to 1.4.3.38-1
- Resolves: RHEL-19028 - Rebase 389-ds-base in RHEL 8.10 to 1.4.3.38

* Wed Aug 16 2023 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.37-1
- Bump version to 1.4.3.37-1
- Resolves: rhbz#2224505 - Paged search impacts performance
- Resolves: rhbz#2220890 - healthcheck tool needs to be updated for new default password storage scheme
- Resolves: rhbz#2218235 - python3-lib389: Python tarfile extraction needs change to avoid a warning
- Resolves: rhbz#2210491 - dtablesize being set to soft maxfiledescriptor limit causing massive slowdown in large environments.
- Resolves: rhbz#2149967 - SELinux labeling for dirsrv files seen during ipa install/uninstall should be moved to DEBUG

* Tue Jul 11 2023 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.36-2
- Bump version to 1.4.3.36-2
- Resolves: rhbz#2220890 - healthcheck tool needs to be updated for new default password storage scheme

* Wed Jun 14 2023 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.36-1
- Bump version to 1.4.3.36-1
- Resolves: rhbz#2188628 - Rebase 389-ds-base in RHEL 8.9 to 1.4.3.36

* Mon May 22 2023 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.35-1
- Bump version to 1.4.3.35-1
- Resolves: rhbz#2188628 - Rebase 389-ds-base in RHEL 8.9 to 1.4.3.35

* Tue Nov 15 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.32-1
- Bump version to 1.4.3.32-1
- Resolves: Bug 2098138 - broken nsslapd-subtree-rename-switch option in rhds11
- Resolves: Bug 2119063 - entryuuid fixup task fails because entryUUID is not mutable
- Resolves: Bug 2136610 - [RFE] Add 'cn' attribute to IPA audit logs
- Resolves: Bug 2142638 - pam mutex lock causing high etimes, affecting red hat internal sso
- Resolves: Bug 2096795 - [RFE] Support ECDSA private keys for TLS
565 Cargo.lock generated Normal file
@ -0,0 +1,565 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3

[[package]]
name = "ansi_term"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
dependencies = [
 "winapi",
]

[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
 "hermit-abi",
 "libc",
 "winapi",
]

[[package]]
name = "autocfg"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"

[[package]]
name = "base64"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"

[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"

[[package]]
name = "byteorder"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"

[[package]]
name = "cbindgen"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
dependencies = [
 "clap",
 "log",
 "proc-macro2",
 "quote",
 "serde",
 "serde_json",
 "syn",
 "tempfile",
 "toml",
]

[[package]]
name = "cc"
version = "1.0.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd"
dependencies = [
 "jobserver",
]

[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

[[package]]
name = "clap"
version = "2.33.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
dependencies = [
 "ansi_term",
 "atty",
 "bitflags",
 "strsim",
 "textwrap",
 "unicode-width",
 "vec_map",
]

[[package]]
name = "entryuuid"
version = "0.1.0"
dependencies = [
 "cc",
 "libc",
 "paste",
 "slapi_r_plugin",
 "uuid",
]

[[package]]
name = "entryuuid_syntax"
version = "0.1.0"
dependencies = [
 "cc",
 "libc",
 "paste",
 "slapi_r_plugin",
 "uuid",
]

[[package]]
name = "fernet"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
dependencies = [
 "base64",
 "byteorder",
 "getrandom",
 "openssl",
 "zeroize",
]

[[package]]
name = "foreign-types"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
dependencies = [
 "foreign-types-shared",
]

[[package]]
name = "foreign-types-shared"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"

[[package]]
name = "getrandom"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
dependencies = [
 "cfg-if",
 "libc",
 "wasi",
]

[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
 "libc",
]

[[package]]
name = "itoa"
version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"

[[package]]
name = "jobserver"
version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa"
dependencies = [
 "libc",
]

[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

[[package]]
name = "libc"
version = "0.2.104"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b2f96d100e1cf1929e7719b7edb3b90ab5298072638fccd77be9ce942ecdfce"

[[package]]
name = "librnsslapd"
version = "0.1.0"
dependencies = [
 "cbindgen",
 "libc",
 "slapd",
]

[[package]]
name = "librslapd"
version = "0.1.0"
dependencies = [
 "cbindgen",
 "libc",
 "slapd",
]

[[package]]
name = "log"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
dependencies = [
 "cfg-if",
]

[[package]]
name = "once_cell"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56"

[[package]]
name = "openssl"
version = "0.10.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d9facdb76fec0b73c406f125d44d86fdad818d66fef0531eec9233ca425ff4a"
dependencies = [
 "bitflags",
 "cfg-if",
 "foreign-types",
 "libc",
 "once_cell",
 "openssl-sys",
]

[[package]]
name = "openssl-sys"
version = "0.9.67"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69df2d8dfc6ce3aaf44b40dec6f487d5a886516cf6879c49e98e0710f310a058"
dependencies = [
 "autocfg",
 "cc",
 "libc",
 "pkg-config",
 "vcpkg",
]

[[package]]
name = "paste"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
dependencies = [
 "paste-impl",
 "proc-macro-hack",
]

[[package]]
name = "paste-impl"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
dependencies = [
 "proc-macro-hack",
]

[[package]]
name = "pkg-config"
version = "0.3.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c9b1041b4387893b91ee6746cddfc28516aff326a3519fb2adf820932c5e6cb"

[[package]]
name = "ppv-lite86"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3ca011bd0129ff4ae15cd04c4eef202cadf6c51c21e47aba319b4e0501db741"

[[package]]
name = "proc-macro-hack"
version = "0.5.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"

[[package]]
name = "proc-macro2"
version = "1.0.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edc3358ebc67bc8b7fa0c007f945b0b18226f78437d61bec735a9eb96b61ee70"
dependencies = [
 "unicode-xid",
]

[[package]]
name = "quote"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05"
dependencies = [
 "proc-macro2",
]

[[package]]
name = "rand"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
dependencies = [
 "libc",
 "rand_chacha",
 "rand_core",
 "rand_hc",
]

[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
 "ppv-lite86",
 "rand_core",
]

[[package]]
name = "rand_core"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
dependencies = [
 "getrandom",
]

[[package]]
name = "rand_hc"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
dependencies = [
 "rand_core",
]

[[package]]
name = "redox_syscall"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff"
dependencies = [
 "bitflags",
]

[[package]]
name = "remove_dir_all"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
dependencies = [
 "winapi",
]

[[package]]
name = "rsds"
version = "0.1.0"

[[package]]
name = "ryu"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"

[[package]]
name = "serde"
version = "1.0.130"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.130"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "serde_json"
version = "1.0.68"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8"
dependencies = [
 "itoa",
 "ryu",
 "serde",
]

[[package]]
name = "slapd"
version = "0.1.0"
dependencies = [
 "fernet",
]

[[package]]
name = "slapi_r_plugin"
version = "0.1.0"
dependencies = [
 "lazy_static",
 "libc",
 "paste",
 "uuid",
]

[[package]]
name = "strsim"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"

[[package]]
name = "syn"
version = "1.0.80"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d010a1623fbd906d51d650a9916aaefc05ffa0e4053ff7fe601167f3e715d194"
dependencies = [
 "proc-macro2",
 "quote",
 "unicode-xid",
]

[[package]]
name = "synstructure"
version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
 "unicode-xid",
]

[[package]]
name = "tempfile"
version = "3.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
dependencies = [
 "cfg-if",
 "libc",
 "rand",
 "redox_syscall",
 "remove_dir_all",
 "winapi",
]

[[package]]
name = "textwrap"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
dependencies = [
 "unicode-width",
]

[[package]]
name = "toml"
version = "0.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa"
dependencies = [
 "serde",
]

[[package]]
name = "unicode-width"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"

[[package]]
name = "unicode-xid"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"

[[package]]
name = "uuid"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
dependencies = [
 "getrandom",
]

[[package]]
name = "vcpkg"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"

[[package]]
name = "vec_map"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"

[[package]]
name = "wasi"
version = "0.10.2+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"

[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
 "winapi-i686-pc-windows-gnu",
 "winapi-x86_64-pc-windows-gnu",
]

[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"

[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

[[package]]
name = "zeroize"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf68b08513768deaa790264a7fac27a58cbf2705cfcdc9448362229217d7e970"
dependencies = [
 "zeroize_derive",
]

[[package]]
name = "zeroize_derive"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bdff2024a851a322b08f179173ae2ba620445aef1e838f0c196820eade4ae0c7"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
 "synstructure",
]
@ -1,119 +0,0 @@
From dddb14210b402f317e566b6387c76a8e659bf7fa Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 14 Feb 2023 13:34:10 +0100
Subject: [PATCH 1/2] issue 5647 - covscan: memory leak in audit log when
adding entries (#5650)

covscan reported an issue about "vals" variable in auditlog.c:231 and indeed a charray_free is missing.
Issue: 5647
Reviewed by: @mreynolds389, @droideck
---
 ldap/servers/slapd/auditlog.c | 71 +++++++++++++++++++----------------
 1 file changed, 38 insertions(+), 33 deletions(-)

diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 68cbc674d..3128e0497 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -177,6 +177,40 @@ write_auditfail_log_entry(Slapi_PBlock *pb)
    slapi_ch_free_string(&audit_config);
}

+/*
+ * Write the attribute values to the audit log as "comments"
+ *
+ * Slapi_Attr *entry - the attribute begin logged.
+ * char *attrname - the attribute name.
+ * lenstr *l - the audit log buffer
+ *
+ * Resulting output in the log:
+ *
+ *     #ATTR: VALUE
+ *     #ATTR: VALUE
+ */
+static void
+log_entry_attr(Slapi_Attr *entry_attr, char *attrname, lenstr *l)
+{
+    Slapi_Value **vals = attr_get_present_values(entry_attr);
+    for(size_t i = 0; vals && vals[i]; i++) {
+        char log_val[256] = "";
+        const struct berval *bv = slapi_value_get_berval(vals[i]);
+        if (bv->bv_len >= 256) {
+            strncpy(log_val, bv->bv_val, 252);
+            strcpy(log_val+252, "...");
+        } else {
+            strncpy(log_val, bv->bv_val, bv->bv_len);
+            log_val[bv->bv_len] = 0;
+        }
+        addlenstr(l, "#");
+        addlenstr(l, attrname);
+        addlenstr(l, ": ");
+        addlenstr(l, log_val);
+        addlenstr(l, "\n");
+    }
+}
+
/*
 * Write "requested" attributes from the entry to the audit log as "comments"
 *
@@ -212,21 +246,9 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
        for (req_attr = ldap_utf8strtok_r(display_attrs, ", ", &last); req_attr;
             req_attr = ldap_utf8strtok_r(NULL, ", ", &last))
        {
-            char **vals = slapi_entry_attr_get_charray(entry, req_attr);
-            for(size_t i = 0; vals && vals[i]; i++) {
-                char log_val[256] = {0};
-
-                if (strlen(vals[i]) > 256) {
-                    strncpy(log_val, vals[i], 252);
-                    strcat(log_val, "...");
-                } else {
-                    strcpy(log_val, vals[i]);
-                }
-                addlenstr(l, "#");
-                addlenstr(l, req_attr);
-                addlenstr(l, ": ");
-                addlenstr(l, log_val);
-                addlenstr(l, "\n");
+            slapi_entry_attr_find(entry, req_attr, &entry_attr);
+            if (entry_attr) {
+                log_entry_attr(entry_attr, req_attr, l);
            }
        }
    } else {
@@ -234,7 +256,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
        for (; entry_attr; entry_attr = entry_attr->a_next) {
            Slapi_Value **vals = attr_get_present_values(entry_attr);
            char *attr = NULL;
-            const char *val = NULL;

            slapi_attr_get_type(entry_attr, &attr);
            if (strcmp(attr, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) {
@@ -251,23 +272,7 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
                addlenstr(l, ": ****************************\n");
                continue;
            }
-
-            for(size_t i = 0; vals && vals[i]; i++) {
-                char log_val[256] = {0};
-
-                val = slapi_value_get_string(vals[i]);
-                if (strlen(val) > 256) {
-                    strncpy(log_val, val, 252);
-                    strcat(log_val, "...");
-                } else {
-                    strcpy(log_val, val);
-                }
-                addlenstr(l, "#");
-                addlenstr(l, attr);
-                addlenstr(l, ": ");
-                addlenstr(l, log_val);
-                addlenstr(l, "\n");
-            }
+            log_entry_attr(entry_attr, attr, l);
        }
    }
    slapi_ch_free_string(&display_attrs);
--
2.43.0

@ -1,27 +0,0 @@
From be7c2b82958e91ce08775bf6b5da3c311d3b00e5 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 20 Feb 2023 16:14:05 +0100
Subject: [PATCH 2/2] Issue 5647 - Fix unused variable warning from previous
commit (#5670)

* issue 5647 - memory leak in audit log when adding entries
* Issue 5647 - Fix unused variable warning from previous commit
---
 ldap/servers/slapd/auditlog.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 3128e0497..0597ecc6f 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -254,7 +254,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
    } else {
        /* Return all attributes */
        for (; entry_attr; entry_attr = entry_attr->a_next) {
-            Slapi_Value **vals = attr_get_present_values(entry_attr);
            char *attr = NULL;

            slapi_attr_get_type(entry_attr, &attr);
--
2.43.0

@ -1,147 +0,0 @@
From 692c4cec6cc5c0086cf58f83bcfa690c766c9887 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Fri, 2 Feb 2024 14:14:28 +0100
Subject: [PATCH] Issue 5407 - sync_repl crashes if enabled while dynamic
plugin is enabled (#5411)

Bug description:
When dynamic plugin is enabled, if a MOD enables sync_repl plugin
then sync_repl init function registers the postop callback
that will be called for the MOD itself while the preop
has not been called.
postop expects preop to be called and so primary operation
to be set. When it is not set it crashes

Fix description:
If the primary operation is not set, just return

relates: #5407
---
 .../suites/syncrepl_plugin/basic_test.py | 68 +++++++++++++++++++
 ldap/servers/plugins/sync/sync_persist.c | 23 ++++++-
 2 files changed, 90 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
index eb3770b78..cdf35eeaa 100644
--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
@@ -592,6 +592,74 @@ def test_sync_repl_cenotaph(topo_m2, request):

    request.addfinalizer(fin)

+def test_sync_repl_dynamic_plugin(topology, request):
+    """Test sync_repl with dynamic plugin
+
+    :id: d4f84913-c18a-459f-8525-110f610ca9e6
+    :setup: install a standalone instance
+    :steps:
+        1. reset instance to standard (no retroCL, no sync_repl, no dynamic plugin)
+        2. Enable dynamic plugin
+        3. Enable retroCL/content_sync
+        4. Establish a sync_repl req
+    :expectedresults:
+        1. Should succeeds
+        2. Should succeeds
+        3. Should succeeds
+        4. Should succeeds
+    """
+
+    # Reset the instance in a default config
+    # Disable content sync plugin
+    topology.standalone.plugins.disable(name=PLUGIN_REPL_SYNC)
+
+    # Disable retro changelog
+    topology.standalone.plugins.disable(name=PLUGIN_RETRO_CHANGELOG)
+
+    # Disable dynamic plugins
+    topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'off')])
+    topology.standalone.restart()
+
+    # Now start the test
+    # Enable dynamic plugins
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')])
+    except ldap.LDAPError as e:
+        log.error('Failed to enable dynamic plugin! {}'.format(e.args[0]['desc']))
+        assert False
+
+    # Enable retro changelog
+    topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+
+    # Enbale content sync plugin
+    topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)
+
+    # create a sync repl client and wait 5 seconds to be sure it is running
+    sync_repl = Sync_persist(topology.standalone)
+    sync_repl.start()
+    time.sleep(5)
+
+    # create users
+    users = UserAccounts(topology.standalone, DEFAULT_SUFFIX)
+    users_set = []
+    for i in range(10001, 10004):
+        users_set.append(users.create_test_user(uid=i))
+
+    time.sleep(10)
+    # delete users, that automember/memberof will generate nested updates
+    for user in users_set:
+        user.delete()
+    # stop the server to get the sync_repl result set (exit from while loop).
+    # Only way I found to acheive that.
+    # and wait a bit to let sync_repl thread time to set its result before fetching it.
+    topology.standalone.stop()
+    sync_repl.get_result()
+    sync_repl.join()
+    log.info('test_sync_repl_dynamic_plugin: PASS\n')
+
+    # Success
+    log.info('Test complete')
+
def test_sync_repl_invalid_cookie(topology, request):
    """Test sync_repl with invalid cookie

diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c
index d2210b64c..283607361 100644
--- a/ldap/servers/plugins/sync/sync_persist.c
+++ b/ldap/servers/plugins/sync/sync_persist.c
@@ -156,6 +156,17 @@ ignore_op_pl(Slapi_PBlock *pb)
     * This is the same for ident
     */
    prim_op = get_thread_primary_op();
+    if (prim_op == NULL) {
+        /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
+         * The only known case it happens is with dynamic plugin enabled and an
+         * update that enable the sync_repl plugin. In such case sync_repl registers
+         * the postop (sync_update_persist_op) that is called while the preop was not called
+         */
+        slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
+                      "ignore_op_pl - Operation without primary op set (0x%lx)\n",
+                      (ulong) op);
+        return;
+    }
    ident = sync_persist_get_operation_extension(pb);

    if (ident) {
@@ -232,8 +243,18 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber


    prim_op = get_thread_primary_op();
+    if (prim_op == NULL) {
+        /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
+         * The only known case it happens is with dynamic plugin enabled and an
+         * update that enable the sync_repl plugin. In such case sync_repl registers
+         * the postop (sync_update_persist_op) that is called while the preop was not called
+         */
+        slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
+                      "sync_update_persist_op - Operation without primary op set (0x%lx)\n",
+                      (ulong) pb_op);
+        return;
+    }
    ident = sync_persist_get_operation_extension(pb);
-    PR_ASSERT(prim_op);

    if ((ident == NULL) && operation_is_flag_set(pb_op, OP_FLAG_NOOP)) {
        /* This happens for URP (add cenotaph, fixup rename, tombstone resurrect)
--
2.43.0
@ -1,840 +0,0 @@
From 8dc61a176323f0d41df730abd715ccff3034c2be Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Sun, 27 Nov 2022 09:37:19 -0500
Subject: [PATCH] Issue 5547 - automember plugin improvements

Description:

Rebuild task has the following improvements:

- Only one task allowed at a time
- Do not cleanup previous members by default. Add new CLI option to intentionally
  cleanup memberships before rebuilding from scratch.
- Add better task logging to show fixup progress

To prevent automember from being called in a nested be_txn loop thread storage is
used to check and skip these loops.

relates: https://github.com/389ds/389-ds-base/issues/5547

Reviewed by: spichugi(Thanks!)
---
 .../automember_plugin/automember_mod_test.py | 43 +++-
 ldap/servers/plugins/automember/automember.c | 232 ++++++++++++++----
 ldap/servers/slapd/back-ldbm/ldbm_add.c | 11 +-
 ldap/servers/slapd/back-ldbm/ldbm_delete.c | 10 +-
 ldap/servers/slapd/back-ldbm/ldbm_modify.c | 11 +-
 .../lib389/cli_conf/plugins/automember.py | 10 +-
 src/lib389/lib389/plugins.py | 7 +-
 src/lib389/lib389/tasks.py | 9 +-
 8 files changed, 250 insertions(+), 83 deletions(-)

diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
index 8d25384bf..7a0ed3275 100644
--- a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
+++ b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
@@ -5,12 +5,13 @@
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
-#
+import ldap
import logging
import pytest
import os
+import time
from lib389.utils import ds_is_older
-from lib389._constants import *
+from lib389._constants import DEFAULT_SUFFIX
from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions
from lib389.idm.user import UserAccounts
from lib389.idm.group import Groups
@@ -41,6 +42,11 @@ def automember_fixture(topo, request):
    user_accts = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
    user = user_accts.create_test_user()

+    # Create extra users
+    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
+    for i in range(0, 100):
+        users.create_test_user(uid=i)
+
    # Create automember definitions and regex rules
    automember_prop = {
        'cn': 'testgroup_definition',
@@ -59,7 +65,7 @@ def automember_fixture(topo, request):
    automemberplugin.enable()
    topo.standalone.restart()

-    return (user, groups)
+    return user, groups


def test_mods(automember_fixture, topo):
@@ -72,19 +78,21 @@ def test_mods(automember_fixture, topo):
        2. Update user that should add it to group[1]
        3. Update user that should add it to group[2]
        4. Update user that should add it to group[0]
-        5. Test rebuild task correctly moves user to group[1]
+        5. Test rebuild task adds user to group[1]
+        6. Test rebuild task cleanups groups and only adds it to group[1]
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
+        6. Success
    """
    (user, groups) = automember_fixture

    # Update user which should go into group[0]
    user.replace('cn', 'whatever')
-    groups[0].is_member(user.dn)
+    assert groups[0].is_member(user.dn)
    if groups[1].is_member(user.dn):
        assert False
    if groups[2].is_member(user.dn):
@@ -92,7 +100,7 @@ def test_mods(automember_fixture, topo):

    # Update user0 which should go into group[1]
    user.replace('cn', 'mark')
-    groups[1].is_member(user.dn)
+    assert groups[1].is_member(user.dn)
    if groups[0].is_member(user.dn):
        assert False
    if groups[2].is_member(user.dn):
@@ -100,7 +108,7 @@ def test_mods(automember_fixture, topo):

    # Update user which should go into group[2]
    user.replace('cn', 'simon')
-    groups[2].is_member(user.dn)
+    assert groups[2].is_member(user.dn)
    if groups[0].is_member(user.dn):
        assert False
    if groups[1].is_member(user.dn):
@@ -108,7 +116,7 @@ def test_mods(automember_fixture, topo):

    # Update user which should go back into group[0] (full circle)
    user.replace('cn', 'whatever')
-    groups[0].is_member(user.dn)
+    assert groups[0].is_member(user.dn)
    if groups[1].is_member(user.dn):
        assert False
    if groups[2].is_member(user.dn):
@@ -128,12 +136,24 @@ def test_mods(automember_fixture, topo):
    automemberplugin.enable()
    topo.standalone.restart()

-    # Run rebuild task
+    # Run rebuild task (no cleanup)
    task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount")
+    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+        # test only one fixup task is allowed at a time
+        automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=top")
    task.wait()

-    # Test membership
-    groups[1].is_member(user.dn)
+    # Test membership (user should still be in groups[0])
+    assert groups[1].is_member(user.dn)
+    if not groups[0].is_member(user.dn):
+        assert False
+
+    # Run rebuild task with cleanup
+    task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount", cleanup=True)
+    task.wait()
+
+    # Test membership (user should only be in groups[1])
+    assert groups[1].is_member(user.dn)
    if groups[0].is_member(user.dn):
        assert False
    if groups[2].is_member(user.dn):
@@ -148,4 +168,3 @@ if __name__ == '__main__':
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main(["-s", CURRENT_FILE])
-
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
index 3494d0343..419adb052 100644
--- a/ldap/servers/plugins/automember/automember.c
+++ b/ldap/servers/plugins/automember/automember.c
@@ -1,5 +1,5 @@
/** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2011 Red Hat, Inc.
+ * Copyright (C) 2022 Red Hat, Inc.
 * All rights reserved.
 *
 * License: GPL (version 3 or any later version).
@@ -14,7 +14,7 @@
 * Auto Membership Plug-in
 */
#include "automember.h"
-
+#include <pthread.h>

/*
 * Plug-in globals
@@ -22,7 +22,9 @@
static PRCList *g_automember_config = NULL;
static Slapi_RWLock *g_automember_config_lock = NULL;
static uint64_t abort_rebuild_task = 0;
-
+static pthread_key_t td_automem_block_nested;
+static PRBool fixup_running = PR_FALSE;
+static PRLock *fixup_lock = NULL;
static void *_PluginID = NULL;
static Slapi_DN *_PluginDN = NULL;
static Slapi_DN *_ConfigAreaDN = NULL;
@@ -93,9 +95,43 @@ static void automember_task_export_destructor(Slapi_Task *task);
static void automember_task_map_destructor(Slapi_Task *task);

#define DEFAULT_FILE_MODE PR_IRUSR | PR_IWUSR
+#define FIXUP_PROGRESS_LIMIT 1000
static uint64_t plugin_do_modify = 0;
static uint64_t plugin_is_betxn = 0;

+/* automember_plugin fixup task and add operations should block other be_txn
+ * plugins from calling automember_post_op_mod() */
+static int32_t
+slapi_td_block_nested_post_op(void)
+{
+    int32_t val = 12345;
+
+    if (pthread_setspecific(td_automem_block_nested, (void *)&val) != 0) {
+        return PR_FAILURE;
+    }
+    return PR_SUCCESS;
+}
+
+static int32_t
+slapi_td_unblock_nested_post_op(void)
+{
+    if (pthread_setspecific(td_automem_block_nested, NULL) != 0) {
+        return PR_FAILURE;
+    }
+    return PR_SUCCESS;
+}
+
+static int32_t
+slapi_td_is_post_op_nested(void)
+{
+    int32_t *value = pthread_getspecific(td_automem_block_nested);
+
+    if (value == NULL) {
+        return 0;
+    }
+    return 1;
+}
+
/*
 * Config cache locking functions
 */
@@ -317,6 +353,14 @@ automember_start(Slapi_PBlock *pb)
        return -1;
    }

+    if (fixup_lock == NULL) {
+        if ((fixup_lock = PR_NewLock()) == NULL) {
+            slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+                          "automember_start - Failed to create fixup lock.\n");
+            return -1;
+        }
+    }
+
    /*
     * Get the plug-in target dn from the system
     * and store it for future use. */
@@ -360,6 +404,11 @@ automember_start(Slapi_PBlock *pb)
        }
    }

+    if (pthread_key_create(&td_automem_block_nested, NULL) != 0) {
+        slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+                      "automember_start - pthread_key_create failed\n");
+    }
+
    slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
                  "automember_start - ready for service\n");
    slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
@@ -394,6 +443,8 @@ automember_close(Slapi_PBlock *pb __attribute__((unused)))
    slapi_sdn_free(&_ConfigAreaDN);
    slapi_destroy_rwlock(g_automember_config_lock);
    g_automember_config_lock = NULL;
+    PR_DestroyLock(fixup_lock);
+    fixup_lock = NULL;

    slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
                  "<-- automember_close\n");
@@ -1619,7 +1670,6 @@ out:
    return rc;
}

-
/*
 * automember_update_member_value()
 *
@@ -1634,7 +1684,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
    LDAPMod *mods[2];
    char *vals[2];
    char *member_value = NULL;
-    int rc = 0;
+    int rc = LDAP_SUCCESS;
    Slapi_DN *group_sdn;

    /* First thing check that the group still exists */
@@ -1653,7 +1703,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
                          "automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n",
                          group_dn, rc);
        }
-        return rc;
+        goto out;
    }

    /* If grouping_value is dn, we need to fetch the dn instead. */
@@ -1879,6 +1929,13 @@ automember_mod_post_op(Slapi_PBlock *pb)
    PRCList *list = NULL;
    int rc = SLAPI_PLUGIN_SUCCESS;

+    if (slapi_td_is_post_op_nested()) {
+        /* don't process op twice in the same thread */
+        return rc;
+    } else {
+        slapi_td_block_nested_post_op();
+    }
+
    slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
                  "--> automember_mod_post_op\n");

@@ -2005,6 +2062,7 @@ automember_mod_post_op(Slapi_PBlock *pb)
            }
        }
    }
+    slapi_td_unblock_nested_post_op();

    slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
                  "<-- automember_mod_post_op (%d)\n", rc);
@@ -2024,6 +2082,13 @@ automember_add_post_op(Slapi_PBlock *pb)
    slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
                  "--> automember_add_post_op\n");

+    if (slapi_td_is_post_op_nested()) {
+        /* don't process op twice in the same thread */
+        return rc;
+    } else {
+        slapi_td_block_nested_post_op();
+    }
+
    /* Reload config if a config entry was added. */
    if ((sdn = automember_get_sdn(pb))) {
        if (automember_dn_is_config(sdn)) {
@@ -2039,7 +2104,7 @@ automember_add_post_op(Slapi_PBlock *pb)

    /* If replication, just bail. */
    if (automember_isrepl(pb)) {
-        return SLAPI_PLUGIN_SUCCESS;
+        goto bail;
    }

    /* Get the newly added entry. */
@@ -2052,7 +2117,7 @@ automember_add_post_op(Slapi_PBlock *pb)
                                             tombstone);
    slapi_value_free(&tombstone);
    if (is_tombstone) {
-        return SLAPI_PLUGIN_SUCCESS;
+        goto bail;
    }

    /* Check if a config entry applies
@@ -2063,21 +2128,19 @@ automember_add_post_op(Slapi_PBlock *pb)
            list = PR_LIST_HEAD(g_automember_config);
            while (list != g_automember_config) {
                config = (struct configEntry *)list;
-
                /* Does the entry meet scope and filter requirements? */
                if (slapi_dn_issuffix(slapi_sdn_get_dn(sdn), config->scope) &&
-                    (slapi_filter_test_simple(e, config->filter) == 0)) {
+                    (slapi_filter_test_simple(e, config->filter) == 0))
+                {
                    /* Find out what membership changes are needed and make them. */
                    if (automember_update_membership(config, e, NULL) == SLAPI_PLUGIN_FAILURE) {
                        rc = SLAPI_PLUGIN_FAILURE;
                        break;
                    }
                }
-
                list = PR_NEXT_LINK(list);
            }
        }
-
        automember_config_unlock();
    } else {
        slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
@@ -2098,6 +2161,7 @@ bail:
        slapi_pblock_set(pb, SLAPI_RESULT_CODE, &result);
        slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, &errtxt);
    }
+    slapi_td_unblock_nested_post_op();

    return rc;
}
@@ -2138,6 +2202,7 @@ typedef struct _task_data
    Slapi_DN *base_dn;
    char *bind_dn;
    int scope;
+    PRBool cleanup;
} task_data;

static void
@@ -2270,6 +2335,7 @@ automember_task_abort_thread(void *arg)
 * basedn: dc=example,dc=com
 * filter: (uid=*)
 * scope: sub
+ * cleanup: yes/on (default is off)
 *
 * basedn and filter are required. If scope is omitted, the default is sub
 */
@@ -2284,9 +2350,22 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
    const char *base_dn;
    const char *filter;
    const char *scope;
+    const char *cleanup_str;
+    PRBool cleanup = PR_FALSE;

    *returncode = LDAP_SUCCESS;

+    PR_Lock(fixup_lock);
+    if (fixup_running) {
+        PR_Unlock(fixup_lock);
+        *returncode = LDAP_UNWILLING_TO_PERFORM;
+        slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+                      "automember_task_add - there is already a fixup task running\n");
+        rv = SLAPI_DSE_CALLBACK_ERROR;
+        goto out;
+    }
+    PR_Unlock(fixup_lock);
+
    /*
     * Grab the task params
     */
@@ -2300,6 +2379,12 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
        rv = SLAPI_DSE_CALLBACK_ERROR;
        goto out;
    }
+    if ((cleanup_str = slapi_entry_attr_get_ref(e, "cleanup"))) {
+        if (strcasecmp(cleanup_str, "yes") == 0 || strcasecmp(cleanup_str, "on")) {
+            cleanup = PR_TRUE;
+        }
+    }
+
    scope = slapi_fetch_attr(e, "scope", "sub");
    /*
     * setup our task data
@@ -2315,6 +2400,7 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
    mytaskdata->bind_dn = slapi_ch_strdup(bind_dn);
    mytaskdata->base_dn = slapi_sdn_new_dn_byval(base_dn);
    mytaskdata->filter_str = slapi_ch_strdup(filter);
+    mytaskdata->cleanup = cleanup;

    if (scope) {
        if (strcasecmp(scope, "sub") == 0) {
@@ -2334,6 +2420,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
    task = slapi_plugin_new_task(slapi_entry_get_ndn(e), arg);
    slapi_task_set_destructor_fn(task, automember_task_destructor);
    slapi_task_set_data(task, mytaskdata);
+    PR_Lock(fixup_lock);
+    fixup_running = PR_TRUE;
+    PR_Unlock(fixup_lock);
    /*
     * Start the task as a separate thread
     */
@@ -2345,6 +2434,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
                      "automember_task_add - Unable to create task thread!\n");
        *returncode = LDAP_OPERATIONS_ERROR;
        slapi_task_finish(task, *returncode);
+        PR_Lock(fixup_lock);
+        fixup_running = PR_FALSE;
+        PR_Unlock(fixup_lock);
        rv = SLAPI_DSE_CALLBACK_ERROR;
    } else {
        rv = SLAPI_DSE_CALLBACK_OK;
@@ -2372,6 +2464,9 @@ automember_rebuild_task_thread(void *arg)
    PRCList *list = NULL;
    PRCList *include_list = NULL;
    int result = 0;
+    int64_t fixup_progress_count = 0;
+    int64_t fixup_progress_elapsed = 0;
+    int64_t fixup_start_time = 0;
    size_t i = 0;

    /* Reset abort flag */
@@ -2380,6 +2475,7 @@ automember_rebuild_task_thread(void *arg)
    if (!task) {
        return; /* no task */
    }
+
    slapi_task_inc_refcount(task);
    slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
                  "automember_rebuild_task_thread - Refcount incremented.\n");
@@ -2393,9 +2489,11 @@ automember_rebuild_task_thread(void *arg)
    slapi_task_log_status(task, "Automember rebuild task starting (base dn: (%s) filter (%s)...",
                          slapi_sdn_get_dn(td->base_dn), td->filter_str);
    /*
-     * Set the bind dn in the local thread data
+     * Set the bind dn in the local thread data, and block post op mods
     */
    slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));
+    slapi_td_block_nested_post_op();
+    fixup_start_time = slapi_current_rel_time_t();
    /*
     * Take the config lock now and search the database
     */
@@ -2426,6 +2524,21 @@ automember_rebuild_task_thread(void *arg)
     * Loop over the entries
     */
    for (i = 0; entries && (entries[i] != NULL); i++) {
+        fixup_progress_count++;
+        if (fixup_progress_count % FIXUP_PROGRESS_LIMIT == 0 ) {
+            slapi_task_log_notice(task,
+                    "Processed %ld entries in %ld seconds (+%ld seconds)",
+                    fixup_progress_count,
+                    slapi_current_rel_time_t() - fixup_start_time,
+                    slapi_current_rel_time_t() - fixup_progress_elapsed);
+            slapi_task_log_status(task,
+                    "Processed %ld entries in %ld seconds (+%ld seconds)",
+                    fixup_progress_count,
+                    slapi_current_rel_time_t() - fixup_start_time,
+                    slapi_current_rel_time_t() - fixup_progress_elapsed);
+            slapi_task_inc_progress(task);
+            fixup_progress_elapsed = slapi_current_rel_time_t();
+        }
        if (slapi_atomic_load_64(&abort_rebuild_task, __ATOMIC_ACQUIRE) == 1) {
            /* The task was aborted */
            slapi_task_log_notice(task, "Automember rebuild task was intentionally aborted");
@@ -2443,48 +2556,66 @@ automember_rebuild_task_thread(void *arg)
            if (slapi_dn_issuffix(slapi_entry_get_dn(entries[i]), config->scope) &&
                (slapi_filter_test_simple(entries[i], config->filter) == 0))
            {
-                /* First clear out all the defaults groups */
-                for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
-                    if ((result = automember_update_member_value(entries[i], config->default_groups[ii],
-                                                                 config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
-                    {
-                        slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
-                                                    "member from default group (%s) error (%d)",
-                                              config->default_groups[ii], result);
-                        slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
-                                                    "member from default group (%s) error (%d)",
-                                              config->default_groups[ii], result);
-                        slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
-                                      "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
-                                      config->default_groups[ii], result);
-                        goto out;
-                    }
-                }
-
-                /* Then clear out the non-default group */
-                if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
-                    include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
-                    while (include_list != (PRCList *)config->inclusive_rules) {
-                        struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
-                        if ((result = automember_update_member_value(entries[i], slapi_sdn_get_dn(curr_rule->target_group_dn),
-                                                                     config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
+                if (td->cleanup) {
+
+                    slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+                                  "automember_rebuild_task_thread - Cleaning up groups (config %s)\n",
+                                  config->dn);
+                    /* First clear out all the defaults groups */
+                    for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
+                        if ((result = automember_update_member_value(entries[i],
+                                                                     config->default_groups[ii],
+                                                                     config->grouping_attr,
+                                                                     config->grouping_value,
+                                                                     NULL, DEL_MEMBER)))
                        {
                            slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
-                                                    "member from group (%s) error (%d)",
-                                              slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+                                                        "member from default group (%s) error (%d)",
+                                                  config->default_groups[ii], result);
                            slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
-                                                    "member from group (%s) error (%d)",
-                                              slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+                                                        "member from default group (%s) error (%d)",
+                                                  config->default_groups[ii], result);
                            slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
                                          "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
-                                          slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+                                          config->default_groups[ii], result);
                            goto out;
                        }
-                        include_list = PR_NEXT_LINK(include_list);
                    }
+
+                    /* Then clear out the non-default group */
+                    if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
+                        include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
+                        while (include_list != (PRCList *)config->inclusive_rules) {
+                            struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
+                            if ((result = automember_update_member_value(entries[i],
+                                                                         slapi_sdn_get_dn(curr_rule->target_group_dn),
+                                                                         config->grouping_attr,
+                                                                         config->grouping_value,
+                                                                         NULL, DEL_MEMBER)))
+                            {
+                                slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
+                                                            "member from group (%s) error (%d)",
+                                                      slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+                                slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
+                                                            "member from group (%s) error (%d)",
+                                                      slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+                                slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+                                              "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
+                                              slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+                                goto out;
+                            }
+                            include_list = PR_NEXT_LINK(include_list);
+                        }
+                    }
+                    slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+                                  "automember_rebuild_task_thread - Finished cleaning up groups (config %s)\n",
+                                  config->dn);
                }

                /* Update the memberships for this entries */
+                slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+                              "automember_rebuild_task_thread - Updating membership (config %s)\n",
+                              config->dn);
                if (slapi_is_shutting_down() ||
                    automember_update_membership(config, entries[i], NULL) == SLAPI_PLUGIN_FAILURE)
                {
@@ -2508,15 +2639,22 @@ out:
        slapi_task_log_notice(task, "Automember rebuild task aborted. Error (%d)", result);
        slapi_task_log_status(task, "Automember rebuild task aborted. Error (%d)", result);
    } else {
-        slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
-        slapi_task_log_status(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
+        slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
+                              (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
+        slapi_task_log_status(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
+                              (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
    }
    slapi_task_inc_progress(task);
    slapi_task_finish(task, result);
    slapi_task_dec_refcount(task);
    slapi_atomic_store_64(&abort_rebuild_task, 0, __ATOMIC_RELEASE);
+    slapi_td_unblock_nested_post_op();
+    PR_Lock(fixup_lock);
+    fixup_running = PR_FALSE;
+    PR_Unlock(fixup_lock);
+
    slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
-                  "automember_rebuild_task_thread - Refcount decremented.\n");
+                  "automember_rebuild_task_thread - task finished, refcount decremented.\n");
}

/*
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
index ba2d73a84..ce4c314a1 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
@@ -1,6 +1,6 @@
/** BEGIN COPYRIGHT BLOCK
 * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2022 Red Hat, Inc.
 * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
 * All rights reserved.
 *
@@ -1264,10 +1264,6 @@ ldbm_back_add(Slapi_PBlock *pb)
    goto common_return;

error_return:
-    /* Revert the caches if this is the parent operation */
-    if (parent_op && betxn_callback_fails) {
-        revert_cache(inst, &parent_time);
-    }
    if (addingentry_id_assigned) {
        next_id_return(be, addingentry->ep_id);
    }
@@ -1376,6 +1372,11 @@ diskfull_return:
        if (!not_an_error) {
            rc = SLAPI_FAIL_GENERAL;
        }
+
+        /* Revert the caches if this is the parent operation */
+        if (parent_op && betxn_callback_fails) {
+            revert_cache(inst, &parent_time);
+        }
    }

common_return:
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index de23190c3..27f0ac58a 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -1407,11 +1407,6 @@ commit_return:
    goto common_return;

error_return:
-    /* Revert the caches if this is the parent operation */
-    if (parent_op && betxn_callback_fails) {
-        revert_cache(inst, &parent_time);
-    }
-
    if (tombstone) {
        if (cache_is_in_cache(&inst->inst_cache, tombstone)) {
            tomb_ep_id = tombstone->ep_id; /* Otherwise, tombstone might have been freed. */
@@ -1496,6 +1491,11 @@ error_return:
                conn_id, op_id, parent_modify_c.old_entry, parent_modify_c.new_entry, myrc);
    }

+    /* Revert the caches if this is the parent operation */
+    if (parent_op && betxn_callback_fails) {
+        revert_cache(inst, &parent_time);
+    }
+
common_return:
    if (orig_entry) {
        /* NOTE: #define SLAPI_DELETE_BEPREOP_ENTRY SLAPI_ENTRY_PRE_OP */
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index 537369055..64b293001 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -1,6 +1,6 @@
/** BEGIN COPYRIGHT BLOCK
 * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -1043,11 +1043,6 @@ ldbm_back_modify(Slapi_PBlock *pb)
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
-
|
||||
if (postentry != NULL) {
|
||||
slapi_entry_free(postentry);
|
||||
postentry = NULL;
|
||||
@@ -1103,6 +1098,10 @@ error_return:
|
||||
if (!not_an_error) {
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
}
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
}
|
||||
|
||||
/* if ec is in cache, remove it, then add back e if we still have it */
|
||||
diff --git a/src/lib389/lib389/cli_conf/plugins/automember.py b/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
index 15b00c633..568586ad8 100644
|
||||
--- a/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
+++ b/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
@@ -155,7 +155,7 @@ def fixup(inst, basedn, log, args):
|
||||
log.info('Attempting to add task entry... This will fail if Automembership plug-in is not enabled.')
|
||||
if not plugin.status():
|
||||
log.error("'%s' is disabled. Rebuild membership task can't be executed" % plugin.rdn)
|
||||
- fixup_task = plugin.fixup(args.DN, args.filter)
|
||||
+ fixup_task = plugin.fixup(args.DN, args.filter, args.cleanup)
|
||||
if args.wait:
|
||||
log.info(f'Waiting for fixup task "{fixup_task.dn}" to complete. You can safely exit by pressing Control C ...')
|
||||
fixup_task.wait(timeout=args.timeout)
|
||||
@@ -225,8 +225,8 @@ def create_parser(subparsers):
|
||||
subcommands = automember.add_subparsers(help='action')
|
||||
add_generic_plugin_parsers(subcommands, AutoMembershipPlugin)
|
||||
|
||||
- list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
|
||||
- subcommands_list = list.add_subparsers(help='action')
|
||||
+ automember_list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
|
||||
+ subcommands_list = automember_list.add_subparsers(help='action')
|
||||
list_definitions = subcommands_list.add_parser('definitions', help='Lists Automembership definitions.')
|
||||
list_definitions.set_defaults(func=definition_list)
|
||||
list_regexes = subcommands_list.add_parser('regexes', help='List Automembership regex rules.')
|
||||
@@ -269,6 +269,8 @@ def create_parser(subparsers):
|
||||
fixup_task.add_argument('-f', '--filter', required=True, help='Sets the LDAP filter for entries to fix up')
|
||||
fixup_task.add_argument('-s', '--scope', required=True, choices=['sub', 'base', 'one'], type=str.lower,
|
||||
help='Sets the LDAP search scope for entries to fix up')
|
||||
+ fixup_task.add_argument('--cleanup', action='store_true',
|
||||
+ help="Clean up previous group memberships before rebuilding")
|
||||
fixup_task.add_argument('--wait', action='store_true',
|
||||
help="Wait for the task to finish, this could take a long time")
|
||||
fixup_task.add_argument('--timeout', default=0, type=int,
|
||||
@@ -279,7 +281,7 @@ def create_parser(subparsers):
|
||||
fixup_status.add_argument('--dn', help="The task entry's DN")
|
||||
fixup_status.add_argument('--show-log', action='store_true', help="Display the task log")
|
||||
fixup_status.add_argument('--watch', action='store_true',
|
||||
- help="Watch the task's status and wait for it to finish")
|
||||
+ help="Watch the task's status and wait for it to finish")
|
||||
|
||||
abort_fixup = subcommands.add_parser('abort-fixup', help='Abort the rebuild membership task.')
|
||||
abort_fixup.set_defaults(func=abort)
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 52691a44c..a1ad0a45b 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -1141,13 +1141,15 @@ class AutoMembershipPlugin(Plugin):
|
||||
def __init__(self, instance, dn="cn=Auto Membership Plugin,cn=plugins,cn=config"):
|
||||
super(AutoMembershipPlugin, self).__init__(instance, dn)
|
||||
|
||||
- def fixup(self, basedn, _filter=None):
|
||||
+ def fixup(self, basedn, _filter=None, cleanup=False):
|
||||
"""Create an automember rebuild membership task
|
||||
|
||||
:param basedn: Basedn to fix up
|
||||
:type basedn: str
|
||||
:param _filter: a filter for entries to fix up
|
||||
:type _filter: str
|
||||
+ :param cleanup: cleanup old group memberships
|
||||
+ :type cleanup: boolean
|
||||
|
||||
:returns: an instance of Task(DSLdapObject)
|
||||
"""
|
||||
@@ -1156,6 +1158,9 @@ class AutoMembershipPlugin(Plugin):
|
||||
task_properties = {'basedn': basedn}
|
||||
if _filter is not None:
|
||||
task_properties['filter'] = _filter
|
||||
+ if cleanup:
|
||||
+ task_properties['cleanup'] = "yes"
|
||||
+
|
||||
task.create(properties=task_properties)
|
||||
|
||||
return task
|
||||
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
|
||||
index 1a16bbb83..193805780 100644
|
||||
--- a/src/lib389/lib389/tasks.py
|
||||
+++ b/src/lib389/lib389/tasks.py
|
||||
@@ -1006,12 +1006,13 @@ class Tasks(object):
|
||||
return exitCode
|
||||
|
||||
def automemberRebuild(self, suffix=DEFAULT_SUFFIX, scope='sub',
|
||||
- filterstr='objectclass=top', args=None):
|
||||
+ filterstr='objectclass=top', cleanup=False, args=None):
|
||||
'''
|
||||
- @param suffix - The suffix the task should examine - defualt is
|
||||
+ @param suffix - The suffix the task should examine - default is
|
||||
"dc=example,dc=com"
|
||||
@param scope - The scope of the search to find entries
|
||||
- @param fitlerstr - THe search filter to find entries
|
||||
+ @param fitlerstr - The search filter to find entries
|
||||
+ @param cleanup - reset/clear the old group mmeberships prior to rebuilding
|
||||
@param args - is a dictionary that contains modifier of the task
|
||||
wait: True/[False] - If True, waits for the completion of
|
||||
the task before to return
|
||||
@@ -1027,6 +1028,8 @@ class Tasks(object):
|
||||
entry.setValues('basedn', suffix)
|
||||
entry.setValues('filter', filterstr)
|
||||
entry.setValues('scope', scope)
|
||||
+ if cleanup:
|
||||
+ entry.setValues('cleanup', 'yes')
|
||||
|
||||
# start the task and possibly wait for task completion
|
||||
try:
|
||||
--
|
||||
2.43.0
|
||||
|
@ -1,83 +0,0 @@
From 9319d5b022918f14cacb00e3faef85a6ab730a26 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 27 Feb 2024 16:30:47 -0800
Subject: [PATCH] Issue 3527 - Support HAProxy and Instance on the same machine
configuration (#6107)

Description: Improve how we handle HAProxy connections to work better when
the DS and HAProxy are on the same machine.
Ensure the client and header destination IPs are checked against the trusted IP list.

Additionally, this change will also allow configuration having
HAProxy is listening on a different subnet than the one used to forward the request.

Related: https://github.com/389ds/389-ds-base/issues/3527

Reviewed by: @progier389, @jchapma (Thanks!)
---
ldap/servers/slapd/connection.c | 35 +++++++++++++++++++++++++--------
1 file changed, 27 insertions(+), 8 deletions(-)

diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index d28a39bf7..10a8cc577 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1187,6 +1187,8 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
char str_ip[INET6_ADDRSTRLEN + 1] = {0};
char str_haproxy_ip[INET6_ADDRSTRLEN + 1] = {0};
char str_haproxy_destip[INET6_ADDRSTRLEN + 1] = {0};
+ int trusted_matches_ip_found = 0;
+ int trusted_matches_destip_found = 0;
struct berval **bvals = NULL;
int proxy_connection = 0;

@@ -1245,21 +1247,38 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
normalize_IPv4(conn->cin_addr, buf_ip, sizeof(buf_ip), str_ip, sizeof(str_ip));
normalize_IPv4(&pr_netaddr_dest, buf_haproxy_destip, sizeof(buf_haproxy_destip),
str_haproxy_destip, sizeof(str_haproxy_destip));
+ size_t ip_len = strlen(buf_ip);
+ size_t destip_len = strlen(buf_haproxy_destip);

/* Now, reset RC and set it to 0 only if a match is found */
haproxy_rc = -1;

- /* Allow only:
- * Trusted IP == Original Client IP == HAProxy Header Destination IP */
+ /*
+ * We need to allow a configuration where DS instance and HAProxy are on the same machine.
+ * In this case, we need to check if
+ * the HAProxy client IP (which will be a loopback address) matches one of the the trusted IP addresses,
+ * while still checking that
+ * the HAProxy header destination IP address matches one of the trusted IP addresses.
+ * Additionally, this change will also allow configuration having
+ * HAProxy listening on a different subnet than one used to forward the request.
+ */
for (size_t i = 0; bvals[i] != NULL; ++i) {
- if ((strlen(bvals[i]->bv_val) == strlen(buf_ip)) &&
- (strlen(bvals[i]->bv_val) == strlen(buf_haproxy_destip)) &&
- (strncasecmp(bvals[i]->bv_val, buf_ip, strlen(buf_ip)) == 0) &&
- (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, strlen(buf_haproxy_destip)) == 0)) {
- haproxy_rc = 0;
- break;
+ size_t bval_len = strlen(bvals[i]->bv_val);
+
+ /* Check if the Client IP (HAProxy's machine IP) address matches the trusted IP address */
+ if (!trusted_matches_ip_found) {
+ trusted_matches_ip_found = (bval_len == ip_len) && (strncasecmp(bvals[i]->bv_val, buf_ip, ip_len) == 0);
+ }
+ /* Check if the HAProxy header destination IP address matches the trusted IP address */
+ if (!trusted_matches_destip_found) {
+ trusted_matches_destip_found = (bval_len == destip_len) && (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, destip_len) == 0);
}
}
+
+ if (trusted_matches_ip_found && trusted_matches_destip_found) {
+ haproxy_rc = 0;
+ }
+
if (haproxy_rc == -1) {
slapi_log_err(SLAPI_LOG_CONNS, "connection_read_operation", "HAProxy header received from unknown source.\n");
disconnect_server_nomutex(conn, conn->c_connid, -1, SLAPD_DISCONNECT_PROXY_UNKNOWN, EPROTO);
--
2.45.0

@ -1,108 +0,0 @@
From 016a2b6bd3e27cbff36609824a75b020dfd24823 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 1 May 2024 15:01:33 +0100
Subject: [PATCH] CVE-2024-2199

---
.../tests/suites/password/password_test.py | 56 +++++++++++++++++++
ldap/servers/slapd/modify.c | 8 ++-
2 files changed, 62 insertions(+), 2 deletions(-)

diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py
index 38079476a..b3ff08904 100644
--- a/dirsrvtests/tests/suites/password/password_test.py
+++ b/dirsrvtests/tests/suites/password/password_test.py
@@ -65,6 +65,62 @@ def test_password_delete_specific_password(topology_st):
log.info('test_password_delete_specific_password: PASSED')


+def test_password_modify_non_utf8(topology_st):
+ """Attempt a modify of the userPassword attribute with
+ an invalid non utf8 value
+
+ :id: a31af9d5-d665-42b9-8d6e-fea3d0837d36
+ :setup: Standalone instance
+ :steps:
+ 1. Add a user if it doesnt exist and set its password
+ 2. Verify password with a bind
+ 3. Modify userPassword attr with invalid value
+ 4. Attempt a bind with invalid password value
+ 5. Verify original password with a bind
+ :expectedresults:
+ 1. The user with userPassword should be added successfully
+ 2. Operation should be successful
+ 3. Server returns ldap.UNWILLING_TO_PERFORM
+ 4. Server returns ldap.INVALID_CREDENTIALS
+ 5. Operation should be successful
+ """
+
+ log.info('Running test_password_modify_non_utf8...')
+
+ # Create user and set password
+ standalone = topology_st.standalone
+ users = UserAccounts(standalone, DEFAULT_SUFFIX)
+ if not users.exists(TEST_USER_PROPERTIES['uid'][0]):
+ user = users.create(properties=TEST_USER_PROPERTIES)
+ else:
+ user = users.get(TEST_USER_PROPERTIES['uid'][0])
+ user.set('userpassword', PASSWORD)
+
+ # Verify password
+ try:
+ user.bind(PASSWORD)
+ except ldap.LDAPError as e:
+ log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
+ assert False
+
+ # Modify userPassword with an invalid value
+ password = b'tes\x82t-password' # A non UTF-8 encoded password
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+ user.replace('userpassword', password)
+
+ # Verify a bind fails with invalid pasword
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ user.bind(password)
+
+ # Verify we can still bind with original password
+ try:
+ user.bind(PASSWORD)
+ except ldap.LDAPError as e:
+ log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
+ assert False
+
+ log.info('test_password_modify_non_utf8: PASSED')
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
index 5ca78539c..669bb104c 100644
--- a/ldap/servers/slapd/modify.c
+++ b/ldap/servers/slapd/modify.c
@@ -765,8 +765,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
* flagged - leave mod attributes alone */
if (!repl_op && !skip_modified_attrs && lastmod) {
modify_update_last_modified_attr(pb, &smods);
+ slapi_pblock_set(pb, SLAPI_MODIFY_MODS, slapi_mods_get_ldapmods_byref(&smods));
}

+
if (0 == slapi_mods_get_num_mods(&smods)) {
/* nothing to do - no mods - this is not an error - just
send back LDAP_SUCCESS */
@@ -933,8 +935,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)

/* encode password */
if (pw_encodevals_ext(pb, sdn, va)) {
- slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s.\n", slapi_entry_get_dn_const(e));
- send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to store attribute \"userPassword\" correctly\n", 0, NULL);
+ slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s, "
+ "check value is utf8 string.\n", slapi_entry_get_dn_const(e));
+ send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to hash \"userPassword\" attribute, "
+ "check value is utf8 string.\n", 0, NULL);
valuearray_free(&va);
goto free_and_return;
}
--
2.45.0

@ -1,213 +0,0 @@
From d5bbe52fbe84a7d3b5938bf82d5c4af15061a8e2 Mon Sep 17 00:00:00 2001
From: Pierre Rogier <progier@redhat.com>
Date: Wed, 17 Apr 2024 18:18:04 +0200
Subject: [PATCH] CVE-2024-3657

---
.../tests/suites/filter/large_filter_test.py | 34 +++++-
ldap/servers/slapd/back-ldbm/index.c | 111 ++++++++++--------
2 files changed, 92 insertions(+), 53 deletions(-)

diff --git a/dirsrvtests/tests/suites/filter/large_filter_test.py b/dirsrvtests/tests/suites/filter/large_filter_test.py
index ecc7bf979..40526bb16 100644
--- a/dirsrvtests/tests/suites/filter/large_filter_test.py
+++ b/dirsrvtests/tests/suites/filter/large_filter_test.py
@@ -13,19 +13,29 @@ verify and testing Filter from a search

import os
import pytest
+import ldap

-from lib389._constants import PW_DM
+from lib389._constants import PW_DM, DEFAULT_SUFFIX, ErrorLog
from lib389.topologies import topology_st as topo
from lib389.idm.user import UserAccounts, UserAccount
from lib389.idm.account import Accounts
from lib389.backend import Backends
from lib389.idm.domain import Domain
+from lib389.utils import get_ldapurl_from_serverid

SUFFIX = 'dc=anuj,dc=com'

pytestmark = pytest.mark.tier1


+def open_new_ldapi_conn(dsinstance):
+ ldapurl, certdir = get_ldapurl_from_serverid(dsinstance)
+ assert 'ldapi://' in ldapurl
+ conn = ldap.initialize(ldapurl)
+ conn.sasl_interactive_bind_s("", ldap.sasl.external())
+ return conn
+
+
@pytest.fixture(scope="module")
def _create_entries(request, topo):
"""
@@ -160,6 +170,28 @@ def test_large_filter(topo, _create_entries, real_value):
assert len(Accounts(conn, SUFFIX).filter(real_value)) == 3


+def test_long_filter_value(topo):
+ """Exercise large eq filter with dn syntax attributes
+
+ :id: b069ef72-fcc3-11ee-981c-482ae39447e5
+ :setup: Standalone
+ :steps:
+ 1. Try to pass filter rules as per the condition.
+ :expectedresults:
+ 1. Pass
+ """
+ inst = topo.standalone
+ conn = open_new_ldapi_conn(inst.serverid)
+ inst.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE,ErrorLog.SEARCH_FILTER))
+ filter_value = "a\x1Edmin" * 1025
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
+ filter_value = "aAdmin" * 1025
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
+ filter_value = "*"
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
+ inst.config.loglevel(vals=(ErrorLog.DEFAULT,))
+
+
if __name__ == '__main__':
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s -v %s" % CURRENT_FILE)
diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c
index 410db23d1..30fa09ebb 100644
--- a/ldap/servers/slapd/back-ldbm/index.c
+++ b/ldap/servers/slapd/back-ldbm/index.c
@@ -71,6 +71,32 @@ typedef struct _index_buffer_handle index_buffer_handle;
#define INDEX_BUFFER_FLAG_SERIALIZE 1
#define INDEX_BUFFER_FLAG_STATS 2

+/*
+ * space needed to encode a byte:
+ * 0x00-0x31 and 0x7f-0xff requires 3 bytes: \xx
+ * 0x22 and 0x5C requires 2 bytes: \" and \\
+ * other requires 1 byte: c
+ */
+static char encode_size[] = {
+ /* 0x00 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0x10 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0x20 */ 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1,
+ /* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3,
+ /* 0x80 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0x90 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0xA0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0xB0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0xC0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0xD0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0xE0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ /* 0xF0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+};
+
+
/* Index buffering functions */

static int
@@ -799,65 +825,46 @@ index_add_mods(

/*
* Convert a 'struct berval' into a displayable ASCII string
+ * returns the printable string
*/
-
-#define SPECIAL(c) (c < 32 || c > 126 || c == '\\' || c == '"')
-
const char *
encode(const struct berval *data, char buf[BUFSIZ])
{
- char *s;
- char *last;
- if (data == NULL || data->bv_len == 0)
- return "";
- last = data->bv_val + data->bv_len - 1;
- for (s = data->bv_val; s < last; ++s) {
- if (SPECIAL(*s)) {
- char *first = data->bv_val;
- char *bufNext = buf;
- size_t bufSpace = BUFSIZ - 4;
- while (1) {
- /* printf ("%lu bytes ASCII\n", (unsigned long)(s - first)); */
- if (bufSpace < (size_t)(s - first))
- s = first + bufSpace - 1;
- if (s != first) {
- memcpy(bufNext, first, s - first);
- bufNext += (s - first);
- bufSpace -= (s - first);
- }
- do {
- if (bufSpace) {
- *bufNext++ = '\\';
- --bufSpace;
- }
- if (bufSpace < 2) {
- memcpy(bufNext, "..", 2);
- bufNext += 2;
- goto bail;
- }
- if (*s == '\\' || *s == '"') {
- *bufNext++ = *s;
- --bufSpace;
- } else {
- sprintf(bufNext, "%02x", (unsigned)*(unsigned char *)s);
- bufNext += 2;
- bufSpace -= 2;
- }
- } while (++s <= last && SPECIAL(*s));
- if (s > last)
- break;
- first = s;
- while (!SPECIAL(*s) && s <= last)
- ++s;
- }
- bail:
- *bufNext = '\0';
- /* printf ("%lu chars in buffer\n", (unsigned long)(bufNext - buf)); */
+ if (!data || !data->bv_val) {
+ strcpy(buf, "<NULL>");
+ return buf;
+ }
+ char *endbuff = &buf[BUFSIZ-4]; /* Reserve space to append "...\0" */
+ char *ptout = buf;
+ unsigned char *ptin = (unsigned char*) data->bv_val;
+ unsigned char *endptin = ptin+data->bv_len;
+
+ while (ptin < endptin) {
+ if (ptout >= endbuff) {
+ /*
+ * BUFSIZ(8K) > SLAPI_LOG_BUFSIZ(2K) so the error log message will be
+ * truncated anyway. So there is no real interrest to test if the original
+ * data contains no special characters and return it as is.
+ */
+ strcpy(endbuff, "...");
return buf;
}
+ switch (encode_size[*ptin]) {
+ case 1:
+ *ptout++ = *ptin++;
+ break;
+ case 2:
+ *ptout++ = '\\';
+ *ptout++ = *ptin++;
+ break;
+ case 3:
+ sprintf(ptout, "\\%02x", *ptin++);
+ ptout += 3;
+ break;
+ }
}
- /* printf ("%lu bytes, all ASCII\n", (unsigned long)(s - data->bv_val)); */
- return data->bv_val;
+ *ptout = 0;
+ return buf;
}

static const char *
--
2.45.0

@ -1,143 +0,0 @@
From 6e5f03d5872129963106024f53765234a282406c Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Fri, 16 Feb 2024 11:13:16 +0000
Subject: [PATCH] Issue 6096 - Improve connection timeout error logging (#6097)

Bug description: When a paged result search is run with a time limit,
if the time limit is exceed the server closes the connection with
closed IO timeout (nsslapd-ioblocktimeout) - T2. This error message
is incorrect as the reason the connection has been closed was because
the specified time limit on a paged result search has been exceeded.

Fix description: Correct error message

Relates: https://github.com/389ds/389-ds-base/issues/6096

Reviewed by: @tbordaz (Thank you)
---
ldap/admin/src/logconv.pl | 24 ++++++++++++++++++-
ldap/servers/slapd/daemon.c | 4 ++--
ldap/servers/slapd/disconnect_error_strings.h | 1 +
ldap/servers/slapd/disconnect_errors.h | 2 +-
4 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl
index 7698c383a..2a933c4a3 100755
--- a/ldap/admin/src/logconv.pl
+++ b/ldap/admin/src/logconv.pl
@@ -267,7 +267,7 @@ my $optimeAvg = 0;
my %cipher = ();
my @removefiles = ();

-my @conncodes = qw(A1 B1 B4 T1 T2 B2 B3 R1 P1 P2 U1);
+my @conncodes = qw(A1 B1 B4 T1 T2 T3 B2 B3 R1 P1 P2 U1);
my %conn = ();
map {$conn{$_} = $_} @conncodes;

@@ -355,6 +355,7 @@ $connmsg{"B1"} = "Bad Ber Tag Encountered";
$connmsg{"B4"} = "Server failed to flush data (response) back to Client";
$connmsg{"T1"} = "Idle Timeout Exceeded";
$connmsg{"T2"} = "IO Block Timeout Exceeded or NTSSL Timeout";
+$connmsg{"T3"} = "Paged Search Time Limit Exceeded";
$connmsg{"B2"} = "Ber Too Big";
$connmsg{"B3"} = "Ber Peek";
$connmsg{"R1"} = "Revents";
@@ -1723,6 +1724,10 @@ if ($usage =~ /j/i || $verb eq "yes"){
print "\n $recCount. You have some coonections that are being closed by the ioblocktimeout setting. You may want to increase the ioblocktimeout.\n";
$recCount++;
}
+ if (defined($conncount->{"T3"}) and $conncount->{"T3"} > 0){
+ print "\n $recCount. You have some connections that are being closed because a paged result search limit has been exceeded. You may want to increase the search time limit.\n";
+ $recCount++;
+ }
# compare binds to unbinds, if the difference is more than 30% of the binds, then report a issue
if (($bindCount - $unbindCount) > ($bindCount*.3)){
print "\n $recCount. You have a significant difference between binds and unbinds. You may want to investigate this difference.\n";
@@ -2366,6 +2371,7 @@ sub parseLineNormal
$brokenPipeCount++;
if (m/- T1/){ $hashes->{rc}->{"T1"}++; }
elsif (m/- T2/){ $hashes->{rc}->{"T2"}++; }
+ elsif (m/- T3/){ $hashes->{rc}->{"T3"}++; }
elsif (m/- A1/){ $hashes->{rc}->{"A1"}++; }
elsif (m/- B1/){ $hashes->{rc}->{"B1"}++; }
elsif (m/- B4/){ $hashes->{rc}->{"B4"}++; }
@@ -2381,6 +2387,7 @@ sub parseLineNormal
$connResetByPeerCount++;
if (m/- T1/){ $hashes->{src}->{"T1"}++; }
elsif (m/- T2/){ $hashes->{src}->{"T2"}++; }
+ elsif (m/- T3/){ $hashes->{src}->{"T3"}++; }
elsif (m/- A1/){ $hashes->{src}->{"A1"}++; }
elsif (m/- B1/){ $hashes->{src}->{"B1"}++; }
elsif (m/- B4/){ $hashes->{src}->{"B4"}++; }
@@ -2396,6 +2403,7 @@ sub parseLineNormal
$resourceUnavailCount++;
if (m/- T1/){ $hashes->{rsrc}->{"T1"}++; }
elsif (m/- T2/){ $hashes->{rsrc}->{"T2"}++; }
+ elsif (m/- T3/){ $hashes->{rsrc}->{"T3"}++; }
elsif (m/- A1/){ $hashes->{rsrc}->{"A1"}++; }
elsif (m/- B1/){ $hashes->{rsrc}->{"B1"}++; }
elsif (m/- B4/){ $hashes->{rsrc}->{"B4"}++; }
@@ -2494,6 +2502,20 @@ sub parseLineNormal
}
}
}
+ if (m/- T3/){
+ if ($_ =~ /conn= *([0-9A-Z]+)/i) {
+ $exc = "no";
+ $ip = getIPfromConn($1, $serverRestartCount);
+ for (my $xxx = 0; $xxx < $#excludeIP; $xxx++){
+ if ($ip eq $excludeIP[$xxx]){$exc = "yes";}
+ }
+ if ($exc ne "yes"){
+ $hashes->{T3}->{$ip}++;
+ $hashes->{conncount}->{"T3"}++;
+ $connCodeCount++;
+ }
+ }
+ }
if (m/- B2/){
if ($_ =~ /conn= *([0-9A-Z]+)/i) {
$exc = "no";
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 5a48aa66f..bb80dae36 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1599,9 +1599,9 @@ setup_pr_read_pds(Connection_Table *ct)
int add_fd = 1;
/* check timeout for PAGED RESULTS */
if (pagedresults_is_timedout_nolock(c)) {
- /* Exceeded the timelimit; disconnect the client */
+ /* Exceeded the paged search timelimit; disconnect the client */
disconnect_server_nomutex(c, c->c_connid, -1,
- SLAPD_DISCONNECT_IO_TIMEOUT,
+ SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
0);
connection_table_move_connection_out_of_active_list(ct,
c);
diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
index f7a31d728..c2d9e283b 100644
--- a/ldap/servers/slapd/disconnect_error_strings.h
+++ b/ldap/servers/slapd/disconnect_error_strings.h
@@ -27,6 +27,7 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
ER2(SLAPD_DISCONNECT_REVENTS, "R1")
ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
ER2(SLAPD_DISCONNECT_UNBIND, "U1")
ER2(SLAPD_DISCONNECT_POLL, "P2")
diff --git a/ldap/servers/slapd/disconnect_errors.h b/ldap/servers/slapd/disconnect_errors.h
index a0484f1c2..e118f674c 100644
--- a/ldap/servers/slapd/disconnect_errors.h
+++ b/ldap/servers/slapd/disconnect_errors.h
@@ -35,6 +35,6 @@
#define SLAPD_DISCONNECT_SASL_FAIL SLAPD_DISCONNECT_ERROR_BASE + 12
#define SLAPD_DISCONNECT_PROXY_INVALID_HEADER SLAPD_DISCONNECT_ERROR_BASE + 13
#define SLAPD_DISCONNECT_PROXY_UNKNOWN SLAPD_DISCONNECT_ERROR_BASE + 14
-
+#define SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT SLAPD_DISCONNECT_ERROR_BASE + 15

#endif /* __DISCONNECT_ERRORS_H_ */
--
2.45.0

@ -1,44 +0,0 @@
From a112394af3a20787755029804684d57a9c3ffa9a Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 21 Feb 2024 12:43:03 +0000
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap
(#6104)

Bug description: A recent addition to the connection disconnect error
messaging, conflicts with how errormap.c maps error codes/strings.

Fix description: errormap expects error codes/strings to be in ascending
order. Moved the new error code to the bottom of the list.

Relates: https://github.com/389ds/389-ds-base/issues/6103

Reviewed by: @droideck. @progier389 (Thank you)
---
ldap/servers/slapd/disconnect_error_strings.h | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
index c2d9e283b..f603a08ce 100644
--- a/ldap/servers/slapd/disconnect_error_strings.h
+++ b/ldap/servers/slapd/disconnect_error_strings.h
@@ -14,7 +14,8 @@
/* disconnect_error_strings.h
*
* Strings describing the errors used in logging the reason a connection
- * was closed.
+ * was closed. Ensure definitions are in the same order as the error codes
+ * defined in disconnect_errors.h
*/
#ifndef __DISCONNECT_ERROR_STRINGS_H_
#define __DISCONNECT_ERROR_STRINGS_H_
@@ -35,6 +36,6 @@ ER2(SLAPD_DISCONNECT_NTSSL_TIMEOUT, "T2")
ER2(SLAPD_DISCONNECT_SASL_FAIL, "S1")
ER2(SLAPD_DISCONNECT_PROXY_INVALID_HEADER, "P3")
ER2(SLAPD_DISCONNECT_PROXY_UNKNOWN, "P4")
-
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")

#endif /* __DISCONNECT_ERROR_STRINGS_H_ */
--
2.45.0

@ -1,30 +0,0 @@
From edd9abc8901604dde1d739d87ca2906734d53dd3 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Thu, 13 Jun 2024 13:35:09 +0200
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap

Description:
Remove duplicate SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT error code.

Fixes: https://github.com/389ds/389-ds-base/issues/6103

Reviewed by: @tbordaz (Thanks!)
---
ldap/servers/slapd/disconnect_error_strings.h | 1 -
1 file changed, 1 deletion(-)

diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
index f603a08ce..d49cc79a2 100644
--- a/ldap/servers/slapd/disconnect_error_strings.h
+++ b/ldap/servers/slapd/disconnect_error_strings.h
@@ -28,7 +28,6 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
ER2(SLAPD_DISCONNECT_REVENTS, "R1")
ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
-ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
ER2(SLAPD_DISCONNECT_UNBIND, "U1")
ER2(SLAPD_DISCONNECT_POLL, "P2")
--
2.45.0

@ -1,220 +0,0 @@
From 8cf981c00ae18d3efaeb10819282cd991621e9a2 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 22 May 2024 11:29:05 +0200
Subject: [PATCH] Issue 6172 - RFE: improve the performance of evaluation of
filter component when tested against a large valueset (like group members)
(#6173)

Bug description:
Before returning an entry (to a SRCH) the server checks that the entry matches the SRCH filter.
If a filter component (equality) is testing the value (ava) against a
large valueset (like uniquemember values), it takes a long time because
of the large number of values and required normalization of the values.
This can be improved taking benefit of sorted valueset. Those sorted
valueset were created to improve updates of large valueset (groups) but
at that time not implemented in SRCH path.

Fix description:
In case of LDAP_FILTER_EQUALITY component, the server can get
benefit of the sorted valuearray.
To limit the risk of regression, we use the sorted valuearray
only for the DN syntax attribute. Indeed the sorted valuearray was
designed for those type of attribute.
With those two limitations, there is no need of a toggle and
the call to plugin_call_syntax_filter_ava can be replaced by
a call to slapi_valueset_find.
In both cases, sorted valueset and plugin_call_syntax_filter_ava, ava and
values are normalized.
In sorted valueset, the values have been normalized to insert the index
in the sorted array and then comparison is done on normalized values.
In plugin_call_syntax_filter_ava, all values in valuearray (of valueset) are normalized
before comparison.

relates: #6172

Reviewed by: Pierre Rogier, Simon Pichugin (Big Thanks !!!)
---
.../tests/suites/filter/filter_test.py | 125 ++++++++++++++++++
ldap/servers/slapd/filterentry.c | 22 ++-
2 files changed, 146 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py
index d6bfa5a3b..4baaf04a7 100644
--- a/dirsrvtests/tests/suites/filter/filter_test.py
+++ b/dirsrvtests/tests/suites/filter/filter_test.py
@@ -9,7 +9,11 @@
import logging

import pytest
+import time
+from lib389.dirsrv_log import DirsrvAccessLog
from lib389.tasks import *
+from lib389.backend import Backends, Backend
+from lib389.dbgen import dbgen_users, dbgen_groups
from lib389.topologies import topology_st
from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX
from lib389.utils import *
@@ -304,6 +308,127 @@ def test_extended_search(topology_st):
ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
assert len(ents) == 1

+def test_match_large_valueset(topology_st):
+ """Test that when returning a big number of entries
+ and that we need to match the filter from a large valueset
+ we get benefit to use the sorted valueset
+
+ :id: 7db5aa88-50e0-4c31-85dd-1d2072cb674c
+
+ :setup: Standalone instance
+
+ :steps:
+ 1. Create a users and groups backends and tune them
+ 2. Generate a test ldif (2k users and 1K groups with all users)
+ 3. Import test ldif file using Offline import (ldif2db).
+ 4. Prim the 'groups' entrycache with a "fast" search
+ 5. Search the 'groups' with a difficult matching value
+ 6. check that etime from step 5 is less than a second
+
+ :expectedresults:
+ 1. Create a users and groups backends should PASS
+ 2. Generate LDIF should PASS.
+ 3. Offline import should PASS.
+ 4. Priming should PASS.
+ 5. Performance search should PASS.
+ 6. Etime of performance search should PASS.
+ """
+
+ log.info('Running test_match_large_valueset...')
+ #
+ # Test online/offline LDIF imports
+ #
+ inst = topology_st.standalone
+ inst.start()
+ backends = Backends(inst)
+ users_suffix = "ou=users,%s" % DEFAULT_SUFFIX
+ users_backend = 'users'
+ users_ldif = 'users_import.ldif'
+ groups_suffix = "ou=groups,%s" % DEFAULT_SUFFIX
+ groups_backend = 'groups'
+ groups_ldif = 'groups_import.ldif'
+ groups_entrycache = '200000000'
+ users_number = 2000
+ groups_number = 1000
+
+
+ # For priming the cache we just want to be fast
+ # taking the first value in the valueset is good
+ # whether the valueset is sorted or not
+ priming_user_rdn = "user0001"
+
+ # For performance testing, this is important to use
+ # user1000 rather then user0001
+ # Because user0001 is the first value in the valueset
+ # whether we use the sorted valuearray or non sorted
+ # valuearray the performance will be similar.
+ # With middle value user1000, the performance boost of
+ # the sorted valuearray will make the difference.
+ perf_user_rdn = "user1000"
+
+ # Step 1. Prepare the backends and tune the groups entrycache
+ try:
+ be_users = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': users_suffix, 'name': users_backend})
+ be_groups = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': groups_suffix, 'name': groups_backend})
+
+ # set the entry cache to 200Mb as the 1K groups of 2K users require at least 170Mb
+ be_groups.replace('nsslapd-cachememsize', groups_entrycache)
+ except:
+ raise
+
+ # Step 2. Generate a test ldif (10k users entries)
+ log.info("Generating users LDIF...")
+ ldif_dir = inst.get_ldif_dir()
+ users_import_ldif = "%s/%s" % (ldif_dir, users_ldif)
+ groups_import_ldif = "%s/%s" % (ldif_dir, groups_ldif)
+ dbgen_users(inst, users_number, users_import_ldif, suffix=users_suffix, generic=True, parent=users_suffix)
+
+ # Generate a test ldif (800 groups with 10k members) that fit in 700Mb entry cache
+ props = {
+ "name": "group",
+ "suffix": groups_suffix,
+ "parent": groups_suffix,
+ "number": groups_number,
+ "numMembers": users_number,
+ "createMembers": False,
+ "memberParent": users_suffix,
+ "membershipAttr": "uniquemember",
+ }
+ dbgen_groups(inst, groups_import_ldif, props)
+
+ # Step 3. Do the both offline imports
+ inst.stop()
+ if not inst.ldif2db(users_backend, None, None, None, users_import_ldif):
+ log.fatal('test_basic_import_export: Offline users import failed')
+ assert False
+ if not inst.ldif2db(groups_backend, None, None, None, groups_import_ldif):
+ log.fatal('test_basic_import_export: Offline groups import failed')
+ assert False
+ inst.start()
+
+ # Step 4. first prime the cache
+ # Just request the 'DN'. We are interested by the time of matching not by the time of transfert
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (priming_user_rdn, users_suffix), ['dn'])
+ assert len(entries) == groups_number
+
+ # Step 5. Now do the real performance checking it should take less than a second
+ # Just request the 'DN'. We are interested by the time of matching not by the time of transfert
+ search_start = time.time()
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (perf_user_rdn, users_suffix), ['dn'])
+ duration = time.time() - search_start
+ log.info("Duration of the search was %f", duration)
+
+ # Step 6. Gather the etime from the access log
+ inst.stop()
+ access_log = DirsrvAccessLog(inst)
+ search_result = access_log.match(".*RESULT err=0 tag=101 nentries=%s.*" % groups_number)
+ log.info("Found patterns are %s", search_result[0])
+ log.info("Found patterns are %s", search_result[1])
+ etime = float(search_result[1].split('etime=')[1])
+ log.info("Duration of the search from access log was %f", etime)
+ assert len(entries) == groups_number
+ assert (etime < 1)
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/filterentry.c b/ldap/servers/slapd/filterentry.c
index fd8fdda9f..cae5c7edc 100644
--- a/ldap/servers/slapd/filterentry.c
+++ b/ldap/servers/slapd/filterentry.c
@@ -296,7 +296,27 @@ test_ava_filter(
rc = -1;
for (; a != NULL; a = a->a_next) {
if (slapi_attr_type_cmp(ava->ava_type, a->a_type, SLAPI_TYPE_CMP_SUBTYPE) == 0) {
- rc = plugin_call_syntax_filter_ava(a, ftype, ava);
+ if ((ftype == LDAP_FILTER_EQUALITY) &&
+ (slapi_attr_is_dn_syntax_type(a->a_type))) {
+ /* This path is for a performance improvement */
+
+ /* In case of equality filter we can get benefit of the
+ * sorted valuearray (from valueset).
+ * This improvement is limited to DN syntax attributes for
+ * which the sorted valueset was designed.
+ */
+ Slapi_Value *sval = NULL;
+ sval = slapi_value_new_berval(&ava->ava_value);
+ if (slapi_valueset_find((const Slapi_Attr *)a, &a->a_present_values, sval)) {
+ rc = 0;
+ }
+ slapi_value_free(&sval);
+ } else {
+ /* When sorted valuearray optimization cannot be used
+ * lets filter the value according to its syntax
+ */
+ rc = plugin_call_syntax_filter_ava(a, ftype, ava);
+ }
if (rc == 0) {
break;
}
--
2.46.0

@ -1,163 +0,0 @@
From 57051154bafaf50b83fc27dadbd89a49fd1c8c36 Mon Sep 17 00:00:00 2001
From: Pierre Rogier <progier@redhat.com>
Date: Fri, 14 Jun 2024 13:27:10 +0200
Subject: [PATCH] Security fix for CVE-2024-5953

Description:
A denial of service vulnerability was found in the 389 Directory Server.
This issue may allow an authenticated user to cause a server denial
of service while attempting to log in with a user with a malformed hash
in their password.

Fix Description:
To prevent buffer overflow when a bind request is processed, the bind fails
if the hash size is not coherent without even attempting to process further
the hashed password.

References:
- https://nvd.nist.gov/vuln/detail/CVE-2024-5953
- https://access.redhat.com/security/cve/CVE-2024-5953
- https://bugzilla.redhat.com/show_bug.cgi?id=2292104
---
.../tests/suites/password/regression_test.py | 54 ++++++++++++++++++-
ldap/servers/plugins/pwdstorage/md5_pwd.c | 9 +++-
ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c | 6 +++
3 files changed, 66 insertions(+), 3 deletions(-)

diff --git a/dirsrvtests/tests/suites/password/regression_test.py b/dirsrvtests/tests/suites/password/regression_test.py
index 8f1facb6d..1fa581643 100644
--- a/dirsrvtests/tests/suites/password/regression_test.py
+++ b/dirsrvtests/tests/suites/password/regression_test.py
@@ -7,12 +7,14 @@
#
import pytest
import time
+import glob
+import base64
from lib389._constants import PASSWORD, DN_DM, DEFAULT_SUFFIX
from lib389._constants import SUFFIX, PASSWORD, DN_DM, DN_CONFIG, PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DEFAULT_CHANGELOG_DB
from lib389 import Entry
from lib389.topologies import topology_m1 as topo_supplier
-from lib389.idm.user import UserAccounts
-from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer
+from lib389.idm.user import UserAccounts, UserAccount
+from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer, ds_supports_new_changelog
from lib389.topologies import topology_st as topo
from lib389.idm.organizationalunit import OrganizationalUnits

@@ -39,6 +41,13 @@ TEST_PASSWORDS += ['CNpwtest1ZZZZ', 'ZZZZZCNpwtest1',
TEST_PASSWORDS2 = (
'CN12pwtest31', 'SN3pwtest231', 'UID1pwtest123', 'MAIL2pwtest12@redhat.com', '2GN1pwtest123', 'People123')

+SUPPORTED_SCHEMES = (
+ "{SHA}", "{SSHA}", "{SHA256}", "{SSHA256}",
+ "{SHA384}", "{SSHA384}", "{SHA512}", "{SSHA512}",
+ "{crypt}", "{NS-MTA-MD5}", "{clear}", "{MD5}",
+ "{SMD5}", "{PBKDF2_SHA256}", "{PBKDF2_SHA512}",
+ "{GOST_YESCRYPT}", "{PBKDF2-SHA256}", "{PBKDF2-SHA512}" )
+
def _check_unhashed_userpw(inst, user_dn, is_present=False):
"""Check if unhashed#user#password attribute is present or not in the changelog"""
unhashed_pwd_attribute = 'unhashed#user#password'
@@ -319,6 +328,47 @@ def test_unhashed_pw_switch(topo_supplier):
# Add debugging steps(if any)...
pass

+@pytest.mark.parametrize("scheme", SUPPORTED_SCHEMES )
+def test_long_hashed_password(topo, create_user, scheme):
+ """Check that hashed password with very long value does not cause trouble
+
+ :id: 252a1f76-114b-11ef-8a7a-482ae39447e5
+ :setup: standalone Instance
+ :parametrized: yes
+ :steps:
+ 1. Add a test user user
+ 2. Set a long password with requested scheme
+ 3. Bind on that user using a wrong password
+ 4. Check that instance is still alive
+ 5. Remove the added user
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Should get ldap.INVALID_CREDENTIALS exception
+ 4. Success
+ 5. Success
+ """
+ inst = topo.standalone
+ inst.simple_bind_s(DN_DM, PASSWORD)
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+ # Make sure that server is started as this test may crash it
+ inst.start()
+ # Adding Test user (It may already exists if previous test failed)
+ user2 = UserAccount(inst, dn='uid=test_user_1002,ou=People,dc=example,dc=com')
+ if not user2.exists():
+ user2 = users.create_test_user(uid=1002, gid=2002)
+ # Setting hashed password
+ passwd = 'A'*4000
+ hashed_passwd = scheme.encode('utf-8') + base64.b64encode(passwd.encode('utf-8'))
+ user2.replace('userpassword', hashed_passwd)
+ # Bind on that user using a wrong password
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ conn = user2.bind(PASSWORD)
+ # Check that instance is still alive
+ assert inst.status()
+ # Remove the added user
+ user2.delete()
+

if __name__ == '__main__':
# Run isolated
diff --git a/ldap/servers/plugins/pwdstorage/md5_pwd.c b/ldap/servers/plugins/pwdstorage/md5_pwd.c
index 1e2cf58e7..b9a48d5ca 100644
--- a/ldap/servers/plugins/pwdstorage/md5_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/md5_pwd.c
@@ -37,6 +37,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
unsigned char hash_out[MD5_HASH_LEN];
unsigned char b2a_out[MD5_HASH_LEN * 2]; /* conservative */
SECItem binary_item;
+ size_t dbpwd_len = strlen(dbpwd);

ctx = PK11_CreateDigestContext(SEC_OID_MD5);
if (ctx == NULL) {
@@ -45,6 +46,12 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
goto loser;
}

+ if (dbpwd_len >= sizeof b2a_out) {
+ slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
+ "The hashed password stored in the user entry is longer than any valid md5 hash");
+ goto loser;
+ }
+
/* create the hash */
PK11_DigestBegin(ctx);
PK11_DigestOp(ctx, (const unsigned char *)userpwd, strlen(userpwd));
@@ -57,7 +64,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
bver = NSSBase64_EncodeItem(NULL, (char *)b2a_out, sizeof b2a_out, &binary_item);
/* bver points to b2a_out upon success */
if (bver) {
- rc = slapi_ct_memcmp(bver, dbpwd, strlen(dbpwd));
+ rc = slapi_ct_memcmp(bver, dbpwd, dbpwd_len);
} else {
slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
"Could not base64 encode hashed value for password compare");
diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
index dcac4fcdd..82b8c9501 100644
--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
@@ -255,6 +255,12 @@ pbkdf2_sha256_pw_cmp(const char *userpwd, const char *dbpwd)
passItem.data = (unsigned char *)userpwd;
passItem.len = strlen(userpwd);

+ if (pwdstorage_base64_decode_len(dbpwd, dbpwd_len) > sizeof dbhash) {
+ /* Hashed value is too long and cannot match any value generated by pbkdf2_sha256_hash */
+ slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value. (hashed value is too long)\n");
+ return result;
+ }
+
/* Decode the DBpwd to bytes from b64 */
if (PL_Base64Decode(dbpwd, dbpwd_len, dbhash) == NULL) {
slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value\n");
--
2.46.0

@ -1,178 +0,0 @@
|
||||
From e8a5b1deef1b455aafecb71efc029d2407b1b06f Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Tue, 16 Jul 2024 08:32:21 -0700
|
||||
Subject: [PATCH] Issue 4778 - Add COMPACT_CL5 task to dsconf replication
|
||||
(#6260)
|
||||
|
||||
Description: In 1.4.3, the changelog is not part of a backend.
|
||||
It can be compacted with nsds5task: CAMPACT_CL5 as part of the replication entry.
|
||||
Add the task as a compact-changelog command under the dsconf replication tool.
|
||||
Add tests for the feature and fix old tests.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/4778
|
||||
|
||||
Reviewed by: @progier389 (Thanks!)
|
||||
---
|
||||
.../tests/suites/config/compact_test.py | 36 ++++++++++++++---
|
||||
src/lib389/lib389/cli_conf/replication.py | 10 +++++
|
||||
src/lib389/lib389/replica.py | 40 +++++++++++++++++++
|
||||
3 files changed, 81 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/config/compact_test.py b/dirsrvtests/tests/suites/config/compact_test.py
|
||||
index 317258d0e..31d98d10c 100644
|
||||
--- a/dirsrvtests/tests/suites/config/compact_test.py
|
||||
+++ b/dirsrvtests/tests/suites/config/compact_test.py
|
||||
@@ -13,14 +13,14 @@ import time
|
||||
import datetime
|
||||
from lib389.tasks import DBCompactTask
|
||||
from lib389.backend import DatabaseConfig
|
||||
-from lib389.replica import Changelog5
|
||||
+from lib389.replica import Changelog5, Replicas
|
||||
from lib389.topologies import topology_m1 as topo
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def test_compact_db_task(topo):
|
||||
- """Specify a test case purpose or name here
|
||||
+ """Test compaction of database
|
||||
|
||||
:id: 1b3222ef-a336-4259-be21-6a52f76e1859
|
||||
:setup: Standalone Instance
|
||||
@@ -48,7 +48,7 @@ def test_compact_db_task(topo):
|
||||
|
||||
|
||||
def test_compaction_interval_and_time(topo):
|
||||
- """Specify a test case purpose or name here
|
||||
+ """Test compaction interval and time for database and changelog
|
||||
|
||||
:id: f361bee9-d7e7-4569-9255-d7b60dd9d92e
|
||||
:setup: Supplier Instance
|
||||
@@ -95,10 +95,36 @@ def test_compaction_interval_and_time(topo):
|
||||
|
||||
# Check compaction occurred as expected
|
||||
time.sleep(45)
|
||||
- assert not inst.searchErrorsLog("Compacting databases")
|
||||
+ assert not inst.searchErrorsLog("compacting replication changelogs")
|
||||
|
||||
time.sleep(90)
|
||||
- assert inst.searchErrorsLog("Compacting databases")
|
||||
+ assert inst.searchErrorsLog("compacting replication changelogs")
|
||||
+ inst.deleteErrorLogs(restart=False)
|
||||
+
|
||||
+
|
||||
+def test_compact_cl5_task(topo):
|
||||
+ """Test compaction of changelog5 database
|
||||
+
|
||||
+ :id: aadfa9f7-73c0-463a-912c-0a29aa1f8167
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Run compaction task
|
||||
+ 2. Check errors log to show task was run
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ """
|
||||
+ inst = topo.ms["supplier1"]
|
||||
+
|
||||
+ replicas = Replicas(inst)
|
||||
+ replicas.compact_changelog(log=log)
|
||||
+
|
||||
+ # Check compaction occurred as expected, polling once per second instead of one long sleep
|
||||
+ for _ in range(5):
|
||||
+ time.sleep(1)
|
||||
+ if inst.searchErrorsLog("compacting replication changelogs"):
|
||||
+ break
|
||||
+ assert inst.searchErrorsLog("compacting replication changelogs")
|
||||
inst.deleteErrorLogs(restart=False)
|
||||
|
||||
|
||||
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
|
||||
index 352c0ee5b..ccc394255 100644
|
||||
--- a/src/lib389/lib389/cli_conf/replication.py
|
||||
+++ b/src/lib389/lib389/cli_conf/replication.py
|
||||
@@ -1199,6 +1199,11 @@ def restore_cl_dir(inst, basedn, log, args):
|
||||
replicas.restore_changelog(replica_roots=args.REPLICA_ROOTS, log=log)
|
||||
|
||||
|
||||
+def compact_cl5(inst, basedn, log, args):
|
||||
+ replicas = Replicas(inst)
|
||||
+ replicas.compact_changelog(replica_roots=args.REPLICA_ROOTS, log=log)
|
||||
+
|
||||
+
|
||||
def create_parser(subparsers):
|
||||
|
||||
############################################
|
||||
@@ -1326,6 +1331,11 @@ def create_parser(subparsers):
|
||||
help="Specify one replica root whose changelog you want to restore. "
|
||||
"The replica root will be consumed from the LDIF file name if the option is omitted.")
|
||||
|
||||
+ compact_cl = repl_subcommands.add_parser('compact-changelog', help='Compact the changelog database')
|
||||
+ compact_cl.set_defaults(func=compact_cl5)
|
||||
+ compact_cl.add_argument('REPLICA_ROOTS', nargs="+",
|
||||
+ help="Specify replica roots whose changelog you want to compact.")
|
||||
+
|
||||
restore_changelogdir = restore_subcommands.add_parser('from-changelogdir', help='Restore LDIF files from changelogdir.')
|
||||
restore_changelogdir.set_defaults(func=restore_cl_dir)
|
||||
restore_changelogdir.add_argument('REPLICA_ROOTS', nargs="+",
|
||||
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
|
||||
index 94e1fdad5..1f321972d 100644
|
||||
--- a/src/lib389/lib389/replica.py
|
||||
+++ b/src/lib389/lib389/replica.py
|
||||
@@ -1648,6 +1648,11 @@ class Replica(DSLdapObject):
|
||||
"""
|
||||
self.replace('nsds5task', 'ldif2cl')
|
||||
|
||||
+ def begin_task_compact_cl5(self):
|
||||
+ """Begin COMPACT_CL5 task
|
||||
+ """
|
||||
+ self.replace('nsds5task', 'COMPACT_CL5')
|
||||
+
|
||||
def get_suffix(self):
|
||||
"""Return the suffix
|
||||
"""
|
||||
@@ -1829,6 +1834,41 @@ class Replicas(DSLdapObjects):
|
||||
log.error(f"Changelog LDIF for '{repl_root}' was not found")
|
||||
continue
|
||||
|
||||
+ def compact_changelog(self, replica_roots=[], log=None):
|
||||
+ """Compact Directory Server replication changelog
|
||||
+
|
||||
+ :param replica_roots: Replica suffixes that need to be processed (and optional LDIF file path)
|
||||
+ :type replica_roots: list of str
|
||||
+ :param log: The logger object
|
||||
+ :type log: logger
|
||||
+ """
|
||||
+
|
||||
+ if log is None:
|
||||
+ log = self._log
|
||||
+
|
||||
+ # Check if the changelog entry exists
|
||||
+ try:
|
||||
+ cl = Changelog5(self._instance)
|
||||
+ cl.get_attr_val_utf8_l("nsslapd-changelogdir")
|
||||
+ except ldap.NO_SUCH_OBJECT:
|
||||
+ raise ValueError("Changelog entry was not found. Probably, the replication is not enabled on this instance")
|
||||
+
|
||||
+ # Get all the replicas on the server if --replica-roots option is not specified
|
||||
+ repl_roots = []
|
||||
+ if not replica_roots:
|
||||
+ for replica in self.list():
|
||||
+ repl_roots.append(replica.get_attr_val_utf8("nsDS5ReplicaRoot"))
|
||||
+ else:
|
||||
+ for repl_root in replica_roots:
|
||||
+ repl_roots.append(repl_root)
|
||||
+
|
||||
+ # Dump the changelog for the replica
|
||||
+
|
||||
+ # Dump the changelog for the replica
|
||||
+ for repl_root in repl_roots:
|
||||
+ replica = self.get(repl_root)
|
||||
+ replica.begin_task_compact_cl5()
|
||||
+
|
||||
|
||||
class BootstrapReplicationManager(DSLdapObject):
|
||||
"""A Replication Manager credential for bootstrapping the repl process.
|
||||
--
|
||||
2.47.0
|
||||
|
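
For context, the new subcommand and API added by the patch above might be driven as follows; the instance name, suffix, and credentials are hypothetical, and 'inst' is assumed to be an already-opened lib389 DirSrv connection to a supplier:

    # CLI sketch:
    #   dsconf -D "cn=Directory Manager" supplier1 \
    #       replication compact-changelog dc=example,dc=com
    import logging
    from lib389.replica import Replicas

    log = logging.getLogger(__name__)

    def compact_all_changelogs(inst):
        # An empty replica_roots list means "all replicas on the server",
        # and each replica gets nsds5task: COMPACT_CL5 queued on it.
        Replicas(inst).compact_changelog(replica_roots=[], log=log)
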
@ -1,55 +0,0 @@
|
||||
From d1cd9a5675e2953b7c8034ebb87a434cdd3ce0c3 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 2 Dec 2024 17:18:32 +0100
|
||||
Subject: [PATCH] Issue 6417 - If an entry RDN is identical to the suffix, then
|
||||
Entryrdn gets broken during a reindex (#6418)
|
||||
|
||||
Bug description:
|
||||
During a reindex, the entryrdn index is built at the end from
|
||||
each entry in the suffix.
|
||||
If one entry has an RDN that is identical to the suffix DN,
|
||||
then entryrdn_lookup_dn may erroneously return the suffix DN
|
||||
as the DN of the entry.
|
||||
|
||||
Fix description:
|
||||
When the lookup entry has no parent (because the index is being
|
||||
rebuilt), the loop looks up the entry using the RDN.
|
||||
If this RDN matches the suffix DN, then it exits from the loop
|
||||
with the suffix DN.
|
||||
Before exiting, it checks that the original lookup entryID
|
||||
is equal to the suffix entryID. If it does not match,
|
||||
the function fails and the DN for the entry is then
|
||||
built from id2entry
|
||||
|
||||
fixes: #6417
|
||||
|
||||
Reviewed by: Pierre Rogier, Simon Pichugin (Thanks !!!)
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 11 ++++++++++-
|
||||
1 file changed, 10 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 5797dd779..83b041192 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1224,7 +1224,16 @@ entryrdn_lookup_dn(backend *be,
|
||||
}
|
||||
goto bail;
|
||||
}
|
||||
- maybesuffix = 1;
|
||||
+ if (workid == 1) {
|
||||
+ /* The loop (workid) iterates from the starting 'id'
|
||||
+ * up to the suffix ID (i.e. '1').
|
||||
+ * A corner case (#6417) is if an entry, on the path
|
||||
+ * 'id' -> suffix, has the same RDN as the suffix.
|
||||
+ * To avoid erroneously believing the loop has hit the suffix,
|
||||
+ * we need to check that 'workid' is '1' (suffix)
|
||||
+ */
|
||||
+ maybesuffix = 1;
|
||||
+ }
|
||||
} else {
|
||||
_entryrdn_cursor_print_error("entryrdn_lookup_dn",
|
||||
key.data, data.size, data.ulen, rc);
|
||||
--
|
||||
2.48.0
|
||||
|
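
A conceptual sketch of the entryrdn walk this patch hardens, with hypothetical data: the lookup follows parent IDs from an entry up to the suffix, and a node is treated as the suffix only when its ID equals the suffix ID, never merely because its RDN text matches the suffix RDN:

    SUFFIX_ID = 1
    PARENTS = {4: 3, 3: 1}   # child id -> parent id
    RDNS = {1: "dc=example", 3: "ou=people", 4: "dc=example"}  # id 4 reuses the suffix RDN

    def lookup_dn(entry_id):
        comps = []
        workid = entry_id
        while True:
            comps.append(RDNS[workid])
            if workid == SUFFIX_ID:   # compare IDs, never RDN strings
                return ",".join(comps)
            workid = PARENTS[workid]

    assert lookup_dn(4) == "dc=example,ou=people,dc=example"
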
@ -1,267 +0,0 @@
|
||||
From 9b2fc77a36156ea987dcea6e2043f8e4c4a6b259 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Tue, 18 Jun 2024 14:21:07 +0200
|
||||
Subject: [PATCH] Issue 6224 - d2entry - Could not open id2entry err 0 - at
|
||||
startup when having sub-suffixes (#6225)
|
||||
|
||||
Problem: "id2entry - Could not open id2entry err 0" is logged at startup when sub-suffixes are present
|
||||
Reason: The slapi_exist_referral internal search accesses a backend that is not yet started.
|
||||
Solution: Limit the internal search to a single backend
|
||||
|
||||
Issue: #6224
|
||||
|
||||
Reviewed by: @droideck Thanks!
|
||||
|
||||
(cherry picked from commit 796f703021e961fdd8cbc53b4ad4e20258af0e96)
|
||||
---
|
||||
.../tests/suites/ds_logs/ds_logs_test.py | 1 +
|
||||
.../suites/mapping_tree/regression_test.py | 161 +++++++++++++++++-
|
||||
ldap/servers/slapd/backend.c | 7 +-
|
||||
3 files changed, 159 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 812936c62..84a9c6ec8 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,6 +1222,7 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+<<<<<<< HEAD
|
||||
def test_referral_subsuffix(topology_st, request):
|
||||
"""Test the results of an inverted parent suffix definition in the configuration.
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/mapping_tree/regression_test.py b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
index 99d4a1d5f..689ff9f59 100644
|
||||
--- a/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
@@ -11,10 +11,14 @@ import ldap
|
||||
import logging
|
||||
import os
|
||||
import pytest
|
||||
+import time
|
||||
from lib389.backend import Backends, Backend
|
||||
+from lib389._constants import HOST_STANDALONE, PORT_STANDALONE, DN_DM, PW_DM
|
||||
from lib389.dbgen import dbgen_users
|
||||
from lib389.mappingTree import MappingTrees
|
||||
from lib389.topologies import topology_st
|
||||
+from lib389.referral import Referrals, Referral
|
||||
+
|
||||
|
||||
try:
|
||||
from lib389.backend import BackendSuffixView
|
||||
@@ -31,14 +35,26 @@ else:
|
||||
logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
+PARENT_SUFFIX = "dc=parent"
|
||||
+CHILD1_SUFFIX = f"dc=child1,{PARENT_SUFFIX}"
|
||||
+CHILD2_SUFFIX = f"dc=child2,{PARENT_SUFFIX}"
|
||||
+
|
||||
+PARENT_REFERRAL_DN = f"cn=ref,ou=People,{PARENT_SUFFIX}"
|
||||
+CHILD1_REFERRAL_DN = f"cn=ref,ou=people,{CHILD1_SUFFIX}"
|
||||
+CHILD2_REFERRAL_DN = f"cn=ref,ou=people,{CHILD2_SUFFIX}"
|
||||
+
|
||||
+REFERRAL_CHECK_PERIOD = 7
|
||||
+
|
||||
+
|
||||
+
|
||||
BESTRUCT = [
|
||||
- { "bename" : "parent", "suffix": "dc=parent" },
|
||||
- { "bename" : "child1", "suffix": "dc=child1,dc=parent" },
|
||||
- { "bename" : "child2", "suffix": "dc=child2,dc=parent" },
|
||||
+ { "bename" : "parent", "suffix": PARENT_SUFFIX },
|
||||
+ { "bename" : "child1", "suffix": CHILD1_SUFFIX },
|
||||
+ { "bename" : "child2", "suffix": CHILD2_SUFFIX },
|
||||
]
|
||||
|
||||
|
||||
-@pytest.fixture(scope="function")
|
||||
+@pytest.fixture(scope="module")
|
||||
def topo(topology_st, request):
|
||||
bes = []
|
||||
|
||||
@@ -50,6 +66,9 @@ def topo(topology_st, request):
|
||||
request.addfinalizer(fin)
|
||||
|
||||
inst = topology_st.standalone
|
||||
+ # Reduce nsslapd-referral-check-period to accelerate test
|
||||
+ topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK_PEDIOD))
|
||||
+
|
||||
ldif_files = {}
|
||||
for d in BESTRUCT:
|
||||
bename = d['bename']
|
||||
@@ -76,14 +95,13 @@ def topo(topology_st, request):
|
||||
inst.start()
|
||||
return topology_st
|
||||
|
||||
-# Parameters for test_change_repl_passwd
|
||||
-EXPECTED_ENTRIES = (("dc=parent", 39), ("dc=child1,dc=parent", 13), ("dc=child2,dc=parent", 13))
|
||||
+# Parameters for test_sub_suffixes
|
||||
@pytest.mark.parametrize(
|
||||
"orphan_param",
|
||||
[
|
||||
- pytest.param( ( True, { "dc=parent": 2, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-true" ),
|
||||
- pytest.param( ( False, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-false" ),
|
||||
- pytest.param( ( None, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="no-orphan" ),
|
||||
+ pytest.param( ( True, { PARENT_SUFFIX: 2, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="orphan-is-true" ),
|
||||
+ pytest.param( ( False, { PARENT_SUFFIX: 3, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="orphan-is-false" ),
|
||||
+ pytest.param( ( None, { PARENT_SUFFIX: 3, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="no-orphan" ),
|
||||
],
|
||||
)
|
||||
|
||||
@@ -128,3 +146,128 @@ def test_sub_suffixes(topo, orphan_param):
|
||||
log.info('Test PASSED')
|
||||
|
||||
|
||||
+def test_one_level_search_on_sub_suffixes(topo):
|
||||
+ """ Perform one level scoped search accross suffix and sub-suffix
|
||||
+
|
||||
+ :id: 92f3139e-280e-11ef-a989-482ae39447e5
|
||||
+ :feature: mapping-tree
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=child2,dc=parent
|
||||
+ :steps:
|
||||
+ 1. Perform a ONE LEVEL search on dc=parent
|
||||
+ 2. Check that all expected entries have been returned
|
||||
+ 3. Check that only the expected entries have been returned
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. each expected dn should be in the result set
|
||||
+ 3. Number of returned entries should be the same as the number of expected entries
|
||||
+ """
|
||||
+ expected_dns = ( 'dc=child1,dc=parent',
|
||||
+ 'dc=child2,dc=parent',
|
||||
+ 'ou=accounting,dc=parent',
|
||||
+ 'ou=product development,dc=parent',
|
||||
+ 'ou=product testing,dc=parent',
|
||||
+ 'ou=human resources,dc=parent',
|
||||
+ 'ou=payroll,dc=parent',
|
||||
+ 'ou=people,dc=parent',
|
||||
+ 'ou=groups,dc=parent', )
|
||||
+ entries = topo.standalone.search_s("dc=parent", ldap.SCOPE_ONELEVEL, "(objectClass=*)",
|
||||
+ attrlist=("dc","ou"), escapehatch='i am sure')
|
||||
+ log.info(f'one level search on dc=parent returned the following entries: {entries}')
|
||||
+ dns = [ entry.dn for entry in entries ]
|
||||
+ for dn in expected_dns:
|
||||
+ assert dn in dns
|
||||
+ assert len(entries) == len(expected_dns)
|
||||
+
|
||||
+
|
||||
+def test_sub_suffixes_errlog(topo):
|
||||
+ """ check the entries found on suffix/sub-suffix
|
||||
+ used int
|
||||
+
|
||||
+ :id: 1db9d52e-28de-11ef-b286-482ae39447e5
|
||||
+ :feature: mapping-tree
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=child2,dc=parent
|
||||
+ :steps:
|
||||
+ 1. Check that id2entry error message is not in the error log.
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ assert not inst.searchErrorsLog('id2entry - Could not open id2entry err 0')
|
||||
+
|
||||
+
|
||||
+# Parameters for test_referral_subsuffix:
|
||||
+# a tuple pair containing:
|
||||
+# - list of referral dn that must be created
|
||||
+# - dict of searches basedn: expected_number_of_referrals
|
||||
+@pytest.mark.parametrize(
|
||||
+ "parameters",
|
||||
+ [
|
||||
+ pytest.param( ((PARENT_REFERRAL_DN, CHILD1_REFERRAL_DN), {PARENT_SUFFIX: 2, CHILD1_SUFFIX:1, CHILD2_SUFFIX:0}), id="Both"),
|
||||
+ pytest.param( ((PARENT_REFERRAL_DN,), {PARENT_SUFFIX: 1, CHILD1_SUFFIX:0, CHILD2_SUFFIX:0}) , id="Parent"),
|
||||
+ pytest.param( ((CHILD1_REFERRAL_DN,), {PARENT_SUFFIX: 1, CHILD1_SUFFIX:1, CHILD2_SUFFIX:0}) , id="Child"),
|
||||
+ pytest.param( ((), {PARENT_SUFFIX: 0, CHILD1_SUFFIX:0, CHILD2_SUFFIX:0}), id="None"),
|
||||
+ ])
|
||||
+
|
||||
+def test_referral_subsuffix(topo, request, parameters):
|
||||
+ """Test the results of an inverted parent suffix definition in the configuration.
|
||||
+
|
||||
+ For more details see:
|
||||
+ https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html
|
||||
+
|
||||
+ :id: 4e111a22-2a5d-11ef-a890-482ae39447e5
|
||||
+ :feature: referrals
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=child2,dc=parent
|
||||
+
|
||||
+ :parametrized: yes
|
||||
+ :steps:
|
||||
+ refs, searches = parameters (the referrals to create, and the per-basedn expected referral counts)
|
||||
+
|
||||
+ 1. Create the referrals according to the current parameter
|
||||
+ 2. Wait enough time so they get detected
|
||||
+ 3. For each search base dn, in the current parameter, perform the two following steps
|
||||
+ 4. In 3. loop: Perform a search with provided base dn
|
||||
+ 5. In 3. loop: Check that the number of returned referrals is the expected one.
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ all steps succeeds
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info('Deleting all referrals')
|
||||
+ for ref in Referrals(inst, PARENT_SUFFIX).list():
|
||||
+ ref.delete()
|
||||
+
|
||||
+ # Set cleanup callback
|
||||
+ if DEBUGGING:
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Remove all referrals
|
||||
+ fin()
|
||||
+ # Add requested referrals
|
||||
+ for dn in parameters[0]:
|
||||
+ refs = Referral(inst, dn=dn)
|
||||
+ refs.create(basedn=dn, properties={ 'cn': 'ref', 'ref': f'ldap://remote/{dn}'})
|
||||
+ # Wait that the internal search detects the referrals
|
||||
+ time.sleep(REFERRAL_CHECK_PERIOD + 1)
|
||||
+ # Open a test connection
|
||||
+ ldc = ldap.initialize(f"ldap://{HOST_STANDALONE}:{PORT_STANDALONE}")
|
||||
+ ldc.set_option(ldap.OPT_REFERRALS,0)
|
||||
+ ldc.simple_bind_s(DN_DM,PW_DM)
|
||||
+
|
||||
+ # For each search base dn:
|
||||
+ for basedn,nbref in parameters[1].items():
|
||||
+ log.info(f"Referrals are: {parameters[0]}")
|
||||
+ # Perform a search with provided base dn
|
||||
+ result = ldc.search_s(basedn, ldap.SCOPE_SUBTREE, filterstr="(ou=People)")
|
||||
+ found_dns = [ dn for dn,entry in result if dn is not None ]
|
||||
+ found_refs = [ entry for dn,entry in result if dn is None ]
|
||||
+ log.info(f"Search on {basedn} returned {found_dns} and {found_refs}")
|
||||
+ # Check that the number of returned referrals is the expected one.
|
||||
+ log.info(f"Search returned {len(found_refs)} referrals. {nbref} are expected.")
|
||||
+ assert len(found_refs) == nbref
|
||||
+ ldc.unbind()
|
||||
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
|
||||
index 498f683b1..f86b0b9b6 100644
|
||||
--- a/ldap/servers/slapd/backend.c
|
||||
+++ b/ldap/servers/slapd/backend.c
|
||||
@@ -230,12 +230,17 @@ slapi_exist_referral(Slapi_Backend *be)
|
||||
|
||||
/* search for ("smart") referral entries */
|
||||
search_pb = slapi_pblock_new();
|
||||
- server_ctrls = (LDAPControl **) slapi_ch_calloc(2, sizeof (LDAPControl *));
|
||||
+ server_ctrls = (LDAPControl **) slapi_ch_calloc(3, sizeof (LDAPControl *));
|
||||
server_ctrls[0] = (LDAPControl *) slapi_ch_malloc(sizeof (LDAPControl));
|
||||
server_ctrls[0]->ldctl_oid = slapi_ch_strdup(LDAP_CONTROL_MANAGEDSAIT);
|
||||
server_ctrls[0]->ldctl_value.bv_val = NULL;
|
||||
server_ctrls[0]->ldctl_value.bv_len = 0;
|
||||
server_ctrls[0]->ldctl_iscritical = '\0';
|
||||
+ server_ctrls[1] = (LDAPControl *) slapi_ch_malloc(sizeof (LDAPControl));
|
||||
+ server_ctrls[1]->ldctl_oid = slapi_ch_strdup(MTN_CONTROL_USE_ONE_BACKEND_EXT_OID);
|
||||
+ server_ctrls[1]->ldctl_value.bv_val = NULL;
|
||||
+ server_ctrls[1]->ldctl_value.bv_len = 0;
|
||||
+ server_ctrls[1]->ldctl_iscritical = '\0';
|
||||
slapi_search_internal_set_pb(search_pb, suffix, LDAP_SCOPE_SUBTREE,
|
||||
filter, NULL, 0, server_ctrls, NULL,
|
||||
(void *) plugin_get_default_component_id(), 0);
|
||||
--
|
||||
2.48.0
|
||||
|
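
The one-level search exercised by test_one_level_search_on_sub_suffixes can be reproduced with plain python-ldap; host, port, and credentials below are placeholders:

    import ldap

    conn = ldap.initialize("ldap://localhost:389")
    conn.simple_bind_s("cn=Directory Manager", "password")
    # Sub-suffix roots (dc=child1/dc=child2) are returned alongside the
    # ordinary one-level children of dc=parent.
    entries = conn.search_s("dc=parent", ldap.SCOPE_ONELEVEL,
                            "(objectClass=*)", ["dc", "ou"])
    for dn, attrs in entries:
        print(dn)
    conn.unbind_s()
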
@ -1,32 +0,0 @@
|
||||
From ab06b3cebbe0287ef557c0307ca2ee86fe8cb761 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Thu, 21 Nov 2024 16:26:02 +0100
|
||||
Subject: [PATCH] Issue 6224 - Fix merge issue in 389-ds-base-2.1 for
|
||||
ds_log_test.py (#6414)
|
||||
|
||||
Fix a merge issue during cherry-pick over 389-ds-base-2.1 and 389-ds-base-1.4.3 branches
|
||||
|
||||
Issue: #6224
|
||||
|
||||
Reviewed by: @mreynolds389
|
||||
|
||||
(cherry picked from commit 2b541c64b8317209e4dafa4f82918d714039907c)
|
||||
---
|
||||
dirsrvtests/tests/suites/ds_logs/ds_logs_test.py | 1 -
|
||||
1 file changed, 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 84a9c6ec8..812936c62 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,7 +1222,6 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-<<<<<<< HEAD
|
||||
def test_referral_subsuffix(topology_st, request):
|
||||
"""Test the results of an inverted parent suffix definition in the configuration.
|
||||
|
||||
--
|
||||
2.48.0
|
||||
|
@ -1,214 +0,0 @@
|
||||
From 3fe2cf7cdedcdf5cafb59867e52a1fbe4a643571 Mon Sep 17 00:00:00 2001
|
||||
From: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
Date: Fri, 20 Dec 2024 22:37:15 +0900
|
||||
Subject: [PATCH] Issue 6224 - Remove test_referral_subsuffix from
|
||||
ds_logs_test.py (#6456)
|
||||
|
||||
Bug Description:
|
||||
|
||||
The test_referral_subsuffix test was removed from the main branch and from
|
||||
some other higher-version branches, but it was not removed from 389-ds-base-1.4.3
|
||||
and 389-ds-base-2.1. The test no longer works with the fix for
|
||||
Issue 6224, because the newly added control limits the internal
|
||||
search to a single backend. The test should be removed.
|
||||
|
||||
Fix Description:
|
||||
|
||||
remove the test from ds_logs_test.py
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/6224
|
||||
---
|
||||
.../tests/suites/ds_logs/ds_logs_test.py | 177 ------------------
|
||||
1 file changed, 177 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 812936c62..84d721756 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,183 +1222,6 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_referral_subsuffix(topology_st, request):
|
||||
- """Test the results of an inverted parent suffix definition in the configuration.
|
||||
-
|
||||
- For more details see:
|
||||
- https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html
|
||||
-
|
||||
- :id: 4faf210a-4fde-4e4f-8834-865bdc8f4d37
|
||||
- :setup: Standalone instance
|
||||
- :steps:
|
||||
- 1. First create two Backends, without mapping trees.
|
||||
- 2. create the mapping trees for these backends
|
||||
- 3. reduce nsslapd-referral-check-period to accelerate test
|
||||
- 4. Remove error log file
|
||||
- 5. Create a referral entry on parent suffix
|
||||
- 6. Check that the server detected the referral
|
||||
- 7. Delete the referral entry
|
||||
- 8. Check that the server detected the deletion of the referral
|
||||
- 9. Remove error log file
|
||||
- 10. Create a referral entry on child suffix
|
||||
- 11. Check that the server detected the referral on both parent and child suffixes
|
||||
- 12. Delete the referral entry
|
||||
- 13. Check that the server detected the deletion of the referral on both parent and child suffixes
|
||||
- 14. Remove error log file
|
||||
- 15. Create a referral entry on parent suffix
|
||||
- 16. Check that the server detected the referral on both parent and child suffixes
|
||||
- 17. Delete the child referral entry
|
||||
- 18. Check that the server detected the deletion of the referral on child suffix but not on parent suffix
|
||||
- 19. Delete the parent referral entry
|
||||
- 20. Check that the server detected the deletion of the referral parent suffix
|
||||
-
|
||||
- :expectedresults:
|
||||
- all steps succeeds
|
||||
- """
|
||||
- inst = topology_st.standalone
|
||||
- # Step 1 First create two Backends, without mapping trees.
|
||||
- PARENT_SUFFIX='dc=parent,dc=com'
|
||||
- CHILD_SUFFIX='dc=child,%s' % PARENT_SUFFIX
|
||||
- be1 = create_backend(inst, 'Parent', PARENT_SUFFIX)
|
||||
- be2 = create_backend(inst, 'Child', CHILD_SUFFIX)
|
||||
- # Step 2 create the mapping trees for these backends
|
||||
- mts = MappingTrees(inst)
|
||||
- mt1 = mts.create(properties={
|
||||
- 'cn': PARENT_SUFFIX,
|
||||
- 'nsslapd-state': 'backend',
|
||||
- 'nsslapd-backend': 'Parent',
|
||||
- })
|
||||
- mt2 = mts.create(properties={
|
||||
- 'cn': CHILD_SUFFIX,
|
||||
- 'nsslapd-state': 'backend',
|
||||
- 'nsslapd-backend': 'Child',
|
||||
- 'nsslapd-parent-suffix': PARENT_SUFFIX,
|
||||
- })
|
||||
-
|
||||
- dc_ex = Domain(inst, dn=PARENT_SUFFIX)
|
||||
- assert dc_ex.exists()
|
||||
-
|
||||
- dc_st = Domain(inst, dn=CHILD_SUFFIX)
|
||||
- assert dc_st.exists()
|
||||
-
|
||||
- # Step 3 reduce nsslapd-referral-check-period to accelerate test
|
||||
- # requires a restart done on step 4
|
||||
- REFERRAL_CHECK=7
|
||||
- topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK))
|
||||
-
|
||||
- # Check that if we create a referral at parent level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is not detected at child backend
|
||||
-
|
||||
- # Step 3 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 4 Create a referral entry on parent suffix
|
||||
- rs_parent = Referrals(topology_st.standalone, PARENT_SUFFIX)
|
||||
-
|
||||
- referral_entry_parent = rs_parent.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 5 Check that the server detected the referral
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Step 6 Delete the referral entry
|
||||
- referral_entry_parent.delete()
|
||||
-
|
||||
- # Step 7 Check that the server detected the deletion of the referral
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Check that if we create a referral at child level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is detected at child backend
|
||||
-
|
||||
- # Step 8 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 9 Create a referral entry on child suffix
|
||||
- rs_child = Referrals(topology_st.standalone, CHILD_SUFFIX)
|
||||
- referral_entry_child = rs_child.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 10 Check that the server detected the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Step 11 Delete the referral entry
|
||||
- referral_entry_child.delete()
|
||||
-
|
||||
- # Step 12 Check that the server detected the deletion of the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Check that if we create a referral at child level and parent level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is detected at child backend
|
||||
-
|
||||
- # Step 13 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 14 Create a referral entry on parent suffix
|
||||
- # Create a referral entry on child suffix
|
||||
- referral_entry_parent = rs_parent.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
- referral_entry_child = rs_child.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 15 Check that the server detected the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Step 16 Delete the child referral entry
|
||||
- referral_entry_child.delete()
|
||||
-
|
||||
- # Step 17 Check that the server detected the deletion of the referral on child suffix but not on parent suffix
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Step 18 Delete the parent referral entry
|
||||
- referral_entry_parent.delete()
|
||||
-
|
||||
- # Step 19 Check that the server detected the deletion of the referral parent suffix
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- def fin():
|
||||
- log.info('Deleting referral')
|
||||
- try:
|
||||
- referral_entry_parent.delete()
|
||||
- referral.entry_child.delete()
|
||||
- except:
|
||||
- pass
|
||||
-
|
||||
- request.addfinalizer(fin)
|
||||
|
||||
def test_missing_backend_suffix(topology_st, request):
|
||||
"""Test that the server does not crash if a backend has no suffix
|
||||
--
|
||||
2.48.0
|
||||
|
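
A short lib389 sketch of the smart-referral lifecycle the removed tests drove; 'inst' is assumed to be an already-opened DirSrv connection, and the suffix and remote host are placeholders:

    from lib389.referral import Referrals

    def add_and_remove_referral(inst, suffix="dc=parent,dc=com"):
        refs = Referrals(inst, suffix)
        ref = refs.create(properties={
            'cn': 'testref',
            'ref': 'ldap://remote.example.com/ou=People,dc=example,dc=com',
        })
        # The server's referral-check thread notices the new entry within
        # nsslapd-referral-check-period seconds; deleting it reverses that.
        ref.delete()
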
@ -1,90 +0,0 @@
|
||||
From 4121ffe7a44fbacf513758661e71e483eb11ee3c Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 6 Jan 2025 14:00:39 +0100
|
||||
Subject: [PATCH] Issue 6417 - (2nd) If an entry RDN is identical to the
|
||||
suffix, then Entryrdn gets broken during a reindex (#6460)
|
||||
|
||||
Bug description:
|
||||
The primary fix has a flaw as it assumes that the
|
||||
suffix ID is '1'.
|
||||
If the RUV entry is the first entry of the database,
|
||||
the server loops indefinitely.
|
||||
|
||||
Fix description:
|
||||
Read the suffix ID from the entryrdn index
|
||||
|
||||
fixes: #6417
|
||||
|
||||
Reviewed by: Pierre Rogier (also reviewed the first fix)
|
||||
---
|
||||
.../suites/replication/regression_m2_test.py | 9 +++++++++
|
||||
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 19 ++++++++++++++++++-
|
||||
2 files changed, 27 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
index abac46ada..72d4b9f89 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
@@ -1010,6 +1010,15 @@ def test_online_reinit_may_hang(topo_with_sigkill):
|
||||
"""
|
||||
M1 = topo_with_sigkill.ms["supplier1"]
|
||||
M2 = topo_with_sigkill.ms["supplier2"]
|
||||
+
|
||||
+ # The RFE 5367 (when enabled) retrieves the DN
|
||||
+ # from the dncache. This hides an issue
|
||||
+ # with the primary fix for 6417.
|
||||
+ # We need to disable the RFE to verify that the primary
|
||||
+ # fix works properly.
|
||||
+ if ds_is_newer('2.3.1'):
|
||||
+ M1.config.replace('nsslapd-return-original-entrydn', 'off')
|
||||
+
|
||||
M1.stop()
|
||||
ldif_file = '%s/supplier1.ldif' % M1.get_ldif_dir()
|
||||
M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 83b041192..1bbb6252a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1115,6 +1115,7 @@ entryrdn_lookup_dn(backend *be,
|
||||
rdn_elem *elem = NULL;
|
||||
int maybesuffix = 0;
|
||||
int db_retry = 0;
|
||||
+ ID suffix_id = 1;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "entryrdn_lookup_dn",
|
||||
"--> entryrdn_lookup_dn\n");
|
||||
@@ -1175,6 +1176,22 @@ entryrdn_lookup_dn(backend *be,
|
||||
/* Setting the bulk fetch buffer */
|
||||
data.flags = DB_DBT_MALLOC;
|
||||
|
||||
+ /* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
+ keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
+ dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1);
|
||||
+ rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data);
|
||||
+ if (rc) {
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
+ "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
+ slapi_sdn_get_ndn(be->be_suffix),
|
||||
+ suffix_id);
|
||||
+ } else {
|
||||
+ elem = (rdn_elem *)data.data;
|
||||
+ suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ }
|
||||
+ dblayer_value_free(be, &data);
|
||||
+ dblayer_value_free(be, &key);
|
||||
+
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
slapi_ch_free_string(&keybuf);
|
||||
@@ -1224,7 +1241,7 @@ entryrdn_lookup_dn(backend *be,
|
||||
}
|
||||
goto bail;
|
||||
}
|
||||
- if (workid == 1) {
|
||||
+ if (workid == suffix_id) {
|
||||
/* The loop (workid) iterates from the starting 'id'
|
||||
* up to the suffix ID (i.e. '1').
|
||||
* A corner case (#6417) is if an entry, on the path
|
||||
--
|
||||
2.48.0
|
||||
|
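
The refinement above stops assuming the suffix has ID 1 and reads the ID from the entryrdn index instead. A toy model of that resolution step, with a dict standing in for the index and hypothetical data (here the RUV entry took ID 1, pushing the suffix to ID 3):

    ENTRYRDN_INDEX = {"dc=example": 3}

    def get_suffix_id(suffix_ndn, default=1):
        # Fall back to the historical default when the key is missing,
        # mirroring the warning path in the C code.
        return ENTRYRDN_INDEX.get(suffix_ndn, default)

    assert get_suffix_id("dc=example") == 3
    assert get_suffix_id("dc=missing") == 1
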
@ -1,40 +0,0 @@
|
||||
From 1ffcc9aa9a397180fe35283ee61b164471d073fb Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Tue, 7 Jan 2025 10:01:51 +0100
|
||||
Subject: [PATCH] Issue 6417 - (2nd) fix typo
|
||||
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 10 ++++++----
|
||||
1 file changed, 6 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 1bbb6252a..e2b8273a2 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1178,8 +1178,10 @@ entryrdn_lookup_dn(backend *be,
|
||||
|
||||
/* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
- dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1);
|
||||
- rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data);
|
||||
+ key.data = keybuf;
|
||||
+ key.size = key.ulen = strlen(keybuf) + 1;
|
||||
+ key.flags = DB_DBT_USERMEM;
|
||||
+ rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
"Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
@@ -1189,8 +1191,8 @@ entryrdn_lookup_dn(backend *be,
|
||||
elem = (rdn_elem *)data.data;
|
||||
suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
}
|
||||
- dblayer_value_free(be, &data);
|
||||
- dblayer_value_free(be, &key);
|
||||
+ slapi_ch_free(&data.data);
|
||||
+ slapi_ch_free_string(&keybuf);
|
||||
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
--
|
||||
2.48.0
|
||||
|
@ -1,75 +0,0 @@
|
||||
From 9e1284122a929fe14633a2aa6e2de4d72891f98f Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 13 Jan 2025 17:41:18 +0100
|
||||
Subject: [PATCH] Issue 6417 - (3rd) If an entry RDN is identical to the
|
||||
suffix, then Entryrdn gets broken during a reindex (#6480)
|
||||
|
||||
Bug description:
|
||||
The previous fix had a flaw.
|
||||
If entryrdn_lookup_dn is called with an undefined suffix,
|
||||
the lookup of the suffix triggers a crash.
|
||||
For example, it can occur during an internal search of a
|
||||
nonexistent map (view plugin).
|
||||
The issue exists in all releases but is hidden since 2.3.
|
||||
|
||||
Fix description:
|
||||
test that the suffix is defined before looking it up
|
||||
|
||||
fixes: #6417
|
||||
|
||||
Reviewed by: Pierre Rogier (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 36 +++++++++++---------
|
||||
1 file changed, 20 insertions(+), 16 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index e2b8273a2..01c77156f 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1176,23 +1176,27 @@ entryrdn_lookup_dn(backend *be,
|
||||
/* Setting the bulk fetch buffer */
|
||||
data.flags = DB_DBT_MALLOC;
|
||||
|
||||
- /* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
- keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
- key.data = keybuf;
|
||||
- key.size = key.ulen = strlen(keybuf) + 1;
|
||||
- key.flags = DB_DBT_USERMEM;
|
||||
- rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
- if (rc) {
|
||||
- slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
- "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
- slapi_sdn_get_ndn(be->be_suffix),
|
||||
- suffix_id);
|
||||
- } else {
|
||||
- elem = (rdn_elem *)data.data;
|
||||
- suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ /* Just in case the suffix ID is not '1' retrieve it from the database
|
||||
+ * if the suffix is not defined suffix_id remains '1'
|
||||
+ */
|
||||
+ if (be->be_suffix) {
|
||||
+ keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
+ key.data = keybuf;
|
||||
+ key.size = key.ulen = strlen(keybuf) + 1;
|
||||
+ key.flags = DB_DBT_USERMEM;
|
||||
+ rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
+ if (rc) {
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
+ "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
+ slapi_sdn_get_ndn(be->be_suffix),
|
||||
+ suffix_id);
|
||||
+ } else {
|
||||
+ elem = (rdn_elem *) data.data;
|
||||
+ suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ }
|
||||
+ slapi_ch_free(&data.data);
|
||||
+ slapi_ch_free_string(&keybuf);
|
||||
}
|
||||
- slapi_ch_free(&data.data);
|
||||
- slapi_ch_free_string(&keybuf);
|
||||
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
--
|
||||
2.48.0
|
||||
|
@ -1,297 +0,0 @@
|
||||
From d2f9dd82e3610ee9b73feea981c680c03bb21394 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Thu, 16 Jan 2025 08:42:53 -0500
|
||||
Subject: [PATCH] Issue 6509 - Race condition with Paged Result searches
|
||||
|
||||
Description:
|
||||
|
||||
There is a race condition with Paged Result searches when a new operation comes
|
||||
in while a paged search is finishing. This triggers an invalid timeout error
|
||||
and closes the connection with a T3 code.
|
||||
|
||||
The problem is that we do not use the "PagedResult lock" when checking the
|
||||
connection's paged result data for a timeout event. This causes the paged
|
||||
result timeout value to change unexpectedly and trigger a false timeout when a
|
||||
new operation arrives.
|
||||
|
||||
Now we check the timeout without the conn lock; if it has expired it could
|
||||
be a race condition and a false positive. Try the lock again and test the
|
||||
timeout. This also prevents blocking non-paged result searches from
|
||||
getting held up by the lock when it's not necessary.
|
||||
|
||||
This also fixes some memory leaks that occur when an error happens.
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6509
|
||||
|
||||
Reviewed by: tbordaz & proger (Thanks!!)
|
||||
---
|
||||
ldap/servers/slapd/daemon.c | 61 ++++++++++++++++++-------------
|
||||
ldap/servers/slapd/opshared.c | 58 ++++++++++++++---------------
|
||||
ldap/servers/slapd/pagedresults.c | 9 +++++
|
||||
ldap/servers/slapd/slap.h | 2 +-
|
||||
4 files changed, 75 insertions(+), 55 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index bb80dae36..13dfe250d 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1578,7 +1578,29 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
if (c->c_state == CONN_STATE_FREE) {
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else {
|
||||
- /* we try to acquire the connection mutex, if it is already
|
||||
+ /* Check for a timeout for PAGED RESULTS */
|
||||
+ if (pagedresults_is_timedout_nolock(c)) {
|
||||
+ /*
|
||||
+ * There could be a race condition so lets try again with the
|
||||
+ * right lock
|
||||
+ */
|
||||
+ pthread_mutex_t *pr_mutex = pageresult_lock_get_addr(c);
|
||||
+ if (pthread_mutex_trylock(pr_mutex) == EBUSY) {
|
||||
+ c = next;
|
||||
+ continue;
|
||||
+ }
|
||||
+ if (pagedresults_is_timedout_nolock(c)) {
|
||||
+ pthread_mutex_unlock(pr_mutex);
|
||||
+ disconnect_server(c, c->c_connid, -1,
|
||||
+ SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
|
||||
+ 0);
|
||||
+ } else {
|
||||
+ pthread_mutex_unlock(pr_mutex);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ /*
|
||||
+ * we try to acquire the connection mutex, if it is already
|
||||
* acquired by another thread, don't wait
|
||||
*/
|
||||
if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
|
||||
@@ -1586,35 +1608,24 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
continue;
|
||||
}
|
||||
if (c->c_flags & CONN_FLAG_CLOSING) {
|
||||
- /* A worker thread has marked that this connection
|
||||
- * should be closed by calling disconnect_server.
|
||||
- * move this connection out of the active list
|
||||
- * the last thread to use the connection will close it
|
||||
+ /*
|
||||
+ * A worker thread, or paged result timeout, has marked that
|
||||
+ * this connection should be closed by calling
|
||||
+ * disconnect_server(). Move this connection out of the active
|
||||
+ * list then the last thread to use the connection will close
|
||||
+ * it.
|
||||
*/
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else if (c->c_sd == SLAPD_INVALID_SOCKET) {
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else if (c->c_prfd != NULL) {
|
||||
if ((!c->c_gettingber) && (c->c_threadnumber < c->c_max_threads_per_conn)) {
|
||||
- int add_fd = 1;
|
||||
- /* check timeout for PAGED RESULTS */
|
||||
- if (pagedresults_is_timedout_nolock(c)) {
|
||||
- /* Exceeded the paged search timelimit; disconnect the client */
|
||||
- disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
- SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
|
||||
- 0);
|
||||
- connection_table_move_connection_out_of_active_list(ct,
|
||||
- c);
|
||||
- add_fd = 0; /* do not poll on this fd */
|
||||
- }
|
||||
- if (add_fd) {
|
||||
- ct->fd[count].fd = c->c_prfd;
|
||||
- ct->fd[count].in_flags = SLAPD_POLL_FLAGS;
|
||||
- /* slot i of the connection table is mapped to slot
|
||||
- * count of the fds array */
|
||||
- c->c_fdi = count;
|
||||
- count++;
|
||||
- }
|
||||
+ ct->fd[listnum][count].fd = c->c_prfd;
|
||||
+ ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
|
||||
+ /* slot i of the connection table is mapped to slot
|
||||
+ * count of the fds array */
|
||||
+ c->c_fdi = count;
|
||||
+ count++;
|
||||
} else {
|
||||
if (c->c_threadnumber >= c->c_max_threads_per_conn) {
|
||||
c->c_maxthreadsblocked++;
|
||||
@@ -1675,7 +1686,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused
|
||||
continue;
|
||||
}
|
||||
|
||||
- /* Try to get connection mutex, if not available just skip the connection and
|
||||
+ /* Try to get connection mutex, if not available just skip the connection and
|
||||
* process other connections events. May generates cpu load for listening thread
|
||||
* if connection mutex is held for a long time
|
||||
*/
|
||||
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
|
||||
index 7ab4117cd..a29eed052 100644
|
||||
--- a/ldap/servers/slapd/opshared.c
|
||||
+++ b/ldap/servers/slapd/opshared.c
|
||||
@@ -250,7 +250,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
char *errtext = NULL;
|
||||
int nentries, pnentries;
|
||||
int flag_search_base_found = 0;
|
||||
- int flag_no_such_object = 0;
|
||||
+ bool flag_no_such_object = false;
|
||||
int flag_referral = 0;
|
||||
int flag_psearch = 0;
|
||||
int err_code = LDAP_SUCCESS;
|
||||
@@ -315,7 +315,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
rc = -1;
|
||||
goto free_and_return_nolock;
|
||||
}
|
||||
-
|
||||
+
|
||||
/* Set the time we actually started the operation */
|
||||
slapi_operation_set_time_started(operation);
|
||||
|
||||
@@ -798,11 +798,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
}
|
||||
|
||||
/* subtree searches :
|
||||
- * if the search was started above the backend suffix
|
||||
- * - temporarily set the SLAPI_SEARCH_TARGET_SDN to the
|
||||
- * base of the node so that we don't get a NO SUCH OBJECT error
|
||||
- * - do not change the scope
|
||||
- */
|
||||
+ * if the search was started above the backend suffix
|
||||
+ * - temporarily set the SLAPI_SEARCH_TARGET_SDN to the
|
||||
+ * base of the node so that we don't get a NO SUCH OBJECT error
|
||||
+ * - do not change the scope
|
||||
+ */
|
||||
if (scope == LDAP_SCOPE_SUBTREE) {
|
||||
if (slapi_sdn_issuffix(be_suffix, basesdn)) {
|
||||
if (free_sdn) {
|
||||
@@ -825,53 +825,53 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
switch (rc) {
|
||||
case 1:
|
||||
/* if the backend returned LDAP_NO_SUCH_OBJECT for a SEARCH request,
|
||||
- * it will not have sent back a result - otherwise, it will have
|
||||
- * sent a result */
|
||||
+ * it will not have sent back a result - otherwise, it will have
|
||||
+ * sent a result */
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
|
||||
if (err == LDAP_NO_SUCH_OBJECT) {
|
||||
/* may be the object exist somewhere else
|
||||
- * wait the end of the loop to send back this error
|
||||
- */
|
||||
- flag_no_such_object = 1;
|
||||
+ * wait the end of the loop to send back this error
|
||||
+ */
|
||||
+ flag_no_such_object = true;
|
||||
} else {
|
||||
/* err something other than LDAP_NO_SUCH_OBJECT, so the backend will
|
||||
- * have sent the result -
|
||||
- * Set a flag here so we don't return another result. */
|
||||
+ * have sent the result -
|
||||
+ * Set a flag here so we don't return another result. */
|
||||
sent_result = 1;
|
||||
}
|
||||
- /* fall through */
|
||||
+ /* fall through */
|
||||
|
||||
case -1: /* an error occurred */
|
||||
+ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
|
||||
/* PAGED RESULTS */
|
||||
if (op_is_pagedresults(operation)) {
|
||||
/* cleanup the slot */
|
||||
pthread_mutex_lock(pagedresults_mutex);
|
||||
+ if (err != LDAP_NO_SUCH_OBJECT && !flag_no_such_object) {
|
||||
+ /* Free the results if not "no_such_object" */
|
||||
+ void *sr = NULL;
|
||||
+ slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
|
||||
+ be->be_search_results_release(&sr);
|
||||
+ }
|
||||
pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx);
|
||||
rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1);
|
||||
pthread_mutex_unlock(pagedresults_mutex);
|
||||
}
|
||||
- if (1 == flag_no_such_object) {
|
||||
- break;
|
||||
- }
|
||||
- slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
|
||||
- if (err == LDAP_NO_SUCH_OBJECT) {
|
||||
- /* may be the object exist somewhere else
|
||||
- * wait the end of the loop to send back this error
|
||||
- */
|
||||
- flag_no_such_object = 1;
|
||||
+
|
||||
+ if (err == LDAP_NO_SUCH_OBJECT || flag_no_such_object) {
|
||||
+ /* Maybe the object exists somewhere else, wait to the end
|
||||
+ * of the loop to send back this error */
|
||||
+ flag_no_such_object = true;
|
||||
break;
|
||||
} else {
|
||||
- /* for error other than LDAP_NO_SUCH_OBJECT
|
||||
- * the error has already been sent
|
||||
- * stop the search here
|
||||
- */
|
||||
+ /* For error other than LDAP_NO_SUCH_OBJECT the error has
|
||||
+ * already been sent stop the search here */
|
||||
cache_return_target_entry(pb, be, operation);
|
||||
goto free_and_return;
|
||||
}
|
||||
|
||||
/* when rc == SLAPI_FAIL_DISKFULL this case is executed */
|
||||
-
|
||||
case SLAPI_FAIL_DISKFULL:
|
||||
operation_out_of_disk_space();
|
||||
cache_return_target_entry(pb, be, operation);
|
||||
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
|
||||
index db87e486e..4aa1fa3e5 100644
|
||||
--- a/ldap/servers/slapd/pagedresults.c
|
||||
+++ b/ldap/servers/slapd/pagedresults.c
|
||||
@@ -121,12 +121,15 @@ pagedresults_parse_control_value(Slapi_PBlock *pb,
|
||||
if (ber_scanf(ber, "{io}", pagesize, &cookie) == LBER_ERROR) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
|
||||
"<= corrupted control value\n");
|
||||
+ ber_free(ber, 1);
|
||||
return LDAP_PROTOCOL_ERROR;
|
||||
}
|
||||
if (!maxreqs) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
|
||||
"Simple paged results requests per conn exceeded the limit: %d\n",
|
||||
maxreqs);
|
||||
+ ber_free(ber, 1);
|
||||
+ slapi_ch_free_string(&cookie.bv_val);
|
||||
return LDAP_UNWILLING_TO_PERFORM;
|
||||
}
|
||||
|
||||
@@ -376,6 +379,10 @@ pagedresults_free_one_msgid(Connection *conn, ber_int_t msgid, pthread_mutex_t *
|
||||
}
|
||||
prp->pr_flags |= CONN_FLAG_PAGEDRESULTS_ABANDONED;
|
||||
prp->pr_flags &= ~CONN_FLAG_PAGEDRESULTS_PROCESSING;
|
||||
+ if (conn->c_pagedresults.prl_count > 0) {
|
||||
+ _pr_cleanup_one_slot(prp);
|
||||
+ conn->c_pagedresults.prl_count--;
|
||||
+ }
|
||||
rc = 0;
|
||||
break;
|
||||
}
|
||||
@@ -940,7 +947,9 @@ pagedresults_is_timedout_nolock(Connection *conn)
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "<-- pagedresults_is_timedout", "<= false 2\n");
|
||||
+
|
||||
return 0;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
|
||||
index 072f6f962..469874fd1 100644
|
||||
--- a/ldap/servers/slapd/slap.h
|
||||
+++ b/ldap/servers/slapd/slap.h
|
||||
@@ -74,7 +74,7 @@ static char ptokPBE[34] = "Internal (Software) Token ";
|
||||
#include <sys/stat.h>
|
||||
#include <sys/socket.h>
|
||||
#include <netinet/in.h>
|
||||
-
|
||||
+#include <stdbool.h>
|
||||
#include <time.h> /* For timespec definitions */
|
||||
|
||||
/* Provides our int types and platform specific requirements. */
|
||||
--
|
||||
2.48.0
|
||||
|
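
The daemon change above is a double-checked trylock: test the timeout without the lock first, and only disconnect after re-testing it under the paged-results lock. A self-contained Python model of the pattern (the Conn class and all names are hypothetical):

    import threading, time

    class Conn:
        """Hypothetical stand-in for a server connection."""
        def __init__(self, deadline):
            self.deadline = deadline
            self.closed = False
        def paged_timed_out(self):
            return time.monotonic() > self.deadline
        def disconnect(self, reason):
            self.closed = True

    pr_lock = threading.Lock()

    def check_paged_timeout(conn):
        if not conn.paged_timed_out():           # cheap unlocked check
            return
        if not pr_lock.acquire(blocking=False):  # a writer holds the lock:
            return                               # possible race, re-check later
        try:
            if conn.paged_timed_out():           # confirmed under the lock
                conn.disconnect("paged search time limit exceeded")
        finally:
            pr_lock.release()

    c = Conn(deadline=time.monotonic() - 1)      # already expired
    check_paged_timeout(c)
    assert c.closed
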
@ -1,29 +0,0 @@
|
||||
From 27cd055197bc3cae458a1f86621aa5410c66dd2c Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Mon, 20 Jan 2025 15:51:24 -0500
|
||||
Subject: [PATCH] Issue 6509 - Fix cherry pick issue (race condition in Paged
|
||||
results)
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6509
|
||||
---
|
||||
ldap/servers/slapd/daemon.c | 4 ++--
|
||||
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 13dfe250d..57e07e5f5 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1620,8 +1620,8 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else if (c->c_prfd != NULL) {
|
||||
if ((!c->c_gettingber) && (c->c_threadnumber < c->c_max_threads_per_conn)) {
|
||||
- ct->fd[listnum][count].fd = c->c_prfd;
|
||||
- ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
|
||||
+ ct->fd[count].fd = c->c_prfd;
|
||||
+ ct->fd[count].in_flags = SLAPD_POLL_FLAGS;
|
||||
/* slot i of the connection table is mapped to slot
|
||||
* count of the fds array */
|
||||
c->c_fdi = count;
|
||||
--
|
||||
2.48.0
|
||||
|
File diff suppressed because it is too large
@ -1,236 +0,0 @@
|
||||
From 1845aed98becaba6b975342229cb5e0de79d208d Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 29 Jan 2025 17:41:55 +0000
|
||||
Subject: [PATCH] Issue 6436 - MOD on a large group slow if substring index is
|
||||
present (#6437)
|
||||
|
||||
Bug Description: If the substring index is configured for the group
|
||||
membership attribute ( member or uniqueMember ), the removal of a
|
||||
member from a large static group is pretty slow.
|
||||
|
||||
Fix Description: A solution to this issue would be to introduce
|
||||
a new index to track membership attributes. In the interim,
|
||||
we add a check to healthcheck to inform the user of the implications
|
||||
of this configuration.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6436
|
||||
|
||||
Reviewed by: @Firstyear, @tbordaz, @droideck (Thanks)
|
||||
---
|
||||
.../suites/healthcheck/health_config_test.py | 89 ++++++++++++++++++-
|
||||
src/lib389/lib389/lint.py | 15 ++++
|
||||
src/lib389/lib389/plugins.py | 37 +++++++-
|
||||
3 files changed, 137 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
|
||||
index 6d3d08bfa..747699486 100644
|
||||
--- a/dirsrvtests/tests/suites/healthcheck/health_config_test.py
|
||||
+++ b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
|
||||
@@ -212,6 +212,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
|
||||
MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
|
||||
|
||||
standalone = topology_st.standalone
|
||||
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
|
||||
|
||||
log.info('Enable RI plugin')
|
||||
plugin = ReferentialIntegrityPlugin(standalone)
|
||||
@@ -233,7 +234,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
|
||||
|
||||
|
||||
def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
- """Check if HealthCheck returns DSMOLE0002 code
|
||||
+ """Check if HealthCheck returns DSMOLE0001 code
|
||||
|
||||
:id: 236b0ec2-13da-48fb-b65a-db7406d56d5d
|
||||
:setup: Standalone instance
|
||||
@@ -248,8 +249,8 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
:expectedresults:
|
||||
1. Success
|
||||
2. Success
|
||||
- 3. Healthcheck reports DSMOLE0002 code and related details
|
||||
- 4. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 3. Healthcheck reports DSMOLE0001 code and related details
|
||||
+ 4. Healthcheck reports DSMOLE0001 code and related details
|
||||
5. Success
|
||||
6. Healthcheck reports no issue found
|
||||
7. Healthcheck reports no issue found
|
||||
@@ -259,6 +260,7 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
MO_GROUP_ATTR = 'creatorsname'
|
||||
|
||||
standalone = topology_st.standalone
|
||||
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
|
||||
|
||||
log.info('Enable MO plugin')
|
||||
plugin = MemberOfPlugin(standalone)
|
||||
@@ -279,6 +281,87 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
|
||||
|
||||
|
||||
+def test_healthcheck_MO_plugin_substring_index(topology_st):
|
||||
+ """Check if HealthCheck returns DSMOLE0002 code when the
|
||||
+ member, uniquemember attribute contains a substring index type
|
||||
+
|
||||
+ :id: 10954811-24ac-4886-8183-e30892f8e02d
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Create DS instance
|
||||
+ 2. Configure the instance with MO Plugin
|
||||
+ 3. Change index type to substring for member attribute
|
||||
+ 4. Use HealthCheck without --json option
|
||||
+ 5. Use HealthCheck with --json option
|
||||
+ 6. Change index type back to equality for member attribute
|
||||
+ 7. Use HealthCheck without --json option
|
||||
+ 8. Use HealthCheck with --json option
|
||||
+ 9. Change index type to substring for uniquemember attribute
|
||||
+ 10. Use HealthCheck without --json option
|
||||
+ 11. Use HealthCheck with --json option
|
||||
+ 12. Change index type back to equality for uniquemember attribute
|
||||
+ 13. Use HealthCheck without --json option
|
||||
+ 14. Use HealthCheck with --json option
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 5. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 6. Success
|
||||
+ 7. Healthcheck reports no issue found
|
||||
+ 8. Healthcheck reports no issue found
|
||||
+ 9. Success
|
||||
+ 10. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 11. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 12. Success
|
||||
+ 13. Healthcheck reports no issue found
|
||||
+ 14. Healthcheck reports no issue found
|
||||
+ """
|
||||
+
|
||||
+ RET_CODE = 'DSMOLE0002'
|
||||
+ MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
|
||||
+ UNIQUE_MEMBER_DN = 'cn=uniquemember,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
|
||||
+
|
||||
+ log.info('Enable MO plugin')
|
||||
+ plugin = MemberOfPlugin(standalone)
|
||||
+ plugin.disable()
|
||||
+ plugin.enable()
|
||||
+
|
||||
+ log.info('Change the index type of the member attribute index to substring')
|
||||
+ index = Index(topology_st.standalone, MEMBER_DN)
|
||||
+ index.replace('nsIndexType', 'sub')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE)
|
||||
+
|
||||
+ log.info('Set the index type of the member attribute index back to eq')
|
||||
+ index.replace('nsIndexType', 'eq')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
|
||||
+
|
||||
+ log.info('Change the index type of the uniquemember attribute index to substring')
|
||||
+ index = Index(topology_st.standalone, UNIQUE_MEMBER_DN)
|
||||
+ index.replace('nsIndexType', 'sub')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE)
|
||||
+
|
||||
+ log.info('Set the index type of the uniquemember attribute index back to eq')
|
||||
+ index.replace('nsIndexType', 'eq')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
|
||||
+
|
||||
+ # Restart the instance after changing the plugin to avoid breaking the other tests
|
||||
+ standalone.restart()
|
||||
+
|
||||
+
|
||||
@pytest.mark.ds50873
|
||||
@pytest.mark.bz1685160
|
||||
@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented")
|
||||
diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
|
||||
index 4d9cbb666..3d3c79ea3 100644
|
||||
--- a/src/lib389/lib389/lint.py
|
||||
+++ b/src/lib389/lib389/lint.py
|
||||
@@ -231,6 +231,21 @@ database after adding the missing index type. Here is an example using dsconf:
|
||||
"""
|
||||
}
|
||||
|
||||
+DSMOLE0002 = {
|
||||
+ 'dsle': 'DSMOLE0002',
|
||||
+ 'severity': 'LOW',
|
||||
+ 'description': 'Removal of a member can be slow ',
|
||||
+ 'items': ['cn=memberof plugin,cn=plugins,cn=config', ],
|
||||
+ 'detail': """If the substring index is configured for a membership attribute. The removal of a member
|
||||
+from the large group can be slow.
|
||||
+
|
||||
+""",
|
||||
+ 'fix': """If not required, you can remove the substring index type using dsconf:
|
||||
+
|
||||
+ # dsconf slapd-YOUR_INSTANCE backend index set --attr=ATTR BACKEND --del-type=sub
|
||||
+"""
|
||||
+}
|
||||
+
|
||||
# Disk Space check. Note - PARTITION is replaced by the calling function
|
||||
DSDSLE0001 = {
|
||||
'dsle': 'DSDSLE0001',
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 6bf1843ad..185398e5b 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -12,7 +12,7 @@ import copy
|
||||
import os.path
|
||||
from lib389 import tasks
|
||||
from lib389._mapped_object import DSLdapObjects, DSLdapObject
|
||||
-from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001
|
||||
+from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001, DSMOLE0002
|
||||
from lib389.utils import ensure_str, ensure_list_bytes
|
||||
from lib389.schema import Schema
|
||||
from lib389._constants import (
|
||||
@@ -827,6 +827,41 @@ class MemberOfPlugin(Plugin):
|
||||
report['check'] = f'memberof:attr_indexes'
|
||||
yield report
|
||||
|
||||
+ def _lint_member_substring_index(self):
|
||||
+ if self.status():
|
||||
+ from lib389.backend import Backends
|
||||
+ backends = Backends(self._instance).list()
|
||||
+ membership_attrs = ['member', 'uniquemember']
|
||||
+ container = self.get_attr_val_utf8_l("nsslapd-plugincontainerscope")
|
||||
+ for backend in backends:
|
||||
+ suffix = backend.get_attr_val_utf8_l('nsslapd-suffix')
|
||||
+ if suffix == "cn=changelog":
|
||||
+ # Always skip retro changelog
|
||||
+ continue
|
||||
+ if container is not None:
|
||||
+ # Check if this backend is in the scope
|
||||
+ if not container.endswith(suffix):
|
||||
+ # skip this backend that is not in the scope
|
||||
+ continue
|
||||
+ indexes = backend.get_indexes()
|
||||
+ for attr in membership_attrs:
|
||||
+ report = copy.deepcopy(DSMOLE0002)
|
||||
+ try:
|
||||
+ index = indexes.get(attr)
|
||||
+ types = index.get_attr_vals_utf8_l("nsIndexType")
|
||||
+ if "sub" in types:
|
||||
+ report['detail'] = report['detail'].replace('ATTR', attr)
|
||||
+ report['detail'] = report['detail'].replace('BACKEND', suffix)
|
||||
+ report['fix'] = report['fix'].replace('ATTR', attr)
|
||||
+ report['fix'] = report['fix'].replace('BACKEND', suffix)
|
||||
+ report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid)
|
||||
+ report['items'].append(suffix)
|
||||
+ report['items'].append(attr)
|
||||
+ report['check'] = f'attr:substring_index'
|
||||
+ yield report
|
||||
+ except KeyError:
|
||||
+ continue
|
||||
+
|
||||
def get_attr(self):
|
||||
"""Get memberofattr attribute"""
|
||||
|
||||
--
|
||||
2.48.1
|
||||
|
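The heart of the patch above is the _lint_member_substring_index generator. For readers skimming the removed patch, the same check distilled into a standalone helper looks roughly like this. This is a sketch only, built from the lib389 calls used in the patch itself; "inst" is assumed to be an already-connected DirSrv handle:

from lib389.backend import Backends

def find_substring_membership_indexes(inst, attrs=('member', 'uniquemember')):
    """Yield (suffix, attr) pairs whose index carries the 'sub' index type."""
    for backend in Backends(inst).list():
        suffix = backend.get_attr_val_utf8_l('nsslapd-suffix')
        if suffix == 'cn=changelog':
            continue  # the lint always skips the retro changelog backend
        indexes = backend.get_indexes()
        for attr in attrs:
            try:
                index = indexes.get(attr)
            except KeyError:
                continue  # mirrors the patch: no index entry for this attribute
            if 'sub' in index.get_attr_vals_utf8_l('nsIndexType'):
                yield (suffix, attr)

Each hit corresponds to one DSMOLE0002 report; the suggested remediation is the dsconf command embedded in the lint's 'fix' string.
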
@ -1,651 +0,0 @@
From dba27e56161943fbcf54ecbc28337e2c81b07979 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 13 Jan 2025 18:03:07 +0100
Subject: [PATCH] Issue 6494 - Various errors when using extended matching rule
on vlv sort filter (#6495)

* Issue 6494 - Various errors when using extended matching rule on vlv sort filter

Various issues when configuring and using an extended matching rule within a vlv sort filter:

Race condition about the keys storage while indexing leading to various heap and data corruption. (lmdb only)
Crash while indexing if vlv are misconfigured because NULL key is not checked.
Read after block because of data type mismatch between SlapiValue and berval
Memory leaks
Solution:

Serialize the vlv index key generation if the vlv filter has an extended matching rule.
Check null keys
Always provide SlapiValue even if we want to get keys as bervals
Properly free the resources
Issue: #6494

Reviewed by: @mreynolds389 (Thanks!)

(cherry picked from commit 4bd27ecc4e1d21c8af5ab8cad795d70477179a98)
(cherry picked from commit 223a20250cbf29a546dcb398cfc76024d2f91347)
(cherry picked from commit 280043740a525eaf0438129fd8b99ca251c62366)
---
.../tests/suites/indexes/regression_test.py | 29 +++
.../tests/suites/vlv/regression_test.py | 183 ++++++++++++++++++
ldap/servers/slapd/back-ldbm/cleanup.c | 8 +
ldap/servers/slapd/back-ldbm/dblayer.c | 22 ++-
ldap/servers/slapd/back-ldbm/ldbm_attr.c | 2 +-
ldap/servers/slapd/back-ldbm/matchrule.c | 8 +-
.../servers/slapd/back-ldbm/proto-back-ldbm.h | 3 +-
ldap/servers/slapd/back-ldbm/sort.c | 37 ++--
ldap/servers/slapd/back-ldbm/vlv.c | 26 +--
ldap/servers/slapd/back-ldbm/vlv_srch.c | 4 +-
ldap/servers/slapd/generation.c | 5 +
ldap/servers/slapd/plugin_mr.c | 12 +-
src/lib389/lib389/backend.py | 10 +
13 files changed, 292 insertions(+), 57 deletions(-)

diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
index fc6db727f..2196fb2ed 100644
--- a/dirsrvtests/tests/suites/indexes/regression_test.py
+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
@@ -227,6 +227,35 @@ def test_reject_virtual_attr_for_indexing(topo):
break


+def test_reindex_extended_matching_rule(topo, add_backend_and_ldif_50K_users):
+ """Check that index with extended matching rule are reindexed properly.
+
+ :id: 8a3198e8-cc5a-11ef-a3e7-482ae39447e5
+ :setup: Standalone instance + a second backend with 50K users
+ :steps:
+ 1. Configure uid with 2.5.13.2 matching rule
+ 1. Configure cn with 2.5.13.2 matching rule
+ 2. Reindex
+ :expectedresults:
+ 1. Success
+ 2. Success
+ """
+
+ inst = topo.standalone
+ tasks = Tasks(inst)
+ be2 = Backends(topo.standalone).get_backend(SUFFIX2)
+ index = be2.get_index('uid')
+ index.replace('nsMatchingRule', '2.5.13.2')
+ index = be2.get_index('cn')
+ index.replace('nsMatchingRule', '2.5.13.2')
+
+ assert tasks.reindex(
+ suffix=SUFFIX2,
+ args={TASK_WAIT: True}
+ ) == 0
+
+
+
if __name__ == "__main__":
# Run isolated
# -s for DEBUG mode
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
index 3b66de8b5..6ab709bd3 100644
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
@@ -22,6 +22,146 @@ logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)


+class BackendHandler:
+ def __init__(self, inst, bedict, scope=ldap.SCOPE_ONELEVEL):
+ self.inst = inst
+ self.bedict = bedict
+ self.bes = Backends(inst)
+ self.scope = scope
+ self.data = {}
+
+ def find_backend(self, bename):
+ for be in self.bes.list():
+ if be.get_attr_val_utf8_l('cn') == bename:
+ return be
+ return None
+
+ def cleanup(self):
+ benames = list(self.bedict.keys())
+ benames.reverse()
+ for bename in benames:
+ be = self.find_backend(bename)
+ if be:
+ be.delete()
+
+ def setup(self):
+ # Create backends, add vlv index and populate the backends.
+ for bename,suffix in self.bedict.items():
+ be = self.bes.create(properties={
+ 'cn': bename,
+ 'nsslapd-suffix': suffix,
+ })
+ # Add suffix entry
+ Organization(self.inst, dn=suffix).create(properties={ 'o': bename, })
+ # Configure vlv
+ vlv_search, vlv_index = create_vlv_search_and_index(
+ self.inst, basedn=suffix,
+ bename=bename, scope=self.scope,
+ prefix=f'vlv_1lvl_{bename}')
+ # Reindex
+ reindex_task = Tasks(self.inst)
+ assert reindex_task.reindex(
+ suffix=suffix,
+ attrname=vlv_index.rdn,
+ args={TASK_WAIT: True},
+ vlv=True
+ ) == 0
+ # Add ou=People entry
+ OrganizationalUnits(self.inst, suffix).create(properties={'ou': 'People'})
+ # Add another ou that will be deleted before the export
+ # so that import will change the vlv search basedn entryid
+ ou2 = OrganizationalUnits(self.inst, suffix).create(properties={'ou': 'dummy ou'})
+ # Add a demo user so that vlv_check is happy
+ dn = f'uid=demo_user,ou=people,{suffix}'
+ UserAccount(self.inst, dn=dn).create( properties= {
+ 'uid': 'demo_user',
+ 'cn': 'Demo user',
+ 'sn': 'Demo user',
+ 'uidNumber': '99998',
+ 'gidNumber': '99998',
+ 'homeDirectory': '/var/empty',
+ 'loginShell': '/bin/false',
+ 'userpassword': DEMO_PW })
+ # Add regular user
+ add_users(self.inst, 10, suffix=suffix)
+ # Removing ou2
+ ou2.delete()
+ # And export
+ tasks = Tasks(self.inst)
+ ldif = f'{self.inst.get_ldif_dir()}/db-{bename}.ldif'
+ assert tasks.exportLDIF(suffix=suffix,
+ output_file=ldif,
+ args={TASK_WAIT: True}) == 0
+ # Add the various parameters in topology_st.belist
+ self.data[bename] = { 'be': be,
+ 'suffix': suffix,
+ 'ldif': ldif,
+ 'vlv_search' : vlv_search,
+ 'vlv_index' : vlv_index,
+ 'dn' : dn}
+
+
+def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
+ scope=ldap.SCOPE_SUBTREE, prefix="vlv", vlvsort="cn"):
+ vlv_searches = VLVSearch(inst)
+ vlv_search_properties = {
+ "objectclass": ["top", "vlvSearch"],
+ "cn": f"{prefix}Srch",
+ "vlvbase": basedn,
+ "vlvfilter": "(uid=*)",
+ "vlvscope": str(scope),
+ }
+ vlv_searches.create(
+ basedn=f"cn={bename},cn=ldbm database,cn=plugins,cn=config",
+ properties=vlv_search_properties
+ )
+
+ vlv_index = VLVIndex(inst)
+ vlv_index_properties = {
+ "objectclass": ["top", "vlvIndex"],
+ "cn": f"{prefix}Idx",
+ "vlvsort": vlvsort,
+ }
+ vlv_index.create(
+ basedn=f"cn={prefix}Srch,cn={bename},cn=ldbm database,cn=plugins,cn=config",
+ properties=vlv_index_properties
+ )
+ return vlv_searches, vlv_index
+
+
+@pytest.fixture
+def vlv_setup_with_uid_mr(topology_st, request):
+ inst = topology_st.standalone
+ bename = 'be1'
+ besuffix = f'o={bename}'
+ beh = BackendHandler(inst, { bename: besuffix })
+
+ def fin():
+ # Cleanup function
+ if not DEBUGGING and inst.exists() and inst.status():
+ beh.cleanup()
+
+ request.addfinalizer(fin)
+
+ # Make sure that our backend are not already present.
+ beh.cleanup()
+
+ # Then add the new backend
+ beh.setup()
+
+ index = Index(inst, f'cn=uid,cn=index,cn={bename},cn=ldbm database,cn=plugins,cn=config')
+ index.add('nsMatchingRule', '2.5.13.2')
+ reindex_task = Tasks(inst)
+ assert reindex_task.reindex(
+ suffix=besuffix,
+ attrname='uid',
+ args={TASK_WAIT: True}
+ ) == 0
+
+ topology_st.beh = beh
+ return topology_st
+
+
@pytest.mark.DS47966
def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2):
"""
@@ -105,6 +245,49 @@ def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2):
entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)")


+def test_vlv_with_mr(vlv_setup_with_uid_mr):
+ """
+ Testing vlv having specific matching rule
+
+ :id: 5e04afe2-beec-11ef-aa84-482ae39447e5
+ :setup: Standalone with uid have a matching rule index
+ :steps:
+ 1. Append vlvIndex entries then vlvSearch entry in the dse.ldif
+ 2. Restart the server
+ :expectedresults:
+ 1. Should Success.
+ 2. Should Success.
+ """
+ inst = vlv_setup_with_uid_mr.standalone
+ beh = vlv_setup_with_uid_mr.beh
+ bename, besuffix = next(iter(beh.bedict.items()))
+ vlv_searches, vlv_index = create_vlv_search_and_index(
+ inst, basedn=besuffix, bename=bename,
+ vlvsort="uid:2.5.13.2")
+ # Reindex the vlv
+ reindex_task = Tasks(inst)
+ assert reindex_task.reindex(
+ suffix=besuffix,
+ attrname=vlv_index.rdn,
+ args={TASK_WAIT: True},
+ vlv=True
+ ) == 0
+
+ inst.restart()
+ users = UserAccounts(inst, besuffix)
+ user_properties = {
+ 'uid': f'a new testuser',
+ 'cn': f'a new testuser',
+ 'sn': 'user',
+ 'uidNumber': '0',
+ 'gidNumber': '0',
+ 'homeDirectory': 'foo'
+ }
+ user = users.create(properties=user_properties)
+ user.delete()
+ assert inst.status()
+
+
if __name__ == "__main__":
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/cleanup.c b/ldap/servers/slapd/back-ldbm/cleanup.c
index 6b2e9faef..939d8bc4f 100644
--- a/ldap/servers/slapd/back-ldbm/cleanup.c
+++ b/ldap/servers/slapd/back-ldbm/cleanup.c
@@ -15,12 +15,14 @@

#include "back-ldbm.h"
#include "dblayer.h"
+#include "vlv_srch.h"

int
ldbm_back_cleanup(Slapi_PBlock *pb)
{
struct ldbminfo *li;
Slapi_Backend *be;
+ struct vlvSearch *nextp;

slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_cleanup", "ldbm backend cleaning up\n");
slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
@@ -45,6 +47,12 @@ ldbm_back_cleanup(Slapi_PBlock *pb)
return 0;
}

+ /* Release the vlv list */
+ for (struct vlvSearch *p=be->vlvSearchList; p; p=nextp) {
+ nextp = p->vlv_next;
+ vlvSearch_delete(&p);
+ }
+
/*
* We check if li is NULL. Because of an issue in how we create backends
* we share the li and plugin info between many unique backends. This causes
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 05cc5b891..6b8ce0016 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -494,8 +494,12 @@ int
dblayer_close(struct ldbminfo *li, int dbmode)
{
dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
-
- return priv->dblayer_close_fn(li, dbmode);
+ int rc = priv->dblayer_close_fn(li, dbmode);
+ if (rc == 0) {
+ /* Clean thread specific data */
+ dblayer_destroy_txn_stack();
+ }
+ return rc;
}

/* Routines for opening and closing random files in the DB_ENV.
@@ -621,6 +625,9 @@ dblayer_erase_index_file(backend *be, struct attrinfo *a, PRBool use_lock, int n
return 0;
}
struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
+ if (NULL == li) {
+ return 0;
+ }
dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;

return priv->dblayer_rm_db_file_fn(be, a, use_lock, no_force_chkpt);
@@ -1382,3 +1389,14 @@ dblayer_pop_pvt_txn(void)
}
return;
}
+
+void
+dblayer_destroy_txn_stack(void)
+{
+ /*
+ * Cleanup for the main thread to avoid false-positive leaks from libasan
+ * Note: data is freed because PR_SetThreadPrivate calls the
+ * dblayer_cleanup_txn_stack callback
+ */
+ PR_SetThreadPrivate(thread_private_txn_stack, NULL);
+}
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
index 708756d3e..70700ca1d 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
@@ -54,7 +54,7 @@ attrinfo_delete(struct attrinfo **pp)
idl_release_private(*pp);
(*pp)->ai_key_cmp_fn = NULL;
slapi_ch_free((void **)&((*pp)->ai_type));
- slapi_ch_free((void **)(*pp)->ai_index_rules);
+ charray_free((*pp)->ai_index_rules);
slapi_ch_free((void **)&((*pp)->ai_attrcrypt));
attr_done(&((*pp)->ai_sattr));
attrinfo_delete_idlistinfo(&(*pp)->ai_idlistinfo);
diff --git a/ldap/servers/slapd/back-ldbm/matchrule.c b/ldap/servers/slapd/back-ldbm/matchrule.c
index 5d516b9f8..5365e8acf 100644
--- a/ldap/servers/slapd/back-ldbm/matchrule.c
+++ b/ldap/servers/slapd/back-ldbm/matchrule.c
@@ -107,7 +107,7 @@ destroy_matchrule_indexer(Slapi_PBlock *pb)
* is destroyed
*/
int
-matchrule_values_to_keys(Slapi_PBlock *pb, struct berval **input_values, struct berval ***output_values)
+matchrule_values_to_keys(Slapi_PBlock *pb, Slapi_Value **input_values, struct berval ***output_values)
{
IFP mrINDEX = NULL;

@@ -135,10 +135,8 @@ matchrule_values_to_keys_sv(Slapi_PBlock *pb, Slapi_Value **input_values, Slapi_
slapi_pblock_get(pb, SLAPI_PLUGIN_MR_INDEX_SV_FN, &mrINDEX);
if (NULL == mrINDEX) { /* old school - does not have SV function */
int rc;
- struct berval **bvi = NULL, **bvo = NULL;
- valuearray_get_bervalarray(input_values, &bvi);
- rc = matchrule_values_to_keys(pb, bvi, &bvo);
- ber_bvecfree(bvi);
+ struct berval **bvo = NULL;
+ rc = matchrule_values_to_keys(pb, input_values, &bvo);
/* note - the indexer owns bvo and will free it when destroyed */
valuearray_init_bervalarray(bvo, output_values);
/* store output values in SV form - caller expects SLAPI_PLUGIN_MR_KEYS is Slapi_Value** */
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index d93ff9239..157788fa4 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -84,6 +84,7 @@ int dblayer_release_index_file(backend *be, struct attrinfo *a, DB *pDB);
int dblayer_erase_index_file(backend *be, struct attrinfo *a, PRBool use_lock, int no_force_chkpt);
int dblayer_get_id2entry(backend *be, DB **ppDB);
int dblayer_release_id2entry(backend *be, DB *pDB);
+void dblayer_destroy_txn_stack(void);
int dblayer_txn_init(struct ldbminfo *li, back_txn *txn);
int dblayer_txn_begin(backend *be, back_txnid parent_txn, back_txn *txn);
int dblayer_txn_begin_ext(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool use_lock);
@@ -560,7 +561,7 @@ int compute_allids_limit(Slapi_PBlock *pb, struct ldbminfo *li);
*/
int create_matchrule_indexer(Slapi_PBlock **pb, char *matchrule, char *type);
int destroy_matchrule_indexer(Slapi_PBlock *pb);
-int matchrule_values_to_keys(Slapi_PBlock *pb, struct berval **input_values, struct berval ***output_values);
+int matchrule_values_to_keys(Slapi_PBlock *pb, Slapi_Value **input_values, struct berval ***output_values);
int matchrule_values_to_keys_sv(Slapi_PBlock *pb, Slapi_Value **input_values, Slapi_Value ***output_values);

/*
diff --git a/ldap/servers/slapd/back-ldbm/sort.c b/ldap/servers/slapd/back-ldbm/sort.c
index 70ac60803..196af753f 100644
--- a/ldap/servers/slapd/back-ldbm/sort.c
+++ b/ldap/servers/slapd/back-ldbm/sort.c
@@ -536,30 +536,18 @@ compare_entries_sv(ID *id_a, ID *id_b, sort_spec *s, baggage_carrier *bc, int *e
valuearray_get_bervalarray(valueset_get_valuearray(&attr_b->a_present_values), &value_b);
} else {
/* Match rule case */
- struct berval **actual_value_a = NULL;
- struct berval **actual_value_b = NULL;
- struct berval **temp_value = NULL;
-
- valuearray_get_bervalarray(valueset_get_valuearray(&attr_a->a_present_values), &actual_value_a);
- valuearray_get_bervalarray(valueset_get_valuearray(&attr_b->a_present_values), &actual_value_b);
- matchrule_values_to_keys(this_one->mr_pb, actual_value_a, &temp_value);
- /* Now copy it, so the second call doesn't crap on it */
- value_a = slapi_ch_bvecdup(temp_value); /* Really, we'd prefer to not call the chXXX variant...*/
- matchrule_values_to_keys(this_one->mr_pb, actual_value_b, &value_b);
-
- if ((actual_value_a && !value_a) ||
- (actual_value_b && !value_b)) {
- ber_bvecfree(actual_value_a);
- ber_bvecfree(actual_value_b);
- CACHE_RETURN(&inst->inst_cache, &a);
- CACHE_RETURN(&inst->inst_cache, &b);
- *error = 1;
- return 0;
+ Slapi_Value **va_a = valueset_get_valuearray(&attr_a->a_present_values);
+ Slapi_Value **va_b = valueset_get_valuearray(&attr_b->a_present_values);
+
+ matchrule_values_to_keys(this_one->mr_pb, va_a, &value_a);
+ /* Plugin owns the memory ==> duplicate the key before next call garble it */
+ value_a = slapi_ch_bvecdup(value_a);
+ matchrule_values_to_keys(this_one->mr_pb, va_b, &value_b);
+
+ if ((va_a && !value_a) || (va_b && !value_b)) {
+ result = 0;
+ goto bail;
}
- if (actual_value_a)
- ber_bvecfree(actual_value_a);
- if (actual_value_b)
- ber_bvecfree(actual_value_b);
}
/* Compare them */
if (!order) {
@@ -582,9 +570,10 @@ compare_entries_sv(ID *id_a, ID *id_b, sort_spec *s, baggage_carrier *bc, int *e
}
/* If so, proceed to the next attribute for comparison */
}
+ *error = 0;
+bail:
CACHE_RETURN(&inst->inst_cache, &a);
CACHE_RETURN(&inst->inst_cache, &b);
- *error = 0;
return result;
}

diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c
index 121fb3667..70e0bac85 100644
--- a/ldap/servers/slapd/back-ldbm/vlv.c
+++ b/ldap/servers/slapd/back-ldbm/vlv.c
@@ -605,7 +605,7 @@ vlv_getindices(IFP callback_fn, void *param, backend *be)
* generate the same composite key, so we append the EntryID
* to ensure the uniqueness of the key.
*
- * Always creates a key. Never returns NULL.
+ * May return NULL in case of errors (typically in some configuration error cases)
*/
static struct vlv_key *
vlv_create_key(struct vlvIndex *p, struct backentry *e)
@@ -659,10 +659,8 @@ vlv_create_key(struct vlvIndex *p, struct backentry *e)
/* Matching rule. Do the magic mangling. Plugin owns the memory. */
if (p->vlv_mrpb[sortattr] != NULL) {
/* xxxPINAKI */
- struct berval **bval = NULL;
Slapi_Value **va = valueset_get_valuearray(&attr->a_present_values);
- valuearray_get_bervalarray(va, &bval);
- matchrule_values_to_keys(p->vlv_mrpb[sortattr], bval, &value);
+ matchrule_values_to_keys(p->vlv_mrpb[sortattr], va, &value);
}
}

@@ -779,6 +777,13 @@ do_vlv_update_index(back_txn *txn, struct ldbminfo *li __attribute__((unused)),
}

key = vlv_create_key(pIndex, entry);
+ if (key == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, "vlv_create_key", "Unable to generate vlv %s index key."
+ " There may be a configuration issue.\n", pIndex->vlv_name);
+ dblayer_release_index_file(be, pIndex->vlv_attrinfo, db);
+ return rc;
+ }
+
if (NULL != txn) {
db_txn = txn->back_txn_txn;
} else {
@@ -949,11 +954,11 @@ vlv_create_matching_rule_value(Slapi_PBlock *pb, struct berval *original_value)
struct berval **value = NULL;
if (pb != NULL) {
struct berval **outvalue = NULL;
- struct berval *invalue[2];
- invalue[0] = original_value; /* jcm: cast away const */
- invalue[1] = NULL;
+ Slapi_Value v_in = {0};
+ Slapi_Value *va_in[2] = { &v_in, NULL };
+ slapi_value_init_berval(&v_in, original_value);
/* The plugin owns the memory it returns in outvalue */
- matchrule_values_to_keys(pb, invalue, &outvalue);
+ matchrule_values_to_keys(pb, va_in, &outvalue);
if (outvalue != NULL) {
value = slapi_ch_bvecdup(outvalue);
}
@@ -1610,11 +1615,8 @@ retry:
PRBool needFree = PR_FALSE;

if (sort_control->mr_pb != NULL) {
- struct berval **tmp_entry_value = NULL;
-
- valuearray_get_bervalarray(csn_value, &tmp_entry_value);
/* Matching rule. Do the magic mangling. Plugin owns the memory. */
- matchrule_values_to_keys(sort_control->mr_pb, /* xxxPINAKI needs modification attr->a_vals */ tmp_entry_value, &entry_value);
+ matchrule_values_to_keys(sort_control->mr_pb, csn_value, &entry_value);
} else {
valuearray_get_bervalarray(csn_value, &entry_value);
needFree = PR_TRUE; /* entry_value is a copy */
diff --git a/ldap/servers/slapd/back-ldbm/vlv_srch.c b/ldap/servers/slapd/back-ldbm/vlv_srch.c
index fe1208d59..11d1c715b 100644
--- a/ldap/servers/slapd/back-ldbm/vlv_srch.c
+++ b/ldap/servers/slapd/back-ldbm/vlv_srch.c
@@ -203,6 +203,9 @@ vlvSearch_delete(struct vlvSearch **ppvs)
{
if (ppvs != NULL && *ppvs != NULL) {
struct vlvIndex *pi, *ni;
+ if ((*ppvs)->vlv_e) {
+ slapi_entry_free((struct slapi_entry *)((*ppvs)->vlv_e));
+ }
slapi_sdn_free(&((*ppvs)->vlv_dn));
slapi_ch_free((void **)&((*ppvs)->vlv_name));
slapi_sdn_free(&((*ppvs)->vlv_base));
@@ -217,7 +220,6 @@ vlvSearch_delete(struct vlvSearch **ppvs)
pi = ni;
}
slapi_ch_free((void **)ppvs);
- *ppvs = NULL;
}
}

diff --git a/ldap/servers/slapd/generation.c b/ldap/servers/slapd/generation.c
index c4f20f793..89f097322 100644
--- a/ldap/servers/slapd/generation.c
+++ b/ldap/servers/slapd/generation.c
@@ -93,9 +93,13 @@ get_server_dataversion()
lenstr *l = NULL;
Slapi_Backend *be;
char *cookie;
+ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

+ /* Serialize to avoid race condition */
+ pthread_mutex_lock(&mutex);
/* we already cached the copy - just return it */
if (server_dataversion_id != NULL) {
+ pthread_mutex_unlock(&mutex);
return server_dataversion_id;
}

@@ -130,5 +134,6 @@ get_server_dataversion()
server_dataversion_id = slapi_ch_strdup(l->ls_buf);
}
lenstr_free(&l);
+ pthread_mutex_unlock(&mutex);
return server_dataversion_id;
}
diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c
index 13f76fe52..6cf88b7de 100644
--- a/ldap/servers/slapd/plugin_mr.c
+++ b/ldap/servers/slapd/plugin_mr.c
@@ -391,28 +391,18 @@ mr_wrap_mr_index_sv_fn(Slapi_PBlock *pb)
return rc;
}

-/* this function takes SLAPI_PLUGIN_MR_VALUES as struct berval ** and
+/* this function takes SLAPI_PLUGIN_MR_VALUES as Slapi_Value ** and
returns SLAPI_PLUGIN_MR_KEYS as struct berval **
*/
static int
mr_wrap_mr_index_fn(Slapi_PBlock *pb)
{
int rc = -1;
- struct berval **in_vals = NULL;
struct berval **out_vals = NULL;
struct mr_private *mrpriv = NULL;
- Slapi_Value **in_vals_sv = NULL;
Slapi_Value **out_vals_sv = NULL;

- slapi_pblock_get(pb, SLAPI_PLUGIN_MR_VALUES, &in_vals); /* get bervals */
- /* convert bervals to sv ary */
- valuearray_init_bervalarray(in_vals, &in_vals_sv);
- slapi_pblock_set(pb, SLAPI_PLUGIN_MR_VALUES, in_vals_sv); /* use sv */
rc = mr_wrap_mr_index_sv_fn(pb);
- /* clean up in_vals_sv */
- valuearray_free(&in_vals_sv);
- /* restore old in_vals */
- slapi_pblock_set(pb, SLAPI_PLUGIN_MR_VALUES, in_vals);
/* get result sv keys */
slapi_pblock_get(pb, SLAPI_PLUGIN_MR_KEYS, &out_vals_sv);
/* convert to bvec */
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
index 9acced205..cee073ea7 100644
--- a/src/lib389/lib389/backend.py
+++ b/src/lib389/lib389/backend.py
@@ -1029,6 +1029,16 @@ class Backends(DSLdapObjects):
for be in sorted(self.list(), key=lambda be: len(be.get_suffix()), reverse=True):
be.delete()

+ def get_backend(self, suffix):
+ """
+ Return the backend associated with the provided suffix.
+ """
+ suffix_l = suffix.lower()
+ for be in self.list():
+ if be.get_attr_val_utf8_l('nsslapd-suffix') == suffix_l:
+ return be
+ return None
+

class DatabaseConfig(DSLdapObject):
"""Backend Database configuration
--
2.48.1

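The test half of the patch above boils down to one recipe: attach a matching-rule OID to an ordinary index, then reindex. A condensed sketch of that recipe, using only calls that appear in the patch (Backends.get_backend is the helper the patch itself adds); "inst" and "suffix" are assumed to exist, and the TASK_WAIT import location mirrors what current lib389 trees use:

from lib389.backend import Backends
from lib389.tasks import Tasks
from lib389.properties import TASK_WAIT  # assumption: where TASK_WAIT lives

def add_matching_rule_and_reindex(inst, suffix, attrs=('uid', 'cn'), mr_oid='2.5.13.2'):
    """Configure caseIgnoreMatch (2.5.13.2) on each attr's index, then reindex."""
    be = Backends(inst).get_backend(suffix)
    for attr in attrs:
        index = be.get_index(attr)
        index.replace('nsMatchingRule', mr_oid)
    # A reindex that completes without error is the pass condition of the test
    assert Tasks(inst).reindex(suffix=suffix, args={TASK_WAIT: True}) == 0

Before the C fixes above, a reindex or vlv sort over such an index could crash or corrupt data when the sort filter used an extended matching rule.
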
@ -1,230 +0,0 @@
From bd2829d04491556c35a0b36b591c09a69baf6546 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 11 Dec 2023 11:58:40 +0100
Subject: [PATCH] Issue 6004 - idletimeout may be ignored (#6005)

* Issue 6004 - idletimeout may be ignored

Problem: idletimeout is still not handled when binding as non root (unless there is some activity
on another connection)
Fix:
Add a slapi_eq_repeat_rel handler that walks all active connections every second and checks whether the timeout has expired.
Note about CI test:
Notice that idletimeout is never enforced for connections bound as root (i.e. cn=directory manager).

Issue #6004

Reviewed by: @droideck, @tbordaz (Thanks!)

(cherry picked from commit 86b5969acbe124eec8c89bcf1ab2156b2b140c17)
(cherry picked from commit bdb0a72b4953678e5418406b3c202dfa2c7469a2)
(cherry picked from commit 61cebc191cd4090072dda691b9956dbde4cf7c48)
---
.../tests/suites/config/regression_test.py | 82 ++++++++++++++++++-
ldap/servers/slapd/daemon.c | 52 +++++++++++-
2 files changed, 128 insertions(+), 6 deletions(-)

diff --git a/dirsrvtests/tests/suites/config/regression_test.py b/dirsrvtests/tests/suites/config/regression_test.py
index 0000dd82d..8dbba8cd2 100644
--- a/dirsrvtests/tests/suites/config/regression_test.py
+++ b/dirsrvtests/tests/suites/config/regression_test.py
@@ -6,20 +6,49 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
+import os
import logging
import pytest
+import time
from lib389.utils import *
from lib389.dseldif import DSEldif
-from lib389.config import LDBMConfig
+from lib389.config import BDB_LDBMConfig, LDBMConfig, Config
from lib389.backend import Backends
from lib389.topologies import topology_st as topo
+from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
+from lib389._constants import DEFAULT_SUFFIX, PASSWORD, DN_DM

pytestmark = pytest.mark.tier0

logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)

+DEBUGGING = os.getenv("DEBUGGING", default=False)
CUSTOM_MEM = '9100100100'
+IDLETIMEOUT = 5
+DN_TEST_USER = f'uid={TEST_USER_PROPERTIES["uid"]},ou=People,{DEFAULT_SUFFIX}'
+
+
+@pytest.fixture(scope="module")
+def idletimeout_topo(topo, request):
+ """Create an instance with a test user and set idletimeout"""
+ inst = topo.standalone
+ config = Config(inst)
+
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+ user = users.create(properties={
+ **TEST_USER_PROPERTIES,
+ 'userpassword' : PASSWORD,
+ })
+ config.replace('nsslapd-idletimeout', str(IDLETIMEOUT))
+
+ def fin():
+ if not DEBUGGING:
+ config.reset('nsslapd-idletimeout')
+ user.delete()
+
+ request.addfinalizer(fin)
+ return topo


# Function to return value of available memory in kb
@@ -79,7 +108,7 @@ def test_maxbersize_repl(topo):
nsslapd-errorlog-logmaxdiskspace are set in certain order

:id: 743e912c-2be4-4f5f-9c2a-93dcb18f51a0
- :setup: MMR with two suppliers
+ :setup: Standalone Instance
:steps:
1. Stop the instance
2. Set nsslapd-errorlog-maxlogsize before/after
@@ -112,3 +141,52 @@ def test_maxbersize_repl(topo):
log.info("Assert no init_dse_file errors in the error log")
assert not inst.ds_error_log.match('.*ERR - init_dse_file.*')

+
+def test_bdb_config(topo):
+ """Check that bdb config entry exists
+
+ :id: edbc6f54-7c98-11ee-b1c0-482ae39447e5
+ :setup: standalone
+ :steps:
+ 1. Check that bdb config instance exists.
+ :expectedresults:
+ 1. Success
+ """
+
+ inst = topo.standalone
+ assert BDB_LDBMConfig(inst).exists()
+
+
+@pytest.mark.parametrize("dn,expected_result", [(DN_TEST_USER, True), (DN_DM, False)])
+def test_idletimeout(idletimeout_topo, dn, expected_result):
+ """Check that bdb config entry exists
+
+ :id: b20f2826-942a-11ee-827b-482ae39447e5
+ :parametrized: yes
+ :setup: Standalone Instance with test user and idletimeout
+ :steps:
+ 1. Open new ldap connection
+ 2. Bind with the provided dn
+ 3. Wait longer than idletimeout
+ 4. Try to bind again the provided dn and check if
+ connection is closed or not.
+ 5. Check if result is the expected one.
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ """
+
+ inst = idletimeout_topo.standalone
+
+ l = ldap.initialize(f'ldap://localhost:{inst.port}')
+ l.bind_s(dn, PASSWORD)
+ time.sleep(IDLETIMEOUT+1)
+ try:
+ l.bind_s(dn, PASSWORD)
+ result = False
+ except ldap.SERVER_DOWN:
+ result = True
+ assert expected_result == result
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 57e07e5f5..6df109760 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -68,6 +68,8 @@
#define SLAPD_ACCEPT_WAKEUP_TIMER 250
#endif

+#define MILLISECONDS_PER_SECOND 1000
+
int slapd_wakeup_timer = SLAPD_WAKEUP_TIMER; /* time in ms to wakeup */
int slapd_accept_wakeup_timer = SLAPD_ACCEPT_WAKEUP_TIMER; /* time in ms to wakeup */
#ifdef notdef /* GGOODREPL */
@@ -1045,6 +1047,48 @@ slapd_sockets_ports_free(daemon_ports_t *ports_info)
#endif
}

+/*
+ * Tells if idle timeout has expired
+ */
+static inline int __attribute__((always_inline))
+has_idletimeout_expired(Connection *c, time_t curtime)
+{
+ return (c->c_state != CONN_STATE_FREE && !c->c_gettingber &&
+ c->c_idletimeout > 0 && NULL == c->c_ops &&
+ curtime - c->c_idlesince >= c->c_idletimeout);
+}
+
+/*
+ * slapi_eq_repeat_rel callback that checks that idletimeout has not expired.
+ */
+void
+check_idletimeout(time_t when __attribute__((unused)), void *arg __attribute__((unused)) )
+{
+ Connection_Table *ct = the_connection_table;
+ time_t curtime = slapi_current_rel_time_t();
+ /* Walk all active connections of all connection listeners */
+ for (int list_num = 0; list_num < ct->list_num; list_num++) {
+ for (Connection *c = connection_table_get_first_active_connection(ct, list_num);
+ c != NULL; c = connection_table_get_next_active_connection(ct, c)) {
+ if (!has_idletimeout_expired(c, curtime)) {
+ continue;
+ }
+ /* Looks like idletimeout has expired, lets acquire the lock
+ * and double check.
+ */
+ if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
+ continue;
+ }
+ if (has_idletimeout_expired(c, curtime)) {
+ /* idle timeout has expired */
+ disconnect_server_nomutex(c, c->c_connid, -1,
+ SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
+ }
+ pthread_mutex_unlock(&(c->c_mutex));
+ }
+ }
+}
+
void
slapd_daemon(daemon_ports_t *ports)
{
@@ -1258,7 +1302,9 @@ slapd_daemon(daemon_ports_t *ports)
"MAINPID=%lu",
(unsigned long)getpid());
#endif
-
+ slapi_eq_repeat_rel(check_idletimeout, NULL,
+ slapi_current_rel_time_t(),
+ MILLISECONDS_PER_SECOND);
/* The meat of the operation is in a loop on a call to select */
while (!g_get_shutdown()) {
int select_return = 0;
@@ -1734,9 +1780,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused
disconnect_server_nomutex(c, c->c_connid, -1,
SLAPD_DISCONNECT_POLL, EPIPE);
}
- } else if (c->c_idletimeout > 0 &&
- (curtime - c->c_idlesince) >= c->c_idletimeout &&
- NULL == c->c_ops) {
+ } else if (has_idletimeout_expired(c, curtime)) {
/* idle timeout */
disconnect_server_nomutex(c, c->c_connid, -1,
SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
--
2.48.1

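The new CI test reduces to a small probe: bind, stay idle past nsslapd-idletimeout, then try to use the connection again. A condensed, standalone version of that probe (URI, DN and password are placeholders; only python-ldap is required):

import time
import ldap

def closed_after_idle(uri, dn, password, idletimeout):
    """Return True if the server closed the connection after the idle period."""
    conn = ldap.initialize(uri)
    conn.bind_s(dn, password)
    time.sleep(idletimeout + 1)
    try:
        conn.bind_s(dn, password)  # any further operation would do
        return False               # server kept the idle connection open
    except ldap.SERVER_DOWN:
        return True                # server enforced the idle timeout

Per the commit message, the expected result is True for a regular user and False for cn=Directory Manager, which is exempt from idletimeout.
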
@ -1,69 +0,0 @@
From e9fe6e074130406328b8e932a5c2efa814d190a0 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 5 Feb 2025 09:41:30 +0100
Subject: [PATCH] Issue 6004 - (2nd) idletimeout may be ignored (#6569)

Problem:
Multiple listener threads were implemented in 2.x and later.
This is missing in 1.4.3, so the cherry-pick had to be adapted.
Fix:
Skip the loop over listeners.

Issue #6004

Reviewed by: Jamie Chapman (Thanks !)
---
ldap/servers/slapd/daemon.c | 36 +++++++++++++++++-------------------
1 file changed, 17 insertions(+), 19 deletions(-)

diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 6df109760..bef75e4a3 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1066,26 +1066,24 @@ check_idletimeout(time_t when __attribute__((unused)), void *arg __attribute__((
{
Connection_Table *ct = the_connection_table;
time_t curtime = slapi_current_rel_time_t();
- /* Walk all active connections of all connection listeners */
- for (int list_num = 0; list_num < ct->list_num; list_num++) {
- for (Connection *c = connection_table_get_first_active_connection(ct, list_num);
- c != NULL; c = connection_table_get_next_active_connection(ct, c)) {
- if (!has_idletimeout_expired(c, curtime)) {
- continue;
- }
- /* Looks like idletimeout has expired, lets acquire the lock
- * and double check.
- */
- if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
- continue;
- }
- if (has_idletimeout_expired(c, curtime)) {
- /* idle timeout has expired */
- disconnect_server_nomutex(c, c->c_connid, -1,
- SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
- }
- pthread_mutex_unlock(&(c->c_mutex));
+ /* Walk all active connections */
+ for (Connection *c = connection_table_get_first_active_connection(ct);
+ c != NULL; c = connection_table_get_next_active_connection(ct, c)) {
+ if (!has_idletimeout_expired(c, curtime)) {
+ continue;
+ }
+ /* Looks like idletimeout has expired, lets acquire the lock
+ * and double check.
+ */
+ if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
+ continue;
+ }
+ if (has_idletimeout_expired(c, curtime)) {
+ /* idle timeout has expired */
+ disconnect_server_nomutex(c, c->c_connid, -1,
+ SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
}
+ pthread_mutex_unlock(&(c->c_mutex));
}
}

--
2.48.1

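Both versions of check_idletimeout rely on the same concurrency idiom: a cheap unlocked pre-check, a non-blocking trylock, and a re-check under the lock. The idiom reduced to a Python sketch (names are illustrative, not from the source):

import threading

def try_expire(lock: threading.Lock, has_expired, disconnect):
    """Non-blocking expiry check, mirroring the trylock pattern in the C fix."""
    if not has_expired():                  # cheap pre-check, no lock held
        return
    if not lock.acquire(blocking=False):   # analogue of pthread_mutex_trylock
        return                             # connection is busy; next sweep retries
    try:
        if has_expired():                  # re-check: state may have changed
            disconnect()
    finally:
        lock.release()

Skipping busy connections instead of blocking keeps the once-per-second sweeper from ever stalling behind an in-flight operation.
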
@ -1,52 +0,0 @@
From b2edc371c5ca4fd24ef469c64829c48824098e7f Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 8 Jan 2025 12:57:52 -0500
Subject: [PATCH] Issue 6485 - Fix double free in USN cleanup task

Description:

ASAN report shows double free of bind dn in the USN cleanup task data. The bind
dn was passed as a reference so it should never have to be freed by the cleanup
task.

Relates: https://github.com/389ds/389-ds-base/issues/6485

Reviewed by: tbordaz(Thanks!)
---
ldap/servers/plugins/usn/usn_cleanup.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/ldap/servers/plugins/usn/usn_cleanup.c b/ldap/servers/plugins/usn/usn_cleanup.c
index bdb55e6b1..7eaf0f88f 100644
--- a/ldap/servers/plugins/usn/usn_cleanup.c
+++ b/ldap/servers/plugins/usn/usn_cleanup.c
@@ -240,7 +240,7 @@ usn_cleanup_add(Slapi_PBlock *pb,
char *suffix = NULL;
char *backend = NULL;
char *maxusn = NULL;
- char *bind_dn;
+ char *bind_dn = NULL;
struct usn_cleanup_data *cleanup_data = NULL;
int rv = SLAPI_DSE_CALLBACK_OK;
Slapi_Task *task = NULL;
@@ -323,8 +323,7 @@ usn_cleanup_add(Slapi_PBlock *pb,
suffix = NULL; /* don't free in this function */
cleanup_data->maxusn_to_delete = maxusn;
maxusn = NULL; /* don't free in this function */
- cleanup_data->bind_dn = bind_dn;
- bind_dn = NULL; /* don't free in this function */
+ cleanup_data->bind_dn = slapi_ch_strdup(bind_dn);
slapi_task_set_data(task, cleanup_data);

/* start the USN tombstone cleanup task as a separate thread */
@@ -363,7 +362,6 @@ usn_cleanup_task_destructor(Slapi_Task *task)
slapi_ch_free_string(&mydata->suffix);
slapi_ch_free_string(&mydata->maxusn_to_delete);
slapi_ch_free_string(&mydata->bind_dn);
- /* Need to cast to avoid a compiler warning */
slapi_ch_free((void **)&mydata);
}
}
--
2.48.1

@ -1,38 +0,0 @@
From 679262c0c292413851d2d004b588ecfd7d91c85a Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Tue, 11 Feb 2025 18:06:34 +0000
Subject: [PATCH] Issue 5841 - dsconf incorrectly setting up Pass-Through
Authentication (#6601)

Bug description:
During init, PAMPassThroughAuthConfigs defines an "objectclass=nsslapdplugin"
plugin object. During filter creation, dsconf fails as objectclass=nsslapdplugin
is not present in the PAM PT config entry. This objectclass has been removed in
all other branches; branch 1.4.3 was skipped as there were cherry-pick conflicts.

Fix description:
Remove nsslapdplugin from the plugin object's objectclass list.

Fixes: https://github.com/389ds/389-ds-base/issues/5841

Reviewed by: @progier389 (Thank you)
---
src/lib389/lib389/plugins.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
index 185398e5b..25b49dae4 100644
--- a/src/lib389/lib389/plugins.py
+++ b/src/lib389/lib389/plugins.py
@@ -1579,7 +1579,7 @@ class PAMPassThroughAuthConfigs(DSLdapObjects):

def __init__(self, instance, basedn="cn=PAM Pass Through Auth,cn=plugins,cn=config"):
super(PAMPassThroughAuthConfigs, self).__init__(instance)
- self._objectclasses = ['top', 'extensibleObject', 'nsslapdplugin', 'pamConfig']
+ self._objectclasses = ['top', 'extensibleObject', 'pamConfig']
self._filterattrs = ['cn']
self._scope = ldap.SCOPE_ONELEVEL
self._childobject = PAMPassThroughAuthConfig
--
2.48.1

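A quick way to see the effect of the fix: with the stale objectclass dropped from the class definition, listing PAM pass-through configurations no longer builds a filter that existing entries cannot match. A sketch, assuming "inst" is a connected instance with the plugin configured:

from lib389.plugins import PAMPassThroughAuthConfigs

# Enumerate PAM PTA config entries; before the fix this search returned
# nothing because the generated filter required objectclass=nsslapdplugin.
for config in PAMPassThroughAuthConfigs(inst).list():
    print(config.dn)
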
File diff suppressed because it is too large
@ -1,319 +0,0 @@
From 7d534efdcd96b13524dae587c3c5994ed01924ab Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Fri, 16 Feb 2024 13:52:36 -0800
Subject: [PATCH] Issue 6067 - Improve dsidm CLI No Such Entry handling (#6079)

Description: Add additional error processing to dsidm CLI tool for when basedn
or OU subentries are absent.

Related: https://github.com/389ds/389-ds-base/issues/6067

Reviewed by: @vashirov (Thanks!)
---
src/lib389/cli/dsidm | 21 ++++++++-------
src/lib389/lib389/cli_idm/__init__.py | 38 ++++++++++++++++++++++++++-
src/lib389/lib389/cli_idm/account.py | 4 +--
src/lib389/lib389/cli_idm/service.py | 4 ++-
src/lib389/lib389/idm/group.py | 10 ++++---
src/lib389/lib389/idm/posixgroup.py | 5 ++--
src/lib389/lib389/idm/services.py | 5 ++--
src/lib389/lib389/idm/user.py | 5 ++--
8 files changed, 67 insertions(+), 25 deletions(-)

diff --git a/src/lib389/cli/dsidm b/src/lib389/cli/dsidm
index 1b739b103..970973f4f 100755
--- a/src/lib389/cli/dsidm
+++ b/src/lib389/cli/dsidm
@@ -2,7 +2,7 @@

# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
-# Copyright (C) 2023 Red Hat, Inc.
+# Copyright (C) 2024 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -19,6 +19,7 @@ import argparse
import argcomplete
from lib389.utils import get_instance_list, instance_choices
from lib389._constants import DSRC_HOME
+from lib389.cli_idm import _get_basedn_arg
from lib389.cli_idm import account as cli_account
from lib389.cli_idm import initialise as cli_init
from lib389.cli_idm import organizationalunit as cli_ou
@@ -117,14 +118,6 @@ if __name__ == '__main__':
parser.print_help()
sys.exit(1)

- if dsrc_inst['basedn'] is None:
- errmsg = "Must provide a basedn!"
- if args.json:
- sys.stderr.write('{"desc": "%s"}\n' % errmsg)
- else:
- log.error(errmsg)
- sys.exit(1)
-
if not args.verbose:
signal.signal(signal.SIGINT, signal_handler)

@@ -135,7 +128,15 @@ if __name__ == '__main__':
result = False
try:
inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose, args=args)
- result = args.func(inst, dsrc_inst['basedn'], log, args)
+ basedn = _get_basedn_arg(inst, args, log, msg="Enter basedn")
+ if basedn is None:
+ errmsg = "Must provide a basedn!"
+ if args.json:
+ sys.stderr.write('{"desc": "%s"}\n' % errmsg)
+ else:
+ log.error(errmsg)
+ sys.exit(1)
+ result = args.func(inst, basedn, log, args)
if args.verbose:
log.info("Command successful.")
except Exception as e:
diff --git a/src/lib389/lib389/cli_idm/__init__.py b/src/lib389/lib389/cli_idm/__init__.py
index 0dab54847..e3622246d 100644
--- a/src/lib389/lib389/cli_idm/__init__.py
+++ b/src/lib389/lib389/cli_idm/__init__.py
@@ -1,15 +1,30 @@
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
-# Copyright (C) 2023 Red Hat, Inc.
+# Copyright (C) 2024 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---

+import sys
import ldap
from getpass import getpass
import json
+from lib389._mapped_object import DSLdapObject
+from lib389.cli_base import _get_dn_arg
+from lib389.idm.user import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_USER
+from lib389.idm.group import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_GROUP
+from lib389.idm.posixgroup import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_POSIXGROUP
+from lib389.idm.services import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_SERVICES
+
+# The key is module name, the value is default RDN
+BASEDN_RDNS = {
+ 'user': DEFAULT_BASEDN_RDN_USER,
+ 'group': DEFAULT_BASEDN_RDN_GROUP,
+ 'posixgroup': DEFAULT_BASEDN_RDN_POSIXGROUP,
+ 'service': DEFAULT_BASEDN_RDN_SERVICES,
+}


def _get_arg(args, msg=None):
@@ -37,6 +52,27 @@ def _get_args(args, kws):
return kwargs


+def _get_basedn_arg(inst, args, log, msg=None):
+ basedn_arg = _get_dn_arg(args.basedn, msg="Enter basedn")
+ if not DSLdapObject(inst, basedn_arg).exists():
+ raise ValueError(f'The base DN "{basedn_arg}" does not exist.')
+
+ # Get the RDN based on the last part of the module name if applicable
+ # (lib389.cli_idm.user -> user)
+ try:
+ command_name = args.func.__module__.split('.')[-1]
+ object_rdn = BASEDN_RDNS[command_name]
+ # Check if the DN for our command exists
+ command_basedn = f'{object_rdn},{basedn_arg}'
+ if not DSLdapObject(inst, command_basedn).exists():
+ errmsg = f'The DN "{command_basedn}" does not exist.'
+ errmsg += f' It is required for "{command_name}" subcommand. Please create it first.'
+ raise ValueError(errmsg)
+ except KeyError:
+ pass
+ return basedn_arg
+
+
# This is really similar to get_args, but generates from an array
def _get_attributes(args, attrs):
kwargs = {}
diff --git a/src/lib389/lib389/cli_idm/account.py b/src/lib389/lib389/cli_idm/account.py
index 5d7b9cc77..15f766588 100644
--- a/src/lib389/lib389/cli_idm/account.py
+++ b/src/lib389/lib389/cli_idm/account.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2023, Red Hat inc,
+# Copyright (C) 2024, Red Hat inc,
# Copyright (C) 2018, William Brown <william@blackhats.net.au>
# All rights reserved.
#
@@ -91,7 +91,6 @@ def entry_status(inst, basedn, log, args):


def subtree_status(inst, basedn, log, args):
- basedn = _get_dn_arg(args.basedn, msg="Enter basedn to check")
filter = ""
scope = ldap.SCOPE_SUBTREE
epoch_inactive_time = None
@@ -121,7 +120,6 @@ def subtree_status(inst, basedn, log, args):


def bulk_update(inst, basedn, log, args):
- basedn = _get_dn_arg(args.basedn, msg="Enter basedn to search")
search_filter = "(objectclass=*)"
scope = ldap.SCOPE_SUBTREE
scope_str = "sub"
diff --git a/src/lib389/lib389/cli_idm/service.py b/src/lib389/lib389/cli_idm/service.py
index c62fc12d1..c2b2c8c84 100644
--- a/src/lib389/lib389/cli_idm/service.py
+++ b/src/lib389/lib389/cli_idm/service.py
@@ -57,7 +57,9 @@ def rename(inst, basedn, log, args, warn=True):
_generic_rename(inst, basedn, log.getChild('_generic_rename'), MANY, rdn, args)

def create_parser(subparsers):
- service_parser = subparsers.add_parser('service', help='Manage service accounts', formatter_class=CustomHelpFormatter)
+ service_parser = subparsers.add_parser('service',
+ help='Manage service accounts. The organizationalUnit (by default "ou=Services") '
+ 'needs to exist prior to managing service accounts.', formatter_class=CustomHelpFormatter)

subcommands = service_parser.add_subparsers(help='action')

diff --git a/src/lib389/lib389/idm/group.py b/src/lib389/lib389/idm/group.py
index 1b60a1f51..2cf2c7b23 100644
--- a/src/lib389/lib389/idm/group.py
+++ b/src/lib389/lib389/idm/group.py
@@ -1,6 +1,6 @@
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
-# Copyright (C) 2023 Red Hat, Inc.
+# Copyright (C) 2024 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -16,6 +16,8 @@ MUST_ATTRIBUTES = [
'cn',
]
RDN = 'cn'
+DEFAULT_BASEDN_RDN = 'ou=Groups'
+DEFAULT_BASEDN_RDN_ADMIN_GROUPS = 'ou=People'


class Group(DSLdapObject):
@@ -93,7 +95,7 @@ class Groups(DSLdapObjects):
:type basedn: str
"""

- def __init__(self, instance, basedn, rdn='ou=Groups'):
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
super(Groups, self).__init__(instance)
self._objectclasses = [
'groupOfNames',
@@ -140,7 +142,7 @@ class UniqueGroup(DSLdapObject):
class UniqueGroups(DSLdapObjects):
# WARNING!!!
# Use group, not unique group!!!
- def __init__(self, instance, basedn, rdn='ou=Groups'):
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
super(UniqueGroups, self).__init__(instance)
self._objectclasses = [
'groupOfUniqueNames',
@@ -203,7 +205,7 @@ class nsAdminGroups(DSLdapObjects):
:type rdn: str
"""

- def __init__(self, instance, basedn, rdn='ou=People'):
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN_ADMIN_GROUPS):
super(nsAdminGroups, self).__init__(instance)
self._objectclasses = [
'nsAdminGroup'
diff --git a/src/lib389/lib389/idm/posixgroup.py b/src/lib389/lib389/idm/posixgroup.py
index d1debcf12..45735c579 100644
--- a/src/lib389/lib389/idm/posixgroup.py
+++ b/src/lib389/lib389/idm/posixgroup.py
@@ -1,6 +1,6 @@
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
-# Copyright (C) 2023 Red Hat, Inc.
+# Copyright (C) 2024 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -17,6 +17,7 @@ MUST_ATTRIBUTES = [
'gidNumber',
]
RDN = 'cn'
+DEFAULT_BASEDN_RDN = 'ou=Groups'


class PosixGroup(DSLdapObject):
@@ -72,7 +73,7 @@ class PosixGroups(DSLdapObjects):
:type basedn: str
"""

- def __init__(self, instance, basedn, rdn='ou=Groups'):
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
|
||||
super(PosixGroups, self).__init__(instance)
|
||||
self._objectclasses = [
|
||||
'groupOfNames',
|
||||
diff --git a/src/lib389/lib389/idm/services.py b/src/lib389/lib389/idm/services.py
|
||||
index d1e5b4693..e750a32c4 100644
|
||||
--- a/src/lib389/lib389/idm/services.py
|
||||
+++ b/src/lib389/lib389/idm/services.py
|
||||
@@ -1,6 +1,6 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2021 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -16,6 +16,7 @@ RDN = 'cn'
|
||||
MUST_ATTRIBUTES = [
|
||||
'cn',
|
||||
]
|
||||
+DEFAULT_BASEDN_RDN = 'ou=Services'
|
||||
|
||||
class ServiceAccount(Account):
|
||||
"""A single instance of Service entry
|
||||
@@ -59,7 +60,7 @@ class ServiceAccounts(DSLdapObjects):
|
||||
:type basedn: str
|
||||
"""
|
||||
|
||||
- def __init__(self, instance, basedn, rdn='ou=Services'):
|
||||
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
|
||||
super(ServiceAccounts, self).__init__(instance)
|
||||
self._objectclasses = [
|
||||
'applicationProcess',
|
||||
diff --git a/src/lib389/lib389/idm/user.py b/src/lib389/lib389/idm/user.py
|
||||
index 1206a6e08..3b21ccf1c 100644
|
||||
--- a/src/lib389/lib389/idm/user.py
|
||||
+++ b/src/lib389/lib389/idm/user.py
|
||||
@@ -1,6 +1,6 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2023 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -23,6 +23,7 @@ MUST_ATTRIBUTES = [
|
||||
'homeDirectory',
|
||||
]
|
||||
RDN = 'uid'
|
||||
+DEFAULT_BASEDN_RDN = 'ou=People'
|
||||
|
||||
TEST_USER_PROPERTIES = {
|
||||
'uid': 'testuser',
|
||||
@@ -201,7 +202,7 @@ class UserAccounts(DSLdapObjects):
|
||||
:type rdn: str
|
||||
"""
|
||||
|
||||
- def __init__(self, instance, basedn, rdn='ou=People'):
|
||||
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
|
||||
super(UserAccounts, self).__init__(instance)
|
||||
self._objectclasses = [
|
||||
'account',
|
||||
--
|
||||
2.48.1
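
The hunks above key each dsidm subcommand's default container off the name of the CLI module that implements it. As a rough illustration of that lookup (not part of the patch; the inputs below are hypothetical):

```python
# Sketch of the container lookup performed by _get_basedn_arg(); the
# mapping mirrors BASEDN_RDNS above, the inputs are made up.
BASEDN_RDNS = {
    'user': 'ou=People',
    'group': 'ou=Groups',
    'posixgroup': 'ou=Groups',
    'service': 'ou=Services',
}

def command_container_dn(module_name, basedn):
    """Return the container DN a subcommand expects, or None if unmapped."""
    command_name = module_name.split('.')[-1]  # lib389.cli_idm.user -> user
    rdn = BASEDN_RDNS.get(command_name)
    return f'{rdn},{basedn}' if rdn else None

print(command_container_dn('lib389.cli_idm.user', 'dc=example,dc=com'))
# ou=People,dc=example,dc=com
```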

@ -1,52 +0,0 @@
From ee03e8443a108cff0cc4c7a03962fdc3a1fbf94d Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Wed, 16 Oct 2024 19:24:55 -0700
Subject: [PATCH] Issue 6067 - Update dsidm to prioritize basedn from .dsrc
 over interactive input (#6362)

Description: Modifies dsidm CLI tool to check for the basedn in the .dsrc configuration file
when the -b option is not provided.
Previously, users were required to always specify the basedn interactively if -b was omitted,
even if it was available in .dsrc.
Now, the basedn is determined by first checking the -b option, then the .dsrc file, and finally
prompting the user if neither is set.

Related: https://github.com/389ds/389-ds-base/issues/6067

Reviewed by: @Firstyear (Thanks!)
---
 src/lib389/cli/dsidm                  | 2 +-
 src/lib389/lib389/cli_idm/__init__.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/lib389/cli/dsidm b/src/lib389/cli/dsidm
index 970973f4f..d318664bc 100755
--- a/src/lib389/cli/dsidm
+++ b/src/lib389/cli/dsidm
@@ -128,7 +128,7 @@ if __name__ == '__main__':
    result = False
    try:
        inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose, args=args)
-        basedn = _get_basedn_arg(inst, args, log, msg="Enter basedn")
+        basedn = _get_basedn_arg(inst, args, dsrc_inst['basedn'], log, msg="Enter basedn")
        if basedn is None:
            errmsg = "Must provide a basedn!"
            if args.json:
diff --git a/src/lib389/lib389/cli_idm/__init__.py b/src/lib389/lib389/cli_idm/__init__.py
index e3622246d..1f3e2dc86 100644
--- a/src/lib389/lib389/cli_idm/__init__.py
+++ b/src/lib389/lib389/cli_idm/__init__.py
@@ -52,8 +52,8 @@ def _get_args(args, kws):
    return kwargs


-def _get_basedn_arg(inst, args, log, msg=None):
-    basedn_arg = _get_dn_arg(args.basedn, msg="Enter basedn")
+def _get_basedn_arg(inst, args, basedn, log, msg=None):
+    basedn_arg = _get_dn_arg(basedn, msg="Enter basedn")
    if not DSLdapObject(inst, basedn_arg).exists():
        raise ValueError(f'The base DN "{basedn_arg}" does not exist.')

--
2.48.1
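
The resolution order described in the commit message boils down to a three-step fallback. A minimal sketch of that logic (the helper name and arguments here are hypothetical; the actual change threads dsrc_inst['basedn'] into _get_basedn_arg as shown above):

```python
def resolve_basedn(cli_basedn, dsrc_basedn, prompt=input):
    # 1. an explicit -b option always wins
    if cli_basedn:
        return cli_basedn
    # 2. otherwise fall back to the basedn recorded in .dsrc
    if dsrc_basedn:
        return dsrc_basedn
    # 3. only ask interactively when neither is set
    return prompt('Enter basedn: ')
```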

@ -1,520 +0,0 @@
From b8c079c770d3eaa4de49e997d42e1501c28a153b Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 8 Jul 2024 11:19:09 +0200
Subject: [PATCH] Issue 6155 - ldap-agent fails to start because of permission
 error (#6179)

Issue: The dirsrv-snmp service fails to start when SELinux is enforced because of AVCs preventing it from opening some files.
One workaround is to use the dac_override capability, but that is bad practice.
Fix: Set proper permissions:

Run ldap-agent with uid=root and gid=dirsrv so it can access both snmp and dirsrv resources.
Set read permission for the group on the dse.ldif file.
Set r/w permissions for the group on the snmp semaphore and mmap file.
For that last one, special care is needed because the ns-slapd umask overrides the file creation permissions.
Since it is better to avoid changing the umask (changing the umask within the code is not thread safe,
and the current 0022 umask value is correct for most of the files), the safest way is to chmod the snmp file
if the needed permissions are not set.
Issue: #6155

Reviewed by: @droideck, @vashirov (Thanks!)

(cherry picked from commit eb7e57d77b557b63c65fdf38f9069893b021f049)
---
 .github/scripts/generate_matrix.py    |   4 +-
 dirsrvtests/tests/suites/snmp/snmp.py | 214 ++++++++++++++++++++++++++
 ldap/servers/slapd/agtmmap.c          |  72 ++++++++-
 ldap/servers/slapd/agtmmap.h          |  13 ++
 ldap/servers/slapd/dse.c              |   6 +-
 ldap/servers/slapd/slap.h             |   6 +
 ldap/servers/slapd/snmp_collator.c    |   4 +-
 src/lib389/lib389/instance/setup.py   |   5 +
 wrappers/systemd-snmp.service.in      |   1 +
 9 files changed, 313 insertions(+), 12 deletions(-)
 create mode 100644 dirsrvtests/tests/suites/snmp/snmp.py

diff --git a/.github/scripts/generate_matrix.py b/.github/scripts/generate_matrix.py
index 584374597..8d67a1dc7 100644
--- a/.github/scripts/generate_matrix.py
+++ b/.github/scripts/generate_matrix.py
@@ -21,8 +21,8 @@ else:
    # Use tests from the source
    suites = next(os.walk('dirsrvtests/tests/suites/'))[1]

-    # Filter out snmp as it is an empty directory:
-    suites.remove('snmp')
+    # Filter out webui because of broken tests
+    suites.remove('webui')

    # Run each replication test module separately to speed things up
    suites.remove('replication')
diff --git a/dirsrvtests/tests/suites/snmp/snmp.py b/dirsrvtests/tests/suites/snmp/snmp.py
new file mode 100644
index 000000000..0952deb40
--- /dev/null
+++ b/dirsrvtests/tests/suites/snmp/snmp.py
@@ -0,0 +1,214 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2024 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import os
+import pytest
+import logging
+import subprocess
+import ldap
+from datetime import datetime
+from shutil import copyfile
+from lib389.topologies import topology_m2 as topo_m2
+from lib389.utils import selinux_present
+
+
+DEBUGGING = os.getenv("DEBUGGING", default=False)
+if DEBUGGING:
+    logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+    logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+
+SNMP_USER = 'user_name'
+SNMP_PASSWORD = 'authentication_password'
+SNMP_PRIVATE = 'private_password'
+
+# LDAP OID in MIB
+LDAP_OID = '.1.3.6.1.4.1.2312.6.1.1'
+LDAPCONNECTIONS_OID = f'{LDAP_OID}.21'
+
+
+def run_cmd(cmd, check_returncode=True):
+    """Run a command"""
+
+    log.info(f'Run: {cmd}')
+    result = subprocess.run(cmd, capture_output=True, universal_newlines=True)
+    log.info(f'STDOUT of {cmd} is:\n{result.stdout}')
+    log.info(f'STDERR of {cmd} is:\n{result.stderr}')
+    if check_returncode:
+        result.check_returncode()
+    return result
+
+
+def add_lines(lines, filename):
+    """Add lines that are not already present at the end of a file"""
+
+    log.info(f'add_lines({lines}, {filename})')
+    try:
+        with open(filename, 'r') as fd:
+            for line in fd:
+                try:
+                    lines.remove(line.strip())
+                except ValueError:
+                    pass
+    except FileNotFoundError:
+        pass
+    if lines:
+        with open(filename, 'a') as fd:
+            for line in lines:
+                fd.write(f'{line}\n')
+
+
+def remove_lines(lines, filename):
+    """Remove lines in a file"""
+
+    log.info(f'remove_lines({lines}, {filename})')
+    file_lines = []
+    with open(filename, 'r') as fd:
+        for line in fd:
+            if not line.strip() in lines:
+                file_lines.append(line)
+    with open(filename, 'w') as fd:
+        for line in file_lines:
+            fd.write(line)
+
+
+@pytest.fixture(scope="module")
+def setup_snmp(topo_m2, request):
+    """Install snmp and configure it
+
+    Returns the time just before dirsrv-snmp gets restarted
+    """
+
+    inst1 = topo_m2.ms["supplier1"]
+    inst2 = topo_m2.ms["supplier2"]
+
+    # Check for the test prerequisites
+    if os.getuid() != 0:
+        pytest.skip('This test should be run by root superuser')
+        return None
+    if not inst1.with_systemd_running():
+        pytest.skip('This test requires systemd')
+        return None
+    required_packages = {
+        '389-ds-base-snmp': os.path.join(inst1.get_sbin_dir(), 'ldap-agent'),
+        'net-snmp': '/etc/snmp/snmpd.conf', }
+    skip_msg = ""
+    for package,file in required_packages.items():
+        if not os.path.exists(file):
+            skip_msg += f"Package {package} is not installed ({file} is missing).\n"
+    if skip_msg != "":
+        pytest.skip(f'This test requires the following package(s): {skip_msg}')
+        return None
+
+    # Install snmp
+    # run_cmd(['/usr/bin/dnf', 'install', '-y', 'net-snmp', 'net-snmp-utils', '389-ds-base-snmp'])
+
+    # Prepare the lines to add/remove in files:
+    #  master agentx
+    #  snmp user (user_name - authentication_password - private_password)
+    #  ldap_agent ds instances
+    #
+    # Adding rwuser and createUser lines is the same as running:
+    #  net-snmp-create-v3-user -A authentication_password -a SHA -X private_password -x AES user_name
+    # but has the advantage of removing the user at cleanup phase
+    #
+    agent_cfg = '/etc/dirsrv/config/ldap-agent.conf'
+    lines_dict = { '/etc/snmp/snmpd.conf' : ['master agentx', f'rwuser {SNMP_USER}'],
+                   '/var/lib/net-snmp/snmpd.conf' : [
+                       f'createUser {SNMP_USER} SHA "{SNMP_PASSWORD}" AES "{SNMP_PRIVATE}"',],
+                   agent_cfg : [] }
+    for inst in topo_m2:
+        lines_dict[agent_cfg].append(f'server slapd-{inst.serverid}')
+
+    # Prepare the cleanup
+    def fin():
+        run_cmd(['systemctl', 'stop', 'dirsrv-snmp'])
+        if not DEBUGGING:
+            run_cmd(['systemctl', 'stop', 'snmpd'])
+            try:
+                os.remove('/usr/share/snmp/mibs/redhat-directory.mib')
+            except FileNotFoundError:
+                pass
+            for filename,lines in lines_dict.items():
+                remove_lines(lines, filename)
+            run_cmd(['systemctl', 'start', 'snmpd'])
+
+    request.addfinalizer(fin)
+
+    # Copy RHDS MIB in default MIB search path (Ugly because I have not found how to change the search path)
+    copyfile('/usr/share/dirsrv/mibs/redhat-directory.mib', '/usr/share/snmp/mibs/redhat-directory.mib')
+
+    run_cmd(['systemctl', 'stop', 'snmpd'])
+    for filename,lines in lines_dict.items():
+        add_lines(lines, filename)
+
+    run_cmd(['systemctl', 'start', 'snmpd'])
+
+    curtime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+
+    run_cmd(['systemctl', 'start', 'dirsrv-snmp'])
+    return curtime
+
+
+@pytest.mark.skipif(not os.path.exists('/usr/bin/snmpwalk'), reason="net-snmp-utils package is not installed")
+def test_snmpwalk(topo_m2, setup_snmp):
+    """snmp smoke tests.
+
+    :id: e5d29998-1c21-11ef-a654-482ae39447e5
+    :setup: Two suppliers replication setup, snmp
+    :steps:
+        1. use snmpwalk to display LDAP statistics
+        2. use snmpwalk to get the number of open connections
+    :expectedresults:
+        1. Success and no messages in stderr
+        2. The number of open connections should be positive
+    """
+
+    inst1 = topo_m2.ms["supplier1"]
+    inst2 = topo_m2.ms["supplier2"]
+
+
+    cmd = [ '/usr/bin/snmpwalk', '-v3', '-u', SNMP_USER, '-l', 'AuthPriv',
+            '-m', '+RHDS-MIB', '-A', SNMP_PASSWORD, '-a', 'SHA',
+            '-X', SNMP_PRIVATE, '-x', 'AES', 'localhost',
+            LDAP_OID ]
+    result = run_cmd(cmd)
+    assert not result.stderr
+
+    cmd = [ '/usr/bin/snmpwalk', '-v3', '-u', SNMP_USER, '-l', 'AuthPriv',
+            '-m', '+RHDS-MIB', '-A', SNMP_PASSWORD, '-a', 'SHA',
+            '-X', SNMP_PRIVATE, '-x', 'AES', 'localhost',
+            f'{LDAPCONNECTIONS_OID}.{inst1.port}', '-Ov' ]
+    result = run_cmd(cmd)
+    nbconns = int(result.stdout.split()[1])
+    log.info(f'There are {nbconns} open connections on {inst1.serverid}')
+    assert nbconns > 0
+
+
+@pytest.mark.skipif(not selinux_present(), reason="SELinux is not enabled")
+def test_snmp_avc(topo_m2, setup_snmp):
+    """snmp smoke tests.
+
+    :id: fb79728e-1d0d-11ef-9213-482ae39447e5
+    :setup: Two suppliers replication setup, snmp
+    :steps:
+        1. Get the system journal about ldap-agent
+    :expectedresults:
+        1. No AVC should be present
+    """
+    result = run_cmd(['journalctl', '-S', setup_snmp, '-g', 'ldap-agent'])
+    assert not 'AVC' in result.stdout
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
diff --git a/ldap/servers/slapd/agtmmap.c b/ldap/servers/slapd/agtmmap.c
index bc5fe1ee1..4dc67dcfb 100644
--- a/ldap/servers/slapd/agtmmap.c
+++ b/ldap/servers/slapd/agtmmap.c
@@ -34,6 +34,70 @@
agt_mmap_context_t mmap_tbl[2] = {{AGT_MAP_UNINIT, -1, (caddr_t)-1},
                                  {AGT_MAP_UNINIT, -1, (caddr_t)-1}};

+#define CHECK_MAP_FAILURE(addr) ((addr)==NULL || (addr) == (caddr_t) -1)
+
+
+/****************************************************************************
+ *
+ *  agt_set_fmode () - try to increase file mode if some flags are missing.
+ *
+ *
+ *  Inputs:
+ *       fd -> The file descriptor.
+ *
+ *       mode -> the wanted mode
+ *
+ *  Outputs: None
+ *  Return Values:  None
+ *
+ ****************************************************************************/
+static void
+agt_set_fmode(int fd, mode_t mode)
+{
+    /* ns-slapd umask is 0022 which is usually fine,
+     * but ldap-agent needs S_IWGRP permission on the snmp semaphore and mmap file
+     * (when SELinux is enforced, a process with uid=0 does not bypass the file permissions
+     * unless the infamous dac_override capability is set).
+     * Changing the umask could lead to race conditions, so it is better to check the
+     * file permissions and change them if needed and if the process owns the file.
+     */
+    struct stat fileinfo = {0};
+    if (fstat(fd, &fileinfo) == 0 && fileinfo.st_uid == getuid() &&
+        (fileinfo.st_mode & mode) != mode) {
+        (void) fchmod(fd, fileinfo.st_mode | mode);
+    }
+}
+
+/****************************************************************************
+ *
+ *  agt_sem_open () - Like sem_open but ignores umask
+ *
+ *
+ *  Inputs: see sem_open man page.
+  *  Outputs: see sem_open man page.
+ *  Return Values: see sem_open man page.
+ *
+ ****************************************************************************/
+sem_t *
+agt_sem_open(const char *name, int oflag, mode_t mode, unsigned int value)
+{
+    sem_t *sem = sem_open(name, oflag, mode, value);
+    char *semname = NULL;
+
+    if (sem != NULL) {
+        if (asprintf(&semname, "/dev/shm/sem.%s", name+1) > 0) {
+            int fd = open(semname, O_RDONLY);
+            if (fd >= 0) {
+                agt_set_fmode(fd, mode);
+                (void) close(fd);
+            }
+            free(semname);
+            semname = NULL;
+        }
+    }
+    return sem;
+}
+
/****************************************************************************
 *
 * agt_mopen_stats () - open and Memory Map the stats file. agt_mclose_stats()
@@ -52,7 +116,6 @@ agt_mmap_context_t mmap_tbl[2] = {{AGT_MAP_UNINIT, -1, (caddr_t)-1},
 *  as defined in <errno.h>, otherwise.
 *
 ****************************************************************************/
-
int
agt_mopen_stats(char *statsfile, int mode, int *hdl)
{
@@ -64,6 +127,7 @@ agt_mopen_stats(char *statsfile, int mode, int *hdl)
    int err;
    size_t sz;
    struct stat fileinfo;
+    mode_t rw_mode = S_IWUSR | S_IRUSR | S_IRGRP | S_IWGRP | S_IROTH;

    switch (mode) {
    case O_RDONLY:
@@ -128,10 +192,7 @@ agt_mopen_stats(char *statsfile, int mode, int *hdl)
        break;

    case O_RDWR:
-        fd = open(path,
-                  O_RDWR | O_CREAT,
-                  S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH);
-
+        fd = open(path, O_RDWR | O_CREAT, rw_mode);
        if (fd < 0) {
            err = errno;
#if (0)
@@ -140,6 +201,7 @@ agt_mopen_stats(char *statsfile, int mode, int *hdl)
            rc = err;
            goto bail;
        }
+        agt_set_fmode(fd, rw_mode);

        if (fstat(fd, &fileinfo) != 0) {
            close(fd);
diff --git a/ldap/servers/slapd/agtmmap.h b/ldap/servers/slapd/agtmmap.h
index fb27ab2c4..99a8584a3 100644
--- a/ldap/servers/slapd/agtmmap.h
+++ b/ldap/servers/slapd/agtmmap.h
@@ -28,6 +28,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
+#include <semaphore.h>
#include <errno.h>
#include "nspr.h"

@@ -188,6 +189,18 @@ int agt_mclose_stats(int hdl);

int agt_mread_stats(int hdl, struct hdr_stats_t *, struct ops_stats_t *, struct entries_stats_t *);

+/****************************************************************************
+ *
+ *  agt_sem_open () - Like sem_open but ignores umask
+ *
+ *
+ *  Inputs: see sem_open man page.
+ *  Outputs: see sem_open man page.
+ *  Return Values: see sem_open man page.
+ *
+ ****************************************************************************/
+sem_t *agt_sem_open(const char *name, int oflag, mode_t mode, unsigned int value);
+
#ifdef __cplusplus
}
#endif
diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
index b04fafde6..f1e48c6b1 100644
--- a/ldap/servers/slapd/dse.c
+++ b/ldap/servers/slapd/dse.c
@@ -683,7 +683,7 @@ dse_read_one_file(struct dse *pdse, const char *filename, Slapi_PBlock *pb, int
                  "The configuration file %s could not be accessed, error %d\n",
                  filename, rc);
        rc = 0; /* Fail */
-    } else if ((prfd = PR_Open(filename, PR_RDONLY, SLAPD_DEFAULT_FILE_MODE)) == NULL) {
+    } else if ((prfd = PR_Open(filename, PR_RDONLY, SLAPD_DEFAULT_DSE_FILE_MODE)) == NULL) {
        slapi_log_err(SLAPI_LOG_ERR, "dse_read_one_file",
                      "The configuration file %s could not be read. " SLAPI_COMPONENT_NAME_NSPR " %d (%s)\n",
                      filename,
@@ -871,7 +871,7 @@ dse_rw_permission_to_one_file(const char *name, int loglevel)
    PRFileDesc *prfd;

    prfd = PR_Open(name, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE,
-                   SLAPD_DEFAULT_FILE_MODE);
+                   SLAPD_DEFAULT_DSE_FILE_MODE);
    if (NULL == prfd) {
        prerr = PR_GetError();
        accesstype = "create";
@@ -970,7 +970,7 @@ dse_write_file_nolock(struct dse *pdse)
    fpw.fpw_prfd = NULL;

    if (NULL != pdse->dse_filename) {
-        if ((fpw.fpw_prfd = PR_Open(pdse->dse_tmpfile, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, SLAPD_DEFAULT_FILE_MODE)) == NULL) {
+        if ((fpw.fpw_prfd = PR_Open(pdse->dse_tmpfile, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, SLAPD_DEFAULT_DSE_FILE_MODE)) == NULL) {
            rc = PR_GetOSError();
            slapi_log_err(SLAPI_LOG_ERR, "dse_write_file_nolock", "Cannot open "
                          "temporary DSE file \"%s\" for update: OS error %d (%s)\n",
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 469874fd1..927576b70 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -238,6 +238,12 @@ typedef void (*VFPV)(); /* takes undefined arguments */
 */

#define SLAPD_DEFAULT_FILE_MODE S_IRUSR | S_IWUSR
+/* ldap_agent run as uid=root gid=dirsrv and requires S_IRGRP | S_IWGRP
+ * on semaphore and mmap file if SELinux is enforced.
+ */
+#define SLAPD_DEFAULT_SNMP_FILE_MODE S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP
+/* ldap_agent run as uid=root gid=dirsrv and requires S_IRGRP on dse.ldif if SELinux is enforced. */
+#define SLAPD_DEFAULT_DSE_FILE_MODE S_IRUSR | S_IWUSR | S_IRGRP
#define SLAPD_DEFAULT_DIR_MODE S_IRWXU
#define SLAPD_DEFAULT_IDLE_TIMEOUT 3600 /* seconds - 0 == never */
#define SLAPD_DEFAULT_IDLE_TIMEOUT_STR "3600"
diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c
index c998d4262..bd7020585 100644
--- a/ldap/servers/slapd/snmp_collator.c
+++ b/ldap/servers/slapd/snmp_collator.c
@@ -474,7 +474,7 @@ static void
snmp_collator_create_semaphore(void)
{
    /* First just try to create the semaphore. This should usually just work. */
-    if ((stats_sem = sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_FILE_MODE, 1)) == SEM_FAILED) {
+    if ((stats_sem = agt_sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_SNMP_FILE_MODE, 1)) == SEM_FAILED) {
        if (errno == EEXIST) {
            /* It appears that we didn't exit cleanly last time and left the semaphore
             * around. Recreate it since we don't know what state it is in. */
@@ -486,7 +486,7 @@ snmp_collator_create_semaphore(void)
            exit(1);
        }

-        if ((stats_sem = sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_FILE_MODE, 1)) == SEM_FAILED) {
+        if ((stats_sem = agt_sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_SNMP_FILE_MODE, 1)) == SEM_FAILED) {
            /* No dice */
            slapi_log_err(SLAPI_LOG_EMERG, "snmp_collator_create_semaphore",
                          "Failed to create semaphore for stats file (/dev/shm/sem.%s). Error %d (%s).\n",
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index 036664447..fca03383e 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -10,6 +10,7 @@
import os
import sys
import shutil
+import stat
import pwd
import grp
import re
@@ -773,6 +774,10 @@ class SetupDs(object):
                ldapi_autobind="on",
            )
            file_dse.write(dse_fmt)
+            # Set minimum permission required by snmp ldap-agent
+            status = os.fstat(file_dse.fileno())
+            os.fchmod(file_dse.fileno(), status.st_mode | stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP)
+            os.chown(os.path.join(slapd['config_dir'], 'dse.ldif'), slapd['user_uid'], slapd['group_gid'])

        self.log.info("Create file system structures ...")
        # Create all the needed paths
diff --git a/wrappers/systemd-snmp.service.in b/wrappers/systemd-snmp.service.in
index f18766cb4..d344367a0 100644
--- a/wrappers/systemd-snmp.service.in
+++ b/wrappers/systemd-snmp.service.in
@@ -9,6 +9,7 @@ After=network.target

[Service]
Type=forking
+Group=@defaultgroup@
PIDFile=/run/dirsrv/ldap-agent.pid
ExecStart=@sbindir@/ldap-agent @configdir@/ldap-agent.conf

--
2.49.0
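
The core idea of agt_set_fmode() above -- widen a file's mode only when the process owns it and some wanted bits are missing, instead of touching the umask -- translates to just a few lines. A Python rendering for illustration only (the path and bits in the usage comment are placeholders, not from the patch):

```python
import os
import stat

def ensure_mode_bits(path, wanted):
    """Add missing permission bits without ever changing the umask.
    Only acts when the current process owns the file, mirroring the
    ownership check in agt_set_fmode()."""
    st = os.stat(path)
    if st.st_uid == os.getuid() and (st.st_mode & wanted) != wanted:
        os.chmod(path, st.st_mode | wanted)

# e.g. grant the group r/w on the SNMP mmap file (hypothetical path):
# ensure_mode_bits('/var/run/dirsrv/slapd-inst.stats', stat.S_IRGRP | stat.S_IWGRP)
```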

@ -1,60 +0,0 @@
From 12870f410545fb055f664b588df2a2b7ab1c228e Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Mon, 4 Mar 2024 07:22:00 +0100
Subject: [PATCH] Issue 5305 - OpenLDAP version autodetection doesn't work

Bug Description:
An error is logged during a build in `mock` with Bash 4.4:

```
checking for --with-libldap-r... ./configure: command substitution: line 22848: syntax error near unexpected token `>'
./configure: command substitution: line 22848: `ldapsearch -VV 2> >(sed -n '/ldapsearch/ s/.*ldapsearch \([0-9]\+\.[0-9]\+\.[0-9]\+\) .*/\1/p')'
no
```

`mock` runs Bash as `sh` (POSIX mode). Support for process substitution
in POSIX mode was added in version 5.1:
https://lists.gnu.org/archive/html/bug-bash/2020-12/msg00002.html

> Process substitution is now available in posix mode.

Fix Description:
* Add missing `BuildRequires` for openldap-clients
* Replace process substitution with a pipe

Fixes: https://github.com/389ds/389-ds-base/issues/5305

Reviewed by: @progier389, @tbordaz (Thanks!)
---
 configure.ac            | 2 +-
 rpm/389-ds-base.spec.in | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/configure.ac b/configure.ac
index ffc2aac14..a690765a3 100644
--- a/configure.ac
+++ b/configure.ac
@@ -912,7 +912,7 @@ AC_ARG_WITH(libldap-r, AS_HELP_STRING([--with-libldap-r],[Use lldap_r shared lib
    AC_SUBST(with_libldap_r)
    fi
],
-OPENLDAP_VERSION=`ldapsearch -VV 2> >(sed -n '/ldapsearch/ s/.*ldapsearch \([[[0-9]]]\+\.[[[0-9]]]\+\.[[[0-9]]]\+\) .*/\1/p')`
+OPENLDAP_VERSION=`ldapsearch -VV 2>&1 | sed -n '/ldapsearch/ s/.*ldapsearch \([[[0-9]]]\+\.[[[0-9]]]\+\.[[[0-9]]]\+\) .*/\1/p'`
AX_COMPARE_VERSION([$OPENLDAP_VERSION], [lt], [2.5], [ with_libldap_r=yes ], [ with_libldap_r=no ])
AC_MSG_RESULT($with_libldap_r))

diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index cd86138ea..b8c14cd14 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -65,6 +65,7 @@ Provides: ldif2ldbm
# Attach the buildrequires to the top level package:
BuildRequires: nspr-devel
BuildRequires: nss-devel >= 3.34
+BuildRequires: openldap-clients
BuildRequires: openldap-devel
BuildRequires: libdb-devel
BuildRequires: cyrus-sasl-devel
--
2.49.0
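
For reference, the same version extraction can be reproduced outside the build system. A rough Python equivalent of the pipeline the patch switches to (ldapsearch -VV prints its version banner on stderr, hence the 2>&1; this sketch assumes openldap-clients is installed so `ldapsearch` is on PATH):

```python
import re
import subprocess

# Equivalent of: ldapsearch -VV 2>&1 | sed -n '... \(x.y.z\) ... p'
proc = subprocess.run(['ldapsearch', '-VV'],
                      capture_output=True, universal_newlines=True)
match = re.search(r'ldapsearch (\d+\.\d+\.\d+)', proc.stdout + proc.stderr)
openldap_version = match.group(1) if match else None
print(openldap_version)
```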

@ -1,245 +0,0 @@
From eca6f5fe18f768fd407d38c85624a5212bcf16ab Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Wed, 27 Sep 2023 15:40:33 -0700
Subject: [PATCH] Issue 1925 - Add a CI test (#5936)

Description: Verify that the issue is not present. Cover the scenario where
we remove the existing VLVs, create new VLVs (with the same name), and then
perform online re-indexing.

Related: https://github.com/389ds/389-ds-base/issues/1925

Reviewed by: @progier389 (Thanks!)

(cherry picked from commit 9633e8d32d28345409680f8e462fb4a53d3b4f83)
---
 .../tests/suites/vlv/regression_test.py | 175 +++++++++++++++---
 1 file changed, 145 insertions(+), 30 deletions(-)

diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
index 6ab709bd3..536fe950f 100644
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -9,12 +9,16 @@
import pytest, time
from lib389.tasks import *
from lib389.utils import *
-from lib389.topologies import topology_m2
+from lib389.topologies import topology_m2, topology_st
from lib389.replica import *
from lib389._constants import *
+from lib389.properties import TASK_WAIT
from lib389.index import *
from lib389.mappingTree import *
from lib389.backend import *
+from lib389.idm.user import UserAccounts
+from ldap.controls.vlv import VLVRequestControl
+from ldap.controls.sss import SSSRequestControl

pytestmark = pytest.mark.tier1

@@ -22,6 +26,88 @@ logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)


+def open_new_ldapi_conn(dsinstance):
+    ldapurl, certdir = get_ldapurl_from_serverid(dsinstance)
+    assert 'ldapi://' in ldapurl
+    conn = ldap.initialize(ldapurl)
+    conn.sasl_interactive_bind_s("", ldap.sasl.external())
+    return conn
+
+
+def check_vlv_search(conn):
+    before_count=1
+    after_count=3
+    offset=3501
+
+    vlv_control = VLVRequestControl(criticality=True,
+                                    before_count=before_count,
+                                    after_count=after_count,
+                                    offset=offset,
+                                    content_count=0,
+                                    greater_than_or_equal=None,
+                                    context_id=None)
+
+    sss_control = SSSRequestControl(criticality=True, ordering_rules=['cn'])
+    result = conn.search_ext_s(
+        base='dc=example,dc=com',
+        scope=ldap.SCOPE_SUBTREE,
+        filterstr='(uid=*)',
+        serverctrls=[vlv_control, sss_control]
+    )
+    imin = offset + 998 - before_count
+    imax = offset + 998 + after_count
+
+    for i, (dn, entry) in enumerate(result, start=imin):
+        assert i <= imax
+        expected_dn = f'uid=testuser{i},ou=People,dc=example,dc=com'
+        log.debug(f'found {dn} expected {expected_dn}')
+        assert dn.lower() == expected_dn.lower()
+
+
+def add_users(inst, users_num):
+    users = UserAccounts(inst, DEFAULT_SUFFIX)
+    log.info(f'Adding {users_num} users')
+    for i in range(0, users_num):
+        uid = 1000 + i
+        user_properties = {
+            'uid': f'testuser{uid}',
+            'cn': f'testuser{uid}',
+            'sn': 'user',
+            'uidNumber': str(uid),
+            'gidNumber': str(uid),
+            'homeDirectory': f'/home/testuser{uid}'
+        }
+        users.create(properties=user_properties)
+
+
+
+def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
+                                scope=ldap.SCOPE_SUBTREE, prefix="vlv", vlvsort="cn"):
+    vlv_searches = VLVSearch(inst)
+    vlv_search_properties = {
+        "objectclass": ["top", "vlvSearch"],
+        "cn": f"{prefix}Srch",
+        "vlvbase": basedn,
+        "vlvfilter": "(uid=*)",
+        "vlvscope": str(scope),
+    }
+    vlv_searches.create(
+        basedn=f"cn={bename},cn=ldbm database,cn=plugins,cn=config",
+        properties=vlv_search_properties
+    )
+
+    vlv_index = VLVIndex(inst)
+    vlv_index_properties = {
+        "objectclass": ["top", "vlvIndex"],
+        "cn": f"{prefix}Idx",
+        "vlvsort": vlvsort,
+    }
+    vlv_index.create(
+        basedn=f"cn={prefix}Srch,cn={bename},cn=ldbm database,cn=plugins,cn=config",
+        properties=vlv_index_properties
+    )
+    return vlv_searches, vlv_index
+
class BackendHandler:
    def __init__(self, inst, bedict, scope=ldap.SCOPE_ONELEVEL):
        self.inst = inst
@@ -101,34 +187,6 @@ class BackendHandler:
                 'dn' : dn}


-def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
-                                scope=ldap.SCOPE_SUBTREE, prefix="vlv", vlvsort="cn"):
-    vlv_searches = VLVSearch(inst)
-    vlv_search_properties = {
-        "objectclass": ["top", "vlvSearch"],
-        "cn": f"{prefix}Srch",
-        "vlvbase": basedn,
-        "vlvfilter": "(uid=*)",
-        "vlvscope": str(scope),
-    }
-    vlv_searches.create(
-        basedn=f"cn={bename},cn=ldbm database,cn=plugins,cn=config",
-        properties=vlv_search_properties
-    )
-
-    vlv_index = VLVIndex(inst)
-    vlv_index_properties = {
-        "objectclass": ["top", "vlvIndex"],
-        "cn": f"{prefix}Idx",
-        "vlvsort": vlvsort,
-    }
-    vlv_index.create(
-        basedn=f"cn={prefix}Srch,cn={bename},cn=ldbm database,cn=plugins,cn=config",
-        properties=vlv_index_properties
-    )
-    return vlv_searches, vlv_index
-
-
@pytest.fixture
def vlv_setup_with_uid_mr(topology_st, request):
    inst = topology_st.standalone
@@ -245,6 +303,62 @@ def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2):
    entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)")


+def test_vlv_recreation_reindex(topology_st):
+    """Test VLV recreation and reindexing.
+
+    :id: 29f4567f-4ac0-410f-bc99-a32e217a939f
+    :setup: Standalone instance.
+    :steps:
+        1. Create new VLVs and do the reindex.
+        2. Test the new VLVs.
+        3. Remove the existing VLVs.
+        4. Create new VLVs (with the same name).
+        5. Perform online re-indexing of the new VLVs.
+        6. Test the new VLVs.
+    :expectedresults:
+        1. Success.
+        2. Success.
+        3. Success.
+        4. Success.
+        5. Success.
+        6. Success.
+    """
+
+    inst = topology_st.standalone
+    reindex_task = Tasks(inst)
+
+    # Create and test VLVs
+    vlv_search, vlv_index = create_vlv_search_and_index(inst)
+    assert reindex_task.reindex(
+        suffix=DEFAULT_SUFFIX,
+        attrname=vlv_index.rdn,
+        args={TASK_WAIT: True},
+        vlv=True
+    ) == 0
+
+    add_users(inst, 5000)
+
+    conn = open_new_ldapi_conn(inst.serverid)
+    assert len(conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(cn=*)")) > 0
+    check_vlv_search(conn)
+
+    # Remove and recreate VLVs
+    vlv_index.delete()
+    vlv_search.delete()
+
+    vlv_search, vlv_index = create_vlv_search_and_index(inst)
+    assert reindex_task.reindex(
+        suffix=DEFAULT_SUFFIX,
+        attrname=vlv_index.rdn,
+        args={TASK_WAIT: True},
+        vlv=True
+    ) == 0
+
+    conn = open_new_ldapi_conn(inst.serverid)
+    assert len(conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(cn=*)")) > 0
+    check_vlv_search(conn)
+
+
def test_vlv_with_mr(vlv_setup_with_uid_mr):
    """
    Testing vlv having specific matching rule
@@ -288,6 +402,7 @@ def test_vlv_with_mr(vlv_setup_with_uid_mr):
    assert inst.status()


+
if __name__ == "__main__":
    # Run isolated
    # -s for DEBUG mode
--
2.49.0
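
The window arithmetic in check_vlv_search() follows directly from how a VLV request works: the server returns the entries at sorted positions from target - before_count through target + after_count around the requested offset. A back-of-the-envelope check of that window (the 998 shift in imin/imax maps sorted positions onto the generated testuser uid numbers; that constant is specific to this data set and is an assumption of this sketch, not spelled out in the patch):

```python
before_count, after_count, offset = 1, 3, 3501

# Sorted positions the server should return for this request
window = range(offset - before_count, offset + after_count + 1)
assert len(window) == before_count + 1 + after_count  # at most 5 entries

# The test then shifts each position by 998 to get the matching
# testuser uid (specific to how add_users() numbered the entries).
imin, imax = offset + 998 - before_count, offset + 998 + after_count
assert (imax - imin + 1) == len(window)
```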

@ -1,75 +0,0 @@
From af3fa90f91efda86f4337e8823bca6581ab61792 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Fri, 7 Feb 2025 09:43:08 +0100
Subject: [PATCH] Issue 6494 - (2nd) Various errors when using extended
 matching rule on vlv sort filter

---
 .../tests/suites/indexes/regression_test.py | 40 +++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
index 2196fb2ed..b5bcccc8f 100644
--- a/dirsrvtests/tests/suites/indexes/regression_test.py
+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
@@ -11,17 +11,57 @@ import os
import pytest
import ldap
from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX
+from lib389.backend import Backend, Backends, DatabaseConfig
from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate
+from lib389.dbgen import dbgen_users
from lib389.index import Indexes
from lib389.backend import Backends
from lib389.idm.user import UserAccounts
from lib389.topologies import topology_st as topo
from lib389.utils import ds_is_older
from lib389.idm.nscontainer import nsContainer
+from lib389.properties import TASK_WAIT
+from lib389.tasks import Tasks, Task

pytestmark = pytest.mark.tier1


+SUFFIX2 = 'dc=example2,dc=com'
+BENAME2 = 'be2'
+
+DEBUGGING = os.getenv("DEBUGGING", default=False)
+
+@pytest.fixture(scope="function")
+def add_backend_and_ldif_50K_users(request, topo):
+    """
+    Add an empty backend and associated 50K users ldif file
+    """
+
+    tasks = Tasks(topo.standalone)
+    import_ldif = f'{topo.standalone.ldifdir}/be2_50K_users.ldif'
+    be2 = Backend(topo.standalone)
+    be2.create(properties={
+        'cn': BENAME2,
+        'nsslapd-suffix': SUFFIX2,
+    },
+    )
+
+    def fin():
+        nonlocal be2
+        if not DEBUGGING:
+            be2.delete()
+
+    request.addfinalizer(fin)
+    parent = f'ou=people,{SUFFIX2}'
+    dbgen_users(topo.standalone, 50000, import_ldif, SUFFIX2, generic=True, parent=parent)
+    assert tasks.importLDIF(
+        suffix=SUFFIX2,
+        input_file=import_ldif,
+        args={TASK_WAIT: True}
+    ) == 0
+
+    return import_ldif
+
@pytest.fixture(scope="function")
def add_a_group_with_users(request, topo):
    """
--
2.49.0

@ -1,45 +0,0 @@
From 0ad0eb34972c99f30334d7d420f3056e0e794d74 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Fri, 7 Feb 2025 14:33:46 +0100
Subject: [PATCH] Issue 6494 - (3rd) Various errors when using extended
 matching rule on vlv sort filter

(cherry picked from commit f2f917ca55c34c81b578bce1dd5275abff6abb72)
---
 dirsrvtests/tests/suites/vlv/regression_test.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
index 536fe950f..d069fdbaf 100644
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
@@ -16,12 +16,16 @@ from lib389.properties import TASK_WAIT
from lib389.index import *
from lib389.mappingTree import *
from lib389.backend import *
-from lib389.idm.user import UserAccounts
+from lib389.idm.user import UserAccounts, UserAccount
+from lib389.idm.organization import Organization
+from lib389.idm.organizationalunit import OrganizationalUnits
from ldap.controls.vlv import VLVRequestControl
from ldap.controls.sss import SSSRequestControl

pytestmark = pytest.mark.tier1

+DEMO_PW = 'secret12'
+
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

@@ -169,7 +173,7 @@ class BackendHandler:
            'loginShell': '/bin/false',
            'userpassword': DEMO_PW })
        # Add regular user
-        add_users(self.inst, 10, suffix=suffix)
+        add_users(self.inst, 10)
        # Removing ou2
        ou2.delete()
        # And export
--
2.49.0

@ -1,72 +0,0 @@
From 52041811b200292af6670490c9ebc1f599439a22 Mon Sep 17 00:00:00 2001
From: Masahiro Matsuya <mmatsuya@redhat.com>
Date: Sat, 22 Mar 2025 01:25:25 +0900
Subject: [PATCH] Issue 6494 - (4th) Various errors when using extended
 matching rule on vlv sort filter

test_vlv_with_mr uses the vlv_setup_with_uid_mr fixture to set up the backend
and test users. The add_users function is called in beh.setup without any
suffix for the created backend. As a result, add_users always creates the
test users in DEFAULT_SUFFIX. Another test, such as
test_vlv_recreation_reindex, can create the same test user in
DEFAULT_SUFFIX, and that caused the ALREADY_EXISTS failure in the
test_vlv_with_mr test.

In the main branch, add_users has a suffix argument. Test users are created
in the specified suffix, and the backend is cleaned up after the test.
This PR follows the same implementation.

Also, suppressing ldap.ALREADY_EXISTS makes the add_users function easier
to reuse.

Related: https://github.com/389ds/389-ds-base/issues/6494
---
 dirsrvtests/tests/suites/vlv/regression_test.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
index d069fdbaf..e9408117b 100644
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
@@ -21,6 +21,7 @@ from lib389.idm.organization import Organization
from lib389.idm.organizationalunit import OrganizationalUnits
from ldap.controls.vlv import VLVRequestControl
from ldap.controls.sss import SSSRequestControl
+from contextlib import suppress

pytestmark = pytest.mark.tier1

@@ -68,8 +69,8 @@ def check_vlv_search(conn):
        assert dn.lower() == expected_dn.lower()


-def add_users(inst, users_num):
-    users = UserAccounts(inst, DEFAULT_SUFFIX)
+def add_users(inst, users_num, suffix=DEFAULT_SUFFIX):
+    users = UserAccounts(inst, suffix)
    log.info(f'Adding {users_num} users')
    for i in range(0, users_num):
        uid = 1000 + i
@@ -81,8 +82,8 @@ def add_users(inst, users_num):
            'gidNumber': str(uid),
            'homeDirectory': f'/home/testuser{uid}'
        }
-        users.create(properties=user_properties)
-
+        with suppress(ldap.ALREADY_EXISTS):
+            users.create(properties=user_properties)


def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
@@ -173,7 +174,7 @@ class BackendHandler:
            'loginShell': '/bin/false',
            'userpassword': DEMO_PW })
        # Add regular user
-        add_users(self.inst, 10)
+        add_users(self.inst, 10, suffix=suffix)
        # Removing ou2
        ou2.delete()
        # And export
--
2.49.0
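
The contextlib.suppress() pattern adopted above makes entry creation idempotent, so re-running a fixture against an already-populated suffix no longer aborts the test run. In isolation, the idiom looks like this (the `users` collection below is a hypothetical stand-in for any lib389 DSLdapObjects collection):

```python
from contextlib import suppress

import ldap

def create_if_missing(users, properties):
    """Create an entry, treating 'already exists' as success.
    `users` stands in for any lib389 DSLdapObjects collection."""
    with suppress(ldap.ALREADY_EXISTS):
        users.create(properties=properties)
```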

@ -1,357 +0,0 @@
From b812afe4da6db134c1221eb48a6155480e4c2cb3 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 14 Jan 2025 13:55:03 -0500
Subject: [PATCH] Issue 6497 - lib389 - Configure replication for multiple
 suffixes (#6498)

Bug Description: When trying to set up replication across multiple suffixes -
particularly if one of those suffixes is a subsuffix - lib389 fails to properly
configure the replication agreements, service accounts, and required groups.
The references to the replication_managers group and service account
naming do not correctly account for non-default additional suffixes.

Fix Description: Ensure replication DNs and credentials are correctly tied to each suffix.
Enable the DSLdapObject.present method to compare values as
normalized DNs if they are DNs.
Add a test (test_multi_subsuffix_replication) to verify multi-suffix
replication across four suppliers.
Fix tests that are related to repl service accounts.

Fixes: https://github.com/389ds/389-ds-base/issues/6497

Reviewed: @progier389 (Thanks!)
---
 .../tests/suites/ds_tools/replcheck_test.py   |   4 +-
 .../suites/replication/acceptance_test.py     | 153 ++++++++++++++++++
 .../cleanallruv_shutdown_crash_test.py        |   4 +-
 .../suites/replication/regression_m2_test.py  |   2 +-
 .../replication/tls_client_auth_repl_test.py  |   4 +-
 src/lib389/lib389/_mapped_object.py           |  21 ++-
 src/lib389/lib389/replica.py                  |  10 +-
 7 files changed, 182 insertions(+), 16 deletions(-)

diff --git a/dirsrvtests/tests/suites/ds_tools/replcheck_test.py b/dirsrvtests/tests/suites/ds_tools/replcheck_test.py
index f61fc432d..dfa1d9423 100644
--- a/dirsrvtests/tests/suites/ds_tools/replcheck_test.py
+++ b/dirsrvtests/tests/suites/ds_tools/replcheck_test.py
@@ -67,10 +67,10 @@ def topo_tls_ldapi(topo):

    # Create the replication dns
    services = ServiceAccounts(m1, DEFAULT_SUFFIX)
-    repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
+    repl_m1 = services.get(f'{DEFAULT_SUFFIX}:{m1.host}:{m1.sslport}')
    repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())

-    repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
+    repl_m2 = services.get(f'{DEFAULT_SUFFIX}:{m2.host}:{m2.sslport}')
    repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())

    # Check the replication is "done".
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
index d1cfa8bdb..fc8622051 100644
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
@@ -9,6 +9,7 @@
import pytest
import logging
import time
+from lib389.backend import Backend
from lib389.replica import Replicas
from lib389.tasks import *
from lib389.utils import *
@@ -325,6 +326,158 @@ def test_modify_stripattrs(topo_m4):
    assert attr_value in entries[0].data['nsds5replicastripattrs']


+def test_multi_subsuffix_replication(topo_m4):
+    """Check that replication works with multiple subsuffixes
+
+    :id: ac1aaeae-173e-48e7-847f-03b9867443c4
+    :setup: Four suppliers replication setup
+    :steps:
+        1. Create additional suffixes
+        2. Setup replication for all suppliers
+        3. Generate test data for each suffix (add, modify, remove)
+        4. Wait for replication to complete across all suppliers for each suffix
+        5. Check that all expected data is present on all suppliers
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+        5. Success (the data is replicated everywhere)
+    """
+
+    SUFFIX_2 = "dc=test2"
+    SUFFIX_3 = f"dc=test3,{DEFAULT_SUFFIX}"
+    all_suffixes = [DEFAULT_SUFFIX, SUFFIX_2, SUFFIX_3]
+
+    test_users_by_suffix = {suffix: [] for suffix in all_suffixes}
+    created_backends = []
+
+    suppliers = [
+        topo_m4.ms["supplier1"],
+        topo_m4.ms["supplier2"],
+        topo_m4.ms["supplier3"],
+        topo_m4.ms["supplier4"]
+    ]
+
+    try:
+        # Setup additional backends and replication for the new suffixes
+        for suffix in [SUFFIX_2, SUFFIX_3]:
+            repl = ReplicationManager(suffix)
+            for supplier in suppliers:
+                # Create a new backend for this suffix
+                props = {
+                    'cn': f'userRoot_{suffix.split(",")[0][3:]}',
+                    'nsslapd-suffix': suffix
+                }
+                be = Backend(supplier)
+                be.create(properties=props)
+                be.create_sample_entries('001004002')
+
+                # Track the backend so we can remove it later
+                created_backends.append((supplier, props['cn']))
+
+                # Enable replication
+                if supplier == suppliers[0]:
+                    repl.create_first_supplier(supplier)
+                else:
+                    repl.join_supplier(suppliers[0], supplier)
+
+            # Create a full mesh topology for this suffix
+            for i, supplier_i in enumerate(suppliers):
+                for j, supplier_j in enumerate(suppliers):
+                    if i != j:
+                        repl.ensure_agreement(supplier_i, supplier_j)
+
+        # Generate test data for each suffix (add, modify, remove)
+        for suffix in all_suffixes:
+            # Create some user entries in supplier1
+            for i in range(20):
+                user_dn = f'uid=test_user_{i},{suffix}'
+                test_user = UserAccount(suppliers[0], user_dn)
+                test_user.create(properties={
+                    'uid': f'test_user_{i}',
+                    'cn': f'Test User {i}',
+                    'sn': f'User{i}',
+                    'userPassword': 'password',
+                    'uidNumber': str(1000 + i),
+                    'gidNumber': '2000',
+                    'homeDirectory': f'/home/test_user_{i}'
+                })
+                test_users_by_suffix[suffix].append(test_user)
+
+            # Perform modifications on these entries
+            for user in test_users_by_suffix[suffix]:
+                # Add some attributes
+                for j in range(3):
+                    user.add('description', f'Description {j}')
+                # Replace an attribute
+                user.replace('cn', f'Modified User {user.get_attr_val_utf8("uid")}')
+                # Delete the attributes we added
+                for j in range(3):
+                    try:
+                        user.remove('description', f'Description {j}')
+                    except Exception:
+                        pass
+
+        # Wait for replication to complete across all suppliers, for each suffix
+        for suffix in all_suffixes:
+            repl = ReplicationManager(suffix)
+            for i, supplier_i in enumerate(suppliers):
+                for j, supplier_j in enumerate(suppliers):
+                    if i != j:
+                        repl.wait_for_replication(supplier_i, supplier_j)
+
+        # Verify that each user and modification replicated to all suppliers
+        for suffix in all_suffixes:
+            for i in range(20):
+                user_dn = f'uid=test_user_{i},{suffix}'
+                # Retrieve this user from all suppliers
+                all_user_objs = topo_m4.all_get_dsldapobject(user_dn, UserAccount)
+                # Ensure it exists in all 4 suppliers
+                assert len(all_user_objs) == 4, (
+                    f"User {user_dn} not found on all suppliers. "
+                    f"Found only on {len(all_user_objs)} suppliers."
+                )
+                # Check modifications: 'cn' should now be 'Modified User test_user_{i}'
+                for user_obj in all_user_objs:
+                    expected_cn = f"Modified User test_user_{i}"
+                    actual_cn = user_obj.get_attr_val_utf8("cn")
+                    assert actual_cn == expected_cn, (
+                        f"User {user_dn} has unexpected 'cn': {actual_cn} "
+                        f"(expected '{expected_cn}') on supplier {user_obj._instance.serverid}"
+                    )
+                    # And check that 'description' attributes were removed
+                    desc_vals = user_obj.get_attr_vals_utf8('description')
+                    for j in range(3):
+                        assert f"Description {j}" not in desc_vals, (
+                            f"User {user_dn} on supplier {user_obj._instance.serverid} "
+                            f"still has 'Description {j}'"
+                        )
+    finally:
+        for suffix, test_users in test_users_by_suffix.items():
+            for user in test_users:
+                try:
+                    if user.exists():
+                        user.delete()
+                except Exception:
+                    pass
+
+        for suffix in [SUFFIX_2, SUFFIX_3]:
+            repl = ReplicationManager(suffix)
+            for supplier in suppliers:
+                try:
+                    repl.remove_supplier(supplier)
+                except Exception:
+                    pass
+
+        for (supplier, backend_name) in created_backends:
+            be = Backend(supplier, backend_name)
+            try:
+                be.delete()
+            except Exception:
+                pass
+
+
def test_new_suffix(topo_m4, new_suffix):
    """Check that we can enable replication on a new suffix

diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py
index b4b74e339..fe9955e7e 100644
--- a/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py
@@ -66,10 +66,10 @@ def test_clean_shutdown_crash(topology_m2):

    log.info('Creating replication dns')
    services = ServiceAccounts(m1, DEFAULT_SUFFIX)
-    repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
+    repl_m1 = services.get(f'{DEFAULT_SUFFIX}:{m1.host}:{m1.sslport}')
    repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())

-    repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
+    repl_m2 = services.get(f'{DEFAULT_SUFFIX}:{m2.host}:{m2.sslport}')
    repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())

    log.info('Changing auth type')
diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
index 72d4b9f89..9c707615f 100644
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
@@ -64,7 +64,7 @@ class _AgmtHelper:
            self.binddn = f'cn={cn},cn=config'
        else:
            self.usedn = False
-            self.cn = f'{self.from_inst.host}:{self.from_inst.sslport}'
+            self.cn = ldap.dn.escape_dn_chars(f'{DEFAULT_SUFFIX}:{self.from_inst.host}:{self.from_inst.sslport}')
            self.binddn = f'cn={self.cn}, ou=Services, {DEFAULT_SUFFIX}'
        self.original_state = []
        self._pass = False
diff --git a/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py b/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py
index a00dc5b78..ca17554c7 100644
--- a/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py
+++ b/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py
@@ -56,10 +56,10 @@ def tls_client_auth(topo_m2):

    # Create the replication dns
    services = ServiceAccounts(m1, DEFAULT_SUFFIX)
-    repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
+    repl_m1 = services.get(f'{DEFAULT_SUFFIX}:{m1.host}:{m1.sslport}')
    repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())

-    repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
+    repl_m2 = services.get(f'{DEFAULT_SUFFIX}:{m2.host}:{m2.sslport}')
    repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())

    # Check the replication is "done".
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
index b7391d8cc..ae00c95d0 100644
|
||||
--- a/src/lib389/lib389/_mapped_object.py
|
||||
+++ b/src/lib389/lib389/_mapped_object.py
|
||||
@@ -19,7 +19,7 @@ from lib389._constants import DIRSRV_STATE_ONLINE
|
||||
from lib389._mapped_object_lint import DSLint, DSLints
|
||||
from lib389.utils import (
|
||||
ensure_bytes, ensure_str, ensure_int, ensure_list_bytes, ensure_list_str,
|
||||
- ensure_list_int, display_log_value, display_log_data
|
||||
+ ensure_list_int, display_log_value, display_log_data, is_a_dn, normalizeDN
|
||||
)
|
||||
|
||||
# This function filter and term generation provided thanks to
|
||||
@@ -292,15 +292,28 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
_search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[attr, ],
|
||||
serverctrls=self._server_controls, clientctrls=self._client_controls,
|
||||
escapehatch='i am sure')[0]
|
||||
- values = self.get_attr_vals_bytes(attr)
|
||||
+ values = self.get_attr_vals_utf8(attr)
|
||||
self._log.debug("%s contains %s" % (self._dn, values))
|
||||
|
||||
if value is None:
|
||||
# We are just checking if SOMETHING is present ....
|
||||
return len(values) > 0
|
||||
+
|
||||
+ # Otherwise, we are checking a specific value
|
||||
+ if is_a_dn(value):
|
||||
+ normalized_value = normalizeDN(value)
|
||||
else:
|
||||
- # Check if a value really does exist.
|
||||
- return ensure_bytes(value).lower() in [x.lower() for x in values]
|
||||
+ normalized_value = ensure_bytes(value).lower()
|
||||
+
|
||||
+ # Normalize each returned value depending on whether it is a DN
|
||||
+ normalized_values = []
|
||||
+ for v in values:
|
||||
+ if is_a_dn(v):
|
||||
+ normalized_values.append(normalizeDN(v))
|
||||
+ else:
|
||||
+ normalized_values.append(ensure_bytes(v.lower()))
|
||||
+
|
||||
+ return normalized_value in normalized_values
|
||||
|
||||
def add(self, key, value):
|
||||
"""Add an attribute with a value
|
||||
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
|
||||
index 1f321972d..cd46e86d5 100644
|
||||
--- a/src/lib389/lib389/replica.py
|
||||
+++ b/src/lib389/lib389/replica.py
|
||||
@@ -2011,7 +2011,7 @@ class ReplicationManager(object):
|
||||
return repl_group
|
||||
else:
|
||||
try:
|
||||
- repl_group = groups.get('replication_managers')
|
||||
+ repl_group = groups.get(dn=f'cn=replication_managers,{self._suffix}')
|
||||
return repl_group
|
||||
except ldap.NO_SUCH_OBJECT:
|
||||
self._log.warning("{} doesn't have cn=replication_managers,{} entry \
|
||||
@@ -2035,7 +2035,7 @@ class ReplicationManager(object):
|
||||
services = ServiceAccounts(from_instance, self._suffix)
|
||||
# Generate the password and save the credentials
|
||||
# for putting them into agreements in the future
|
||||
- service_name = '{}:{}'.format(to_instance.host, port)
|
||||
+ service_name = f'{self._suffix}:{to_instance.host}:{port}'
|
||||
creds = password_generate()
|
||||
repl_service = services.ensure_state(properties={
|
||||
'cn': service_name,
|
||||
@@ -2299,7 +2299,7 @@ class ReplicationManager(object):
|
||||
Internal Only.
|
||||
"""
|
||||
|
||||
- rdn = '{}:{}'.format(from_instance.host, from_instance.sslport)
|
||||
+ rdn = f'{self._suffix}:{from_instance.host}:{from_instance.sslport}'
|
||||
try:
|
||||
creds = self._repl_creds[rdn]
|
||||
except KeyError:
|
||||
@@ -2499,8 +2499,8 @@ class ReplicationManager(object):
|
||||
# Touch something then wait_for_replication.
|
||||
from_groups = Groups(from_instance, basedn=self._suffix, rdn=None)
|
||||
to_groups = Groups(to_instance, basedn=self._suffix, rdn=None)
|
||||
- from_group = from_groups.get('replication_managers')
|
||||
- to_group = to_groups.get('replication_managers')
|
||||
+ from_group = from_groups.get(dn=f'cn=replication_managers,{self._suffix}')
|
||||
+ to_group = to_groups.get(dn=f'cn=replication_managers,{self._suffix}')
|
||||
|
||||
change = str(uuid.uuid4())
|
||||
|
||||
--
|
||||
2.49.0
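
The _mapped_object.py hunk above makes value-presence checks DN-aware: both the
candidate and the returned values are normalized before comparison, so DNs that
differ only in formatting (spacing, attribute-type case) still match. A minimal
sketch of that comparison logic, using the real lib389.utils helpers but a
hypothetical standalone function rather than lib389's actual method:

    from lib389.utils import ensure_bytes, is_a_dn, normalizeDN

    def value_present(candidate, returned_values):
        """True if candidate is among returned_values; DN values are compared
        by normalized form, plain strings case-insensitively as bytes."""
        def norm(v):
            return normalizeDN(v) if is_a_dn(v) else ensure_bytes(v.lower())
        return norm(candidate) in [norm(v) for v in returned_values]

This matters for this patch because the replication-manager service names now
embed the suffix DN, so naive byte-wise comparison of the stored values would
start failing on formatting differences.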

@ -1,126 +0,0 @@
From ebe986c78c6cd4e1f10172d8a8a11faf814fbc22 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 6 Mar 2025 16:49:53 -0500
Subject: [PATCH] Issue 6655 - fix replication release replica decoding error

Description:

When a start replication session extended op is received, acquire and
release exclusive access before returning the result to the client.
Otherwise there is a race condition where an "end" replication extended
op can arrive before the replica is released, and that leads to a
decoding error on the other replica.

Relates: https://github.com/389ds/389-ds-base/issues/6655

Reviewed by: spichugi, tbordaz, and vashirov(Thanks!!!)
---
 .../suites/replication/acceptance_test.py     | 12 ++++++++++
 ldap/servers/plugins/replication/repl_extop.c | 24 ++++++++++++-------
 2 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
index fc8622051..0f18edb44 100644
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
@@ -1,5 +1,9 @@
 # --- BEGIN COPYRIGHT BLOCK ---
+<<<<<<< HEAD
 # Copyright (C) 2021 Red Hat, Inc.
+=======
+# Copyright (C) 2025 Red Hat, Inc.
+>>>>>>> a623c3f90 (Issue 6655 - fix replication release replica decoding error)
 # All rights reserved.
 #
 # License: GPL (version 3 or any later version).
@@ -453,6 +457,13 @@ def test_multi_subsuffix_replication(topo_m4):
                             f"User {user_dn} on supplier {user_obj._instance.serverid} "
                             f"still has 'Description {j}'"
                         )
+
+        # Check there are no decoding errors
+        assert not topo_m4.ms["supplier1"].ds_error_log.match('.*decoding failed.*')
+        assert not topo_m4.ms["supplier2"].ds_error_log.match('.*decoding failed.*')
+        assert not topo_m4.ms["supplier3"].ds_error_log.match('.*decoding failed.*')
+        assert not topo_m4.ms["supplier4"].ds_error_log.match('.*decoding failed.*')
+
     finally:
         for suffix, test_users in test_users_by_suffix.items():
             for user in test_users:
@@ -507,6 +518,7 @@ def test_new_suffix(topo_m4, new_suffix):
     repl.remove_supplier(m1)
     repl.remove_supplier(m2)

+
 def test_many_attrs(topo_m4, create_entry):
     """Check a replication with many attributes (add and delete)

diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
index 14b756df1..dacc611c0 100644
--- a/ldap/servers/plugins/replication/repl_extop.c
+++ b/ldap/servers/plugins/replication/repl_extop.c
@@ -1134,6 +1134,12 @@ send_response:
         slapi_pblock_set(pb, SLAPI_EXT_OP_RET_OID, REPL_NSDS50_REPLICATION_RESPONSE_OID);
     }

+    /* connext (release our hold on it at least) */
+    if (NULL != connext) {
+        /* don't free it, just let go of it */
+        consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
+    }
+
     slapi_pblock_set(pb, SLAPI_EXT_OP_RET_VALUE, resp_bval);
     slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
                   "multimaster_extop_StartNSDS50ReplicationRequest - "
@@ -1251,12 +1257,6 @@ send_response:
     if (NULL != ruv_bervals) {
         ber_bvecfree(ruv_bervals);
     }
-    /* connext (our hold on it at least) */
-    if (NULL != connext) {
-        /* don't free it, just let go of it */
-        consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
-        connext = NULL;
-    }

     return return_value;
 }
@@ -1389,6 +1389,13 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
         }
     }
 send_response:
+    /* connext (release our hold on it at least) */
+    if (NULL != connext) {
+        /* don't free it, just let go of it */
+        consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
+        connext = NULL;
+    }
+
     /* Send the response code */
     if ((resp_bere = der_alloc()) == NULL) {
         goto free_and_return;
@@ -1419,11 +1426,10 @@ free_and_return:
     if (NULL != resp_bval) {
         ber_bvfree(resp_bval);
     }
-    /* connext (our hold on it at least) */
+    /* connext (release our hold on it if not already released) */
     if (NULL != connext) {
         /* don't free it, just let go of it */
         consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
-        connext = NULL;
     }

     return return_value;
@@ -1516,7 +1522,7 @@ multimaster_extop_abort_cleanruv(Slapi_PBlock *pb)
                       rid);
     }
     /*
-     * Get the replica
+     * Get the replica
     */
     if ((r = replica_get_replica_from_root(repl_root)) == NULL) {
         slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "multimaster_extop_abort_cleanruv - "
--
2.49.0
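
The ordering bug this patch removes can be sketched outside the server: if the
start-session handler answers the client while still holding its exclusive
hold on the replica, a fast "end session" request races with the release. The
snippet below is an illustrative Python model of the ordering only, not the
plugin's actual API (the real code releases via
consumer_connection_extension_relinquish_exclusive_access):

    import threading

    replica_busy = threading.Lock()

    def start_session_buggy(send_reply):
        with replica_busy:          # exclusive access is held...
            send_reply('started')   # ...after the client has been answered,
                                    # so its "end session" can arrive while
                                    # the replica is still marked busy

    def start_session_fixed(send_reply):
        replica_busy.acquire()      # take exclusive access
        replica_busy.release()      # release BEFORE answering the client
        send_reply('started')       # "end session" now finds the replica free

The patch applies exactly this reordering: the relinquish call moves ahead of
the point where the response berval is handed back to the client.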

@ -1,26 +0,0 @@
From 5b12463bfeb518f016acb14bc118b5f8ad3eef5e Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Thu, 15 May 2025 09:22:22 +0200
Subject: [PATCH] Issue 6655 - fix merge conflict

---
 dirsrvtests/tests/suites/replication/acceptance_test.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
index 0f18edb44..6b5186127 100644
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
@@ -1,9 +1,5 @@
 # --- BEGIN COPYRIGHT BLOCK ---
-<<<<<<< HEAD
-# Copyright (C) 2021 Red Hat, Inc.
-=======
 # Copyright (C) 2025 Red Hat, Inc.
->>>>>>> a623c3f90 (Issue 6655 - fix replication release replica decoding error)
 # All rights reserved.
 #
 # License: GPL (version 3 or any later version).
--
2.49.0

@ -1,291 +0,0 @@
From 8d62124fb4d0700378b6f0669cc9d47338a8151c Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Tue, 25 Mar 2025 09:20:50 +0100
Subject: [PATCH] Issue 6571 - Nested group does not receive memberOf attribute
 (#6679)

Bug description:
There is a risk of creating a loop in group membership.
For example, G2 is a member of G1 and G1 is a member of G2.
The memberof plugin iterates from a node up to its ancestors
to update the 'memberof' values of the node.
The plugin uses a valueset ('already_seen_ndn_vals')
to keep track of the nodes it has already visited.
It uses this valueset to detect a possible loop and,
in that case, it does not add the ancestor as a
memberof value of the node.
This is an error when there are multiple paths
up to an ancestor.

Fix description:
The ancestor should be added to the node systematically;
only when the ancestor is already in 'already_seen_ndn_vals'
does the plugin skip the final recursion.

fixes: #6571

Reviewed by: Pierre Rogier, Mark Reynolds (Thanks !!!)
---
 .../suites/memberof_plugin/regression_test.py | 109 ++++++++++++++++++
 .../tests/suites/plugins/memberof_test.py     |   5 +
 ldap/servers/plugins/memberof/memberof.c      |  52 ++++-----
 3 files changed, 137 insertions(+), 29 deletions(-)

diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
index 4c681a909..dba908975 100644
--- a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
+++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
@@ -467,6 +467,21 @@ def _find_memberof_ext(server, user_dn=None, group_dn=None, find_result=True):
     else:
         assert (not found)

+def _check_membership(server, entry, expected_members, expected_memberof):
+    assert server
+    assert entry
+
+    memberof = entry.get_attr_vals('memberof')
+    member = entry.get_attr_vals('member')
+    assert len(member) == len(expected_members)
+    assert len(memberof) == len(expected_memberof)
+    for e in expected_members:
+        server.log.info("Checking %s has member %s" % (entry.dn, e.dn))
+        assert e.dn.encode() in member
+    for e in expected_memberof:
+        server.log.info("Checking %s is member of %s" % (entry.dn, e.dn))
+        assert e.dn.encode() in memberof
+

 @pytest.mark.ds49161
 def test_memberof_group(topology_st):
@@ -535,6 +550,100 @@ def test_memberof_group(topology_st):
     _find_memberof_ext(inst, dn1, g2n, True)
     _find_memberof_ext(inst, dn2, g2n, True)

+def test_multipaths(topology_st, request):
+    """Test memberof succeeds to update memberof when
+    there are multiple paths from a leaf to an intermediate node
+
+    :id: 35aa704a-b895-4153-9dcb-1e8a13612ebf
+
+    :setup: Single instance
+
+    :steps:
+        1. Create a graph G1->U1, G2->G21->U1
+        2. Add G2 as member of G1: G1->U1, G1->G2->G21->U1
+        3. Check members and memberof in entries G1,G2,G21,User1
+
+    :expectedresults:
+        1. Graph should be created
+        2. succeed
+        3. Membership is okay
+    """
+
+    inst = topology_st.standalone
+    memberof = MemberOfPlugin(inst)
+    memberof.enable()
+    memberof.replace('memberOfEntryScope', SUFFIX)
+    if (memberof.get_memberofdeferredupdate() and memberof.get_memberofdeferredupdate().lower() == "on"):
+        delay = 3
+    else:
+        delay = 0
+    inst.restart()
+
+    #
+    # Create the hierarchy
+    #
+    #
+    # Grp1 ---------------> User1
+    #                          ^
+    #                         /
+    # Grp2 ----> Grp21 ------/
+    #
+    users = UserAccounts(inst, SUFFIX, rdn=None)
+    user1 = users.create(properties={'uid': "user1",
+                                     'cn': "user1",
+                                     'sn': 'SN',
+                                     'description': 'leaf',
+                                     'uidNumber': '1000',
+                                     'gidNumber': '2000',
+                                     'homeDirectory': '/home/user1'
+                                     })
+    group = Groups(inst, SUFFIX, rdn=None)
+    g1 = group.create(properties={'cn': 'group1',
+                                  'member': user1.dn,
+                                  'description': 'group1'})
+    g21 = group.create(properties={'cn': 'group21',
+                                   'member': user1.dn,
+                                   'description': 'group21'})
+    g2 = group.create(properties={'cn': 'group2',
+                                  'member': [g21.dn],
+                                  'description': 'group2'})
+
+    # Enable debug logs if necessary
+    #inst.config.replace('nsslapd-errorlog-level', '65536')
+    #inst.config.set('nsslapd-accesslog-level','260')
+    #inst.config.set('nsslapd-plugin-logging', 'on')
+    #inst.config.set('nsslapd-auditlog-logging-enabled','on')
+    #inst.config.set('nsslapd-auditfaillog-logging-enabled','on')
+
+    #
+    # Update the hierarchy
+    #
+    #
+    # Grp1 ----------------> User1
+    #   \                      ^
+    #    \                    /
+    #     --> Grp2 --> Grp21 --
+    #
+    g1.add_member(g2.dn)
+    time.sleep(delay)
+
+    #
+    # Check G1, G2, G21 and User1 members and memberof
+    #
+    _check_membership(inst, g1, expected_members=[g2, user1], expected_memberof=[])
+    _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g1])
+    _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2, g1])
+    _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
+
+    def fin():
+        try:
+            user1.delete()
+            g1.delete()
+            g2.delete()
+            g21.delete()
+        except:
+            pass
+    request.addfinalizer(fin)

 def _config_memberof_entrycache_on_modrdn_failure(server):

diff --git a/dirsrvtests/tests/suites/plugins/memberof_test.py b/dirsrvtests/tests/suites/plugins/memberof_test.py
index 2de1389fd..621c45daf 100644
--- a/dirsrvtests/tests/suites/plugins/memberof_test.py
+++ b/dirsrvtests/tests/suites/plugins/memberof_test.py
@@ -2168,9 +2168,14 @@ def test_complex_group_scenario_6(topology_st):

     # add Grp[1-4] (uniqueMember) to grp5
     # it creates a membership loop !!!
+    topology_st.standalone.config.replace('nsslapd-errorlog-level', '65536')
     mods = [(ldap.MOD_ADD, 'uniqueMember', memofegrp020_5)]
     for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
         topology_st.standalone.modify_s(ensure_str(grp), mods)
+    topology_st.standalone.config.replace('nsslapd-errorlog-level', '0')
+
+    results = topology_st.standalone.ds_error_log.match('.*detecting a loop in group.*')
+    assert results

     time.sleep(5)
     # assert user[1-4] are member of grp20_[1-4]
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index e75b99b14..32bdcf3f1 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -1592,7 +1592,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn
         ht_grp = ancestors_cache_lookup(config, (const void *)ndn);
         if (ht_grp) {
 #if MEMBEROF_CACHE_DEBUG
-            slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s already cached (%x)\n", ndn, ht_grp);
+            slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s already cached (%lx)\n", ndn, (ulong) ht_grp);
 #endif
             add_ancestors_cbdata(ht_grp, callback_data);
             *cached = 1;
@@ -1600,7 +1600,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn
         }
     }
 #if MEMBEROF_CACHE_DEBUG
-    slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s not cached\n", ndn);
+    slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s not cached\n", slapi_sdn_get_ndn(sdn));
 #endif

     /* Escape the dn, and build the search filter. */
@@ -3233,7 +3233,8 @@ cache_ancestors(MemberOfConfig *config, Slapi_Value **member_ndn_val, memberof_g
         return;
     }
 #if MEMBEROF_CACHE_DEBUG
-    if (double_check = ancestors_cache_lookup(config, (const void*) key)) {
+    double_check = ancestors_cache_lookup(config, (const void*) key);
+    if (double_check) {
         dump_cache_entry(double_check, "read back");
     }
 #endif
@@ -3263,13 +3264,13 @@ merge_ancestors(Slapi_Value **member_ndn_val, memberof_get_groups_data *v1, memb
             sval_dn = slapi_value_new_string(slapi_value_get_string(sval));
             if (sval_dn) {
                 /* Use the normalized dn from v1 to search it
-                 * in v2
-                 */
+                 * in v2
+                 */
                 val_sdn = slapi_sdn_new_dn_byval(slapi_value_get_string(sval_dn));
                 sval_ndn = slapi_value_new_string(slapi_sdn_get_ndn(val_sdn));
                 if (!slapi_valueset_find(
                         ((memberof_get_groups_data *)v2)->config->group_slapiattrs[0], v2_group_norm_vals, sval_ndn)) {
-/* This ancestor was not already present in v2 => Add it
+                    /* This ancestor was not already present in v2 => Add it
                      * Using slapi_valueset_add_value it consumes val
                      * so do not free sval
                      */
@@ -3318,7 +3319,7 @@ memberof_get_groups_r(MemberOfConfig *config, Slapi_DN *member_sdn, memberof_get

     merge_ancestors(&member_ndn_val, &member_data, data);
     if (!cached && member_data.use_cache)
-        cache_ancestors(config, &member_ndn_val, &member_data);
+        cache_ancestors(config, &member_ndn_val, data);

     slapi_value_free(&member_ndn_val);
     slapi_valueset_free(groupvals);
@@ -3379,25 +3380,6 @@ memberof_get_groups_callback(Slapi_Entry *e, void *callback_data)
         goto bail;
     }

-    /* Have we been here before? Note that we don't loop through all of the group_slapiattrs
-     * in config. We only need this attribute for it's syntax so the comparison can be
-     * performed. Since all of the grouping attributes are validated to use the Dinstinguished
-     * Name syntax, we can safely just use the first group_slapiattr. */
-    if (slapi_valueset_find(
-            ((memberof_get_groups_data *)callback_data)->config->group_slapiattrs[0], already_seen_ndn_vals, group_ndn_val)) {
-        /* we either hit a recursive grouping, or an entry is
-         * a member of a group through multiple paths. Either
-         * way, we can just skip processing this entry since we've
-         * already gone through this part of the grouping hierarchy. */
-        slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
-                      "memberof_get_groups_callback - Possible group recursion"
-                      " detected in %s\n",
-                      group_ndn);
-        slapi_value_free(&group_ndn_val);
-        ((memberof_get_groups_data *)callback_data)->use_cache = PR_FALSE;
-        goto bail;
-    }
-
     /* if the group does not belong to an excluded subtree, adds it to the valueset */
     if (memberof_entry_in_scope(config, group_sdn)) {
         /* Push group_dn_val into the valueset. This memory is now owned
@@ -3407,9 +3389,21 @@ memberof_get_groups_callback(Slapi_Entry *e, void *callback_data)
         group_dn_val = slapi_value_new_string(group_dn);
         slapi_valueset_add_value_ext(groupvals, group_dn_val, SLAPI_VALUE_FLAG_PASSIN);

-        /* push this ndn to detect group recursion */
-        already_seen_ndn_val = slapi_value_new_string(group_ndn);
-        slapi_valueset_add_value_ext(already_seen_ndn_vals, already_seen_ndn_val, SLAPI_VALUE_FLAG_PASSIN);
+        if (slapi_valueset_find(
+                ((memberof_get_groups_data *)callback_data)->config->group_slapiattrs[0], already_seen_ndn_vals, group_ndn_val)) {
+            /* The group group_ndn_val has already been processed
+             * skip the final recursion to prevent infinite loop
+             */
+            slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
+                          "memberof_get_groups_callback - detecting a loop in group %s (stop building memberof)\n",
+                          group_ndn);
+            ((memberof_get_groups_data *)callback_data)->use_cache = PR_FALSE;
+            goto bail;
+        } else {
+            /* keep this ndn to detect a possible group recursion */
+            already_seen_ndn_val = slapi_value_new_string(group_ndn);
+            slapi_valueset_add_value_ext(already_seen_ndn_vals, already_seen_ndn_val, SLAPI_VALUE_FLAG_PASSIN);
+        }
     }
     if (!config->skip_nested || config->fixup_task) {
         /* now recurse to find ancestors groups of e */
--
2.49.0
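
The behavioral change in memberof_get_groups_callback above can be modeled as
a generic graph walk: record every ancestor unconditionally, and use the
already-seen set only to cut off further recursion. A hypothetical Python
sketch under that reading (not the plugin's API; parents_of stands in for the
LDAP group lookup):

    def collect_ancestors(node, parents_of, seen=None, out=None):
        """Return every ancestor of node, tolerating membership loops."""
        seen = set() if seen is None else seen
        out = set() if out is None else out
        for parent in parents_of(node):
            out.add(parent)            # always record the ancestor (the fix)
            if parent in seen:
                continue               # loop or already-walked path: stop here
            seen.add(parent)
            collect_ancestors(parent, parents_of, seen, out)
        return out

Before the fix, the seen-check ran before the record step, so an ancestor
reachable through a second path could be dropped from 'memberof' entirely.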

@ -1,272 +0,0 @@
From 17da0257b24749765777a4e64c3626cb39cca639 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 31 Mar 2025 11:05:01 +0200
Subject: [PATCH] Issue 6571 - (2nd) Nested group does not receive memberOf
 attribute (#6697)

Bug description:
An erroneous debug change was made in the previous fix,
where cache_ancestors is called with the wrong parameter.

Fix description:
Restore the original param 'member_data'.
Increase the set of tests around multipaths.

fixes: #6571

review by: Simon Pichugin (Thanks !!)
---
 .../suites/memberof_plugin/regression_test.py | 154 ++++++++++++++++++
 ldap/servers/plugins/memberof/memberof.c      |  50 +++++-
 2 files changed, 203 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
index dba908975..9ba40a0c3 100644
--- a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
+++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
@@ -598,6 +598,8 @@ def test_multipaths(topology_st, request):
                                      'homeDirectory': '/home/user1'
                                      })
     group = Groups(inst, SUFFIX, rdn=None)
+    g0 = group.create(properties={'cn': 'group0',
+                                  'description': 'group0'})
     g1 = group.create(properties={'cn': 'group1',
                                   'member': user1.dn,
                                   'description': 'group1'})
@@ -635,6 +637,158 @@ def test_multipaths(topology_st, request):
     _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2, g1])
     _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])

+    #inst.config.replace('nsslapd-errorlog-level', '65536')
+    #inst.config.set('nsslapd-accesslog-level','260')
+    #inst.config.set('nsslapd-plugin-logging', 'on')
+    #inst.config.set('nsslapd-auditlog-logging-enabled','on')
+    #inst.config.set('nsslapd-auditfaillog-logging-enabled','on')
+    #
+    # Update the hierarchy
+    #
+    #
+    # Grp1 ----------------> User1
+    #                          ^
+    #                         /
+    # Grp2 --> Grp21 --------
+    #
+    g1.remove_member(g2.dn)
+    time.sleep(delay)
+
+    #
+    # Check G1, G2, G21 and User1 members and memberof
+    #
+    _check_membership(inst, g1, expected_members=[user1], expected_memberof=[])
+    _check_membership(inst, g2, expected_members=[g21], expected_memberof=[])
+    _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2])
+    _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
+
+    #
+    # Update the hierarchy
+    #
+    #
+    # Grp1 ----------------> User1
+    #   \__________            ^
+    #              |          /
+    #              v         /
+    #     Grp2 --> Grp21 ----
+    #
+    g1.add_member(g21.dn)
+    time.sleep(delay)
+
+    #
+    # Check G1, G2, G21 and User1 members and memberof
+    #
+    _check_membership(inst, g1, expected_members=[user1, g21], expected_memberof=[])
+    _check_membership(inst, g2, expected_members=[g21], expected_memberof=[])
+    _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2, g1])
+    _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
+
+    #
+    # Update the hierarchy
+    #
+    #
+    # Grp1 ----------------> User1
+    #                          ^
+    #                         /
+    # Grp2 --> Grp21 --------
+    #
+    g1.remove_member(g21.dn)
+    time.sleep(delay)
+
+    #
+    # Check G1, G2, G21 and User1 members and memberof
+    #
+    _check_membership(inst, g1, expected_members=[user1], expected_memberof=[])
+    _check_membership(inst, g2, expected_members=[g21], expected_memberof=[])
+    _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2])
+    _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
+
+    #
+    # Update the hierarchy
+    #
+    #
+    # Grp1 ----------------> User1
+    #                          ^
+    #                         /
+    # Grp0 ---> Grp2 ---> Grp21 ---
+    #
+    g0.add_member(g2.dn)
+    time.sleep(delay)
+
+    #
+    # Check G0, G1, G2, G21 and User1 members and memberof
+    #
+    _check_membership(inst, g0, expected_members=[g2], expected_memberof=[])
+    _check_membership(inst, g1, expected_members=[user1], expected_memberof=[])
+    _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g0])
+    _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g2])
+    _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1, g0])
+
+    #
+    # Update the hierarchy
+    #
+    #
+    # Grp1 ----------------> User1
+    #   ^                      ^
+    #   /                     /
+    # Grp0 ---> Grp2 ---> Grp21 ---
+    #
+    g0.add_member(g1.dn)
+    time.sleep(delay)
+
+    #
+    # Check G0, G1, G2, G21 and User1 members and memberof
+    #
+    _check_membership(inst, g0, expected_members=[g1,g2], expected_memberof=[])
+    _check_membership(inst, g1, expected_members=[user1], expected_memberof=[g0])
+    _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g0])
+    _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g2])
+    _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1, g0])
+
+    #
+    # Update the hierarchy
+    #
+    #
+    # Grp1 ----------------> User1
+    #   ^  \_____________      ^
+    #   /                |    /
+    #   /                V   /
+    # Grp0 ---> Grp2 ---> Grp21 ---
+    #
+    g1.add_member(g21.dn)
+    time.sleep(delay)
+
+    #
+    # Check G0, G1, G2, G21 and User1 members and memberof
+    #
+    _check_membership(inst, g0, expected_members=[g1, g2], expected_memberof=[])
+    _check_membership(inst, g1, expected_members=[user1, g21], expected_memberof=[g0])
+    _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g0])
+    _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g1, g2])
+    _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1, g0])
+
+    #
+    # Update the hierarchy
+    #
+    #
+    # Grp1 ----------------> User1
+    #   ^  \_____________      ^
+    #   /                |    /
+    #   /                V   /
+    # Grp0 ---> Grp2     Grp21 ---
+    #
+    g2.remove_member(g21.dn)
+    time.sleep(delay)
+
+    #
+    # Check G0, G1, G2, G21 and User1 members and memberof
+    #
+    _check_membership(inst, g0, expected_members=[g1, g2], expected_memberof=[])
+    _check_membership(inst, g1, expected_members=[user1, g21], expected_memberof=[g0])
+    _check_membership(inst, g2, expected_members=[], expected_memberof=[g0])
+    _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g1])
+    _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g1, g0])
+
     def fin():
         try:
             user1.delete()
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index 32bdcf3f1..f79b083a9 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -3258,6 +3258,35 @@ merge_ancestors(Slapi_Value **member_ndn_val, memberof_get_groups_data *v1, memb
     Slapi_ValueSet *v2_group_norm_vals = *((memberof_get_groups_data *)v2)->group_norm_vals;
     int merged_cnt = 0;

+#if MEMBEROF_CACHE_DEBUG
+    {
+        Slapi_Value *val = 0;
+        int hint = 0;
+        struct berval *bv;
+        hint = slapi_valueset_first_value(v2_groupvals, &val);
+        while (val) {
+            /* this makes a copy of the berval */
+            bv = slapi_value_get_berval(val);
+            if (bv && bv->bv_len) {
+                slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
+                              "merge_ancestors: V2 contains %s\n",
+                              bv->bv_val);
+            }
+            hint = slapi_valueset_next_value(v2_groupvals, hint, &val);
+        }
+        hint = slapi_valueset_first_value(v1_groupvals, &val);
+        while (val) {
+            /* this makes a copy of the berval */
+            bv = slapi_value_get_berval(val);
+            if (bv && bv->bv_len) {
+                slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
+                              "merge_ancestors: add %s (from V1)\n",
+                              bv->bv_val);
+            }
+            hint = slapi_valueset_next_value(v1_groupvals, hint, &val);
+        }
+    }
+#endif
     hint = slapi_valueset_first_value(v1_groupvals, &sval);
     while (sval) {
         if (memberof_compare(config, member_ndn_val, &sval)) {
@@ -3319,7 +3348,7 @@ memberof_get_groups_r(MemberOfConfig *config, Slapi_DN *member_sdn, memberof_get

     merge_ancestors(&member_ndn_val, &member_data, data);
     if (!cached && member_data.use_cache)
-        cache_ancestors(config, &member_ndn_val, data);
+        cache_ancestors(config, &member_ndn_val, &member_data);

     slapi_value_free(&member_ndn_val);
     slapi_valueset_free(groupvals);
@@ -4285,6 +4314,25 @@ memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data)

     /* get a list of all of the groups this user belongs to */
     groups = memberof_get_groups(config, sdn);
+#if MEMBEROF_CACHE_DEBUG
+    {
+        Slapi_Value *val = 0;
+        int hint = 0;
+        struct berval *bv;
+        hint = slapi_valueset_first_value(groups, &val);
+        while (val) {
+            /* this makes a copy of the berval */
+            bv = slapi_value_get_berval(val);
+            if (bv && bv->bv_len) {
+                slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
+                              "memberof_fix_memberof_callback: %s belongs to %s\n",
+                              ndn,
+                              bv->bv_val);
+            }
+            hint = slapi_valueset_next_value(groups, hint, &val);
+        }
+    }
+#endif

     if (config->group_filter) {
         if (slapi_filter_test_simple(e, config->group_filter)) {
--
2.49.0

@ -1,192 +0,0 @@
From ff364a4b1c88e1a8f678e056af88cce50cd8717c Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Fri, 28 Mar 2025 17:32:14 +0100
Subject: [PATCH] Issue 6698 - NPE after configuring invalid filtered role
 (#6699)

Server crash when doing a search after configuring a filtered role with an invalid filter.
Reason: The parts of the filter that should be overwritten are freed before knowing that the filter is invalid.
Solution: Check first that the filter is valid before freeing the filter bits.

Issue: #6698

Reviewed by: @tbordaz , @mreynolds389 (Thanks!)

(cherry picked from commit 31e120d2349eda7a41380cf78fc04cf41e394359)
---
 dirsrvtests/tests/suites/roles/basic_test.py | 80 ++++++++++++++++++--
 ldap/servers/slapd/filter.c                  | 17 ++++-
 2 files changed, 88 insertions(+), 9 deletions(-)

diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py
index 875ac47c1..b79816c58 100644
--- a/dirsrvtests/tests/suites/roles/basic_test.py
+++ b/dirsrvtests/tests/suites/roles/basic_test.py
@@ -28,6 +28,7 @@ from lib389.dbgen import dbgen_users
 from lib389.tasks import ImportTask
 from lib389.utils import get_default_db_lib
 from lib389.rewriters import *
+from lib389._mapped_object import DSLdapObject
 from lib389.backend import Backends

 logging.getLogger(__name__).setLevel(logging.INFO)
@@ -427,7 +428,6 @@ def test_vattr_on_filtered_role_restart(topo, request):
     log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
     assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')

-
     log.info("Check the virtual attribute definition is found (after a required delay)")
     topo.standalone.restart()
     time.sleep(5)
@@ -541,7 +541,7 @@ def test_managed_and_filtered_role_rewrite(topo, request):
     indexes = backend.get_indexes()
     try:
         index = indexes.create(properties={
-            'cn': attrname, 
+            'cn': attrname,
             'nsSystemIndex': 'false',
             'nsIndexType': ['eq', 'pres']
             })
@@ -593,7 +593,6 @@ def test_managed_and_filtered_role_rewrite(topo, request):
         dn = "uid=%s0000%d,%s" % (RDN, i, PARENT)
         topo.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsRoleDN', [role.dn.encode()])])

-
     # Now check that search is fast, evaluating only 4 entries
     search_start = time.time()
     entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
@@ -676,7 +675,7 @@ def test_not_such_entry_role_rewrite(topo, request):
     indexes = backend.get_indexes()
     try:
         index = indexes.create(properties={
-            'cn': attrname, 
+            'cn': attrname,
             'nsSystemIndex': 'false',
             'nsIndexType': ['eq', 'pres']
             })
@@ -730,7 +729,7 @@ def test_not_such_entry_role_rewrite(topo, request):

     # Enable plugin level to check message
     topo.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.PLUGIN))
-    
+
     # Now check that search is fast, evaluating only 4 entries
     search_start = time.time()
     entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(|(nsrole=%s)(nsrole=cn=not_such_entry_role,%s))" % (role.dn, DEFAULT_SUFFIX))
@@ -758,6 +757,77 @@ def test_not_such_entry_role_rewrite(topo, request):

     request.addfinalizer(fin)

+
+def test_rewriter_with_invalid_filter(topo, request):
+    """Test that server does not crash when having
+    invalid filter in filtered role
+
+    :id: 5013b0b2-0af6-11f0-8684-482ae39447e5
+    :setup: standalone server
+    :steps:
+        1. Setup filtered role with good filter
+        2. Setup nsrole rewriter
+        3. Restart the server
+        4. Search for entries
+        5. Setup filtered role with bad filter
+        6. Search for entries
+    :expectedresults:
+        1. Operation should succeed
+        2. Operation should succeed
+        3. Operation should succeed
+        4. Operation should succeed
+        5. Operation should succeed
+        6. Operation should succeed
+    """
+    inst = topo.standalone
+    entries = []
+
+    def fin():
+        inst.start()
+        for entry in entries:
+            entry.delete()
+    request.addfinalizer(fin)
+
+    # Setup filtered role
+    roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}')
+    filter_ko = '(&((objectClass=top)(objectClass=nsPerson))'
+    filter_ok = '(&(objectClass=top)(objectClass=nsPerson))'
+    role_properties = {
+        'cn': 'TestFilteredRole',
+        'nsRoleFilter': filter_ok,
+        'description': 'Test good filter',
+    }
+    role = roles.create(properties=role_properties)
+    entries.append(role)
+
+    # Setup nsrole rewriter
+    rewriters = Rewriters(inst)
+    rewriter_properties = {
+        "cn": "nsrole",
+        "nsslapd-libpath": 'libroles-plugin',
+        "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter',
+    }
+    rewriter = rewriters.ensure_state(properties=rewriter_properties)
+    entries.append(rewriter)
+
+    # Restart the instance
+    inst.restart()
+
+    # Search for entries
+    entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
+
+    # Set bad filter
+    role_properties = {
+        'cn': 'TestFilteredRole',
+        'nsRoleFilter': filter_ko,
+        'description': 'Test bad filter',
+    }
+    role.ensure_state(properties=role_properties)
+
+    # Search for entries
+    entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
+
+
 if __name__ == "__main__":
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s -v %s" % CURRENT_FILE)
diff --git a/ldap/servers/slapd/filter.c b/ldap/servers/slapd/filter.c
index ce09891b8..f541b8fc1 100644
--- a/ldap/servers/slapd/filter.c
+++ b/ldap/servers/slapd/filter.c
@@ -1038,9 +1038,11 @@ slapi_filter_get_subfilt(
 }

 /*
- * Before calling this function, you must free all the parts
+ * The function does not know how to free all the parts
  * which will be overwritten (i.e. slapi_free_the_filter_bits),
- * this function dosn't know how to do that
+ * so the caller must take care of that.
+ * But it must do so AFTER calling slapi_filter_replace_ex to
+ * avoid getting invalid filter if slapi_filter_replace_ex fails.
 */
 int
 slapi_filter_replace_ex(Slapi_Filter *f, char *s)
@@ -1099,8 +1101,15 @@ slapi_filter_free_bits(Slapi_Filter *f)
 int
 slapi_filter_replace_strfilter(Slapi_Filter *f, char *strfilter)
 {
-    slapi_filter_free_bits(f);
-    return (slapi_filter_replace_ex(f, strfilter));
+    /* slapi_filter_replace_ex may fail and we cannot
+     * free filter bits before calling it.
+     */
+    Slapi_Filter save_f = *f;
+    int ret = slapi_filter_replace_ex(f, strfilter);
+    if (ret == 0) {
+        slapi_filter_free_bits(&save_f);
+    }
+    return ret;
 }

 static void
--
2.49.0
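
The filter.c change above is an instance of a general rule: when replacing a
resource in place, validate (build) the replacement before destroying the
original. A hypothetical Python rendering of the same save/replace/free
ordering, not the slapd API; parse and release stand in for str2filter-style
construction and slapi_filter_free_bits-style teardown:

    def replace_filter(holder, new_text, parse, release):
        """Swap holder.filter for parse(new_text); on a bad filter the old
        one stays intact instead of becoming a freed, dangling value."""
        saved = holder.filter            # keep the old bits alive
        new_filter = parse(new_text)     # may return None on an invalid filter
        if new_filter is None:
            return False                 # old filter untouched, still usable
        holder.filter = new_filter
        release(saved)                   # only now is it safe to drop the old bits
        return True

Freeing first, as the old code did, left the filter half-destroyed when the
parse failed, which is what the search in the new test then crashed on.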

@ -1,455 +0,0 @@
From 446a23d0ed2d3ffa76c5fb5e9576d6876bdbf04f Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Fri, 28 Mar 2025 11:28:54 -0700
Subject: [PATCH] Issue 6686 - CLI - Re-enabling user accounts that reached
 inactivity limit fails with error (#6687)

Description: When attempting to unlock a user account that has been locked due
to exceeding the Account Policy Plugin's inactivity limit, the dsidm account
unlock command fails with a Python type error: "float() argument must be a
string or a number, not 'NoneType'".

Enhance the unlock method to properly handle different account locking states,
including inactivity limit exceeded states.
Add test cases to verify account inactivity locking/unlocking functionality
with CoS and role-based indirect locking.

Fix CoS template class to include the required 'ldapsubentry' objectClass.
Improve error messages to provide better guidance on unlocking indirectly
locked accounts.

Fixes: https://github.com/389ds/389-ds-base/issues/6686

Reviewed by: @mreynolds389 (Thanks!)
---
 .../clu/dsidm_account_inactivity_test.py      | 329 ++++++++++++++++++
 src/lib389/lib389/cli_idm/account.py          |  25 +-
 src/lib389/lib389/idm/account.py              |  28 +-
 3 files changed, 377 insertions(+), 5 deletions(-)
 create mode 100644 dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py

diff --git a/dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py b/dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py
new file mode 100644
index 000000000..88a34abf6
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py
@@ -0,0 +1,329 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2025 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import ldap
+import time
+import pytest
+import logging
+import os
+from datetime import datetime, timedelta
+
+from lib389 import DEFAULT_SUFFIX, DN_PLUGIN, DN_CONFIG
+from lib389.cli_idm.account import entry_status, unlock
+from lib389.topologies import topology_st
+from lib389.cli_base import FakeArgs
+from lib389.utils import ds_is_older
+from lib389.plugins import AccountPolicyPlugin, AccountPolicyConfigs
+from lib389.idm.role import FilteredRoles
+from lib389.idm.user import UserAccounts
+from lib389.cos import CosTemplate, CosPointerDefinition
+from lib389.idm.domain import Domain
+from . import check_value_in_log_and_reset
+
+pytestmark = pytest.mark.tier0
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+# Constants
+PLUGIN_ACCT_POLICY = "Account Policy Plugin"
+ACCP_DN = f"cn={PLUGIN_ACCT_POLICY},{DN_PLUGIN}"
+ACCP_CONF = f"{DN_CONFIG},{ACCP_DN}"
+POLICY_NAME = "Account Inactivity Policy"
+POLICY_DN = f"cn={POLICY_NAME},{DEFAULT_SUFFIX}"
+COS_TEMPLATE_NAME = "TemplateCoS"
+COS_TEMPLATE_DN = f"cn={COS_TEMPLATE_NAME},{DEFAULT_SUFFIX}"
+COS_DEFINITION_NAME = "DefinitionCoS"
+COS_DEFINITION_DN = f"cn={COS_DEFINITION_NAME},{DEFAULT_SUFFIX}"
+TEST_USER_NAME = "test_inactive_user"
+TEST_USER_DN = f"uid={TEST_USER_NAME},{DEFAULT_SUFFIX}"
+TEST_USER_PW = "password"
+INACTIVITY_LIMIT = 30
+
+
+@pytest.fixture(scope="function")
+def account_policy_setup(topology_st, request):
+    """Set up account policy plugin, configuration, and CoS objects"""
+    log.info("Setting up Account Policy Plugin and CoS")
+
+    # Enable Account Policy Plugin
+    plugin = AccountPolicyPlugin(topology_st.standalone)
+    if not plugin.status():
+        plugin.enable()
+    plugin.set('nsslapd-pluginarg0', ACCP_CONF)
+
+    # Configure Account Policy
+    accp_configs = AccountPolicyConfigs(topology_st.standalone)
+    accp_config = accp_configs.ensure_state(
+        properties={
+            'cn': 'config',
+            'alwaysrecordlogin': 'yes',
+            'stateattrname': 'lastLoginTime',
+            'altstateattrname': '1.1',
+            'specattrname': 'acctPolicySubentry',
+            'limitattrname': 'accountInactivityLimit'
+        }
+    )
+
+    # Add ACI for anonymous access if it doesn't exist
+    domain = Domain(topology_st.standalone, DEFAULT_SUFFIX)
+    anon_aci = '(targetattr="*")(version 3.0; acl "Anonymous read access"; allow (read,search,compare) userdn="ldap:///anyone";)'
+    domain.ensure_present('aci', anon_aci)
+
+    # Restart the server to apply plugin configuration
+    topology_st.standalone.restart()
+
+    # Create or update account policy entry
+    accp_configs = AccountPolicyConfigs(topology_st.standalone, basedn=DEFAULT_SUFFIX)
+    policy = accp_configs.ensure_state(
+        properties={
+            'cn': POLICY_NAME,
+            'objectClass': ['top', 'ldapsubentry', 'extensibleObject', 'accountpolicy'],
+            'accountInactivityLimit': str(INACTIVITY_LIMIT)
+        }
+    )
+
+    # Create or update CoS template entry
+    cos_template = CosTemplate(topology_st.standalone, dn=COS_TEMPLATE_DN)
+    cos_template.ensure_state(
+        properties={
+            'cn': COS_TEMPLATE_NAME,
+            'objectClass': ['top', 'cosTemplate', 'extensibleObject'],
+            'acctPolicySubentry': policy.dn
+        }
+    )
+
+    # Create or update CoS definition entry
+    cos_def = CosPointerDefinition(topology_st.standalone, dn=COS_DEFINITION_DN)
+    cos_def.ensure_state(
+        properties={
+            'cn': COS_DEFINITION_NAME,
+            'objectClass': ['top', 'ldapsubentry', 'cosSuperDefinition', 'cosPointerDefinition'],
+            'cosTemplateDn': COS_TEMPLATE_DN,
+            'cosAttribute': 'acctPolicySubentry default operational-default'
+        }
+    )
+
+    # Restart server to ensure CoS is applied
+    topology_st.standalone.restart()
+
+    def fin():
+        log.info('Cleaning up Account Policy settings')
+        try:
+            # Delete CoS and policy entries
+            if cos_def.exists():
+                cos_def.delete()
+            if cos_template.exists():
+                cos_template.delete()
+            if policy.exists():
+                policy.delete()
+
+            # Disable the plugin
+            if plugin.status():
+                plugin.disable()
+            topology_st.standalone.restart()
+        except Exception as e:
+            log.error(f'Failed to clean up: {e}')
+
+    request.addfinalizer(fin)
+
+    return topology_st.standalone
+
+
+@pytest.fixture(scope="function")
+def create_test_user(topology_st, account_policy_setup, request):
+    """Create a test user for the inactivity test"""
+    log.info('Creating test user')
+
+    users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
+    user = users.ensure_state(
+        properties={
+            'uid': TEST_USER_NAME,
+            'cn': TEST_USER_NAME,
+            'sn': TEST_USER_NAME,
+            'userPassword': TEST_USER_PW,
+            'uidNumber': '1000',
+            'gidNumber': '2000',
+            'homeDirectory': f'/home/{TEST_USER_NAME}'
+        }
+    )
+
+    def fin():
+        log.info('Deleting test user')
+        if user.exists():
+            user.delete()
+
+    request.addfinalizer(fin)
+    return user
+
+
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Indirect account locking not implemented")
+def test_dsidm_account_inactivity_lock_unlock(topology_st, create_test_user):
+    """Test dsidm account unlock functionality with indirectly locked accounts
+
+    :id: d7b57083-6111-4dbf-af84-6fca7fc7fb31
+    :setup: Standalone instance with Account Policy Plugin and CoS configured
+    :steps:
+        1. Create a test user
+        2. Bind as the test user to set lastLoginTime
+        3. Check account status - should be active
+        4. Set user's lastLoginTime to a time in the past that exceeds inactivity limit
+        5. Check account status - should be locked due to inactivity
+        6. Attempt to bind as the user - should fail with constraint violation
+        7. Unlock the account using dsidm account unlock
+        8. Verify account status is active again
+        9. Verify the user can bind again
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Account status shows as activated
+        4. Success
+        5. Account status shows as inactivity limit exceeded
+        6. Bind attempt fails with constraint violation
+        7. Account unlocked successfully
+        8. Account status shows as activated
+        9. User can bind successfully
+    """
+    standalone = topology_st.standalone
+    user = create_test_user
+
+    # Set up FakeArgs for dsidm commands
+    args = FakeArgs()
+    args.dn = user.dn
+    args.json = False
+    args.details = False
+
+    # 1. Check initial account status - should be active
+    log.info('Step 1: Checking initial account status')
+    entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+    check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
+
+    # 2. Bind as test user to set initial lastLoginTime
+    log.info('Step 2: Binding as test user to set lastLoginTime')
+    try:
+        conn = user.bind(TEST_USER_PW)
+        conn.unbind()
+        log.info("Successfully bound as test user")
+    except ldap.LDAPError as e:
+        pytest.fail(f"Failed to bind as test user: {e}")
+
+    # 3. Set lastLoginTime to a time in the past that exceeds inactivity limit
+    log.info('Step 3: Setting lastLoginTime to the past')
+    past_time = datetime.utcnow() - timedelta(seconds=INACTIVITY_LIMIT * 2)
+    past_time_str = past_time.strftime('%Y%m%d%H%M%SZ')
+    user.replace('lastLoginTime', past_time_str)
+
+    # 4. Check account status - should now be locked due to inactivity
+    log.info('Step 4: Checking account status after setting old lastLoginTime')
+    entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+    check_value_in_log_and_reset(topology_st, check_value='Entry State: inactivity limit exceeded')
+
+    # 5. Attempt to bind as the user - should fail
+    log.info('Step 5: Attempting to bind as user (should fail)')
+    with pytest.raises(ldap.CONSTRAINT_VIOLATION) as excinfo:
+        conn = user.bind(TEST_USER_PW)
+    assert "Account inactivity limit exceeded" in str(excinfo.value)
+
+    # 6. Unlock the account using dsidm account unlock
+    log.info('Step 6: Unlocking the account with dsidm')
+    unlock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+    check_value_in_log_and_reset(topology_st,
+                                 check_value='now unlocked by resetting lastLoginTime')
+
+    # 7. Verify account status is active again
+    log.info('Step 7: Checking account status after unlock')
+    entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+    check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
+
+    # 8. Verify the user can bind again
+    log.info('Step 8: Verifying user can bind again')
+    try:
+        conn = user.bind(TEST_USER_PW)
+        conn.unbind()
+        log.info("Successfully bound as test user after unlock")
+    except ldap.LDAPError as e:
+        pytest.fail(f"Failed to bind as test user after unlock: {e}")
+
+
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Indirect account locking not implemented")
+def test_dsidm_indirectly_locked_via_role(topology_st, create_test_user):
+    """Test dsidm account unlock functionality with accounts indirectly locked via role
+
+    :id: 7bfe69bb-cf99-4214-a763-051ab2b9cf89
+    :setup: Standalone instance with Role and user configured
+    :steps:
+        1. Create a test user
+        2. Create a Filtered Role that includes the test user
+        3. Lock the role
+        4. Check account status - should be indirectly locked through the role
+        5. Attempt to unlock the account - should fail with appropriate message
+        6. Unlock the role
+        7. Verify account status is active again
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Account status shows as indirectly locked
+        5. Unlock attempt fails with appropriate error message
+        6. Success
+        7. Account status shows as activated
+    """
+    standalone = topology_st.standalone
+    user = create_test_user
+
+    # Use FilteredRoles and ensure_state for role creation
+    log.info('Step 1: Creating Filtered Role')
+    roles = FilteredRoles(standalone, DEFAULT_SUFFIX)
+    role = roles.ensure_state(
+        properties={
+            'cn': 'TestFilterRole',
+            'nsRoleFilter': f'(uid={TEST_USER_NAME})'
+        }
+    )
+
+    # Set up FakeArgs for dsidm commands
+    args = FakeArgs()
+    args.dn = user.dn
+    args.json = False
+    args.details = False
+
+    # 2. Check account status before locking role
+    log.info('Step 2: Checking account status before locking role')
+    entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+    check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
+
+    # 3. Lock the role
+    log.info('Step 3: Locking the role')
+    role.lock()
+
+    # 4. Check account status - should be indirectly locked
+    log.info('Step 4: Checking account status after locking role')
+    entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+    check_value_in_log_and_reset(topology_st, check_value='Entry State: indirectly locked through a Role')
+
+    # 5. Attempt to unlock the account - should fail
+    log.info('Step 5: Attempting to unlock indirectly locked account')
+    unlock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+    check_value_in_log_and_reset(topology_st,
+                                 check_value='Account is locked through role')
+
+    # 6. Unlock the role
+    log.info('Step 6: Unlocking the role')
+    role.unlock()
+
+    # 7. Verify account status is active again
+    log.info('Step 7: Checking account status after unlocking role')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main(["-s", CURRENT_FILE])
|
||||
\ No newline at end of file
|
||||
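Note: the tests above lean on helpers defined earlier in the same module (create_test_user, TEST_USER_PW, INACTIVITY_LIMIT, check_value_in_log_and_reset) that are outside this hunk. For orientation only, here is a hypothetical sketch of the Account Policy setup such a fixture would apply; the DN is the plugin's standard config entry, the attribute values are assumptions, and none of these lines come from the patch:

    from lib389.plugins import AccountPolicyPlugin, AccountPolicyConfig

    ACCP_CONF = 'cn=config,cn=Account Policy Plugin,cn=plugins,cn=config'

    def enable_inactivity_limit(inst, limit):
        # Enable the Account Policy plugin and point it at its config entry
        plugin = AccountPolicyPlugin(inst)
        plugin.enable()
        plugin.set('nsslapd-pluginarg0', ACCP_CONF)
        # Record every login in lastLoginTime and lock accounts that have
        # been idle longer than `limit` seconds (illustrative values)
        accp = AccountPolicyConfig(inst, ACCP_CONF)
        accp.set('alwaysrecordlogin', 'yes')
        accp.set('stateattrname', 'lastLoginTime')
        accp.set('limitattrname', 'accountInactivityLimit')
        accp.set('accountInactivityLimit', str(limit))
        inst.restart()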
diff --git a/src/lib389/lib389/cli_idm/account.py b/src/lib389/lib389/cli_idm/account.py
index 15f766588..a0dfd8f65 100644
--- a/src/lib389/lib389/cli_idm/account.py
+++ b/src/lib389/lib389/cli_idm/account.py
@ -176,8 +176,29 @@ def unlock(inst, basedn, log, args):
     dn = _get_dn_arg(args.dn, msg="Enter dn to unlock")
     accounts = Accounts(inst, basedn)
     acct = accounts.get(dn=dn)
-    acct.unlock()
-    log.info(f'Entry {dn} is unlocked')
+
+    try:
+        # Get the account status before attempting to unlock
+        status = acct.status()
+        state = status["state"]
+
+        # Attempt to unlock the account
+        acct.unlock()
+
+        # Success message
+        log.info(f'Entry {dn} is unlocked')
+        if state == AccountState.DIRECTLY_LOCKED:
+            log.info('The entry was directly locked')
+        elif state == AccountState.INACTIVITY_LIMIT_EXCEEDED:
+            log.info('The entry was locked due to inactivity and is now unlocked by resetting lastLoginTime')
+
+    except ValueError as e:
+        # Provide a more detailed error message based on the failure reason
+        if "through role" in str(e):
+            log.error(f"Cannot unlock {dn}: {str(e)}")
+            log.info("To unlock this account, you must modify the role that's locking it.")
+        else:
+            log.error(f"Failed to unlock {dn}: {str(e)}")


 def reset_password(inst, basedn, log, args):
diff --git a/src/lib389/lib389/idm/account.py b/src/lib389/lib389/idm/account.py
index 4b823b662..faf6f6f16 100644
--- a/src/lib389/lib389/idm/account.py
+++ b/src/lib389/lib389/idm/account.py
@ -140,7 +140,8 @@ class Account(DSLdapObject):
                                          "nsAccountLock", state_attr])

         last_login_time = self._dict_get_with_ignore_indexerror(account_data, state_attr)
-        if not last_login_time:
+        # If last_login_time does not exist, fall back to alt_state_attr, but only when it is enabled and present
+        if not last_login_time and alt_state_attr in account_data:
             last_login_time = self._dict_get_with_ignore_indexerror(account_data, alt_state_attr)

         create_time = self._dict_get_with_ignore_indexerror(account_data, "createTimestamp")
@ -203,12 +204,33 @@ class Account(DSLdapObject):
         self.replace('nsAccountLock', 'true')

     def unlock(self):
-        """Unset nsAccountLock"""
+        """Unset nsAccountLock if it's set, and reset lastLoginTime if the account is locked due to inactivity"""

         current_status = self.status()
+
         if current_status["state"] == AccountState.ACTIVATED:
             raise ValueError("Account is already active")
-        self.remove('nsAccountLock', None)
+
+        if current_status["state"] == AccountState.DIRECTLY_LOCKED:
+            # Account is directly locked with the nsAccountLock attribute
+            self.remove('nsAccountLock', None)
+        elif current_status["state"] == AccountState.INACTIVITY_LIMIT_EXCEEDED:
+            # Account is locked due to inactivity - reset lastLoginTime to the current time.
+            # The lastLoginTime attribute stores its value in GMT/UTC time (Zulu time zone)
+            current_time = time.strftime('%Y%m%d%H%M%SZ', time.gmtime())
+            self.replace('lastLoginTime', current_time)
+        elif current_status["state"] == AccountState.INDIRECTLY_LOCKED:
+            # Account is locked through a role
+            role_dn = current_status.get("role_dn")
+            if role_dn:
+                raise ValueError(f"Account is locked through role {role_dn}. "
+                                 f"Please modify the role to unlock this account.")
+            else:
+                raise ValueError("Account is locked through an unknown role. "
+                                 "Please check the roles configuration to unlock this account.")
+        else:
+            # Should not happen, but just in case
+            raise ValueError(f"Unknown lock state: {current_status['state'].value}")

         # If the account can be bound to, this will attempt to do so. We don't check
         # for exceptions, just pass them back!
--
2.49.0

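Taken together, the two hunks above define what `dsidm account unlock` can and cannot do after this patch. As a quick caller-side illustration (a minimal sketch, not part of the patch, assuming an already-open DirSrv connection `inst`):

    from lib389.idm.account import Accounts, AccountState

    def unlock_or_explain(inst, basedn, dn):
        """Mirror the dsidm behaviour added above: directly locked and
        inactivity-locked accounts can be unlocked; role-locked ones cannot."""
        acct = Accounts(inst, basedn).get(dn=dn)
        state = acct.status()["state"]
        try:
            acct.unlock()
            if state == AccountState.INACTIVITY_LIMIT_EXCEEDED:
                # unlock() resets lastLoginTime instead of touching nsAccountLock
                return f"{dn} unlocked by resetting lastLoginTime"
            return f"{dn} unlocked"
        except ValueError as e:
            # INDIRECTLY_LOCKED (and "already active") surface as ValueError;
            # the role itself has to be modified to release the account
            return f"cannot unlock {dn}: {e}"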
@ -1,70 +0,0 @@
From 09a284ee43c2b4346da892f8756f97accd15ca68 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Wed, 4 Dec 2024 21:59:40 -0500
Subject: [PATCH] Issue 6302 - Allow to run replication status without a prompt
 (#6410)

Description: We should allow running replication status and
other similar commands without requesting a password and bind DN.

This way, the current instance's root DN and root PW will be used on other
instances when requesting CSN info. If they are incorrect,
then the info won't be printed, but otherwise, the agreement status
will be displayed correctly.

Fixes: https://github.com/389ds/389-ds-base/issues/6302

Reviewed by: @progier389 (Thanks!)
---
 src/lib389/lib389/cli_conf/replication.py | 15 +++------------
 1 file changed, 3 insertions(+), 12 deletions(-)

diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
index 399d0d2f8..cd4a331a8 100644
--- a/src/lib389/lib389/cli_conf/replication.py
+++ b/src/lib389/lib389/cli_conf/replication.py
@ -319,12 +319,9 @@ def list_suffixes(inst, basedn, log, args):
 def get_repl_status(inst, basedn, log, args):
     replicas = Replicas(inst)
     replica = replicas.get(args.suffix)
-    pw_and_dn_prompt = False
     if args.bind_passwd_file is not None:
         args.bind_passwd = get_passwd_from_file(args.bind_passwd_file)
-    if args.bind_passwd_prompt or args.bind_dn is None or args.bind_passwd is None:
-        pw_and_dn_prompt = True
-    status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=pw_and_dn_prompt)
+    status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=args.bind_passwd_prompt)
     if args.json:
         log.info(json.dumps({"type": "list", "items": status}, indent=4))
     else:
@ -335,12 +332,9 @@ def get_repl_status(inst, basedn, log, args):
 def get_repl_winsync_status(inst, basedn, log, args):
     replicas = Replicas(inst)
     replica = replicas.get(args.suffix)
-    pw_and_dn_prompt = False
     if args.bind_passwd_file is not None:
         args.bind_passwd = get_passwd_from_file(args.bind_passwd_file)
-    if args.bind_passwd_prompt or args.bind_dn is None or args.bind_passwd is None:
-        pw_and_dn_prompt = True
-    status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, winsync=True, pwprompt=pw_and_dn_prompt)
+    status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, winsync=True, pwprompt=args.bind_passwd_prompt)
     if args.json:
         log.info(json.dumps({"type": "list", "items": status}, indent=4))
     else:
@ -874,12 +868,9 @@ def poke_agmt(inst, basedn, log, args):

 def get_agmt_status(inst, basedn, log, args):
     agmt = get_agmt(inst, args)
-    pw_and_dn_prompt = False
    if args.bind_passwd_file is not None:
         args.bind_passwd = get_passwd_from_file(args.bind_passwd_file)
-    if args.bind_passwd_prompt or args.bind_dn is None or args.bind_passwd is None:
-        pw_and_dn_prompt = True
-    status = agmt.status(use_json=args.json, binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=pw_and_dn_prompt)
+    status = agmt.status(use_json=args.json, binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=args.bind_passwd_prompt)
     log.info(status)


--
2.49.0

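For illustration (not part of the patch), a sketch of the non-interactive call pattern this change enables; the open DirSrv connection `inst`, the replicated suffix, and the list-shaped return value are assumptions based on the json branch above:

    from lib389.replica import Replicas

    def print_repl_status(inst, suffix, binddn=None, bindpw=None):
        replica = Replicas(inst).get(suffix)
        # With the patch, omitting binddn/bindpw no longer forces a prompt;
        # the instance's own root DN and password are used on the remote side.
        for item in replica.status(binddn=binddn, bindpw=bindpw, pwprompt=False):
            print(item)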
@ -1,999 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3

[[package]]
name = "addr2line"
version = "0.24.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
dependencies = [
 "gimli",
]

[[package]]
name = "adler2"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"

[[package]]
name = "ahash"
version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9"
dependencies = [
 "getrandom 0.2.16",
 "once_cell",
 "version_check",
]

[[package]]
name = "ansi_term"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
dependencies = [
 "winapi",
]

[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
 "hermit-abi",
 "libc",
 "winapi",
]

[[package]]
name = "autocfg"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"

[[package]]
name = "backtrace"
version = "0.3.75"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002"
dependencies = [
 "addr2line",
 "cfg-if",
 "libc",
 "miniz_oxide",
 "object",
 "rustc-demangle",
 "windows-targets",
]

[[package]]
name = "base64"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"

[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"

[[package]]
name = "bitflags"
version = "2.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967"

[[package]]
name = "byteorder"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"

[[package]]
name = "cbindgen"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
dependencies = [
 "clap",
 "log",
 "proc-macro2",
 "quote",
 "serde",
 "serde_json",
 "syn 1.0.109",
 "tempfile",
 "toml",
]

[[package]]
name = "cc"
version = "1.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0fc897dc1e865cc67c0e05a836d9d3f1df3cbe442aa4a9473b18e12624a4951"
dependencies = [
 "jobserver",
 "libc",
 "shlex",
]

[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

[[package]]
name = "clap"
version = "2.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
dependencies = [
 "ansi_term",
 "atty",
 "bitflags 1.3.2",
 "strsim",
 "textwrap",
 "unicode-width",
 "vec_map",
]

[[package]]
name = "concread"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcc9816f5ac93ebd51c37f7f9a6bf2b40dfcd42978ad2aea5d542016e9244cf6"
dependencies = [
 "ahash",
 "crossbeam",
 "crossbeam-epoch",
 "crossbeam-utils",
 "lru",
 "parking_lot",
 "rand",
 "smallvec",
 "tokio",
]

[[package]]
name = "crossbeam"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8"
dependencies = [
 "crossbeam-channel",
 "crossbeam-deque",
 "crossbeam-epoch",
 "crossbeam-queue",
 "crossbeam-utils",
]

[[package]]
name = "crossbeam-channel"
version = "0.5.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
dependencies = [
 "crossbeam-utils",
]

[[package]]
name = "crossbeam-deque"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
dependencies = [
 "crossbeam-epoch",
 "crossbeam-utils",
]

[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
 "crossbeam-utils",
]

[[package]]
name = "crossbeam-queue"
version = "0.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115"
dependencies = [
 "crossbeam-utils",
]

[[package]]
name = "crossbeam-utils"
version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"

[[package]]
name = "entryuuid"
version = "0.1.0"
dependencies = [
 "cc",
 "libc",
 "paste",
 "slapi_r_plugin",
 "uuid",
]

[[package]]
name = "entryuuid_syntax"
version = "0.1.0"
dependencies = [
 "cc",
 "libc",
 "paste",
 "slapi_r_plugin",
 "uuid",
]

[[package]]
name = "errno"
version = "0.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18"
dependencies = [
 "libc",
 "windows-sys",
]

[[package]]
name = "fastrand"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"

[[package]]
name = "fernet"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
dependencies = [
 "base64",
 "byteorder",
 "getrandom 0.2.16",
 "openssl",
 "zeroize",
]

[[package]]
name = "foreign-types"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
dependencies = [
 "foreign-types-shared",
]

[[package]]
name = "foreign-types-shared"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"

[[package]]
name = "getrandom"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
dependencies = [
 "cfg-if",
 "libc",
 "wasi 0.11.0+wasi-snapshot-preview1",
]

[[package]]
name = "getrandom"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
dependencies = [
 "cfg-if",
 "libc",
 "r-efi",
 "wasi 0.14.2+wasi-0.2.4",
]

[[package]]
name = "gimli"
version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"

[[package]]
name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
dependencies = [
 "ahash",
]

[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
 "libc",
]

[[package]]
name = "instant"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
dependencies = [
 "cfg-if",
]

[[package]]
name = "itoa"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"

[[package]]
name = "jobserver"
version = "0.1.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a"
dependencies = [
 "getrandom 0.3.3",
 "libc",
]

[[package]]
name = "libc"
version = "0.2.172"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"

[[package]]
name = "librnsslapd"
version = "0.1.0"
dependencies = [
 "cbindgen",
 "libc",
 "slapd",
]

[[package]]
name = "librslapd"
version = "0.1.0"
dependencies = [
 "cbindgen",
 "concread",
 "libc",
 "slapd",
]

[[package]]
name = "linux-raw-sys"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"

[[package]]
name = "lock_api"
version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765"
dependencies = [
 "autocfg",
 "scopeguard",
]

[[package]]
name = "log"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"

[[package]]
name = "lru"
version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a"
dependencies = [
 "hashbrown",
]

[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"

[[package]]
name = "miniz_oxide"
version = "0.8.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a"
dependencies = [
 "adler2",
]

[[package]]
name = "object"
version = "0.36.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87"
dependencies = [
 "memchr",
]

[[package]]
name = "once_cell"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"

[[package]]
name = "openssl"
version = "0.10.73"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8"
dependencies = [
 "bitflags 2.9.1",
 "cfg-if",
 "foreign-types",
 "libc",
 "once_cell",
 "openssl-macros",
 "openssl-sys",
]

[[package]]
name = "openssl-macros"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.101",
]

[[package]]
name = "openssl-sys"
version = "0.9.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571"
dependencies = [
 "cc",
 "libc",
 "pkg-config",
 "vcpkg",
]

[[package]]
name = "parking_lot"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
dependencies = [
 "instant",
 "lock_api",
 "parking_lot_core",
]

[[package]]
name = "parking_lot_core"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
dependencies = [
 "cfg-if",
 "instant",
 "libc",
 "redox_syscall",
 "smallvec",
 "winapi",
]

[[package]]
name = "paste"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
dependencies = [
 "paste-impl",
 "proc-macro-hack",
]

[[package]]
name = "paste-impl"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
dependencies = [
 "proc-macro-hack",
]

[[package]]
name = "pin-project-lite"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"

[[package]]
name = "pkg-config"
version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"

[[package]]
name = "ppv-lite86"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
 "zerocopy",
]

[[package]]
name = "proc-macro-hack"
version = "0.5.20+deprecated"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"

[[package]]
name = "proc-macro2"
version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
dependencies = [
 "unicode-ident",
]

[[package]]
name = "pwdchan"
version = "0.1.0"
dependencies = [
 "base64",
 "cc",
 "libc",
 "openssl",
 "paste",
 "slapi_r_plugin",
 "uuid",
]

[[package]]
name = "quote"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
dependencies = [
 "proc-macro2",
]

[[package]]
name = "r-efi"
version = "5.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"

[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
 "libc",
 "rand_chacha",
 "rand_core",
]

[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
 "ppv-lite86",
 "rand_core",
]

[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
 "getrandom 0.2.16",
]

[[package]]
name = "redox_syscall"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
 "bitflags 1.3.2",
]

[[package]]
name = "rsds"
version = "0.1.0"

[[package]]
name = "rustc-demangle"
version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"

[[package]]
name = "rustix"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266"
dependencies = [
 "bitflags 2.9.1",
 "errno",
 "libc",
 "linux-raw-sys",
 "windows-sys",
]

[[package]]
name = "ryu"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"

[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"

[[package]]
name = "serde"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.101",
]

[[package]]
name = "serde_json"
version = "1.0.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
dependencies = [
 "itoa",
 "memchr",
 "ryu",
 "serde",
]

[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"

[[package]]
name = "slapd"
version = "0.1.0"
dependencies = [
 "fernet",
]

[[package]]
name = "slapi_r_plugin"
version = "0.1.0"
dependencies = [
 "libc",
 "paste",
 "uuid",
]

[[package]]
name = "smallvec"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9"

[[package]]
name = "strsim"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"

[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
 "proc-macro2",
 "quote",
 "unicode-ident",
]

[[package]]
name = "syn"
version = "2.0.101"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf"
dependencies = [
 "proc-macro2",
 "quote",
 "unicode-ident",
]

[[package]]
name = "tempfile"
version = "3.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1"
dependencies = [
 "fastrand",
 "getrandom 0.3.3",
 "once_cell",
 "rustix",
 "windows-sys",
]

[[package]]
name = "textwrap"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
dependencies = [
 "unicode-width",
]

[[package]]
name = "tokio"
version = "1.45.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779"
dependencies = [
 "backtrace",
 "pin-project-lite",
 "tokio-macros",
]

[[package]]
name = "tokio-macros"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.101",
]

[[package]]
name = "toml"
version = "0.5.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234"
dependencies = [
 "serde",
]

[[package]]
name = "unicode-ident"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"

[[package]]
name = "unicode-width"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af"

[[package]]
name = "uuid"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
dependencies = [
 "getrandom 0.2.16",
]

[[package]]
name = "vcpkg"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"

[[package]]
name = "vec_map"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"

[[package]]
name = "version_check"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"

[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"

[[package]]
name = "wasi"
version = "0.14.2+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
dependencies = [
 "wit-bindgen-rt",
]

[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
 "winapi-i686-pc-windows-gnu",
 "winapi-x86_64-pc-windows-gnu",
]

[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"

[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

[[package]]
name = "windows-sys"
version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
dependencies = [
 "windows-targets",
]

[[package]]
name = "windows-targets"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
 "windows_aarch64_gnullvm",
 "windows_aarch64_msvc",
 "windows_i686_gnu",
 "windows_i686_gnullvm",
 "windows_i686_msvc",
 "windows_x86_64_gnu",
 "windows_x86_64_gnullvm",
 "windows_x86_64_msvc",
]

[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"

[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"

[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"

[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"

[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"

[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"

[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"

[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"

[[package]]
name = "wit-bindgen-rt"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
dependencies = [
 "bitflags 2.9.1",
]

[[package]]
name = "zerocopy"
version = "0.8.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb"
dependencies = [
 "zerocopy-derive",
]

[[package]]
name = "zerocopy-derive"
version = "0.8.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.101",
]

[[package]]
name = "zeroize"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
dependencies = [
 "zeroize_derive",
]

[[package]]
name = "zeroize_derive"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
 "proc-macro2",
 "quote",
 "syn 2.0.101",
]
6
gating.yaml
Normal file
6
gating.yaml
Normal file
@ -0,0 +1,6 @@
--- !Policy
product_versions:
  - rhel-9
decision_context: osci_compose_gate
rules:
  - !PassingTestCaseRule {test_case_name: osci.brew-build.tier0.functional}
7
rpminspect.yaml
Normal file
7
rpminspect.yaml
Normal file
@ -0,0 +1,7 @@
---
specname:
  match: suffix
runpath:
  allowed_paths:
    - /usr/lib64/dirsrv
    - /usr/lib64/dirsrv/plugins
3
sources
Normal file
3
sources
Normal file
@ -0,0 +1,3 @@
SHA512 (389-ds-base-1.4.3.28.tar.bz2) = 85f955d3a07066bcc3de8097ac2f7583a1d40950ea898c57c8513a92f1d94fff7c136abab33f1071bfdb1ee2bde0d1c498cfe1c99223ab349cf302b2d25f62b2
SHA512 (jemalloc-5.2.1.tar.bz2) = 0bbb77564d767cef0c6fe1b97b705d368ddb360d55596945aea8c3ba5889fbce10479d85ad492c91d987caacdbbdccc706aa3688e321460069f00c05814fae02
SHA512 (vendor-1.4.3.28-1.tar.gz) = ab65a3245ab5529bc1876bf9b7b6d4c626bb329fd4ed319210aa3d6438a4368a460c63a7e2b0cf4671c43b6f61da93e167aecf8a212f8a08a2c4ee891cdcf6c1
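As an aside, the sums above can be checked with a few lines of Python. This is a hypothetical helper, not part of the repo; the SOURCES/ path is an assumption about the dist-git layout:

    import hashlib
    from pathlib import Path

    def verify_source(path, expected_sha512):
        # Hash the downloaded tarball and compare against the listed sum
        digest = hashlib.sha512(Path(path).read_bytes()).hexdigest()
        return digest == expected_sha512

    # Example (checksum truncated; use the full value from the sources file):
    # verify_source('SOURCES/389-ds-base-1.4.3.28.tar.bz2', '85f955d3...')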
58
tests/tests.yml
Normal file
58
tests/tests.yml
Normal file
@ -0,0 +1,58 @@
---
- hosts: localhost
  remote_user: root
  vars:
    ds_repo_url: https://github.com/389ds/389-ds-base.git
    ds_repo_dir: ds
    ds_repo_version: 389-ds-base-2.1
    ds_tests: "{{ ds_repo_dir }}/dirsrvtests/tests"
    pytest: "py.test-3"
    pytest_args: "-v"
    pytest_tier0_tests: "-m tier0"
    pytest_tier1_tests: "-m 'tier1 and not tier2'"
    pytest_run_command: "PYTHONPATH=../../src/lib389 {{ pytest }} {{ pytest_args }}"
    artifacts: ./artifacts
  pre_tasks:
    - name: Install policycoreutils
      action: >
        {{ ansible_pkg_mgr }} name=policycoreutils-python-utils state=present
      tags: always
      ignore_errors: yes
    - name: Prelabel non-secure ports
      tags: always
      shell: "semanage port -a -t ldap_port_t -p tcp 38900-39299"
      ignore_errors: yes
    - name: Prelabel secure ports
      tags: always
      shell: "semanage port -a -t ldap_port_t -p tcp 63600-63999"
      ignore_errors: yes
    - name: Install pip
      action: >
        {{ ansible_pkg_mgr }} name=python3-pip state=present
      tags: always
      ignore_errors: yes
    - name: Install slugify
      tags: always
      shell: "pip3 install slugify"
      ignore_errors: yes
  roles:
    - role: standard-test-basic
      tags:
        - classic
      repositories:
        - repo: "{{ ds_repo_url }}"
          dest: "{{ ds_repo_dir }}"
          version: "{{ ds_repo_version }}"
      tests:
        - tier0:
            dir: "{{ ds_tests }}"
            run: "{{ pytest_run_command }} {{ pytest_tier0_tests }}"
        - tier1:
            dir: "{{ ds_tests }}"
            run: "{{ pytest_run_command }} {{ pytest_tier1_tests }}"
      required_packages:
        - python3-pytest
        - python3-distro
        - 389-ds-base
        - 389-ds-base-snmp
        - cracklib-dicts