import 389-ds-base-1.4.3.16-6.module+el8.4.0+9207+729bbaca

This commit is contained in:
CentOS Sources 2020-12-21 08:22:36 +00:00 committed by Andrew Lukoshko
commit 584f0d770f
24 changed files with 4609 additions and 0 deletions

2
.389-ds-base.metadata Normal file
View File

@ -0,0 +1,2 @@
90cda7aea8d8644eea5a2af28c72350dd915db34 SOURCES/389-ds-base-1.4.3.16.tar.bz2
9e06b5cc57fd185379d007696da153893cf73e30 SOURCES/jemalloc-5.2.1.tar.bz2

2
.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
SOURCES/389-ds-base-1.4.3.16.tar.bz2
SOURCES/jemalloc-5.2.1.tar.bz2

View File

@ -0,0 +1,159 @@
From 81dcaf1c37c2de24c46672df8d4f968c2fb40a6e Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 11 Nov 2020 08:59:18 -0500
Subject: [PATCH 1/3] Issue 4383 - Do not normalize escaped spaces in a DN
Bug Description: Adding an entry with an escaped leading space leads to many
problems. Mainly id2entry can get corrupted during an
import of such an entry, and the entryrdn index is not
updated correctly
Fix Description: In slapi_dn_normalize_ext() leave an escaped space intact.
Relates: https://github.com/389ds/389-ds-base/issues/4383
Reviewed by: firstyear, progier, and tbordaz (Thanks!!!)
---
.../tests/suites/syntax/acceptance_test.py | 75 ++++++++++++++++++-
ldap/servers/slapd/dn.c | 8 +-
2 files changed, 77 insertions(+), 6 deletions(-)
diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py
index 543718689..7939a99a7 100644
--- a/dirsrvtests/tests/suites/syntax/acceptance_test.py
+++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -7,13 +7,12 @@
# --- END COPYRIGHT BLOCK ---
import ldap
-import logging
import pytest
import os
from lib389.schema import Schema
from lib389.config import Config
from lib389.idm.user import UserAccounts
-from lib389.idm.group import Groups
+from lib389.idm.group import Group, Groups
from lib389._constants import DEFAULT_SUFFIX
from lib389.topologies import log, topology_st as topo
@@ -127,7 +126,7 @@ def test_invalid_dn_syntax_crash(topo):
4. Success
"""
- # Create group
+ # Create group
groups = Groups(topo.standalone, DEFAULT_SUFFIX)
group = groups.create(properties={'cn': ' test'})
@@ -145,6 +144,74 @@ def test_invalid_dn_syntax_crash(topo):
groups.list()
+@pytest.mark.parametrize("props, rawdn", [
+ ({'cn': ' leadingSpace'}, "cn=\\20leadingSpace,ou=Groups,dc=example,dc=com"),
+ ({'cn': 'trailingSpace '}, "cn=trailingSpace\\20,ou=Groups,dc=example,dc=com")])
+def test_dn_syntax_spaces_delete(topo, props, rawdn):
+ """Test that an entry with a space as the first character in the DN can be
+ deleted without error. We also want to make sure the indexes are properly
+ updated by repeatedly adding and deleting the entry, and that the entry cache
+ is properly maintained.
+
+ :id: b993f37c-c2b0-4312-992c-a9048ff98965
+ :parametrized: yes
+ :setup: Standalone Instance
+ :steps:
+ 1. Create a group with a DN that has a space as the first/last
+ character.
+ 2. Delete group
+ 3. Add group
+ 4. Modify group
+ 5. Restart server and modify entry
+ 6. Delete group
+ 7. Add group back
+ 8. Delete group using specific DN
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ """
+
+ # Create group
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties=props.copy())
+
+ # Delete group (verifies DN/RDN parsing works and cache is correct)
+ group.delete()
+
+ # Add group again (verifies entryrdn index was properly updated)
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties=props.copy())
+
+ # Modify the group (verifies dn/rdn parsing is correct)
+ group.replace('description', 'escaped space group')
+
+ # Restart the server. This will pull the entry from the database and
+ # convert it into a cache entry, which is different than how a client
+ # first adds an entry and is put into the cache before being written to
+ # disk.
+ topo.standalone.restart()
+
+ # Make sure we can modify the entry (verifies cache entry was created
+ # correctly)
+ group.replace('description', 'escaped space group after restart')
+
+ # Make sure it can still be deleted (verifies cache again).
+ group.delete()
+
+ # Add it back so we can delete it using a specific DN (sanity test to verify
+ # another DN/RDN parsing variation).
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties=props.copy())
+ group = Group(topo.standalone, dn=rawdn)
+ group.delete()
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c
index 2af3f38fc..3980b897f 100644
--- a/ldap/servers/slapd/dn.c
+++ b/ldap/servers/slapd/dn.c
@@ -894,8 +894,7 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len)
s++;
}
}
- } else if (s + 2 < ends &&
- isxdigit(*(s + 1)) && isxdigit(*(s + 2))) {
+ } else if (s + 2 < ends && isxdigit(*(s + 1)) && isxdigit(*(s + 2))) {
/* esc hexpair ==> real character */
int n = slapi_hexchar2int(*(s + 1));
int n2 = slapi_hexchar2int(*(s + 2));
@@ -903,6 +902,11 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len)
if (n == 0) { /* don't change \00 */
*d++ = *++s;
*d++ = *++s;
+ } else if (n == 32) { /* leave \20 (space) intact */
+ *d++ = *s;
+ *d++ = *++s;
+ *d++ = *++s;
+ s++;
} else {
*d++ = n;
s += 3;
--
2.26.2

View File

@ -0,0 +1,232 @@
From 29c9e1c3c760f0941b022d45d14c248e9ceb9738 Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Tue, 3 Nov 2020 12:18:50 +0100
Subject: [PATCH 2/3] ticket 2058: Add keep alive entry after on-line
initialization - second version (#4399)
Bug description:
Keep alive entry is not created on target master after on line initialization,
and its RUVelement stays empty until a direct update is issued on that master
Fix description:
The patch allows a consumer (configured as a master) to create (if it did not
exist before) the consumer's keep alive entry. It creates it at the end of a
replication session at a time we are sure the changelog exists and will not
be reset. It allows a consumer to have RUVelement with csn in the RUV at the
first incoming replication session.
That is basically lkrispen's proposal with an associated pytest testcase
Second version changes:
- moved the testcase to suites/replication/regression_test.py
- set up the topology from a 2 master topology then
reinitialized the replicas from an ldif without replication metadata
rather than using the cli.
- search for keepalive entries using search_s instead of getEntry
- add a comment about keep alive entries purpose
last commit:
- wait that ruv are in sync before checking keep alive entries
Reviewed by: droideck, Firstyear
Platforms tested: F32
relates: #2058
---
.../suites/replication/regression_test.py | 130 ++++++++++++++++++
.../plugins/replication/repl5_replica.c | 14 ++
ldap/servers/plugins/replication/repl_extop.c | 4 +
3 files changed, 148 insertions(+)
diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
index 844d762b9..14b9d6a44 100644
--- a/dirsrvtests/tests/suites/replication/regression_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_test.py
@@ -98,6 +98,30 @@ def _move_ruv(ldif_file):
for dn, entry in ldif_list:
ldif_writer.unparse(dn, entry)
+def _remove_replication_data(ldif_file):
+ """ Remove the replication data from ldif file:
+ db2ldif without -r includes some of the replica data like
+ - nsUniqueId
+ - keepalive entries
+ This function filters the ldif file to remove this data
+ """
+
+ with open(ldif_file) as f:
+ parser = ldif.LDIFRecordList(f)
+ parser.parse()
+
+ ldif_list = parser.all_records
+ # Iterate on a copy of the ldif entry list
+ for dn, entry in ldif_list[:]:
+ if dn.startswith('cn=repl keep alive'):
+ ldif_list.remove((dn,entry))
+ else:
+ entry.pop('nsUniqueId')
+ with open(ldif_file, 'w') as f:
+ ldif_writer = ldif.LDIFWriter(f)
+ for dn, entry in ldif_list:
+ ldif_writer.unparse(dn, entry)
+
@pytest.fixture(scope="module")
def topo_with_sigkill(request):
@@ -897,6 +921,112 @@ def test_moving_entry_make_online_init_fail(topology_m2):
assert len(m1entries) == len(m2entries)
+def get_keepalive_entries(instance,replica):
+ # Returns the keep alive entries that exists with the suffix of the server instance
+ try:
+ entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL,
+ "(&(objectclass=ldapsubentry)(cn=repl keep alive*))",
+ ['cn', 'nsUniqueId', 'modifierTimestamp'])
+ except ldap.LDAPError as e:
+ log.fatal('Failed to retrieve keepalive entry (%s) on instance %s: error %s' % (dn, instance, str(e)))
+ assert False
+ # No error, so lets log the keepalive entries
+ if log.isEnabledFor(logging.DEBUG):
+ for ret in entries:
+ log.debug("Found keepalive entry:\n"+str(ret));
+ return entries
+
+def verify_keepalive_entries(topo, expected):
+ #Check that keep alive entries exists (or not exists) for every masters on every masters
+ #Note: The testing method is quite basic: counting that there is one keepalive entry per master.
+ # that is ok for simple test cases like test_online_init_should_create_keepalive_entries but
+ # not for the general case as keep alive associated with no more existing master may exists
+ # (for example after: db2ldif / demote a master / ldif2db / init other masters)
+ # ==> if the function is somehow pushed in lib389, a check better than simply counting the entries
+ # should be done.
+ for masterId in topo.ms:
+ master=topo.ms[masterId]
+ for replica in Replicas(master).list():
+ if (replica.get_role() != ReplicaRole.MASTER):
+ continue
+ replica_info = f'master: {masterId} RID: {replica.get_rid()} suffix: {replica.get_suffix()}'
+ log.debug(f'Checking keepAliveEntries on {replica_info}')
+ keepaliveEntries = get_keepalive_entries(master, replica);
+ expectedCount = len(topo.ms) if expected else 0
+ foundCount = len(keepaliveEntries)
+ if (foundCount == expectedCount):
+ log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.')
+ else:
+ log.error(f'{foundCount} Keepalive entries are found '
+ f'while {expectedCount} were expected on {replica_info}.')
+ assert False
+
+
+def test_online_init_should_create_keepalive_entries(topo_m2):
+ """Check that keep alive entries are created when initializing a master from another one
+
+ :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
+ :setup: Two masters replication setup
+ :steps:
+ 1. Generate ldif without replication data
+ 2 Init both masters from that ldif
+ 3 Check that keep alive entries does not exists
+ 4 Perform on line init of master2 from master1
+ 5 Check that keep alive entries exists
+ :expectedresults:
+ 1. No error while generating ldif
+ 2. No error while importing the ldif file
+ 3. No keepalive entrie should exists on any masters
+ 4. No error while initializing master2
+ 5. All keepalive entries should exist on every masters
+
+ """
+
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+ m1 = topo_m2.ms["master1"]
+ m2 = topo_m2.ms["master2"]
+ # Step 1: Generate ldif without replication data
+ m1.stop()
+ m2.stop()
+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=ldif_file, encrypt=False)
+ # Remove replication metadata that are still in the ldif
+ _remove_replication_data(ldif_file)
+
+ # Step 2: Init both masters from that ldif
+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m1.start()
+ m2.start()
+
+ """ Replica state is now as if CLI setup has been done using:
+ dsconf master1 replication enable --suffix "${SUFFIX}" --role master
+ dsconf master2 replication enable --suffix "${SUFFIX}" --role master
+ dsconf master1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+ dsconf master2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+ dsconf master1 repl-agmt create --suffix "${SUFFIX}"
+ dsconf master2 repl-agmt create --suffix "${SUFFIX}"
+ """
+
+ # Step 3: No keepalive entrie should exists on any masters
+ verify_keepalive_entries(topo_m2, False)
+
+ # Step 4: Perform on line init of master2 from master1
+ agmt = Agreements(m1).list()[0]
+ agmt.begin_reinit()
+ (done, error) = agmt.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 5: All keepalive entries should exists on every masters
+ # Verify the keep alive entry once replication is in sync
+ # (that is the step that fails when bug is not fixed)
+ repl.wait_for_ruv(m2,m1)
+ verify_keepalive_entries(topo_m2, True);
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index f01782330..f0ea0f8ef 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -373,6 +373,20 @@ replica_destroy(void **arg)
slapi_ch_free((void **)arg);
}
+/******************************************************************************
+ ******************** REPLICATION KEEP ALIVE ENTRIES **************************
+ ******************************************************************************
+ * They are subentries of the replicated suffix and there is one per master. *
+ * These entries exist only to trigger a change that get replicated over the *
+ * topology. *
+ * Their main purpose is to generate records in the changelog and they are *
+ * updated from time to time by fractional replication to insure that at *
+ * least a change must be replicated by FR after a great number of not *
+ * replicated changes are found in the changelog. The interest is that the *
+ * fractional RUV get then updated so less changes need to be walked in the *
+ * changelog when searching for the first change to send *
+ ******************************************************************************/
+
#define KEEP_ALIVE_ATTR "keepalivetimestamp"
#define KEEP_ALIVE_ENTRY "repl keep alive"
#define KEEP_ALIVE_DN_FORMAT "cn=%s %d,%s"
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
index 14c8e0bcc..af486f730 100644
--- a/ldap/servers/plugins/replication/repl_extop.c
+++ b/ldap/servers/plugins/replication/repl_extop.c
@@ -1173,6 +1173,10 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
*/
if (cl5GetState() == CL5_STATE_OPEN) {
replica_log_ruv_elements(r);
+ /* now that the changelog is open and started, we can also create the
+ * keep alive entry without risk that db and cl will not match
+ */
+ replica_subentry_check(replica_get_root(r), replica_get_rid(r));
}
/* ONREPL code that dealt with new RUV, etc was moved into the code
--
2.26.2

View File

@ -0,0 +1,513 @@
From e202c62c3b4c92163d2de9f3da9a9f3efc81e4b8 Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Thu, 12 Nov 2020 18:50:04 +0100
Subject: [PATCH 3/3] do not add referrals for masters with different data
generation #2054 (#4427)
Bug description:
The problem is that some operations that are mandatory in the usual cases are
also performed when replication cannot take place because the
database sets are different (i.e., the RUV generation ids are different).
One of the issues is that the csn generator state is updated when
starting a replication session (this is a problem when trying to
reset the time skew, as freshly reinstalled replicas get infected
by the old ones).
A second issue is that the RUV gets updated when ending a replication session
(which may add a replica that does not share the same data set;
update operations on the consumer then return referrals towards the wrong masters).
Fix description:
The fix checks the RUVs generation id before updating the csn generator
and before updating the RUV.
Reviewed by: mreynolds
firstyear
vashirov
Platforms tested: F32
---
.../suites/replication/regression_test.py | 290 ++++++++++++++++++
ldap/servers/plugins/replication/repl5.h | 1 +
.../plugins/replication/repl5_inc_protocol.c | 20 +-
.../plugins/replication/repl5_replica.c | 39 ++-
src/lib389/lib389/dseldif.py | 37 +++
5 files changed, 368 insertions(+), 19 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
index 14b9d6a44..a72af6b30 100644
--- a/dirsrvtests/tests/suites/replication/regression_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_test.py
@@ -13,6 +13,7 @@ from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts
from lib389.pwpolicy import PwPolicyManager
from lib389.utils import *
from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db, topology_i2 as topo_i2
+from lib389.topologies import topology_m2c2 as topo_m2c2
from lib389._constants import *
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.idm.user import UserAccount
@@ -22,6 +23,7 @@ from lib389.idm.directorymanager import DirectoryManager
from lib389.replica import Replicas, ReplicationManager, Changelog5, BootstrapReplicationManager
from lib389.agreement import Agreements
from lib389 import pid_from_file
+from lib389.dseldif import *
pytestmark = pytest.mark.tier1
@@ -1027,6 +1029,294 @@ def test_online_init_should_create_keepalive_entries(topo_m2):
verify_keepalive_entries(topo_m2, True);
+def get_agreement(agmts, consumer):
+ # Get agreement towards consumer among the agremment list
+ for agmt in agmts.list():
+ if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and
+ agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host):
+ return agmt
+ return None;
+
+
+def test_ruv_url_not_added_if_different_uuid(topo_m2c2):
+ """Check that RUV url is not updated if RUV generation uuid are different
+
+ :id: 7cc30a4e-0ffd-4758-8f00-e500279af344
+ :setup: Two masters + two consumers replication setup
+ :steps:
+ 1. Generate ldif without replication data
+ 2. Init both masters from that ldif
+ (to clear the ruvs and generates different generation uuid)
+ 3. Perform on line init from master1 to consumer1
+ and from master2 to consumer2
+ 4. Perform update on both masters
+ 5. Check that c1 RUV does not contains URL towards m2
+ 6. Check that c2 RUV does contains URL towards m2
+ 7. Perform on line init from master1 to master2
+ 8. Perform update on master2
+ 9. Check that c1 RUV does contains URL towards m2
+ :expectedresults:
+ 1. No error while generating ldif
+ 2. No error while importing the ldif file
+ 3. No error and Initialization done.
+ 4. No error
+ 5. master2 replicaid should not be in the consumer1 RUV
+ 6. master2 replicaid should be in the consumer2 RUV
+ 7. No error and Initialization done.
+ 8. No error
+ 9. master2 replicaid should be in the consumer1 RUV
+
+ """
+
+ # Variables initialization
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+
+ m1 = topo_m2c2.ms["master1"]
+ m2 = topo_m2c2.ms["master2"]
+ c1 = topo_m2c2.cs["consumer1"]
+ c2 = topo_m2c2.cs["consumer2"]
+
+ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
+ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
+ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
+ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)
+
+ replicid_m2 = replica_m2.get_rid()
+
+ agmts_m1 = Agreements(m1, replica_m1.dn)
+ agmts_m2 = Agreements(m2, replica_m2.dn)
+
+ m1_m2 = get_agreement(agmts_m1, m2)
+ m1_c1 = get_agreement(agmts_m1, c1)
+ m1_c2 = get_agreement(agmts_m1, c2)
+ m2_m1 = get_agreement(agmts_m2, m1)
+ m2_c1 = get_agreement(agmts_m2, c1)
+ m2_c2 = get_agreement(agmts_m2, c2)
+
+ # Step 1: Generate ldif without replication data
+ m1.stop()
+ m2.stop()
+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=ldif_file, encrypt=False)
+ # Remove replication metadata that are still in the ldif
+ # _remove_replication_data(ldif_file)
+
+ # Step 2: Init both masters from that ldif
+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m1.start()
+ m2.start()
+
+ # Step 3: Perform on line init from master1 to consumer1
+ # and from master2 to consumer2
+ m1_c1.begin_reinit()
+ m2_c2.begin_reinit()
+ (done, error) = m1_c1.wait_reinit()
+ assert done is True
+ assert error is False
+ (done, error) = m2_c2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 4: Perform update on both masters
+ repl.test_replication(m1, c1)
+ repl.test_replication(m2, c2)
+
+ # Step 5: Check that c1 RUV does not contains URL towards m2
+ ruv = replica_c1.get_ruv()
+ log.debug(f"c1 RUV: {ruv}")
+ url=ruv._rid_url.get(replica_m2.get_rid())
+ if (url == None):
+ log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV");
+ else:
+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}");
+ log.error(f"URL for RID {replica_m2.get_rid()} found in RUV")
+ #Note: this assertion fails if issue 2054 is not fixed.
+ assert False
+
+ # Step 6: Check that c2 RUV does contains URL towards m2
+ ruv = replica_c2.get_ruv()
+ log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
+ url=ruv._rid_url.get(replica_m2.get_rid())
+ if (url == None):
+ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV");
+ assert False
+ else:
+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}");
+
+
+ # Step 7: Perform on line init from master1 to master2
+ m1_m2.begin_reinit()
+ (done, error) = m1_m2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 8: Perform update on master2
+ repl.test_replication(m2, c1)
+
+ # Step 9: Check that c1 RUV does contains URL towards m2
+ ruv = replica_c1.get_ruv()
+ log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
+ url=ruv._rid_url.get(replica_m2.get_rid())
+ if (url == None):
+ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV");
+ assert False
+ else:
+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}");
+
+
+def test_csngen_state_not_updated_if_different_uuid(topo_m2c2):
+ """Check that csngen remote offset is not updated if RUV generation uuid are different
+
+ :id: 77694b8e-22ae-11eb-89b2-482ae39447e5
+ :setup: Two masters + two consumers replication setup
+ :steps:
+ 1. Disable m1<->m2 agreement to avoid propagate timeSkew
+ 2. Generate ldif without replication data
+ 3. Increase time skew on master2
+ 4. Init both masters from that ldif
+ (to clear the ruvs and generates different generation uuid)
+ 5. Perform on line init from master1 to consumer1 and master2 to consumer2
+ 6. Perform update on both masters
+ 7: Check that c1 has no time skew
+ 8: Check that c2 has time skew
+ 9. Init master2 from master1
+ 10. Perform update on master2
+ 11. Check that c1 has time skew
+ :expectedresults:
+ 1. No error
+ 2. No error while generating ldif
+ 3. No error
+ 4. No error while importing the ldif file
+ 5. No error and Initialization done.
+ 6. No error
+ 7. c1 time skew should be lesser than threshold
+ 8. c2 time skew should be higher than threshold
+ 9. No error and Initialization done.
+ 10. No error
+ 11. c1 time skew should be higher than threshold
+
+ """
+
+ # Variables initialization
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+
+ m1 = topo_m2c2.ms["master1"]
+ m2 = topo_m2c2.ms["master2"]
+ c1 = topo_m2c2.cs["consumer1"]
+ c2 = topo_m2c2.cs["consumer2"]
+
+ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
+ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
+ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
+ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)
+
+ replicid_m2 = replica_m2.get_rid()
+
+ agmts_m1 = Agreements(m1, replica_m1.dn)
+ agmts_m2 = Agreements(m2, replica_m2.dn)
+
+ m1_m2 = get_agreement(agmts_m1, m2)
+ m1_c1 = get_agreement(agmts_m1, c1)
+ m1_c2 = get_agreement(agmts_m1, c2)
+ m2_m1 = get_agreement(agmts_m2, m1)
+ m2_c1 = get_agreement(agmts_m2, c1)
+ m2_c2 = get_agreement(agmts_m2, c2)
+
+ # Step 1: Disable m1<->m2 agreement to avoid propagate timeSkew
+ m1_m2.pause()
+ m2_m1.pause()
+
+ # Step 2: Generate ldif without replication data
+ m1.stop()
+ m2.stop()
+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=ldif_file, encrypt=False)
+ # Remove replication metadata that are still in the ldif
+ # _remove_replication_data(ldif_file)
+
+ # Step 3: Increase time skew on master2
+ timeSkew=6*3600
+ # We can modify master2 time skew
+ # But the time skew on the consumer may be smaller
+ # depending on when the cnsgen generation time is updated
+ # and when first csn get replicated.
+ # Since we use timeSkew has threshold value to detect
+ # whether there are time skew or not,
+ # lets add a significative margin (longer than the test duration)
+ # to avoid any risk of erroneous failure
+ timeSkewMargin = 300
+ DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew+timeSkewMargin)
+
+ # Step 4: Init both masters from that ldif
+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m1.start()
+ m2.start()
+
+ # Step 5: Perform on line init from master1 to consumer1
+ # and from master2 to consumer2
+ m1_c1.begin_reinit()
+ m2_c2.begin_reinit()
+ (done, error) = m1_c1.wait_reinit()
+ assert done is True
+ assert error is False
+ (done, error) = m2_c2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 6: Perform update on both masters
+ repl.test_replication(m1, c1)
+ repl.test_replication(m2, c2)
+
+ # Step 7: Check that c1 has no time skew
+ # Stop server to insure that dse.ldif is uptodate
+ c1.stop()
+ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
+ c1_timeSkew = int(c1_nsState['time_skew'])
+ log.debug(f"c1 time skew: {c1_timeSkew}")
+ if (c1_timeSkew >= timeSkew):
+ log.error(f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}")
+ assert False
+ c1.start()
+
+ # Step 8: Check that c2 has time skew
+ # Stop server to insure that dse.ldif is uptodate
+ c2.stop()
+ c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0]
+ c2_timeSkew = int(c2_nsState['time_skew'])
+ log.debug(f"c2 time skew: {c2_timeSkew}")
+ if (c2_timeSkew < timeSkew):
+ log.error(f"c2 csngen state has not been synchronized with m2: time skew {c2_timeSkew}")
+ assert False
+ c2.start()
+
+ # Step 9: Perform on line init from master1 to master2
+ m1_c1.pause()
+ m1_m2.resume()
+ m1_m2.begin_reinit()
+ (done, error) = m1_m2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 10: Perform update on master2
+ repl.test_replication(m2, c1)
+
+ # Step 11: Check that c1 has time skew
+ # Stop server to insure that dse.ldif is uptodate
+ c1.stop()
+ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
+ c1_timeSkew = int(c1_nsState['time_skew'])
+ log.debug(f"c1 time skew: {c1_timeSkew}")
+ if (c1_timeSkew < timeSkew):
+ log.error(f"c1 csngen state has not been synchronized with m2: time skew {c1_timeSkew}")
+ assert False
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index b35f724c2..f1c596a3f 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -708,6 +708,7 @@ void replica_dump(Replica *r);
void replica_set_enabled(Replica *r, PRBool enable);
Replica *replica_get_replica_from_dn(const Slapi_DN *dn);
Replica *replica_get_replica_from_root(const char *repl_root);
+int replica_check_generation(Replica *r, const RUV *remote_ruv);
int replica_update_ruv(Replica *replica, const CSN *csn, const char *replica_purl);
Replica *replica_get_replica_for_op(Slapi_PBlock *pb);
/* the functions below manipulate replica hash */
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index 29b1fb073..af5e5897c 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -2161,26 +2161,12 @@ examine_update_vector(Private_Repl_Protocol *prp, RUV *remote_ruv)
} else if (NULL == remote_ruv) {
return_value = EXAMINE_RUV_PRISTINE_REPLICA;
} else {
- char *local_gen = NULL;
- char *remote_gen = ruv_get_replica_generation(remote_ruv);
- Object *local_ruv_obj;
- RUV *local_ruv;
-
PR_ASSERT(NULL != prp->replica);
- local_ruv_obj = replica_get_ruv(prp->replica);
- if (NULL != local_ruv_obj) {
- local_ruv = (RUV *)object_get_data(local_ruv_obj);
- PR_ASSERT(local_ruv);
- local_gen = ruv_get_replica_generation(local_ruv);
- object_release(local_ruv_obj);
- }
- if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) {
- return_value = EXAMINE_RUV_GENERATION_MISMATCH;
- } else {
+ if (replica_check_generation(prp->replica, remote_ruv)) {
return_value = EXAMINE_RUV_OK;
+ } else {
+ return_value = EXAMINE_RUV_GENERATION_MISMATCH;
}
- slapi_ch_free((void **)&remote_gen);
- slapi_ch_free((void **)&local_gen);
}
return return_value;
}
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index f0ea0f8ef..7e56d6557 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -812,6 +812,36 @@ replica_set_ruv(Replica *r, RUV *ruv)
replica_unlock(r->repl_lock);
}
+/*
+ * Check if replica generation is the same than the remote ruv one
+ */
+int
+replica_check_generation(Replica *r, const RUV *remote_ruv)
+{
+ int return_value;
+ char *local_gen = NULL;
+ char *remote_gen = ruv_get_replica_generation(remote_ruv);
+ Object *local_ruv_obj;
+ RUV *local_ruv;
+
+ PR_ASSERT(NULL != r);
+ local_ruv_obj = replica_get_ruv(r);
+ if (NULL != local_ruv_obj) {
+ local_ruv = (RUV *)object_get_data(local_ruv_obj);
+ PR_ASSERT(local_ruv);
+ local_gen = ruv_get_replica_generation(local_ruv);
+ object_release(local_ruv_obj);
+ }
+ if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) {
+ return_value = PR_FALSE;
+ } else {
+ return_value = PR_TRUE;
+ }
+ slapi_ch_free_string(&remote_gen);
+ slapi_ch_free_string(&local_gen);
+ return return_value;
+}
+
/*
* Update one particular CSN in an RUV. This is meant to be called
* whenever (a) the server has processed a client operation and
@@ -1298,6 +1328,11 @@ replica_update_csngen_state_ext(Replica *r, const RUV *ruv, const CSN *extracsn)
PR_ASSERT(r && ruv);
+ if (!replica_check_generation(r, ruv)) /* ruv has wrong generation - we are done */
+ {
+ return 0;
+ }
+
rc = ruv_get_max_csn(ruv, &csn);
if (rc != RUV_SUCCESS) {
return -1;
@@ -3713,8 +3748,8 @@ replica_update_ruv_consumer(Replica *r, RUV *supplier_ruv)
replica_lock(r->repl_lock);
local_ruv = (RUV *)object_get_data(r->repl_ruv);
-
- if (is_cleaned_rid(supplier_id) || local_ruv == NULL) {
+ if (is_cleaned_rid(supplier_id) || local_ruv == NULL ||
+ !replica_check_generation(r, supplier_ruv)) {
replica_unlock(r->repl_lock);
return;
}
diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py
index 10baba4d7..6850c9a8a 100644
--- a/src/lib389/lib389/dseldif.py
+++ b/src/lib389/lib389/dseldif.py
@@ -317,6 +317,43 @@ class DSEldif(DSLint):
return states
+ def _increaseTimeSkew(self, suffix, timeSkew):
+ # Increase csngen state local_offset by timeSkew
+ # Warning: instance must be stopped before calling this function
+ assert (timeSkew >= 0)
+ nsState = self.readNsState(suffix)[0]
+ self._instance.log.debug(f'_increaseTimeSkew nsState is {nsState}')
+ oldNsState = self.get(nsState['dn'], 'nsState', True)
+ self._instance.log.debug(f'oldNsState is {oldNsState}')
+
+ # Lets reencode the new nsState
+ from lib389.utils import print_nice_time
+ if pack('<h', 1) == pack('=h',1):
+ end = '<'
+ elif pack('>h', 1) == pack('=h',1):
+ end = '>'
+ else:
+ raise ValueError("Unknown endian, unable to proceed")
+
+ thelen = len(oldNsState)
+ if thelen <= 20:
+ pad = 2 # padding for short H values
+ timefmt = 'I' # timevals are unsigned 32-bit int
+ else:
+ pad = 6 # padding for short H values
+ timefmt = 'Q' # timevals are unsigned 64-bit int
+ fmtstr = "%sH%dx3%sH%dx" % (end, pad, timefmt, pad)
+ newNsState = base64.b64encode(pack(fmtstr, int(nsState['rid']),
+ int(nsState['gen_time']), int(nsState['local_offset'])+timeSkew,
+ int(nsState['remote_offset']), int(nsState['seq_num'])))
+ newNsState = newNsState.decode('utf-8')
+ self._instance.log.debug(f'newNsState is {newNsState}')
+ # Lets replace the value.
+ (entry_dn_i, attr_data) = self._find_attr(nsState['dn'], 'nsState')
+ attr_i = next(iter(attr_data))
+ self._contents[entry_dn_i + attr_i] = f"nsState:: {newNsState}"
+ self._update()
+
class FSChecks(DSLint):
"""This is for the healthcheck feature, check commonly used system config files the
--
2.26.2

View File

@ -0,0 +1,179 @@
From 826a1bb4ea88915ac492828d1cc4a901623f7866 Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Thu, 14 May 2020 14:31:47 +1000
Subject: [PATCH 1/2] Ticket 50933 - Update 2307compat.ldif
Bug Description: This resolves a potential conflict between 60nis.ldif
in freeipa and others with 2307compat, by removing the conflicting
definitions from 2307bis that were included.
Fix Description: By not including these in 2307compat, this means that
sites that rely on the values provided by 2307bis may ALSO need
60nis.ldif to be present. However, these nis values seem like they are
likely very rare in reality, and this also will avoid potential
issues with freeipa. It also is the least disruptive as we don't need
to change an already defined file, and we don't have values where the name
to oid relationship changes.
Fixes: #50933
https://pagure.io/389-ds-base/issue/50933
Author: William Brown <william@blackhats.net.au>
Review by: tbordaz (Thanks!)
---
ldap/schema/10rfc2307compat.ldif | 66 --------------------------------
ldap/schema/60autofs.ldif | 39 ++++++++++++-------
2 files changed, 26 insertions(+), 79 deletions(-)
diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif
index 8810231ac..78c588d08 100644
--- a/ldap/schema/10rfc2307compat.ldif
+++ b/ldap/schema/10rfc2307compat.ldif
@@ -176,50 +176,6 @@ attributeTypes: (
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
SINGLE-VALUE
)
-attributeTypes: (
- 1.3.6.1.1.1.1.28 NAME 'nisPublicKey'
- DESC 'NIS public key'
- EQUALITY octetStringMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.40
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.29 NAME 'nisSecretKey'
- DESC 'NIS secret key'
- EQUALITY octetStringMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.40
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.30 NAME 'nisDomain'
- DESC 'NIS domain'
- EQUALITY caseIgnoreIA5Match
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.31 NAME 'automountMapName'
- DESC 'automount Map Name'
- EQUALITY caseExactIA5Match
- SUBSTR caseExactIA5SubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.32 NAME 'automountKey'
- DESC 'Automount Key value'
- EQUALITY caseExactIA5Match
- SUBSTR caseExactIA5SubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- SINGLE-VALUE
- )
-attributeTypes: (
- 1.3.6.1.1.1.1.33 NAME 'automountInformation'
- DESC 'Automount information'
- EQUALITY caseExactIA5Match
- SUBSTR caseExactIA5SubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
- SINGLE-VALUE
- )
# end of attribute types - beginning of objectclasses
objectClasses: (
1.3.6.1.1.1.2.0 NAME 'posixAccount' SUP top AUXILIARY
@@ -324,28 +280,6 @@ objectClasses: (
seeAlso $ serialNumber'
MAY ( bootFile $ bootParameter $ cn $ description $ l $ o $ ou $ owner $ seeAlso $ serialNumber )
)
-objectClasses: (
- 1.3.6.1.1.1.2.14 NAME 'nisKeyObject' SUP top AUXILIARY
- DESC 'An object with a public and secret key'
- MUST ( cn $ nisPublicKey $ nisSecretKey )
- MAY ( uidNumber $ description )
- )
-objectClasses: (
- 1.3.6.1.1.1.2.15 NAME 'nisDomainObject' SUP top AUXILIARY
- DESC 'Associates a NIS domain with a naming context'
- MUST nisDomain
- )
-objectClasses: (
- 1.3.6.1.1.1.2.16 NAME 'automountMap' SUP top STRUCTURAL
- MUST ( automountMapName )
- MAY description
- )
-objectClasses: (
- 1.3.6.1.1.1.2.17 NAME 'automount' SUP top STRUCTURAL
- DESC 'Automount information'
- MUST ( automountKey $ automountInformation )
- MAY description
- )
## namedObject is needed for groups without members
objectClasses: (
1.3.6.1.4.1.5322.13.1.1 NAME 'namedObject' SUP top STRUCTURAL
diff --git a/ldap/schema/60autofs.ldif b/ldap/schema/60autofs.ldif
index 084e9ec30..de3922aa2 100644
--- a/ldap/schema/60autofs.ldif
+++ b/ldap/schema/60autofs.ldif
@@ -6,7 +6,23 @@ dn: cn=schema
################################################################################
#
attributeTypes: (
- 1.3.6.1.1.1.1.33
+ 1.3.6.1.1.1.1.31 NAME 'automountMapName'
+ DESC 'automount Map Name'
+ EQUALITY caseExactIA5Match
+ SUBSTR caseExactIA5SubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.32 NAME 'automountKey'
+ DESC 'Automount Key value'
+ EQUALITY caseExactIA5Match
+ SUBSTR caseExactIA5SubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.33
NAME 'automountInformation'
DESC 'Information used by the autofs automounter'
EQUALITY caseExactIA5Match
@@ -18,25 +34,22 @@ attributeTypes: (
################################################################################
#
objectClasses: (
- 1.3.6.1.1.1.2.17
- NAME 'automount'
- DESC 'An entry in an automounter map'
+ 1.3.6.1.1.1.2.16
+ NAME 'automountMap'
+ DESC 'An group of related automount objects'
SUP top
STRUCTURAL
- MUST ( cn $ automountInformation )
- MAY ( description )
+ MAY ( ou $ automountMapName $ description )
X-ORIGIN 'draft-howard-rfc2307bis'
)
-#
-################################################################################
-#
objectClasses: (
- 1.3.6.1.1.1.2.16
- NAME 'automountMap'
- DESC 'An group of related automount objects'
+ 1.3.6.1.1.1.2.17
+ NAME 'automount'
+ DESC 'An entry in an automounter map'
SUP top
STRUCTURAL
- MUST ( ou )
+ MUST ( automountInformation )
+ MAY ( cn $ description $ automountKey )
X-ORIGIN 'draft-howard-rfc2307bis'
)
#
--
2.26.2

View File

@ -0,0 +1,36 @@
From 3d9ced9e340678cc02b1a36c2139492c95ef15a6 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 12 Aug 2020 12:46:42 -0400
Subject: [PATCH 2/2] Issue 50933 - Fix OID change between 10rfc2307 and
10rfc2307compat
Bug Description: 10rfc2307compat changed the OID for nisMap objectclass to
match the standard OID, but this breaks replication with
older versions of DS.
Fix Description: Continue to use the old(invalid?) oid for nisMap so that
replication does not break in a mixed version environment.
Fixes: https://pagure.io/389-ds-base/issue/50933
Reviewed by: firstyear & tbordaz(Thanks!!)
---
ldap/schema/10rfc2307compat.ldif | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif
index 78c588d08..8ba72e1e3 100644
--- a/ldap/schema/10rfc2307compat.ldif
+++ b/ldap/schema/10rfc2307compat.ldif
@@ -253,7 +253,7 @@ objectClasses: (
MAY ( nisNetgroupTriple $ memberNisNetgroup $ description )
)
objectClasses: (
- 1.3.6.1.1.1.2.9 NAME 'nisMap' SUP top STRUCTURAL
+ 1.3.6.1.1.1.2.13 NAME 'nisMap' SUP top STRUCTURAL
DESC 'A generic abstraction of a NIS map'
MUST nisMapName
MAY description
--
2.26.2

View File

@ -0,0 +1,147 @@
From 1085823bf5586d55103cfba249fdf212e9afcb7c Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Thu, 4 Jun 2020 11:51:53 +1000
Subject: [PATCH] Ticket 51131 - improve mutex alloc in conntable
Bug Description: We previously did delayed allocation
of mutexes, which @tbordaz noted can lead to high usage
of the pthread mutex init routines. This was done under
the conntable lock, as was the cleaning of the connection.
Fix Description: rather than delayed allocation, we
initialise everything at start up instead, which means
that while startup may have a delay, at run time we have
a smaller and lighter connection allocation routine,
that is able to release the CT lock sooner.
https://pagure.io/389-ds-base/issue/51131
Author: William Brown <william@blackhats.net.au>
Review by: ???
---
ldap/servers/slapd/conntable.c | 86 +++++++++++++++++++---------------
1 file changed, 47 insertions(+), 39 deletions(-)
diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c
index b23dc3435..feb9c0d75 100644
--- a/ldap/servers/slapd/conntable.c
+++ b/ldap/servers/slapd/conntable.c
@@ -138,10 +138,21 @@ connection_table_new(int table_size)
ct->conn_next_offset = 1;
ct->conn_free_offset = 1;
+ pthread_mutexattr_t monitor_attr = {0};
+ pthread_mutexattr_init(&monitor_attr);
+ pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE);
+
/* We rely on the fact that we called calloc, which zeros the block, so we don't
* init any structure element unless a zero value is troublesome later
*/
for (i = 0; i < table_size; i++) {
+ /*
+ * Technically this is a no-op due to calloc, but we should always be
+ * careful with things like this ....
+ */
+ ct->c[i].c_state = CONN_STATE_FREE;
+ /* Start the conn setup. */
+
LBER_SOCKET invalid_socket;
/* DBDB---move this out of here once everything works */
ct->c[i].c_sb = ber_sockbuf_alloc();
@@ -161,11 +172,20 @@ connection_table_new(int table_size)
ct->c[i].c_prev = NULL;
ct->c[i].c_ci = i;
ct->c[i].c_fdi = SLAPD_INVALID_SOCKET_INDEX;
- /*
- * Technically this is a no-op due to calloc, but we should always be
- * careful with things like this ....
- */
- ct->c[i].c_state = CONN_STATE_FREE;
+
+ if (pthread_mutex_init(&(ct->c[i].c_mutex), &monitor_attr) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "pthread_mutex_init failed\n");
+ exit(1);
+ }
+
+ ct->c[i].c_pdumutex = PR_NewLock();
+ if (ct->c[i].c_pdumutex == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "PR_NewLock failed\n");
+ exit(1);
+ }
+
+ /* Ready to rock, mark as such. */
+ ct->c[i].c_state = CONN_STATE_INIT;
/* Prepare the connection into the freelist. */
ct->c_freelist[i] = &(ct->c[i]);
}
@@ -241,44 +261,32 @@ connection_table_get_connection(Connection_Table *ct, int sd)
/* Never use slot 0 */
ct->conn_next_offset += 1;
}
- /* Now prep the slot for usage. */
- PR_ASSERT(c->c_next == NULL);
- PR_ASSERT(c->c_prev == NULL);
- PR_ASSERT(c->c_extension == NULL);
-
- if (c->c_state == CONN_STATE_FREE) {
-
- c->c_state = CONN_STATE_INIT;
-
- pthread_mutexattr_t monitor_attr = {0};
- pthread_mutexattr_init(&monitor_attr);
- pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE);
- if (pthread_mutex_init(&(c->c_mutex), &monitor_attr) != 0) {
- slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "pthread_mutex_init failed\n");
- exit(1);
- }
-
- c->c_pdumutex = PR_NewLock();
- if (c->c_pdumutex == NULL) {
- c->c_pdumutex = NULL;
- slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "PR_NewLock failed\n");
- exit(1);
- }
- }
- /* Let's make sure there's no cruft left on there from the last time this connection was used. */
- /* Note: no need to lock c->c_mutex because this function is only
- * called by one thread (the slapd_daemon thread), and if we got this
- * far then `c' is not being used by any operation threads, etc.
- */
- connection_cleanup(c);
- c->c_ct = ct; /* pointer to connection table that owns this connection */
+ PR_Unlock(ct->table_mutex);
} else {
- /* couldn't find a Connection */
+ /* couldn't find a Connection, table must be full */
slapi_log_err(SLAPI_LOG_CONNS, "connection_table_get_connection", "Max open connections reached\n");
+ PR_Unlock(ct->table_mutex);
+ return NULL;
}
- /* We could move this to before the c alloc as there is no point to remain here. */
- PR_Unlock(ct->table_mutex);
+ /* Now prep the slot for usage. */
+ PR_ASSERT(c != NULL);
+ PR_ASSERT(c->c_next == NULL);
+ PR_ASSERT(c->c_prev == NULL);
+ PR_ASSERT(c->c_extension == NULL);
+ PR_ASSERT(c->c_state == CONN_STATE_INIT);
+ /* Let's make sure there's no cruft left on there from the last time this connection was used. */
+
+ /*
+ * Note: no need to lock c->c_mutex because this function is only
+ * called by one thread (the slapd_daemon thread), and if we got this
+ * far then `c' is not being used by any operation threads, etc. The
+ * memory ordering will be provided by the work queue sending c to a
+ * thread.
+ */
+ connection_cleanup(c);
+ /* pointer to connection table that owns this connection */
+ c->c_ct = ct;
return c;
}
--
2.26.2

View File

@ -0,0 +1,66 @@
From a9f53e9958861e6a7a827bd852d72d51a6512396 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 25 Nov 2020 18:07:34 +0100
Subject: [PATCH] Issue 4297 - 2nd fix for on ADD replication URP issue
internal searches with filter containing unescaped chars (#4439)
Bug description:
Previous fix is buggy because slapi_filter_escape_filter_value returns
an escaped filter component, not an escaped assertion value.
Fix description:
use the escaped filter component
relates: https://github.com/389ds/389-ds-base/issues/4297
Reviewed by: William Brown
Platforms tested: F31
---
ldap/servers/plugins/replication/urp.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
index f41dbc72d..ed340c9d8 100644
--- a/ldap/servers/plugins/replication/urp.c
+++ b/ldap/servers/plugins/replication/urp.c
@@ -1411,12 +1411,12 @@ urp_add_check_tombstone (Slapi_PBlock *pb, char *sessionid, Slapi_Entry *entry,
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
char *basedn = slapi_entry_get_ndn(entry);
- char *escaped_basedn;
+ char *escaped_filter;
const Slapi_DN *suffix = slapi_get_suffix_by_dn(slapi_entry_get_sdn (entry));
- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", basedn);
- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
- slapi_ch_free((void **)&escaped_basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
+ slapi_ch_free((void **)&escaped_filter);
newpb = slapi_pblock_new();
slapi_search_internal_set_pb(newpb,
slapi_sdn_get_dn(suffix), /* Base DN */
@@ -1605,15 +1605,15 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
const char *basedn = slapi_sdn_get_dn(parentdn);
- char *escaped_basedn;
- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn);
+ char *escaped_filter;
+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn);
char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn");
CSN *conflict_csn = csn_new_by_string(conflict_csnstr);
CSN *tombstone_csn = NULL;
- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
- slapi_ch_free((void **)&escaped_basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
+ slapi_ch_free((void **)&escaped_filter);
newpb = slapi_pblock_new();
char *parent_dn = slapi_dn_parent (basedn);
slapi_search_internal_set_pb(newpb,
--
2.26.2

View File

@ -0,0 +1,502 @@
From 4faec52810e12070ef72da347bb590c57d8761e4 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 20 Nov 2020 17:47:18 -0500
Subject: [PATCH 1/2] Issue 3657 - Add options to dsctl for dsrc file
Description: Add options to create, modify, delete, and display
the .dsrc CLI tool shortcut file.
Relates: https://github.com/389ds/389-ds-base/issues/3657
Reviewed by: firstyear(Thanks!)
---
dirsrvtests/tests/suites/clu/dsrc_test.py | 136 ++++++++++
src/lib389/cli/dsctl | 2 +
src/lib389/lib389/cli_ctl/dsrc.py | 312 ++++++++++++++++++++++
3 files changed, 450 insertions(+)
create mode 100644 dirsrvtests/tests/suites/clu/dsrc_test.py
create mode 100644 src/lib389/lib389/cli_ctl/dsrc.py
diff --git a/dirsrvtests/tests/suites/clu/dsrc_test.py b/dirsrvtests/tests/suites/clu/dsrc_test.py
new file mode 100644
index 000000000..1b27700ec
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/dsrc_test.py
@@ -0,0 +1,136 @@
+import logging
+import pytest
+import os
+from os.path import expanduser
+from lib389.cli_base import FakeArgs
+from lib389.cli_ctl.dsrc import create_dsrc, modify_dsrc, delete_dsrc, display_dsrc
+from lib389._constants import DEFAULT_SUFFIX, DN_DM
+from lib389.topologies import topology_st as topo
+
+log = logging.getLogger(__name__)
+
+
+@pytest.fixture(scope="function")
+def setup(topo, request):
+ """Preserve any existing .dsrc file"""
+
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+ backup_file = dsrc_file + ".original"
+ if os.path.exists(dsrc_file):
+ os.rename(dsrc_file, backup_file)
+
+ def fin():
+ if os.path.exists(backup_file):
+ os.rename(backup_file, dsrc_file)
+
+ request.addfinalizer(fin)
+
+
+def test_dsrc(topo, setup):
+ """Test "dsctl dsrc" command
+
+ :id: 0610de6c-e167-4761-bdab-3e677b2d44bb
+ :setup: Standalone Instance
+ :steps:
+ 1. Test creation works
+ 2. Test creating duplicate section
+ 3. Test adding an additional inst config works
+ 4. Test removing an instance works
+ 5. Test modify works
+ 6. Test delete works
+ 7. Test display fails when no file is present
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ """
+
+ inst = topo.standalone
+ serverid = inst.serverid
+ second_inst_name = "Second"
+ second_inst_basedn = "o=second"
+ different_suffix = "o=different"
+
+ # Setup our args
+ args = FakeArgs()
+ args.basedn = DEFAULT_SUFFIX
+ args.binddn = DN_DM
+ args.json = None
+ args.uri = None
+ args.saslmech = None
+ args.tls_cacertdir = None
+ args.tls_cert = None
+ args.tls_key = None
+ args.tls_reqcert = None
+ args.starttls = None
+ args.cancel_starttls = None
+ args.pwdfile = None
+ args.do_it = True
+
+ # Create a dsrc configuration entry
+ create_dsrc(inst, log, args)
+ display_dsrc(inst, topo.logcap.log, args)
+ assert topo.logcap.contains("basedn = " + args.basedn)
+ assert topo.logcap.contains("binddn = " + args.binddn)
+ assert topo.logcap.contains("[" + serverid + "]")
+ topo.logcap.flush()
+
+ # Attempt to add duplicate instance section
+ with pytest.raises(ValueError):
+ create_dsrc(inst, log, args)
+
+ # Test adding a second instance works correctly
+ inst.serverid = second_inst_name
+ args.basedn = second_inst_basedn
+ create_dsrc(inst, log, args)
+ display_dsrc(inst, topo.logcap.log, args)
+ assert topo.logcap.contains("basedn = " + args.basedn)
+ assert topo.logcap.contains("[" + second_inst_name + "]")
+ topo.logcap.flush()
+
+ # Delete second instance
+ delete_dsrc(inst, log, args)
+ inst.serverid = serverid # Restore original instance name
+ display_dsrc(inst, topo.logcap.log, args)
+ assert not topo.logcap.contains("[" + second_inst_name + "]")
+ assert not topo.logcap.contains("basedn = " + args.basedn)
+ # Make sure first instance config is still present
+ assert topo.logcap.contains("[" + serverid + "]")
+ assert topo.logcap.contains("binddn = " + args.binddn)
+ topo.logcap.flush()
+
+ # Modify the config
+ args.basedn = different_suffix
+ modify_dsrc(inst, log, args)
+ display_dsrc(inst, topo.logcap.log, args)
+ assert topo.logcap.contains(different_suffix)
+ topo.logcap.flush()
+
+ # Remove an arg from the config
+ args.basedn = ""
+ modify_dsrc(inst, log, args)
+ display_dsrc(inst, topo.logcap.log, args)
+ assert not topo.logcap.contains(different_suffix)
+ topo.logcap.flush()
+
+ # Remove the last entry, which should delete the file
+ delete_dsrc(inst, log, args)
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+ assert not os.path.exists(dsrc_file)
+
+ # Make sure display fails
+ with pytest.raises(ValueError):
+ display_dsrc(inst, log, args)
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main(["-s", CURRENT_FILE])
+
diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl
index fe9bc10e9..69f069297 100755
--- a/src/lib389/cli/dsctl
+++ b/src/lib389/cli/dsctl
@@ -23,6 +23,7 @@ from lib389.cli_ctl import tls as cli_tls
from lib389.cli_ctl import health as cli_health
from lib389.cli_ctl import nsstate as cli_nsstate
from lib389.cli_ctl import dbgen as cli_dbgen
+from lib389.cli_ctl import dsrc as cli_dsrc
from lib389.cli_ctl.instance import instance_remove_all
from lib389.cli_base import (
disconnect_instance,
@@ -61,6 +62,7 @@ cli_tls.create_parser(subparsers)
cli_health.create_parser(subparsers)
cli_nsstate.create_parser(subparsers)
cli_dbgen.create_parser(subparsers)
+cli_dsrc.create_parser(subparsers)
argcomplete.autocomplete(parser)
diff --git a/src/lib389/lib389/cli_ctl/dsrc.py b/src/lib389/lib389/cli_ctl/dsrc.py
new file mode 100644
index 000000000..e49c7f819
--- /dev/null
+++ b/src/lib389/lib389/cli_ctl/dsrc.py
@@ -0,0 +1,312 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import json
+from os.path import expanduser
+from os import path, remove
+from ldapurl import isLDAPUrl
+from ldap.dn import is_dn
+import configparser
+
+
+def create_dsrc(inst, log, args):
+ """Create the .dsrc file
+
+ [instance]
+ uri = ldaps://hostname:port
+ basedn = dc=example,dc=com
+ binddn = uid=user,....
+ saslmech = [EXTERNAL|PLAIN]
+ tls_cacertdir = /path/to/cacertdir
+ tls_cert = /path/to/user.crt
+ tls_key = /path/to/user.key
+ tls_reqcert = [never, hard, allow]
+ starttls = [true, false]
+ pwdfile = /path/to/file
+ """
+
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+ config = configparser.ConfigParser()
+ config.read(dsrc_file)
+
+ # Verify this section does not already exist
+ instances = config.sections()
+ if inst.serverid in instances:
+ raise ValueError("There is already a configuration section for this instance!")
+
+ # Process and validate the args
+ config[inst.serverid] = {}
+
+ if args.uri is not None:
+ if not isLDAPUrl(args.uri):
+ raise ValueError("The uri is not a valid LDAP URL!")
+ if args.uri.startswith("ldapi"):
+ # We must use EXTERNAL saslmech for LDAPI
+ args.saslmech = "EXTERNAL"
+ config[inst.serverid]['uri'] = args.uri
+ if args.basedn is not None:
+ if not is_dn(args.basedn):
+ raise ValueError("The basedn is not a valid DN!")
+ config[inst.serverid]['basedn'] = args.basedn
+ if args.binddn is not None:
+ if not is_dn(args.binddn):
+ raise ValueError("The binddn is not a valid DN!")
+ config[inst.serverid]['binddn'] = args.binddn
+ if args.saslmech is not None:
+ if args.saslmech not in ['EXTERNAL', 'PLAIN']:
+ raise ValueError("The saslmech must be EXTERNAL or PLAIN!")
+ config[inst.serverid]['saslmech'] = args.saslmech
+ if args.tls_cacertdir is not None:
+ if not path.exists(args.tls_cacertdir):
+ raise ValueError('--tls-cacertdir directory does not exist!')
+ config[inst.serverid]['tls_cacertdir'] = args.tls_cacertdir
+ if args.tls_cert is not None:
+ if not path.exists(args.tls_cert):
+ raise ValueError('--tls-cert does not point to an existing file!')
+ config[inst.serverid]['tls_cert'] = args.tls_cert
+ if args.tls_key is not None:
+ if not path.exists(args.tls_key):
+ raise ValueError('--tls-key does not point to an existing file!')
+ config[inst.serverid]['tls_key'] = args.tls_key
+ if args.tls_reqcert is not None:
+ if args.tls_reqcert not in ['never', 'hard', 'allow']:
+ raise ValueError('--tls-reqcert value is invalid (must be either "never", "allow", or "hard")!')
+ config[inst.serverid]['tls_reqcert'] = args.tls_reqcert
+ if args.starttls:
+ config[inst.serverid]['starttls'] = 'true'
+ if args.pwdfile is not None:
+ if not path.exists(args.pwdfile):
+ raise ValueError('--pwdfile does not exist!')
+ config[inst.serverid]['pwdfile'] = args.pwdfile
+
+ if len(config[inst.serverid]) == 0:
+ # No args set
+ raise ValueError("You must set at least one argument for the new dsrc file!")
+
+ # Print a preview of the config
+ log.info(f'Updating "{dsrc_file}" with:\n')
+ log.info(f' [{inst.serverid}]')
+ for k, v in config[inst.serverid].items():
+ log.info(f' {k} = {v}')
+
+ # Perform confirmation?
+ if not args.do_it:
+ while 1:
+ val = input(f'\nUpdate "{dsrc_file}" ? [yes]: ').rstrip().lower()
+ if val == '' or val == 'y' or val == 'yes':
+ break
+ if val == 'n' or val == 'no':
+ return
+
+ # Now write the file
+ with open(dsrc_file, 'w') as configfile:
+ config.write(configfile)
+
+ log.info(f'Successfully updated: {dsrc_file}')
+
+
+def modify_dsrc(inst, log, args):
+ """Modify the instance config
+ """
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+
+ if path.exists(dsrc_file):
+ config = configparser.ConfigParser()
+ config.read(dsrc_file)
+
+ # Verify we have a section to modify
+ instances = config.sections()
+ if inst.serverid not in instances:
+ raise ValueError("There is no configuration section for this instance to modify!")
+
+ # Process and validate the args
+ if args.uri is not None:
+ if not isLDAPUrl(args.uri):
+ raise ValueError("The uri is not a valid LDAP URL!")
+ if args.uri.startswith("ldapi"):
+ # We must use EXTERNAL saslmech for LDAPI
+ args.saslmech = "EXTERNAL"
+ if args.uri == '':
+ del config[inst.serverid]['uri']
+ else:
+ config[inst.serverid]['uri'] = args.uri
+ if args.basedn is not None:
+ if not is_dn(args.basedn):
+ raise ValueError("The basedn is not a valid DN!")
+ if args.basedn == '':
+ del config[inst.serverid]['basedn']
+ else:
+ config[inst.serverid]['basedn'] = args.basedn
+ if args.binddn is not None:
+ if not is_dn(args.binddn):
+ raise ValueError("The binddn is not a valid DN!")
+ if args.binddn == '':
+ del config[inst.serverid]['binddn']
+ else:
+ config[inst.serverid]['binddn'] = args.binddn
+ if args.saslmech is not None:
+ if args.saslmech not in ['EXTERNAL', 'PLAIN']:
+ raise ValueError("The saslmech must be EXTERNAL or PLAIN!")
+ if args.saslmech == '':
+ del config[inst.serverid]['saslmech']
+ else:
+ config[inst.serverid]['saslmech'] = args.saslmech
+ if args.tls_cacertdir is not None:
+ if not path.exists(args.tls_cacertdir):
+ raise ValueError('--tls-cacertdir directory does not exist!')
+ if args.tls_cacertdir == '':
+ del config[inst.serverid]['tls_cacertdir']
+ else:
+ config[inst.serverid]['tls_cacertdir'] = args.tls_cacertdir
+ if args.tls_cert is not None:
+ if not path.exists(args.tls_cert):
+ raise ValueError('--tls-cert does not point to an existing file!')
+ if args.tls_cert == '':
+ del config[inst.serverid]['tls_cert']
+ else:
+ config[inst.serverid]['tls_cert'] = args.tls_cert
+ if args.tls_key is not None:
+ if not path.exists(args.tls_key):
+ raise ValueError('--tls-key does not point to an existing file!')
+ if args.tls_key == '':
+ del config[inst.serverid]['tls_key']
+ else:
+ config[inst.serverid]['tls_key'] = args.tls_key
+ if args.tls_reqcert is not None:
+ if args.tls_reqcert not in ['never', 'hard', 'allow']:
+ raise ValueError('--tls-reqcert value is invalid (must be either "never", "allow", or "hard")!')
+ if args.tls_reqcert == '':
+ del config[inst.serverid]['tls_reqcert']
+ else:
+ config[inst.serverid]['tls_reqcert'] = args.tls_reqcert
+ if args.starttls:
+ config[inst.serverid]['starttls'] = 'true'
+ if args.cancel_starttls:
+ config[inst.serverid]['starttls'] = 'false'
+ if args.pwdfile is not None:
+ if not path.exists(args.pwdfile):
+ raise ValueError('--pwdfile does not exist!')
+ if args.pwdfile == '':
+ del config[inst.serverid]['pwdfile']
+ else:
+ config[inst.serverid]['pwdfile'] = args.pwdfile
+
+ # Okay now rewrite the file
+ with open(dsrc_file, 'w') as configfile:
+ config.write(configfile)
+
+ log.info(f'Successfully updated: {dsrc_file}')
+ else:
+ raise ValueError(f'There is no .dsrc file "{dsrc_file}" to update!')
+
+
+def delete_dsrc(inst, log, args):
+ """Delete the .dsrc file
+ """
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+ if path.exists(dsrc_file):
+ if not args.do_it:
+ # Get confirmation
+ while 1:
+ val = input(f'\nAre you sure you want to remove this instances configuration ? [no]: ').rstrip().lower()
+ if val == 'y' or val == 'yes':
+ break
+ if val == '' or val == 'n' or val == 'no':
+ return
+
+ config = configparser.ConfigParser()
+ config.read(dsrc_file)
+ instances = config.sections()
+ if inst.serverid not in instances:
+ raise ValueError("The is no configuration for this instance")
+
+ # Update the config object
+ del config[inst.serverid]
+
+ if len(config.sections()) == 0:
+ # The file would be empty so just delete it
+ try:
+ remove(dsrc_file)
+ log.info(f'Successfully removed: {dsrc_file}')
+ return
+ except OSError as e:
+ raise ValueError(f'Failed to delete "{dsrc_file}", error: {str(e)}')
+ else:
+ # write the updated config
+ with open(dsrc_file, 'w') as configfile:
+ config.write(configfile)
+ else:
+ raise ValueError(f'There is no .dsrc file "{dsrc_file}" to update!')
+
+ log.info(f'Successfully updated: {dsrc_file}')
+
+def display_dsrc(inst, log, args):
+ """Display the contents of the ~/.dsrc file
+ """
+ dsrc_file = f'{expanduser("~")}/.dsrc'
+
+ if not path.exists(dsrc_file):
+ raise ValueError(f'There is no dsrc file "{dsrc_file}" to display!')
+
+ config = configparser.ConfigParser()
+ config.read(dsrc_file)
+ instances = config.sections()
+
+ for inst_section in instances:
+ if args.json:
+ log.info(json.dumps({inst_section: dict(config[inst_section])}, indent=4))
+ else:
+ log.info(f'[{inst_section}]')
+ for k, v in config[inst_section].items():
+ log.info(f'{k} = {v}')
+ log.info("")
+
+
+def create_parser(subparsers):
+ dsrc_parser = subparsers.add_parser('dsrc', help="Manage the .dsrc file")
+ subcommands = dsrc_parser.add_subparsers(help="action")
+
+ # Create .dsrc file
+ dsrc_create_parser = subcommands.add_parser('create', help='Generate the .dsrc file')
+ dsrc_create_parser.set_defaults(func=create_dsrc)
+ dsrc_create_parser.add_argument('--uri', help="The URI (LDAP URL) for the Directory Server instance.")
+ dsrc_create_parser.add_argument('--basedn', help="The default database suffix.")
+ dsrc_create_parser.add_argument('--binddn', help="The default Bind DN used or authentication.")
+ dsrc_create_parser.add_argument('--saslmech', help="The SASL mechanism to use: PLAIN or EXTERNAL.")
+ dsrc_create_parser.add_argument('--tls-cacertdir', help="The directory containing the Trusted Certificate Authority certificate.")
+ dsrc_create_parser.add_argument('--tls-cert', help="The absolute file name to the server certificate.")
+ dsrc_create_parser.add_argument('--tls-key', help="The absolute file name to the server certificate key.")
+ dsrc_create_parser.add_argument('--tls-reqcert', help="Request certificate strength: 'never', 'allow', 'hard'")
+ dsrc_create_parser.add_argument('--starttls', action='store_true', help="Use startTLS for connection to the server.")
+ dsrc_create_parser.add_argument('--pwdfile', help="The absolute path to a file containing the Bind DN's password.")
+ dsrc_create_parser.add_argument('--do-it', action='store_true', help="Create the file without any confirmation.")
+
+ dsrc_modify_parser = subcommands.add_parser('modify', help='Modify the .dsrc file')
+ dsrc_modify_parser.set_defaults(func=modify_dsrc)
+ dsrc_modify_parser.add_argument('--uri', nargs='?', const='', help="The URI (LDAP URL) for the Directory Server instance.")
+ dsrc_modify_parser.add_argument('--basedn', nargs='?', const='', help="The default database suffix.")
+ dsrc_modify_parser.add_argument('--binddn', nargs='?', const='', help="The default Bind DN used or authentication.")
+ dsrc_modify_parser.add_argument('--saslmech', nargs='?', const='', help="The SASL mechanism to use: PLAIN or EXTERNAL.")
+ dsrc_modify_parser.add_argument('--tls-cacertdir', nargs='?', const='', help="The directory containing the Trusted Certificate Authority certificate.")
+ dsrc_modify_parser.add_argument('--tls-cert', nargs='?', const='', help="The absolute file name to the server certificate.")
+ dsrc_modify_parser.add_argument('--tls-key', nargs='?', const='', help="The absolute file name to the server certificate key.")
+ dsrc_modify_parser.add_argument('--tls-reqcert', nargs='?', const='', help="Request certificate strength: 'never', 'allow', 'hard'")
+ dsrc_modify_parser.add_argument('--starttls', action='store_true', help="Use startTLS for connection to the server.")
+ dsrc_modify_parser.add_argument('--cancel-starttls', action='store_true', help="Do not use startTLS for connection to the server.")
+ dsrc_modify_parser.add_argument('--pwdfile', nargs='?', const='', help="The absolute path to a file containing the Bind DN's password.")
+ dsrc_modify_parser.add_argument('--do-it', action='store_true', help="Update the file without any confirmation.")
+
+ # Delete the instance from the .dsrc file
+ dsrc_delete_parser = subcommands.add_parser('delete', help='Delete instance configuration from the .dsrc file.')
+ dsrc_delete_parser.set_defaults(func=delete_dsrc)
+ dsrc_delete_parser.add_argument('--do-it', action='store_true',
+ help="Delete this instance's configuration from the .dsrc file.")
+
+ # Display .dsrc file
+ dsrc_display_parser = subcommands.add_parser('display', help='Display the contents of the .dsrc file.')
+ dsrc_display_parser.set_defaults(func=display_dsrc)
--
2.26.2

View File

@ -0,0 +1,902 @@
From 201cb1147c0a34bddbd3e5c03aecd804c47a9905 Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Thu, 19 Nov 2020 10:21:10 +0100
Subject: [PATCH 2/2] Issue 4440 - BUG - ldifgen with --start-idx option fails
with unsupported operand (#4444)
Bug description:
Got TypeError exception when using:
dsctl -v slapd-localhost ldifgen users --suffix
dc=example,dc=com --parent ou=people,dc=example,dc=com
--number 100000 --generic --start-idx=50
The reason is that by default python parser provides
value for numeric options:
as an integer if specified by "--option value" or
as a string if specified by "--option=value"
Fix description:
convert the numeric parameters to integer when using it.
options impacted are:
- in users subcommand: --number , --start-idx
- in mod-load subcommand: --num-users, --add-users,
--del-users, --modrdn-users, --mod-users
FYI: An alternative solution would have been to indicate the
parser that these values are an integer. But two reasons
led me to implement the first solution:
- first solution fix the problem for all users while the
second one fixes only dsctl command.
- first solution is easier to test:
I just added a new test file generated by a script
that duplicated existing ldifgen test, renamed the
test cases and replaced the numeric arguments by
strings.
Second solution would need to redesign the test framework
to be able to test the parser.
relates: https://github.com/389ds/389-ds-base/issues/4440
Reviewed by:
Platforms tested: F32
(cherry picked from commit 3c3e1f30cdb046a1aabb93aacebcf261a76a0892)
---
.../tests/suites/clu/dbgen_test_usan.py | 806 ++++++++++++++++++
src/lib389/lib389/cli_ctl/dbgen.py | 10 +-
src/lib389/lib389/dbgen.py | 3 +
3 files changed, 814 insertions(+), 5 deletions(-)
create mode 100644 dirsrvtests/tests/suites/clu/dbgen_test_usan.py
diff --git a/dirsrvtests/tests/suites/clu/dbgen_test_usan.py b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py
new file mode 100644
index 000000000..80ff63417
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py
@@ -0,0 +1,806 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import time
+
+"""
+ This file contains tests similar to dbgen_test.py
+ except that parameters that are numbers are expressed as strings
+ (to mimic the parameters parser default behavior which returns an
+ int when parsing "option value" and a string when parsing "option=value"
+ This file has been generated by using:
+sed '
+9r z1
+s/ test_/ test_usan/
+/args.*= [0-9]/s,[0-9]*$,"&",
+/:id:/s/.$/1/
+' dbgen_test.py > dbgen_test_usan.py
+ ( with z1 file containing this comment )
+"""
+
+
+
+import subprocess
+import pytest
+
+from lib389.cli_ctl.dbgen import *
+from lib389.cos import CosClassicDefinitions, CosPointerDefinitions, CosIndirectDefinitions, CosTemplates
+from lib389.idm.account import Accounts
+from lib389.idm.group import Groups
+from lib389.idm.role import ManagedRoles, FilteredRoles, NestedRoles
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_st
+from lib389.cli_base import FakeArgs
+
+pytestmark = pytest.mark.tier0
+
+LOG_FILE = '/tmp/dbgen.log'
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+
+@pytest.fixture(scope="function")
+def set_log_file_and_ldif(topology_st, request):
+ global ldif_file
+ ldif_file = get_ldif_dir(topology_st.standalone) + '/created.ldif'
+
+ fh = logging.FileHandler(LOG_FILE)
+ fh.setLevel(logging.DEBUG)
+ log.addHandler(fh)
+
+ def fin():
+ log.info('Delete files')
+ os.remove(LOG_FILE)
+ os.remove(ldif_file)
+
+ request.addfinalizer(fin)
+
+
+def run_offline_import(instance, ldif_file):
+ log.info('Stopping the server and running offline import...')
+ instance.stop()
+ assert instance.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None,
+ import_file=ldif_file)
+ instance.start()
+
+
+def run_ldapmodify_from_file(instance, ldif_file, output_to_check=None):
+ LDAP_MOD = '/usr/bin/ldapmodify'
+ log.info('Add entries from ldif file with ldapmodify')
+ result = subprocess.check_output([LDAP_MOD, '-cx', '-D', DN_DM, '-w', PASSWORD,
+ '-h', instance.host, '-p', str(instance.port), '-af', ldif_file])
+ if output_to_check is not None:
+ assert output_to_check in ensure_str(result)
+
+
+def check_value_in_log_and_reset(content_list):
+ with open(LOG_FILE, 'r+') as f:
+ file_content = f.read()
+ log.info('Check if content is present in output')
+ for item in content_list:
+ assert item in file_content
+
+ log.info('Reset log file for next test')
+ f.truncate(0)
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_users(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create ldif with users
+
+ :id: 426b5b94-9923-454d-a736-7e71ca985e91
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with users
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.suffix = DEFAULT_SUFFIX
+ args.parent = 'ou=people,dc=example,dc=com'
+ args.number = "1000"
+ args.rdn_cn = False
+ args.generic = True
+ args.start_idx = "50"
+ args.localize = False
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'suffix={}'.format(args.suffix),
+ 'parent={}'.format(args.parent),
+ 'number={}'.format(args.number),
+ 'rdn-cn={}'.format(args.rdn_cn),
+ 'generic={}'.format(args.generic),
+ 'start-idx={}'.format(args.start_idx),
+ 'localize={}'.format(args.localize),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create users ldif')
+ dbgen_create_users(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ log.info('Get number of accounts before import')
+ accounts = Accounts(standalone, DEFAULT_SUFFIX)
+ count_account = len(accounts.filter('(uid=*)'))
+
+ run_offline_import(standalone, ldif_file)
+
+ log.info('Check that accounts are imported')
+ assert len(accounts.filter('(uid=*)')) > count_account
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_groups(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create ldif with group
+
+ :id: 97207413-9a93-4065-a5ec-63aa93801a31
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with group
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+ LDAP_RESULT = 'adding new entry "cn=myGroup-1,ou=groups,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.NAME = 'myGroup'
+ args.parent = 'ou=groups,dc=example,dc=com'
+ args.suffix = DEFAULT_SUFFIX
+ args.number = "1"
+ args.num_members = "1000"
+ args.create_members = True
+ args.member_attr = 'uniquemember'
+ args.member_parent = 'ou=people,dc=example,dc=com'
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'number={}'.format(args.number),
+ 'suffix={}'.format(args.suffix),
+ 'num-members={}'.format(args.num_members),
+ 'create-members={}'.format(args.create_members),
+ 'member-parent={}'.format(args.member_parent),
+ 'member-attr={}'.format(args.member_attr),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create group ldif')
+ dbgen_create_groups(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ log.info('Get number of accounts before import')
+ accounts = Accounts(standalone, DEFAULT_SUFFIX)
+ count_account = len(accounts.filter('(uid=*)'))
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ # ldapmodify will complain about already existing parent which causes subprocess to return exit code != 0
+ with pytest.raises(subprocess.CalledProcessError):
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that accounts are imported')
+ assert len(accounts.filter('(uid=*)')) > count_account
+
+ log.info('Check that group is imported')
+ groups = Groups(standalone, DEFAULT_SUFFIX)
+ assert groups.exists(args.NAME + '-1')
+ new_group = groups.get(args.NAME + '-1')
+ new_group.present('uniquemember', 'uid=group_entry1-0152,ou=people,dc=example,dc=com')
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_classic(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a COS definition
+
+ :id: 8557f994-8a91-4f8a-86f6-9cb826a0b8f1
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with classic COS definition
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def,ou=cos definitions,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.type = 'classic'
+ args.NAME = 'My_Postal_Def'
+ args.parent = 'ou=cos definitions,dc=example,dc=com'
+ args.create_parent = True
+ args.cos_specifier = 'businessCategory'
+ args.cos_attr = ['postalcode', 'telephonenumber']
+ args.cos_template = 'cn=sales,cn=classicCoS,dc=example,dc=com'
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'type={}'.format(args.type),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'cos-specifier={}'.format(args.cos_specifier),
+ 'cos-template={}'.format(args.cos_template),
+ 'cos-attr={}'.format(args.cos_attr),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create COS definition ldif')
+ dbgen_create_cos_def(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that COS definition is imported')
+ cos_def = CosClassicDefinitions(standalone, args.parent)
+ assert cos_def.exists(args.NAME)
+ new_cos = cos_def.get(args.NAME)
+ assert new_cos.present('cosTemplateDN', args.cos_template)
+ assert new_cos.present('cosSpecifier', args.cos_specifier)
+ assert new_cos.present('cosAttribute', args.cos_attr[0])
+ assert new_cos.present('cosAttribute', args.cos_attr[1])
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_pointer(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a COS definition
+
+ :id: 6b26ca6d-226a-4f93-925e-faf95cc20211
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with pointer COS definition
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_pointer,ou=cos pointer definitions,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.type = 'pointer'
+ args.NAME = 'My_Postal_Def_pointer'
+ args.parent = 'ou=cos pointer definitions,dc=example,dc=com'
+ args.create_parent = True
+ args.cos_specifier = None
+ args.cos_attr = ['postalcode', 'telephonenumber']
+ args.cos_template = 'cn=sales,cn=pointerCoS,dc=example,dc=com'
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'type={}'.format(args.type),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'cos-template={}'.format(args.cos_template),
+ 'cos-attr={}'.format(args.cos_attr),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create COS definition ldif')
+ dbgen_create_cos_def(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that COS definition is imported')
+ cos_def = CosPointerDefinitions(standalone, args.parent)
+ assert cos_def.exists(args.NAME)
+ new_cos = cos_def.get(args.NAME)
+ assert new_cos.present('cosTemplateDN', args.cos_template)
+ assert new_cos.present('cosAttribute', args.cos_attr[0])
+ assert new_cos.present('cosAttribute', args.cos_attr[1])
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_indirect(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a COS definition
+
+ :id: ab4b799e-e801-432a-a61d-badad2628201
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with indirect COS definition
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_indirect,ou=cos indirect definitions,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.type = 'indirect'
+ args.NAME = 'My_Postal_Def_indirect'
+ args.parent = 'ou=cos indirect definitions,dc=example,dc=com'
+ args.create_parent = True
+ args.cos_specifier = 'businessCategory'
+ args.cos_attr = ['postalcode', 'telephonenumber']
+ args.cos_template = None
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'type={}'.format(args.type),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'cos-specifier={}'.format(args.cos_specifier),
+ 'cos-attr={}'.format(args.cos_attr),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create COS definition ldif')
+ dbgen_create_cos_def(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that COS definition is imported')
+ cos_def = CosIndirectDefinitions(standalone, args.parent)
+ assert cos_def.exists(args.NAME)
+ new_cos = cos_def.get(args.NAME)
+ assert new_cos.present('cosIndirectSpecifier', args.cos_specifier)
+ assert new_cos.present('cosAttribute', args.cos_attr[0])
+ assert new_cos.present('cosAttribute', args.cos_attr[1])
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_cos_template(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a COS template
+
+ :id: 544017c7-4a82-4e7d-a047-00b68a28e071
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with COS template
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Template,ou=cos templates,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.NAME = 'My_Template'
+ args.parent = 'ou=cos templates,dc=example,dc=com'
+ args.create_parent = True
+ args.cos_priority = "1"
+ args.cos_attr_val = 'postalcode:12345'
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'cos-priority={}'.format(args.cos_priority),
+ 'cos-attr-val={}'.format(args.cos_attr_val),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create COS template ldif')
+ dbgen_create_cos_tmp(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that COS template is imported')
+ cos_temp = CosTemplates(standalone, args.parent)
+ assert cos_temp.exists(args.NAME)
+ new_cos = cos_temp.get(args.NAME)
+ assert new_cos.present('cosPriority', str(args.cos_priority))
+ assert new_cos.present('postalcode', '12345')
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_managed_role(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a managed role
+
+ :id: 10e77b41-0bc1-4ad5-a144-2c5107455b91
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with managed role
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Managed_Role,ou=managed roles,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+
+ args.NAME = 'My_Managed_Role'
+ args.parent = 'ou=managed roles,dc=example,dc=com'
+ args.create_parent = True
+ args.type = 'managed'
+ args.filter = None
+ args.role_dn = None
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'type={}'.format(args.type),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create managed role ldif')
+ dbgen_create_role(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that managed role is imported')
+ roles = ManagedRoles(standalone, DEFAULT_SUFFIX)
+ assert roles.exists(args.NAME)
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_filtered_role(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a filtered role
+
+ :id: cb3c8ea8-4234-40e2-8810-fb6a25973921
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with filtered role
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Filtered_Role,ou=filtered roles,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+
+ args.NAME = 'My_Filtered_Role'
+ args.parent = 'ou=filtered roles,dc=example,dc=com'
+ args.create_parent = True
+ args.type = 'filtered'
+ args.filter = '"objectclass=posixAccount"'
+ args.role_dn = None
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'type={}'.format(args.type),
+ 'filter={}'.format(args.filter),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create filtered role ldif')
+ dbgen_create_role(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that filtered role is imported')
+ roles = FilteredRoles(standalone, DEFAULT_SUFFIX)
+ assert roles.exists(args.NAME)
+ new_role = roles.get(args.NAME)
+ assert new_role.present('nsRoleFilter', args.filter)
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_nested_role(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create a nested role
+
+ :id: 97fff0a8-3103-4adb-be04-2799ff58d8f1
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate ldif with nested role
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ LDAP_RESULT = 'adding new entry "cn=My_Nested_Role,ou=nested roles,dc=example,dc=com"'
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.NAME = 'My_Nested_Role'
+ args.parent = 'ou=nested roles,dc=example,dc=com'
+ args.create_parent = True
+ args.type = 'nested'
+ args.filter = None
+ args.role_dn = ['cn=some_role,ou=roles,dc=example,dc=com']
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'NAME={}'.format(args.NAME),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'type={}'.format(args.type),
+ 'role-dn={}'.format(args.role_dn),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create nested role ldif')
+ dbgen_create_role(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT)
+
+ log.info('Check that nested role is imported')
+ roles = NestedRoles(standalone, DEFAULT_SUFFIX)
+ assert roles.exists(args.NAME)
+ new_role = roles.get(args.NAME)
+ assert new_role.present('nsRoleDN', args.role_dn[0])
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_mod_ldif_mixed(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create mixed modification ldif
+
+ :id: 4a2e0901-2b48-452e-a4a0-507735132c81
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate modification ldif
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.parent = DEFAULT_SUFFIX
+ args.create_users = True
+ args.delete_users = True
+ args.create_parent = False
+ args.num_users = "1000"
+ args.add_users = "100"
+ args.del_users = "999"
+ args.modrdn_users = "100"
+ args.mod_users = "10"
+ args.mod_attrs = ['cn', 'uid', 'sn']
+ args.randomize = False
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'create-users={}'.format(args.create_users),
+ 'parent={}'.format(args.parent),
+ 'create-parent={}'.format(args.create_parent),
+ 'delete-users={}'.format(args.delete_users),
+ 'num-users={}'.format(args.num_users),
+ 'add-users={}'.format(args.add_users),
+ 'del-users={}'.format(args.del_users),
+ 'modrdn-users={}'.format(args.modrdn_users),
+ 'mod-users={}'.format(args.mod_users),
+ 'mod-attrs={}'.format(args.mod_attrs),
+ 'randomize={}'.format(args.randomize),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created LDIF file: {}'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create modification ldif')
+ dbgen_create_mods(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ log.info('Get number of accounts before import')
+ accounts = Accounts(standalone, DEFAULT_SUFFIX)
+ count_account = len(accounts.filter('(uid=*)'))
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ # ldapmodify will complain about a lot of changes done which causes subprocess to return exit code != 0
+ with pytest.raises(subprocess.CalledProcessError):
+ run_ldapmodify_from_file(standalone, ldif_file)
+
+ log.info('Check that some accounts are imported')
+ assert len(accounts.filter('(uid=*)')) > count_account
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1798394
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented")
+def test_usandsconf_dbgen_nested_ldif(topology_st, set_log_file_and_ldif):
+ """Test ldifgen (formerly dbgen) tool to create nested ldif
+
+ :id: 9c281c28-4169-45e0-8c07-c5502d9a7581
+ :setup: Standalone instance
+ :steps:
+ 1. Create DS instance
+ 2. Run ldifgen to generate nested ldif
+ 3. Import generated ldif to database
+ 4. Check it was properly imported
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ standalone = topology_st.standalone
+
+ args = FakeArgs()
+ args.suffix = DEFAULT_SUFFIX
+ args.node_limit = "100"
+ args.num_users = "600"
+ args.ldif_file = ldif_file
+
+ content_list = ['Generating LDIF with the following options:',
+ 'suffix={}'.format(args.suffix),
+ 'node-limit={}'.format(args.node_limit),
+ 'num-users={}'.format(args.num_users),
+ 'ldif-file={}'.format(args.ldif_file),
+ 'Writing LDIF',
+ 'Successfully created nested LDIF file ({}) containing 6 nodes/subtrees'.format(args.ldif_file)]
+
+ log.info('Run ldifgen to create nested ldif')
+ dbgen_create_nested(standalone, log, args)
+
+ log.info('Check if file exists')
+ assert os.path.exists(ldif_file)
+
+ check_value_in_log_and_reset(content_list)
+
+ log.info('Get number of accounts before import')
+ accounts = Accounts(standalone, DEFAULT_SUFFIX)
+ count_account = len(accounts.filter('(uid=*)'))
+ count_ou = len(accounts.filter('(ou=*)'))
+
+ # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db
+ # ldapmodify will complain about already existing suffix which causes subprocess to return exit code != 0
+ with pytest.raises(subprocess.CalledProcessError):
+ run_ldapmodify_from_file(standalone, ldif_file)
+
+ standalone.restart()
+
+ log.info('Check that accounts are imported')
+ assert len(accounts.filter('(uid=*)')) > count_account
+ assert len(accounts.filter('(ou=*)')) > count_ou
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/src/lib389/lib389/cli_ctl/dbgen.py b/src/lib389/lib389/cli_ctl/dbgen.py
index 7bc3892ba..058342fb1 100644
--- a/src/lib389/lib389/cli_ctl/dbgen.py
+++ b/src/lib389/lib389/cli_ctl/dbgen.py
@@ -451,13 +451,13 @@ def dbgen_create_mods(inst, log, args):
props = {
"createUsers": args.create_users,
"deleteUsers": args.delete_users,
- "numUsers": args.num_users,
+ "numUsers": int(args.num_users),
"parent": args.parent,
"createParent": args.create_parent,
- "addUsers": args.add_users,
- "delUsers": args.del_users,
- "modrdnUsers": args.modrdn_users,
- "modUsers": args.mod_users,
+ "addUsers": int(args.add_users),
+ "delUsers": int(args.del_users),
+ "modrdnUsers": int(args.modrdn_users),
+ "modUsers": int(args.mod_users),
"random": args.randomize,
"modAttrs": args.mod_attrs
}
diff --git a/src/lib389/lib389/dbgen.py b/src/lib389/lib389/dbgen.py
index 6273781a2..10fb200f7 100644
--- a/src/lib389/lib389/dbgen.py
+++ b/src/lib389/lib389/dbgen.py
@@ -220,6 +220,9 @@ def dbgen_users(instance, number, ldif_file, suffix, generic=False, entry_name="
"""
Generate an LDIF of randomly named entries
"""
+ # Let's ensure that integer parameters are not strings
+ number=int(number)
+ startIdx=int(startIdx)
familyname_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-FamilyNames')
givename_file = os.path.join(instance.ds_paths.data_dir, 'dirsrv/data/dbgen-GivenNames')
familynames = []
--
2.26.2

View File

@ -0,0 +1,127 @@
From 2a2773d4bf8553ba64b396d567fe05506b22c94c Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Tue, 24 Nov 2020 19:22:49 +0100
Subject: [PATCH] Issue 4449 - dsconf replication monitor fails to retrieve
database RUV - consumer (Unavailable) (#4451)
Bug Description:
"dsconf replication monitor" fails to retrieve database RUV entry from consumer and this
appears into the Cockpit web UI too.
The problem is that the bind credentials are not rightly propagated when trying to get
the consumer's agreement status. Then supplier credentials are used instead and RUV
is searched anonymously because there is no bind dn in ldapi case.
Fix Description:
- Propagates the bind credentials when computing agreement status
- Add a credential cache because now a replica password could get asked several times:
when discovering the topology and
when getting the agreement maxcsn
- No testcase in 1.4.3 branch as the file modified in master does not exist
- Add a comment about nonlocal keyword
Relates: #4449
Reviewers:
firstyear
droideck
mreynolds
Issue 4449: Add a comment about nonlocal keyword
(cherry picked from commit 73ee04fa12cd1de3a5e47c109e79e31c1aaaa2ab)
---
src/lib389/lib389/cli_conf/replication.py | 13 +++++++++++--
src/lib389/lib389/replica.py | 16 ++++++++++++----
2 files changed, 23 insertions(+), 6 deletions(-)
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
index 9dbaa320a..248972cba 100644
--- a/src/lib389/lib389/cli_conf/replication.py
+++ b/src/lib389/lib389/cli_conf/replication.py
@@ -369,9 +369,16 @@ def set_repl_config(inst, basedn, log, args):
def get_repl_monitor_info(inst, basedn, log, args):
connection_data = dsrc_to_repl_monitor(DSRC_HOME, log)
+ credentials_cache = {}
# Additional details for the connections to the topology
def get_credentials(host, port):
+ # credentials_cache is nonlocal to refer to the instance
+ # from enclosing function (get_repl_monitor_info)`
+ nonlocal credentials_cache
+ key = f'{host}:{port}'
+ if key in credentials_cache:
+ return credentials_cache[key]
found = False
if args.connections:
connections = args.connections
@@ -406,8 +413,10 @@ def get_repl_monitor_info(inst, basedn, log, args):
binddn = input(f'\nEnter a bind DN for {host}:{port}: ').rstrip()
bindpw = getpass(f"Enter a password for {binddn} on {host}:{port}: ").rstrip()
- return {"binddn": binddn,
- "bindpw": bindpw}
+ credentials = {"binddn": binddn,
+ "bindpw": bindpw}
+ credentials_cache[key] = credentials
+ return credentials
repl_monitor = ReplicationMonitor(inst)
report_dict = repl_monitor.generate_report(get_credentials, args.json)
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index c2ad2104d..3d89e61fb 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -2487,9 +2487,10 @@ class ReplicationMonitor(object):
else:
self._log = logging.getLogger(__name__)
- def _get_replica_status(self, instance, report_data, use_json):
+ def _get_replica_status(self, instance, report_data, use_json, get_credentials=None):
"""Load all of the status data to report
and add new hostname:port pairs for future processing
+ :type get_credentials: function
"""
replicas_status = []
@@ -2503,6 +2504,13 @@ class ReplicationMonitor(object):
for agmt in agmts.list():
host = agmt.get_attr_val_utf8_l("nsds5replicahost")
port = agmt.get_attr_val_utf8_l("nsds5replicaport")
+ if get_credentials is not None:
+ credentials = get_credentials(host, port)
+ binddn = credentials["binddn"]
+ bindpw = credentials["bindpw"]
+ else:
+ binddn = instance.binddn
+ bindpw = instance.bindpw
protocol = agmt.get_attr_val_utf8_l('nsds5replicatransportinfo')
# Supply protocol here because we need it only for connection
# and agreement status is already preformatted for the user output
@@ -2510,9 +2518,9 @@ class ReplicationMonitor(object):
if consumer not in report_data:
report_data[f"{consumer}:{protocol}"] = None
if use_json:
- agmts_status.append(json.loads(agmt.status(use_json=True)))
+ agmts_status.append(json.loads(agmt.status(use_json=True, binddn=binddn, bindpw=bindpw)))
else:
- agmts_status.append(agmt.status())
+ agmts_status.append(agmt.status(binddn=binddn, bindpw=bindpw))
replicas_status.append({"replica_id": replica_id,
"replica_root": replica_root,
"replica_status": "Available",
@@ -2535,7 +2543,7 @@ class ReplicationMonitor(object):
initial_inst_key = f"{self._instance.config.get_attr_val_utf8_l('nsslapd-localhost')}:{self._instance.config.get_attr_val_utf8_l('nsslapd-port')}"
# Do this on an initial instance to get the agreements to other instances
try:
- report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json)
+ report_data[initial_inst_key] = self._get_replica_status(self._instance, report_data, use_json, get_credentials)
except ldap.LDAPError as e:
self._log.debug(f"Connection to consumer ({supplier_hostname}:{supplier_port}) failed, error: {e}")
report_data[initial_inst_key] = [{"replica_status": f"Unavailable - {e.args[0]['desc']}"}]
--
2.26.2

View File

@ -0,0 +1,63 @@
From e540effa692976c2eef766f1f735702ba5dc0950 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 30 Nov 2020 09:03:33 +0100
Subject: [PATCH] Issue 4243 - Fix test: SyncRepl plugin provides a wrong
cookie (#4467)
Bug description:
This test case was incorrect.
During a refreshPersistent search, a cookie is sent
with the intermediate message that indicates the end of the refresh phase.
Then a second cookie is sent on the updated entry (group10)
I believed this test was successful some time ago but neither python-ldap
nor sync_repl changed (intermediate sent in post refresh).
So the testcase was never successful :(
Fix description:
The fix is just to take into account the two expected cookies
relates: https://github.com/389ds/389-ds-base/issues/4243
Reviewed by: Mark Reynolds
Platforms tested: F31
---
.../tests/suites/syncrepl_plugin/basic_test.py | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
index 79ec374bc..7b35537d5 100644
--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
@@ -589,7 +589,7 @@ def test_sync_repl_cookie_with_failure(topology, request):
sync_repl.start()
time.sleep(5)
- # Add a test group just to check that sync_repl receives only one update
+ # Add a test group just to check that sync_repl receives that SyncControlInfo cookie
group.append(groups.create(properties={'cn': 'group%d' % 10}))
# create users, that automember/memberof will generate nested updates
@@ -610,13 +610,15 @@ def test_sync_repl_cookie_with_failure(topology, request):
time.sleep(10)
cookies = sync_repl.get_result()
- # checking that the cookie list contains only one entry
- assert len(cookies) == 1
- prev = 0
+ # checking that the cookie list contains only two entries
+ # the one from the SyncInfo/RefreshDelete that indicates the end of the refresh
+ # the the one from SyncStateControl related to the only updated entry (group10)
+ assert len(cookies) == 2
+ prev = -1
for cookie in cookies:
log.info('Check cookie %s' % cookie)
- assert int(cookie) > 0
+ assert int(cookie) >= 0
assert int(cookie) < 1000
assert int(cookie) > prev
prev = int(cookie)
--
2.26.2

View File

@ -0,0 +1,254 @@
From 8b0ba11c3dfb577d1696f4b71a6f4e9f8d42349f Mon Sep 17 00:00:00 2001
From: Pierre Rogier <progier@redhat.com>
Date: Mon, 30 Nov 2020 12:42:17 +0100
Subject: [PATCH] Add dsconf replication monitor test case (gitHub issue 4449)
in 1.4.3 branch
---
.../tests/suites/clu/repl_monitor_test.py | 234 ++++++++++++++++++
1 file changed, 234 insertions(+)
create mode 100644 dirsrvtests/tests/suites/clu/repl_monitor_test.py
diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
new file mode 100644
index 000000000..b03d170c8
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
@@ -0,0 +1,234 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import time
+import subprocess
+import pytest
+
+from lib389.cli_conf.replication import get_repl_monitor_info
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_m2
+from lib389.cli_base import FakeArgs
+from lib389.cli_base.dsrc import dsrc_arg_concat
+from lib389.cli_base import connect_instance
+
+pytestmark = pytest.mark.tier0
+
+LOG_FILE = '/tmp/monitor.log'
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+
+@pytest.fixture(scope="function")
+def set_log_file(request):
+ fh = logging.FileHandler(LOG_FILE)
+ fh.setLevel(logging.DEBUG)
+ log.addHandler(fh)
+
+ def fin():
+ log.info('Delete files')
+ os.remove(LOG_FILE)
+
+ config = os.path.expanduser(DSRC_HOME)
+ if os.path.exists(config):
+ os.remove(config)
+
+ request.addfinalizer(fin)
+
+
+def check_value_in_log_and_reset(content_list, second_list=None, single_value=None, error_list=None):
+ with open(LOG_FILE, 'r+') as f:
+ file_content = f.read()
+
+ for item in content_list:
+ log.info('Check that "{}" is present'.format(item))
+ assert item in file_content
+
+ if second_list is not None:
+ log.info('Check for "{}"'.format(second_list))
+ for item in second_list:
+ assert item in file_content
+
+ if single_value is not None:
+ log.info('Check for "{}"'.format(single_value))
+ assert single_value in file_content
+
+ if error_list is not None:
+ log.info('Check that "{}" is not present'.format(error_list))
+ for item in error_list:
+ assert item not in file_content
+
+ log.info('Reset log file')
+ f.truncate(0)
+
+
+@pytest.mark.ds50545
+@pytest.mark.bz1739718
+@pytest.mark.skipif(ds_is_older("1.4.0"), reason="Not implemented")
+def test_dsconf_replication_monitor(topology_m2, set_log_file):
+ """Test replication monitor that was ported from legacy tools
+
+ :id: ce48020d-7c30-41b7-8f68-144c9cd757f6
+ :setup: 2 MM topology
+ :steps:
+ 1. Create DS instance
+ 2. Run replication monitor with connections option
+ 3. Run replication monitor with aliases option
+ 4. Run replication monitor with --json option
+ 5. Run replication monitor with .dsrc file created
+ 6. Run replication monitor with connections option as if using dsconf CLI
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ """
+
+ m1 = topology_m2.ms["master1"]
+ m2 = topology_m2.ms["master2"]
+
+ alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')',
+ 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')']
+
+ connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port)
+ content_list = ['Replica Root: dc=example,dc=com',
+ 'Replica ID: 1',
+ 'Replica Status: Available',
+ 'Max CSN',
+ 'Status For Agreement: "002" ('+ m2.host + ':' + str(m2.port) + ')',
+ 'Replica Enabled: on',
+ 'Update In Progress: FALSE',
+ 'Last Update Start:',
+ 'Last Update End:',
+ 'Number Of Changes Sent:',
+ 'Number Of Changes Skipped: None',
+ 'Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded',
+ 'Last Init Start:',
+ 'Last Init End:',
+ 'Last Init Status:',
+ 'Reap Active: 0',
+ 'Replication Status: In Synchronization',
+ 'Replication Lag Time:',
+ 'Supplier: ',
+ m2.host + ':' + str(m2.port),
+ 'Replica Root: dc=example,dc=com',
+ 'Replica ID: 2',
+ 'Status For Agreement: "001" (' + m1.host + ':' + str(m1.port)+')']
+
+ error_list = ['consumer (Unavailable)',
+ 'Failed to retrieve database RUV entry from consumer']
+
+ json_list = ['type',
+ 'list',
+ 'items',
+ 'name',
+ m1.host + ':' + str(m1.port),
+ 'data',
+ '"replica_id": "1"',
+ '"replica_root": "dc=example,dc=com"',
+ '"replica_status": "Available"',
+ 'maxcsn',
+ 'agmts_status',
+ 'agmt-name',
+ '002',
+ 'replica',
+ m2.host + ':' + str(m2.port),
+ 'replica-enabled',
+ 'update-in-progress',
+ 'last-update-start',
+ 'last-update-end',
+ 'number-changes-sent',
+ 'number-changes-skipped',
+ 'last-update-status',
+ 'Error (0) Replica acquired successfully: Incremental update succeeded',
+ 'last-init-start',
+ 'last-init-end',
+ 'last-init-status',
+ 'reap-active',
+ 'replication-status',
+ 'In Synchronization',
+ 'replication-lag-time',
+ '"replica_id": "2"',
+ '001',
+ m1.host + ':' + str(m1.port)]
+
+ dsrc_content = '[repl-monitor-connections]\n' \
+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ '\n' \
+ '[repl-monitor-aliases]\n' \
+ 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
+ 'M2 = ' + m2.host + ':' + str(m2.port)
+
+ connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
+ m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM]
+
+ aliases = ['M1=' + m1.host + ':' + str(m1.port),
+ 'M2=' + m2.host + ':' + str(m2.port)]
+
+ args = FakeArgs()
+ args.connections = connections
+ args.aliases = None
+ args.json = False
+
+ log.info('Run replication monitor with connections option')
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
+
+ log.info('Run replication monitor with aliases option')
+ args.aliases = aliases
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ check_value_in_log_and_reset(content_list, alias_content)
+
+ log.info('Run replication monitor with --json option')
+ args.aliases = None
+ args.json = True
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ check_value_in_log_and_reset(json_list)
+
+ with open(os.path.expanduser(DSRC_HOME), 'w+') as f:
+ f.write(dsrc_content)
+
+ args.connections = None
+ args.aliases = None
+ args.json = False
+
+ log.info('Run replication monitor when .dsrc file is present with content')
+ get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ check_value_in_log_and_reset(content_list, alias_content)
+ os.remove(os.path.expanduser(DSRC_HOME))
+
+ log.info('Run replication monitor with connections option as if using dsconf CLI')
+ # Perform same test than steps 2 test but without using directly the topology instance.
+ # but with an instance similar to those than dsconf cli generates:
+ # step 2 args
+ args.connections = connections
+ args.aliases = None
+ args.json = False
+ # args needed to generate an instance with dsrc_arg_concat
+ args.instance = 'master1'
+ args.basedn = None
+ args.binddn = None
+ args.bindpw = None
+ args.pwdfile = None
+ args.prompt = False
+ args.starttls = False
+ dsrc_inst = dsrc_arg_concat(args, None)
+ inst = connect_instance(dsrc_inst, True, args)
+ get_repl_monitor_info(inst, DEFAULT_SUFFIX, log, args)
+ check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
--
2.26.2

View File

@ -0,0 +1,100 @@
From 389b2c825742392365262a719be7c8f594e7e522 Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Thu, 26 Nov 2020 09:08:13 +1000
Subject: [PATCH] Issue 4460 - BUG - lib389 should use system tls policy
Bug Description: Due to some changes in dsrc for tlsreqcert
and how def open was structured in lib389, the system ldap.conf
policy was ignored.
Fix Description: Default to using the system ldap.conf policy
if undefined in lib389 or the tls_reqcert param in dsrc.
fixes: #4460
Author: William Brown <william@blackhats.net.au>
Review by: ???
---
src/lib389/lib389/__init__.py | 11 +++++++----
src/lib389/lib389/cli_base/dsrc.py | 16 +++++++++-------
2 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 99ea9cc6a..4e6a1905a 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -962,7 +962,7 @@ class DirSrv(SimpleLDAPObject, object):
# Now, we are still an allocated ds object so we can be re-installed
self.state = DIRSRV_STATE_ALLOCATED
- def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=ldap.OPT_X_TLS_HARD,
+ def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=None,
usercert=None, userkey=None):
'''
It opens a ldap bound connection to dirsrv so that online
@@ -1025,9 +1025,12 @@ class DirSrv(SimpleLDAPObject, object):
try:
# Note this sets LDAP.OPT not SELF. Because once self has opened
# it can NOT change opts on reused (ie restart)
- self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert)
- self.log.debug("Using certificate policy %s", reqcert)
- self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", reqcert)
+ if reqcert is not None:
+ self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert)
+ self.log.debug("Using lib389 certificate policy %s", reqcert)
+ else:
+ self.log.debug("Using /etc/openldap/ldap.conf certificate policy")
+ self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", self.get_option(ldap.OPT_X_TLS_REQUIRE_CERT))
except ldap.LDAPError as e:
self.log.fatal('TLS negotiation failed: %s', e)
raise e
diff --git a/src/lib389/lib389/cli_base/dsrc.py b/src/lib389/lib389/cli_base/dsrc.py
index fec18a5f9..9b09ea568 100644
--- a/src/lib389/lib389/cli_base/dsrc.py
+++ b/src/lib389/lib389/cli_base/dsrc.py
@@ -45,7 +45,7 @@ def dsrc_arg_concat(args, dsrc_inst):
'tls_cacertdir': None,
'tls_cert': None,
'tls_key': None,
- 'tls_reqcert': ldap.OPT_X_TLS_HARD,
+ 'tls_reqcert': None,
'starttls': args.starttls,
'prompt': False,
'pwdfile': None,
@@ -134,7 +134,7 @@ def dsrc_to_ldap(path, instance_name, log):
dsrc_inst['binddn'] = config.get(instance_name, 'binddn', fallback=None)
dsrc_inst['saslmech'] = config.get(instance_name, 'saslmech', fallback=None)
if dsrc_inst['saslmech'] is not None and dsrc_inst['saslmech'] not in ['EXTERNAL', 'PLAIN']:
- raise Exception("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name))
+ raise ValueError("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name))
dsrc_inst['tls_cacertdir'] = config.get(instance_name, 'tls_cacertdir', fallback=None)
# At this point, we should check if the provided cacertdir is indeed, a dir. This can be a cause
@@ -145,16 +145,18 @@ def dsrc_to_ldap(path, instance_name, log):
dsrc_inst['tls_cert'] = config.get(instance_name, 'tls_cert', fallback=None)
dsrc_inst['tls_key'] = config.get(instance_name, 'tls_key', fallback=None)
- dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback='hard')
- if dsrc_inst['tls_reqcert'] not in ['never', 'allow', 'hard']:
- raise Exception("dsrc tls_reqcert value invalid. %s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name,
- path))
+ dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback=None)
if dsrc_inst['tls_reqcert'] == 'never':
dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_NEVER
elif dsrc_inst['tls_reqcert'] == 'allow':
dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_ALLOW
- else:
+ elif dsrc_inst['tls_reqcert'] == 'hard':
dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_HARD
+ elif dsrc_inst['tls_reqcert'] is None:
+ # Use system value
+ pass
+ else:
+ raise ValueError("dsrc tls_reqcert value invalid. %s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name, path))
dsrc_inst['starttls'] = config.getboolean(instance_name, 'starttls', fallback=False)
dsrc_inst['pwdfile'] = None
dsrc_inst['prompt'] = False
--
2.26.2

View File

@ -0,0 +1,60 @@
From 05b66529117d1cd85a636ab7d8fc84abdec814de Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Thu, 12 Nov 2020 13:04:21 +1000
Subject: [PATCH] Issue 4428 - BUG Paged Results with critical false causes
sigsegv in chaining
Bug Description: When a paged search through chaining backend is
received with a false criticality (such as SSSD), chaining backend
will sigsegv due to a null context.
Fix Description: When a NULL ctx is received to be freed, this is
because paged results have finished being sent, so we check the NULL
ctx and move on.
fixes: #4428
Author: William Brown <william@blackhats.net.au>
Review by: @droideck, @mreynolds389
---
ldap/servers/plugins/chainingdb/cb_search.c | 6 ++++++
ldap/servers/plugins/chainingdb/cb_utils.c | 4 ++++
2 files changed, 10 insertions(+)
diff --git a/ldap/servers/plugins/chainingdb/cb_search.c b/ldap/servers/plugins/chainingdb/cb_search.c
index 69d23a6b5..d47cbc8e4 100644
--- a/ldap/servers/plugins/chainingdb/cb_search.c
+++ b/ldap/servers/plugins/chainingdb/cb_search.c
@@ -740,6 +740,12 @@ chaining_back_search_results_release(void **sr)
slapi_log_err(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM,
"chaining_back_search_results_release\n");
+ if (ctx == NULL) {
+ /* The paged search is already complete, just return */
+ /* Could we have a ctx state flag instead? */
+ return;
+ }
+
if (ctx->readahead != ctx->tobefreed) {
slapi_entry_free(ctx->readahead);
}
diff --git a/ldap/servers/plugins/chainingdb/cb_utils.c b/ldap/servers/plugins/chainingdb/cb_utils.c
index dfd5dd92c..d52fd25a6 100644
--- a/ldap/servers/plugins/chainingdb/cb_utils.c
+++ b/ldap/servers/plugins/chainingdb/cb_utils.c
@@ -279,7 +279,11 @@ cb_add_suffix(cb_backend_instance *inst, struct berval **bvals, int apply_mod, c
return LDAP_SUCCESS;
}
+#ifdef DEBUG
+static int debug_on = 1;
+#else
static int debug_on = 0;
+#endif
int
cb_debug_on()
--
2.26.2

View File

@ -0,0 +1,50 @@
From 4c133d448f451b7c3b2ff1b42806c7516d623f09 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 7 Dec 2020 00:41:27 +0100
Subject: [PATCH] Issue 4315: performance search rate: nagle triggers high rate
of setsocketopt (#4437)
Bug description:
When a socket is set with NO_DELAY=0 (nagle), written pdu are buffered
until buffer is full or tcp_cork is set. This reduce network traffic when
the application writes partial pdu.
DS write complete pdu (results/entries/..) so it gives low benefit for DS.
In addition nagle being 'on' by default, DS sets/unset socket tcp_cork to send
immediately results/entries at each operation. This is an overhead of syscalls.
Fix description:
Disable nagle by default
relates: https://github.com/389ds/389-ds-base/issues/4315
Reviewed by: @mreynolds389, @Firstyear
Platforms tested: F33
---
ldap/servers/slapd/libglobs.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 7d5374c90..f8cf162e6 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -1635,12 +1635,11 @@ FrontendConfig_init(void)
#endif /* USE_SYSCONF */
init_accesscontrol = cfg->accesscontrol = LDAP_ON;
-#if defined(LINUX)
- /* On Linux, by default, we use TCP_CORK so we must enable nagle */
- init_nagle = cfg->nagle = LDAP_ON;
-#else
+
+ /* nagle triggers set/unset TCP_CORK setsockopt per operation
+ * as DS only sends complete PDU there is no benefit of nagle/tcp_cork
+ */
init_nagle = cfg->nagle = LDAP_OFF;
-#endif
init_security = cfg->security = LDAP_OFF;
init_ssl_check_hostname = cfg->ssl_check_hostname = LDAP_ON;
cfg->tls_check_crl = TLS_CHECK_NONE;
--
2.26.2

View File

@ -0,0 +1,39 @@
From 3007700a659ede03085f5390153cce483ce987a1 Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Fri, 4 Dec 2020 10:14:33 +1000
Subject: [PATCH] Issue 4460 - BUG - add machine name to subject alt names in
SSCA (#4472)
Bug Description: During SSCA creation, the server cert did not have
the machine name, which meant that the cert would not work without
reqcert = never.
Fix Description: Add the machine name as an alt name during SSCA
creation. It is not guaranteed this value is correct, but it
is better than nothing.
relates: https://github.com/389ds/389-ds-base/issues/4460
Author: William Brown <william@blackhats.net.au>
Review by: mreynolds389, droideck
---
src/lib389/lib389/instance/setup.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index 7d42ba292..e46f2d1e5 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -887,7 +887,7 @@ class SetupDs(object):
tlsdb_inst = NssSsl(dbpath=os.path.join(etc_dirsrv_path, dir))
tlsdb_inst.import_rsa_crt(ca)
- csr = tlsdb.create_rsa_key_and_csr()
+ csr = tlsdb.create_rsa_key_and_csr(alt_names=[general['full_machine_name']])
(ca, crt) = ssca.rsa_ca_sign_csr(csr)
tlsdb.import_rsa_crt(ca, crt)
if general['selinux']:
--
2.26.2

View File

@ -0,0 +1,50 @@
From 1386b140d8cc81d37fdea6593487fe542587ccac Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 9 Dec 2020 09:52:08 -0500
Subject: [PATCH] Issue 4483 - heap-use-after-free in slapi_be_getsuffix
Description: heap-use-after-free in slapi_be_getsuffix after disk
monitoring runs. This feature is freeing a list of
backends which it does not need to do.
Fixes: https://github.com/389ds/389-ds-base/issues/4483
Reviewed by: firstyear & tbordaz(Thanks!!)
---
ldap/servers/slapd/daemon.c | 13 +------------
1 file changed, 1 insertion(+), 12 deletions(-)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 49199e4df..691f77570 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -606,12 +606,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
now = start;
while ((now - start) < grace_period) {
if (g_get_shutdown()) {
- be_index = 0;
- if (be_list[be_index] != NULL) {
- while ((be = be_list[be_index++])) {
- slapi_be_free(&be);
- }
- }
slapi_ch_array_free(dirs);
dirs = NULL;
return;
@@ -706,12 +700,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
}
}
}
- be_index = 0;
- if (be_list[be_index] != NULL) {
- while ((be = be_list[be_index++])) {
- slapi_be_free(&be);
- }
- }
+
slapi_ch_array_free(dirs);
dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */
g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL);
--
2.26.2

View File

@ -0,0 +1,65 @@
From 6e827f6d5e64e0be316f4e17111b2884899d302c Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 16 Dec 2020 16:30:28 +0100
Subject: [PATCH] Issue 4480 - Unexpected info returned to ldap request (#4491)
Bug description:
If the bind entry does not exist, the bind result info
reports that 'No such entry'. It should not give any
information if the target entry exists or not
Fix description:
Does not return any additional information during a bind
relates: https://github.com/389ds/389-ds-base/issues/4480
Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all)
Platforms tested: F31
---
dirsrvtests/tests/suites/basic/basic_test.py | 1 -
ldap/servers/slapd/back-ldbm/ldbm_config.c | 2 +-
ldap/servers/slapd/result.c | 2 +-
3 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 120207321..1ae82dcdd 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -1400,7 +1400,6 @@ def test_dscreate_multiple_dashes_name(dscreate_long_instance):
assert not dscreate_long_instance.exists()
-
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
index 3fe86d567..10cef250f 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
@@ -1234,7 +1234,7 @@ ldbm_config_search_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
if (attrs) {
for (size_t i = 0; attrs[i]; i++) {
if (ldbm_config_moved_attr(attrs[i])) {
- slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry");
+ slapi_pblock_set(pb, SLAPI_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry");
break;
}
}
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index 9daf3b151..ab0d79454 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -355,7 +355,7 @@ send_ldap_result_ext(
if (text) {
pbtext = text;
} else {
- slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &pbtext);
+ slapi_pblock_get(pb, SLAPI_RESULT_TEXT, &pbtext);
}
if (operation == NULL) {
--
2.26.2

View File

@ -0,0 +1,108 @@
From 1fef5649ce05a17a741789cafb65269c099b396b Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Wed, 16 Dec 2020 16:21:35 +0100
Subject: [PATCH 2/3] Issue #4504 - Fix pytest test_dsconf_replication_monitor
(#4505)
(cherry picked from commit 0b08e6f35b000d1383580be59f902ac813e940f2)
---
.../tests/suites/clu/repl_monitor_test.py | 50 +++++++++++++------
1 file changed, 36 insertions(+), 14 deletions(-)
diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
index b03d170c8..eb18d2da2 100644
--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py
+++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py
@@ -9,6 +9,7 @@
import time
import subprocess
import pytest
+import re
from lib389.cli_conf.replication import get_repl_monitor_info
from lib389.tasks import *
@@ -67,6 +68,25 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No
log.info('Reset log file')
f.truncate(0)
+def get_hostnames_from_log(port1, port2):
+ # Get the supplier host names as displayed in replication monitor output
+ with open(LOG_FILE, 'r') as logfile:
+ logtext = logfile.read()
+ # search for Supplier :hostname:port
+ # and use \D to insure there is no more number is after
+ # the matched port (i.e that 10 is not matching 101)
+ regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)'
+ match=re.search(regexp, logtext)
+ host_m1 = 'localhost.localdomain'
+ if (match is not None):
+ host_m1 = match.group(2)
+ # Same for master 2
+ regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)'
+ match=re.search(regexp, logtext)
+ host_m2 = 'localhost.localdomain'
+ if (match is not None):
+ host_m2 = match.group(2)
+ return (host_m1, host_m2)
@pytest.mark.ds50545
@pytest.mark.bz1739718
@@ -95,9 +115,6 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
m1 = topology_m2.ms["master1"]
m2 = topology_m2.ms["master2"]
- alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')',
- 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')']
-
connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port)
content_list = ['Replica Root: dc=example,dc=com',
'Replica ID: 1',
@@ -160,20 +177,9 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
'001',
m1.host + ':' + str(m1.port)]
- dsrc_content = '[repl-monitor-connections]\n' \
- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
- '\n' \
- '[repl-monitor-aliases]\n' \
- 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \
- 'M2 = ' + m2.host + ':' + str(m2.port)
-
connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM,
m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM]
- aliases = ['M1=' + m1.host + ':' + str(m1.port),
- 'M2=' + m2.host + ':' + str(m2.port)]
-
args = FakeArgs()
args.connections = connections
args.aliases = None
@@ -181,8 +187,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file):
log.info('Run replication monitor with connections option')
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
+ (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port)
check_value_in_log_and_reset(content_list, connection_content, error_list=error_list)
+ # Prepare the data for next tests
+ aliases = ['M1=' + host_m1 + ':' + str(m1.port),
+ 'M2=' + host_m2 + ':' + str(m2.port)]
+
+ alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')',
+ 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')']
+
+ dsrc_content = '[repl-monitor-connections]\n' \
+ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \
+ '\n' \
+ '[repl-monitor-aliases]\n' \
+ 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \
+ 'M2 = ' + host_m2 + ':' + str(m2.port)
+
log.info('Run replication monitor with aliases option')
args.aliases = aliases
get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args)
--
2.26.2

View File

@ -0,0 +1,4 @@
For detailed information on developing plugins for
389 Directory Server visit:
https://www.port389.org/wiki/Plugins

View File

@ -0,0 +1,16 @@
#!/bin/bash
# Fetch a 389-ds-base source snapshot for a given git tag and repackage
# it from .tar.gz into the .tar.bz2 format referenced by the spec file.
#
# Environment:
#   TAG - optional; overrides the default tag name ($PKGNAME-$VERSION).

# Abort on any failed command or unset variable so a failed download
# never silently falls through to re-compressing a stale/partial tarball.
set -eu

# use a real tag name here
VERSION=1.3.5.14
PKGNAME=389-ds-base
TAG=${TAG:-$PKGNAME-$VERSION}
URL="https://git.fedorahosted.org/git/?p=389/ds.git;a=snapshot;h=$TAG;sf=tgz"
SRCNAME=$PKGNAME-$VERSION

wget -O "$SRCNAME.tar.gz" "$URL"

echo "convert tgz format to tar.bz2 format"
gunzip "$SRCNAME.tar.gz"
bzip2 "$SRCNAME.tar"

933
SPECS/389-ds-base.spec Normal file
View File

@ -0,0 +1,933 @@
%global pkgname dirsrv
%global srcname 389-ds-base
# Exclude i686 bit arches
ExcludeArch: i686
# for a pre-release, define the prerel field e.g. .a1 .rc2 - comment out for official release
# also remove the space between % and global - this space is needed because
# fedpkg verrel stupidly ignores comment lines
#% global prerel .rc3
# also need the relprefix field for a pre-release e.g. .0 - also comment out for official release
#% global relprefix 0.
# If perl-Socket-2.000 or newer is available, set 0 to use_Socket6.
%global use_Socket6 0
%global use_asan 0
%global use_rust 0
%global use_legacy 1
%global bundle_jemalloc 1
%if %{use_asan}
%global bundle_jemalloc 0
%endif
%if %{bundle_jemalloc}
%global jemalloc_name jemalloc
%global jemalloc_ver 5.2.1
%global __provides_exclude ^libjemalloc\\.so.*$
%endif
# Use Clang instead of GCC
%global use_clang 0
# fedora 15 and later uses tmpfiles.d
# otherwise, comment this out
%{!?with_tmpfiles_d: %global with_tmpfiles_d %{_sysconfdir}/tmpfiles.d}
# systemd support
%global groupname %{pkgname}.target
# set PIE flag
%global _hardened_build 1
Summary: 389 Directory Server (base)
Name: 389-ds-base
Version: 1.4.3.16
Release: %{?relprefix}6%{?prerel}%{?dist}
License: GPLv3+
URL: https://www.port389.org
Group: System Environment/Daemons
Conflicts: selinux-policy-base < 3.9.8
Conflicts: freeipa-server < 4.0.3
Obsoletes: %{name} <= 1.4.0.9
Provides: ldif2ldbm >= 0
BuildRequires: nspr-devel
BuildRequires: nss-devel >= 3.34
BuildRequires: perl-generators
BuildRequires: openldap-devel
BuildRequires: libdb-devel
BuildRequires: cyrus-sasl-devel
BuildRequires: icu
BuildRequires: libicu-devel
BuildRequires: pcre-devel
BuildRequires: cracklib-devel
%if %{use_clang}
BuildRequires: libatomic
BuildRequires: clang
%else
BuildRequires: gcc
BuildRequires: gcc-c++
%endif
# The following are needed to build the snmp ldap-agent
BuildRequires: net-snmp-devel
BuildRequires: lm_sensors-devel
BuildRequires: bzip2-devel
BuildRequires: zlib-devel
BuildRequires: openssl-devel
# the following is for the pam passthru auth plug-in
BuildRequires: pam-devel
BuildRequires: systemd-units
BuildRequires: systemd-devel
%if %{use_asan}
BuildRequires: libasan
%endif
# If rust is enabled
%if %{use_rust}
BuildRequires: cargo
BuildRequires: rust
%endif
BuildRequires: pkgconfig
BuildRequires: pkgconfig(systemd)
BuildRequires: pkgconfig(krb5)
# Needed to support regeneration of the autotool artifacts.
BuildRequires: autoconf
BuildRequires: automake
BuildRequires: libtool
# For our documentation
BuildRequires: doxygen
# For tests!
BuildRequires: libcmocka-devel
BuildRequires: libevent-devel
# For lib389 and related components
BuildRequires: python%{python3_pkgversion}
BuildRequires: python%{python3_pkgversion}-devel
BuildRequires: python%{python3_pkgversion}-setuptools
BuildRequires: python%{python3_pkgversion}-ldap
BuildRequires: python%{python3_pkgversion}-six
BuildRequires: python%{python3_pkgversion}-pyasn1
BuildRequires: python%{python3_pkgversion}-pyasn1-modules
BuildRequires: python%{python3_pkgversion}-dateutil
BuildRequires: python%{python3_pkgversion}-argcomplete
BuildRequires: python%{python3_pkgversion}-argparse-manpage
BuildRequires: python%{python3_pkgversion}-policycoreutils
BuildRequires: python%{python3_pkgversion}-libselinux
# For cockpit
BuildRequires: rsync
Requires: %{name}-libs = %{version}-%{release}
Requires: python%{python3_pkgversion}-lib389 = %{version}-%{release}
# this is needed for using semanage from our setup scripts
Requires: policycoreutils-python-utils
Requires: /usr/sbin/semanage
Requires: libsemanage-python%{python3_pkgversion}
Requires: selinux-policy >= 3.14.1-29
# the following are needed for some of our scripts
Requires: openldap-clients
Requires: openssl-perl
Requires: python%{python3_pkgversion}-ldap
# this is needed to setup SSL if you are not using the
# administration server package
Requires: nss-tools
Requires: nss >= 3.34
# these are not found by the auto-dependency method
# they are required to support the mandatory LDAP SASL mechs
Requires: cyrus-sasl-gssapi
Requires: cyrus-sasl-md5
Requires: cyrus-sasl-plain
# this is needed for verify-db.pl
Requires: libdb-utils
# Needed for password dictionary checks
Requires: cracklib-dicts
# This picks up libperl.so as a Requires, so we add this versioned one
Requires: perl(:MODULE_COMPAT_%(eval "`%{__perl} -V:version`"; echo $version))
Requires: perl-Errno >= 1.23-360
# Needed by logconv.pl
Requires: perl-DB_File
Requires: perl-Archive-Tar
# Needed for password dictionary checks
Requires: cracklib-dicts
# Picks up our systemd deps.
%{?systemd_requires}
Obsoletes: %{name} <= 1.3.5.4
Source0: https://releases.pagure.org/389-ds-base/%{name}-%{version}.tar.bz2
# 389-ds-git.sh should be used to generate the source tarball from git
Source1: %{name}-git.sh
Source2: %{name}-devel.README
%if %{bundle_jemalloc}
Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2
%endif
Patch01: 0001-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch
Patch02: 0002-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch
Patch03: 0003-do-not-add-referrals-for-masters-with-different-data.patch
Patch04: 0004-Ticket-50933-Update-2307compat.ldif.patch
Patch05: 0005-Issue-50933-Fix-OID-change-between-10rfc2307-and-10r.patch
Patch06: 0006-Ticket-51131-improve-mutex-alloc-in-conntable.patch
Patch07: 0007-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch
Patch08: 0008-Issue-3657-Add-options-to-dsctl-for-dsrc-file.patch
Patch09: 0009-Issue-4440-BUG-ldifgen-with-start-idx-option-fails-w.patch
Patch10: 0010-Issue-4449-dsconf-replication-monitor-fails-to-retri.patch
Patch11: 0011-Issue-4243-Fix-test-SyncRepl-plugin-provides-a-wrong.patch
Patch12: 0012-Add-dsconf-replication-monitor-test-case-gitHub-issu.patch
Patch13: 0013-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch
Patch14: 0014-Issue-4428-BUG-Paged-Results-with-critical-false-cau.patch
Patch15: 0015-Issue-4315-performance-search-rate-nagle-triggers-hi.patch
Patch16: 0016-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch
Patch17: 0017-Issue-4483-heap-use-after-free-in-slapi_be_getsuffix.patch
Patch18: 0018-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch
Patch19: 0019-Issue-4504-Fix-pytest-test_dsconf_replication_monito.patch
%description
389 Directory Server is an LDAPv3 compliant server. The base package includes
the LDAP server and command line utilities for server administration.
%if %{use_asan}
WARNING! This build is linked to Address Sanitisation libraries. This probably
isn't what you want. Please contact support immediately.
Please see http://seclists.org/oss-sec/2016/q1/363 for more information.
%endif
%package libs
Summary: Core libraries for 389 Directory Server
Group: System Environment/Daemons
BuildRequires: nspr-devel
BuildRequires: nss-devel >= 3.34
BuildRequires: openldap-devel
BuildRequires: libdb-devel
BuildRequires: cyrus-sasl-devel
BuildRequires: libicu-devel
BuildRequires: pcre-devel
BuildRequires: libtalloc-devel
BuildRequires: libevent-devel
BuildRequires: libtevent-devel
Requires: krb5-libs
Requires: libevent
BuildRequires: systemd-devel
Provides: svrcore = 4.1.4
Conflicts: svrcore
Obsoletes: svrcore <= 4.1.3
%description libs
Core libraries for the 389 Directory Server base package. These libraries
are used by the main package and the -devel package. This allows the -devel
package to be installed with just the -libs package and without the main package.
%if %{use_legacy}
%package legacy-tools
Summary: Legacy utilities for 389 Directory Server
Group: System Environment/Daemons
Obsoletes: %{name} <= 1.4.0.9
Requires: %{name}-libs = %{version}-%{release}
# for setup-ds.pl to support ipv6
%if %{use_Socket6}
Requires: perl-Socket6
%else
Requires: perl-Socket
%endif
Requires: perl-NetAddr-IP
# use_openldap assumes perl-Mozilla-LDAP is built with openldap support
Requires: perl-Mozilla-LDAP
# for setup-ds.pl
Requires: bind-utils
%global __provides_exclude_from %{_libdir}/%{pkgname}/perl
%global __requires_exclude perl\\((DSCreate|DSMigration|DSUpdate|DSUtil|Dialog|DialogManager|FileConn|Inf|Migration|Resource|Setup|SetupLog)
%{?perl_default_filter}
%description legacy-tools
Legacy (and deprecated) utilities for 389 Directory Server. This includes
the old account management and task scripts. These are deprecated in favour of
the dscreate, dsctl, dsconf and dsidm tools.
%endif
%package devel
Summary: Development libraries for 389 Directory Server
Group: Development/Libraries
Requires: %{name}-libs = %{version}-%{release}
Requires: pkgconfig
Requires: nspr-devel
Requires: nss-devel >= 3.34
Requires: openldap-devel
Requires: libtalloc
Requires: libevent
Requires: libtevent
Requires: systemd-libs
Provides: svrcore-devel = 4.1.4
Conflicts: svrcore-devel
Obsoletes: svrcore-devel <= 4.1.3
%description devel
Development Libraries and headers for the 389 Directory Server base package.
%package snmp
Summary: SNMP Agent for 389 Directory Server
Group: System Environment/Daemons
Requires: %{name} = %{version}-%{release}
Obsoletes: %{name} <= 1.4.0.0
%description snmp
SNMP Agent for the 389 Directory Server base package.
%package -n python%{python3_pkgversion}-lib389
Summary: A library for accessing, testing, and configuring the 389 Directory Server
BuildArch: noarch
Group: Development/Libraries
Requires: openssl
Requires: iproute
Requires: platform-python
Recommends: bash-completion
Requires: python%{python3_pkgversion}-ldap
Requires: python%{python3_pkgversion}-six
Requires: python%{python3_pkgversion}-pyasn1
Requires: python%{python3_pkgversion}-pyasn1-modules
Requires: python%{python3_pkgversion}-dateutil
Requires: python%{python3_pkgversion}-argcomplete
Requires: python%{python3_pkgversion}-libselinux
Requires: python%{python3_pkgversion}-setuptools
Requires: python%{python3_pkgversion}-distro
%{?python_provide:%python_provide python%{python3_pkgversion}-lib389}
%description -n python%{python3_pkgversion}-lib389
This module contains tools and libraries for accessing, testing,
and configuring the 389 Directory Server.
%package -n cockpit-389-ds
Summary: Cockpit UI Plugin for configuring and administering the 389 Directory Server
BuildArch: noarch
Requires: cockpit
Requires: platform-python
Requires: python%{python3_pkgversion}-lib389
%description -n cockpit-389-ds
A cockpit UI Plugin for configuring and administering the 389 Directory Server
%prep
# Unpack Source0 and apply Patch01..Patch19 with -p1; -v logs each patch.
%autosetup -p1 -v -n %{name}-%{version}%{?prerel}
%if %{bundle_jemalloc}
# -T -D -b 3: do not re-unpack Source0, keep the existing tree, and
# additionally unpack Source3 (the bundled jemalloc tarball) beside it.
%setup -q -n %{name}-%{version}%{?prerel} -T -D -b 3
%endif
# Ship the plugin-development README; packaged as doc in the devel subpackage.
cp %{SOURCE2} README.devel
%build
# Assemble configure flags from the global feature knobs defined at the top
# of this spec (ASAN, rust, legacy perl tooling, clang, bundled jemalloc);
# unset variables simply expand to nothing on the configure line below.
OPENLDAP_FLAG="--with-openldap"
%{?with_tmpfiles_d: TMPFILES_FLAG="--with-tmpfiles-d=%{with_tmpfiles_d}"}
# hack hack hack https://bugzilla.redhat.com/show_bug.cgi?id=833529
NSSARGS="--with-nss-lib=%{_libdir} --with-nss-inc=%{_includedir}/nss3"
%if %{use_asan}
ASAN_FLAGS="--enable-asan --enable-debug"
%endif
%if %{use_rust}
RUST_FLAGS="--enable-rust"
%endif
%if %{use_legacy}
LEGACY_FLAGS="--enable-legacy --enable-perl"
%else
LEGACY_FLAGS="--disable-legacy --disable-perl"
%endif
%if %{use_clang}
export CC=clang
export CXX=clang++
CLANG_FLAGS="--enable-clang"
%endif
%if %{bundle_jemalloc}
# Override page size, bz #1545539
# 4K
%ifarch %ix86 %arm x86_64 s390x
%define lg_page --with-lg-page=12
%endif
# 64K
%ifarch ppc64 ppc64le aarch64
%define lg_page --with-lg-page=16
%endif
# Override huge page size on aarch64
# 2M instead of 512M
%ifarch aarch64
%define lg_hugepage --with-lg-hugepage=21
%endif
# Build jemalloc
# The bundled allocator is configured into a private libdir/bindir under the
# package's own directory so it cannot conflict with a system jemalloc.
pushd ../%{jemalloc_name}-%{jemalloc_ver}
%configure \
--libdir=%{_libdir}/%{pkgname}/lib \
--bindir=%{_libdir}/%{pkgname}/bin \
--enable-prof
make %{?_smp_mflags}
popd
%endif
# Enforce strict linking
%define _strict_symbol_defs_build 1
# Rebuild the autotool artifacts now.
autoreconf -fiv
%configure --enable-autobind --with-selinux $OPENLDAP_FLAG $TMPFILES_FLAG \
--with-systemd \
--with-systemdsystemunitdir=%{_unitdir} \
--with-systemdsystemconfdir=%{_sysconfdir}/systemd/system \
--with-systemdgroupname=%{groupname} \
--libexecdir=%{_libexecdir}/%{pkgname} \
$NSSARGS $ASAN_FLAGS $RUST_FLAGS $LEGACY_FLAGS $CLANG_FLAGS \
--enable-cmocka
# lib389
pushd ./src/lib389
%py3_build
popd
# argparse-manpage dynamic man pages have hardcoded man v1 in header,
# need to change it to v8
sed -i "1s/\"1\"/\"8\"/" %{_builddir}/%{name}-%{version}%{?prerel}/src/lib389/man/dsconf.8
sed -i "1s/\"1\"/\"8\"/" %{_builddir}/%{name}-%{version}%{?prerel}/src/lib389/man/dsctl.8
sed -i "1s/\"1\"/\"8\"/" %{_builddir}/%{name}-%{version}%{?prerel}/src/lib389/man/dsidm.8
sed -i "1s/\"1\"/\"8\"/" %{_builddir}/%{name}-%{version}%{?prerel}/src/lib389/man/dscreate.8
# Generate symbolic info for debuggers
export XCFLAGS=$RPM_OPT_FLAGS
# NOTE(review): parallel make is deliberately commented out here; the main
# build runs single-threaded. Confirm whether this is still required.
#make %{?_smp_mflags}
make
%install
mkdir -p %{buildroot}%{_datadir}/gdb/auto-load%{_sbindir}
mkdir -p %{buildroot}%{_datadir}/cockpit
make DESTDIR="$RPM_BUILD_ROOT" install
# Cockpit file list
find %{buildroot}%{_datadir}/cockpit/389-console -type d | sed -e "s@%{buildroot}@@" | sed -e 's/^/\%dir /' > cockpit.list
find %{buildroot}%{_datadir}/cockpit/389-console -type f | sed -e "s@%{buildroot}@@" >> cockpit.list
# Copy in our docs from doxygen.
cp -r %{_builddir}/%{name}-%{version}%{?prerel}/man/man3 $RPM_BUILD_ROOT/%{_mandir}/man3
# lib389
pushd src/lib389
%py3_install
popd
mkdir -p $RPM_BUILD_ROOT/var/log/%{pkgname}
mkdir -p $RPM_BUILD_ROOT/var/lib/%{pkgname}
# Fixed typo: this was "/var/3lock" - the files list ghosts the lock
# directory as /var/lock/<pkgname>, so the buildroot path must match.
mkdir -p $RPM_BUILD_ROOT/var/lock/%{pkgname}
# for systemd
mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/systemd/system/%{groupname}.wants
#remove libtool archives and static libs
find %{buildroot} -type f -name "*.la" -delete
find %{buildroot} -type f -name "*.a" -delete
%if %{use_legacy}
# make sure perl scripts have a proper shebang
sed -i -e 's|#{{PERL-EXEC}}|#!/usr/bin/perl|' $RPM_BUILD_ROOT%{_datadir}/%{pkgname}/script-templates/template-*.pl
%endif
%if %{bundle_jemalloc}
# Install the bundled jemalloc and keep its license/readme alongside ours.
pushd ../%{jemalloc_name}-%{jemalloc_ver}
make DESTDIR="$RPM_BUILD_ROOT" install_lib install_bin
cp -pa COPYING ../%{name}-%{version}%{?prerel}/COPYING.jemalloc
cp -pa README ../%{name}-%{version}%{?prerel}/README.jemalloc
popd
%endif
%check
# This checks the code, if it fails it prints why, then re-raises the fail to shortcircuit the rpm build.
if ! make DESTDIR="$RPM_BUILD_ROOT" check; then cat ./test-suite.log && false; fi
%clean
# Scrub the buildroot between builds.
rm -rf $RPM_BUILD_ROOT
%post
# Main-package post-install: reload systemd unit files and create the
# soft-static dirsrv user/group the server runs as.
# Scriptlet output is captured in DEBUGPOSTTRANS when set, discarded otherwise.
# NOTE(review): output2 is assigned but never used in this scriptlet.
if [ -n "$DEBUGPOSTTRANS" ] ; then
output=$DEBUGPOSTTRANS
output2=${DEBUGPOSTTRANS}.upgrade
else
output=/dev/null
output2=/dev/null
fi
# reload to pick up any changes to systemd files
/bin/systemctl daemon-reload >$output 2>&1 || :
# https://fedoraproject.org/wiki/Packaging:UsersAndGroups#Soft_static_allocation
# Soft static allocation for UID and GID
USERNAME="dirsrv"
ALLOCATED_UID=389
GROUPNAME="dirsrv"
ALLOCATED_GID=389
HOMEDIR="/usr/share/dirsrv"
getent group $GROUPNAME >/dev/null || /usr/sbin/groupadd -f -g $ALLOCATED_GID -r $GROUPNAME
# Prefer the reserved UID 389; if it is already taken, fall back to a
# dynamically allocated system UID.
if ! getent passwd $USERNAME >/dev/null ; then
if ! getent passwd $ALLOCATED_UID >/dev/null ; then
/usr/sbin/useradd -r -u $ALLOCATED_UID -g $GROUPNAME -d $HOMEDIR -s /sbin/nologin -c "user for 389-ds-base" $USERNAME
else
/usr/sbin/useradd -r -g $GROUPNAME -d $HOMEDIR -s /sbin/nologin -c "user for 389-ds-base" $USERNAME
fi
fi
# Reload our sysctl before we restart (if we can)
sysctl --system &> $output; true
%preun
# $1 is the count of package instances left after this transaction:
# 0 means full erase (not an upgrade).
if [ $1 -eq 0 ]; then # Final removal
# remove instance specific service files/links
rm -rf %{_sysconfdir}/systemd/system/%{groupname}.wants/* > /dev/null 2>&1 || :
fi
%postun
# On final erase, clean up the runtime pid/state directory.
if [ $1 = 0 ]; then # Final removal
rm -rf /var/run/%{pkgname}
fi
%post snmp
# Standard systemd lifecycle scriptlets for the SNMP subagent service.
%systemd_post %{pkgname}-snmp.service
%preun snmp
%systemd_preun %{pkgname}-snmp.service %{groupname}
%postun snmp
%systemd_postun_with_restart %{pkgname}-snmp.service
%if %{use_legacy}
%post legacy-tools
# Upgrade every existing directory server instance in place with the legacy
# setup-ds.pl tool, restarting only the instances that were running before.
# START UPGRADE SCRIPT
# Scriptlet output goes to DEBUGPOSTTRANS when set, otherwise it is discarded.
# NOTE(review): output2 is assigned but never used in this scriptlet.
if [ -n "$DEBUGPOSTTRANS" ] ; then
output=$DEBUGPOSTTRANS
output2=${DEBUGPOSTTRANS}.upgrade
else
output=/dev/null
output2=/dev/null
fi
# find all instances
instances="" # instances that require a restart after upgrade
ninst=0 # number of instances found in total
echo looking for instances in %{_sysconfdir}/%{pkgname} > $output 2>&1 || :
instbase="%{_sysconfdir}/%{pkgname}"
# Each slapd-<name> config directory maps to a systemd unit dirsrv@<name>.
for dir in $instbase/slapd-* ; do
echo dir = $dir >> $output 2>&1 || :
if [ ! -d "$dir" ] ; then continue ; fi
case "$dir" in *.removed) continue ;; esac
basename=`basename $dir`
inst="%{pkgname}@`echo $basename | sed -e 's/slapd-//g'`"
echo found instance $inst - getting status >> $output 2>&1 || :
if /bin/systemctl -q is-active $inst ; then
echo instance $inst is running >> $output 2>&1 || :
instances="$instances $inst"
else
echo instance $inst is not running >> $output 2>&1 || :
fi
ninst=`expr $ninst + 1`
done
if [ $ninst -eq 0 ] ; then
echo no instances to upgrade >> $output 2>&1 || :
exit 0 # have no instances to upgrade - just skip the rest
fi
# shutdown all instances
echo shutting down all instances . . . >> $output 2>&1 || :
for inst in $instances ; do
echo stopping instance $inst >> $output 2>&1 || :
/bin/systemctl stop $inst >> $output 2>&1 || :
done
echo remove pid files . . . >> $output 2>&1 || :
/bin/rm -f /var/run/%{pkgname}*.pid /var/run/%{pkgname}*.startpid
# do the upgrade
echo upgrading instances . . . >> $output 2>&1 || :
# Keep only 'd' characters from DEBUGPOSTSETUP so they can be passed as
# debug flags (-d, -dd, ...) to setup-ds.pl.
DEBUGPOSTSETUPOPT=`/usr/bin/echo $DEBUGPOSTSETUP | /usr/bin/sed -e "s/[^d]//g"`
if [ -n "$DEBUGPOSTSETUPOPT" ] ; then
%{_sbindir}/setup-ds.pl -$DEBUGPOSTSETUPOPT -u -s General.UpdateMode=offline >> $output 2>&1 || :
else
%{_sbindir}/setup-ds.pl -u -s General.UpdateMode=offline >> $output 2>&1 || :
fi
# restart instances that require it
for inst in $instances ; do
echo restarting instance $inst >> $output 2>&1 || :
/bin/systemctl start $inst >> $output 2>&1 || :
done
#END UPGRADE
%endif
# NOTE(review): this exit 0 sits after the conditional above; when the
# legacy build knob is off it attaches to the preceding scriptlet instead
# of the legacy-tools one. Confirm this is intended.
exit 0
%files
%if %{bundle_jemalloc}
%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.jemalloc
%license COPYING.jemalloc
%else
%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl
%endif
%dir %{_sysconfdir}/%{pkgname}
%dir %{_sysconfdir}/%{pkgname}/schema
%config(noreplace)%{_sysconfdir}/%{pkgname}/schema/*.ldif
%dir %{_sysconfdir}/%{pkgname}/config
%dir %{_sysconfdir}/systemd/system/%{groupname}.wants
%config(noreplace)%{_sysconfdir}/%{pkgname}/config/slapd-collations.conf
%config(noreplace)%{_sysconfdir}/%{pkgname}/config/certmap.conf
%{_datadir}/%{pkgname}
%{_datadir}/gdb/auto-load/*
%{_unitdir}
%{_bindir}/dbscan
%{_mandir}/man1/dbscan.1.gz
%{_bindir}/ds-replcheck
%{_mandir}/man1/ds-replcheck.1.gz
%{_bindir}/ds-logpipe.py
%{_mandir}/man1/ds-logpipe.py.1.gz
%{_bindir}/ldclt
%{_mandir}/man1/ldclt.1.gz
%{_sbindir}/ldif2ldap
%{_mandir}/man8/ldif2ldap.8.gz
%{_bindir}/logconv.pl
%{_mandir}/man1/logconv.pl.1.gz
%{_bindir}/pwdhash
%{_mandir}/man1/pwdhash.1.gz
%{_bindir}/readnsstate
%{_mandir}/man1/readnsstate.1.gz
# Remove for now: %caps(CAP_NET_BIND_SERVICE=pe) {_sbindir}/ns-slapd
%{_sbindir}/ns-slapd
%{_mandir}/man8/ns-slapd.8.gz
%{_libexecdir}/%{pkgname}/ds_systemd_ask_password_acl
%{_mandir}/man5/99user.ldif.5.gz
%{_mandir}/man5/certmap.conf.5.gz
%{_mandir}/man5/slapd-collations.conf.5.gz
%{_mandir}/man5/dirsrv.5.gz
%{_mandir}/man5/dirsrv.systemd.5.gz
%{_libdir}/%{pkgname}/python
%dir %{_libdir}/%{pkgname}/plugins
%{_libdir}/%{pkgname}/plugins/*.so
# This has to be hardcoded to /lib - $libdir changes between lib/lib64, but
# sysctl.d is always in /lib.
%{_prefix}/lib/sysctl.d/*
%dir %{_localstatedir}/lib/%{pkgname}
%dir %{_localstatedir}/log/%{pkgname}
%ghost %dir %{_localstatedir}/lock/%{pkgname}
%exclude %{_sbindir}/ldap-agent*
%exclude %{_mandir}/man1/ldap-agent.1.gz
%exclude %{_unitdir}/%{pkgname}-snmp.service
%if %{bundle_jemalloc}
%{_libdir}/%{pkgname}/lib/
%{_libdir}/%{pkgname}/bin/
%exclude %{_libdir}/%{pkgname}/bin/jemalloc-config
%exclude %{_libdir}/%{pkgname}/bin/jemalloc.sh
%exclude %{_libdir}/%{pkgname}/lib/libjemalloc.a
%exclude %{_libdir}/%{pkgname}/lib/libjemalloc.so
%exclude %{_libdir}/%{pkgname}/lib/libjemalloc_pic.a
%exclude %{_libdir}/%{pkgname}/lib/pkgconfig
%endif
%files devel
%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.devel
%{_mandir}/man3/*
%{_includedir}/svrcore.h
%{_includedir}/%{pkgname}
%{_libdir}/libsvrcore.so
%{_libdir}/%{pkgname}/libslapd.so
%{_libdir}/%{pkgname}/libns-dshttpd.so
%{_libdir}/%{pkgname}/libsds.so
%{_libdir}/%{pkgname}/libldaputil.so
%{_libdir}/pkgconfig/svrcore.pc
%{_libdir}/pkgconfig/dirsrv.pc
%{_libdir}/pkgconfig/libsds.pc
%files libs
%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.devel
%dir %{_libdir}/%{pkgname}
%{_libdir}/libsvrcore.so.*
%{_libdir}/%{pkgname}/libslapd.so.*
%{_libdir}/%{pkgname}/libns-dshttpd-*.so
%{_libdir}/%{pkgname}/libsds.so.*
%{_libdir}/%{pkgname}/libldaputil.so.*
%{_libdir}/%{pkgname}/librewriters.so*
%if %{bundle_jemalloc}
%{_libdir}/%{pkgname}/lib/libjemalloc.so.2
%endif
%if %{use_rust}
%{_libdir}/%{pkgname}/librsds.so
%endif
%if %{use_legacy}
%files legacy-tools
%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.devel
%{_bindir}/infadd
%{_mandir}/man1/infadd.1.gz
%{_bindir}/ldif
%{_mandir}/man1/ldif.1.gz
%{_bindir}/migratecred
%{_mandir}/man1/migratecred.1.gz
%{_bindir}/mmldif
%{_mandir}/man1/mmldif.1.gz
%{_bindir}/rsearch
%{_mandir}/man1/rsearch.1.gz
%{_libexecdir}/%{pkgname}/ds_selinux_enabled
%{_libexecdir}/%{pkgname}/ds_selinux_port_query
%config(noreplace)%{_sysconfdir}/%{pkgname}/config/template-initconfig
%{_mandir}/man5/template-initconfig.5.gz
%{_datadir}/%{pkgname}/properties/*.res
%{_datadir}/%{pkgname}/script-templates
%{_datadir}/%{pkgname}/updates
%{_sbindir}/ldif2ldap
%{_mandir}/man8/ldif2ldap.8.gz
%{_sbindir}/bak2db
%{_mandir}/man8/bak2db.8.gz
%{_sbindir}/db2bak
%{_mandir}/man8/db2bak.8.gz
%{_sbindir}/db2index
%{_mandir}/man8/db2index.8.gz
%{_sbindir}/db2ldif
%{_mandir}/man8/db2ldif.8.gz
%{_sbindir}/dbverify
%{_mandir}/man8/dbverify.8.gz
%{_sbindir}/ldif2db
%{_mandir}/man8/ldif2db.8.gz
%{_sbindir}/restart-dirsrv
%{_mandir}/man8/restart-dirsrv.8.gz
%{_sbindir}/start-dirsrv
%{_mandir}/man8/start-dirsrv.8.gz
%{_sbindir}/status-dirsrv
%{_mandir}/man8/status-dirsrv.8.gz
%{_sbindir}/stop-dirsrv
%{_mandir}/man8/stop-dirsrv.8.gz
%{_sbindir}/upgradedb
%{_mandir}/man8/upgradedb.8.gz
%{_sbindir}/vlvindex
%{_mandir}/man8/vlvindex.8.gz
%{_sbindir}/monitor
%{_mandir}/man8/monitor.8.gz
%{_sbindir}/dbmon.sh
%{_mandir}/man8/dbmon.sh.8.gz
%{_sbindir}/dn2rdn
%{_mandir}/man8/dn2rdn.8.gz
%{_sbindir}/restoreconfig
%{_mandir}/man8/restoreconfig.8.gz
%{_sbindir}/saveconfig
%{_mandir}/man8/saveconfig.8.gz
%{_sbindir}/suffix2instance
%{_mandir}/man8/suffix2instance.8.gz
%{_sbindir}/upgradednformat
%{_mandir}/man8/upgradednformat.8.gz
%{_mandir}/man1/dbgen.pl.1.gz
%{_bindir}/repl-monitor
%{_mandir}/man1/repl-monitor.1.gz
%{_bindir}/repl-monitor.pl
%{_mandir}/man1/repl-monitor.pl.1.gz
%{_bindir}/cl-dump
%{_mandir}/man1/cl-dump.1.gz
%{_bindir}/cl-dump.pl
%{_mandir}/man1/cl-dump.pl.1.gz
%{_bindir}/dbgen.pl
%{_mandir}/man8/bak2db.pl.8.gz
%{_sbindir}/bak2db.pl
%{_sbindir}/cleanallruv.pl
%{_mandir}/man8/cleanallruv.pl.8.gz
%{_sbindir}/db2bak.pl
%{_mandir}/man8/db2bak.pl.8.gz
%{_sbindir}/db2index.pl
%{_mandir}/man8/db2index.pl.8.gz
%{_sbindir}/db2ldif.pl
%{_mandir}/man8/db2ldif.pl.8.gz
%{_sbindir}/fixup-linkedattrs.pl
%{_mandir}/man8/fixup-linkedattrs.pl.8.gz
%{_sbindir}/fixup-memberof.pl
%{_mandir}/man8/fixup-memberof.pl.8.gz
%{_sbindir}/ldif2db.pl
%{_mandir}/man8/ldif2db.pl.8.gz
%{_sbindir}/migrate-ds.pl
%{_mandir}/man8/migrate-ds.pl.8.gz
%{_sbindir}/ns-accountstatus.pl
%{_mandir}/man8/ns-accountstatus.pl.8.gz
%{_sbindir}/ns-activate.pl
%{_mandir}/man8/ns-activate.pl.8.gz
%{_sbindir}/ns-inactivate.pl
%{_mandir}/man8/ns-inactivate.pl.8.gz
%{_sbindir}/ns-newpwpolicy.pl
%{_mandir}/man8/ns-newpwpolicy.pl.8.gz
%{_sbindir}/remove-ds.pl
%{_mandir}/man8/remove-ds.pl.8.gz
%{_sbindir}/schema-reload.pl
%{_mandir}/man8/schema-reload.pl.8.gz
%{_sbindir}/setup-ds.pl
%{_mandir}/man8/setup-ds.pl.8.gz
%{_sbindir}/syntax-validate.pl
%{_mandir}/man8/syntax-validate.pl.8.gz
%{_sbindir}/usn-tombstone-cleanup.pl
%{_mandir}/man8/usn-tombstone-cleanup.pl.8.gz
%{_sbindir}/verify-db.pl
%{_mandir}/man8/verify-db.pl.8.gz
%{_libdir}/%{pkgname}/perl
%endif
%files snmp
%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.devel
%config(noreplace)%{_sysconfdir}/%{pkgname}/config/ldap-agent.conf
%{_sbindir}/ldap-agent*
%{_mandir}/man1/ldap-agent.1.gz
%{_unitdir}/%{pkgname}-snmp.service
%files -n python%{python3_pkgversion}-lib389
%doc LICENSE LICENSE.GPLv3+
%{python3_sitelib}/lib389*
%{_sbindir}/dsconf
%{_mandir}/man8/dsconf.8.gz
%{_sbindir}/dscreate
%{_mandir}/man8/dscreate.8.gz
%{_sbindir}/dsctl
%{_mandir}/man8/dsctl.8.gz
%{_sbindir}/dsidm
%{_mandir}/man8/dsidm.8.gz
%{_libexecdir}/%{pkgname}/dscontainer
%files -n cockpit-389-ds -f cockpit.list
%{_datarootdir}/metainfo/389-console/org.port389.cockpit_console.metainfo.xml
%doc README.md
%changelog
* Wed Dec 16 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-6
- Bump version to 1.4.3.16-6
- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV - consumer (Unavailable) State (green) Reason (error (0)
- Resolves: Bug 1904991 - Unexpected info returned to ldap request
- Resolves: Bug 1843838 - heap-use-after-free in slapi_be_getsuffix
- Resolves: Bug 1903133 - Server-Cert.crt created using dscreate has Subject:CN =localhost instead of hostname.
* Wed Dec 9 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-5
- Bump version to 1.4.3.16-5
- Resolves: Bug 1879386 - cli dsconf replication monitor fails to retrieve database RUV
- Resolves: Bug 1887449 - Sync repl: missing update because operation are erroneously stated as nested
- Resolves: Bug 1887415 - Sync repl - if a series of updates target the same entry then the cookie get wrong changenumber
- Resolves: Bug 1851978 - SyncRepl plugin provides a wrong cookie
* Thu Dec 3 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-4
- Bump version to 1.4.3.16-4
- Resolves: Bug 1843517 - Using ldifgen with --start-idx option fails with unsupported operand
- Resolves: Bug 1801086 - [RFE] Generate dsrc file using dsconf
- Resolves: Bug 1843838 - heap-use-after-free in slapi_be_getsuffix
* Wed Nov 25 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-3
- Bump version to 1.4.3.16-3
- Resolves: Bug 1859219 - rfc2307 and rfc2307bis compat schema
- Resolves: Bug 1843604 - reduce the cost of allocation/free when open/close a connection
- Resolves: Bug 1898850 - Entries conflict not resolved by replication
* Thu Nov 19 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-2
- Bump version to 1.4.3.16-2
- Resolves: Bug 1859227 - create keep alive entry after on line init
- Resolves: Bug 1888863 - group rdn with leading space char and add fails error 21 invalid syntax and delete fails error 32
- Resolves: Bug 1859228 - do not add referrals for masters with different data generation
* Mon Oct 26 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.16-1
- Bump version to 1.4.3.16-1
- Resolves: Bug 1887415 - Sync repl - if a serie of updates target the same entry then the cookie get wrong changenumber
- Resolves: Bug 1859225 - suffix management in backends incorrect
* Mon Oct 26 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.14-1
- Bump version to 1.4.3.14-1
- Resolves: Bug 1862529 - Rebase 389-ds-base-1.4.3 in RHEL 8.4
- Resolves: Bug 1859301 - Misleading message in access log for idle timeout
- Resolves: Bug 1889782 - Missing closing quote when reporting the details of unindexed/paged search results
- Resolves: Bug 1862971 - dsidm user status fails with Error: 'nsUserAccount' object has no attribute 'is_locked'
- Resolves: Bug 1859878 - Managed Entries configuration not being enforced
- Resolves: Bug 1851973 - Duplicate entryUSN numbers for different LDAP entries in the same backend
- Resolves: Bug 1851967 - if dbhome directory is set online backup fails
- Resolves: Bug 1887449 - Sync repl: missing update because operation are erroneously stated as nested
- Resolves: Bug 1887415 - Sync repl - if a serie of updates target the same entry then the cookie get wrong changenumber
- Resolves: Bug 1851978 - SyncRepl plugin provides a wrong cookie
- Resolves: Bug 1843604 - reduce the cost of allocation/free when open/close a connection
- Resolves: Bug 1872930 - dscreate: Not possible to bind to a unix domain socket
- Resolves: Bug 1861504 - ds-replcheck crashes in offline mode
- Resolves: Bug 1859282 - remove ldbm_back_entry_release
- Resolves: Bug 1859225 - suffix management in backends incorrect
- Resolves: Bug 1859224 - remove unused or unnecessary database plugin functions
- Resolves: Bug 1859219 - rfc2307 and rfc2307bis compat schema
- Resolves: Bug 1851975 - Add option to reject internal unindexed searches
- Resolves: Bug 1851972 - Remove code duplication from the BDB backend separation work
- Resolves: Bug 1850275 - Add new access log keywords for time spent in work queue and actual operation time
- Resolves: Bug 1848359 - Add failover credentials to replication agreement
- Resolves: Bug 1837315 - Healthcheck code DSBLE0002 not returned on disabled suffix
* Wed Aug 5 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-5
- Bump version to 1.4.3.8-5
- Resolves: Bug 1841086 - SSL alert: The value of sslVersionMax "TLS1.3" is higher than the supported version
- Resolves: Bug 1800529 - Memory leaks in disk monitoring
- Resolves: Bug 1748227 - Instance name length is not enforced
- Resolves: Bug 1849418 - python3-lib389 pulls unnecessary bash-completion package
* Fri Jun 26 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-4
- Bump version to 1.4.3.8-4
- Resolves: Bug 1806978 - ns-slapd crashes during db2ldif
- Resolves: Bug 1450863 - Log warning when tuning of nsslapd-threadnumber above or below the optimal value
- Resolves: Bug 1647017 - A distinguished value of a single valued attribute can be missing in an entry
- Resolves: Bug 1806573 - Dsctl healthcheck doesn't work when using instance name with 'slapd-'
- Resolves: Bug 1807773 - dsctl healthcheck : typo in DSREPLLE0002 Lint error suggested resolution commands
- Resolves: Bug 1843567 - Healthcheck to find notes=F
- Resolves: Bug 1845094 - User/Directory Manager can modify Password Policy attribute "pwdReset"
- Resolves: Bug 1850275 - Add new access log keywords for time spent in work queue and actual operation time
- Resolves: Bug 1442386 - Recreating an index while changing case will create an indexfile with the old name (different case) and after restart the indexfile is abandoned
- Resolves: Bug 1672574 - nsIndexIDListScanLimit accepts any value
- Resolves: Bug 1800529 - Memory leaks in disk monitoring
* Fri Jun 5 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-3
- Bump version to 1.4.3.8-3
- Resolves: Bug 1835619 - Healthcheck with --json option reports "Object of type 'bytes' is not JSON serializable" when mapping tree is deleted
- Resolves: Bug 1836428 - Directory Server ds-replcheck RFE to add a timeout command-line arg/value to wait longer when connecting to a replica server
- Resolves: Bug 1843090 - abort when a empty valueset is freed
- Resolves: Bug 1843156 - Prevent unnecessarily duplication of the target entry
- Resolves: Bug 1843157 - Check for clock errors and time skew
- Resolves: Bug 1843159 - RFE AD filter rewriter for ObjectCategory
- Resolves: Bug 1843162 - Creating Replication Manager fails if uid=repman is used
- Resolves: Bug 1816851 - Add option to healthcheck to list all the lint reports
- Resolves: Bug 1748227 - Instance name length is not enforced
- Resolves: Bug 1748244 - dscreate doesn't sanitize instance name
* Mon May 11 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-2
- Bump version to 1.4.3.8-2
- Resolves: Bug 1833350 - Remove cockpit dependancies that are breaking builds
* Mon May 11 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-1
- Bump version to 1.4.3.8-1
- Resolves: Bug 1833350 - Rebase 389-ds-base for RHEL 8.3
- Resolves: Bug 1728943 - [RFE] Advance options in RHDS Disk Monitoring Framework
- Resolves: Bug 1775285 - [RFE] Implement the Password Policy attribute "pwdReset"
- Resolves: Bug 1638875 - [RFE] extract key/certs pem file into a private namespace
- Resolves: Bug 1758478 - AddressSanitizer: heap-buffer-overflow in ldap_utf8prev
- Resolves: Bug 1795943 - Port dbmon.sh from legacy tools package
- Resolves: Bug 1798394 - Port dbgen from legacy tools package
- Resolves: Bug 1800529 - Memory leaks in disk monitoring
- Resolves: Bug 1807419 - Unable to create a suffix with countryName either via dscreate or the admin console
- Resolves: Bug 1816848 - Database links: get_monitor() takes 1 positional argument but 2 were given
- Resolves: Bug 1816854 - Setting nsslapd-allowed-sasl-mechanisms truncates the value
- Resolves: Bug 1816857 - Searches on cn=config takes values with spaces and makes multiple attributes out of them
- Resolves: Bug 1816859 - lib389 - Replace exec() with setattr()
- Resolves: Bug 1816862 - Memory leak in indirect COS
- Resolves: Bug 1829071 - Installation of RHDS 11 fails on RHEL8 server with IPv6 disabled
- Resolves: Bug 1833515 - set 'nsslapd-enable-upgrade-hash: off' as this raises warnings in IPA
- Resolves: Bug 1790986 - cenotaph errors on modrdn operations
- Resolves: Bug 1769734 - Heavy StartTLS connection load can randomly fail with err=1
- Resolves: Bug 1758501 - LeakSanitizer: detected memory leaks in changelog5_init and perfctrs_init