Compare commits


15 Commits

Author SHA1 Message Date
eabdullin 62ada8d853 import CS 389-ds-base-1.4.3.39-3.el8 2024-05-22 11:32:14 +00:00
eabdullin 7e6d0bfb15 Sync with stable: 2024-01-15 13:06:27 +03:00
Andrew Lukoshko 91e5547fc3 import OL 389-ds-base-1.4.3.35-1.module+el8.8.0+21117+f0c9a24b 2023-07-10 07:43:07 +00:00
CentOS Sources e1f2cae548 import 389-ds-base-1.4.3.32-3.module+el8.8.0+17706+8ab0c717 2023-05-16 08:05:10 +00:00
CentOS Sources a7a184d43c import 389-ds-base-1.4.3.34-1.module+el8.7.0+18367+58a49cb0 2023-04-04 09:47:52 +00:00
CentOS Sources 69d0b88d75 import 389-ds-base-1.4.3.30-6.module+el8.7.0+16373+1a59bba2 2022-11-08 11:39:15 +00:00
CentOS Sources ceb31709f5 import 389-ds-base-1.4.3.28-8.module+el8.6.0+16880+945f9b53 2022-10-25 09:24:59 +00:00
CentOS Sources 58ecfd4248 import 389-ds-base-1.4.3.28-7.module+el8.6.0+15293+4900ec12 2022-08-02 07:09:31 +00:00
CentOS Sources 67d485ccf9 import 389-ds-base-1.4.3.28-6.module+el8.6.0+14129+983ceada 2022-05-10 07:19:32 +00:00
CentOS Sources 21cd549720 import 389-ds-base-1.4.3.23-14.module+el8.5.0+14377+c731dc97 2022-03-24 13:51:57 +00:00
CentOS Sources 7900475d31 import 389-ds-base-1.4.3.23-12.module+el8.5.0+13329+4096c77a 2021-12-21 09:12:53 +00:00
CentOS Sources 8a7f112eaf import 389-ds-base-1.4.3.23-10.module+el8.5.0+12398+47000435 2021-11-09 09:56:38 +00:00
CentOS Sources 924c45b0af import 389-ds-base-1.4.3.16-19.module+el8.4.0+11894+f5bb5c43 2021-09-10 09:06:58 +00:00
CentOS Sources 9e9ce1f94f import 389-ds-base-1.4.3.16-16.module+el8.4.0+11446+fc96bc48 2021-09-10 09:06:48 +00:00
CentOS Sources a7620a3470 import 389-ds-base-1.4.3.16-13.module+el8.4.0+10307+74bbfb4e 2021-09-10 09:06:38 +00:00
48 changed files with 2341 additions and 10440 deletions

View File

@@ -1,2 +1,3 @@
-7e651c99e43265c678c98ac2d8e31b8c48522be6 SOURCES/389-ds-base-1.4.3.8.tar.bz2
-9e06b5cc57fd185379d007696da153893cf73e30 SOURCES/jemalloc-5.2.1.tar.bz2
+bd9aab32d9cbf9231058d585479813f3420dc872 SOURCES/389-ds-base-1.4.3.39.tar.bz2
+1c8f2d0dfbf39fa8cd86363bf3314351ab21f8d4 SOURCES/jemalloc-5.3.0.tar.bz2
+978b7c5e4a9e5784fddb23ba1abe4dc5a071589f SOURCES/vendor-1.4.3.39-1.tar.gz

.gitignore vendored (5 lines changed)
View File

@@ -1,2 +1,3 @@
-SOURCES/389-ds-base-1.4.3.8.tar.bz2
-SOURCES/jemalloc-5.2.1.tar.bz2
+SOURCES/389-ds-base-1.4.3.39.tar.bz2
+SOURCES/jemalloc-5.3.0.tar.bz2
+SOURCES/vendor-1.4.3.39-1.tar.gz

View File

@@ -0,0 +1,83 @@
From 7d1bc439a07c51b5f4f37405b6b27a1990b8cb28 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 27 Feb 2024 16:30:47 -0800
Subject: [PATCH] Issue 3527 - Support HAProxy and Instance on the same machine
configuration (#6107)
Description: Improve how we handle HAProxy connections to work better when
the DS and HAProxy are on the same machine.
Ensure the client and header destination IPs are checked against the trusted IP list.
Additionally, this change also allows a configuration where
HAProxy listens on a different subnet than the one used to forward the request.
Related: https://github.com/389ds/389-ds-base/issues/3527
Reviewed by: @progier389, @jchapma (Thanks!)
---
ldap/servers/slapd/connection.c | 35 +++++++++++++++++++++++++--------
1 file changed, 27 insertions(+), 8 deletions(-)
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index d28a39bf7..10a8cc577 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1187,6 +1187,8 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
char str_ip[INET6_ADDRSTRLEN + 1] = {0};
char str_haproxy_ip[INET6_ADDRSTRLEN + 1] = {0};
char str_haproxy_destip[INET6_ADDRSTRLEN + 1] = {0};
+ int trusted_matches_ip_found = 0;
+ int trusted_matches_destip_found = 0;
struct berval **bvals = NULL;
int proxy_connection = 0;
@@ -1245,21 +1247,38 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
normalize_IPv4(conn->cin_addr, buf_ip, sizeof(buf_ip), str_ip, sizeof(str_ip));
normalize_IPv4(&pr_netaddr_dest, buf_haproxy_destip, sizeof(buf_haproxy_destip),
str_haproxy_destip, sizeof(str_haproxy_destip));
+ size_t ip_len = strlen(buf_ip);
+ size_t destip_len = strlen(buf_haproxy_destip);
/* Now, reset RC and set it to 0 only if a match is found */
haproxy_rc = -1;
- /* Allow only:
- * Trusted IP == Original Client IP == HAProxy Header Destination IP */
+ /*
+ * We need to allow a configuration where DS instance and HAProxy are on the same machine.
+ * In this case, we need to check if
+ * the HAProxy client IP (which will be a loopback address) matches one of the trusted IP addresses,
+ * while still checking that
+ * the HAProxy header destination IP address matches one of the trusted IP addresses.
+ * Additionally, this change also allows a configuration where HAProxy
+ * listens on a different subnet than the one used to forward the request.
+ */
for (size_t i = 0; bvals[i] != NULL; ++i) {
- if ((strlen(bvals[i]->bv_val) == strlen(buf_ip)) &&
- (strlen(bvals[i]->bv_val) == strlen(buf_haproxy_destip)) &&
- (strncasecmp(bvals[i]->bv_val, buf_ip, strlen(buf_ip)) == 0) &&
- (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, strlen(buf_haproxy_destip)) == 0)) {
- haproxy_rc = 0;
- break;
+ size_t bval_len = strlen(bvals[i]->bv_val);
+
+ /* Check if the Client IP (HAProxy's machine IP) address matches the trusted IP address */
+ if (!trusted_matches_ip_found) {
+ trusted_matches_ip_found = (bval_len == ip_len) && (strncasecmp(bvals[i]->bv_val, buf_ip, ip_len) == 0);
+ }
+ /* Check if the HAProxy header destination IP address matches the trusted IP address */
+ if (!trusted_matches_destip_found) {
+ trusted_matches_destip_found = (bval_len == destip_len) && (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, destip_len) == 0);
}
}
+
+ if (trusted_matches_ip_found && trusted_matches_destip_found) {
+ haproxy_rc = 0;
+ }
+
if (haproxy_rc == -1) {
slapi_log_err(SLAPI_LOG_CONNS, "connection_read_operation", "HAProxy header received from unknown source.\n");
disconnect_server_nomutex(conn, conn->c_connid, -1, SLAPD_DISCONNECT_PROXY_UNKNOWN, EPROTO);
--
2.43.0
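
The hunk above replaces the old single-match rule (trusted IP == client IP == HAProxy header destination IP) with two independent matches against the trusted list. A minimal standalone C sketch of that matching rule follows; the function name haproxy_ips_trusted and the trusted array are assumptions for the example, and plain libc string comparison stands in for the server's own normalization helpers.

#include <stddef.h>
#include <strings.h>

/* Accept the PROXY header only if BOTH the connecting client IP (HAProxy's
 * own address, possibly a loopback) and the destination IP carried in the
 * header match some entry of the NULL-terminated trusted list. The two
 * matches may come from different entries of the list. */
static int
haproxy_ips_trusted(const char *client_ip, const char *dest_ip,
                    const char *const *trusted)
{
    int client_ok = 0, dest_ok = 0;

    for (size_t i = 0; trusted[i] != NULL; i++) {
        if (!client_ok && strcasecmp(trusted[i], client_ip) == 0) {
            client_ok = 1;
        }
        if (!dest_ok && strcasecmp(trusted[i], dest_ip) == 0) {
            dest_ok = 1;
        }
    }
    return client_ok && dest_ok;
}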

View File

@@ -1,43 +0,0 @@
From 97ecf0190f264a2d87750bc2d26ebf011542e3e1 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 8 May 2020 10:52:43 -0400
Subject: [PATCH 01/12] Issue 51076 - prevent unnecessary duplication of the
target entry
Bug Description: For any update operation the MEP plugin was calling
slapi_search_internal_get_entry() which duplicates
the entry it returns. In this case the entry is just
read from and discarded, but this entry is already
in the pblock (the PRE OP ENTRY).
Fix Description: Just grab the PRE OP ENTRY from the pblock and use
that to read the attribute values from. This saves
two entry duplications for every update operation
from MEP.
fixes: https://pagure.io/389-ds-base/issue/51076
Reviewed by: tbordaz & firstyear(Thanks!!)
---
ldap/servers/plugins/mep/mep.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/ldap/servers/plugins/mep/mep.c b/ldap/servers/plugins/mep/mep.c
index ca9a64b3b..401d95e3a 100644
--- a/ldap/servers/plugins/mep/mep.c
+++ b/ldap/servers/plugins/mep/mep.c
@@ -2165,9 +2165,8 @@ mep_pre_op(Slapi_PBlock *pb, int modop)
if (e && free_entry) {
slapi_entry_free(e);
}
-
- slapi_search_internal_get_entry(sdn, 0, &e, mep_get_plugin_id());
- free_entry = 1;
+ slapi_pblock_get(pb, SLAPI_ENTRY_PRE_OP, &e);
+ free_entry = 0;
}
if (e && mep_is_managed_entry(e)) {
--
2.26.2
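
The fix above swaps an internal search, which duplicates the entry it returns, for a read of the entry slapd already stashed in the pblock. A short fragment showing that pattern in isolation; it assumes a pre-op plugin callback where pb is the operation's Slapi_PBlock, and use_entry() is a hypothetical stand-in for the plugin's read-only work.

/* Read-only access to the target entry in a pre-op callback: the entry is
 * owned by the pblock, so nothing is duplicated and nothing must be freed. */
Slapi_Entry *e = NULL;

slapi_pblock_get(pb, SLAPI_ENTRY_PRE_OP, &e);
if (e != NULL) {
    use_entry(e);   /* hypothetical read-only helper */
}
/* do NOT call slapi_entry_free(e); the pblock still owns the entry */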

View File

@@ -0,0 +1,119 @@
From dddb14210b402f317e566b6387c76a8e659bf7fa Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 14 Feb 2023 13:34:10 +0100
Subject: [PATCH 1/2] issue 5647 - covscan: memory leak in audit log when
adding entries (#5650)
covscan reported an issue about the "vals" variable in auditlog.c:231, and indeed a charray_free is missing.
Issue: 5647
Reviewed by: @mreynolds389, @droideck
---
ldap/servers/slapd/auditlog.c | 71 +++++++++++++++++++----------------
1 file changed, 38 insertions(+), 33 deletions(-)
diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 68cbc674d..3128e0497 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -177,6 +177,40 @@ write_auditfail_log_entry(Slapi_PBlock *pb)
slapi_ch_free_string(&audit_config);
}
+/*
+ * Write the attribute values to the audit log as "comments"
+ *
+ * Slapi_Attr *entry - the attribute being logged.
+ * char *attrname - the attribute name.
+ * lenstr *l - the audit log buffer
+ *
+ * Resulting output in the log:
+ *
+ * #ATTR: VALUE
+ * #ATTR: VALUE
+ */
+static void
+log_entry_attr(Slapi_Attr *entry_attr, char *attrname, lenstr *l)
+{
+ Slapi_Value **vals = attr_get_present_values(entry_attr);
+ for(size_t i = 0; vals && vals[i]; i++) {
+ char log_val[256] = "";
+ const struct berval *bv = slapi_value_get_berval(vals[i]);
+ if (bv->bv_len >= 256) {
+ strncpy(log_val, bv->bv_val, 252);
+ strcpy(log_val+252, "...");
+ } else {
+ strncpy(log_val, bv->bv_val, bv->bv_len);
+ log_val[bv->bv_len] = 0;
+ }
+ addlenstr(l, "#");
+ addlenstr(l, attrname);
+ addlenstr(l, ": ");
+ addlenstr(l, log_val);
+ addlenstr(l, "\n");
+ }
+}
+
/*
* Write "requested" attributes from the entry to the audit log as "comments"
*
@@ -212,21 +246,9 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
for (req_attr = ldap_utf8strtok_r(display_attrs, ", ", &last); req_attr;
req_attr = ldap_utf8strtok_r(NULL, ", ", &last))
{
- char **vals = slapi_entry_attr_get_charray(entry, req_attr);
- for(size_t i = 0; vals && vals[i]; i++) {
- char log_val[256] = {0};
-
- if (strlen(vals[i]) > 256) {
- strncpy(log_val, vals[i], 252);
- strcat(log_val, "...");
- } else {
- strcpy(log_val, vals[i]);
- }
- addlenstr(l, "#");
- addlenstr(l, req_attr);
- addlenstr(l, ": ");
- addlenstr(l, log_val);
- addlenstr(l, "\n");
+ slapi_entry_attr_find(entry, req_attr, &entry_attr);
+ if (entry_attr) {
+ log_entry_attr(entry_attr, req_attr, l);
}
}
} else {
@@ -234,7 +256,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
for (; entry_attr; entry_attr = entry_attr->a_next) {
Slapi_Value **vals = attr_get_present_values(entry_attr);
char *attr = NULL;
- const char *val = NULL;
slapi_attr_get_type(entry_attr, &attr);
if (strcmp(attr, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) {
@@ -251,23 +272,7 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
addlenstr(l, ": ****************************\n");
continue;
}
-
- for(size_t i = 0; vals && vals[i]; i++) {
- char log_val[256] = {0};
-
- val = slapi_value_get_string(vals[i]);
- if (strlen(val) > 256) {
- strncpy(log_val, val, 252);
- strcat(log_val, "...");
- } else {
- strcpy(log_val, val);
- }
- addlenstr(l, "#");
- addlenstr(l, attr);
- addlenstr(l, ": ");
- addlenstr(l, log_val);
- addlenstr(l, "\n");
- }
+ log_entry_attr(entry_attr, attr, l);
}
}
slapi_ch_free_string(&display_attrs);
--
2.43.0
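
The new log_entry_attr() helper above centralizes the 256-byte truncation rule that the old code duplicated in two loops. A standalone sketch of just that rule; the name format_log_value and the caller-supplied 256-byte buffer are assumptions for the example. Note that the helper tests bv_len >= 256 where the old code tested > 256, so a value of exactly 256 characters can no longer overflow the buffer by one byte.

#include <string.h>

/* Copy a value into a fixed 256-byte log buffer: values that do not fit are
 * cut at 252 characters and suffixed with "...", shorter values are copied
 * verbatim and NUL-terminated. */
static void
format_log_value(const char *val, size_t val_len, char out[256])
{
    if (val_len >= 256) {
        memcpy(out, val, 252);
        memcpy(out + 252, "...", 4);   /* 4 bytes: "..." plus the trailing NUL */
    } else {
        memcpy(out, val, val_len);
        out[val_len] = '\0';
    }
}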

View File

@@ -0,0 +1,27 @@
From be7c2b82958e91ce08775bf6b5da3c311d3b00e5 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 20 Feb 2023 16:14:05 +0100
Subject: [PATCH 2/2] Issue 5647 - Fix unused variable warning from previous
commit (#5670)
* issue 5647 - memory leak in audit log when adding entries
* Issue 5647 - Fix unused variable warning from previous commit
---
ldap/servers/slapd/auditlog.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 3128e0497..0597ecc6f 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -254,7 +254,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
} else {
/* Return all attributes */
for (; entry_attr; entry_attr = entry_attr->a_next) {
- Slapi_Value **vals = attr_get_present_values(entry_attr);
char *attr = NULL;
slapi_attr_get_type(entry_attr, &attr);
--
2.43.0

View File

@@ -1,116 +0,0 @@
From 1426f086623404ab2eacb04de7e6414177c0993a Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Mon, 11 May 2020 17:11:49 +0200
Subject: [PATCH 02/12] Ticket 51082 - abort when an empty valueset is freed
Bug Description:
A large valueset (more than 10 values) manages a sorted array of values.
Replication purges old values from a valueset (valueset_array_purge). If it purges all the values,
the valueset is freed (slapi_valueset_done).
The problem is that the value counter in the valueset still reflects the initial number
of values (before the purge). When the valueset is freed (because it is empty), a safety check
detects incoherent values based on the wrong counter.
Fix Description:
When all the values have been purged, reset the counter before freeing the valueset.
https://pagure.io/389-ds-base/issue/51082
Reviewed by: Mark Reynolds
Platforms tested: F30
Flag Day: no
Doc impact: no
---
.../suites/replication/acceptance_test.py | 57 +++++++++++++++++++
ldap/servers/slapd/valueset.c | 4 ++
2 files changed, 61 insertions(+)
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
index c8e0a4c93..5009f4e7c 100644
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
@@ -500,6 +500,63 @@ def test_warining_for_invalid_replica(topo_m4):
assert topo_m4.ms["master1"].ds_error_log.match('.*nsds5ReplicaBackoffMax.*10.*invalid.*')
+@pytest.mark.ds51082
+def test_csnpurge_large_valueset(topo_m2):
+ """Test csn generator test
+
+ :id: 63e2bdb2-0a8f-4660-9465-7b80a9f72a74
+ :setup: MMR with 2 masters
+ :steps:
+ 1. Create a test_user
+ 2. add a large set of values (more than 10)
+ 3. delete all the values (more than 10)
+ 4. configure the replica to purge those values (purgedelay=5s)
+ 5. Waiting for 6 second
+ 6. do a series of update
+ :expectedresults:
+ 1. Should succeeds
+ 2. Should succeeds
+ 3. Should succeeds
+ 4. Should succeeds
+ 5. Should succeeds
+ 6. Should not crash
+ """
+ m1 = topo_m2.ms["master2"]
+
+ test_user = UserAccount(m1, TEST_ENTRY_DN)
+ if test_user.exists():
+ log.info('Deleting entry {}'.format(TEST_ENTRY_DN))
+ test_user.delete()
+ test_user.create(properties={
+ 'uid': TEST_ENTRY_NAME,
+ 'cn': TEST_ENTRY_NAME,
+ 'sn': TEST_ENTRY_NAME,
+ 'userPassword': TEST_ENTRY_NAME,
+ 'uidNumber' : '1000',
+ 'gidNumber' : '2000',
+ 'homeDirectory' : '/home/mmrepl_test',
+ })
+
+ # create a large value set so that it is sorted
+ for i in range(1,20):
+ test_user.add('description', 'value {}'.format(str(i)))
+
+ # delete all values of the valueset
+ for i in range(1,20):
+ test_user.remove('description', 'value {}'.format(str(i)))
+
+ # set purging delay to 5 seconds and wait more than 5 seconds
+ replicas = Replicas(m1)
+ replica = replicas.list()[0]
+ log.info('nsds5ReplicaPurgeDelay to 5')
+ replica.set('nsds5ReplicaPurgeDelay', '5')
+ time.sleep(6)
+
+ # add some new values to the valueset containing entries that should be purged
+ for i in range(21,25):
+ test_user.add('description', 'value {}'.format(str(i)))
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/valueset.c b/ldap/servers/slapd/valueset.c
index 2af3ee18d..12027ecb8 100644
--- a/ldap/servers/slapd/valueset.c
+++ b/ldap/servers/slapd/valueset.c
@@ -801,6 +801,10 @@ valueset_array_purge(const Slapi_Attr *a, Slapi_ValueSet *vs, const CSN *csn)
}
}
} else {
+ /* empty valueset - reset the vs->num so that further
+ * checking will not abort
+ */
+ vs->num = 0;
slapi_valueset_done(vs);
}
--
2.26.2

View File

@@ -1,45 +0,0 @@
From 7a62e72b81d75ebb844835619ecc97dbf5e21058 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 14 May 2020 09:38:20 -0400
Subject: [PATCH 03/12] Issue 51091 - healthcheck json report fails when
mapping tree is deleted
Description: We were passing the bename in bytes and not as a utf8 string.
This caused the json dumping to fail.
relates: https://pagure.io/389-ds-base/issue/51091
Reviewed by: firstyear(Thanks!)
---
src/lib389/lib389/backend.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
index e472d3de5..4f752f414 100644
--- a/src/lib389/lib389/backend.py
+++ b/src/lib389/lib389/backend.py
@@ -11,7 +11,7 @@ import copy
import ldap
from lib389._constants import *
from lib389.properties import *
-from lib389.utils import normalizeDN, ensure_str, ensure_bytes, assert_c
+from lib389.utils import normalizeDN, ensure_str, assert_c
from lib389 import Entry
# Need to fix this ....
@@ -488,10 +488,10 @@ class Backend(DSLdapObject):
# Check for the missing mapping tree.
suffix = self.get_attr_val_utf8('nsslapd-suffix')
- bename = self.get_attr_val_bytes('cn')
+ bename = self.get_attr_val_utf8('cn')
try:
mt = self._mts.get(suffix)
- if mt.get_attr_val_bytes('nsslapd-backend') != bename and mt.get_attr_val('nsslapd-state') != ensure_bytes('backend'):
+ if mt.get_attr_val_utf8('nsslapd-backend') != bename and mt.get_attr_val_utf8('nsslapd-state') != 'backend':
raise ldap.NO_SUCH_OBJECT("We have a matching suffix, but not a backend or correct database name.")
except ldap.NO_SUCH_OBJECT:
result = DSBLE0001
--
2.26.2

View File

@@ -0,0 +1,147 @@
From 692c4cec6cc5c0086cf58f83bcfa690c766c9887 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Fri, 2 Feb 2024 14:14:28 +0100
Subject: [PATCH] Issue 5407 - sync_repl crashes if enabled while dynamic
plugin is enabled (#5411)
Bug description:
When dynamic plugins are enabled, if a MOD operation enables the sync_repl plugin,
the sync_repl init function registers the postop callback,
which is then called for that same MOD even though the preop
has not been called.
The postop expects the preop to have run and therefore the primary
operation to be set; when it is not set, the server crashes.
Fix description:
If the primary operation is not set, just return
relates: #5407
---
.../suites/syncrepl_plugin/basic_test.py | 68 +++++++++++++++++++
ldap/servers/plugins/sync/sync_persist.c | 23 ++++++-
2 files changed, 90 insertions(+), 1 deletion(-)
diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
index eb3770b78..cdf35eeaa 100644
--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
@@ -592,6 +592,74 @@ def test_sync_repl_cenotaph(topo_m2, request):
request.addfinalizer(fin)
+def test_sync_repl_dynamic_plugin(topology, request):
+ """Test sync_repl with dynamic plugin
+
+ :id: d4f84913-c18a-459f-8525-110f610ca9e6
+ :setup: install a standalone instance
+ :steps:
+ 1. reset instance to standard (no retroCL, no sync_repl, no dynamic plugin)
+ 2. Enable dynamic plugin
+ 3. Enable retroCL/content_sync
+ 4. Establish a sync_repl req
+ :expectedresults:
+ 1. Should succeeds
+ 2. Should succeeds
+ 3. Should succeeds
+ 4. Should succeeds
+ """
+
+ # Reset the instance in a default config
+ # Disable content sync plugin
+ topology.standalone.plugins.disable(name=PLUGIN_REPL_SYNC)
+
+ # Disable retro changelog
+ topology.standalone.plugins.disable(name=PLUGIN_RETRO_CHANGELOG)
+
+ # Disable dynamic plugins
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'off')])
+ topology.standalone.restart()
+
+ # Now start the test
+ # Enable dynamic plugins
+ try:
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')])
+ except ldap.LDAPError as e:
+ log.error('Failed to enable dynamic plugin! {}'.format(e.args[0]['desc']))
+ assert False
+
+ # Enable retro changelog
+ topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+
+ # Enable content sync plugin
+ topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)
+
+ # create a sync repl client and wait 5 seconds to be sure it is running
+ sync_repl = Sync_persist(topology.standalone)
+ sync_repl.start()
+ time.sleep(5)
+
+ # create users
+ users = UserAccounts(topology.standalone, DEFAULT_SUFFIX)
+ users_set = []
+ for i in range(10001, 10004):
+ users_set.append(users.create_test_user(uid=i))
+
+ time.sleep(10)
+ # delete users so that automember/memberof will generate nested updates
+ for user in users_set:
+ user.delete()
+ # stop the server to get the sync_repl result set (exit from while loop).
+ # Only way I found to achieve that.
+ # and wait a bit to let sync_repl thread time to set its result before fetching it.
+ topology.standalone.stop()
+ sync_repl.get_result()
+ sync_repl.join()
+ log.info('test_sync_repl_dynamic_plugin: PASS\n')
+
+ # Success
+ log.info('Test complete')
+
def test_sync_repl_invalid_cookie(topology, request):
"""Test sync_repl with invalid cookie
diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c
index d2210b64c..283607361 100644
--- a/ldap/servers/plugins/sync/sync_persist.c
+++ b/ldap/servers/plugins/sync/sync_persist.c
@@ -156,6 +156,17 @@ ignore_op_pl(Slapi_PBlock *pb)
* This is the same for ident
*/
prim_op = get_thread_primary_op();
+ if (prim_op == NULL) {
+ /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
+ * The only known case it happens is with dynamic plugin enabled and an
+ * update that enables the sync_repl plugin. In such a case sync_repl registers
+ * the postop (sync_update_persist_op) that is called while the preop was not called
+ */
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
+ "ignore_op_pl - Operation without primary op set (0x%lx)\n",
+ (ulong) op);
+ return;
+ }
ident = sync_persist_get_operation_extension(pb);
if (ident) {
@@ -232,8 +243,18 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber
prim_op = get_thread_primary_op();
+ if (prim_op == NULL) {
+ /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
+ * The only known case it happens is with dynamic plugin enabled and an
+ * update that enables the sync_repl plugin. In such a case sync_repl registers
+ * the postop (sync_update_persist_op) that is called while the preop was not called
+ */
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
+ "sync_update_persist_op - Operation without primary op set (0x%lx)\n",
+ (ulong) pb_op);
+ return;
+ }
ident = sync_persist_get_operation_extension(pb);
- PR_ASSERT(prim_op);
if ((ident == NULL) && operation_is_flag_set(pb_op, OP_FLAG_NOOP)) {
/* This happens for URP (add cenotaph, fixup rename, tombstone resurrect)
--
2.43.0

View File

@@ -1,943 +0,0 @@
From f13d630ff98eb5b5505f1db3e7f207175b51b237 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 12 May 2020 13:48:30 -0400
Subject: [PATCH 04/12] Issue 51076 - remove unnecessary slapi entry dups
Description: So the problem is that slapi_search_internal_get_entry()
duplicates the entry twice. It does that as a convenience
where it will allocate a pblock, do the search, copy
the entry, free search results from the pblock, and then
free the pblock itself. I basically split this function
into two functions. One function allocates the pblock,
does the search and returns the entry. The other function
frees the entries and pblock.
99% of the time when we call slapi_search_internal_get_entry()
we are just reading the entry and freeing it; it is not being
consumed. In these cases the two-function
approach eliminates an extra slapi_entry_dup(). Over the
time of an operation/connection we can save quite a bit
of mallocing/freeing. This could also help with memory
fragmentation.
ASAN: passed
relates: https://pagure.io/389-ds-base/issue/51076
Reviewed by: firstyear & tbordaz(Thanks!)
---
ldap/servers/plugins/acctpolicy/acct_config.c | 6 +--
ldap/servers/plugins/acctpolicy/acct_plugin.c | 36 +++++++-------
ldap/servers/plugins/acctpolicy/acct_util.c | 6 +--
ldap/servers/plugins/automember/automember.c | 17 +++----
ldap/servers/plugins/dna/dna.c | 23 ++++-----
ldap/servers/plugins/memberof/memberof.c | 16 +++----
.../plugins/pam_passthru/pam_ptconfig.c | 10 ++--
.../servers/plugins/pam_passthru/pam_ptimpl.c | 7 +--
.../plugins/pam_passthru/pam_ptpreop.c | 9 ++--
.../plugins/replication/repl5_tot_protocol.c | 5 +-
ldap/servers/plugins/uiduniq/uid.c | 23 ++++-----
ldap/servers/slapd/daemon.c | 11 ++---
ldap/servers/slapd/modify.c | 12 +++--
ldap/servers/slapd/plugin_internal_op.c | 48 +++++++++++++++++++
ldap/servers/slapd/resourcelimit.c | 13 ++---
ldap/servers/slapd/schema.c | 7 ++-
ldap/servers/slapd/slapi-plugin.h | 23 ++++++++-
17 files changed, 161 insertions(+), 111 deletions(-)
diff --git a/ldap/servers/plugins/acctpolicy/acct_config.c b/ldap/servers/plugins/acctpolicy/acct_config.c
index fe35ba5a0..01e4f319f 100644
--- a/ldap/servers/plugins/acctpolicy/acct_config.c
+++ b/ldap/servers/plugins/acctpolicy/acct_config.c
@@ -37,6 +37,7 @@ static int acct_policy_entry2config(Slapi_Entry *e,
int
acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void *plugin_id)
{
+ Slapi_PBlock *entry_pb = NULL;
acctPluginCfg *newcfg;
Slapi_Entry *config_entry = NULL;
Slapi_DN *config_sdn = NULL;
@@ -44,8 +45,7 @@ acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void *
/* Retrieve the config entry */
config_sdn = slapi_sdn_new_normdn_byref(PLUGIN_CONFIG_DN);
- rc = slapi_search_internal_get_entry(config_sdn, NULL, &config_entry,
- plugin_id);
+ rc = slapi_search_get_entry(&entry_pb, config_sdn, NULL, &config_entry, plugin_id);
slapi_sdn_free(&config_sdn);
if (rc != LDAP_SUCCESS || config_entry == NULL) {
@@ -60,7 +60,7 @@ acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void *
rc = acct_policy_entry2config(config_entry, newcfg);
config_unlock();
- slapi_entry_free(config_entry);
+ slapi_search_get_entry_done(&entry_pb);
return (rc);
}
diff --git a/ldap/servers/plugins/acctpolicy/acct_plugin.c b/ldap/servers/plugins/acctpolicy/acct_plugin.c
index 2a876ad72..c3c32b074 100644
--- a/ldap/servers/plugins/acctpolicy/acct_plugin.c
+++ b/ldap/servers/plugins/acctpolicy/acct_plugin.c
@@ -209,6 +209,7 @@ done:
int
acct_bind_preop(Slapi_PBlock *pb)
{
+ Slapi_PBlock *entry_pb = NULL;
const char *dn = NULL;
Slapi_DN *sdn = NULL;
Slapi_Entry *target_entry = NULL;
@@ -236,8 +237,7 @@ acct_bind_preop(Slapi_PBlock *pb)
goto done;
}
- ldrc = slapi_search_internal_get_entry(sdn, NULL, &target_entry,
- plugin_id);
+ ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &target_entry, plugin_id);
/* There was a problem retrieving the entry */
if (ldrc != LDAP_SUCCESS) {
@@ -275,7 +275,7 @@ done:
slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, NULL, 0, NULL);
}
- slapi_entry_free(target_entry);
+ slapi_search_get_entry_done(&entry_pb);
free_acctpolicy(&policy);
@@ -293,6 +293,7 @@ done:
int
acct_bind_postop(Slapi_PBlock *pb)
{
+ Slapi_PBlock *entry_pb = NULL;
char *dn = NULL;
int ldrc, tracklogin = 0;
int rc = 0; /* Optimistic default */
@@ -327,8 +328,7 @@ acct_bind_postop(Slapi_PBlock *pb)
covered by an account policy to decide whether we should track */
if (tracklogin == 0) {
sdn = slapi_sdn_new_normdn_byref(dn);
- ldrc = slapi_search_internal_get_entry(sdn, NULL, &target_entry,
- plugin_id);
+ ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &target_entry, plugin_id);
if (ldrc != LDAP_SUCCESS) {
slapi_log_err(SLAPI_LOG_ERR, POST_PLUGIN_NAME,
@@ -355,7 +355,7 @@ done:
slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, NULL, 0, NULL);
}
- slapi_entry_free(target_entry);
+ slapi_search_get_entry_done(&entry_pb);
slapi_sdn_free(&sdn);
@@ -370,11 +370,11 @@ done:
static int
acct_pre_op(Slapi_PBlock *pb, int modop)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_DN *sdn = 0;
Slapi_Entry *e = 0;
Slapi_Mods *smods = 0;
LDAPMod **mods;
- int free_entry = 0;
char *errstr = NULL;
int ret = SLAPI_PLUGIN_SUCCESS;
@@ -384,28 +384,25 @@ acct_pre_op(Slapi_PBlock *pb, int modop)
if (acct_policy_dn_is_config(sdn)) {
/* Validate config changes, but don't apply them.
- * This allows us to reject invalid config changes
- * here at the pre-op stage. Applying the config
- * needs to be done at the post-op stage. */
+ * This allows us to reject invalid config changes
+ * here at the pre-op stage. Applying the config
+ * needs to be done at the post-op stage. */
if (LDAP_CHANGETYPE_ADD == modop) {
slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &e);
- /* If the entry doesn't exist, just bail and
- * let the server handle it. */
+ /* If the entry doesn't exist, just bail and let the server handle it. */
if (e == NULL) {
goto bail;
}
} else if (LDAP_CHANGETYPE_MODIFY == modop) {
/* Fetch the entry being modified so we can
- * create the resulting entry for validation. */
+ * create the resulting entry for validation. */
if (sdn) {
- slapi_search_internal_get_entry(sdn, 0, &e, get_identity());
- free_entry = 1;
+ slapi_search_get_entry(&entry_pb, sdn, 0, &e, get_identity());
}
- /* If the entry doesn't exist, just bail and
- * let the server handle it. */
+ /* If the entry doesn't exist, just bail and let the server handle it. */
if (e == NULL) {
goto bail;
}
@@ -418,7 +415,7 @@ acct_pre_op(Slapi_PBlock *pb, int modop)
/* Apply the mods to create the resulting entry. */
if (mods && (slapi_entry_apply_mods(e, mods) != LDAP_SUCCESS)) {
/* The mods don't apply cleanly, so we just let this op go
- * to let the main server handle it. */
+ * to let the main server handle it. */
goto bailmod;
}
} else if (modop == LDAP_CHANGETYPE_DELETE) {
@@ -439,8 +436,7 @@ bailmod:
}
bail:
- if (free_entry && e)
- slapi_entry_free(e);
+ slapi_search_get_entry_done(&entry_pb);
if (ret) {
slapi_log_err(SLAPI_LOG_PLUGIN, PRE_PLUGIN_NAME,
diff --git a/ldap/servers/plugins/acctpolicy/acct_util.c b/ldap/servers/plugins/acctpolicy/acct_util.c
index f25a3202d..f432092fe 100644
--- a/ldap/servers/plugins/acctpolicy/acct_util.c
+++ b/ldap/servers/plugins/acctpolicy/acct_util.c
@@ -85,6 +85,7 @@ get_attr_string_val(Slapi_Entry *target_entry, char *attr_name)
int
get_acctpolicy(Slapi_PBlock *pb __attribute__((unused)), Slapi_Entry *target_entry, void *plugin_id, acctPolicy **policy)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_DN *sdn = NULL;
Slapi_Entry *policy_entry = NULL;
Slapi_Attr *attr;
@@ -123,8 +124,7 @@ get_acctpolicy(Slapi_PBlock *pb __attribute__((unused)), Slapi_Entry *target_ent
}
sdn = slapi_sdn_new_dn_byref(policy_dn);
- ldrc = slapi_search_internal_get_entry(sdn, NULL, &policy_entry,
- plugin_id);
+ ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &policy_entry, plugin_id);
slapi_sdn_free(&sdn);
/* There should be a policy but it can't be retrieved; fatal error */
@@ -160,7 +160,7 @@ dopolicy:
done:
config_unlock();
slapi_ch_free_string(&policy_dn);
- slapi_entry_free(policy_entry);
+ slapi_search_get_entry_done(&entry_pb);
return (rc);
}
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
index 7c875c852..39350ad53 100644
--- a/ldap/servers/plugins/automember/automember.c
+++ b/ldap/servers/plugins/automember/automember.c
@@ -1629,13 +1629,12 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
char *member_value = NULL;
int rc = 0;
Slapi_DN *group_sdn;
- Slapi_Entry *group_entry = NULL;
/* First thing check that the group still exists */
group_sdn = slapi_sdn_new_dn_byval(group_dn);
- rc = slapi_search_internal_get_entry(group_sdn, NULL, &group_entry, automember_get_plugin_id());
+ rc = slapi_search_internal_get_entry(group_sdn, NULL, NULL, automember_get_plugin_id());
slapi_sdn_free(&group_sdn);
- if (rc != LDAP_SUCCESS || group_entry == NULL) {
+ if (rc != LDAP_SUCCESS) {
if (rc == LDAP_NO_SUCH_OBJECT) {
/* the automember group (default or target) does not exist, just skip this definition */
slapi_log_err(SLAPI_LOG_INFO, AUTOMEMBER_PLUGIN_SUBSYSTEM,
@@ -1647,10 +1646,8 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
"automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n",
group_dn, rc);
}
- slapi_entry_free(group_entry);
return rc;
}
- slapi_entry_free(group_entry);
/* If grouping_value is dn, we need to fetch the dn instead. */
if (slapi_attr_type_cmp(grouping_value, "dn", SLAPI_TYPE_CMP_EXACT) == 0) {
@@ -1752,11 +1749,11 @@ out:
static int
automember_pre_op(Slapi_PBlock *pb, int modop)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_DN *sdn = 0;
Slapi_Entry *e = 0;
Slapi_Mods *smods = 0;
LDAPMod **mods;
- int free_entry = 0;
char *errstr = NULL;
int ret = SLAPI_PLUGIN_SUCCESS;
@@ -1784,8 +1781,7 @@ automember_pre_op(Slapi_PBlock *pb, int modop)
/* Fetch the entry being modified so we can
* create the resulting entry for validation. */
if (sdn) {
- slapi_search_internal_get_entry(sdn, 0, &e, automember_get_plugin_id());
- free_entry = 1;
+ slapi_search_get_entry(&entry_pb, sdn, 0, &e, automember_get_plugin_id());
}
/* If the entry doesn't exist, just bail and
@@ -1799,7 +1795,7 @@ automember_pre_op(Slapi_PBlock *pb, int modop)
smods = slapi_mods_new();
slapi_mods_init_byref(smods, mods);
- /* Apply the mods to create the resulting entry. */
+ /* Apply the mods to create the resulting entry. */
if (mods && (slapi_entry_apply_mods(e, mods) != LDAP_SUCCESS)) {
/* The mods don't apply cleanly, so we just let this op go
* to let the main server handle it. */
@@ -1831,8 +1827,7 @@ bailmod:
}
bail:
- if (free_entry && e)
- slapi_entry_free(e);
+ slapi_search_get_entry_done(&entry_pb);
if (ret) {
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index 1ee271359..16c625bb0 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -1178,7 +1178,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
value = slapi_entry_attr_get_charptr(e, DNA_SHARED_CFG_DN);
if (value) {
- Slapi_Entry *shared_e = NULL;
Slapi_DN *sdn = NULL;
char *normdn = NULL;
char *attrs[2];
@@ -1197,10 +1196,8 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
/* We don't need attributes */
attrs[0] = "cn";
attrs[1] = NULL;
- slapi_search_internal_get_entry(sdn, attrs, &shared_e, getPluginID());
-
/* Make sure that the shared config entry exists. */
- if (!shared_e) {
+ if(slapi_search_internal_get_entry(sdn, attrs, NULL, getPluginID()) != LDAP_SUCCESS) {
/* We didn't locate the shared config container entry. Log
* a message and skip this config entry. */
slapi_log_err(SLAPI_LOG_ERR, DNA_PLUGIN_SUBSYSTEM,
@@ -1210,9 +1207,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply)
ret = DNA_FAILURE;
slapi_sdn_free(&sdn);
goto bail;
- } else {
- slapi_entry_free(shared_e);
- shared_e = NULL;
}
normdn = (char *)slapi_sdn_get_dn(sdn);
@@ -1539,6 +1533,7 @@ dna_delete_shared_servers(PRCList **servers)
static int
dna_load_host_port(void)
{
+ Slapi_PBlock *pb = NULL;
int status = DNA_SUCCESS;
Slapi_Entry *e = NULL;
Slapi_DN *config_dn = NULL;
@@ -1554,7 +1549,7 @@ dna_load_host_port(void)
config_dn = slapi_sdn_new_ndn_byref("cn=config");
if (config_dn) {
- slapi_search_internal_get_entry(config_dn, attrs, &e, getPluginID());
+ slapi_search_get_entry(&pb, config_dn, attrs, &e, getPluginID());
slapi_sdn_free(&config_dn);
}
@@ -1562,8 +1557,8 @@ dna_load_host_port(void)
hostname = slapi_entry_attr_get_charptr(e, "nsslapd-localhost");
portnum = slapi_entry_attr_get_charptr(e, "nsslapd-port");
secureportnum = slapi_entry_attr_get_charptr(e, "nsslapd-secureport");
- slapi_entry_free(e);
}
+ slapi_search_get_entry_done(&pb);
if (!hostname || !portnum) {
status = DNA_FAILURE;
@@ -2876,6 +2871,7 @@ bail:
static int
dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
{
+ Slapi_PBlock *entry_pb = NULL;
char *replica_dn = NULL;
Slapi_DN *replica_sdn = NULL;
Slapi_DN *range_sdn = NULL;
@@ -2912,8 +2908,7 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
attrs[2] = 0;
/* Find cn=replica entry via search */
- slapi_search_internal_get_entry(replica_sdn, attrs, &e, getPluginID());
-
+ slapi_search_get_entry(&entry_pb, replica_sdn, attrs, &e, getPluginID());
if (e) {
/* Check if the passed in bind dn matches any of the replica bind dns. */
Slapi_Value *bind_dn_sv = slapi_value_new_string(bind_dn);
@@ -2927,6 +2922,7 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
attrs[0] = "member";
attrs[1] = "uniquemember";
attrs[2] = 0;
+ slapi_search_get_entry_done(&entry_pb);
for (i = 0; bind_group_dn != NULL && bind_group_dn[i] != NULL; i++) {
if (ret) {
/* already found a member, just free group */
@@ -2934,14 +2930,14 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
continue;
}
bind_group_sdn = slapi_sdn_new_normdn_passin(bind_group_dn[i]);
- slapi_search_internal_get_entry(bind_group_sdn, attrs, &bind_group_entry, getPluginID());
+ slapi_search_get_entry(&entry_pb, bind_group_sdn, attrs, &bind_group_entry, getPluginID());
if (bind_group_entry) {
ret = slapi_entry_attr_has_syntax_value(bind_group_entry, "member", bind_dn_sv);
if (ret == 0) {
ret = slapi_entry_attr_has_syntax_value(bind_group_entry, "uniquemember", bind_dn_sv);
}
}
- slapi_entry_free(bind_group_entry);
+ slapi_search_get_entry_done(&entry_pb);
slapi_sdn_free(&bind_group_sdn);
}
slapi_ch_free((void **)&bind_group_dn);
@@ -2956,7 +2952,6 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn)
}
done:
- slapi_entry_free(e);
slapi_sdn_free(&range_sdn);
slapi_sdn_free(&replica_sdn);
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index 40bd4b380..e9e1ec4c7 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -884,7 +884,7 @@ memberof_postop_modrdn(Slapi_PBlock *pb)
pre_sdn = slapi_entry_get_sdn(pre_e);
post_sdn = slapi_entry_get_sdn(post_e);
}
-
+
if (pre_sdn && post_sdn && slapi_sdn_compare(pre_sdn, post_sdn) == 0) {
/* Regarding memberof plugin, this rename is a no-op
* but it can be expensive to process it. So skip it
@@ -1466,6 +1466,7 @@ memberof_modop_one_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_op, Slapi
int
memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_op, Slapi_DN *group_sdn, Slapi_DN *op_this_sdn, Slapi_DN *replace_with_sdn, Slapi_DN *op_to_sdn, memberofstringll *stack)
{
+ Slapi_PBlock *entry_pb = NULL;
int rc = 0;
LDAPMod mod;
LDAPMod replace_mod;
@@ -1515,8 +1516,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o
}
/* determine if this is a group op or single entry */
- slapi_search_internal_get_entry(op_to_sdn, config->groupattrs,
- &e, memberof_get_plugin_id());
+ slapi_search_get_entry(&entry_pb, op_to_sdn, config->groupattrs, &e, memberof_get_plugin_id());
if (!e) {
/* In the case of a delete, we need to worry about the
* missing entry being a nested group. There's a small
@@ -1751,7 +1751,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o
bail:
slapi_value_free(&to_dn_val);
slapi_value_free(&this_dn_val);
- slapi_entry_free(e);
+ slapi_search_get_entry_done(&entry_pb);
return rc;
}
@@ -2368,6 +2368,7 @@ bail:
int
memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Value *memberdn)
{
+ Slapi_PBlock *pb = NULL;
int rc = 0;
Slapi_DN *sdn = 0;
Slapi_Entry *group_e = 0;
@@ -2376,8 +2377,8 @@ memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Va
sdn = slapi_sdn_new_normdn_byref(slapi_value_get_string(groupdn));
- slapi_search_internal_get_entry(sdn, config->groupattrs,
- &group_e, memberof_get_plugin_id());
+ slapi_search_get_entry(&pb, sdn, config->groupattrs,
+ &group_e, memberof_get_plugin_id());
if (group_e) {
/* See if memberdn is referred to by any of the group attributes. */
@@ -2388,9 +2389,8 @@ memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Va
break;
}
}
-
- slapi_entry_free(group_e);
}
+ slapi_search_get_entry_done(&pb);
slapi_sdn_free(&sdn);
return rc;
diff --git a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
index 46a76d884..cbec2ec40 100644
--- a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
+++ b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
@@ -749,22 +749,22 @@ pam_passthru_get_config(Slapi_DN *bind_sdn)
if (pam_passthru_check_suffix(cfg, bind_sdn) == LDAP_SUCCESS) {
if (cfg->slapi_filter) {
/* A filter is configured, so see if the bind entry is a match. */
+ Slapi_PBlock *entry_pb = NULL;
Slapi_Entry *test_e = NULL;
/* Fetch the bind entry */
- slapi_search_internal_get_entry(bind_sdn, NULL, &test_e,
- pam_passthruauth_get_plugin_identity());
+ slapi_search_get_entry(&entry_pb, bind_sdn, NULL, &test_e,
+ pam_passthruauth_get_plugin_identity());
/* If the entry doesn't exist, just fall through to the main server code */
if (test_e) {
/* Evaluate the filter. */
if (LDAP_SUCCESS == slapi_filter_test_simple(test_e, cfg->slapi_filter)) {
/* This is a match. */
- slapi_entry_free(test_e);
+ slapi_search_get_entry_done(&entry_pb);
goto done;
}
-
- slapi_entry_free(test_e);
+ slapi_search_get_entry_done(&entry_pb);
}
} else {
/* There is no filter to check, so this is a match. */
diff --git a/ldap/servers/plugins/pam_passthru/pam_ptimpl.c b/ldap/servers/plugins/pam_passthru/pam_ptimpl.c
index 7f5fb02c4..5b43f8d1f 100644
--- a/ldap/servers/plugins/pam_passthru/pam_ptimpl.c
+++ b/ldap/servers/plugins/pam_passthru/pam_ptimpl.c
@@ -81,11 +81,12 @@ derive_from_bind_dn(Slapi_PBlock *pb __attribute__((unused)), const Slapi_DN *bi
static char *
derive_from_bind_entry(Slapi_PBlock *pb, const Slapi_DN *bindsdn, MyStrBuf *pam_id, char *map_ident_attr, int *locked)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_Entry *entry = NULL;
char *attrs[] = {NULL, NULL};
attrs[0] = map_ident_attr;
- int rc = slapi_search_internal_get_entry((Slapi_DN *)bindsdn, attrs, &entry,
- pam_passthruauth_get_plugin_identity());
+ int32_t rc = slapi_search_get_entry(&entry_pb, (Slapi_DN *)bindsdn, attrs, &entry,
+ pam_passthruauth_get_plugin_identity());
if (rc != LDAP_SUCCESS) {
slapi_log_err(SLAPI_LOG_ERR, PAM_PASSTHRU_PLUGIN_SUBSYSTEM,
@@ -108,7 +109,7 @@ derive_from_bind_entry(Slapi_PBlock *pb, const Slapi_DN *bindsdn, MyStrBuf *pam_
init_my_str_buf(pam_id, val);
}
- slapi_entry_free(entry);
+ slapi_search_get_entry_done(&entry_pb);
return pam_id->str;
}
diff --git a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c
index 3d0067531..5bca823ff 100644
--- a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c
+++ b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c
@@ -526,6 +526,7 @@ done:
static int
pam_passthru_preop(Slapi_PBlock *pb, int modtype)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_DN *sdn = NULL;
Slapi_Entry *e = NULL;
LDAPMod **mods;
@@ -555,8 +556,8 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype)
case LDAP_CHANGETYPE_MODIFY:
/* Fetch the entry being modified so we can
* create the resulting entry for validation. */
- slapi_search_internal_get_entry(sdn, 0, &e,
- pam_passthruauth_get_plugin_identity());
+ slapi_search_get_entry(&entry_pb, sdn, 0, &e,
+ pam_passthruauth_get_plugin_identity());
/* If the entry doesn't exist, just bail and
* let the server handle it. */
@@ -576,9 +577,6 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype)
/* Don't bail here, as we need to free the entry. */
}
}
-
- /* Free the entry. */
- slapi_entry_free(e);
break;
case LDAP_CHANGETYPE_DELETE:
case LDAP_CHANGETYPE_MODDN:
@@ -591,6 +589,7 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype)
}
bail:
+ slapi_search_get_entry_done(&entry_pb);
/* If we are refusing the operation, return the result to the client. */
if (ret) {
slapi_send_ldap_result(pb, ret, NULL, returntext, 0, NULL);
diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
index 3b65d6b20..a25839f21 100644
--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
@@ -469,7 +469,8 @@ retry:
*/
/* Get suffix */
Slapi_Entry *suffix = NULL;
- rc = slapi_search_internal_get_entry(area_sdn, NULL, &suffix, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION));
+ Slapi_PBlock *suffix_pb = NULL;
+ rc = slapi_search_get_entry(&suffix_pb, area_sdn, NULL, &suffix, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION));
if (rc) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "repl5_tot_run - Unable to "
"get the suffix entry \"%s\".\n",
@@ -517,7 +518,7 @@ retry:
LDAP_SCOPE_SUBTREE, "(parentid>=1)", NULL, 0, ctrls, NULL,
repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), OP_FLAG_BULK_IMPORT);
cb_data.num_entries = 0UL;
- slapi_entry_free(suffix);
+ slapi_search_get_entry_done(&suffix_pb);
} else {
/* Original total update */
/* we need to provide managedsait control so that referral entries can
diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
index d7ccf0e07..e69012204 100644
--- a/ldap/servers/plugins/uiduniq/uid.c
+++ b/ldap/servers/plugins/uiduniq/uid.c
@@ -1254,6 +1254,7 @@ preop_modify(Slapi_PBlock *pb)
static int
preop_modrdn(Slapi_PBlock *pb)
{
+ Slapi_PBlock *entry_pb = NULL;
int result = LDAP_SUCCESS;
Slapi_Entry *e = NULL;
Slapi_Value *sv_requiredObjectClass = NULL;
@@ -1351,7 +1352,7 @@ preop_modrdn(Slapi_PBlock *pb)
/* Get the entry that is being renamed so we can make a dummy copy
* of what it will look like after the rename. */
- err = slapi_search_internal_get_entry(sdn, NULL, &e, plugin_identity);
+ err = slapi_search_get_entry(&entry_pb, sdn, NULL, &e, plugin_identity);
if (err != LDAP_SUCCESS) {
result = uid_op_error(35);
/* We want to return a no such object error if the target doesn't exist. */
@@ -1371,24 +1372,24 @@ preop_modrdn(Slapi_PBlock *pb)
/*
- * Check if it has the required object class
- */
+ * Check if it has the required object class
+ */
if (requiredObjectClass &&
!slapi_entry_attr_has_syntax_value(e, SLAPI_ATTR_OBJECTCLASS, sv_requiredObjectClass)) {
break;
}
/*
- * Find any unique attribute data in the new RDN
- */
+ * Find any unique attribute data in the new RDN
+ */
for (i = 0; attrNames && attrNames[i]; i++) {
err = slapi_entry_attr_find(e, attrNames[i], &attr);
if (!err) {
/*
- * Passed all the requirements - this is an operation we
- * need to enforce uniqueness on. Now find all parent entries
- * with the marker object class, and do a search for each one.
- */
+ * Passed all the requirements - this is an operation we
+ * need to enforce uniqueness on. Now find all parent entries
+ * with the marker object class, and do a search for each one.
+ */
if (NULL != markerObjectClass) {
/* Subtree defined by location of marker object class */
result = findSubtreeAndSearch(slapi_entry_get_sdn(e), attrNames, attr, NULL,
@@ -1407,8 +1408,8 @@ preop_modrdn(Slapi_PBlock *pb)
END
/* Clean-up */
slapi_value_free(&sv_requiredObjectClass);
- if (e)
- slapi_entry_free(e);
+
+ slapi_search_get_entry_done(&entry_pb);
if (result) {
slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name,
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 65f23363a..a70f40316 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1916,18 +1916,13 @@ slapd_bind_local_user(Connection *conn)
char *root_dn = config_get_ldapi_root_dn();
if (root_dn) {
+ Slapi_PBlock *entry_pb = NULL;
Slapi_DN *edn = slapi_sdn_new_dn_byref(
slapi_dn_normalize(root_dn));
Slapi_Entry *e = 0;
/* root might be locked too! :) */
- ret = slapi_search_internal_get_entry(
- edn, 0,
- &e,
- (void *)plugin_get_default_component_id()
-
- );
-
+ ret = slapi_search_get_entry(&entry_pb, edn, 0, &e, (void *)plugin_get_default_component_id());
if (0 == ret && e) {
ret = slapi_check_account_lock(
0, /* pb not req */
@@ -1955,7 +1950,7 @@ slapd_bind_local_user(Connection *conn)
root_map_free:
/* root_dn consumed by bind creds set */
slapi_sdn_free(&edn);
- slapi_entry_free(e);
+ slapi_search_get_entry_done(&entry_pb);
ret = 0;
}
}
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
index bbc0ab71a..259bedfff 100644
--- a/ldap/servers/slapd/modify.c
+++ b/ldap/servers/slapd/modify.c
@@ -592,6 +592,7 @@ modify_internal_pb(Slapi_PBlock *pb)
static void
op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
{
+ Slapi_PBlock *entry_pb = NULL;
Slapi_Backend *be = NULL;
Slapi_Entry *pse;
Slapi_Entry *referral;
@@ -723,7 +724,7 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
* 2. If yes, then if the mods contain any passwdpolicy specific attributes.
* 3. If yes, then it invokes corrosponding checking function.
*/
- if (!repl_op && !internal_op && normdn && (e = get_entry(pb, normdn))) {
+ if (!repl_op && !internal_op && normdn && slapi_search_get_entry(&entry_pb, sdn, NULL, &e, NULL) == LDAP_SUCCESS) {
Slapi_Value target;
slapi_value_init(&target);
slapi_value_set_string(&target, "passwordpolicy");
@@ -1072,7 +1073,7 @@ free_and_return : {
slapi_entry_free(epre);
slapi_entry_free(epost);
}
- slapi_entry_free(e);
+ slapi_search_get_entry_done(&entry_pb);
if (be)
slapi_be_Unlock(be);
@@ -1202,12 +1203,13 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M
if (!internal_op) {
/* slapi_acl_check_mods needs an array of LDAPMods, but
* we're really only interested in the one password mod. */
+ Slapi_PBlock *entry_pb = NULL;
LDAPMod *mods[2];
mods[0] = mod;
mods[1] = NULL;
/* We need to actually fetch the target here to use for ACI checking. */
- slapi_search_internal_get_entry(&sdn, NULL, &e, (void *)plugin_get_default_component_id());
+ slapi_search_get_entry(&entry_pb, &sdn, NULL, &e, NULL);
/* Create a bogus entry with just the target dn if we were unable to
* find the actual entry. This will only be used for checking the ACIs. */
@@ -1238,9 +1240,12 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M
}
send_ldap_result(pb, res, NULL, errtxt, 0, NULL);
slapi_ch_free_string(&errtxt);
+ slapi_search_get_entry_done(&entry_pb);
rc = -1;
goto done;
}
+ /* done with slapi entry e */
+ slapi_search_get_entry_done(&entry_pb);
/*
* If this mod is being performed by a password administrator/rootDN,
@@ -1353,7 +1358,6 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M
valuearray_free(&values);
done:
- slapi_entry_free(e);
slapi_sdn_done(&sdn);
slapi_ch_free_string(&proxydn);
slapi_ch_free_string(&proxystr);
diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c
index 9da266b61..a140e7988 100644
--- a/ldap/servers/slapd/plugin_internal_op.c
+++ b/ldap/servers/slapd/plugin_internal_op.c
@@ -882,3 +882,51 @@ slapi_search_internal_get_entry(Slapi_DN *dn, char **attrs, Slapi_Entry **ret_en
int_search_pb = NULL;
return rc;
}
+
+int32_t
+slapi_search_get_entry(Slapi_PBlock **pb, Slapi_DN *dn, char **attrs, Slapi_Entry **ret_entry, void *component_identity)
+{
+ Slapi_Entry **entries = NULL;
+ int32_t rc = 0;
+ void *component = component_identity;
+
+ if (ret_entry) {
+ *ret_entry = NULL;
+ }
+
+ if (component == NULL) {
+ component = (void *)plugin_get_default_component_id();
+ }
+
+ if (*pb == NULL) {
+ *pb = slapi_pblock_new();
+ }
+ slapi_search_internal_set_pb(*pb, slapi_sdn_get_dn(dn), LDAP_SCOPE_BASE,
+ "(|(objectclass=*)(objectclass=ldapsubentry))",
+ attrs, 0, NULL, NULL, component, 0 );
+ slapi_search_internal_pb(*pb);
+ slapi_pblock_get(*pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
+ if (LDAP_SUCCESS == rc) {
+ slapi_pblock_get(*pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
+ if (NULL != entries && NULL != entries[0]) {
+ /* Only need to dup the entry if the caller passed ret_entry in. */
+ if (ret_entry) {
+ *ret_entry = entries[0];
+ }
+ } else {
+ rc = LDAP_NO_SUCH_OBJECT;
+ }
+ }
+
+ return rc;
+}
+
+void
+slapi_search_get_entry_done(Slapi_PBlock **pb)
+{
+ if (pb && *pb) {
+ slapi_free_search_results_internal(*pb);
+ slapi_pblock_destroy(*pb);
+ *pb = NULL;
+ }
+}
diff --git a/ldap/servers/slapd/resourcelimit.c b/ldap/servers/slapd/resourcelimit.c
index 705344c84..9c2619716 100644
--- a/ldap/servers/slapd/resourcelimit.c
+++ b/ldap/servers/slapd/resourcelimit.c
@@ -305,22 +305,17 @@ reslimit_get_ext(Slapi_Connection *conn, const char *logname, SLAPIResLimitConnD
int
reslimit_update_from_dn(Slapi_Connection *conn, Slapi_DN *dn)
{
- Slapi_Entry *e;
+ Slapi_PBlock *pb = NULL;
+ Slapi_Entry *e = NULL;
int rc;
- e = NULL;
if (dn != NULL) {
-
char **attrs = reslimit_get_registered_attributes();
- (void)slapi_search_internal_get_entry(dn, attrs, &e, reslimit_componentid);
+ slapi_search_get_entry(&pb, dn, attrs, &e, reslimit_componentid);
charray_free(attrs);
}
-
rc = reslimit_update_from_entry(conn, e);
-
- if (NULL != e) {
- slapi_entry_free(e);
- }
+ slapi_search_get_entry_done(&pb);
return (rc);
}
diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c
index d44b03b0e..bf7e59f75 100644
--- a/ldap/servers/slapd/schema.c
+++ b/ldap/servers/slapd/schema.c
@@ -341,6 +341,7 @@ schema_policy_add_action(Slapi_Entry *entry, char *attrName, schema_item_t **lis
static void
schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica)
{
+ Slapi_PBlock *pb = NULL;
Slapi_DN sdn;
Slapi_Entry *entry = NULL;
schema_item_t *schema_item, *next;
@@ -369,8 +370,7 @@ schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica)
/* Load the replication policy of the schema */
slapi_sdn_init_dn_byref(&sdn, dn);
- if (slapi_search_internal_get_entry(&sdn, NULL, &entry, plugin_get_default_component_id()) == LDAP_SUCCESS) {
-
+ if (slapi_search_get_entry(&pb, &sdn, NULL, &entry, plugin_get_default_component_id()) == LDAP_SUCCESS) {
/* fill the policies (accept/reject) regarding objectclass */
schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_OBJECTCLASS_ACCEPT, &replica->objectclasses);
schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_OBJECTCLASS_REJECT, &replica->objectclasses);
@@ -378,9 +378,8 @@ schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica)
/* fill the policies (accept/reject) regarding attribute */
schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_ATTRIBUTE_ACCEPT, &replica->attributes);
schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_ATTRIBUTE_REJECT, &replica->attributes);
-
- slapi_entry_free(entry);
}
+ slapi_search_get_entry_done(&pb);
slapi_sdn_done(&sdn);
}
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 0e3857068..be1e52e4d 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -5972,7 +5972,7 @@ void slapi_seq_internal_set_pb(Slapi_PBlock *pb, char *ibase, int type, char *at
/*
* slapi_search_internal_get_entry() finds an entry given a dn. It returns
- * an LDAP error code (LDAP_SUCCESS if all goes well).
+ * an LDAP error code (LDAP_SUCCESS if all goes well). Caller must free ret_entry
*/
int slapi_search_internal_get_entry(Slapi_DN *dn, char **attrlist, Slapi_Entry **ret_entry, void *caller_identity);
@@ -8296,6 +8296,27 @@ uint64_t slapi_atomic_decr_64(uint64_t *ptr, int memorder);
/* helper function */
const char * slapi_fetch_attr(Slapi_Entry *e, const char *attrname, char *default_val);
+/**
+ * Get a Slapi_Entry via an internal search. The caller then needs to call
+ * slapi_search_get_entry_done() to free any resources allocated to get the entry
+ *
+ * \param pb - slapi_pblock pointer (the function will allocate if necessary)
+ * \param dn - Slapi_DN of the entry to retrieve
+ * \param attrs - char list of attributes to get
+ * \param ret_entry - pointer to a Slapi_Entry where the returned entry is stored
+ * \param component_identity - plugin component
+ *
+ * \return - ldap result code
+ */
+int32_t slapi_search_get_entry(Slapi_PBlock **pb, Slapi_DN *dn, char **attrs, Slapi_Entry **ret_entry, void *component_identity);
+
+/**
+ * Free the resources allocated by slapi_search_get_entry()
+ *
+ * \param pb - slapi_pblock pointer
+ */
+void slapi_search_get_entry_done(Slapi_PBlock **pb);
+
#ifdef __cplusplus
}
#endif
--
2.26.2
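
The slapi_search_get_entry()/slapi_search_get_entry_done() pair added above turns the old fetch-copy-free pattern into fetch-then-release, with the returned entry staying owned by the pblock. A short usage sketch of the call pattern this patch applies throughout the tree; target_sdn and plugin_id are assumed to be already available in the calling plugin.

Slapi_PBlock *entry_pb = NULL;
Slapi_Entry *e = NULL;
int32_t rc;

rc = slapi_search_get_entry(&entry_pb, target_sdn, NULL, &e, plugin_id);
if (rc == LDAP_SUCCESS && e != NULL) {
    /* read-only use: the entry is not a copy, it belongs to entry_pb */
    char *cn = slapi_entry_attr_get_charptr(e, "cn");
    /* ... use cn ... */
    slapi_ch_free_string(&cn);
}
/* frees the internal search results and the pblock in one call;
 * never call slapi_entry_free() on e */
slapi_search_get_entry_done(&entry_pb);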

View File

@@ -0,0 +1,840 @@
From 8dc61a176323f0d41df730abd715ccff3034c2be Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Sun, 27 Nov 2022 09:37:19 -0500
Subject: [PATCH] Issue 5547 - automember plugin improvements
Description:
Rebuild task has the following improvements:
- Only one task allowed at a time
- Do not clean up previous members by default. Add a new CLI option to intentionally
clean up memberships before rebuilding from scratch.
- Add better task logging to show fixup progress
To prevent automember from being called in a nested be_txn loop thread storage is
used to check and skip these loops.
relates: https://github.com/389ds/389-ds-base/issues/5547
Reviewed by: spichugi(Thanks!)
---
.../automember_plugin/automember_mod_test.py | 43 +++-
ldap/servers/plugins/automember/automember.c | 232 ++++++++++++++----
ldap/servers/slapd/back-ldbm/ldbm_add.c | 11 +-
ldap/servers/slapd/back-ldbm/ldbm_delete.c | 10 +-
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 11 +-
.../lib389/cli_conf/plugins/automember.py | 10 +-
src/lib389/lib389/plugins.py | 7 +-
src/lib389/lib389/tasks.py | 9 +-
8 files changed, 250 insertions(+), 83 deletions(-)
diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
index 8d25384bf..7a0ed3275 100644
--- a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
+++ b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
@@ -5,12 +5,13 @@
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
-#
+import ldap
import logging
import pytest
import os
+import time
from lib389.utils import ds_is_older
-from lib389._constants import *
+from lib389._constants import DEFAULT_SUFFIX
from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions
from lib389.idm.user import UserAccounts
from lib389.idm.group import Groups
@@ -41,6 +42,11 @@ def automember_fixture(topo, request):
user_accts = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
user = user_accts.create_test_user()
+ # Create extra users
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
+ for i in range(0, 100):
+ users.create_test_user(uid=i)
+
# Create automember definitions and regex rules
automember_prop = {
'cn': 'testgroup_definition',
@@ -59,7 +65,7 @@ def automember_fixture(topo, request):
automemberplugin.enable()
topo.standalone.restart()
- return (user, groups)
+ return user, groups
def test_mods(automember_fixture, topo):
@@ -72,19 +78,21 @@ def test_mods(automember_fixture, topo):
2. Update user that should add it to group[1]
3. Update user that should add it to group[2]
4. Update user that should add it to group[0]
- 5. Test rebuild task correctly moves user to group[1]
+ 5. Test rebuild task adds user to group[1]
+ 6. Test rebuild task cleans up groups and only adds the user to group[1]
:expectedresults:
1. Success
2. Success
3. Success
4. Success
5. Success
+ 6. Success
"""
(user, groups) = automember_fixture
# Update user which should go into group[0]
user.replace('cn', 'whatever')
- groups[0].is_member(user.dn)
+ assert groups[0].is_member(user.dn)
if groups[1].is_member(user.dn):
assert False
if groups[2].is_member(user.dn):
@@ -92,7 +100,7 @@ def test_mods(automember_fixture, topo):
# Update user0 which should go into group[1]
user.replace('cn', 'mark')
- groups[1].is_member(user.dn)
+ assert groups[1].is_member(user.dn)
if groups[0].is_member(user.dn):
assert False
if groups[2].is_member(user.dn):
@@ -100,7 +108,7 @@ def test_mods(automember_fixture, topo):
# Update user which should go into group[2]
user.replace('cn', 'simon')
- groups[2].is_member(user.dn)
+ assert groups[2].is_member(user.dn)
if groups[0].is_member(user.dn):
assert False
if groups[1].is_member(user.dn):
@@ -108,7 +116,7 @@ def test_mods(automember_fixture, topo):
# Update user which should go back into group[0] (full circle)
user.replace('cn', 'whatever')
- groups[0].is_member(user.dn)
+ assert groups[0].is_member(user.dn)
if groups[1].is_member(user.dn):
assert False
if groups[2].is_member(user.dn):
@@ -128,12 +136,24 @@ def test_mods(automember_fixture, topo):
automemberplugin.enable()
topo.standalone.restart()
- # Run rebuild task
+ # Run rebuild task (no cleanup)
task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount")
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+ # test only one fixup task is allowed at a time
+ automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=top")
task.wait()
- # Test membership
- groups[1].is_member(user.dn)
+ # Test membership (user should still be in groups[0])
+ assert groups[1].is_member(user.dn)
+ if not groups[0].is_member(user.dn):
+ assert False
+
+ # Run rebuild task with cleanup
+ task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount", cleanup=True)
+ task.wait()
+
+ # Test membership (user should only be in groups[1])
+ assert groups[1].is_member(user.dn)
if groups[0].is_member(user.dn):
assert False
if groups[2].is_member(user.dn):
@@ -148,4 +168,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main(["-s", CURRENT_FILE])
-
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
index 3494d0343..419adb052 100644
--- a/ldap/servers/plugins/automember/automember.c
+++ b/ldap/servers/plugins/automember/automember.c
@@ -1,5 +1,5 @@
/** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2011 Red Hat, Inc.
+ * Copyright (C) 2022 Red Hat, Inc.
* All rights reserved.
*
* License: GPL (version 3 or any later version).
@@ -14,7 +14,7 @@
* Auto Membership Plug-in
*/
#include "automember.h"
-
+#include <pthread.h>
/*
* Plug-in globals
@@ -22,7 +22,9 @@
static PRCList *g_automember_config = NULL;
static Slapi_RWLock *g_automember_config_lock = NULL;
static uint64_t abort_rebuild_task = 0;
-
+static pthread_key_t td_automem_block_nested;
+static PRBool fixup_running = PR_FALSE;
+static PRLock *fixup_lock = NULL;
static void *_PluginID = NULL;
static Slapi_DN *_PluginDN = NULL;
static Slapi_DN *_ConfigAreaDN = NULL;
@@ -93,9 +95,43 @@ static void automember_task_export_destructor(Slapi_Task *task);
static void automember_task_map_destructor(Slapi_Task *task);
#define DEFAULT_FILE_MODE PR_IRUSR | PR_IWUSR
+#define FIXUP_PROGRESS_LIMIT 1000
static uint64_t plugin_do_modify = 0;
static uint64_t plugin_is_betxn = 0;
+/* automember_plugin fixup task and add operations should block other be_txn
+ * plugins from calling automember_mod_post_op() */
+static int32_t
+slapi_td_block_nested_post_op(void)
+{
+ int32_t val = 12345;
+
+ if (pthread_setspecific(td_automem_block_nested, (void *)&val) != 0) {
+ return PR_FAILURE;
+ }
+ return PR_SUCCESS;
+}
+
+static int32_t
+slapi_td_unblock_nested_post_op(void)
+{
+ if (pthread_setspecific(td_automem_block_nested, NULL) != 0) {
+ return PR_FAILURE;
+ }
+ return PR_SUCCESS;
+}
+
+static int32_t
+slapi_td_is_post_op_nested(void)
+{
+ int32_t *value = pthread_getspecific(td_automem_block_nested);
+
+ if (value == NULL) {
+ return 0;
+ }
+ return 1;
+}
+
/*
* Config cache locking functions
*/
@@ -317,6 +353,14 @@ automember_start(Slapi_PBlock *pb)
return -1;
}
+ if (fixup_lock == NULL) {
+ if ((fixup_lock = PR_NewLock()) == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_start - Failed to create fixup lock.\n");
+ return -1;
+ }
+ }
+
/*
* Get the plug-in target dn from the system
* and store it for future use. */
@@ -360,6 +404,11 @@ automember_start(Slapi_PBlock *pb)
}
}
+ if (pthread_key_create(&td_automem_block_nested, NULL) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_start - pthread_key_create failed\n");
+ }
+
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"automember_start - ready for service\n");
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
@@ -394,6 +443,8 @@ automember_close(Slapi_PBlock *pb __attribute__((unused)))
slapi_sdn_free(&_ConfigAreaDN);
slapi_destroy_rwlock(g_automember_config_lock);
g_automember_config_lock = NULL;
+ PR_DestroyLock(fixup_lock);
+ fixup_lock = NULL;
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"<-- automember_close\n");
@@ -1619,7 +1670,6 @@ out:
return rc;
}
-
/*
* automember_update_member_value()
*
@@ -1634,7 +1684,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
LDAPMod *mods[2];
char *vals[2];
char *member_value = NULL;
- int rc = 0;
+ int rc = LDAP_SUCCESS;
Slapi_DN *group_sdn;
/* First thing check that the group still exists */
@@ -1653,7 +1703,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
"automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n",
group_dn, rc);
}
- return rc;
+ goto out;
}
/* If grouping_value is dn, we need to fetch the dn instead. */
@@ -1879,6 +1929,13 @@ automember_mod_post_op(Slapi_PBlock *pb)
PRCList *list = NULL;
int rc = SLAPI_PLUGIN_SUCCESS;
+ if (slapi_td_is_post_op_nested()) {
+ /* don't process op twice in the same thread */
+ return rc;
+ } else {
+ slapi_td_block_nested_post_op();
+ }
+
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"--> automember_mod_post_op\n");
@@ -2005,6 +2062,7 @@ automember_mod_post_op(Slapi_PBlock *pb)
}
}
}
+ slapi_td_unblock_nested_post_op();
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"<-- automember_mod_post_op (%d)\n", rc);
@@ -2024,6 +2082,13 @@ automember_add_post_op(Slapi_PBlock *pb)
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"--> automember_add_post_op\n");
+ if (slapi_td_is_post_op_nested()) {
+ /* don't process op twice in the same thread */
+ return rc;
+ } else {
+ slapi_td_block_nested_post_op();
+ }
+
/* Reload config if a config entry was added. */
if ((sdn = automember_get_sdn(pb))) {
if (automember_dn_is_config(sdn)) {
@@ -2039,7 +2104,7 @@ automember_add_post_op(Slapi_PBlock *pb)
/* If replication, just bail. */
if (automember_isrepl(pb)) {
- return SLAPI_PLUGIN_SUCCESS;
+ goto bail;
}
/* Get the newly added entry. */
@@ -2052,7 +2117,7 @@ automember_add_post_op(Slapi_PBlock *pb)
tombstone);
slapi_value_free(&tombstone);
if (is_tombstone) {
- return SLAPI_PLUGIN_SUCCESS;
+ goto bail;
}
/* Check if a config entry applies
@@ -2063,21 +2128,19 @@ automember_add_post_op(Slapi_PBlock *pb)
list = PR_LIST_HEAD(g_automember_config);
while (list != g_automember_config) {
config = (struct configEntry *)list;
-
/* Does the entry meet scope and filter requirements? */
if (slapi_dn_issuffix(slapi_sdn_get_dn(sdn), config->scope) &&
- (slapi_filter_test_simple(e, config->filter) == 0)) {
+ (slapi_filter_test_simple(e, config->filter) == 0))
+ {
/* Find out what membership changes are needed and make them. */
if (automember_update_membership(config, e, NULL) == SLAPI_PLUGIN_FAILURE) {
rc = SLAPI_PLUGIN_FAILURE;
break;
}
}
-
list = PR_NEXT_LINK(list);
}
}
-
automember_config_unlock();
} else {
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
@@ -2098,6 +2161,7 @@ bail:
slapi_pblock_set(pb, SLAPI_RESULT_CODE, &result);
slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, &errtxt);
}
+ slapi_td_unblock_nested_post_op();
return rc;
}
@@ -2138,6 +2202,7 @@ typedef struct _task_data
Slapi_DN *base_dn;
char *bind_dn;
int scope;
+ PRBool cleanup;
} task_data;
static void
@@ -2270,6 +2335,7 @@ automember_task_abort_thread(void *arg)
* basedn: dc=example,dc=com
* filter: (uid=*)
* scope: sub
+ * cleanup: yes/on (default is off)
*
* basedn and filter are required. If scope is omitted, the default is sub
*/
@@ -2284,9 +2350,22 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
const char *base_dn;
const char *filter;
const char *scope;
+ const char *cleanup_str;
+ PRBool cleanup = PR_FALSE;
*returncode = LDAP_SUCCESS;
+ PR_Lock(fixup_lock);
+ if (fixup_running) {
+ PR_Unlock(fixup_lock);
+ *returncode = LDAP_UNWILLING_TO_PERFORM;
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_task_add - there is already a fixup task running\n");
+ rv = SLAPI_DSE_CALLBACK_ERROR;
+ goto out;
+ }
+ PR_Unlock(fixup_lock);
+
/*
* Grab the task params
*/
@@ -2300,6 +2379,12 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
rv = SLAPI_DSE_CALLBACK_ERROR;
goto out;
}
+ if ((cleanup_str = slapi_entry_attr_get_ref(e, "cleanup"))) {
+ if (strcasecmp(cleanup_str, "yes") == 0 || strcasecmp(cleanup_str, "on") == 0) {
+ cleanup = PR_TRUE;
+ }
+ }
+
scope = slapi_fetch_attr(e, "scope", "sub");
/*
* setup our task data
@@ -2315,6 +2400,7 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
mytaskdata->bind_dn = slapi_ch_strdup(bind_dn);
mytaskdata->base_dn = slapi_sdn_new_dn_byval(base_dn);
mytaskdata->filter_str = slapi_ch_strdup(filter);
+ mytaskdata->cleanup = cleanup;
if (scope) {
if (strcasecmp(scope, "sub") == 0) {
@@ -2334,6 +2420,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
task = slapi_plugin_new_task(slapi_entry_get_ndn(e), arg);
slapi_task_set_destructor_fn(task, automember_task_destructor);
slapi_task_set_data(task, mytaskdata);
+ PR_Lock(fixup_lock);
+ fixup_running = PR_TRUE;
+ PR_Unlock(fixup_lock);
/*
* Start the task as a separate thread
*/
@@ -2345,6 +2434,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
"automember_task_add - Unable to create task thread!\n");
*returncode = LDAP_OPERATIONS_ERROR;
slapi_task_finish(task, *returncode);
+ PR_Lock(fixup_lock);
+ fixup_running = PR_FALSE;
+ PR_Unlock(fixup_lock);
rv = SLAPI_DSE_CALLBACK_ERROR;
} else {
rv = SLAPI_DSE_CALLBACK_OK;
@@ -2372,6 +2464,9 @@ automember_rebuild_task_thread(void *arg)
PRCList *list = NULL;
PRCList *include_list = NULL;
int result = 0;
+ int64_t fixup_progress_count = 0;
+ int64_t fixup_progress_elapsed = 0;
+ int64_t fixup_start_time = 0;
size_t i = 0;
/* Reset abort flag */
@@ -2380,6 +2475,7 @@ automember_rebuild_task_thread(void *arg)
if (!task) {
return; /* no task */
}
+
slapi_task_inc_refcount(task);
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"automember_rebuild_task_thread - Refcount incremented.\n");
@@ -2393,9 +2489,11 @@ automember_rebuild_task_thread(void *arg)
slapi_task_log_status(task, "Automember rebuild task starting (base dn: (%s) filter (%s)...",
slapi_sdn_get_dn(td->base_dn), td->filter_str);
/*
- * Set the bind dn in the local thread data
+ * Set the bind dn in the local thread data, and block post op mods
*/
slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));
+ slapi_td_block_nested_post_op();
+ fixup_start_time = slapi_current_rel_time_t();
/*
* Take the config lock now and search the database
*/
@@ -2426,6 +2524,21 @@ automember_rebuild_task_thread(void *arg)
* Loop over the entries
*/
for (i = 0; entries && (entries[i] != NULL); i++) {
+ fixup_progress_count++;
+ if (fixup_progress_count % FIXUP_PROGRESS_LIMIT == 0 ) {
+ slapi_task_log_notice(task,
+ "Processed %ld entries in %ld seconds (+%ld seconds)",
+ fixup_progress_count,
+ slapi_current_rel_time_t() - fixup_start_time,
+ slapi_current_rel_time_t() - fixup_progress_elapsed);
+ slapi_task_log_status(task,
+ "Processed %ld entries in %ld seconds (+%ld seconds)",
+ fixup_progress_count,
+ slapi_current_rel_time_t() - fixup_start_time,
+ slapi_current_rel_time_t() - fixup_progress_elapsed);
+ slapi_task_inc_progress(task);
+ fixup_progress_elapsed = slapi_current_rel_time_t();
+ }
if (slapi_atomic_load_64(&abort_rebuild_task, __ATOMIC_ACQUIRE) == 1) {
/* The task was aborted */
slapi_task_log_notice(task, "Automember rebuild task was intentionally aborted");
@@ -2443,48 +2556,66 @@ automember_rebuild_task_thread(void *arg)
if (slapi_dn_issuffix(slapi_entry_get_dn(entries[i]), config->scope) &&
(slapi_filter_test_simple(entries[i], config->filter) == 0))
{
- /* First clear out all the defaults groups */
- for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
- if ((result = automember_update_member_value(entries[i], config->default_groups[ii],
- config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
- {
- slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
- "member from default group (%s) error (%d)",
- config->default_groups[ii], result);
- slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
- "member from default group (%s) error (%d)",
- config->default_groups[ii], result);
- slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
- "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
- config->default_groups[ii], result);
- goto out;
- }
- }
-
- /* Then clear out the non-default group */
- if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
- include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
- while (include_list != (PRCList *)config->inclusive_rules) {
- struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
- if ((result = automember_update_member_value(entries[i], slapi_sdn_get_dn(curr_rule->target_group_dn),
- config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
+ if (td->cleanup) {
+
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_rebuild_task_thread - Cleaning up groups (config %s)\n",
+ config->dn);
+ /* First clear out all the defaults groups */
+ for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
+ if ((result = automember_update_member_value(entries[i],
+ config->default_groups[ii],
+ config->grouping_attr,
+ config->grouping_value,
+ NULL, DEL_MEMBER)))
{
slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
- "member from group (%s) error (%d)",
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+ "member from default group (%s) error (%d)",
+ config->default_groups[ii], result);
slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
- "member from group (%s) error (%d)",
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+ "member from default group (%s) error (%d)",
+ config->default_groups[ii], result);
slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
"automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+ config->default_groups[ii], result);
goto out;
}
- include_list = PR_NEXT_LINK(include_list);
}
+
+ /* Then clear out the non-default group */
+ if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
+ include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
+ while (include_list != (PRCList *)config->inclusive_rules) {
+ struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
+ if ((result = automember_update_member_value(entries[i],
+ slapi_sdn_get_dn(curr_rule->target_group_dn),
+ config->grouping_attr,
+ config->grouping_value,
+ NULL, DEL_MEMBER)))
+ {
+ slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
+ "member from group (%s) error (%d)",
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+ slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
+ "member from group (%s) error (%d)",
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+ goto out;
+ }
+ include_list = PR_NEXT_LINK(include_list);
+ }
+ }
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_rebuild_task_thread - Finished cleaning up groups (config %s)\n",
+ config->dn);
}
/* Update the memberships for this entries */
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+ "automember_rebuild_task_thread - Updating membership (config %s)\n",
+ config->dn);
if (slapi_is_shutting_down() ||
automember_update_membership(config, entries[i], NULL) == SLAPI_PLUGIN_FAILURE)
{
@@ -2508,15 +2639,22 @@ out:
slapi_task_log_notice(task, "Automember rebuild task aborted. Error (%d)", result);
slapi_task_log_status(task, "Automember rebuild task aborted. Error (%d)", result);
} else {
- slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
- slapi_task_log_status(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
+ slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
+ (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
+ slapi_task_log_status(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
+ (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
}
slapi_task_inc_progress(task);
slapi_task_finish(task, result);
slapi_task_dec_refcount(task);
slapi_atomic_store_64(&abort_rebuild_task, 0, __ATOMIC_RELEASE);
+ slapi_td_unblock_nested_post_op();
+ PR_Lock(fixup_lock);
+ fixup_running = PR_FALSE;
+ PR_Unlock(fixup_lock);
+
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
- "automember_rebuild_task_thread - Refcount decremented.\n");
+ "automember_rebuild_task_thread - task finished, refcount decremented.\n");
}
/*
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
index ba2d73a84..ce4c314a1 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
@@ -1,6 +1,6 @@
/** BEGIN COPYRIGHT BLOCK
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2022 Red Hat, Inc.
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
* All rights reserved.
*
@@ -1264,10 +1264,6 @@ ldbm_back_add(Slapi_PBlock *pb)
goto common_return;
error_return:
- /* Revert the caches if this is the parent operation */
- if (parent_op && betxn_callback_fails) {
- revert_cache(inst, &parent_time);
- }
if (addingentry_id_assigned) {
next_id_return(be, addingentry->ep_id);
}
@@ -1376,6 +1372,11 @@ diskfull_return:
if (!not_an_error) {
rc = SLAPI_FAIL_GENERAL;
}
+
+ /* Revert the caches if this is the parent operation */
+ if (parent_op && betxn_callback_fails) {
+ revert_cache(inst, &parent_time);
+ }
}
common_return:
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index de23190c3..27f0ac58a 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -1407,11 +1407,6 @@ commit_return:
goto common_return;
error_return:
- /* Revert the caches if this is the parent operation */
- if (parent_op && betxn_callback_fails) {
- revert_cache(inst, &parent_time);
- }
-
if (tombstone) {
if (cache_is_in_cache(&inst->inst_cache, tombstone)) {
tomb_ep_id = tombstone->ep_id; /* Otherwise, tombstone might have been freed. */
@@ -1496,6 +1491,11 @@ error_return:
conn_id, op_id, parent_modify_c.old_entry, parent_modify_c.new_entry, myrc);
}
+ /* Revert the caches if this is the parent operation */
+ if (parent_op && betxn_callback_fails) {
+ revert_cache(inst, &parent_time);
+ }
+
common_return:
if (orig_entry) {
/* NOTE: #define SLAPI_DELETE_BEPREOP_ENTRY SLAPI_ENTRY_PRE_OP */
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index 537369055..64b293001 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -1,6 +1,6 @@
/** BEGIN COPYRIGHT BLOCK
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2022 Red Hat, Inc.
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
* All rights reserved.
*
@@ -1043,11 +1043,6 @@ ldbm_back_modify(Slapi_PBlock *pb)
goto common_return;
error_return:
- /* Revert the caches if this is the parent operation */
- if (parent_op && betxn_callback_fails) {
- revert_cache(inst, &parent_time);
- }
-
if (postentry != NULL) {
slapi_entry_free(postentry);
postentry = NULL;
@@ -1103,6 +1098,10 @@ error_return:
if (!not_an_error) {
rc = SLAPI_FAIL_GENERAL;
}
+ /* Revert the caches if this is the parent operation */
+ if (parent_op && betxn_callback_fails) {
+ revert_cache(inst, &parent_time);
+ }
}
/* if ec is in cache, remove it, then add back e if we still have it */
diff --git a/src/lib389/lib389/cli_conf/plugins/automember.py b/src/lib389/lib389/cli_conf/plugins/automember.py
index 15b00c633..568586ad8 100644
--- a/src/lib389/lib389/cli_conf/plugins/automember.py
+++ b/src/lib389/lib389/cli_conf/plugins/automember.py
@@ -155,7 +155,7 @@ def fixup(inst, basedn, log, args):
log.info('Attempting to add task entry... This will fail if Automembership plug-in is not enabled.')
if not plugin.status():
log.error("'%s' is disabled. Rebuild membership task can't be executed" % plugin.rdn)
- fixup_task = plugin.fixup(args.DN, args.filter)
+ fixup_task = plugin.fixup(args.DN, args.filter, args.cleanup)
if args.wait:
log.info(f'Waiting for fixup task "{fixup_task.dn}" to complete. You can safely exit by pressing Control C ...')
fixup_task.wait(timeout=args.timeout)
@@ -225,8 +225,8 @@ def create_parser(subparsers):
subcommands = automember.add_subparsers(help='action')
add_generic_plugin_parsers(subcommands, AutoMembershipPlugin)
- list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
- subcommands_list = list.add_subparsers(help='action')
+ automember_list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
+ subcommands_list = automember_list.add_subparsers(help='action')
list_definitions = subcommands_list.add_parser('definitions', help='Lists Automembership definitions.')
list_definitions.set_defaults(func=definition_list)
list_regexes = subcommands_list.add_parser('regexes', help='List Automembership regex rules.')
@@ -269,6 +269,8 @@ def create_parser(subparsers):
fixup_task.add_argument('-f', '--filter', required=True, help='Sets the LDAP filter for entries to fix up')
fixup_task.add_argument('-s', '--scope', required=True, choices=['sub', 'base', 'one'], type=str.lower,
help='Sets the LDAP search scope for entries to fix up')
+ fixup_task.add_argument('--cleanup', action='store_true',
+ help="Clean up previous group memberships before rebuilding")
fixup_task.add_argument('--wait', action='store_true',
help="Wait for the task to finish, this could take a long time")
fixup_task.add_argument('--timeout', default=0, type=int,
@@ -279,7 +281,7 @@ def create_parser(subparsers):
fixup_status.add_argument('--dn', help="The task entry's DN")
fixup_status.add_argument('--show-log', action='store_true', help="Display the task log")
fixup_status.add_argument('--watch', action='store_true',
- help="Watch the task's status and wait for it to finish")
+ help="Watch the task's status and wait for it to finish")
abort_fixup = subcommands.add_parser('abort-fixup', help='Abort the rebuild membership task.')
abort_fixup.set_defaults(func=abort)
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
index 52691a44c..a1ad0a45b 100644
--- a/src/lib389/lib389/plugins.py
+++ b/src/lib389/lib389/plugins.py
@@ -1141,13 +1141,15 @@ class AutoMembershipPlugin(Plugin):
def __init__(self, instance, dn="cn=Auto Membership Plugin,cn=plugins,cn=config"):
super(AutoMembershipPlugin, self).__init__(instance, dn)
- def fixup(self, basedn, _filter=None):
+ def fixup(self, basedn, _filter=None, cleanup=False):
"""Create an automember rebuild membership task
:param basedn: Basedn to fix up
:type basedn: str
:param _filter: a filter for entries to fix up
:type _filter: str
+ :param cleanup: cleanup old group memberships
+ :type cleanup: boolean
:returns: an instance of Task(DSLdapObject)
"""
@@ -1156,6 +1158,9 @@ class AutoMembershipPlugin(Plugin):
task_properties = {'basedn': basedn}
if _filter is not None:
task_properties['filter'] = _filter
+ if cleanup:
+ task_properties['cleanup'] = "yes"
+
task.create(properties=task_properties)
return task
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
index 1a16bbb83..193805780 100644
--- a/src/lib389/lib389/tasks.py
+++ b/src/lib389/lib389/tasks.py
@@ -1006,12 +1006,13 @@ class Tasks(object):
return exitCode
def automemberRebuild(self, suffix=DEFAULT_SUFFIX, scope='sub',
- filterstr='objectclass=top', args=None):
+ filterstr='objectclass=top', cleanup=False, args=None):
'''
- @param suffix - The suffix the task should examine - defualt is
+ @param suffix - The suffix the task should examine - default is
"dc=example,dc=com"
@param scope - The scope of the search to find entries
- @param fitlerstr - THe search filter to find entries
+ @param filterstr - The search filter to find entries
+ @param cleanup - reset/clear the old group memberships prior to rebuilding
@param args - is a dictionary that contains modifier of the task
wait: True/[False] - If True, waits for the completion of
the task before to return
@@ -1027,6 +1028,8 @@ class Tasks(object):
entry.setValues('basedn', suffix)
entry.setValues('filter', filterstr)
entry.setValues('scope', scope)
+ if cleanup:
+ entry.setValues('cleanup', 'yes')
# start the task and possibly wait for task completion
try:
--
2.43.0
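
The nesting protection this patch adds to the post-op callbacks and to the rebuild thread boils down to a small thread-local guard. The sketch below is a condensed illustration, not code from the patch; the key is assumed to have been created with pthread_key_create() during plugin start, as automember_start() does above:

    #include <pthread.h>

    static pthread_key_t guard_key;   /* created once at plugin start */

    static int
    guarded_post_op(void)
    {
        static int marker = 1;

        if (pthread_getspecific(guard_key) != NULL) {
            return 0;                              /* nested call on this thread: skip */
        }
        pthread_setspecific(guard_key, &marker);   /* block re-entry */

        /* ... do the real work; it may fire nested be_txn post-op callbacks ... */

        pthread_setspecific(guard_key, NULL);      /* re-arm for the next operation */
        return 0;
    }

The fixup task thread takes the same guard before iterating entries, which is what keeps the task's own internal modifications from re-entering the mod post-op callback.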


@ -1,96 +0,0 @@
From 9710c327b3034d7a9d112306961c9cec98083df5 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <simon.pichugin@gmail.com>
Date: Mon, 18 May 2020 22:33:45 +0200
Subject: [PATCH 05/12] Issue 51086 - Improve dscreate instance name validation
Bug Description: When creating an instance using dscreate, it doesn't enforce
max name length. The ldapi socket name contains the name of the instance. If it's
too long, we can hit limits, and the file name will be truncated. Also, it
doesn't sanitize the instance name, so it's possible to create an instance with
non-ASCII symbols in its name.
Fix Description: Add more checks to 'dscreate from-file' installation.
Add a limitation for the nsslapd-ldapifilepath string length because it is
limited by sizeof((*ports_info.i_listenaddr)->local.path), which it is copied into.
https://pagure.io/389-ds-base/issue/51086
Reviewed by: firstyear, mreynolds (Thanks!)
---
ldap/servers/slapd/libglobs.c | 12 ++++++++++++
src/cockpit/389-console/src/ds.jsx | 8 ++++++--
src/lib389/lib389/instance/setup.py | 9 +++++++++
3 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 0d3d9a924..fbf90d92d 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -2390,11 +2390,23 @@ config_set_ldapi_filename(const char *attrname, char *value, char *errorbuf, int
{
int retVal = LDAP_SUCCESS;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ /*
+ * LDAPI file path length is limited by sizeof((*ports_info.i_listenaddr)->local.path),
+ * which is set in main.c inside the "#if defined(ENABLE_LDAPI)" block.
+ * ports_info.i_listenaddr is sizeof(PRNetAddr) and the required size is 8 bytes less.
+ */
+ size_t result_size = sizeof(PRNetAddr) - 8;
if (config_value_is_null(attrname, value, errorbuf, 0)) {
return LDAP_OPERATIONS_ERROR;
}
+ if (strlen(value) >= result_size) {
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: \"%s\" is invalid, its length must be less than %d",
+ attrname, value, result_size);
+ return LDAP_OPERATIONS_ERROR;
+ }
+
if (apply) {
CFG_LOCK_WRITE(slapdFrontendConfig);
diff --git a/src/cockpit/389-console/src/ds.jsx b/src/cockpit/389-console/src/ds.jsx
index 90d9e5abd..53aa5cb79 100644
--- a/src/cockpit/389-console/src/ds.jsx
+++ b/src/cockpit/389-console/src/ds.jsx
@@ -793,10 +793,14 @@ class CreateInstanceModal extends React.Component {
return;
}
newServerId = newServerId.replace(/^slapd-/i, ""); // strip "slapd-"
- if (newServerId.length > 128) {
+ if (newServerId === "admin") {
+ addNotification("warning", "Instance Name 'admin' is reserved, please choose a different name");
+ return;
+ }
+ if (newServerId.length > 80) {
addNotification(
"warning",
- "Instance name is too long, it must not exceed 128 characters"
+ "Instance name is too long, it must not exceed 80 characters"
);
return;
}
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index 803992275..f5fc5495d 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -567,6 +567,15 @@ class SetupDs(object):
# We need to know the prefix before we can do the instance checks
assert_c(slapd['instance_name'] is not None, "Configuration instance_name in section [slapd] not found")
+ assert_c(len(slapd['instance_name']) <= 80, "Server identifier should not be longer than 80 symbols")
+ assert_c(all(ord(c) < 128 for c in slapd['instance_name']), "Server identifier cannot contain non-ASCII characters")
+ assert_c(' ' not in slapd['instance_name'], "Server identifier cannot contain a space")
+ assert_c(slapd['instance_name'] != 'admin', "Server identifier \"admin\" is reserved, please choose a different identifier")
+
+ # Check that valid characters are used
+ safe = re.compile(r'^[#%:\w@_-]+$').search
+ assert_c(bool(safe(slapd['instance_name'])), "Server identifier has invalid characters, please choose a different value")
+
# Check if the instance exists or not.
# Should I move this import? I think this prevents some recursion
from lib389 import DirSrv
--
2.26.2
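
The server-side half of this fix is a straightforward bounds check: the LDAPI socket path is eventually copied into the fixed-size PRNetAddr local.path buffer, so values that cannot fit are rejected up front. A reduced, illustrative form of the check follows (the real code in libglobs.c also reports the limit through slapi_create_errormsg()):

    #include <string.h>
    #include <prio.h>   /* NSPR: defines PRNetAddr */

    /* Returns 1 if 'value' fits into PRNetAddr local.path; 8 bytes of the
     * union are taken by other members, hence the subtraction. */
    static int
    ldapi_path_fits(const char *value)
    {
        size_t limit = sizeof(PRNetAddr) - 8;

        return (value != NULL && strlen(value) < limit);
    }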


@ -1,254 +0,0 @@
From c0cb15445c1434b3d317b1c06ab1a0ba8dbc6f04 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 19 May 2020 15:11:53 -0400
Subject: [PATCH 06/12] Issue 51102 - RFE - ds-replcheck - make online timeout
configurable
Bug Description: When doing an online check with replicas that are very
far apart, the connection can time out because the hardcoded
timeout is 5 seconds.
Fix Description: Change the default timeout to never time out, and add a
CLI option to specify a timeout.
Also caught all the possible LDAP exceptions so we can
cleanly "fail". Fixed some Python syntax issues, and
improved the entry inconsistency report.
relates: https://pagure.io/389-ds-base/issue/51102
Reviewed by: firstyear & spichugi(Thanks!)
---
ldap/admin/src/scripts/ds-replcheck | 90 ++++++++++++++++++-----------
1 file changed, 57 insertions(+), 33 deletions(-)
diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
index 30bcfd65d..5bb7dfce3 100755
--- a/ldap/admin/src/scripts/ds-replcheck
+++ b/ldap/admin/src/scripts/ds-replcheck
@@ -1,7 +1,7 @@
#!/usr/bin/python3
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -21,10 +21,9 @@ import getpass
import signal
from ldif import LDIFRecordList
from ldap.ldapobject import SimpleLDAPObject
-from ldap.cidict import cidict
from ldap.controls import SimplePagedResultsControl
from lib389._entry import Entry
-from lib389.utils import ensure_str, ensure_list_str, ensure_int
+from lib389.utils import ensure_list_str, ensure_int
VERSION = "2.0"
RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))'
@@ -185,11 +184,11 @@ def report_conflict(entry, attr, opts):
report = True
if 'nscpentrywsi' in entry.data:
- found = False
for val in entry.data['nscpentrywsi']:
if val.lower().startswith(attr + ';'):
if (opts['starttime'] - extract_time(val)) <= opts['lag']:
report = False
+ break
return report
@@ -321,6 +320,9 @@ def ldif_search(LDIF, dn):
count = 0
ignore_list = ['conflictcsn', 'modifytimestamp', 'modifiersname']
val = ""
+ attr = ""
+ state_attr = ""
+ part_dn = ""
result['entry'] = None
result['conflict'] = None
result['tombstone'] = False
@@ -570,6 +572,7 @@ def cmp_entry(mentry, rentry, opts):
if val.lower().startswith(mattr + ';'):
if not found:
diff['diff'].append(" Master:")
+ diff['diff'].append(" - Value: %s" % (val.split(':')[1].lstrip()))
diff['diff'].append(" - State Info: %s" % (val))
diff['diff'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
found = True
@@ -588,6 +591,7 @@ def cmp_entry(mentry, rentry, opts):
if val.lower().startswith(mattr + ';'):
if not found:
diff['diff'].append(" Replica:")
+ diff['diff'].append(" - Value: %s" % (val.split(':')[1].lstrip()))
diff['diff'].append(" - State Info: %s" % (val))
diff['diff'].append(" - Date: %s\n" % (time.ctime(extract_time(val))))
found = True
@@ -654,7 +658,6 @@ def do_offline_report(opts, output_file=None):
rconflicts = []
rtombstones = 0
mtombstones = 0
- idx = 0
# Open LDIF files
try:
@@ -926,7 +929,7 @@ def validate_suffix(ldapnode, suffix, hostname):
:return - True if suffix exists, otherwise False
"""
try:
- master_basesuffix = ldapnode.search_s(suffix, ldap.SCOPE_BASE )
+ ldapnode.search_s(suffix, ldap.SCOPE_BASE)
except ldap.NO_SUCH_OBJECT:
print("Error: Failed to validate suffix in {}. {} does not exist.".format(hostname, suffix))
return False
@@ -968,12 +971,12 @@ def connect_to_replicas(opts):
replica = SimpleLDAPObject(ruri)
# Set timeouts
- master.set_option(ldap.OPT_NETWORK_TIMEOUT,5.0)
- master.set_option(ldap.OPT_TIMEOUT,5.0)
- replica.set_option(ldap.OPT_NETWORK_TIMEOUT,5.0)
- replica.set_option(ldap.OPT_TIMEOUT,5.0)
+ master.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
+ master.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
+ replica.set_option(ldap.OPT_NETWORK_TIMEOUT, opts['timeout'])
+ replica.set_option(ldap.OPT_TIMEOUT, opts['timeout'])
- # Setup Secure Conenction
+ # Setup Secure Connection
if opts['certdir'] is not None:
# Setup Master
if opts['mprotocol'] != LDAPI:
@@ -1003,7 +1006,7 @@ def connect_to_replicas(opts):
try:
master.simple_bind_s(opts['binddn'], opts['bindpw'])
except ldap.SERVER_DOWN as e:
- print("Cannot connect to %r" % muri)
+ print(f"Cannot connect to {muri} ({str(e)})")
sys.exit(1)
except ldap.LDAPError as e:
print("Error: Failed to authenticate to Master: ({}). "
@@ -1014,7 +1017,7 @@ def connect_to_replicas(opts):
try:
replica.simple_bind_s(opts['binddn'], opts['bindpw'])
except ldap.SERVER_DOWN as e:
- print("Cannot connect to %r" % ruri)
+ print(f"Cannot connect to {ruri} ({str(e)})")
sys.exit(1)
except ldap.LDAPError as e:
print("Error: Failed to authenticate to Replica: ({}). "
@@ -1218,7 +1221,6 @@ def do_online_report(opts, output_file=None):
"""
m_done = False
r_done = False
- done = False
report = {}
report['diff'] = []
report['m_missing'] = []
@@ -1257,15 +1259,22 @@ def do_online_report(opts, output_file=None):
# Read the results and start comparing
while not m_done or not r_done:
- if not m_done:
- m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid)
- elif not r_done:
- m_rdata = []
-
- if not r_done:
- r_rtype, r_rdata, r_rmsgid, r_rctrls = replica.result3(replica_msgid)
- elif not m_done:
- r_rdata = []
+ try:
+ if not m_done:
+ m_rtype, m_rdata, m_rmsgid, m_rctrls = master.result3(master_msgid)
+ elif not r_done:
+ m_rdata = []
+ except ldap.LDAPError as e:
+ print("Error: Problem getting the results from the master: %s" % str(e))
+ sys.exit(1)
+ try:
+ if not r_done:
+ r_rtype, r_rdata, r_rmsgid, r_rctrls = replica.result3(replica_msgid)
+ elif not m_done:
+ r_rdata = []
+ except ldap.LDAPError as e:
+ print("Error: Problem getting the results from the replica: %s" % str(e))
+ sys.exit(1)
# Convert entries
mresult = convert_entries(m_rdata)
@@ -1291,11 +1300,15 @@ def do_online_report(opts, output_file=None):
]
if m_pctrls:
if m_pctrls[0].cookie:
- # Copy cookie from response control to request control
- req_pr_ctrl.cookie = m_pctrls[0].cookie
- master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
- "(|(objectclass=*)(objectclass=ldapsubentry))",
- ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
+ try:
+ # Copy cookie from response control to request control
+ req_pr_ctrl.cookie = m_pctrls[0].cookie
+ master_msgid = master.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+ "(|(objectclass=*)(objectclass=ldapsubentry))",
+ ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
+ except ldap.LDAPError as e:
+ print("Error: Problem searching the master: %s" % str(e))
+ sys.exit(1)
else:
m_done = True # No more pages available
else:
@@ -1311,11 +1324,15 @@ def do_online_report(opts, output_file=None):
if r_pctrls:
if r_pctrls[0].cookie:
- # Copy cookie from response control to request control
- req_pr_ctrl.cookie = r_pctrls[0].cookie
- replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
- "(|(objectclass=*)(objectclass=ldapsubentry))",
- ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
+ try:
+ # Copy cookie from response control to request control
+ req_pr_ctrl.cookie = r_pctrls[0].cookie
+ replica_msgid = replica.search_ext(opts['suffix'], ldap.SCOPE_SUBTREE,
+ "(|(objectclass=*)(objectclass=ldapsubentry))",
+ ['*', 'createtimestamp', 'nscpentrywsi', 'conflictcsn', 'nsds5replconflict'], serverctrls=controls)
+ except ldap.LDAPError as e:
+ print("Error: Problem searching the replica: %s" % str(e))
+ sys.exit(1)
else:
r_done = True # No more pages available
else:
@@ -1426,6 +1443,9 @@ def init_online_params(args):
# prompt for password
opts['bindpw'] = getpass.getpass('Enter password: ')
+ # lastly handle the timeout
+ opts['timeout'] = int(args.timeout)
+
return opts
@@ -1553,6 +1573,8 @@ def main():
state_parser.add_argument('-y', '--pass-file', help='A text file containing the clear text password for the bind dn', dest='pass_file', default=None)
state_parser.add_argument('-Z', '--cert-dir', help='The certificate database directory for secure connections',
dest='certdir', default=None)
+ state_parser.add_argument('-t', '--timeout', help='The timeout for the LDAP connections. Default is no timeout.',
+ type=int, dest='timeout', default=-1)
# Online mode
online_parser = subparsers.add_parser('online', help="Compare two online replicas for differences")
@@ -1577,6 +1599,8 @@ def main():
online_parser.add_argument('-p', '--page-size', help='The paged-search result grouping size (default 500 entries)',
dest='pagesize', default=500)
online_parser.add_argument('-o', '--out-file', help='The output file', dest='file', default=None)
+ online_parser.add_argument('-t', '--timeout', help='The timeout for the LDAP connections. Default is no timeout.',
+ type=int, dest='timeout', default=-1)
# Offline LDIF mode
offline_parser = subparsers.add_parser('offline', help="Compare two replication LDIF files for differences (LDIF file generated by 'db2ldif -r')")
--
2.26.2


@ -1,428 +0,0 @@
From a1cd3cf8e8b6b33ab21d5338921187a76dd9dcd0 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 22 May 2020 15:41:45 -0400
Subject: [PATCH 07/12] Issue 51110 - Fix ASAN ODR warnings
Description: Fixed ODR issues with global attributes which were duplicated from
the core server into the replication and retrocl plugins.
relates: https://pagure.io/389-ds-base/issue/51110
Reviewed by: firstyear(Thanks!)
---
ldap/servers/plugins/replication/repl5.h | 17 +++---
.../plugins/replication/repl_globals.c | 17 +++---
ldap/servers/plugins/replication/replutil.c | 16 +++---
ldap/servers/plugins/retrocl/retrocl.h | 22 ++++----
ldap/servers/plugins/retrocl/retrocl_cn.c | 12 ++---
ldap/servers/plugins/retrocl/retrocl_po.c | 52 +++++++++----------
ldap/servers/plugins/retrocl/retrocl_trim.c | 30 +++++------
7 files changed, 82 insertions(+), 84 deletions(-)
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 873dd8a16..72b7089e3 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -280,15 +280,14 @@ struct berval *NSDS90StartReplicationRequest_new(const char *protocol_oid,
int multimaster_extop_NSDS50ReplicationEntry(Slapi_PBlock *pb);
/* From repl_globals.c */
-extern char *attr_changenumber;
-extern char *attr_targetdn;
-extern char *attr_changetype;
-extern char *attr_newrdn;
-extern char *attr_deleteoldrdn;
-extern char *attr_changes;
-extern char *attr_newsuperior;
-extern char *attr_changetime;
-extern char *attr_dataversion;
+extern char *repl_changenumber;
+extern char *repl_targetdn;
+extern char *repl_changetype;
+extern char *repl_newrdn;
+extern char *repl_deleteoldrdn;
+extern char *repl_changes;
+extern char *repl_newsuperior;
+extern char *repl_changetime;
extern char *attr_csn;
extern char *changetype_add;
extern char *changetype_delete;
diff --git a/ldap/servers/plugins/replication/repl_globals.c b/ldap/servers/plugins/replication/repl_globals.c
index 355a0ffa1..c615c77da 100644
--- a/ldap/servers/plugins/replication/repl_globals.c
+++ b/ldap/servers/plugins/replication/repl_globals.c
@@ -48,15 +48,14 @@ char *changetype_delete = CHANGETYPE_DELETE;
char *changetype_modify = CHANGETYPE_MODIFY;
char *changetype_modrdn = CHANGETYPE_MODRDN;
char *changetype_moddn = CHANGETYPE_MODDN;
-char *attr_changenumber = ATTR_CHANGENUMBER;
-char *attr_targetdn = ATTR_TARGETDN;
-char *attr_changetype = ATTR_CHANGETYPE;
-char *attr_newrdn = ATTR_NEWRDN;
-char *attr_deleteoldrdn = ATTR_DELETEOLDRDN;
-char *attr_changes = ATTR_CHANGES;
-char *attr_newsuperior = ATTR_NEWSUPERIOR;
-char *attr_changetime = ATTR_CHANGETIME;
-char *attr_dataversion = ATTR_DATAVERSION;
+char *repl_changenumber = ATTR_CHANGENUMBER;
+char *repl_targetdn = ATTR_TARGETDN;
+char *repl_changetype = ATTR_CHANGETYPE;
+char *repl_newrdn = ATTR_NEWRDN;
+char *repl_deleteoldrdn = ATTR_DELETEOLDRDN;
+char *repl_changes = ATTR_CHANGES;
+char *repl_newsuperior = ATTR_NEWSUPERIOR;
+char *repl_changetime = ATTR_CHANGETIME;
char *attr_csn = ATTR_CSN;
char *type_copyingFrom = TYPE_COPYINGFROM;
char *type_copiedFrom = TYPE_COPIEDFROM;
diff --git a/ldap/servers/plugins/replication/replutil.c b/ldap/servers/plugins/replication/replutil.c
index de1e77880..39f821d12 100644
--- a/ldap/servers/plugins/replication/replutil.c
+++ b/ldap/servers/plugins/replication/replutil.c
@@ -64,14 +64,14 @@ get_cleattrs()
{
if (cleattrs[0] == NULL) {
cleattrs[0] = type_objectclass;
- cleattrs[1] = attr_changenumber;
- cleattrs[2] = attr_targetdn;
- cleattrs[3] = attr_changetype;
- cleattrs[4] = attr_newrdn;
- cleattrs[5] = attr_deleteoldrdn;
- cleattrs[6] = attr_changes;
- cleattrs[7] = attr_newsuperior;
- cleattrs[8] = attr_changetime;
+ cleattrs[1] = repl_changenumber;
+ cleattrs[2] = repl_targetdn;
+ cleattrs[3] = repl_changetype;
+ cleattrs[4] = repl_newrdn;
+ cleattrs[5] = repl_deleteoldrdn;
+ cleattrs[6] = repl_changes;
+ cleattrs[7] = repl_newsuperior;
+ cleattrs[8] = repl_changetime;
cleattrs[9] = NULL;
}
return cleattrs;
diff --git a/ldap/servers/plugins/retrocl/retrocl.h b/ldap/servers/plugins/retrocl/retrocl.h
index 06482a14c..2ce76fcec 100644
--- a/ldap/servers/plugins/retrocl/retrocl.h
+++ b/ldap/servers/plugins/retrocl/retrocl.h
@@ -94,17 +94,17 @@ extern int retrocl_nattributes;
extern char **retrocl_attributes;
extern char **retrocl_aliases;
-extern const char *attr_changenumber;
-extern const char *attr_targetdn;
-extern const char *attr_changetype;
-extern const char *attr_newrdn;
-extern const char *attr_newsuperior;
-extern const char *attr_deleteoldrdn;
-extern const char *attr_changes;
-extern const char *attr_changetime;
-extern const char *attr_objectclass;
-extern const char *attr_nsuniqueid;
-extern const char *attr_isreplicated;
+extern const char *retrocl_changenumber;
+extern const char *retrocl_targetdn;
+extern const char *retrocl_changetype;
+extern const char *retrocl_newrdn;
+extern const char *retrocl_newsuperior;
+extern const char *retrocl_deleteoldrdn;
+extern const char *retrocl_changes;
+extern const char *retrocl_changetime;
+extern const char *retrocl_objectclass;
+extern const char *retrocl_nsuniqueid;
+extern const char *retrocl_isreplicated;
extern PRLock *retrocl_internal_lock;
extern Slapi_RWLock *retrocl_cn_lock;
diff --git a/ldap/servers/plugins/retrocl/retrocl_cn.c b/ldap/servers/plugins/retrocl/retrocl_cn.c
index 709d7a857..5fc5f586d 100644
--- a/ldap/servers/plugins/retrocl/retrocl_cn.c
+++ b/ldap/servers/plugins/retrocl/retrocl_cn.c
@@ -62,7 +62,7 @@ handle_cnum_entry(Slapi_Entry *e, void *callback_data)
Slapi_Attr *chattr = NULL;
sval = NULL;
value = NULL;
- if (slapi_entry_attr_find(e, attr_changenumber, &chattr) == 0) {
+ if (slapi_entry_attr_find(e, retrocl_changenumber, &chattr) == 0) {
slapi_attr_first_value(chattr, &sval);
if (NULL != sval) {
value = slapi_value_get_berval(sval);
@@ -79,7 +79,7 @@ handle_cnum_entry(Slapi_Entry *e, void *callback_data)
chattr = NULL;
sval = NULL;
value = NULL;
- if (slapi_entry_attr_find(e, attr_changetime, &chattr) == 0) {
+ if (slapi_entry_attr_find(e, retrocl_changetime, &chattr) == 0) {
slapi_attr_first_value(chattr, &sval);
if (NULL != sval) {
value = slapi_value_get_berval(sval);
@@ -134,7 +134,7 @@ retrocl_get_changenumbers(void)
cr.cr_time = 0;
slapi_seq_callback(RETROCL_CHANGELOG_DN, SLAPI_SEQ_FIRST,
- (char *)attr_changenumber, /* cast away const */
+ (char *)retrocl_changenumber, /* cast away const */
NULL, NULL, 0, &cr, NULL, handle_cnum_result,
handle_cnum_entry, NULL);
@@ -144,7 +144,7 @@ retrocl_get_changenumbers(void)
slapi_ch_free((void **)&cr.cr_time);
slapi_seq_callback(RETROCL_CHANGELOG_DN, SLAPI_SEQ_LAST,
- (char *)attr_changenumber, /* cast away const */
+ (char *)retrocl_changenumber, /* cast away const */
NULL, NULL, 0, &cr, NULL, handle_cnum_result,
handle_cnum_entry, NULL);
@@ -185,7 +185,7 @@ retrocl_getchangetime(int type, int *err)
return NO_TIME;
}
slapi_seq_callback(RETROCL_CHANGELOG_DN, type,
- (char *)attr_changenumber, /* cast away const */
+ (char *)retrocl_changenumber, /* cast away const */
NULL,
NULL, 0, &cr, NULL,
handle_cnum_result, handle_cnum_entry, NULL);
@@ -353,7 +353,7 @@ retrocl_update_lastchangenumber(void)
cr.cr_cnum = 0;
cr.cr_time = 0;
slapi_seq_callback(RETROCL_CHANGELOG_DN, SLAPI_SEQ_LAST,
- (char *)attr_changenumber, /* cast away const */
+ (char *)retrocl_changenumber, /* cast away const */
NULL, NULL, 0, &cr, NULL, handle_cnum_result,
handle_cnum_entry, NULL);
diff --git a/ldap/servers/plugins/retrocl/retrocl_po.c b/ldap/servers/plugins/retrocl/retrocl_po.c
index d2af79b31..e1488f56b 100644
--- a/ldap/servers/plugins/retrocl/retrocl_po.c
+++ b/ldap/servers/plugins/retrocl/retrocl_po.c
@@ -25,17 +25,17 @@ modrdn2reple(Slapi_Entry *e, const char *newrdn, int deloldrdn, LDAPMod **ldm, c
/******************************/
-const char *attr_changenumber = "changenumber";
-const char *attr_targetdn = "targetdn";
-const char *attr_changetype = "changetype";
-const char *attr_newrdn = "newrdn";
-const char *attr_deleteoldrdn = "deleteoldrdn";
-const char *attr_changes = "changes";
-const char *attr_newsuperior = "newsuperior";
-const char *attr_changetime = "changetime";
-const char *attr_objectclass = "objectclass";
-const char *attr_nsuniqueid = "nsuniqueid";
-const char *attr_isreplicated = "isreplicated";
+const char *retrocl_changenumber = "changenumber";
+const char *retrocl_targetdn = "targetdn";
+const char *retrocl_changetype = "changetype";
+const char *retrocl_newrdn = "newrdn";
+const char *retrocl_deleteoldrdn = "deleteoldrdn";
+const char *retrocl_changes = "changes";
+const char *retrocl_newsuperior = "newsuperior";
+const char *retrocl_changetime = "changetime";
+const char *retrocl_objectclass = "objectclass";
+const char *retrocl_nsuniqueid = "nsuniqueid";
+const char *retrocl_isreplicated = "isreplicated";
/*
* Function: make_changes_string
@@ -185,7 +185,7 @@ write_replog_db(
changenum, dn);
/* Construct the dn of this change record */
- edn = slapi_ch_smprintf("%s=%lu,%s", attr_changenumber, changenum, RETROCL_CHANGELOG_DN);
+ edn = slapi_ch_smprintf("%s=%lu,%s", retrocl_changenumber, changenum, RETROCL_CHANGELOG_DN);
/*
* Create the entry struct, and fill in fields common to all types
@@ -214,7 +214,7 @@ write_replog_db(
attributeAlias = attributeName;
}
- if (strcasecmp(attributeName, attr_nsuniqueid) == 0) {
+ if (strcasecmp(attributeName, retrocl_nsuniqueid) == 0) {
Slapi_Entry *entry = NULL;
const char *uniqueId = NULL;
@@ -236,7 +236,7 @@ write_replog_db(
extensibleObject = 1;
- } else if (strcasecmp(attributeName, attr_isreplicated) == 0) {
+ } else if (strcasecmp(attributeName, retrocl_isreplicated) == 0) {
int isReplicated = 0;
char *attributeValue = NULL;
@@ -298,17 +298,17 @@ write_replog_db(
sprintf(chnobuf, "%lu", changenum);
val.bv_val = chnobuf;
val.bv_len = strlen(chnobuf);
- slapi_entry_add_values(e, attr_changenumber, vals);
+ slapi_entry_add_values(e, retrocl_changenumber, vals);
/* Set the targetentrydn attribute */
val.bv_val = dn;
val.bv_len = strlen(dn);
- slapi_entry_add_values(e, attr_targetdn, vals);
+ slapi_entry_add_values(e, retrocl_targetdn, vals);
/* Set the changeTime attribute */
val.bv_val = format_genTime(curtime);
val.bv_len = strlen(val.bv_val);
- slapi_entry_add_values(e, attr_changetime, vals);
+ slapi_entry_add_values(e, retrocl_changetime, vals);
slapi_ch_free((void **)&val.bv_val);
/*
@@ -344,7 +344,7 @@ write_replog_db(
/* Set the changetype attribute */
val.bv_val = "delete";
val.bv_len = 6;
- slapi_entry_add_values(e, attr_changetype, vals);
+ slapi_entry_add_values(e, retrocl_changetype, vals);
}
break;
@@ -422,7 +422,7 @@ entry2reple(Slapi_Entry *e, Slapi_Entry *oe, int optype)
} else {
return (1);
}
- slapi_entry_add_values(e, attr_changetype, vals);
+ slapi_entry_add_values(e, retrocl_changetype, vals);
estr = slapi_entry2str(oe, &len);
p = estr;
@@ -435,7 +435,7 @@ entry2reple(Slapi_Entry *e, Slapi_Entry *oe, int optype)
}
val.bv_val = p;
val.bv_len = len - (p - estr); /* length + terminating \0 */
- slapi_entry_add_values(e, attr_changes, vals);
+ slapi_entry_add_values(e, retrocl_changes, vals);
slapi_ch_free_string(&estr);
return 0;
}
@@ -471,7 +471,7 @@ mods2reple(Slapi_Entry *e, LDAPMod **ldm)
if (NULL != l) {
val.bv_val = l->ls_buf;
val.bv_len = l->ls_len + 1; /* string + terminating \0 */
- slapi_entry_add_values(e, attr_changes, vals);
+ slapi_entry_add_values(e, retrocl_changes, vals);
lenstr_free(&l);
}
}
@@ -511,12 +511,12 @@ modrdn2reple(
val.bv_val = "modrdn";
val.bv_len = 6;
- slapi_entry_add_values(e, attr_changetype, vals);
+ slapi_entry_add_values(e, retrocl_changetype, vals);
if (newrdn) {
val.bv_val = (char *)newrdn; /* cast away const */
val.bv_len = strlen(newrdn);
- slapi_entry_add_values(e, attr_newrdn, vals);
+ slapi_entry_add_values(e, retrocl_newrdn, vals);
}
if (deloldrdn == 0) {
@@ -526,12 +526,12 @@ modrdn2reple(
val.bv_val = "TRUE";
val.bv_len = 4;
}
- slapi_entry_add_values(e, attr_deleteoldrdn, vals);
+ slapi_entry_add_values(e, retrocl_deleteoldrdn, vals);
if (newsuperior) {
val.bv_val = (char *)newsuperior; /* cast away const */
val.bv_len = strlen(newsuperior);
- slapi_entry_add_values(e, attr_newsuperior, vals);
+ slapi_entry_add_values(e, retrocl_newsuperior, vals);
}
if (NULL != ldm) {
@@ -540,7 +540,7 @@ modrdn2reple(
if (l->ls_len) {
val.bv_val = l->ls_buf;
val.bv_len = l->ls_len;
- slapi_entry_add_values(e, attr_changes, vals);
+ slapi_entry_add_values(e, retrocl_changes, vals);
}
lenstr_free(&l);
}
diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c
index 0378eb7f6..d031dc3f8 100644
--- a/ldap/servers/plugins/retrocl/retrocl_trim.c
+++ b/ldap/servers/plugins/retrocl/retrocl_trim.c
@@ -49,15 +49,15 @@ static const char **
get_cleattrs(void)
{
if (cleattrs[0] == NULL) {
- cleattrs[0] = attr_objectclass;
- cleattrs[1] = attr_changenumber;
- cleattrs[2] = attr_targetdn;
- cleattrs[3] = attr_changetype;
- cleattrs[4] = attr_newrdn;
- cleattrs[5] = attr_deleteoldrdn;
- cleattrs[6] = attr_changes;
- cleattrs[7] = attr_newsuperior;
- cleattrs[8] = attr_changetime;
+ cleattrs[0] = retrocl_objectclass;
+ cleattrs[1] = retrocl_changenumber;
+ cleattrs[2] = retrocl_targetdn;
+ cleattrs[3] = retrocl_changetype;
+ cleattrs[4] = retrocl_newrdn;
+ cleattrs[5] = retrocl_deleteoldrdn;
+ cleattrs[6] = retrocl_changes;
+ cleattrs[7] = retrocl_newsuperior;
+ cleattrs[8] = retrocl_changetime;
cleattrs[9] = NULL;
}
return cleattrs;
@@ -81,7 +81,7 @@ delete_changerecord(changeNumber cnum)
char *dnbuf;
int delrc;
- dnbuf = slapi_ch_smprintf("%s=%ld, %s", attr_changenumber, cnum,
+ dnbuf = slapi_ch_smprintf("%s=%ld, %s", retrocl_changenumber, cnum,
RETROCL_CHANGELOG_DN);
pb = slapi_pblock_new();
slapi_delete_internal_set_pb(pb, dnbuf, NULL /*controls*/, NULL /* uniqueid */,
@@ -154,7 +154,7 @@ handle_getchangetime_search(Slapi_Entry *e, void *callback_data)
if (NULL != e) {
Slapi_Value *sval = NULL;
const struct berval *val = NULL;
- rc = slapi_entry_attr_find(e, attr_changetime, &attr);
+ rc = slapi_entry_attr_find(e, retrocl_changetime, &attr);
/* Bug 624442: Logic checking for lack of timestamp was
reversed. */
if (0 != rc || slapi_attr_first_value(attr, &sval) == -1 ||
@@ -174,14 +174,14 @@ handle_getchangetime_search(Slapi_Entry *e, void *callback_data)
/*
* Function: get_changetime
* Arguments: cnum - number of change record to retrieve
- * Returns: Taking the attr_changetime of the 'cnum' entry,
+ * Returns: Taking the retrocl_changetime of the 'cnum' entry,
* it converts it into time_t (parse_localTime) and returns this time value.
* It returns 0 in the following cases:
- * - changerecord entry has not attr_changetime
+ * - changerecord entry has no retrocl_changetime
* - attr_changetime attribute has no value
* - attr_changetime attribute value is empty
*
- * Description: Retrieve attr_changetime ("changetime") from a changerecord whose number is "cnum".
+ * Description: Retrieve retrocl_changetime ("changetime") from a changerecord whose number is "cnum".
*/
static time_t
get_changetime(changeNumber cnum, int *err)
@@ -198,7 +198,7 @@ get_changetime(changeNumber cnum, int *err)
}
crtp->crt_nentries = crtp->crt_err = 0;
crtp->crt_time = 0;
- PR_snprintf(fstr, sizeof(fstr), "%s=%ld", attr_changenumber, cnum);
+ PR_snprintf(fstr, sizeof(fstr), "%s=%ld", retrocl_changenumber, cnum);
pb = slapi_pblock_new();
slapi_search_internal_set_pb(pb, RETROCL_CHANGELOG_DN,
--
2.26.2

View File

@ -1,466 +0,0 @@
From 8d14ff153e9335b09739438344f9c3c78a496548 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 22 May 2020 10:42:11 -0400
Subject: [PATCH 08/12] Issue 51095 - abort operation if CSN can not be
generated
Bug Description: If we fail to get the system time, we end up using an
uninitialized timespec struct, which can lead to bizarre
times in CSNs.
Fix description: Check if the system time function fails, and if it does
then abort the update operation.
relates: https://pagure.io/389-ds-base/issue/51095
Reviewed by: firstyear & tbordaz(Thanks!!)
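
For context, here is a minimal standalone sketch of the check-and-abort pattern this patch introduces (assuming only POSIX clock_gettime(); the helper name and messages below are illustrative, not the server's own API):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

/* Return 0 on success, otherwise the errno from clock_gettime(), so the
 * caller can abort the operation instead of continuing with an
 * uninitialized timespec. */
static int32_t get_realtime_checked(struct timespec *tp)
{
    if (clock_gettime(CLOCK_REALTIME, tp) != 0) {
        return errno;
    }
    return 0;
}

int main(void)
{
    struct timespec now = {0};   /* zero-init so a failure never leaves garbage */
    int32_t rc = get_realtime_checked(&now);

    if (rc != 0) {
        fprintf(stderr, "aborting update: cannot read system time: %s\n",
                strerror(rc));
        return 1;
    }
    printf("seconds since epoch: %ld\n", (long)now.tv_sec);
    return 0;
}

The patch below wires the same idea through csngen_new_csn() and the ldbm_back_* callers, returning CSN_TIME_ERROR / LDAP_OPERATIONS_ERROR rather than proceeding with a bogus CSN.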
---
ldap/servers/plugins/replication/repl5.h | 2 +-
.../plugins/replication/repl5_replica.c | 33 ++++++++------
ldap/servers/slapd/back-ldbm/ldbm_add.c | 8 +++-
ldap/servers/slapd/back-ldbm/ldbm_delete.c | 9 +++-
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 10 ++++-
ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 8 +++-
ldap/servers/slapd/csngen.c | 18 +++++++-
ldap/servers/slapd/entrywsi.c | 15 ++++---
ldap/servers/slapd/slap.h | 2 +-
ldap/servers/slapd/slapi-plugin.h | 8 ++++
ldap/servers/slapd/slapi-private.h | 5 ++-
ldap/servers/slapd/time.c | 43 +++++++++++++------
12 files changed, 118 insertions(+), 43 deletions(-)
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 72b7089e3..638471744 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -776,7 +776,7 @@ void replica_disable_replication(Replica *r);
int replica_start_agreement(Replica *r, Repl_Agmt *ra);
int windows_replica_start_agreement(Replica *r, Repl_Agmt *ra);
-CSN *replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn);
+int32_t replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn, CSN **opcsn);
int replica_get_attr(Slapi_PBlock *pb, const char *type, void *value);
/* mapping tree extensions manipulation */
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index 02caa88d9..f01782330 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -3931,11 +3931,9 @@ windows_replica_start_agreement(Replica *r, Repl_Agmt *ra)
* A callback function registered as op->o_csngen_handler and
* called by backend ops to generate opcsn.
*/
-CSN *
-replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn)
+int32_t
+replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn, CSN **opcsn)
{
- CSN *opcsn = NULL;
-
Replica *replica = replica_get_replica_for_op(pb);
if (NULL != replica) {
Slapi_Operation *op;
@@ -3946,17 +3944,26 @@ replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn)
CSNGen *gen = (CSNGen *)object_get_data(gen_obj);
if (NULL != gen) {
/* The new CSN should be greater than the base CSN */
- csngen_new_csn(gen, &opcsn, PR_FALSE /* don't notify */);
- if (csn_compare(opcsn, basecsn) <= 0) {
- char opcsnstr[CSN_STRSIZE], basecsnstr[CSN_STRSIZE];
+ if (csngen_new_csn(gen, opcsn, PR_FALSE /* don't notify */) != CSN_SUCCESS) {
+ /* Failed to generate CSN we must abort */
+ object_release(gen_obj);
+ return -1;
+ }
+ if (csn_compare(*opcsn, basecsn) <= 0) {
+ char opcsnstr[CSN_STRSIZE];
+ char basecsnstr[CSN_STRSIZE];
char opcsn2str[CSN_STRSIZE];
- csn_as_string(opcsn, PR_FALSE, opcsnstr);
+ csn_as_string(*opcsn, PR_FALSE, opcsnstr);
csn_as_string(basecsn, PR_FALSE, basecsnstr);
- csn_free(&opcsn);
+ csn_free(opcsn);
csngen_adjust_time(gen, basecsn);
- csngen_new_csn(gen, &opcsn, PR_FALSE /* don't notify */);
- csn_as_string(opcsn, PR_FALSE, opcsn2str);
+ if (csngen_new_csn(gen, opcsn, PR_FALSE) != CSN_SUCCESS) {
+ /* Failed to generate CSN we must abort */
+ object_release(gen_obj);
+ return -1;
+ }
+ csn_as_string(*opcsn, PR_FALSE, opcsn2str);
slapi_log_err(SLAPI_LOG_WARNING, repl_plugin_name,
"replica_generate_next_csn - "
"opcsn=%s <= basecsn=%s, adjusted opcsn=%s\n",
@@ -3966,14 +3973,14 @@ replica_generate_next_csn(Slapi_PBlock *pb, const CSN *basecsn)
* Insert opcsn into the csn pending list.
* This is the notify effect in csngen_new_csn().
*/
- assign_csn_callback(opcsn, (void *)replica);
+ assign_csn_callback(*opcsn, (void *)replica);
}
object_release(gen_obj);
}
}
}
- return opcsn;
+ return 0;
}
/*
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
index d0d88bf16..ee366c74c 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
@@ -645,7 +645,13 @@ ldbm_back_add(Slapi_PBlock *pb)
* Current op is a user request. Opcsn will be assigned
* if the dn is in an updatable replica.
*/
- opcsn = entry_assign_operation_csn(pb, e, parententry ? parententry->ep_entry : NULL);
+ if (entry_assign_operation_csn(pb, e, parententry ? parententry->ep_entry : NULL, &opcsn) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_add",
+ "failed to generate add CSN for entry (%s), aborting operation\n",
+ slapi_entry_get_dn(e));
+ ldap_result_code = LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
}
if (opcsn != NULL) {
entry_set_csn(e, opcsn);
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index 873b5b00e..fbcb57310 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -464,7 +464,14 @@ replace_entry:
* by entry_assign_operation_csn() if the dn is in an
* updatable replica.
*/
- opcsn = entry_assign_operation_csn ( pb, e->ep_entry, NULL );
+ if (entry_assign_operation_csn(pb, e->ep_entry, NULL, &opcsn) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_delete",
+ "failed to generate delete CSN for entry (%s), aborting operation\n",
+ slapi_entry_get_dn(e->ep_entry));
+ retval = -1;
+ ldap_result_code = LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
}
if (opcsn != NULL) {
if (!is_fixup_operation) {
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index b0c477e3f..e9d7e87e3 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -598,12 +598,18 @@ ldbm_back_modify(Slapi_PBlock *pb)
goto error_return;
}
opcsn = operation_get_csn(operation);
- if (NULL == opcsn && operation->o_csngen_handler) {
+ if (opcsn == NULL && operation->o_csngen_handler) {
/*
* Current op is a user request. Opcsn will be assigned
* if the dn is in an updatable replica.
*/
- opcsn = entry_assign_operation_csn(pb, e->ep_entry, NULL);
+ if (entry_assign_operation_csn(pb, e->ep_entry, NULL, &opcsn) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modify",
+ "failed to generate modify CSN for entry (%s), aborting operation\n",
+ slapi_entry_get_dn(e->ep_entry));
+ ldap_result_code = LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
}
if (opcsn) {
entry_set_maxcsn(e->ep_entry, opcsn);
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
index 26698012a..fde83c99f 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
@@ -543,7 +543,13 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
* Current op is a user request. Opcsn will be assigned
* if the dn is in an updatable replica.
*/
- opcsn = entry_assign_operation_csn(pb, e->ep_entry, parententry ? parententry->ep_entry : NULL);
+ if (entry_assign_operation_csn(pb, e->ep_entry, parententry ? parententry->ep_entry : NULL, &opcsn) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modrdn",
+ "failed to generate modrdn CSN for entry (%s), aborting operation\n",
+ slapi_entry_get_dn(e->ep_entry));
+ ldap_result_code = LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
}
if (opcsn != NULL) {
entry_set_maxcsn(e->ep_entry, opcsn);
diff --git a/ldap/servers/slapd/csngen.c b/ldap/servers/slapd/csngen.c
index 68dbbda8e..b08d8b25c 100644
--- a/ldap/servers/slapd/csngen.c
+++ b/ldap/servers/slapd/csngen.c
@@ -164,6 +164,7 @@ csngen_free(CSNGen **gen)
int
csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
{
+ struct timespec now = {0};
int rc = CSN_SUCCESS;
time_t cur_time;
int delta;
@@ -179,12 +180,25 @@ csngen_new_csn(CSNGen *gen, CSN **csn, PRBool notify)
return CSN_MEMORY_ERROR;
}
- slapi_rwlock_wrlock(gen->lock);
+ if ((rc = slapi_clock_gettime(&now)) != 0) {
+ /* Failed to get system time, we must abort */
+ slapi_log_err(SLAPI_LOG_ERR, "csngen_new_csn",
+ "Failed to get system time (%s)\n",
+ slapd_system_strerror(rc));
+ return CSN_TIME_ERROR;
+ }
+ cur_time = now.tv_sec;
- cur_time = slapi_current_utc_time();
+ slapi_rwlock_wrlock(gen->lock);
/* check if the time should be adjusted */
delta = cur_time - gen->state.sampled_time;
+ if (delta > _SEC_PER_DAY || delta < (-1 * _SEC_PER_DAY)) {
+ /* We had a jump larger than a day */
+ slapi_log_err(SLAPI_LOG_INFO, "csngen_new_csn",
+ "Detected large jump in CSN time. Delta: %d (current time: %ld vs previous time: %ld)\n",
+ delta, cur_time, gen->state.sampled_time);
+ }
if (delta > 0) {
rc = _csngen_adjust_local_time(gen, cur_time);
if (rc != CSN_SUCCESS) {
diff --git a/ldap/servers/slapd/entrywsi.c b/ldap/servers/slapd/entrywsi.c
index 5d1d7238a..31bf65d8e 100644
--- a/ldap/servers/slapd/entrywsi.c
+++ b/ldap/servers/slapd/entrywsi.c
@@ -224,13 +224,12 @@ entry_add_rdn_csn(Slapi_Entry *e, const CSN *csn)
slapi_rdn_free(&rdn);
}
-CSN *
-entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry)
+int32_t
+entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry, CSN **opcsn)
{
Slapi_Operation *op;
const CSN *basecsn = NULL;
const CSN *parententry_dncsn = NULL;
- CSN *opcsn = NULL;
slapi_pblock_get(pb, SLAPI_OPERATION, &op);
@@ -252,14 +251,16 @@ entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parent
basecsn = parententry_dncsn;
}
}
- opcsn = op->o_csngen_handler(pb, basecsn);
+ if(op->o_csngen_handler(pb, basecsn, opcsn) != 0) {
+ return -1;
+ }
- if (NULL != opcsn) {
- operation_set_csn(op, opcsn);
+ if (*opcsn) {
+ operation_set_csn(op, *opcsn);
}
}
- return opcsn;
+ return 0;
}
/*
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index a4cae784a..cef8c789c 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1480,7 +1480,7 @@ struct op;
typedef void (*result_handler)(struct conn *, struct op *, int, char *, char *, int, struct berval **);
typedef int (*search_entry_handler)(Slapi_Backend *, struct conn *, struct op *, struct slapi_entry *);
typedef int (*search_referral_handler)(Slapi_Backend *, struct conn *, struct op *, struct berval **);
-typedef CSN *(*csngen_handler)(Slapi_PBlock *pb, const CSN *basecsn);
+typedef int32_t *(*csngen_handler)(Slapi_PBlock *pb, const CSN *basecsn, CSN **opcsn);
typedef int (*replica_attr_handler)(Slapi_PBlock *pb, const char *type, void **value);
/*
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index be1e52e4d..834a98742 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -6743,6 +6743,14 @@ int slapi_reslimit_get_integer_limit(Slapi_Connection *conn, int handle, int *li
*/
time_t slapi_current_time(void) __attribute__((deprecated));
+/**
+ * Get the system time and check for errors. Return
+ *
+ * \param tp - a timespec struct where the system time is set
+ * \return result code, upon success tp is set to the system time
+ */
+int32_t slapi_clock_gettime(struct timespec *tp);
+
/**
* Returns the current system time as a hr clock relative to uptime
* This means the clock is not affected by timezones
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index d85ee43e5..c98c1947c 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -233,7 +233,8 @@ enum
CSN_INVALID_PARAMETER, /* invalid function argument */
CSN_INVALID_FORMAT, /* invalid state format */
CSN_LDAP_ERROR, /* LDAP operation failed */
- CSN_NSPR_ERROR /* NSPR API failure */
+ CSN_NSPR_ERROR, /* NSPR API failure */
+ CSN_TIME_ERROR /* Error generating new CSN due to clock failure */
};
typedef struct csngen CSNGen;
@@ -326,7 +327,7 @@ int slapi_entries_diff(Slapi_Entry **old_entries, Slapi_Entry **new_entries, int
void set_attr_to_protected_list(char *attr, int flag);
/* entrywsi.c */
-CSN *entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry);
+int32_t entry_assign_operation_csn(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry, CSN **opcsn);
const CSN *entry_get_maxcsn(const Slapi_Entry *entry);
void entry_set_maxcsn(Slapi_Entry *entry, const CSN *csn);
const CSN *entry_get_dncsn(const Slapi_Entry *entry);
diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c
index 8048a3359..545538404 100644
--- a/ldap/servers/slapd/time.c
+++ b/ldap/servers/slapd/time.c
@@ -61,6 +61,25 @@ poll_current_time()
return 0;
}
+/*
+ * Check if the time function returns an error. If so return the errno
+ */
+int32_t
+slapi_clock_gettime(struct timespec *tp)
+{
+ int32_t rc = 0;
+
+ PR_ASSERT(tp && tp->tv_nsec == 0 && tp->tv_sec == 0);
+
+ if (clock_gettime(CLOCK_REALTIME, tp) != 0) {
+ rc = errno;
+ }
+
+ PR_ASSERT(rc == 0);
+
+ return rc;
+}
+
time_t
current_time(void)
{
@@ -69,7 +88,7 @@ current_time(void)
* but this should be removed in favour of the
* more accurately named slapi_current_utc_time
*/
- struct timespec now;
+ struct timespec now = {0};
clock_gettime(CLOCK_REALTIME, &now);
return now.tv_sec;
}
@@ -83,7 +102,7 @@ slapi_current_time(void)
struct timespec
slapi_current_rel_time_hr(void)
{
- struct timespec now;
+ struct timespec now = {0};
clock_gettime(CLOCK_MONOTONIC, &now);
return now;
}
@@ -91,7 +110,7 @@ slapi_current_rel_time_hr(void)
struct timespec
slapi_current_utc_time_hr(void)
{
- struct timespec ltnow;
+ struct timespec ltnow = {0};
clock_gettime(CLOCK_REALTIME, &ltnow);
return ltnow;
}
@@ -99,7 +118,7 @@ slapi_current_utc_time_hr(void)
time_t
slapi_current_utc_time(void)
{
- struct timespec ltnow;
+ struct timespec ltnow = {0};
clock_gettime(CLOCK_REALTIME, &ltnow);
return ltnow.tv_sec;
}
@@ -108,8 +127,8 @@ void
slapi_timestamp_utc_hr(char *buf, size_t bufsize)
{
PR_ASSERT(bufsize >= SLAPI_TIMESTAMP_BUFSIZE);
- struct timespec ltnow;
- struct tm utctm;
+ struct timespec ltnow = {0};
+ struct tm utctm = {0};
clock_gettime(CLOCK_REALTIME, &ltnow);
gmtime_r(&(ltnow.tv_sec), &utctm);
strftime(buf, bufsize, "%Y%m%d%H%M%SZ", &utctm);
@@ -140,7 +159,7 @@ format_localTime_log(time_t t, int initsize __attribute__((unused)), char *buf,
{
long tz;
- struct tm *tmsp, tms;
+ struct tm *tmsp, tms = {0};
char tbuf[*bufsize];
char sign;
/* make sure our buffer will be big enough. Need at least 29 */
@@ -191,7 +210,7 @@ format_localTime_hr_log(time_t t, long nsec, int initsize __attribute__((unused)
{
long tz;
- struct tm *tmsp, tms;
+ struct tm *tmsp, tms = {0};
char tbuf[*bufsize];
char sign;
/* make sure our buffer will be big enough. Need at least 39 */
@@ -278,7 +297,7 @@ slapi_timespec_expire_check(struct timespec *expire)
if (expire->tv_sec == 0 && expire->tv_nsec == 0) {
return TIMER_CONTINUE;
}
- struct timespec now;
+ struct timespec now = {0};
clock_gettime(CLOCK_MONOTONIC, &now);
if (now.tv_sec > expire->tv_sec ||
(expire->tv_sec == now.tv_sec && now.tv_sec > expire->tv_nsec)) {
@@ -293,7 +312,7 @@ format_localTime(time_t from)
in the syntax of a generalizedTime, except without the time zone. */
{
char *into;
- struct tm t;
+ struct tm t = {0};
localtime_r(&from, &t);
@@ -362,7 +381,7 @@ format_genTime(time_t from)
in the syntax of a generalizedTime. */
{
char *into;
- struct tm t;
+ struct tm t = {0};
gmtime_r(&from, &t);
into = slapi_ch_malloc(SLAPI_TIMESTAMP_BUFSIZE);
@@ -382,7 +401,7 @@ time_t
read_genTime(struct berval *from)
{
struct tm t = {0};
- time_t retTime;
+ time_t retTime = {0};
time_t diffsec = 0;
int i, gflag = 0, havesec = 0;
--
2.26.2

View File

@ -1,179 +0,0 @@
From 52ce524f7672563b543e84401665765cfa72dea5 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 26 May 2020 17:03:11 -0400
Subject: [PATCH 09/12] Issue 51113 - Allow using uid for replication manager
entry
Bug Description: Currently it is hardcoded to only allow "cn" as
the rdn attribute for the replication manager entry.
Fix description: Allow setting the rdn attribute of the replication
manager DS ldap object, and include the schema that
allows "uid".
relates: https://pagure.io/389-ds-base/issue/51113
Reviewed by: spichugi & firstyear(Thanks!!)
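
As a rough illustration of the new restriction (the shipped change is the Python in lib389 shown below; this C re-implementation, its helper name, and the sample DNs are assumptions for illustration only), the RDN attribute of the supplied DN is compared against the short allow-list of cn and uid:

#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Accept a replication manager DN only if its RDN attribute is cn or uid. */
static int rdn_attr_allowed(const char *dn)
{
    const char *eq = strchr(dn, '=');      /* end of the RDN attribute name */
    if (eq == NULL)
        return 0;
    size_t len = (size_t)(eq - dn);
    return (len == 2 && strncasecmp(dn, "cn", 2) == 0) ||
           (len == 3 && strncasecmp(dn, "uid", 3) == 0);
}

int main(void)
{
    const char *dns[] = {
        "cn=replication manager,cn=config",
        "uid=repl-mgr,cn=config",
        "ou=repl manager,cn=config",
    };
    for (size_t i = 0; i < sizeof(dns) / sizeof(dns[0]); i++)
        printf("%-40s %s\n", dns[i],
               rdn_attr_allowed(dns[i]) ? "accepted" : "rejected");
    return 0;
}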
---
src/lib389/lib389/cli_conf/replication.py | 53 ++++++++++++-----------
src/lib389/lib389/replica.py | 11 +++--
2 files changed, 35 insertions(+), 29 deletions(-)
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
index 09cb9b435..b9bc3d291 100644
--- a/src/lib389/lib389/cli_conf/replication.py
+++ b/src/lib389/lib389/cli_conf/replication.py
@@ -199,19 +199,21 @@ def enable_replication(inst, basedn, log, args):
# Create replication manager if password was provided
if args.bind_dn and args.bind_passwd:
- cn_rdn = args.bind_dn.split(",", 1)[0]
- cn_val = cn_rdn.split("=", 1)[1]
- manager = BootstrapReplicationManager(inst, dn=args.bind_dn)
+ rdn = args.bind_dn.split(",", 1)[0]
+ rdn_attr, rdn_val = rdn.split("=", 1)
+ manager = BootstrapReplicationManager(inst, dn=args.bind_dn, rdn_attr=rdn_attr)
try:
manager.create(properties={
- 'cn': cn_val,
+ 'cn': rdn_val,
+ 'uid': rdn_val,
'userPassword': args.bind_passwd
})
except ldap.ALREADY_EXISTS:
# Already there, but could have different password. Delete and recreate
manager.delete()
manager.create(properties={
- 'cn': cn_val,
+ 'cn': rdn_val,
+ 'uid': rdn_val,
'userPassword': args.bind_passwd
})
except ldap.NO_SUCH_OBJECT:
@@ -511,22 +513,23 @@ def get_cl(inst, basedn, log, args):
def create_repl_manager(inst, basedn, log, args):
- manager_cn = "replication manager"
+ manager_name = "replication manager"
repl_manager_password = ""
repl_manager_password_confirm = ""
if args.name:
- manager_cn = args.name
-
- if is_a_dn(manager_cn):
- # A full DN was provided, make sure it uses "cn" for the RDN
- if manager_cn.split("=", 1)[0].lower() != "cn":
- raise ValueError("Replication manager DN must use \"cn\" for the rdn attribute")
- manager_dn = manager_cn
- manager_rdn = manager_dn.split(",", 1)[0]
- manager_cn = manager_rdn.split("=", 1)[1]
+ manager_name = args.name
+
+ if is_a_dn(manager_name):
+ # A full DN was provided
+ manager_dn = manager_name
+ manager_rdn = manager_name.split(",", 1)[0]
+ manager_attr, manager_name = manager_rdn.split("=", 1)
+ if manager_attr.lower() not in ['cn', 'uid']:
+ raise ValueError(f'The RDN attribute "{manager_attr}" is not allowed, you must use "cn" or "uid"')
else:
- manager_dn = "cn={},cn=config".format(manager_cn)
+ manager_dn = "cn={},cn=config".format(manager_name)
+ manager_attr = "cn"
if args.passwd:
repl_manager_password = args.passwd
@@ -544,10 +547,11 @@ def create_repl_manager(inst, basedn, log, args):
repl_manager_password = ""
repl_manager_password_confirm = ""
- manager = BootstrapReplicationManager(inst, dn=manager_dn)
+ manager = BootstrapReplicationManager(inst, dn=manager_dn, rdn_attr=manager_attr)
try:
manager.create(properties={
- 'cn': manager_cn,
+ 'cn': manager_name,
+ 'uid': manager_name,
'userPassword': repl_manager_password
})
if args.suffix:
@@ -564,7 +568,8 @@ def create_repl_manager(inst, basedn, log, args):
# Already there, but could have different password. Delete and recreate
manager.delete()
manager.create(properties={
- 'cn': manager_cn,
+ 'cn': manager_name,
+ 'uid': manager_name,
'userPassword': repl_manager_password
})
if args.suffix:
@@ -954,6 +959,7 @@ def get_winsync_agmt_status(inst, basedn, log, args):
status = agmt.status(winsync=True, use_json=args.json)
log.info(status)
+
#
# Tasks
#
@@ -1347,8 +1353,7 @@ def create_parser(subparsers):
agmt_set_parser.add_argument('--wait-async-results', help="The amount of time in milliseconds the server waits if "
"the consumer is not ready before resending data")
agmt_set_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after "
- "a consumer sends back a busy response before making another "
- "attempt to acquire access.")
+ "a consumer sends back a busy response before making another attempt to acquire access.")
agmt_set_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.")
agmt_set_parser.add_argument('--flow-control-window', help="Sets the maximum number of entries and updates sent by a supplier, which are not acknowledged by the consumer.")
agmt_set_parser.add_argument('--flow-control-pause', help="The time in milliseconds to pause after reaching the number of entries and updates set in \"--flow-control-window\"")
@@ -1438,8 +1443,7 @@ def create_parser(subparsers):
winsync_agmt_add_parser.add_argument('--subtree-pair', help="Set the subtree pair: <DS_SUBTREE>:<WINDOWS_SUBTREE>")
winsync_agmt_add_parser.add_argument('--conn-timeout', help="The timeout used for replicaton connections")
winsync_agmt_add_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after "
- "a consumer sends back a busy response before making another "
- "attempt to acquire access.")
+ "a consumer sends back a busy response before making another attempt to acquire access.")
winsync_agmt_add_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.")
winsync_agmt_add_parser.add_argument('--init', action='store_true', default=False, help="Initialize the agreement after creating it.")
@@ -1468,8 +1472,7 @@ def create_parser(subparsers):
winsync_agmt_set_parser.add_argument('--subtree-pair', help="Set the subtree pair: <DS_SUBTREE>:<WINDOWS_SUBTREE>")
winsync_agmt_set_parser.add_argument('--conn-timeout', help="The timeout used for replicaton connections")
winsync_agmt_set_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after "
- "a consumer sends back a busy response before making another "
- "attempt to acquire access.")
+ "a consumer sends back a busy response before making another attempt to acquire access.")
winsync_agmt_set_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.")
# Get
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index e3fc7fe1f..f8adb3ce2 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -1779,15 +1779,18 @@ class BootstrapReplicationManager(DSLdapObject):
:type instance: lib389.DirSrv
:param dn: The dn to create
:type dn: str
+ :param rdn_attr: The attribute to use for the RDN
+ :type rdn_attr: str
"""
- def __init__(self, instance, dn='cn=replication manager,cn=config'):
+ def __init__(self, instance, dn='cn=replication manager,cn=config', rdn_attr='cn'):
super(BootstrapReplicationManager, self).__init__(instance, dn)
- self._rdn_attribute = 'cn'
+ self._rdn_attribute = rdn_attr
self._must_attributes = ['cn', 'userPassword']
self._create_objectclasses = [
'top',
- 'netscapeServer',
- 'nsAccount'
+ 'inetUser', # for uid
+ 'netscapeServer', # for cn
+ 'nsAccount', # for authentication attributes
]
if ds_is_older('1.4.0'):
self._create_objectclasses.remove('nsAccount')
--
2.26.2

View File

@ -1,34 +0,0 @@
From ec85e986ec5710682de883f0f40f539b2f9945fa Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Wed, 27 May 2020 15:22:18 +0200
Subject: [PATCH 10/12] Issue 50931 - RFE AD filter rewriter for ObjectCategory
Bug Description:
ASAN build fails on RHEL due to linking issues
Fix Description:
Add missing libslapd.la for librewriters.la
Relates: https://pagure.io/389-ds-base/issue/50931
Reviewed by: tbordaz (Thanks!)
---
Makefile.am | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile.am b/Makefile.am
index 2309f3010..0e5f04f91 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1159,7 +1159,7 @@ librewriters_la_SOURCES = \
librewriters_la_LDFLAGS = $(AM_LDFLAGS)
librewriters_la_CPPFLAGS = $(AM_CPPFLAGS) $(REWRITERS_INCLUDES) $(DSPLUGIN_CPPFLAGS)
-librewriters_la_LIBADD = $(NSS_LINK) $(NSPR_LINK)
+librewriters_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK)
#------------------------
# libsvrcore
--
2.26.2

View File

@ -1,54 +0,0 @@
From 2540354b7eb6fa03db7d36a5b755001b0852aa1b Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Thu, 26 Mar 2020 19:33:47 +0100
Subject: [PATCH] Issue 50984 - Memory leaks in disk monitoring
Description: Memory leaks are reported by the disk monitoring test suite.
The direct leak is related to the char **dirs array, which is not freed at all.
Free the array when we clean up or go to shutdown.
Fix disk_monitoring_test.py::test_below_half_of_the_threshold_not_starting_after_shutdown.
It should accept a different exception when the instance is not started.
https://pagure.io/389-ds-base/issue/50984
Reviewed by: firstyear (Thanks!)
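
A generic plain-C stand-in for the cleanup described here (the server itself uses slapi_ch_array_free() on the NULL-terminated dirs array; the follow-up patch below additionally resets the pointer after freeing, which this sketch folds in):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Free a heap-allocated, NULL-terminated array of strings and reset the
 * caller's pointer, so any accidental reuse sees a clean NULL rather than
 * a use-after-free. */
static void free_string_array(char ***arrp)
{
    char **arr = *arrp;
    if (arr == NULL)
        return;
    for (size_t i = 0; arr[i] != NULL; i++)
        free(arr[i]);
    free(arr);
    *arrp = NULL;
}

int main(void)
{
    char **dirs = calloc(3, sizeof(char *));   /* two entries plus NULL terminator */
    dirs[0] = strdup("/var/log/dirsrv");
    dirs[1] = strdup("/var/lib/dirsrv/db");
    free_string_array(&dirs);
    printf("dirs is %s after cleanup\n", dirs == NULL ? "NULL" : "dangling");
    return 0;
}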
---
ldap/servers/slapd/daemon.c | 2 --
ldap/servers/slapd/main.c | 1 -
2 files changed, 3 deletions(-)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index a70f40316..542d31037 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -613,7 +613,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
}
}
slapi_ch_array_free(dirs);
- dirs = NULL;
return;
}
/*
@@ -713,7 +712,6 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
}
}
slapi_ch_array_free(dirs);
- dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */
g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL);
return;
}
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index e54b8e1c5..1f8b01959 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -958,7 +958,6 @@ main(int argc, char **argv)
goto cleanup;
}
slapi_ch_array_free(dirs);
- dirs = NULL;
}
/* log the max fd limit as it is typically set in env/systemd */
slapi_log_err(SLAPI_LOG_INFO, "main",
--
2.26.2

View File

@ -1,52 +0,0 @@
From a720e002751815323a295e11e77c56d7ce38314e Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Fri, 27 Mar 2020 11:35:55 +0100
Subject: [PATCH] Issue 50984 - Memory leaks in disk monitoring
Description: Reset dirs pointer every time we free it.
The code may be changed in the future so we should make it
more robust.
https://pagure.io/389-ds-base/issue/50984
Reviewed by: spichugi, tbordaz (one line commit rule)
---
ldap/servers/slapd/daemon.c | 2 ++
ldap/servers/slapd/main.c | 1 +
2 files changed, 3 insertions(+)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 542d31037..a70f40316 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -613,6 +613,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
}
}
slapi_ch_array_free(dirs);
+ dirs = NULL;
return;
}
/*
@@ -712,6 +713,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
}
}
slapi_ch_array_free(dirs);
+ dirs = NULL; /* now it is not needed but the code may be changed in the future and it'd better be more robust */
g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL);
return;
}
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 1f8b01959..e54b8e1c5 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -958,6 +958,7 @@ main(int argc, char **argv)
goto cleanup;
}
slapi_ch_array_free(dirs);
+ dirs = NULL;
}
/* log the max fd limit as it is typically set in env/systemd */
slapi_log_err(SLAPI_LOG_INFO, "main",
--
2.26.2

View File

@ -1,569 +0,0 @@
From f60364cd9472edc61e7d327d13dca67eadf0c5b2 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <simon.pichugin@gmail.com>
Date: Tue, 28 Apr 2020 23:44:20 +0200
Subject: [PATCH] Issue 50201 - nsIndexIDListScanLimit accepts any value
Bug Description: Malformed nsIndexIDListScanLimit settings such as
'limit=2 limit=3' are detected and logged in the error log,
but the invalid value is still successfully applied to the config entry
and the operation itself succeeds.
The impact is limited because the index will follow the
idlistscanlimit setting rather than the invalid nsIndexIDListScanLimit definition.
Fix Description: Print the errors to the user when they try to add
or modify an index config entry with malformed values.
Change the tests accordingly.
https://pagure.io/389-ds-base/issue/50201
Reviewed by: mreynolds, tbordaz (Thanks!)
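
The reporting pattern behind the fix, sketched with a hypothetical simplified parser (the real code funnels the messages through slapi_create_errormsg() into the DSE callback's returntext buffer; the function and buffer names here are made up):

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for attr_index_config(): validate a value and, when
 * the caller passes an error buffer (the config-callback case), copy the
 * message there so the LDAP client sees it; a NULL buffer (the startup /
 * db2index case) means log-only behaviour. */
static int parse_scanlimit(const char *value, char *err_buf, size_t err_len)
{
    if (strncmp(value, "limit=", 6) != 0) {
        if (err_buf != NULL)
            snprintf(err_buf, err_len,
                     "Error: malformed nsIndexIDListScanLimit value \"%s\"", value);
        return -1;   /* the callback maps this to LDAP_UNWILLING_TO_PERFORM */
    }
    return 0;
}

int main(void)
{
    char returntext[512] = "";
    if (parse_scanlimit("bogus=1", returntext, sizeof(returntext)) != 0)
        printf("rejected: %s\n", returntext);
    if (parse_scanlimit("limit=0", returntext, sizeof(returntext)) == 0)
        printf("accepted: limit=0\n");
    return 0;
}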
---
.../suites/filter/filterscanlimit_test.py | 87 ++++++++-----------
ldap/servers/slapd/back-ldbm/instance.c | 4 +-
ldap/servers/slapd/back-ldbm/ldbm_attr.c | 33 ++++++-
.../slapd/back-ldbm/ldbm_index_config.c | 59 +++++++++----
ldap/servers/slapd/back-ldbm/ldif2ldbm.c | 2 +-
.../servers/slapd/back-ldbm/proto-back-ldbm.h | 2 +-
6 files changed, 114 insertions(+), 73 deletions(-)
diff --git a/dirsrvtests/tests/suites/filter/filterscanlimit_test.py b/dirsrvtests/tests/suites/filter/filterscanlimit_test.py
index dd9c6ee4e..0198f6533 100644
--- a/dirsrvtests/tests/suites/filter/filterscanlimit_test.py
+++ b/dirsrvtests/tests/suites/filter/filterscanlimit_test.py
@@ -11,6 +11,7 @@ This script will test different type of Filers.
"""
import os
+import ldap
import pytest
from lib389._constants import DEFAULT_SUFFIX, PW_DM
@@ -19,11 +20,10 @@ from lib389.idm.user import UserAccounts
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.index import Index
from lib389.idm.account import Accounts
-from lib389.idm.group import UniqueGroups, Group
+from lib389.idm.group import UniqueGroups
pytestmark = pytest.mark.tier1
-
GIVEN_NAME = 'cn=givenname,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
CN_NAME = 'cn=sn,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
UNIQMEMBER = 'cn=uniquemember,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
@@ -39,7 +39,6 @@ LIST_OF_USER_ACCOUNTING = [
"Judy Wallace",
"Marcus Ward",
"Judy McFarland",
- "Anuj Hall",
"Gern Triplett",
"Emanuel Johnson",
"Brad Walker",
@@ -57,7 +56,6 @@ LIST_OF_USER_ACCOUNTING = [
"Randy Ulrich",
"Richard Francis",
"Morgan White",
- "Anuj Maddox",
"Jody Jensen",
"Mike Carter",
"Gern Tyler",
@@ -77,8 +75,6 @@ LIST_OF_USER_HUMAN = [
"Robert Daugherty",
"Torrey Mason",
"Brad Talbot",
- "Anuj Jablonski",
- "Harry Miller",
"Jeffrey Campaigne",
"Stephen Triplett",
"John Falena",
@@ -107,8 +103,7 @@ LIST_OF_USER_HUMAN = [
"Tobias Schmith",
"Jon Goldstein",
"Janet Lutz",
- "Karl Cope",
-]
+ "Karl Cope"]
LIST_OF_USER_TESTING = [
"Andy Bergin",
@@ -122,8 +117,7 @@ LIST_OF_USER_TESTING = [
"Alan White",
"Daniel Ward",
"Lee Stockton",
- "Matthew Vaughan"
-]
+ "Matthew Vaughan"]
LIST_OF_USER_DEVELOPMENT = [
"Kelly Winters",
@@ -143,7 +137,6 @@ LIST_OF_USER_DEVELOPMENT = [
"Timothy Kelly",
"Sue Mason",
"Chris Alexander",
- "Anuj Jensen",
"Martin Talbot",
"Scott Farmer",
"Allison Jensen",
@@ -152,9 +145,7 @@ LIST_OF_USER_DEVELOPMENT = [
"Dan Langdon",
"Ashley Knutson",
"Jon Bourke",
- "Pete Hunt",
-
-]
+ "Pete Hunt"]
LIST_OF_USER_PAYROLL = [
"Ashley Chassin",
@@ -164,12 +155,17 @@ LIST_OF_USER_PAYROLL = [
"Patricia Shelton",
"Dietrich Swain",
"Allison Hunter",
- "Anne-Louise Barnes"
+ "Anne-Louise Barnes"]
-]
+LIST_OF_USER_PEOPLE = [
+ 'Sam Carter',
+ 'Tom Morris',
+ 'Kevin Vaughan',
+ 'Rich Daugherty',
+ 'Harry Miller',
+ 'Sam Schmith']
-@pytest.mark.skip(reason="https://pagure.io/389-ds-base/issue/50201")
def test_invalid_configuration(topo):
""""
Error handling for invalid configuration
@@ -190,10 +186,7 @@ def test_invalid_configuration(topo):
'limit=0 flags=AND flags=AND',
'limit=0 type=eq values=foo values=foo',
'limit=0 type=eq values=foo,foo',
- 'limit=0 type=sub',
- 'limit=0 type=eq values=notvalid',
'limit',
- 'limit=0 type=eq values=notavaliddn',
'limit=0 type=pres values=bogus',
'limit=0 type=eq,sub values=bogus',
'limit=',
@@ -203,7 +196,8 @@ def test_invalid_configuration(topo):
'limit=-2',
'type=eq',
'limit=0 type=bogus']:
- Index(topo.standalone, GIVEN_NAME).replace('nsIndexIDListScanLimit', i)
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+ Index(topo.standalone, GIVEN_NAME).replace('nsIndexIDListScanLimit', i)
def test_idlistscanlimit(topo):
@@ -247,28 +241,24 @@ def test_idlistscanlimit(topo):
(LIST_OF_USER_HUMAN, users_human),
(LIST_OF_USER_TESTING, users_testing),
(LIST_OF_USER_DEVELOPMENT, users_development),
- (LIST_OF_USER_PAYROLL, users_payroll)]:
+ (LIST_OF_USER_PAYROLL, users_payroll),
+ (LIST_OF_USER_PEOPLE, users_people)]:
for demo1 in data[0]:
+ fn = demo1.split()[0]
+ sn = demo1.split()[1]
+ uid = ''.join([fn[:1], sn]).lower()
data[1].create(properties={
- 'uid': demo1,
+ 'uid': uid,
'cn': demo1,
- 'sn': demo1.split()[1],
+ 'sn': sn,
'uidNumber': str(1000),
'gidNumber': '2000',
- 'homeDirectory': '/home/' + demo1,
- 'givenname': demo1.split()[0],
- 'userpassword': PW_DM
+ 'homeDirectory': f'/home/{uid}',
+ 'givenname': fn,
+ 'userpassword': PW_DM,
+ 'mail': f'{uid}@test.com'
})
- users_people.create(properties={
- 'uid': 'scarter',
- 'cn': 'Sam Carter',
- 'sn': 'Carter',
- 'uidNumber': str(1000),
- 'gidNumber': '2000',
- 'homeDirectory': '/home/' + 'scarter',
- 'mail': 'scarter@anuj.com',
- })
try:
# Change log levels
errorlog_value = topo.standalone.config.get_attr_val_utf8('nsslapd-errorlog-level')
@@ -297,16 +287,12 @@ def test_idlistscanlimit(topo):
Index(topo.standalone, UNIQMEMBER).\
replace('nsIndexIDListScanLimit',
- 'limit=0 type=eq values=uid=kvaughan,ou=People,'
- 'dc=example,dc=com,uid=rdaugherty,ou=People,dc=example,dc=com')
+ 'limit=0 type=eq values=uid=kvaughan\2Cou=People\2Cdc=example\2Cdc=com,'
+ 'uid=rdaugherty\2Cou=People\2Cdc=example\2Cdc=com')
Index(topo.standalone, OBJECTCLASS).\
replace('nsIndexIDListScanLimit', 'limit=0 type=eq flags=AND values=inetOrgPerson')
- Index(topo.standalone, MAIL).\
- replace('nsIndexIDListScanLimit',
- 'cn=mail,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config')
-
# Search with filter
for i in ['(sn=Lutz)',
'(sn=*ter)',
@@ -321,22 +307,24 @@ def test_idlistscanlimit(topo):
'(&(sn=*)(cn=*))',
'(sn=Hunter)',
'(&(givenname=Richard)(objectclass=organizationalPerson))',
- '(givenname=Anuj)',
+ '(givenname=Morgan)',
'(&(givenname=*)(cn=*))',
'(givenname=*)']:
assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(f'{i}')
- # Creating Group
- Group(topo.standalone, 'cn=Accounting Managers,ou=groups,dc=example,dc=com').\
- add('uniquemember',
+ # Creating Groups and adding members
+ groups = UniqueGroups(topo.standalone, DEFAULT_SUFFIX)
+ accounting_managers = groups.ensure_state(properties={'cn': 'Accounting Managers'})
+ hr_managers = groups.ensure_state(properties={'cn': 'HR Managers'})
+
+ accounting_managers.add('uniquemember',
['uid=scarter, ou=People, dc=example,dc=com',
'uid=tmorris, ou=People, dc=example,dc=com',
'uid=kvaughan, ou=People, dc=example,dc=com',
'uid=rdaugherty, ou=People, dc=example,dc=com',
'uid=hmiller, ou=People, dc=example,dc=com'])
- Group(topo.standalone, 'cn=HR Managers,ou=groups,dc=example,dc=com').\
- add('uniquemember',
+ hr_managers.add('uniquemember',
['uid=kvaughan, ou=People, dc=example,dc=com',
'uid=cschmith, ou=People, dc=example,dc=com'])
@@ -403,10 +391,9 @@ def test_idlistscanlimit(topo):
'(&(sn=*)(cn=*))',
'(sn=Hunter)',
'(&(givenname=Richard)(objectclass=organizationalPerson))',
- '(givenname=Anuj)',
+ '(givenname=Morgan)',
'(&(givenname=*)(cn=*))',
'(givenname=*)']:
-
assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(value)
finally:
diff --git a/ldap/servers/slapd/back-ldbm/instance.c b/ldap/servers/slapd/back-ldbm/instance.c
index 04c28ff39..07655a8ec 100644
--- a/ldap/servers/slapd/back-ldbm/instance.c
+++ b/ldap/servers/slapd/back-ldbm/instance.c
@@ -231,7 +231,7 @@ ldbm_instance_create_default_indexes(backend *be)
/* ldbm_instance_config_add_index_entry(inst, 2, argv); */
e = ldbm_instance_init_config_entry(LDBM_PSEUDO_ATTR_DEFAULT, "none", 0, 0, 0);
- attr_index_config(be, "ldbm index init", 0, e, 1, 0);
+ attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL);
slapi_entry_free(e);
if (!entryrdn_get_noancestorid()) {
@@ -240,7 +240,7 @@ ldbm_instance_create_default_indexes(backend *be)
* but we still want to use the attr index file APIs.
*/
e = ldbm_instance_init_config_entry(LDBM_ANCESTORID_STR, "eq", 0, 0, 0);
- attr_index_config(be, "ldbm index init", 0, e, 1, 0);
+ attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL);
slapi_entry_free(e);
}
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
index b9e130d77..f0d418572 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
@@ -633,6 +633,18 @@ attr_index_idlistsize_config(Slapi_Entry *e, struct attrinfo *ai, char *returnte
return rc;
}
+/*
+ * Function that process index attributes and modifies attrinfo structure
+ *
+ * Called while adding default indexes, during db2index execution and
+ * when we add/modify/delete index config entry
+ *
+ * If char *err_buf is not NULL, it will additionally print all error messages to STDERR
+ * It is used when we add/modify/delete index config entry, so the user would have a better verbose
+ *
+ * returns -1, 1 on a failure
+ * 0 on success
+ */
int
attr_index_config(
backend *be,
@@ -640,7 +652,8 @@ attr_index_config(
int lineno,
Slapi_Entry *e,
int init __attribute__((unused)),
- int indextype_none)
+ int indextype_none,
+ char *err_buf)
{
ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
int j = 0;
@@ -662,6 +675,7 @@ attr_index_config(
slapi_attr_first_value(attr, &sval);
attrValue = slapi_value_get_berval(sval);
} else {
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: missing indexing arguments\n");
slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "Missing indexing arguments\n");
return -1;
}
@@ -705,6 +719,10 @@ attr_index_config(
}
a->ai_indexmask = INDEX_OFFLINE; /* note that the index isn't available */
} else {
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: %s: line %d: unknown index type \"%s\" (ignored) in entry (%s), "
+ "valid index types are \"pres\", \"eq\", \"approx\", or \"sub\"\n",
+ fname, lineno, attrValue->bv_val, slapi_entry_get_dn(e));
slapi_log_err(SLAPI_LOG_ERR, "attr_index_config",
"%s: line %d: unknown index type \"%s\" (ignored) in entry (%s), "
"valid index types are \"pres\", \"eq\", \"approx\", or \"sub\"\n",
@@ -715,6 +733,7 @@ attr_index_config(
}
if (hasIndexType == 0) {
/* indexType missing, error out */
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: missing index type\n");
slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "Missing index type\n");
attrinfo_delete(&a);
return -1;
@@ -873,16 +892,26 @@ attr_index_config(
slapi_ch_free((void **)&official_rules);
}
}
-
if ((return_value = attr_index_idlistsize_config(e, a, myreturntext))) {
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: %s: Failed to parse idscanlimit info: %d:%s\n",
+ fname, return_value, myreturntext);
slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "%s: Failed to parse idscanlimit info: %d:%s\n",
fname, return_value, myreturntext);
+ if (err_buf != NULL) {
+ /* we are inside of a callback, we shouldn't allow malformed attributes in index entries */
+ attrinfo_delete(&a);
+ return return_value;
+ }
}
/* initialize the IDL code's private data */
return_value = idl_init_private(be, a);
if (0 != return_value) {
/* fatal error, exit */
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: %s: line %d:Fatal Error: Failed to initialize attribute structure\n",
+ fname, lineno);
slapi_log_err(SLAPI_LOG_CRIT, "attr_index_config",
"%s: line %d:Fatal Error: Failed to initialize attribute structure\n",
fname, lineno);
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
index 45f0034f0..720f93036 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
@@ -25,26 +25,34 @@ int ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, Slapi_Entry *en
#define INDEXTYPE_NONE 1
static int
-ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name)
+ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, char *err_buf)
{
Slapi_Attr *attr;
const struct berval *attrValue;
Slapi_Value *sval;
+ char *edn = slapi_entry_get_dn(e);
/* Get the name of the attribute to index which will be the value
* of the cn attribute. */
if (slapi_entry_attr_find(e, "cn", &attr) != 0) {
- slapi_log_err(SLAPI_LOG_ERR, "ldbm_index_parse_entry", "Malformed index entry %s\n",
- slapi_entry_get_dn(e));
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s\n",
+ edn);
+ slapi_log_err(SLAPI_LOG_ERR,
+ "ldbm_index_parse_entry", "Malformed index entry %s\n",
+ edn);
return LDAP_OPERATIONS_ERROR;
}
slapi_attr_first_value(attr, &sval);
attrValue = slapi_value_get_berval(sval);
if (NULL == attrValue->bv_val || 0 == attrValue->bv_len) {
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s -- empty index name\n",
+ edn);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_index_parse_entry", "Malformed index entry %s -- empty index name\n",
- slapi_entry_get_dn(e));
+ edn);
return LDAP_OPERATIONS_ERROR;
}
@@ -59,16 +67,19 @@ ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_st
attrValue = slapi_value_get_berval(sval);
if (NULL == attrValue->bv_val || attrValue->bv_len == 0) {
/* missing the index type, error out */
- slapi_log_err(SLAPI_LOG_ERR,
- "ldbm_index_parse_entry", "Malformed index entry %s -- empty nsIndexType\n",
- slapi_entry_get_dn(e));
+ slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s -- empty nsIndexType\n",
+ edn);
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_index_parse_entry",
+ "Malformed index entry %s -- empty nsIndexType\n",
+ edn);
slapi_ch_free_string(index_name);
return LDAP_OPERATIONS_ERROR;
}
}
/* ok the entry is good to process, pass it to attr_index_config */
- if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0)) {
+ if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0, err_buf)) {
slapi_ch_free_string(index_name);
return LDAP_OPERATIONS_ERROR;
}
@@ -92,7 +103,7 @@ ldbm_index_init_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
ldbm_instance *inst = (ldbm_instance *)arg;
returntext[0] = '\0';
- *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL);
+ *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, NULL);
if (*returncode == LDAP_SUCCESS) {
return SLAPI_DSE_CALLBACK_OK;
} else {
@@ -117,7 +128,7 @@ ldbm_instance_index_config_add_callback(Slapi_PBlock *pb __attribute__((unused))
char *index_name = NULL;
returntext[0] = '\0';
- *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name);
+ *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, returntext);
if (*returncode == LDAP_SUCCESS) {
struct attrinfo *ai = NULL;
/* if the index is a "system" index, we assume it's being added by
@@ -179,7 +190,7 @@ ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb,
slapi_attr_first_value(attr, &sval);
attrValue = slapi_value_get_berval(sval);
- attr_index_config(inst->inst_be, "From DSE delete", 0, e, 0, INDEXTYPE_NONE);
+ attr_index_config(inst->inst_be, "From DSE delete", 0, e, 0, INDEXTYPE_NONE, returntext);
ainfo_get(inst->inst_be, attrValue->bv_val, &ainfo);
if (NULL == ainfo) {
@@ -213,14 +224,19 @@ ldbm_instance_index_config_modify_callback(Slapi_PBlock *pb __attribute__((unuse
Slapi_Value *sval;
const struct berval *attrValue;
struct attrinfo *ainfo = NULL;
+ char *edn = slapi_entry_get_dn(e);
+ char *edn_after = slapi_entry_get_dn(entryAfter);
returntext[0] = '\0';
*returncode = LDAP_SUCCESS;
if (slapi_entry_attr_find(entryAfter, "cn", &attr) != 0) {
+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s - missing cn attribute\n",
+ edn_after);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing cn attribute\n",
- slapi_entry_get_dn(entryAfter));
+ edn_after);
*returncode = LDAP_OBJECT_CLASS_VIOLATION;
return SLAPI_DSE_CALLBACK_ERROR;
}
@@ -228,31 +244,40 @@ ldbm_instance_index_config_modify_callback(Slapi_PBlock *pb __attribute__((unuse
attrValue = slapi_value_get_berval(sval);
if (NULL == attrValue->bv_val || 0 == attrValue->bv_len) {
+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s - missing index name\n",
+ edn);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_instance_index_config_modify_callback", "Malformed index entry %s, missing index name\n",
- slapi_entry_get_dn(e));
+ edn);
*returncode = LDAP_UNWILLING_TO_PERFORM;
return SLAPI_DSE_CALLBACK_ERROR;
}
ainfo_get(inst->inst_be, attrValue->bv_val, &ainfo);
if (NULL == ainfo) {
+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s - missing cn attribute info\n",
+ edn);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing cn attribute info\n",
- slapi_entry_get_dn(e));
+ edn);
*returncode = LDAP_UNWILLING_TO_PERFORM;
return SLAPI_DSE_CALLBACK_ERROR;
}
if (slapi_entry_attr_find(entryAfter, "nsIndexType", &attr) != 0) {
+ slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Error: malformed index entry %s - missing nsIndexType attribute\n",
+ edn_after);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing nsIndexType attribute\n",
- slapi_entry_get_dn(entryAfter));
+ edn_after);
*returncode = LDAP_OBJECT_CLASS_VIOLATION;
return SLAPI_DSE_CALLBACK_ERROR;
}
- if (attr_index_config(inst->inst_be, "from DSE modify", 0, entryAfter, 0, 0)) {
+ if (attr_index_config(inst->inst_be, "from DSE modify", 0, entryAfter, 0, 0, returntext)) {
*returncode = LDAP_UNWILLING_TO_PERFORM;
return SLAPI_DSE_CALLBACK_ERROR;
}
@@ -364,7 +389,7 @@ ldbm_instance_index_config_enable_index(ldbm_instance *inst, Slapi_Entry *e)
ainfo_get(inst->inst_be, index_name, &ai);
}
if (!ai) {
- rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name);
+ rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, NULL);
}
if (rc == LDAP_SUCCESS) {
/* Assume the caller knows if it is OK to go online immediately */
diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
index 9d82c8228..f2ef5ecd4 100644
--- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
+++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
@@ -291,7 +291,7 @@ db2index_add_indexed_attr(backend *be, char *attrString)
}
}
- attr_index_config(be, "from db2index()", 0, e, 0, 0);
+ attr_index_config(be, "from db2index()", 0, e, 0, 0, NULL);
slapi_entry_free(e);
return (0);
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index 9a86c752b..a07acee5e 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -24,7 +24,7 @@ void attrinfo_delete(struct attrinfo **pp);
void ainfo_get(backend *be, char *type, struct attrinfo **at);
void attr_masks(backend *be, char *type, int *indexmask, int *syntaxmask);
void attr_masks_ex(backend *be, char *type, int *indexmask, int *syntaxmask, struct attrinfo **at);
-int attr_index_config(backend *be, char *fname, int lineno, Slapi_Entry *e, int init, int none);
+int attr_index_config(backend *be, char *fname, int lineno, Slapi_Entry *e, int init, int none, char *err_buf);
int db2index_add_indexed_attr(backend *be, char *attrString);
int ldbm_compute_init(void);
void attrinfo_deletetree(ldbm_instance *inst);
--
2.26.2

View File

@ -1,213 +0,0 @@
From 3b3faee01e645577ad77ff4f38429a9e0806231b Mon Sep 17 00:00:00 2001
From: Simon Pichugin <simon.pichugin@gmail.com>
Date: Tue, 16 Jun 2020 20:35:05 +0200
Subject: [PATCH] Issue 51157 - Reindex task may create abandoned index file
Bug Description: Recreating an index for the same attribute but changing
the case of, for example, one letter results in an abandoned index file.
Fix Description: Add a test case to a newly created 'indexes' test suite.
When we remove the index config from the backend, also remove the attribute
info from the LDBM instance attributes.
https://pagure.io/389-ds-base/issue/51157
Reviewed by: firstyear, mreynolds (Thanks!)
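
A toy stand-in for the in-memory bookkeeping involved (this is not the LDBM attrinfo AVL tree; the linked list, names, and output are illustrative assumptions): records are looked up case-insensitively, so deleting an index config entry must also drop its record, otherwise a later re-add under different casing keeps resolving to the stale one, which is how the abandoned index file described above can arise.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

struct attr_rec { const char *name; struct attr_rec *next; };
static struct attr_rec *registry = NULL;

/* Case-insensitive lookup, mirroring how attribute names are matched. */
static struct attr_rec *reg_lookup(const char *name)
{
    for (struct attr_rec *r = registry; r; r = r->next)
        if (strcasecmp(r->name, name) == 0)
            return r;
    return NULL;
}

static void reg_add(const char *name)
{
    struct attr_rec *r = malloc(sizeof(*r));
    r->name = name;
    r->next = registry;
    registry = r;
}

/* The step the fix adds to the delete callback: unlink and free the record. */
static void reg_remove(const char *name)
{
    for (struct attr_rec **pp = &registry; *pp; pp = &(*pp)->next) {
        if (strcasecmp((*pp)->name, name) == 0) {
            struct attr_rec *dead = *pp;
            *pp = dead->next;
            free(dead);
            return;
        }
    }
}

int main(void)
{
    reg_add("mozillacustom1");        /* index first defined in lower case  */
    reg_remove("mozillacustom1");     /* its config entry is deleted        */
    reg_add("mozillaCustom1");        /* re-added with the schema's casing  */
    printf("record now in use: %s\n", reg_lookup("mozillacustom1")->name);
    return 0;
}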
---
dirsrvtests/tests/suites/indexes/__init__.py | 3 +
.../tests/suites/indexes/regression_test.py | 125 ++++++++++++++++++
ldap/servers/slapd/back-ldbm/ldbm_attr.c | 7 +
.../slapd/back-ldbm/ldbm_index_config.c | 3 +
.../servers/slapd/back-ldbm/proto-back-ldbm.h | 1 +
5 files changed, 139 insertions(+)
create mode 100644 dirsrvtests/tests/suites/indexes/__init__.py
create mode 100644 dirsrvtests/tests/suites/indexes/regression_test.py
diff --git a/dirsrvtests/tests/suites/indexes/__init__.py b/dirsrvtests/tests/suites/indexes/__init__.py
new file mode 100644
index 000000000..04441667e
--- /dev/null
+++ b/dirsrvtests/tests/suites/indexes/__init__.py
@@ -0,0 +1,3 @@
+"""
+ :Requirement: 389-ds-base: Indexes
+"""
diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
new file mode 100644
index 000000000..1a71f16e9
--- /dev/null
+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
@@ -0,0 +1,125 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import time
+import os
+import pytest
+import ldap
+from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX
+from lib389.index import Indexes
+from lib389.backend import Backends
+from lib389.idm.user import UserAccounts
+from lib389.topologies import topology_st as topo
+
+pytestmark = pytest.mark.tier1
+
+
+def test_reindex_task_creates_abandoned_index_file(topo):
+ """
+ Recreating an index for the same attribute but changing
+ the case of for example 1 letter, results in abandoned indexfile
+
+ :id: 07ae5274-481a-4fa8-8074-e0de50d89ac6
+ :setup: Standalone instance
+ :steps:
+ 1. Create a user object with additional attributes:
+ objectClass: mozillaabpersonalpha
+ mozillaCustom1: xyz
+ 2. Add an index entry mozillacustom1
+ 3. Reindex the backend
+ 4. Check the content of the index (after it has been flushed to disk) mozillacustom1.db
+ 5. Remove the index
+ 6. Notice the mozillacustom1.db is removed
+ 7. Recreate the index but now use the exact case as mentioned in the schema
+ 8. Reindex the backend
+ 9. Check the content of the index (after it has been flushed to disk) mozillaCustom1.db
+ 10. Check that an ldapsearch does not return a result (mozillacustom1=xyz)
+ 11. Check that an ldapsearch returns the results (mozillaCustom1=xyz)
+ 12. Restart the instance
+ 13. Notice that an ldapsearch does not return a result(mozillacustom1=xyz)
+ 15. Check that an ldapsearch does not return a result (mozillacustom1=xyz)
+ 16. Check that an ldapsearch returns the results (mozillaCustom1=xyz)
+ 17. Reindex the backend
+ 18. Notice the second indexfile for this attribute
+ 19. Check the content of the index (after it has been flushed to disk) no mozillacustom1.db
+ 20. Check the content of the index (after it has been flushed to disk) mozillaCustom1.db
+ :expectedresults:
+ 1. Should Success.
+ 2. Should Success.
+ 3. Should Success.
+ 4. Should Success.
+ 5. Should Success.
+ 6. Should Success.
+ 7. Should Success.
+ 8. Should Success.
+ 9. Should Success.
+ 10. Should Success.
+ 11. Should Success.
+ 12. Should Success.
+ 13. Should Success.
+ 14. Should Success.
+ 15. Should Success.
+ 16. Should Success.
+ 17. Should Success.
+ 18. Should Success.
+ 19. Should Success.
+ 20. Should Success.
+ """
+
+ inst = topo.standalone
+ attr_name = "mozillaCustom1"
+ attr_value = "xyz"
+
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+ user = users.create_test_user()
+ user.add("objectClass", "mozillaabpersonalpha")
+ user.add(attr_name, attr_value)
+
+ backends = Backends(inst)
+ backend = backends.get(DEFAULT_BENAME)
+ indexes = backend.get_indexes()
+ index = indexes.create(properties={
+ 'cn': attr_name.lower(),
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': ['eq', 'pres']
+ })
+
+ backend.reindex()
+ time.sleep(3)
+ assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
+ index.delete()
+ assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
+
+ index = indexes.create(properties={
+ 'cn': attr_name,
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': ['eq', 'pres']
+ })
+
+ backend.reindex()
+ time.sleep(3)
+ assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
+ assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name}.db")
+
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f"{attr_name}={attr_value}")
+ assert len(entries) > 0
+ inst.restart()
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f"{attr_name}={attr_value}")
+ assert len(entries) > 0
+
+ backend.reindex()
+ time.sleep(3)
+ assert not os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db")
+ assert os.path.exists(f"{inst.ds_paths.db_home_dir}/{DEFAULT_BENAME}/{attr_name}.db")
+
+
+if __name__ == "__main__":
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
index f0d418572..688c4f137 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
@@ -98,6 +98,13 @@ ainfo_cmp(
return (strcasecmp(a->ai_type, b->ai_type));
}
+void
+attrinfo_delete_from_tree(backend *be, struct attrinfo *ai)
+{
+ ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
+ avl_delete(&inst->inst_attrs, ai, ainfo_cmp);
+}
+
/*
* Called when a duplicate "index" line is encountered.
*
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
index 720f93036..9722d0ce7 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
@@ -201,7 +201,10 @@ ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb,
*returncode = LDAP_UNWILLING_TO_PERFORM;
rc = SLAPI_DSE_CALLBACK_ERROR;
}
+ attrinfo_delete_from_tree(inst->inst_be, ainfo);
}
+ /* Free attrinfo structure */
+ attrinfo_delete(&ainfo);
bail:
return rc;
}
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index a07acee5e..4d2524fd9 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -21,6 +21,7 @@
*/
struct attrinfo *attrinfo_new(void);
void attrinfo_delete(struct attrinfo **pp);
+void attrinfo_delete_from_tree(backend *be, struct attrinfo *ai);
void ainfo_get(backend *be, char *type, struct attrinfo **at);
void attr_masks(backend *be, char *type, int *indexmask, int *syntaxmask);
void attr_masks_ex(backend *be, char *type, int *indexmask, int *syntaxmask, struct attrinfo **at);
--
2.26.2

View File

@ -1,668 +0,0 @@
From 282edde7950ceb2515d74fdbcc0a188131769d74 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 23 Jun 2020 16:38:55 -0400
Subject: [PATCH] Issue 51165 - add new access log keywords for wtime and
optime
Description: In addition to the "etime" stat in the access log, we can also
record the time the operation spent in the work queue and
how long the actual operation took. We now have "wtime"
and "optime" to track these stats in the access log.
Also updated logconv for notes=F (related to a different
ticket), and stats for wtime and optime.
relates: https://pagure.io/389-ds-base/issue/51165
Reviewed by: ?
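
How the three stats relate, as a standalone sketch (the timestamp capture points and the arithmetic are assumptions drawn from the description above, not the server's actual bookkeeping):

#include <stdio.h>
#include <time.h>

/* Difference between two timespecs, in seconds. */
static double ts_diff(const struct timespec *end, const struct timespec *start)
{
    return (end->tv_sec - start->tv_sec) + (end->tv_nsec - start->tv_nsec) / 1e9;
}

int main(void)
{
    struct timespec queued = {0}, started = {0}, finished = {0};

    clock_gettime(CLOCK_MONOTONIC, &queued);   /* op placed on the work queue */
    /* ... worker thread picks the op up ... */
    clock_gettime(CLOCK_MONOTONIC, &started);  /* worker begins processing    */
    /* ... operation runs ... */
    clock_gettime(CLOCK_MONOTONIC, &finished); /* result sent                 */

    double wtime  = ts_diff(&started, &queued);    /* time spent waiting in the queue */
    double optime = ts_diff(&finished, &started);  /* time spent actually working     */
    double etime  = ts_diff(&finished, &queued);   /* total elapsed, ~ wtime + optime */

    printf("wtime=%.9f optime=%.9f etime=%.9f\n", wtime, optime, etime);
    return 0;
}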
---
ldap/admin/src/logconv.pl | 187 +++++++++++++++++++++++++++---
ldap/servers/slapd/add.c | 3 +
ldap/servers/slapd/bind.c | 4 +
ldap/servers/slapd/delete.c | 3 +
ldap/servers/slapd/modify.c | 3 +
ldap/servers/slapd/modrdn.c | 3 +
ldap/servers/slapd/operation.c | 24 ++++
ldap/servers/slapd/opshared.c | 3 +
ldap/servers/slapd/result.c | 49 ++++----
ldap/servers/slapd/slap.h | 13 ++-
ldap/servers/slapd/slapi-plugin.h | 26 ++++-
11 files changed, 269 insertions(+), 49 deletions(-)
diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl
index f4808a101..1ed44a888 100755
--- a/ldap/admin/src/logconv.pl
+++ b/ldap/admin/src/logconv.pl
@@ -3,7 +3,7 @@
#
# BEGIN COPYRIGHT BLOCK
# Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
-# Copyright (C) 2013 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -55,7 +55,7 @@ my $reportStats = "";
my $dataLocation = "/tmp";
my $startTLSoid = "1.3.6.1.4.1.1466.20037";
my @statnames=qw(last last_str results srch add mod modrdn moddn cmp del abandon
- conns sslconns bind anonbind unbind notesA notesU etime);
+ conns sslconns bind anonbind unbind notesA notesU notesF etime);
my $s_stats;
my $m_stats;
my $verb = "no";
@@ -211,6 +211,7 @@ my $sslClientBindCount = 0;
my $sslClientFailedCount = 0;
my $objectclassTopCount= 0;
my $pagedSearchCount = 0;
+my $invalidFilterCount = 0;
my $bindCount = 0;
my $filterCount = 0;
my $baseCount = 0;
@@ -258,7 +259,7 @@ map {$conn{$_} = $_} @conncodes;
# hash db-backed hashes
my @hashnames = qw(attr rc src rsrc excount conn_hash ip_hash conncount nentries
filter base ds6xbadpwd saslmech saslconnop bindlist etime oid
- start_time_of_connection end_time_of_connection
+ start_time_of_connection end_time_of_connection notesf_conn_op
notesa_conn_op notesu_conn_op etime_conn_op nentries_conn_op
optype_conn_op time_conn_op srch_conn_op del_conn_op mod_conn_op
mdn_conn_op cmp_conn_op bind_conn_op unbind_conn_op ext_conn_op
@@ -926,7 +927,7 @@ if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){
}
while($op > 0){
# The bind op is not the same as the search op that triggered the notes=A.
- # We have adjust the key by decrementing the op count until we find the last bind op.
+ # We have to adjust the key by decrementing the op count until we find the last bind op.
$op--;
$binddn_key = "$srvRstCnt,$conn,$op";
if (exists($bind_conn_op->{$binddn_key}) && defined($bind_conn_op->{$binddn_key})) {
@@ -1049,9 +1050,60 @@ if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){
}
}
}
-} # end of unindexed search report
+ print "\n";
+}
+
+print "Invalid Attribute Filters: $invalidFilterCount\n";
+if ($invalidFilterCount > 0 && $verb eq "yes"){
+ my $conn_hash = $hashes->{conn_hash};
+ my $notesf_conn_op = $hashes->{notesf_conn_op};
+ my $time_conn_op = $hashes->{time_conn_op};
+ my $etime_conn_op = $hashes->{etime_conn_op};
+ my $nentries_conn_op = $hashes->{nentries_conn_op};
+ my $filter_conn_op = $hashes->{filter_conn_op};
+ my $bind_conn_op = $hashes->{bind_conn_op};
+ my $notesCount = 1;
+ my $unindexedIp;
+ my $binddn_key;
+ my %uniqFilt = (); # hash of unique filters
+ my %uniqFilter = (); # hash of unique filters bind dn
+ my %uniqBindDNs = (); # hash of unique bind dn's
+ my %uniqBindFilters = (); # hash of filters for a bind DN
+
+ while (my ($srcnt_conn_op, $count) = each %{$notesf_conn_op}) {
+ my ($srvRstCnt, $conn, $op) = split(",", $srcnt_conn_op);
+ my $attrIp = getIPfromConn($conn, $srvRstCnt);
+ print "\n Invalid Attribute Filter #".$notesCount." (notes=F)\n";
+ print " - Date/Time: $time_conn_op->{$srcnt_conn_op}\n";
+ print " - Connection Number: $conn\n";
+ print " - Operation Number: $op\n";
+ print " - Etime: $etime_conn_op->{$srcnt_conn_op}\n";
+ print " - Nentries: $nentries_conn_op->{$srcnt_conn_op}\n";
+ print " - IP Address: $attrIp\n";
+ if (exists($filter_conn_op->{$srcnt_conn_op}) && defined($filter_conn_op->{$srcnt_conn_op})) {
+ print " - Search Filter: $filter_conn_op->{$srcnt_conn_op}\n";
+ $uniqFilt{$filter_conn_op->{$srcnt_conn_op}}++;
+ }
+ while($op > 0){
+ # The bind op is not the same as the search op that triggered the notes=A.
+ # We have to adjust the key by decrementing the op count until we find the last bind op.
+ $op--;
+ $binddn_key = "$srvRstCnt,$conn,$op";
+ if (exists($bind_conn_op->{$binddn_key}) && defined($bind_conn_op->{$binddn_key})) {
+ print " - Bind DN: $bind_conn_op->{$binddn_key}\n";
+ $uniqBindDNs{$bind_conn_op->{$binddn_key}}++;
+ if( $uniqFilt{$filter_conn_op->{$srcnt_conn_op}} && defined($filter_conn_op->{$srcnt_conn_op})) {
+ $uniqBindFilters{$bind_conn_op->{$binddn_key}}{$filter_conn_op->{$srcnt_conn_op}}++;
+ $uniqFilter{$filter_conn_op->{$srcnt_conn_op}}{$bind_conn_op->{$binddn_key}}++;
+ }
+ last;
+ }
+ }
+ $notesCount++;
+ }
+ print "\n";
+}
-print "\n";
print "FDs Taken: $fdTaken\n";
print "FDs Returned: $fdReturned\n";
print "Highest FD Taken: $highestFdTaken\n\n";
@@ -1386,20 +1438,20 @@ if ($usage =~ /l/ || $verb eq "yes"){
}
}
-#########################################
-# #
-# Gather and Process the unique etimes #
-# #
-#########################################
+##############################################################
+# #
+# Gather and Process the unique etimes, wtimes, and optimes #
+# #
+##############################################################
my $first;
if ($usage =~ /t/i || $verb eq "yes"){
+ # Print the elapsed times (etime)
+
my $etime = $hashes->{etime};
my @ekeys = keys %{$etime};
- #
# print most often etimes
- #
- print "\n\n----- Top $sizeCount Most Frequent etimes -----\n\n";
+ print "\n\n----- Top $sizeCount Most Frequent etimes (elapsed times) -----\n\n";
my $eloop = 0;
my $retime = 0;
foreach my $et (sort { $etime->{$b} <=> $etime->{$a} } @ekeys) {
@@ -1411,16 +1463,84 @@ if ($usage =~ /t/i || $verb eq "yes"){
printf "%-8s %-12s\n", $etime->{ $et }, "etime=$et";
$eloop++;
}
- #
+ if ($eloop == 0) {
+ print "None";
+ }
# print longest etimes
- #
- print "\n\n----- Top $sizeCount Longest etimes -----\n\n";
+ print "\n\n----- Top $sizeCount Longest etimes (elapsed times) -----\n\n";
$eloop = 0;
foreach my $et (sort { $b <=> $a } @ekeys) {
if ($eloop == $sizeCount) { last; }
printf "%-12s %-10s\n","etime=$et",$etime->{ $et };
$eloop++;
}
+ if ($eloop == 0) {
+ print "None";
+ }
+
+ # Print the wait times (wtime)
+
+ my $wtime = $hashes->{wtime};
+ my @wkeys = keys %{$wtime};
+ # print most often wtimes
+ print "\n\n----- Top $sizeCount Most Frequent wtimes (wait times) -----\n\n";
+ $eloop = 0;
+ $retime = 0;
+ foreach my $et (sort { $wtime->{$b} <=> $wtime->{$a} } @wkeys) {
+ if ($eloop == $sizeCount) { last; }
+ if ($retime ne "2"){
+ $first = $et;
+ $retime = "2";
+ }
+ printf "%-8s %-12s\n", $wtime->{ $et }, "wtime=$et";
+ $eloop++;
+ }
+ if ($eloop == 0) {
+ print "None";
+ }
+ # print longest wtimes
+ print "\n\n----- Top $sizeCount Longest wtimes (wait times) -----\n\n";
+ $eloop = 0;
+ foreach my $et (sort { $b <=> $a } @wkeys) {
+ if ($eloop == $sizeCount) { last; }
+ printf "%-12s %-10s\n","wtime=$et",$wtime->{ $et };
+ $eloop++;
+ }
+ if ($eloop == 0) {
+ print "None";
+ }
+
+ # Print the operation times (optime)
+
+ my $optime = $hashes->{optime};
+ my @opkeys = keys %{$optime};
+ # print most often optimes
+ print "\n\n----- Top $sizeCount Most Frequent optimes (actual operation times) -----\n\n";
+ $eloop = 0;
+ $retime = 0;
+ foreach my $et (sort { $optime->{$b} <=> $optime->{$a} } @opkeys) {
+ if ($eloop == $sizeCount) { last; }
+ if ($retime ne "2"){
+ $first = $et;
+ $retime = "2";
+ }
+ printf "%-8s %-12s\n", $optime->{ $et }, "optime=$et";
+ $eloop++;
+ }
+ if ($eloop == 0) {
+ print "None";
+ }
+ # print longest optimes
+ print "\n\n----- Top $sizeCount Longest optimes (actual operation times) -----\n\n";
+ $eloop = 0;
+ foreach my $et (sort { $b <=> $a } @opkeys) {
+ if ($eloop == $sizeCount) { last; }
+ printf "%-12s %-10s\n","optime=$et",$optime->{ $et };
+ $eloop++;
+ }
+ if ($eloop == 0) {
+ print "None";
+ }
}
#######################################
@@ -2152,6 +2272,26 @@ sub parseLineNormal
if (m/ RESULT err=/ && m/ notes=[A-Z,]*P/){
$pagedSearchCount++;
}
+ if (m/ RESULT err=/ && m/ notes=[A-Z,]*F/){
+ $invalidFilterCount++;
+ $con = "";
+ if ($_ =~ /conn= *([0-9A-Z]+)/i){
+ $con = $1;
+ if ($_ =~ /op= *([0-9\-]+)/i){ $op = $1;}
+ }
+
+ if($reportStats){ inc_stats('notesF',$s_stats,$m_stats); }
+ if ($usage =~ /u/ || $usage =~ /U/ || $verb eq "yes"){
+ if($_ =~ /etime= *([0-9.]+)/i ){
+ if($1 >= $minEtime){
+ $hashes->{etime_conn_op}->{"$serverRestartCount,$con,$op"} = $1;
+ $hashes->{notesf_conn_op}->{"$serverRestartCount,$con,$op"}++;
+ if ($_ =~ / *([0-9a-z:\/]+)/i){ $hashes->{time_conn_op}->{"$serverRestartCount,$con,$op"} = $1; }
+ if ($_ =~ /nentries= *([0-9]+)/i ){ $hashes->{nentries_conn_op}->{"$serverRestartCount,$con,$op"} = $1; }
+ }
+ }
+ }
+ }
if (m/ notes=[A-Z,]*A/){
$con = "";
if ($_ =~ /conn= *([0-9A-Z]+)/i){
@@ -2435,6 +2575,16 @@ sub parseLineNormal
if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{etime}->{$etime_val}++; }
if ($reportStats){ inc_stats_val('etime',$etime_val,$s_stats,$m_stats); }
}
+ if ($_ =~ /wtime= *([0-9.]+)/ ) {
+ my $wtime_val = $1;
+ if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{wtime}->{$wtime_val}++; }
+ if ($reportStats){ inc_stats_val('wtime',$wtime_val,$s_stats,$m_stats); }
+ }
+ if ($_ =~ /optime= *([0-9.]+)/ ) {
+ my $optime_val = $1;
+ if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{optime}->{$optime_val}++; }
+ if ($reportStats){ inc_stats_val('optime',$optime_val,$s_stats,$m_stats); }
+ }
if ($_ =~ / tag=101 / || $_ =~ / tag=111 / || $_ =~ / tag=100 / || $_ =~ / tag=115 /){
if ($_ =~ / nentries= *([0-9]+)/i ){
my $nents = $1;
@@ -2555,7 +2705,7 @@ sub parseLineNormal
}
}
}
- if (/ RESULT err=/ && / tag=97 nentries=0 etime=/ && $_ =~ /dn=\"(.*)\"/i){
+ if (/ RESULT err=/ && / tag=97 nentries=0 / && $_ =~ /dn=\"(.*)\"/i){
# Check if this is a sasl bind, if see we need to add the RESULT's dn as a bind dn
my $binddn = $1;
my ($conn, $op);
@@ -2680,6 +2830,7 @@ print_stats_block
$stats->{'unbind'},
$stats->{'notesA'},
$stats->{'notesU'},
+ $stats->{'notesF'},
$stats->{'etime'}),
"\n" );
} else {
diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c
index 06ca1ee79..52c64fa3c 100644
--- a/ldap/servers/slapd/add.c
+++ b/ldap/servers/slapd/add.c
@@ -441,6 +441,9 @@ op_shared_add(Slapi_PBlock *pb)
internal_op = operation_is_flag_set(operation, OP_FLAG_INTERNAL);
pwpolicy = new_passwdPolicy(pb, slapi_entry_get_dn(e));
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(operation);
+
/* target spec is used to decide which plugins are applicable for the operation */
operation_set_target_spec(operation, slapi_entry_get_sdn(e));
diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c
index 310216e89..55f865077 100644
--- a/ldap/servers/slapd/bind.c
+++ b/ldap/servers/slapd/bind.c
@@ -87,6 +87,10 @@ do_bind(Slapi_PBlock *pb)
send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, NULL, 0, NULL);
goto free_and_return;
}
+
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(pb_op);
+
ber = pb_op->o_ber;
/*
diff --git a/ldap/servers/slapd/delete.c b/ldap/servers/slapd/delete.c
index c0e61adf1..1a7209317 100644
--- a/ldap/servers/slapd/delete.c
+++ b/ldap/servers/slapd/delete.c
@@ -236,6 +236,9 @@ op_shared_delete(Slapi_PBlock *pb)
slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
internal_op = operation_is_flag_set(operation, OP_FLAG_INTERNAL);
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(operation);
+
sdn = slapi_sdn_new_dn_byval(rawdn);
dn = slapi_sdn_get_dn(sdn);
slapi_pblock_set(pb, SLAPI_DELETE_TARGET_SDN, (void *)sdn);
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
index 259bedfff..a186dbde3 100644
--- a/ldap/servers/slapd/modify.c
+++ b/ldap/servers/slapd/modify.c
@@ -626,6 +626,9 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
slapi_pblock_get(pb, SLAPI_SKIP_MODIFIED_ATTRS, &skip_modified_attrs);
slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn);
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(operation);
+
if (sdn) {
passin_sdn = 1;
} else {
diff --git a/ldap/servers/slapd/modrdn.c b/ldap/servers/slapd/modrdn.c
index 3efe584a7..e04916b83 100644
--- a/ldap/servers/slapd/modrdn.c
+++ b/ldap/servers/slapd/modrdn.c
@@ -417,6 +417,9 @@ op_shared_rename(Slapi_PBlock *pb, int passin_args)
internal_op = operation_is_flag_set(operation, OP_FLAG_INTERNAL);
slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn);
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(operation);
+
/*
* If ownership has not been passed to this function, we replace the
* string input fields within the pblock with strdup'd copies. Why?
diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c
index ff16cd906..4dd3481c7 100644
--- a/ldap/servers/slapd/operation.c
+++ b/ldap/servers/slapd/operation.c
@@ -651,3 +651,27 @@ slapi_operation_time_expiry(Slapi_Operation *o, time_t timeout, struct timespec
{
slapi_timespec_expire_rel(timeout, &(o->o_hr_time_rel), expiry);
}
+
+/* Set the time the operation actually started */
+void
+slapi_operation_set_time_started(Slapi_Operation *o)
+{
+ clock_gettime(CLOCK_MONOTONIC, &(o->o_hr_time_started_rel));
+}
+
+/* The time diff of how long the operation took once it actually started */
+void
+slapi_operation_op_time_elapsed(Slapi_Operation *o, struct timespec *elapsed)
+{
+ struct timespec o_hr_time_now;
+ clock_gettime(CLOCK_MONOTONIC, &o_hr_time_now);
+
+ slapi_timespec_diff(&o_hr_time_now, &(o->o_hr_time_started_rel), elapsed);
+}
+
+/* The time diff the operation waited in the work queue */
+void
+slapi_operation_workq_time_elapsed(Slapi_Operation *o, struct timespec *elapsed)
+{
+ slapi_timespec_diff(&(o->o_hr_time_started_rel), &(o->o_hr_time_rel), elapsed);
+}
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 9fe78655c..c0bc5dcd0 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -284,6 +284,9 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &sdn);
slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
+ /* Set the time we actually started the operation */
+ slapi_operation_set_time_started(operation);
+
if (NULL == sdn) {
sdn = slapi_sdn_new_dn_byval(base);
slapi_pblock_set(pb, SLAPI_SEARCH_TARGET_SDN, sdn);
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index 0b13c30e9..61efb6f8d 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -1975,6 +1975,8 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
CSN *operationcsn = NULL;
char csn_str[CSN_STRSIZE + 5];
char etime[ETIME_BUFSIZ] = {0};
+ char wtime[ETIME_BUFSIZ] = {0};
+ char optime[ETIME_BUFSIZ] = {0};
int pr_idx = -1;
int pr_cookie = -1;
uint32_t operation_notes;
@@ -1982,19 +1984,26 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
int32_t op_id;
int32_t op_internal_id;
int32_t op_nested_count;
+ struct timespec o_hr_time_end;
get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count);
-
slapi_pblock_get(pb, SLAPI_PAGED_RESULTS_INDEX, &pr_idx);
slapi_pblock_get(pb, SLAPI_PAGED_RESULTS_COOKIE, &pr_cookie);
-
internal_op = operation_is_flag_set(op, OP_FLAG_INTERNAL);
- struct timespec o_hr_time_end;
+ /* total elapsed time */
slapi_operation_time_elapsed(op, &o_hr_time_end);
+ snprintf(etime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
+
+ /* wait time */
+ slapi_operation_workq_time_elapsed(op, &o_hr_time_end);
+ snprintf(wtime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
+
+ /* op time */
+ slapi_operation_op_time_elapsed(op, &o_hr_time_end);
+ snprintf(optime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
- snprintf(etime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec);
operation_notes = slapi_pblock_get_operation_notes(pb);
@@ -2025,16 +2034,16 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
if (!internal_op) {
slapi_log_access(LDAP_DEBUG_STATS,
"conn=%" PRIu64 " op=%d RESULT err=%d"
- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s"
+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s"
", SASL bind in progress\n",
op->o_connid,
op->o_opid,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str);
} else {
-#define LOG_SASLMSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s, SASL bind in progress\n"
+#define LOG_SASLMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s, SASL bind in progress\n"
slapi_log_access(LDAP_DEBUG_ARGS,
connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_SASLMSG_FMT :
LOG_CONN_OP_FMT_EXT_INT LOG_SASLMSG_FMT,
@@ -2043,7 +2052,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
op_internal_id,
op_nested_count,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str);
}
} else if (op->o_tag == LDAP_REQ_BIND && err == LDAP_SUCCESS) {
@@ -2057,15 +2066,15 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
if (!internal_op) {
slapi_log_access(LDAP_DEBUG_STATS,
"conn=%" PRIu64 " op=%d RESULT err=%d"
- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s"
+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s"
" dn=\"%s\"\n",
op->o_connid,
op->o_opid,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str, dn ? dn : "");
} else {
-#define LOG_BINDMSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s dn=\"%s\"\n"
+#define LOG_BINDMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s dn=\"%s\"\n"
slapi_log_access(LDAP_DEBUG_ARGS,
connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_BINDMSG_FMT :
LOG_CONN_OP_FMT_EXT_INT LOG_BINDMSG_FMT,
@@ -2074,7 +2083,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
op_internal_id,
op_nested_count,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str, dn ? dn : "");
}
slapi_ch_free((void **)&dn);
@@ -2083,15 +2092,15 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
if (!internal_op) {
slapi_log_access(LDAP_DEBUG_STATS,
"conn=%" PRIu64 " op=%d RESULT err=%d"
- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s"
+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s"
" pr_idx=%d pr_cookie=%d\n",
op->o_connid,
op->o_opid,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str, pr_idx, pr_cookie);
} else {
-#define LOG_PRMSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s pr_idx=%d pr_cookie=%d \n"
+#define LOG_PRMSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s pr_idx=%d pr_cookie=%d \n"
slapi_log_access(LDAP_DEBUG_ARGS,
connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_PRMSG_FMT :
LOG_CONN_OP_FMT_EXT_INT LOG_PRMSG_FMT,
@@ -2100,7 +2109,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
op_internal_id,
op_nested_count,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str, pr_idx, pr_cookie);
}
} else if (!internal_op) {
@@ -2114,11 +2123,11 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
}
slapi_log_access(LDAP_DEBUG_STATS,
"conn=%" PRIu64 " op=%d RESULT err=%d"
- " tag=%" BERTAG_T " nentries=%d etime=%s%s%s%s\n",
+ " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s\n",
op->o_connid,
op->o_opid,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str, ext_str);
if (pbtxt) {
/* if !pbtxt ==> ext_str == "". Don't free ext_str. */
@@ -2126,7 +2135,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
}
} else {
int optype;
-#define LOG_MSG_FMT " tag=%" BERTAG_T " nentries=%d etime=%s%s%s\n"
+#define LOG_MSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s\n"
slapi_log_access(LDAP_DEBUG_ARGS,
connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_MSG_FMT :
LOG_CONN_OP_FMT_EXT_INT LOG_MSG_FMT,
@@ -2135,7 +2144,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
op_internal_id,
op_nested_count,
err, tag, nentries,
- etime,
+ wtime, optime, etime,
notes_str, csn_str);
/*
* If this is an unindexed search we should log it in the error log if
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index cef8c789c..8e76393c3 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1538,16 +1538,17 @@ typedef struct slapi_operation_results
*/
typedef struct op
{
- BerElement *o_ber; /* ber of the request */
- ber_int_t o_msgid; /* msgid of the request */
- ber_tag_t o_tag; /* tag of the request */
+ BerElement *o_ber; /* ber of the request */
+ ber_int_t o_msgid; /* msgid of the request */
+ ber_tag_t o_tag; /* tag of the request */
struct timespec o_hr_time_rel; /* internal system time op initiated */
struct timespec o_hr_time_utc; /* utc system time op initiated */
- int o_isroot; /* requestor is manager */
+ struct timespec o_hr_time_started_rel; /* internal system time op started */
+ int o_isroot; /* requestor is manager */
Slapi_DN o_sdn; /* dn bound when op was initiated */
- char *o_authtype; /* auth method used to bind dn */
+ char *o_authtype; /* auth method used to bind dn */
int o_ssf; /* ssf for this operation (highest between SASL and TLS/SSL) */
- int o_opid; /* id of this operation */
+ int o_opid; /* id of this operation */
PRUint64 o_connid; /* id of conn initiating this op; for logging only */
void *o_handler_data;
result_handler o_result_handler;
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 834a98742..8d9c3fa6a 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -8210,13 +8210,29 @@ void slapi_operation_time_elapsed(Slapi_Operation *o, struct timespec *elapsed);
*/
void slapi_operation_time_initiated(Slapi_Operation *o, struct timespec *initiated);
/**
- * Given an operation and a timeout, return a populate struct with the expiry
- * time of the operation suitable for checking with slapi_timespec_expire_check
+ * Given an operation, determine the time elapsed since the op
+ * was actually started.
*
- * \param Slapi_Operation o - the operation that is in progress
- * \param time_t timeout the seconds relative to operation initiation to expiry at.
- * \param struct timespec *expiry the timespec to popluate with the relative expiry.
+ * \param Slapi_Operation o - the operation which is in progress
+ * \param struct timespec *elapsed - location where the time difference will be
+ * placed.
+ */
+void slapi_operation_op_time_elapsed(Slapi_Operation *o, struct timespec *elapsed);
+/**
+ * Given an operation, determine the time elapsed that the op spent
+ * in the work queue before actually being dispatched to a worker thread
+ *
+ * \param Slapi_Operation o - the operation which is in progress
+ * \param struct timespec *elapsed - location where the time difference will be
+ * placed.
+ */
+void slapi_operation_workq_time_elapsed(Slapi_Operation *o, struct timespec *elapsed);
+/**
+ * Set the time the operation actually started
+ *
+ * \param Slapi_Operation o - the operation which is in progress
*/
+void slapi_operation_set_time_started(Slapi_Operation *o);
#endif
/**
--
2.26.2
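
With this patch every RESULT line carries wtime= and optime= next to etime=, and logconv.pl collects them with the simple field regexes shown above. The sketch below does the same aggregation standalone; the access log path is an assumption, and since wtime covers the work-queue wait and optime the time spent in the worker thread, etime should roughly equal their sum for a given operation.

```python
import re
from collections import Counter

# Assumed access log location for a given instance.
ACCESS_LOG = "/var/log/dirsrv/slapd-example/access"

# Same field patterns logconv.pl uses for the new keywords.
FIELDS = {name: re.compile(rf"{name}= *([0-9.]+)") for name in ("wtime", "optime", "etime")}
counters = {name: Counter() for name in FIELDS}

with open(ACCESS_LOG, errors="replace") as log:
    for line in log:
        if " RESULT err=" not in line:
            continue
        for name, pattern in FIELDS.items():
            match = pattern.search(line)
            if match:
                counters[name][match.group(1)] += 1

for name, counter in counters.items():
    print(f"----- Top 5 most frequent {name} values -----")
    for value, count in counter.most_common(5):
        print(f"{count:<8} {name}={value}")
```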

@ -1,31 +0,0 @@
From ec1714c81290a03ae9aa5fd10acf3e9be71596d7 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 11 Jun 2020 15:47:43 -0400
Subject: [PATCH] Issue 50912 - pwdReset can be modified by a user
Description: The attribute "pwdReset" should only be allowed to be set by the
server. Update schema definition to include NO-USER-MODIFICATION
relates: https://pagure.io/389-ds-base/issue/50912
Reviewed by: mreynolds(one line commit rule)
---
ldap/schema/02common.ldif | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif
index 966636bef..c6dc074db 100644
--- a/ldap/schema/02common.ldif
+++ b/ldap/schema/02common.ldif
@@ -76,7 +76,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2349 NAME ( 'passwordDictCheck' 'pwdDict
attributeTypes: ( 2.16.840.1.113730.3.1.2350 NAME ( 'passwordDictPath' 'pwdDictPath' ) DESC '389 Directory Server password policy attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2351 NAME ( 'passwordUserAttributes' 'pwdUserAttributes' ) DESC '389 Directory Server password policy attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2352 NAME ( 'passwordBadWords' 'pwdBadWords' ) DESC '389 Directory Server password policy attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE X-ORIGIN '389 Directory Server' )
-attributeTypes: ( 2.16.840.1.113730.3.1.2366 NAME 'pwdReset' DESC '389 Directory Server password policy attribute type' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE USAGE directoryOperation X-ORIGIN '389 Directory Server' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2366 NAME 'pwdReset' DESC '389 Directory Server password policy attribute type' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN '389 Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.198 NAME 'memberURL' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.199 NAME 'memberCertificateDescription' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.207 NAME 'vlvBase' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'Netscape Directory Server' )
--
2.26.2
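
Since pwdReset is now declared NO-USER-MODIFICATION, a direct client write to it should be refused by the server regardless of ACIs. The sketch below is a hedged negative test with python-ldap; the URI, bind DN and password are placeholders, and the exact result code is left open because it depends on how the server reports the refusal.

```python
import ldap

# Placeholder connection and entry details.
URI = "ldap://localhost:389"
USER_DN = "uid=testuser,ou=People,dc=example,dc=com"
USER_PW = "password"

conn = ldap.initialize(URI)
conn.simple_bind_s(USER_DN, USER_PW)

try:
    # pwdReset is an operational attribute marked NO-USER-MODIFICATION,
    # so this modify should not succeed for a regular client.
    conn.modify_s(USER_DN, [(ldap.MOD_REPLACE, "pwdReset", [b"FALSE"])])
    print("unexpected: the server accepted the modification")
except ldap.LDAPError as exc:
    print("modification rejected as expected:", exc)
finally:
    conn.unbind_s()
```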

@ -1,202 +0,0 @@
From a6a52365df26edd4f6b0028056395d943344d787 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 11 Jun 2020 15:30:28 -0400
Subject: [PATCH] Issue 50791 - Healthcheck should look for notes=A/F in access
log
Description: Add checks for notes=A (fully unindexed search) and
notes=F (Unknown attribute in search filter) in the
current access log.
relates: https://pagure.io/389-ds-base/issue/50791
Reviewed by: firstyear(Thanks!)
---
src/lib389/lib389/cli_ctl/health.py | 4 +-
src/lib389/lib389/dirsrv_log.py | 72 +++++++++++++++++++++++++++--
src/lib389/lib389/lint.py | 26 ++++++++++-
3 files changed, 96 insertions(+), 6 deletions(-)
diff --git a/src/lib389/lib389/cli_ctl/health.py b/src/lib389/lib389/cli_ctl/health.py
index 6333a753a..89484a11b 100644
--- a/src/lib389/lib389/cli_ctl/health.py
+++ b/src/lib389/lib389/cli_ctl/health.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -18,6 +18,7 @@ from lib389.monitor import MonitorDiskSpace
from lib389.replica import Replica, Changelog5
from lib389.nss_ssl import NssSsl
from lib389.dseldif import FSChecks, DSEldif
+from lib389.dirsrv_log import DirsrvAccessLog
from lib389 import lint
from lib389 import plugins
from lib389._constants import DSRC_HOME
@@ -37,6 +38,7 @@ CHECK_OBJECTS = [
Changelog5,
DSEldif,
NssSsl,
+ DirsrvAccessLog,
]
diff --git a/src/lib389/lib389/dirsrv_log.py b/src/lib389/lib389/dirsrv_log.py
index baac2a3c9..7bed4bb17 100644
--- a/src/lib389/lib389/dirsrv_log.py
+++ b/src/lib389/lib389/dirsrv_log.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2016 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -9,12 +9,17 @@
"""Helpers for managing the directory server internal logs.
"""
+import copy
import re
import gzip
from dateutil.parser import parse as dt_parse
from glob import glob
from lib389.utils import ensure_bytes
-
+from lib389._mapped_object_lint import DSLint
+from lib389.lint import (
+ DSLOGNOTES0001, # Unindexed search
+ DSLOGNOTES0002, # Unknown attr in search filter
+)
# Because many of these settings can change live, we need to check for certain
# attributes all the time.
@@ -35,7 +40,7 @@ MONTH_LOOKUP = {
}
-class DirsrvLog(object):
+class DirsrvLog(DSLint):
"""Class of functions to working with the various DIrectory Server logs
"""
def __init__(self, dirsrv):
@@ -189,6 +194,67 @@ class DirsrvAccessLog(DirsrvLog):
self.full_regexs = [self.prog_m1, self.prog_con, self.prog_discon]
self.result_regexs = [self.prog_notes, self.prog_repl,
self.prog_result]
+ @classmethod
+ def lint_uid(cls):
+ return 'logs'
+
+ def _log_get_search_stats(self, conn, op):
+ lines = self.match(f".* conn={conn} op={op} SRCH base=.*")
+ if len(lines) != 1:
+ return None
+
+ quoted_vals = re.findall('"([^"]*)"', lines[0])
+ return {
+ 'base': quoted_vals[0],
+ 'filter': quoted_vals[1],
+ 'timestamp': re.findall('\[(.*)\]', lines[0])[0],
+ 'scope': lines[0].split(' scope=', 1)[1].split(' ',1)[0]
+ }
+
+ def _lint_notes(self):
+ """
+ Check for notes=A (fully unindexed searches), and
+ notes=F (unknown attribute in filter)
+ """
+ for pattern, lint_report in [(".* notes=A", DSLOGNOTES0001), (".* notes=F", DSLOGNOTES0002)]:
+ lines = self.match(pattern)
+ if len(lines) > 0:
+ count = 0
+ searches = []
+ for line in lines:
+ if ' RESULT err=' in line:
+ # Looks like a valid notes=A/F
+ conn = line.split(' conn=', 1)[1].split(' ',1)[0]
+ op = line.split(' op=', 1)[1].split(' ',1)[0]
+ etime = line.split(' etime=', 1)[1].split(' ',1)[0]
+ stats = self._log_get_search_stats(conn, op)
+ if stats is not None:
+ timestamp = stats['timestamp']
+ base = stats['base']
+ scope = stats['scope']
+ srch_filter = stats['filter']
+ count += 1
+ if lint_report == DSLOGNOTES0001:
+ searches.append(f'\n [{count}] Unindexed Search\n'
+ f' - date: {timestamp}\n'
+ f' - conn/op: {conn}/{op}\n'
+ f' - base: {base}\n'
+ f' - scope: {scope}\n'
+ f' - filter: {srch_filter}\n'
+ f' - etime: {etime}\n')
+ else:
+ searches.append(f'\n [{count}] Invalid Attribute in Filter\n'
+ f' - date: {timestamp}\n'
+ f' - conn/op: {conn}/{op}\n'
+ f' - filter: {srch_filter}\n')
+ if len(searches) > 0:
+ report = copy.deepcopy(lint_report)
+ report['items'].append(self._get_log_path())
+ report['detail'] = report['detail'].replace('NUMBER', str(count))
+ for srch in searches:
+ report['detail'] += srch
+ yield report
+
def _get_log_path(self):
"""Return the current log file location"""
diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
index a103feec7..4b1700b92 100644
--- a/src/lib389/lib389/lint.py
+++ b/src/lib389/lib389/lint.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -253,7 +253,7 @@ can use the CLI tool "dsconf" to resolve the conflict. Here is an example:
Remove conflict entry and keep only the original/counterpart entry:
- # dsconf slapd-YOUR_INSTANCE repl-conflict remove <DN of conflict entry>
+ # dsconf slapd-YOUR_INSTANCE repl-conflict delete <DN of conflict entry>
Replace the original/counterpart entry with the conflict entry:
@@ -418,3 +418,25 @@ until the time issues have been resolved:
Also look at https://access.redhat.com/documentation/en-us/red_hat_directory_server/11/html/administration_guide/managing_replication-troubleshooting_replication_related_problems
and find the paragraph "Too much time skew"."""
}
+
+DSLOGNOTES0001 = {
+ 'dsle': 'DSLOGNOTES0001',
+ 'severity': 'Medium',
+ 'description': 'Unindexed Search',
+ 'items': ['Performance'],
+ 'detail': """Found NUMBER fully unindexed searches in the current access log.
+Unindexed searches can cause high CPU and slow down the entire server's performance.\n""",
+ 'fix': """Examine the searches that are unindexed, and either properly index the attributes
+in the filter, increase the nsslapd-idlistscanlimit, or stop using that filter."""
+}
+
+DSLOGNOTES0002 = {
+ 'dsle': 'DSLOGNOTES0002',
+ 'severity': 'Medium',
+ 'description': 'Unknown Attribute In Filter',
+ 'items': ['Possible Performance Impact'],
+ 'detail': """Found NUMBER searches in the current access log that are using an
+unknown attribute in the search filter.\n""",
+ 'fix': """Stop using these unknown attributes in the filter, or add the schema
+to the server and make sure it's properly indexed."""
+}
--
2.26.2
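
Outside of lib389, the check added in _lint_notes() reduces to scanning RESULT lines for notes=A or notes=F and pulling base, scope and filter from the matching SRCH line. A rough standalone version follows; the access log path is an assumption and the parsing is deliberately simplified compared to the lib389 code.

```python
import re

ACCESS_LOG = "/var/log/dirsrv/slapd-example/access"  # assumed path

CHECKS = [
    ("Unindexed search (notes=A)", re.compile(r" notes=[A-Z,]*A")),
    ("Unknown attribute in filter (notes=F)", re.compile(r" notes=[A-Z,]*F")),
]

with open(ACCESS_LOG, errors="replace") as log:
    lines = log.readlines()

def search_details(conn, op):
    # Mirror _log_get_search_stats: find the SRCH line for this conn/op pair.
    for line in lines:
        if f" conn={conn} op={op} SRCH base=" in line:
            quoted = re.findall(r'"([^"]*)"', line)
            scope = line.split(" scope=", 1)[1].split(" ", 1)[0]
            return {"base": quoted[0], "scope": scope, "filter": quoted[1]}
    return None

for label, pattern in CHECKS:
    for line in lines:
        if " RESULT err=" in line and pattern.search(line):
            conn = line.split(" conn=", 1)[1].split(" ", 1)[0]
            op = line.split(" op=", 1)[1].split(" ", 1)[0]
            print(label, f"conn={conn}", f"op={op}", search_details(conn, op) or "")
```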

@ -1,51 +0,0 @@
From 2844d4ad90cbbd23ae75309e50ae4d7145586bb7 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 10 Jun 2020 14:07:24 -0400
Subject: [PATCH] Issue 51144 - dsctl fails with instance names that contain
slapd-
Bug Description: If an instance name contains 'slapd-' the CLI breaks:
slapd-test-slapd
Fix Description: Only strip off "slapd-" from the front of the instance
name.
relates: https://pagure.io/389-ds-base/issue/51144
Reviewed by: firstyear(Thanks!)
---
src/lib389/lib389/__init__.py | 2 +-
src/lib389/lib389/dseldif.py | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 0ff1ab173..63d44b60a 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -710,7 +710,7 @@ class DirSrv(SimpleLDAPObject, object):
# Don't need a default value now since it's set in init.
if serverid is None and hasattr(self, 'serverid'):
serverid = self.serverid
- elif serverid is not None:
+ elif serverid is not None and serverid.startswith('slapd-'):
serverid = serverid.replace('slapd-', '', 1)
if self.serverid is None:
diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py
index 96c9af9d1..f2725add9 100644
--- a/src/lib389/lib389/dseldif.py
+++ b/src/lib389/lib389/dseldif.py
@@ -40,7 +40,8 @@ class DSEldif(DSLint):
if serverid:
# Get the dse.ldif from the instance name
prefix = os.environ.get('PREFIX', ""),
- serverid = serverid.replace("slapd-", "")
+ if serverid.startswith("slapd-"):
+ serverid = serverid.replace("slapd-", "", 1)
self.path = "{}/etc/dirsrv/slapd-{}/dse.ldif".format(prefix[0], serverid)
else:
ds_paths = Paths(self._instance.serverid, self._instance)
--
2.26.2
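
The behavioural difference is easy to show in isolation: a blanket str.replace() strips "slapd-" wherever it occurs, while the fix strips it only when it really is the prefix, and only once. The instance name below is hypothetical.

```python
def strip_serverid(serverid: str) -> str:
    # Fixed behaviour: only strip a leading "slapd-", and only once.
    if serverid.startswith("slapd-"):
        return serverid.replace("slapd-", "", 1)
    return serverid

# Hypothetical instance whose own name contains "slapd-".
name = "test-slapd-backup"

# Old dseldif.py behaviour stripped every occurrence, mangling the name:
assert name.replace("slapd-", "") == "test-backup"

# Old __init__.py behaviour stripped the first occurrence even mid-string:
assert name.replace("slapd-", "", 1) == "test-backup"

# Fixed behaviour leaves an unprefixed name alone and strips a real prefix once:
assert strip_serverid(name) == "test-slapd-backup"
assert strip_serverid("slapd-" + name) == "test-slapd-backup"
```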

@ -1,520 +0,0 @@
From 6cd4b1c60dbd3d7b74adb19a2434585d50553f39 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Fri, 5 Jun 2020 12:14:51 +0200
Subject: [PATCH] Ticket 49859 - A distinguished value can be missing in an
entry
Bug description:
According to RFC 4511 (see ticket), the values of the RDN attributes
should be present in an entry.
With a set of replicated operations, it is possible that those values
would be missing
Fix description:
MOD and MODRDN update checks that the RDN values are presents.
If they are missing they are added to the resulting entry. In addition
the set of modifications to add those values are also indexed.
For the specific case of single-valued attributes, where the final and unique value
cannot be the RDN value, the attribute nsds5ReplConflict is added.
https://pagure.io/389-ds-base/issue/49859
Reviewed by: Mark Reynolds, William Brown
Platforms tested: F31
---
.../replication/conflict_resolve_test.py | 174 +++++++++++++++++-
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 136 ++++++++++++++
ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 37 +++-
.../servers/slapd/back-ldbm/proto-back-ldbm.h | 1 +
4 files changed, 343 insertions(+), 5 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py
index 99a072935..48d0067db 100644
--- a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py
+++ b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py
@@ -10,10 +10,11 @@ import time
import logging
import ldap
import pytest
+import re
from itertools import permutations
from lib389._constants import *
from lib389.idm.nscontainer import nsContainers
-from lib389.idm.user import UserAccounts
+from lib389.idm.user import UserAccounts, UserAccount
from lib389.idm.group import Groups
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.replica import ReplicationManager
@@ -763,6 +764,177 @@ class TestTwoMasters:
user_dns_m2 = [user.dn for user in test_users_m2.list()]
assert set(user_dns_m1) == set(user_dns_m2)
+ def test_conflict_attribute_multi_valued(self, topology_m2, base_m2):
+ """An RDN attribute being multi-valued, checks that after several operations
+ MODRDN and MOD_REPL its RDN values are the same on both servers
+
+ :id: 225b3522-8ed7-4256-96f9-5fab9b7044a5
+ :setup: Two master replication,
+ audit log, error log for replica and access log for internal
+ :steps:
+ 1. Create a test entry uid=user_test_1000,...
+ 2. Pause all replication agreements
+ 3. On M1 rename it into uid=foo1,...
+ 4. On M2 rename it into uid=foo2,...
+ 5. On M1 MOD_REPL uid:foo1
+ 6. Resume all replication agreements
+ 7. Check that entry on M1 has uid=foo1, foo2
+ 8. Check that entry on M2 has uid=foo1, foo2
+ 9. Check that entry on M1 and M2 has the same uid values
+ :expectedresults:
+ 1. It should pass
+ 2. It should pass
+ 3. It should pass
+ 4. It should pass
+ 5. It should pass
+ 6. It should pass
+ 7. It should pass
+ 8. It should pass
+ 9. It should pass
+ """
+
+ M1 = topology_m2.ms["master1"]
+ M2 = topology_m2.ms["master2"]
+
+ # add a test user
+ test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
+ user_1 = test_users_m1.create_test_user(uid=1000)
+ test_users_m2 = UserAccount(M2, user_1.dn)
+ # Waiting for the user to be replicated
+ for i in range(0,4):
+ time.sleep(1)
+ if test_users_m2.exists():
+ break
+ assert(test_users_m2.exists())
+
+ # Stop replication agreements
+ topology_m2.pause_all_replicas()
+
+ # On M1 rename test entry in uid=foo1
+ original_dn = user_1.dn
+ user_1.rename('uid=foo1')
+ time.sleep(1)
+
+ # On M2 rename test entry in uid=foo2
+ M2.rename_s(original_dn, 'uid=foo2')
+ time.sleep(2)
+
+ # on M1 MOD_REPL uid into foo1
+ user_1.replace('uid', 'foo1')
+
+ # resume replication agreements
+ topology_m2.resume_all_replicas()
+ time.sleep(5)
+
+ # check that on M1, the entry 'uid' has two values 'foo1' and 'foo2'
+ final_dn = re.sub('^.*1000,', 'uid=foo2,', original_dn)
+ final_user_m1 = UserAccount(M1, final_dn)
+ for val in final_user_m1.get_attr_vals_utf8('uid'):
+ log.info("Check %s is on M1" % val)
+ assert(val in ['foo1', 'foo2'])
+
+ # check that on M2, the entry 'uid' has two values 'foo1' and 'foo2'
+ final_user_m2 = UserAccount(M2, final_dn)
+ for val in final_user_m2.get_attr_vals_utf8('uid'):
+ log.info("Check %s is on M1" % val)
+ assert(val in ['foo1', 'foo2'])
+
+ # check that the entry have the same uid values
+ for val in final_user_m1.get_attr_vals_utf8('uid'):
+ log.info("Check M1.uid %s is also on M2" % val)
+ assert(val in final_user_m2.get_attr_vals_utf8('uid'))
+
+ for val in final_user_m2.get_attr_vals_utf8('uid'):
+ log.info("Check M2.uid %s is also on M1" % val)
+ assert(val in final_user_m1.get_attr_vals_utf8('uid'))
+
+ def test_conflict_attribute_single_valued(self, topology_m2, base_m2):
+ """An RDN attribute being single-valued, checks that after several operations
+ MODRDN and MOD_REPL its RDN values are the same on both servers
+
+ :id: c38ae613-5d1e-47cf-b051-c7284e64b817
+ :setup: Two master replication, test container for entries, enable plugin logging,
+ audit log, error log for replica and access log for internal
+ :steps:
+ 1. Create a test entry uid=user_test_1000,...
+ 2. Pause all replication agreements
+ 3. On M1 rename it into employeenumber=foo1,...
+ 4. On M2 rename it into employeenumber=foo2,...
+ 5. On M1 MOD_REPL employeenumber:foo1
+ 6. Resume all replication agreements
+ 7. Check that entry on M1 has employeenumber=foo1
+ 8. Check that entry on M2 has employeenumber=foo1
+ 9. Check that entry on M1 and M2 has the same employeenumber values
+ :expectedresults:
+ 1. It should pass
+ 2. It should pass
+ 3. It should pass
+ 4. It should pass
+ 5. It should pass
+ 6. It should pass
+ 7. It should pass
+ 8. It should pass
+ 9. It should pass
+ """
+
+ M1 = topology_m2.ms["master1"]
+ M2 = topology_m2.ms["master2"]
+
+ # add a test user with a dummy 'uid' extra value because modrdn removes
+ # uid that conflict with 'account' objectclass
+ test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
+ user_1 = test_users_m1.create_test_user(uid=1000)
+ user_1.add('objectclass', 'extensibleobject')
+ user_1.add('uid', 'dummy')
+ test_users_m2 = UserAccount(M2, user_1.dn)
+
+ # Waiting for the user to be replicated
+ for i in range(0,4):
+ time.sleep(1)
+ if test_users_m2.exists():
+ break
+ assert(test_users_m2.exists())
+
+ # Stop replication agreements
+ topology_m2.pause_all_replicas()
+
+ # On M1 rename test entry in employeenumber=foo1
+ original_dn = user_1.dn
+ user_1.rename('employeenumber=foo1')
+ time.sleep(1)
+
+ # On M2 rename test entry in employeenumber=foo2
+ M2.rename_s(original_dn, 'employeenumber=foo2')
+ time.sleep(2)
+
+ # on M1 MOD_REPL uid into foo1
+ user_1.replace('employeenumber', 'foo1')
+
+ # resume replication agreements
+ topology_m2.resume_all_replicas()
+ time.sleep(5)
+
+ # check that on M1, the entry 'employeenumber' has value 'foo1'
+ final_dn = re.sub('^.*1000,', 'employeenumber=foo2,', original_dn)
+ final_user_m1 = UserAccount(M1, final_dn)
+ for val in final_user_m1.get_attr_vals_utf8('employeenumber'):
+ log.info("Check %s is on M1" % val)
+ assert(val in ['foo1'])
+
+ # check that on M2, the entry 'employeenumber' has values 'foo1'
+ final_user_m2 = UserAccount(M2, final_dn)
+ for val in final_user_m2.get_attr_vals_utf8('employeenumber'):
+ log.info("Check %s is on M2" % val)
+ assert(val in ['foo1'])
+
+ # check that the entry have the same uid values
+ for val in final_user_m1.get_attr_vals_utf8('employeenumber'):
+ log.info("Check M1.uid %s is also on M2" % val)
+ assert(val in final_user_m2.get_attr_vals_utf8('employeenumber'))
+
+ for val in final_user_m2.get_attr_vals_utf8('employeenumber'):
+ log.info("Check M2.uid %s is also on M1" % val)
+ assert(val in final_user_m1.get_attr_vals_utf8('employeenumber'))
class TestThreeMasters:
def test_nested_entries(self, topology_m3, base_m3):
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index e9d7e87e3..a507f3c31 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -213,6 +213,112 @@ error:
return retval;
}
+int32_t
+entry_get_rdn_mods(Slapi_PBlock *pb, Slapi_Entry *entry, CSN *csn, int repl_op, Slapi_Mods **smods_ret)
+{
+ unsigned long op_type = SLAPI_OPERATION_NONE;
+ char *new_rdn = NULL;
+ char **dns = NULL;
+ char **rdns = NULL;
+ Slapi_Mods *smods = NULL;
+ char *type = NULL;
+ struct berval *bvp[2] = {0};
+ struct berval bv;
+ Slapi_Attr *attr = NULL;
+ const char *entry_dn = NULL;
+
+ *smods_ret = NULL;
+ entry_dn = slapi_entry_get_dn_const(entry);
+ /* Do not bother to check that the RDN is present, no one renames the RUV or changes its nsuniqueid */
+ if (strcasestr(entry_dn, RUV_STORAGE_ENTRY_UNIQUEID)) {
+ return 0;
+ }
+
+ /* First get the RDNs of the operation */
+ slapi_pblock_get(pb, SLAPI_OPERATION_TYPE, &op_type);
+ switch (op_type) {
+ case SLAPI_OPERATION_MODIFY:
+ dns = slapi_ldap_explode_dn(entry_dn, 0);
+ if (dns == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, "entry_get_rdn_mods",
+ "Fails to split DN \"%s\" into components\n", entry_dn);
+ return -1;
+ }
+ rdns = slapi_ldap_explode_rdn(dns[0], 0);
+ slapi_ldap_value_free(dns);
+
+ break;
+ case SLAPI_OPERATION_MODRDN:
+ slapi_pblock_get(pb, SLAPI_MODRDN_NEWRDN, &new_rdn);
+ rdns = slapi_ldap_explode_rdn(new_rdn, 0);
+ break;
+ default:
+ break;
+ }
+ if (rdns == NULL || rdns[0] == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, "entry_get_rdn_mods",
+ "Fails to split RDN \"%s\" into components\n", slapi_entry_get_dn_const(entry));
+ return -1;
+ }
+
+ /* Update the entry to add RDNs values if they are missing */
+ smods = slapi_mods_new();
+
+ bvp[0] = &bv;
+ bvp[1] = NULL;
+ for (size_t rdns_count = 0; rdns[rdns_count]; rdns_count++) {
+ Slapi_Value *value;
+ attr = NULL;
+ slapi_rdn2typeval(rdns[rdns_count], &type, &bv);
+
+ /* Check if the RDN value exists */
+ if ((slapi_entry_attr_find(entry, type, &attr) != 0) ||
+ (slapi_attr_value_find(attr, &bv))) {
+ const CSN *csn_rdn_add;
+ const CSN *adcsn = attr_get_deletion_csn(attr);
+
+ /* It is missing => adds it */
+ if (slapi_attr_flag_is_set(attr, SLAPI_ATTR_FLAG_SINGLE)) {
+ if (csn_compare(adcsn, csn) >= 0) {
+ /* this is a single valued attribute and the current value
+ * (that is different from RDN value) is more recent than
+ * the RDN value we want to apply.
+ * Keep the current value and add a conflict flag
+ */
+
+ type = ATTR_NSDS5_REPLCONFLICT;
+ bv.bv_val = "RDN value may be missing because it is single-valued";
+ bv.bv_len = strlen(bv.bv_val);
+ slapi_entry_add_string(entry, type, bv.bv_val);
+ slapi_mods_add_modbvps(smods, LDAP_MOD_ADD, type, bvp);
+ continue;
+ }
+ }
+ /* if a RDN value needs to be forced, make sure it csn is ahead */
+ slapi_mods_add_modbvps(smods, LDAP_MOD_ADD, type, bvp);
+ csn_rdn_add = csn_max(adcsn, csn);
+
+ if (entry_apply_mods_wsi(entry, smods, csn_rdn_add, repl_op)) {
+ slapi_log_err(SLAPI_LOG_ERR, "entry_get_rdn_mods",
+ "Fails to set \"%s\" in \"%s\"\n", type, slapi_entry_get_dn_const(entry));
+ slapi_ldap_value_free(rdns);
+ slapi_mods_free(&smods);
+ return -1;
+ }
+ /* Make the RDN value a distinguished value */
+ attr_value_find_wsi(attr, &bv, &value);
+ value_update_csn(value, CSN_TYPE_VALUE_DISTINGUISHED, csn_rdn_add);
+ }
+ }
+ slapi_ldap_value_free(rdns);
+ if (smods->num_mods == 0) {
+ /* smods_ret already NULL, just free the useless smods */
+ slapi_mods_free(&smods);
+ } else {
+ *smods_ret = smods;
+ }
+ return 0;
+}
/**
Apply the mods to the ec entry. Check for syntax, schema problems.
Check for abandon.
@@ -269,6 +375,8 @@ modify_apply_check_expand(
goto done;
}
+
+
/*
* If the objectClass attribute type was modified in any way, expand
* the objectClass values to reflect the inheritance hierarchy.
@@ -414,6 +522,7 @@ ldbm_back_modify(Slapi_PBlock *pb)
int result_sent = 0;
int32_t parent_op = 0;
struct timespec parent_time;
+ Slapi_Mods *smods_add_rdn = NULL;
slapi_pblock_get(pb, SLAPI_BACKEND, &be);
slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
@@ -731,6 +840,15 @@ ldbm_back_modify(Slapi_PBlock *pb)
}
} /* else if new_mod_count == mod_count then betxnpremod plugin did nothing */
+ /* time to check if applying a replicated operation removed
+ * the RDN value from the entry. Assuming that only replicated update
+ * can lead to that bad result
+ */
+ if (entry_get_rdn_mods(pb, ec->ep_entry, opcsn, repl_op, &smods_add_rdn)) {
+ goto error_return;
+ }
+
+
/*
* Update the ID to Entry index.
* Note that id2entry_add replaces the entry, so the Entry ID
@@ -764,6 +882,23 @@ ldbm_back_modify(Slapi_PBlock *pb)
MOD_SET_ERROR(ldap_result_code, LDAP_OPERATIONS_ERROR, retry_count);
goto error_return;
}
+
+ if (smods_add_rdn && slapi_mods_get_num_mods(smods_add_rdn) > 0) {
+ retval = index_add_mods(be, (LDAPMod **) slapi_mods_get_ldapmods_byref(smods_add_rdn), e, ec, &txn);
+ if (DB_LOCK_DEADLOCK == retval) {
+ /* Abort and re-try */
+ slapi_mods_free(&smods_add_rdn);
+ continue;
+ }
+ if (retval != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modify",
+ "index_add_mods (rdn) failed, err=%d %s\n",
+ retval, (msg = dblayer_strerror(retval)) ? msg : "");
+ MOD_SET_ERROR(ldap_result_code, LDAP_OPERATIONS_ERROR, retry_count);
+ slapi_mods_free(&smods_add_rdn);
+ goto error_return;
+ }
+ }
/*
* Remove the old entry from the Virtual List View indexes.
* Add the new entry to the Virtual List View indexes.
@@ -978,6 +1113,7 @@ error_return:
common_return:
slapi_mods_done(&smods);
+ slapi_mods_free(&smods_add_rdn);
if (inst) {
if (ec_locked || cache_is_in_cache(&inst->inst_cache, ec)) {
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
index fde83c99f..e97b7a5f6 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
@@ -21,7 +21,7 @@ static void moddn_unlock_and_return_entry(backend *be, struct backentry **target
static int moddn_newrdn_mods(Slapi_PBlock *pb, const char *olddn, struct backentry *ec, Slapi_Mods *smods_wsi, int is_repl_op);
static IDList *moddn_get_children(back_txn *ptxn, Slapi_PBlock *pb, backend *be, struct backentry *parententry, Slapi_DN *parentdn, struct backentry ***child_entries, struct backdn ***child_dns, int is_resurect_operation);
static int moddn_rename_children(back_txn *ptxn, Slapi_PBlock *pb, backend *be, IDList *children, Slapi_DN *dn_parentdn, Slapi_DN *dn_newsuperiordn, struct backentry *child_entries[]);
-static int modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li, struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3);
+static int modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li, struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3, Slapi_Mods *smods4);
static void mods_remove_nsuniqueid(Slapi_Mods *smods);
#define MOD_SET_ERROR(rc, error, count) \
@@ -100,6 +100,7 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
Connection *pb_conn = NULL;
int32_t parent_op = 0;
struct timespec parent_time;
+ Slapi_Mods *smods_add_rdn = NULL;
if (slapi_pblock_get(pb, SLAPI_CONN_ID, &conn_id) < 0) {
conn_id = 0; /* connection is NULL */
@@ -842,6 +843,15 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
goto error_return;
}
}
+
+ /* time to check if applying a replicated operation removed
+ * the RDN value from the entry. Assuming that only replicated update
+ * can lead to that bad result
+ */
+ if (entry_get_rdn_mods(pb, ec->ep_entry, opcsn, is_replicated_operation, &smods_add_rdn)) {
+ goto error_return;
+ }
+
/* check that the entry still obeys the schema */
if (slapi_entry_schema_check(pb, ec->ep_entry) != 0) {
ldap_result_code = LDAP_OBJECT_CLASS_VIOLATION;
@@ -1003,7 +1013,7 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
/*
* Update the indexes for the entry.
*/
- retval = modrdn_rename_entry_update_indexes(&txn, pb, li, e, &ec, &smods_generated, &smods_generated_wsi, &smods_operation_wsi);
+ retval = modrdn_rename_entry_update_indexes(&txn, pb, li, e, &ec, &smods_generated, &smods_generated_wsi, &smods_operation_wsi, smods_add_rdn);
if (DB_LOCK_DEADLOCK == retval) {
/* Retry txn */
continue;
@@ -1497,6 +1507,7 @@ common_return:
slapi_mods_done(&smods_operation_wsi);
slapi_mods_done(&smods_generated);
slapi_mods_done(&smods_generated_wsi);
+ slapi_mods_free(&smods_add_rdn);
slapi_ch_free((void **)&child_entries);
slapi_ch_free((void **)&child_dns);
if (ldap_result_matcheddn && 0 != strcmp(ldap_result_matcheddn, "NULL"))
@@ -1778,7 +1789,7 @@ mods_remove_nsuniqueid(Slapi_Mods *smods)
* mods contains the list of attribute change made.
*/
static int
-modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li __attribute__((unused)), struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3)
+modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbminfo *li __attribute__((unused)), struct backentry *e, struct backentry **ec, Slapi_Mods *smods1, Slapi_Mods *smods2, Slapi_Mods *smods3, Slapi_Mods *smods4)
{
backend *be;
ldbm_instance *inst;
@@ -1874,6 +1885,24 @@ modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbm
goto error_return;
}
}
+ if (smods4 != NULL && slapi_mods_get_num_mods(smods4) > 0) {
+ /*
+ * update the indexes: lastmod, rdn, etc.
+ */
+ retval = index_add_mods(be, slapi_mods_get_ldapmods_byref(smods4), e, *ec, ptxn);
+ if (DB_LOCK_DEADLOCK == retval) {
+ /* Retry txn */
+ slapi_log_err(SLAPI_LOG_BACKLDBM, "modrdn_rename_entry_update_indexes",
+ "index_add_mods4 deadlock\n");
+ goto error_return;
+ }
+ if (retval != 0) {
+ slapi_log_err(SLAPI_LOG_TRACE, "modrdn_rename_entry_update_indexes",
+ "index_add_mods 4 failed, err=%d %s\n",
+ retval, (msg = dblayer_strerror(retval)) ? msg : "");
+ goto error_return;
+ }
+ }
/*
* Remove the old entry from the Virtual List View indexes.
* Add the new entry to the Virtual List View indexes.
@@ -1991,7 +2020,7 @@ moddn_rename_child_entry(
* Update all the indexes.
*/
retval = modrdn_rename_entry_update_indexes(ptxn, pb, li, e, ec,
- smodsp, NULL, NULL);
+ smodsp, NULL, NULL, NULL);
/* JCMREPL - Should the children get updated modifiersname and lastmodifiedtime? */
slapi_mods_done(&smods);
}
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index 4d2524fd9..e2f1100ed 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -324,6 +324,7 @@ int get_parent_rdn(DB *db, ID parentid, Slapi_RDN *srdn);
/*
* modify.c
*/
+int32_t entry_get_rdn_mods(Slapi_PBlock *pb, Slapi_Entry *entry, CSN *csn, int repl_op, Slapi_Mods **smods_ret);
int modify_update_all(backend *be, Slapi_PBlock *pb, modify_context *mc, back_txn *txn);
void modify_init(modify_context *mc, struct backentry *old_entry);
int modify_apply_mods(modify_context *mc, Slapi_Mods *smods);
--
2.26.2
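
The heart of entry_get_rdn_mods() is splitting the entry's new RDN into attribute/value pairs and checking that each value is actually present in the entry, generating a MOD_ADD for any that are missing (or flagging nsds5ReplConflict for a single-valued attribute). The Python sketch below shows just that check; the entry data is made up, and the plain lower-case comparison is a simplification of the matching-rule based comparison the server performs.

```python
import ldap.dn

def missing_rdn_values(dn, entry):
    """Return the (attr, value) pairs from the first RDN that the entry lacks."""
    missing = []
    for attr, value, _flags in ldap.dn.str2dn(dn)[0]:  # first RDN only
        present = [v.lower() for v in entry.get(attr.lower(), [])]
        if value.lower() not in present:
            missing.append((attr, value))
    return missing

# Hypothetical entry after a conflicting MODRDN: the distinguished value is gone.
entry = {"uid": ["foo1", "user_test_1000"], "cn": ["test user 1000"]}
print(missing_rdn_values("uid=foo2,ou=People,dc=example,dc=com", entry))
# -> [('uid', 'foo2')] : the server would add this value back via a MOD_ADD,
#    or set nsds5ReplConflict instead for a single-valued attribute.
```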

@ -1,128 +0,0 @@
From 2be9d1b4332d3b9b55a2d285e9610813100e235f Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 22 Jun 2020 17:49:10 -0400
Subject: [PATCH] Issue 49256 - log warning when thread number is very
different from autotuned value
Description: To help prevent customers from setting incorrect values for
the thread number it would be useful to warn them that the
configured value is either way too low or way too high.
relates: https://pagure.io/389-ds-base/issue/49256
Reviewed by: firstyear(Thanks!)
---
.../tests/suites/config/autotuning_test.py | 28 +++++++++++++++
ldap/servers/slapd/libglobs.c | 34 ++++++++++++++++++-
ldap/servers/slapd/slap.h | 3 ++
3 files changed, 64 insertions(+), 1 deletion(-)
diff --git a/dirsrvtests/tests/suites/config/autotuning_test.py b/dirsrvtests/tests/suites/config/autotuning_test.py
index d1c751444..540761250 100644
--- a/dirsrvtests/tests/suites/config/autotuning_test.py
+++ b/dirsrvtests/tests/suites/config/autotuning_test.py
@@ -43,6 +43,34 @@ def test_threads_basic(topo):
assert topo.standalone.config.get_attr_val_int("nsslapd-threadnumber") > 0
+def test_threads_warning(topo):
+ """Check that we log a warning if the thread number is too high or low
+
+ :id: db92412b-2812-49de-84b0-00f452cd254f
+ :setup: Standalone Instance
+ :steps:
+ 1. Get autotuned thread number
+ 2. Set threads way higher than hw threads, and find a warning in the log
+ 3. Set threads way lower than hw threads, and find a warning in the log
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+ topo.standalone.config.set("nsslapd-threadnumber", "-1")
+ autotuned_value = topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber")
+
+ topo.standalone.config.set("nsslapd-threadnumber", str(int(autotuned_value) * 4))
+ time.sleep(.5)
+ assert topo.standalone.ds_error_log.match('.*higher.*hurt server performance.*')
+
+ if int(autotuned_value) > 1:
+ # If autotuned is 1, there isn't anything to test here
+ topo.standalone.config.set("nsslapd-threadnumber", "1")
+ time.sleep(.5)
+ assert topo.standalone.ds_error_log.match('.*lower.*hurt server performance.*')
+
+
@pytest.mark.parametrize("invalid_value", ('-2', '0', 'invalid'))
def test_threads_invalid_value(topo, invalid_value):
"""Check nsslapd-threadnumber for an invalid values
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index fbf90d92d..88676a303 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -4374,6 +4374,7 @@ config_set_threadnumber(const char *attrname, char *value, char *errorbuf, int a
{
int retVal = LDAP_SUCCESS;
int32_t threadnum = 0;
+ int32_t hw_threadnum = 0;
char *endp = NULL;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
@@ -4386,8 +4387,39 @@ config_set_threadnumber(const char *attrname, char *value, char *errorbuf, int a
threadnum = strtol(value, &endp, 10);
/* Means we want to re-run the hardware detection. */
+ hw_threadnum = util_get_hardware_threads();
if (threadnum == -1) {
- threadnum = util_get_hardware_threads();
+ threadnum = hw_threadnum;
+ } else {
+ /*
+ * Log a message if the user defined thread number is very different
+ * from the hardware threads as this is probably not the optimal
+ * value.
+ */
+ if (threadnum >= hw_threadnum) {
+ if (threadnum > MIN_THREADS && threadnum / hw_threadnum >= 4) {
+ /* We're over the default minimum and way higher than the hw
+ * threads. */
+ slapi_log_err(SLAPI_LOG_NOTICE, "config_set_threadnumber",
+ "The configured thread number (%d) is significantly "
+ "higher than the number of hardware threads (%d). "
+ "This can potentially hurt server performance. If "
+ "you are unsure how to tune \"nsslapd-threadnumber\" "
+ "then set it to \"-1\" and the server will tune it "
+ "according to the system hardware\n",
+ threadnum, hw_threadnum);
+ }
+ } else if (threadnum < MIN_THREADS) {
+ /* The thread number should never be less than the minimum and
+ * hardware threads. */
+ slapi_log_err(SLAPI_LOG_WARNING, "config_set_threadnumber",
+ "The configured thread number (%d) is lower than the number "
+ "of hardware threads (%d). This will hurt server performance. "
+ "If you are unsure how to tune \"nsslapd-threadnumber\" then "
+ "set it to \"-1\" and the server will tune it according to the "
+ "system hardware\n",
+ threadnum, hw_threadnum);
+ }
}
if (*endp != '\0' || errno == ERANGE || threadnum < 1 || threadnum > 65535) {
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 8e76393c3..894efd29c 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -403,6 +403,9 @@ typedef void (*VFPV)(); /* takes undefined arguments */
#define SLAPD_DEFAULT_PW_MAX_CLASS_CHARS_ATTRIBUTE 0
#define SLAPD_DEFAULT_PW_MAX_CLASS_CHARS_ATTRIBUTE_STR "0"
+#define MIN_THREADS 16
+#define MAX_THREADS 512
+
/* Default password values. */
--
2.26.2
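
The patch above compares the configured nsslapd-threadnumber against the hardware thread count and logs a notice or warning when the two diverge. Below is a minimal standalone Python sketch of that comparison; MIN_THREADS and the 4x ratio are taken from the patch, while the function name and messages are illustrative rather than actual 389-ds-base code.

    import os

    MIN_THREADS = 16          # lower bound used by the patch
    HIGH_RATIO = 4            # configured/hardware ratio that triggers the "too high" notice

    def check_thread_number(configured, hw_threads=None):
        """Return a warning string if the configured thread number looks wrong."""
        if hw_threads is None:
            hw_threads = os.cpu_count() or 1
        if configured == -1:
            return None  # autotune: just use the hardware thread count
        if configured >= hw_threads:
            if configured > MIN_THREADS and configured // hw_threads >= HIGH_RATIO:
                return ("threadnumber %d is much higher than the %d hardware threads; "
                        "this can hurt performance" % (configured, hw_threads))
        elif configured < MIN_THREADS:
            return ("threadnumber %d is lower than the %d hardware threads; "
                    "this will hurt performance" % (configured, hw_threads))
        return None

    if __name__ == "__main__":
        print(check_thread_number(256, hw_threads=8))   # "too high" notice
        print(check_thread_number(1, hw_threads=8))     # "too low" warning
        print(check_thread_number(-1, hw_threads=8))    # None: autotuned value is used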

@ -1,34 +0,0 @@
From d24381488a997dda0006b603fb2b452b726757c0 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <simon.pichugin@gmail.com>
Date: Thu, 25 Jun 2020 10:45:16 +0200
Subject: [PATCH] Issue 51188 - db2ldif crashes when LDIF file can't be
accessed
Bug Description: db2ldif crashes when we set '-a LDIF_PATH' to a place that
can't be accessed by the user (dirsrv by default)
Fix Description: Don't attempt to close DB if we bail after a failed
attempt to open LDIF file.
https://pagure.io/389-ds-base/issue/51188
Reviewed by: mreynolds (Thanks!)
---
ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
index 542147c3d..9ffd877cb 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
@@ -871,6 +871,7 @@ bdb_db2ldif(Slapi_PBlock *pb)
slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif",
"db2ldif: %s: can't open %s: %d (%s) while running as user \"%s\"\n",
inst->inst_name, fname, errno, dblayer_strerror(errno), slapdFrontendConfig->localuserinfo->pw_name);
+ we_start_the_backends = 0;
return_value = -1;
goto bye;
}
--
2.26.2
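
The fix above boils down to not tearing down a backend that was never started when opening the output LDIF fails. A hedged Python sketch of that "only clean up what you actually initialized" pattern follows; the function and callback names are invented for illustration and do not correspond to real db2ldif code.

    def export_to_ldif(ldif_path, open_backend, close_backend):
        """Sketch of db2ldif cleanup: close the backend only if we actually started it."""
        try:
            out = open(ldif_path, "w")          # may fail, e.g. permission denied
        except OSError as e:
            print("can't open %s: %s" % (ldif_path, e))
            return -1                           # bail out before the backend is touched
        we_started_the_backend = False
        try:
            open_backend()
            we_started_the_backend = True       # mirrors we_start_the_backends in the patch
            out.write("# ldif data...\n")
            return 0
        finally:
            out.close()
            if we_started_the_backend:
                close_backend()

    if __name__ == "__main__":
        rc = export_to_ldif("/root/forbidden/out.ldif",
                            lambda: print("backend opened"),
                            lambda: print("backend closed"))
        print("rc =", rc)                       # -1, and no backend close is attempted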

@ -1,33 +0,0 @@
From 5e0a2d34f1c03a7d6a1c8591896a21e122d90d6b Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Thu, 23 Jul 2020 23:45:18 +0200
Subject: [PATCH] Issue 51086 - Fix instance name length for interactive
install
Description: Instance name length is not properly validated
during interactive install. Add a check during user input.
https://pagure.io/389-ds-base/issue/51086
Reviewed by: mreynolds (Thanks!)
---
src/lib389/lib389/instance/setup.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index f5fc5495d..45c7dfdd4 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -308,6 +308,9 @@ class SetupDs(object):
val = input('\nEnter the instance name [{}]: '.format(slapd['instance_name'])).rstrip()
if val != "":
+ if len(val) > 80:
+ print("Server identifier should not be longer than 80 symbols")
+ continue
if not all(ord(c) < 128 for c in val):
print("Server identifier can not contain non ascii characters")
continue
--
2.26.2
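
The interactive-install fix adds a length check alongside the existing ASCII check. The following small Python sketch reproduces the same two validations; the 80-character limit and messages come from the patch, while the prompt loop itself is simplified and illustrative.

    def ask_instance_name(default="localhost", read=input):
        """Prompt until the instance name passes the length and ASCII checks."""
        while True:
            val = read("Enter the instance name [%s]: " % default).rstrip()
            if val == "":
                return default
            if len(val) > 80:
                print("Server identifier should not be longer than 80 symbols")
                continue
            if not all(ord(c) < 128 for c in val):
                print("Server identifier can not contain non ascii characters")
                continue
            return val

    if __name__ == "__main__":
        # Feed canned answers instead of stdin for a quick self-test.
        answers = iter(["x" * 100, "ínstance", "dir1"])
        print(ask_instance_name(read=lambda prompt: next(answers)))  # prints "dir1"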

@ -1,360 +0,0 @@
From 3e11020fa7a79d335a02c001435aabcf59aaa622 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Fri, 24 Jul 2020 12:14:44 -0400
Subject: [PATCH] Issue 51129 - SSL alert: The value of sslVersionMax "TLS1.3"
is higher than the supported version
Bug Description: If you try to set the sslVersionMax higher than the
default range, but within the supported range, you
would still get an error and the server would reset
the max to the default max value.
Fix Description: Keep track of both the supported and default SSL ranges,
and correctly use each range for value validation. If
the value is outside the supported range, then use the default
value, etc., but do not check the requested range against
the default range. We only use the default range if
there is no specified min or max in the config, or if
an invalid min or max value is set in the config.
Also, refactored the range variable names to be more
accurate:
enabledNSSVersions --> defaultNSSVersions
emin, emax --> dmin, dmax
relates: https://pagure.io/389-ds-base/issue/51129
Reviewed by: firstyear(Thanks!)
---
ldap/servers/slapd/ssl.c | 155 ++++++++++++++++----------------
src/lib389/lib389/dirsrv_log.py | 2 +-
2 files changed, 81 insertions(+), 76 deletions(-)
diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c
index 846106b42..7206cafd2 100644
--- a/ldap/servers/slapd/ssl.c
+++ b/ldap/servers/slapd/ssl.c
@@ -50,11 +50,11 @@
******************************************************************************/
#define DEFVERSION "TLS1.2"
-#define CURRENT_DEFAULT_SSL_VERSION SSL_LIBRARY_VERSION_TLS_1_2
extern char *slapd_SSL3ciphers;
extern symbol_t supported_ciphers[];
-static SSLVersionRange enabledNSSVersions;
+static SSLVersionRange defaultNSSVersions;
+static SSLVersionRange supportedNSSVersions;
static SSLVersionRange slapdNSSVersions;
@@ -1014,15 +1014,24 @@ slapd_nss_init(int init_ssl __attribute__((unused)), int config_available __attr
int create_certdb = 0;
PRUint32 nssFlags = 0;
char *certdir;
- char emin[VERSION_STR_LENGTH], emax[VERSION_STR_LENGTH];
- /* Get the range of the supported SSL version */
- SSL_VersionRangeGetDefault(ssl_variant_stream, &enabledNSSVersions);
+ char dmin[VERSION_STR_LENGTH], dmax[VERSION_STR_LENGTH];
+ char smin[VERSION_STR_LENGTH], smax[VERSION_STR_LENGTH];
- (void)slapi_getSSLVersion_str(enabledNSSVersions.min, emin, sizeof(emin));
- (void)slapi_getSSLVersion_str(enabledNSSVersions.max, emax, sizeof(emax));
+ /* Get the range of the supported SSL version */
+ SSL_VersionRangeGetSupported(ssl_variant_stream, &supportedNSSVersions);
+ (void)slapi_getSSLVersion_str(supportedNSSVersions.min, smin, sizeof(smin));
+ (void)slapi_getSSLVersion_str(supportedNSSVersions.max, smax, sizeof(smax));
+
+ /* Get the enabled default range */
+ SSL_VersionRangeGetDefault(ssl_variant_stream, &defaultNSSVersions);
+ (void)slapi_getSSLVersion_str(defaultNSSVersions.min, dmin, sizeof(dmin));
+ (void)slapi_getSSLVersion_str(defaultNSSVersions.max, dmax, sizeof(dmax));
slapi_log_err(SLAPI_LOG_CONFIG, "Security Initialization",
"slapd_nss_init - Supported range by NSS: min: %s, max: %s\n",
- emin, emax);
+ smin, smax);
+ slapi_log_err(SLAPI_LOG_CONFIG, "Security Initialization",
+ "slapd_nss_init - Enabled default range by NSS: min: %s, max: %s\n",
+ dmin, dmax);
/* set in slapd_bootstrap_config,
thus certdir is available even if config_available is false
@@ -1344,21 +1353,21 @@ static int
set_NSS_version(char *val, PRUint16 *rval, int ismin)
{
char *vp;
- char emin[VERSION_STR_LENGTH], emax[VERSION_STR_LENGTH];
+ char dmin[VERSION_STR_LENGTH], dmax[VERSION_STR_LENGTH];
if (NULL == rval) {
return 1;
}
- (void)slapi_getSSLVersion_str(enabledNSSVersions.min, emin, sizeof(emin));
- (void)slapi_getSSLVersion_str(enabledNSSVersions.max, emax, sizeof(emax));
+ (void)slapi_getSSLVersion_str(defaultNSSVersions.min, dmin, sizeof(dmin));
+ (void)slapi_getSSLVersion_str(defaultNSSVersions.max, dmax, sizeof(dmax));
if (!strncasecmp(val, SSLSTR, SSLLEN)) { /* ssl# NOT SUPPORTED */
if (ismin) {
- slapd_SSL_warn("SSL3 is no longer supported. Using NSS default min value: %s\n", emin);
- (*rval) = enabledNSSVersions.min;
+ slapd_SSL_warn("SSL3 is no longer supported. Using NSS default min value: %s", dmin);
+ (*rval) = defaultNSSVersions.min;
} else {
- slapd_SSL_warn("SSL3 is no longer supported. Using NSS default max value: %s\n", emax);
- (*rval) = enabledNSSVersions.max;
+ slapd_SSL_warn("SSL3 is no longer supported. Using NSS default max value: %s", dmax);
+ (*rval) = defaultNSSVersions.max;
}
} else if (!strncasecmp(val, TLSSTR, TLSLEN)) { /* tls# */
float tlsv;
@@ -1366,122 +1375,122 @@ set_NSS_version(char *val, PRUint16 *rval, int ismin)
sscanf(vp, "%4f", &tlsv);
if (tlsv < 1.1f) { /* TLS1.0 */
if (ismin) {
- if (enabledNSSVersions.min > CURRENT_DEFAULT_SSL_VERSION) {
+ if (supportedNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_0) {
slapd_SSL_warn("The value of sslVersionMin "
"\"%s\" is lower than the supported version; "
"the default value \"%s\" is used.",
- val, emin);
- (*rval) = enabledNSSVersions.min;
+ val, dmin);
+ (*rval) = defaultNSSVersions.min;
} else {
(*rval) = SSL_LIBRARY_VERSION_TLS_1_0;
}
} else {
- if (enabledNSSVersions.max < CURRENT_DEFAULT_SSL_VERSION) {
+ if (supportedNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_0) {
/* never happens */
slapd_SSL_warn("The value of sslVersionMax "
"\"%s\" is higher than the supported version; "
"the default value \"%s\" is used.",
- val, emax);
- (*rval) = enabledNSSVersions.max;
+ val, dmax);
+ (*rval) = defaultNSSVersions.max;
} else {
(*rval) = SSL_LIBRARY_VERSION_TLS_1_0;
}
}
} else if (tlsv < 1.2f) { /* TLS1.1 */
if (ismin) {
- if (enabledNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_1) {
+ if (supportedNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_1) {
slapd_SSL_warn("The value of sslVersionMin "
"\"%s\" is lower than the supported version; "
"the default value \"%s\" is used.",
- val, emin);
- (*rval) = enabledNSSVersions.min;
+ val, dmin);
+ (*rval) = defaultNSSVersions.min;
} else {
(*rval) = SSL_LIBRARY_VERSION_TLS_1_1;
}
} else {
- if (enabledNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_1) {
+ if (supportedNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_1) {
/* never happens */
slapd_SSL_warn("The value of sslVersionMax "
"\"%s\" is higher than the supported version; "
"the default value \"%s\" is used.",
- val, emax);
- (*rval) = enabledNSSVersions.max;
+ val, dmax);
+ (*rval) = defaultNSSVersions.max;
} else {
(*rval) = SSL_LIBRARY_VERSION_TLS_1_1;
}
}
} else if (tlsv < 1.3f) { /* TLS1.2 */
if (ismin) {
- if (enabledNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_2) {
+ if (supportedNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_2) {
slapd_SSL_warn("The value of sslVersionMin "
"\"%s\" is lower than the supported version; "
"the default value \"%s\" is used.",
- val, emin);
- (*rval) = enabledNSSVersions.min;
+ val, dmin);
+ (*rval) = defaultNSSVersions.min;
} else {
(*rval) = SSL_LIBRARY_VERSION_TLS_1_2;
}
} else {
- if (enabledNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_2) {
+ if (supportedNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_2) {
/* never happens */
slapd_SSL_warn("The value of sslVersionMax "
"\"%s\" is higher than the supported version; "
"the default value \"%s\" is used.",
- val, emax);
- (*rval) = enabledNSSVersions.max;
+ val, dmax);
+ (*rval) = defaultNSSVersions.max;
} else {
(*rval) = SSL_LIBRARY_VERSION_TLS_1_2;
}
}
} else if (tlsv < 1.4f) { /* TLS1.3 */
- if (ismin) {
- if (enabledNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_3) {
- slapd_SSL_warn("The value of sslVersionMin "
- "\"%s\" is lower than the supported version; "
- "the default value \"%s\" is used.",
- val, emin);
- (*rval) = enabledNSSVersions.min;
- } else {
- (*rval) = SSL_LIBRARY_VERSION_TLS_1_3;
- }
- } else {
- if (enabledNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_3) {
- /* never happens */
- slapd_SSL_warn("The value of sslVersionMax "
- "\"%s\" is higher than the supported version; "
- "the default value \"%s\" is used.",
- val, emax);
- (*rval) = enabledNSSVersions.max;
- } else {
- (*rval) = SSL_LIBRARY_VERSION_TLS_1_3;
- }
- }
+ if (ismin) {
+ if (supportedNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_3) {
+ slapd_SSL_warn("The value of sslVersionMin "
+ "\"%s\" is lower than the supported version; "
+ "the default value \"%s\" is used.",
+ val, dmin);
+ (*rval) = defaultNSSVersions.min;
+ } else {
+ (*rval) = SSL_LIBRARY_VERSION_TLS_1_3;
+ }
+ } else {
+ if (supportedNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_3) {
+ /* never happens */
+ slapd_SSL_warn("The value of sslVersionMax "
+ "\"%s\" is higher than the supported version; "
+ "the default value \"%s\" is used.",
+ val, dmax);
+ (*rval) = defaultNSSVersions.max;
+ } else {
+ (*rval) = SSL_LIBRARY_VERSION_TLS_1_3;
+ }
+ }
} else { /* Specified TLS is newer than supported */
if (ismin) {
slapd_SSL_warn("The value of sslVersionMin "
"\"%s\" is out of the range of the supported version; "
"the default value \"%s\" is used.",
- val, emin);
- (*rval) = enabledNSSVersions.min;
+ val, dmin);
+ (*rval) = defaultNSSVersions.min;
} else {
slapd_SSL_warn("The value of sslVersionMax "
"\"%s\" is out of the range of the supported version; "
"the default value \"%s\" is used.",
- val, emax);
- (*rval) = enabledNSSVersions.max;
+ val, dmax);
+ (*rval) = defaultNSSVersions.max;
}
}
} else {
if (ismin) {
slapd_SSL_warn("The value of sslVersionMin "
"\"%s\" is invalid; the default value \"%s\" is used.",
- val, emin);
- (*rval) = enabledNSSVersions.min;
+ val, dmin);
+ (*rval) = defaultNSSVersions.min;
} else {
slapd_SSL_warn("The value of sslVersionMax "
"\"%s\" is invalid; the default value \"%s\" is used.",
- val, emax);
- (*rval) = enabledNSSVersions.max;
+ val, dmax);
+ (*rval) = defaultNSSVersions.max;
}
}
return 0;
@@ -1511,10 +1520,9 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
char *tmpDir;
Slapi_Entry *e = NULL;
PRBool fipsMode = PR_FALSE;
- PRUint16 NSSVersionMin = enabledNSSVersions.min;
- PRUint16 NSSVersionMax = enabledNSSVersions.max;
+ PRUint16 NSSVersionMin = defaultNSSVersions.min;
+ PRUint16 NSSVersionMax = defaultNSSVersions.max;
char mymin[VERSION_STR_LENGTH], mymax[VERSION_STR_LENGTH];
- char newmax[VERSION_STR_LENGTH];
int allowweakcipher = CIPHER_SET_DEFAULTWEAKCIPHER;
int_fast16_t renegotiation = (int_fast16_t)SSL_RENEGOTIATE_REQUIRES_XTN;
@@ -1875,12 +1883,9 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
if (NSSVersionMin > NSSVersionMax) {
(void)slapi_getSSLVersion_str(NSSVersionMin, mymin, sizeof(mymin));
(void)slapi_getSSLVersion_str(NSSVersionMax, mymax, sizeof(mymax));
- slapd_SSL_warn("The min value of NSS version range \"%s\" is greater than the max value \"%s\".",
+ slapd_SSL_warn("The min value of NSS version range \"%s\" is greater than the max value \"%s\". Adjusting the max to match the miniumum.",
mymin, mymax);
- (void)slapi_getSSLVersion_str(enabledNSSVersions.max, newmax, sizeof(newmax));
- slapd_SSL_warn("Reset the max \"%s\" to supported max \"%s\".",
- mymax, newmax);
- NSSVersionMax = enabledNSSVersions.max;
+ NSSVersionMax = NSSVersionMin;
}
}
@@ -1896,7 +1901,7 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
if (sslStatus != SECSuccess) {
errorCode = PR_GetError();
slapd_SSL_error("Security Initialization - "
- "slapd_ssl_init2 - Failed to set SSL range: min: %s, max: %s - error %d (%s)\n",
+ "slapd_ssl_init2 - Failed to set SSL range: min: %s, max: %s - error %d (%s)",
mymin, mymax, errorCode, slapd_pr_strerror(errorCode));
}
/*
@@ -1926,13 +1931,13 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
(void)slapi_getSSLVersion_str(slapdNSSVersions.min, mymin, sizeof(mymin));
(void)slapi_getSSLVersion_str(slapdNSSVersions.max, mymax, sizeof(mymax));
slapd_SSL_error("Security Initialization - "
- "slapd_ssl_init2 - Failed to set SSL range: min: %s, max: %s - error %d (%s)\n",
+ "slapd_ssl_init2 - Failed to set SSL range: min: %s, max: %s - error %d (%s)",
mymin, mymax, errorCode, slapd_pr_strerror(errorCode));
}
} else {
errorCode = PR_GetError();
slapd_SSL_error("Security Initialization - ",
- "slapd_ssl_init2 - Failed to get SSL range from socket - error %d (%s)\n",
+ "slapd_ssl_init2 - Failed to get SSL range from socket - error %d (%s)",
errorCode, slapd_pr_strerror(errorCode));
}
@@ -2265,7 +2270,7 @@ slapd_SSL_client_auth(LDAP *ld)
}
} else {
if (token == NULL) {
- slapd_SSL_warn("slapd_SSL_client_auth - certificate token was not found\n");
+ slapd_SSL_warn("slapd_SSL_client_auth - certificate token was not found");
}
rc = -1;
}
diff --git a/src/lib389/lib389/dirsrv_log.py b/src/lib389/lib389/dirsrv_log.py
index 7bed4bb17..ab8872051 100644
--- a/src/lib389/lib389/dirsrv_log.py
+++ b/src/lib389/lib389/dirsrv_log.py
@@ -207,7 +207,7 @@ class DirsrvAccessLog(DirsrvLog):
return {
'base': quoted_vals[0],
'filter': quoted_vals[1],
- 'timestamp': re.findall('\[(.*)\]', lines[0])[0],
+ 'timestamp': re.findall('[(.*)]', lines[0])[0],
'scope': lines[0].split(' scope=', 1)[1].split(' ',1)[0]
}
--
2.26.2
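
The core of the sslVersionMax fix is to validate a requested TLS version against the library's supported range and only fall back to the default range when the request is outside what is supported. A simplified Python sketch of that decision follows; the numeric ranges are assumed example values, and the real code works with NSS SSL_LIBRARY_VERSION_* constants rather than floats.

    SUPPORTED = (1.0, 1.3)   # what the crypto library can do (assumed values)
    DEFAULT = (1.2, 1.2)     # what it enables out of the box (assumed values)

    def resolve_version(requested, is_min):
        """Return the TLS version to use for sslVersionMin/Max."""
        smin, smax = SUPPORTED
        dmin, dmax = DEFAULT
        if is_min:
            if requested < smin:          # below what the library supports
                print("sslVersionMin %.1f too low, using default %.1f" % (requested, dmin))
                return dmin
            return requested              # within the supported range: honour it
        if requested > smax:              # above what the library supports
            print("sslVersionMax %.1f too high, using default %.1f" % (requested, dmax))
            return dmax
        return requested

    if __name__ == "__main__":
        print(resolve_version(1.3, is_min=False))  # 1.3: supported, even though above the default max
        print(resolve_version(1.4, is_min=False))  # falls back to the default max, 1.2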

@ -1,202 +0,0 @@
From 68ca1de0f39c11056a57b03a544520bd6708d855 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <simon.pichugin@gmail.com>
Date: Thu, 11 Jun 2020 15:39:59 +0200
Subject: [PATCH] Issue 50984 - Memory leaks in disk monitoring
Description: Fix the rest of the leaks in disk monitoring,
which are present when we shut down while below half
of the threshold (at start-up in main.c).
Free directories, sockets, and ports before going to cleanup.
https://pagure.io/389-ds-base/issue/50984
Reviewed by: mhonek, tbordaz (Thanks!)
---
ldap/servers/slapd/daemon.c | 75 ++++++++++++++++++++-----------------
ldap/servers/slapd/fe.h | 1 +
ldap/servers/slapd/main.c | 49 +++++++++++++-----------
3 files changed, 70 insertions(+), 55 deletions(-)
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index a70f40316..7091b570d 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -884,6 +884,46 @@ convert_pbe_des_to_aes(void)
charray_free(attrs);
}
+void
+slapd_sockets_ports_free(daemon_ports_t *ports_info)
+{
+ /* freeing PRFileDescs */
+ PRFileDesc **fdesp = NULL;
+ for (fdesp = ports_info->n_socket; fdesp && *fdesp; fdesp++) {
+ PR_Close(*fdesp);
+ }
+ slapi_ch_free((void **)&ports_info->n_socket);
+
+ for (fdesp = ports_info->s_socket; fdesp && *fdesp; fdesp++) {
+ PR_Close(*fdesp);
+ }
+ slapi_ch_free((void **)&ports_info->s_socket);
+#if defined(ENABLE_LDAPI)
+ for (fdesp = ports_info->i_socket; fdesp && *fdesp; fdesp++) {
+ PR_Close(*fdesp);
+ }
+ slapi_ch_free((void **)&ports_info->i_socket);
+#endif /* ENABLE_LDAPI */
+
+ /* freeing NetAddrs */
+ PRNetAddr **nap;
+ for (nap = ports_info->n_listenaddr; nap && *nap; nap++) {
+ slapi_ch_free((void **)nap);
+ }
+ slapi_ch_free((void **)&ports_info->n_listenaddr);
+
+ for (nap = ports_info->s_listenaddr; nap && *nap; nap++) {
+ slapi_ch_free((void **)nap);
+ }
+ slapi_ch_free((void **)&ports_info->s_listenaddr);
+#if defined(ENABLE_LDAPI)
+ for (nap = ports_info->i_listenaddr; nap && *nap; nap++) {
+ slapi_ch_free((void **)nap);
+ }
+ slapi_ch_free((void **)&ports_info->i_listenaddr);
+#endif
+}
+
void
slapd_daemon(daemon_ports_t *ports)
{
@@ -1099,40 +1139,7 @@ slapd_daemon(daemon_ports_t *ports)
/* free the listener indexes */
slapi_ch_free((void **)&listener_idxs);
- for (fdesp = n_tcps; fdesp && *fdesp; fdesp++) {
- PR_Close(*fdesp);
- }
- slapi_ch_free((void **)&n_tcps);
-
- for (fdesp = i_unix; fdesp && *fdesp; fdesp++) {
- PR_Close(*fdesp);
- }
- slapi_ch_free((void **)&i_unix);
-
- for (fdesp = s_tcps; fdesp && *fdesp; fdesp++) {
- PR_Close(*fdesp);
- }
- slapi_ch_free((void **)&s_tcps);
-
- /* freeing NetAddrs */
- {
- PRNetAddr **nap;
- for (nap = ports->n_listenaddr; nap && *nap; nap++) {
- slapi_ch_free((void **)nap);
- }
- slapi_ch_free((void **)&ports->n_listenaddr);
-
- for (nap = ports->s_listenaddr; nap && *nap; nap++) {
- slapi_ch_free((void **)nap);
- }
- slapi_ch_free((void **)&ports->s_listenaddr);
-#if defined(ENABLE_LDAPI)
- for (nap = ports->i_listenaddr; nap && *nap; nap++) {
- slapi_ch_free((void **)nap);
- }
- slapi_ch_free((void **)&ports->i_listenaddr);
-#endif
- }
+ slapd_sockets_ports_free(ports);
op_thread_cleanup();
housekeeping_stop(); /* Run this after op_thread_cleanup() logged sth */
diff --git a/ldap/servers/slapd/fe.h b/ldap/servers/slapd/fe.h
index 2d9a0931b..9cd122881 100644
--- a/ldap/servers/slapd/fe.h
+++ b/ldap/servers/slapd/fe.h
@@ -120,6 +120,7 @@ int connection_table_iterate_active_connections(Connection_Table *ct, void *arg,
*/
int signal_listner(void);
int daemon_pre_setuid_init(daemon_ports_t *ports);
+void slapd_sockets_ports_free(daemon_ports_t *ports_info);
void slapd_daemon(daemon_ports_t *ports);
void daemon_register_connection(void);
int slapd_listenhost2addr(const char *listenhost, PRNetAddr ***addr);
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index e54b8e1c5..9e5219c4a 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -734,7 +734,6 @@ main(int argc, char **argv)
* etc the backends need to start
*/
-
/* Important: up 'till here we could be running as root (on unix).
* we believe that we've not created any files before here, otherwise
* they'd be owned by root, which is bad. We're about to change identity
@@ -891,6 +890,34 @@ main(int argc, char **argv)
}
}
+ if (config_get_disk_monitoring()) {
+ char **dirs = NULL;
+ char *dirstr = NULL;
+ uint64_t disk_space = 0;
+ int64_t threshold = 0;
+ uint64_t halfway = 0;
+ threshold = config_get_disk_threshold();
+ halfway = threshold / 2;
+ disk_mon_get_dirs(&dirs);
+ dirstr = disk_mon_check_diskspace(dirs, threshold, &disk_space);
+ if (dirstr != NULL && disk_space < halfway) {
+ slapi_log_err(SLAPI_LOG_EMERG, "main",
+ "Disk Monitoring is enabled and disk space on (%s) is too far below the threshold(%" PRIu64 " bytes). Exiting now.\n",
+ dirstr, threshold);
+ slapi_ch_array_free(dirs);
+ /*
+ * We should free the structs we allocated for sockets and addresses
+ * as they would be freed at the slapd_daemon but it was not initiated
+ * at that point of start-up.
+ */
+ slapd_sockets_ports_free(&ports_info);
+ return_value = 1;
+ goto cleanup;
+ }
+ slapi_ch_array_free(dirs);
+ dirs = NULL;
+ }
+
/* initialize the normalized DN cache */
if (ndn_cache_init() != 0) {
slapi_log_err(SLAPI_LOG_EMERG, "main", "Unable to create ndn cache\n");
@@ -940,26 +967,6 @@ main(int argc, char **argv)
slapi_ch_free((void **)&versionstring);
}
- if (config_get_disk_monitoring()) {
- char **dirs = NULL;
- char *dirstr = NULL;
- uint64_t disk_space = 0;
- int64_t threshold = 0;
- uint64_t halfway = 0;
- threshold = config_get_disk_threshold();
- halfway = threshold / 2;
- disk_mon_get_dirs(&dirs);
- dirstr = disk_mon_check_diskspace(dirs, threshold, &disk_space);
- if (dirstr != NULL && disk_space < halfway) {
- slapi_log_err(SLAPI_LOG_EMERG, "main",
- "Disk Monitoring is enabled and disk space on (%s) is too far below the threshold(%" PRIu64 " bytes). Exiting now.\n",
- dirstr, threshold);
- return_value = 1;
- goto cleanup;
- }
- slapi_ch_array_free(dirs);
- dirs = NULL;
- }
/* log the max fd limit as it is typically set in env/systemd */
slapi_log_err(SLAPI_LOG_INFO, "main",
"Setting the maximum file descriptor limit to: %ld\n",
--
2.26.2
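
The start-up disk check exits early when free space is below half of the configured threshold. The sketch below shows just that arithmetic in Python, with shutil.disk_usage standing in for disk_mon_check_diskspace; it is illustrative, not the server's C implementation.

    import shutil

    def too_low_on_disk(paths, threshold_bytes):
        """Return the first path whose free space is below half of the threshold."""
        halfway = threshold_bytes // 2
        for path in paths:
            free = shutil.disk_usage(path).free
            if free < halfway:
                return path, free
        return None

    if __name__ == "__main__":
        # 2 MiB threshold: almost certainly fine, so this prints None on most systems.
        print(too_low_on_disk(["/tmp"], 2 * 1024 * 1024))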

@ -1,194 +0,0 @@
From e78d3bd879b880d679b49f3fa5ebe8009d309063 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Fri, 2 Oct 2020 12:03:12 +0200
Subject: [PATCH 1/8] Issue 4297- On ADD replication URP issue internal
searches with filter containing unescaped chars (#4355)
Bug description:
In MMR a consumer receiving an ADD has to do some checking based on the basedn.
It checks if the entry was a tombstone or if the conflicting parent entry was a tombstone.
To do this checking, URP does internal searches using the basedn.
A '*' (ASTERISK) is valid in an RDN and in a DN. But when a DN is used in an assertion value of a filter, the ASTERISK needs to be escaped, otherwise the server will interpret the filter type as a substring. (see
https://tools.ietf.org/html/rfc4515#section-3)
The problem is that if an added entry contains an ASTERISK in the DN, it will not be escaped in the internal search and will trigger a substring search (likely unindexed).
Fix description:
Escape the DN before doing the internal search in URP.
Fixes: #4297
Reviewed by: Mark Reynolds, William Brown, Simon Pichugi (thanks !)
Platforms tested: F31
---
.../suites/replication/acceptance_test.py | 63 +++++++++++++++++++
ldap/servers/plugins/replication/urp.c | 10 ++-
ldap/servers/slapd/filter.c | 21 +++++++
ldap/servers/slapd/slapi-plugin.h | 1 +
4 files changed, 93 insertions(+), 2 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
index 5009f4e7c..661dddb11 100644
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
@@ -7,6 +7,7 @@
# --- END COPYRIGHT BLOCK ---
#
import pytest
+import logging
from lib389.replica import Replicas
from lib389.tasks import *
from lib389.utils import *
@@ -556,6 +557,68 @@ def test_csnpurge_large_valueset(topo_m2):
for i in range(21,25):
test_user.add('description', 'value {}'.format(str(i)))
+@pytest.mark.ds51244
+def test_urp_trigger_substring_search(topo_m2):
+ """Test that a ADD of a entry with a '*' in its DN, triggers
+ an internal search with a escaped DN
+
+ :id: 9869bb39-419f-42c3-a44b-c93eb0b77667
+ :setup: MMR with 2 masters
+ :steps:
+ 1. enable internal operation loggging for plugins
+ 2. Create on M1 a test_user with a '*' in its DN
+ 3. Check the test_user is replicated
+ 4. Check in access logs that the internal search does not contain '*'
+ :expectedresults:
+ 1. Should succeeds
+ 2. Should succeeds
+ 3. Should succeeds
+ 4. Should succeeds
+ """
+ m1 = topo_m2.ms["master1"]
+ m2 = topo_m2.ms["master2"]
+
+ # Enable loggging of internal operation logging to capture URP intop
+ log.info('Set nsslapd-plugin-logging to on')
+ for inst in (m1, m2):
+ inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access')
+ inst.config.set('nsslapd-plugin-logging', 'on')
+ inst.restart()
+
+ # add a user with a DN containing '*'
+ test_asterisk_uid = 'asterisk_*_in_value'
+ test_asterisk_dn = 'uid={},{}'.format(test_asterisk_uid, DEFAULT_SUFFIX)
+
+ test_user = UserAccount(m1, test_asterisk_dn)
+ if test_user.exists():
+ log.info('Deleting entry {}'.format(test_asterisk_dn))
+ test_user.delete()
+ test_user.create(properties={
+ 'uid': test_asterisk_uid,
+ 'cn': test_asterisk_uid,
+ 'sn': test_asterisk_uid,
+ 'userPassword': test_asterisk_uid,
+ 'uidNumber' : '1000',
+ 'gidNumber' : '2000',
+ 'homeDirectory' : '/home/asterisk',
+ })
+
+ # check that the ADD was replicated on M2
+ test_user_m2 = UserAccount(m2, test_asterisk_dn)
+ for i in range(1,5):
+ if test_user_m2.exists():
+ break
+ else:
+ log.info('Entry not yet replicated on M2, wait a bit')
+ time.sleep(2)
+
+ # check that M2 access logs does not "(&(objectclass=nstombstone)(nscpentrydn=uid=asterisk_*_in_value,dc=example,dc=com))"
+ log.info('Check that on M2, URP as not triggered such internal search')
+ pattern = ".*\(Internal\).*SRCH.*\(&\(objectclass=nstombstone\)\(nscpentrydn=uid=asterisk_\*_in_value,dc=example,dc=com.*"
+ found = m2.ds_access_log.match(pattern)
+ log.info("found line: %s" % found)
+ assert not found
+
if __name__ == '__main__':
# Run isolated
diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
index 79a817c90..301e9fa00 100644
--- a/ldap/servers/plugins/replication/urp.c
+++ b/ldap/servers/plugins/replication/urp.c
@@ -1411,9 +1411,12 @@ urp_add_check_tombstone (Slapi_PBlock *pb, char *sessionid, Slapi_Entry *entry,
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
char *basedn = slapi_entry_get_ndn(entry);
+ char *escaped_basedn;
const Slapi_DN *suffix = slapi_get_suffix_by_dn(slapi_entry_get_sdn (entry));
+ escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
+ slapi_ch_free((void **)&escaped_basedn);
newpb = slapi_pblock_new();
slapi_search_internal_set_pb(newpb,
slapi_sdn_get_dn(suffix), /* Base DN */
@@ -1602,12 +1605,15 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
const char *basedn = slapi_sdn_get_dn(parentdn);
+ char *escaped_basedn;
+ escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn");
CSN *conflict_csn = csn_new_by_string(conflict_csnstr);
CSN *tombstone_csn = NULL;
- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
+ slapi_ch_free((void **)&escaped_basedn);
newpb = slapi_pblock_new();
char *parent_dn = slapi_dn_parent (basedn);
slapi_search_internal_set_pb(newpb,
diff --git a/ldap/servers/slapd/filter.c b/ldap/servers/slapd/filter.c
index c818baec3..d671c87ff 100644
--- a/ldap/servers/slapd/filter.c
+++ b/ldap/servers/slapd/filter.c
@@ -130,6 +130,27 @@ filter_escape_filter_value(struct slapi_filter *f, const char *fmt, size_t len _
return ptr;
}
+/* Escaped an equality filter value (assertionValue) of a given attribute
+ * Caller must free allocated escaped filter value
+ */
+char *
+slapi_filter_escape_filter_value(char* filter_attr, char *filter_value)
+{
+ char *result;
+ struct slapi_filter *f;
+
+ if ((filter_attr == NULL) || (filter_value == NULL)) {
+ return NULL;
+ }
+ f = (struct slapi_filter *)slapi_ch_calloc(1, sizeof(struct slapi_filter));
+ f->f_choice = LDAP_FILTER_EQUALITY;
+ f->f_un.f_un_ava.ava_type = filter_attr;
+ f->f_un.f_un_ava.ava_value.bv_len = strlen(filter_value);
+ f->f_un.f_un_ava.ava_value.bv_val = filter_value;
+ result = filter_escape_filter_value(f, FILTER_EQ_FMT, FILTER_EQ_LEN);
+ slapi_ch_free((void**) &f);
+ return result;
+}
/*
* get_filter_internal(): extract an LDAP filter from a BerElement and create
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 8d9c3fa6a..04c02cf7c 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -5262,6 +5262,7 @@ int slapi_vattr_filter_test_ext(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Filter *
int slapi_filter_compare(struct slapi_filter *f1, struct slapi_filter *f2);
Slapi_Filter *slapi_filter_dup(Slapi_Filter *f);
int slapi_filter_changetype(Slapi_Filter *f, const char *newtype);
+char *slapi_filter_escape_filter_value(char* filter_attr, char *filter_value);
int slapi_attr_is_last_mod(char *attr);
--
2.26.2
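
The URP fix escapes the entry DN before embedding it in a filter so an ASTERISK is treated as a literal value instead of a substring wildcard (RFC 4515). A hedged Python sketch of the escaping and of the resulting tombstone filter; the helper names are illustrative, not the slapi API.

    def escape_filter_value(value):
        """Escape the RFC 4515 special characters in an assertion value."""
        specials = {"\\": r"\5c", "*": r"\2a", "(": r"\28", ")": r"\29", "\0": r"\00"}
        return "".join(specials.get(c, c) for c in value)

    def tombstone_filter(basedn):
        """Build the internal-search filter used to look for a tombstone of basedn."""
        return "(&(objectclass=nstombstone)(nscpentrydn=%s))" % escape_filter_value(basedn)

    if __name__ == "__main__":
        dn = "uid=asterisk_*_in_value,dc=example,dc=com"
        print(tombstone_filter(dn))
        # (&(objectclass=nstombstone)(nscpentrydn=uid=asterisk_\2a_in_value,dc=example,dc=com))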

@ -1,66 +0,0 @@
From 3cf7734177c70c36062d4e667b91e15f22a2ea81 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 25 Nov 2020 18:07:34 +0100
Subject: [PATCH 2/8] Issue 4297 - 2nd fix for on ADD replication URP issue
internal searches with filter containing unescaped chars (#4439)
Bug description:
Previous fix is buggy because slapi_filter_escape_filter_value returns
a escaped filter component not an escaped assertion value.
Fix description:
use the escaped filter component
relates: https://github.com/389ds/389-ds-base/issues/4297
Reviewed by: William Brown
Platforms tested: F31
---
ldap/servers/plugins/replication/urp.c | 17 ++++++++---------
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
index 301e9fa00..96ad2759a 100644
--- a/ldap/servers/plugins/replication/urp.c
+++ b/ldap/servers/plugins/replication/urp.c
@@ -1411,12 +1411,12 @@ urp_add_check_tombstone (Slapi_PBlock *pb, char *sessionid, Slapi_Entry *entry,
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
char *basedn = slapi_entry_get_ndn(entry);
- char *escaped_basedn;
+ char *escaped_filter;
const Slapi_DN *suffix = slapi_get_suffix_by_dn(slapi_entry_get_sdn (entry));
- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", basedn);
- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
- slapi_ch_free((void **)&escaped_basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
+ slapi_ch_free((void **)&escaped_filter);
newpb = slapi_pblock_new();
slapi_search_internal_set_pb(newpb,
slapi_sdn_get_dn(suffix), /* Base DN */
@@ -1605,15 +1605,14 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr
Slapi_Entry **entries = NULL;
Slapi_PBlock *newpb;
const char *basedn = slapi_sdn_get_dn(parentdn);
- char *escaped_basedn;
- escaped_basedn = slapi_filter_escape_filter_value("nscpentrydn", basedn);
-
+ char *escaped_filter;
+ escaped_filter = slapi_filter_escape_filter_value("nscpentrydn", (char *)basedn);
char *conflict_csnstr = (char*)slapi_entry_attr_get_ref((Slapi_Entry *)entry, "conflictcsn");
CSN *conflict_csn = csn_new_by_string(conflict_csnstr);
CSN *tombstone_csn = NULL;
- char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)(nscpentrydn=%s))", escaped_basedn);
- slapi_ch_free((void **)&escaped_basedn);
+ char *filter = slapi_filter_sprintf("(&(objectclass=nstombstone)%s)", escaped_filter);
+ slapi_ch_free((void **)&escaped_filter);
newpb = slapi_pblock_new();
char *parent_dn = slapi_dn_parent (basedn);
slapi_search_internal_set_pb(newpb,
--
2.26.2
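
The follow-up above matters because slapi_filter_escape_filter_value returns a whole escaped filter component, i.e. "(attr=value)", not just an escaped value, so the caller must splice the component in rather than wrap it again. A tiny self-contained Python sketch of the difference (helper names are illustrative):

    def escape_filter_component(attr, value):
        """Return an escaped filter *component*, e.g. (nscpentrydn=cn=a\\2ab,dc=x)."""
        specials = {"\\": r"\5c", "*": r"\2a", "(": r"\28", ")": r"\29", "\0": r"\00"}
        escaped = "".join(specials.get(c, c) for c in value)
        return "(%s=%s)" % (attr, escaped)

    if __name__ == "__main__":
        component = escape_filter_component("nscpentrydn", "cn=a*b,dc=example,dc=com")
        # Correct: splice the whole component in.
        print("(&(objectclass=nstombstone)%s)" % component)
        # Wrong (the first attempt): wrapping the component in another attr=value pair.
        print("(&(objectclass=nstombstone)(nscpentrydn=%s))" % component)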

@ -1,37 +0,0 @@
From 16a004faf7eda3f8c4d59171bceab8cf78a9d002 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 6 Aug 2020 14:50:19 -0400
Subject: [PATCH 3/8] Issue 51233 - ds-replcheck crashes in offline mode
Bug Description: When processing all the DNs found in the master LDIF,
it is possible that the LDIF is not in the expected
order and ldifsearch fails (crashing the tool).
Fix Description: If ldifsearch does not find an entry, start from the
beginning of the LDIF and try again.
relates: https://pagure.io/389-ds-base/issue/51233
Reviewed by: spichugi(Thanks!)
---
ldap/admin/src/scripts/ds-replcheck | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
index 5bb7dfce3..1c133f4dd 100755
--- a/ldap/admin/src/scripts/ds-replcheck
+++ b/ldap/admin/src/scripts/ds-replcheck
@@ -725,6 +725,10 @@ def do_offline_report(opts, output_file=None):
missing = False
for dn in master_dns:
mresult = ldif_search(MLDIF, dn)
+ if mresult['entry'] is None and mresult['conflict'] is None and not mresult['tombstone']:
+ # Try from the beginning
+ MLDIF.seek(0)
+ mresult = ldif_search(MLDIF, dn)
rresult = ldif_search(RLDIF, dn)
if dn in replica_dns:
--
2.26.2
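
The ds-replcheck fix rewinds the LDIF and searches again when the first sequential pass misses an out-of-order entry. A generic Python sketch of that retry-from-the-start pattern; ldif_search here is a stand-in that just matches dn: lines, not the real tool code.

    import io

    def ldif_search(fh, dn):
        """Scan forward from the current position for 'dn: <dn>'; None if not found."""
        for line in fh:
            if line.strip() == "dn: " + dn:
                return dn
        return None

    def find_entry(fh, dn):
        """First try from the current offset, then once more from the beginning."""
        result = ldif_search(fh, dn)
        if result is None:
            fh.seek(0)                 # the LDIF was not in the expected order
            result = ldif_search(fh, dn)
        return result

    if __name__ == "__main__":
        ldif = io.StringIO("dn: uid=b,dc=example,dc=com\n\ndn: uid=a,dc=example,dc=com\n")
        print(find_entry(ldif, "uid=a,dc=example,dc=com"))  # found on the first pass
        print(find_entry(ldif, "uid=b,dc=example,dc=com"))  # found only after seek(0)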

@ -1,103 +0,0 @@
From bc8bdaa57ba9b57671e2921705b99eaa70729ce7 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 11 Nov 2020 11:45:11 -0500
Subject: [PATCH 4/8] Issue 4429 - NULL dereference in revert_cache()
Bug Description: During a delete, if the DN (with an escaped leading space)
of an existing entry fails to parse, the server will revert
the entry update. In this case it will lead to a crash
because the ldbm inst struct is not set before it attempts
the cache revert.
Fix Description: Check that the ldbm instance struct is not NULL before
dereferencing it.
Relates: https://github.com/389ds/389-ds-base/issues/4429
Reviewed by: firstyear & spichugi(Thanks!!)
---
.../tests/suites/syntax/acceptance_test.py | 40 +++++++++++++++++++
ldap/servers/slapd/back-ldbm/cache.c | 3 ++
2 files changed, 43 insertions(+)
diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py
index db8f63c7e..543718689 100644
--- a/dirsrvtests/tests/suites/syntax/acceptance_test.py
+++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py
@@ -6,12 +6,14 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
+import ldap
import logging
import pytest
import os
from lib389.schema import Schema
from lib389.config import Config
from lib389.idm.user import UserAccounts
+from lib389.idm.group import Groups
from lib389._constants import DEFAULT_SUFFIX
from lib389.topologies import log, topology_st as topo
@@ -105,6 +107,44 @@ def test_invalid_uidnumber(topo, validate_syntax_off):
log.info('Found an invalid entry with wrong uidNumber - Success')
+def test_invalid_dn_syntax_crash(topo):
+ """Add an entry with an escaped space, restart the server, and try to delete
+ it. In this case the DN is not correctly parsed and causes cache revert to
+ to dereference a NULL pointer. So the delete can fail as long as the server
+ does not crash.
+
+ :id: 62d87272-dfb8-4627-9ca1-dbe33082caf8
+ :setup: Standalone Instance
+ :steps:
+ 1. Add entry with leading escaped space in the RDN
+ 2. Restart the server so the entry is rebuilt from the database
+ 3. Delete the entry
+ 4. The server should still be running
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ # Create group
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties={'cn': ' test'})
+
+ # Restart the server
+ topo.standalone.restart()
+
+ # Delete group
+ try:
+ group.delete()
+ except ldap.NO_SUCH_OBJECT:
+ # This is okay in this case as we are only concerned about a crash
+ pass
+
+ # Make sure server is still running
+ groups.list()
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c
index 89f958a35..5ad9ca829 100644
--- a/ldap/servers/slapd/back-ldbm/cache.c
+++ b/ldap/servers/slapd/back-ldbm/cache.c
@@ -614,6 +614,9 @@ flush_hash(struct cache *cache, struct timespec *start_time, int32_t type)
void
revert_cache(ldbm_instance *inst, struct timespec *start_time)
{
+ if (inst == NULL) {
+ return;
+ }
flush_hash(&inst->inst_cache, start_time, ENTRY_CACHE);
flush_hash(&inst->inst_dncache, start_time, DN_CACHE);
}
--
2.26.2
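
The cache fix is a plain guard: return from revert_cache() when the ldbm instance pointer was never set. A one-function Python sketch of the same defensive pattern, with an in-memory dict standing in for the instance caches (names are illustrative):

    def revert_cache(inst, start_time):
        """Roll back cache changes made after start_time; no-op if inst was never set."""
        if inst is None:
            return                       # the DN failed to parse before inst was resolved
        inst["entry_cache"] = [e for e in inst["entry_cache"] if e["added_at"] < start_time]
        inst["dn_cache"] = [e for e in inst["dn_cache"] if e["added_at"] < start_time]

    if __name__ == "__main__":
        revert_cache(None, start_time=100)   # previously this dereferenced a NULL pointer
        inst = {"entry_cache": [{"added_at": 150}], "dn_cache": []}
        revert_cache(inst, start_time=100)
        print(inst)                          # {'entry_cache': [], 'dn_cache': []}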

@ -1,232 +0,0 @@
From 132f126c18214345ef4204bf8a061a0eca58fa59 Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Tue, 3 Nov 2020 12:18:50 +0100
Subject: [PATCH 5/8] ticket 2058: Add keep alive entry after on-line
initialization - second version (#4399)
Bug description:
The keep alive entry is not created on the target master after online
initialization, and its RUV element stays empty until a direct update is issued on that master.
Fix description:
The patch allows a consumer (configured as a master) to create (if it did not
exist before) the consumer's keep alive entry. It creates it at the end of a
replication session, at a time when we are sure the changelog exists and will not
be reset. It allows a consumer to have an RUV element with a csn in the RUV at the
first incoming replication session.
That is basically lkrispen's proposal with an associated pytest testcase
Second version changes:
- moved the testcase to suites/replication/regression_test.py
- set up the topology from a 2 master topology then
reinitialized the replicas from an ldif without replication metadata
rather than using the cli.
- search for keepalive entries using search_s instead of getEntry
- add a comment about keep alive entries purpose
last commit:
- wait that ruv are in sync before checking keep alive entries
Reviewed by: droideck, Firstyear
Platforms tested: F32
relates: #2058
---
.../suites/replication/regression_test.py | 130 ++++++++++++++++++
.../plugins/replication/repl5_replica.c | 14 ++
ldap/servers/plugins/replication/repl_extop.c | 4 +
3 files changed, 148 insertions(+)
diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
index 844d762b9..14b9d6a44 100644
--- a/dirsrvtests/tests/suites/replication/regression_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_test.py
@@ -98,6 +98,30 @@ def _move_ruv(ldif_file):
for dn, entry in ldif_list:
ldif_writer.unparse(dn, entry)
+def _remove_replication_data(ldif_file):
+ """ Remove the replication data from ldif file:
+ db2lif without -r includes some of the replica data like
+ - nsUniqueId
+ - keepalive entries
+ This function filters the ldif fil to remove these data
+ """
+
+ with open(ldif_file) as f:
+ parser = ldif.LDIFRecordList(f)
+ parser.parse()
+
+ ldif_list = parser.all_records
+ # Iterate on a copy of the ldif entry list
+ for dn, entry in ldif_list[:]:
+ if dn.startswith('cn=repl keep alive'):
+ ldif_list.remove((dn,entry))
+ else:
+ entry.pop('nsUniqueId')
+ with open(ldif_file, 'w') as f:
+ ldif_writer = ldif.LDIFWriter(f)
+ for dn, entry in ldif_list:
+ ldif_writer.unparse(dn, entry)
+
@pytest.fixture(scope="module")
def topo_with_sigkill(request):
@@ -897,6 +921,112 @@ def test_moving_entry_make_online_init_fail(topology_m2):
assert len(m1entries) == len(m2entries)
+def get_keepalive_entries(instance,replica):
+ # Returns the keep alive entries that exists with the suffix of the server instance
+ try:
+ entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL,
+ "(&(objectclass=ldapsubentry)(cn=repl keep alive*))",
+ ['cn', 'nsUniqueId', 'modifierTimestamp'])
+ except ldap.LDAPError as e:
+ log.fatal('Failed to retrieve keepalive entry (%s) on instance %s: error %s' % (dn, instance, str(e)))
+ assert False
+ # No error, so lets log the keepalive entries
+ if log.isEnabledFor(logging.DEBUG):
+ for ret in entries:
+ log.debug("Found keepalive entry:\n"+str(ret));
+ return entries
+
+def verify_keepalive_entries(topo, expected):
+ #Check that keep alive entries exists (or not exists) for every masters on every masters
+ #Note: The testing method is quite basic: counting that there is one keepalive entry per master.
+ # that is ok for simple test cases like test_online_init_should_create_keepalive_entries but
+ # not for the general case as keep alive associated with no more existing master may exists
+ # (for example after: db2ldif / demote a master / ldif2db / init other masters)
+ # ==> if the function is somehow pushed in lib389, a check better than simply counting the entries
+ # should be done.
+ for masterId in topo.ms:
+ master=topo.ms[masterId]
+ for replica in Replicas(master).list():
+ if (replica.get_role() != ReplicaRole.MASTER):
+ continue
+ replica_info = f'master: {masterId} RID: {replica.get_rid()} suffix: {replica.get_suffix()}'
+ log.debug(f'Checking keepAliveEntries on {replica_info}')
+ keepaliveEntries = get_keepalive_entries(master, replica);
+ expectedCount = len(topo.ms) if expected else 0
+ foundCount = len(keepaliveEntries)
+ if (foundCount == expectedCount):
+ log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.')
+ else:
+ log.error(f'{foundCount} Keepalive entries are found '
+ f'while {expectedCount} were expected on {replica_info}.')
+ assert False
+
+
+def test_online_init_should_create_keepalive_entries(topo_m2):
+ """Check that keep alive entries are created when initializinf a master from another one
+
+ :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
+ :setup: Two masters replication setup
+ :steps:
+ 1. Generate ldif without replication data
+ 2 Init both masters from that ldif
+ 3 Check that keep alive entries does not exists
+ 4 Perform on line init of master2 from master1
+ 5 Check that keep alive entries exists
+ :expectedresults:
+ 1. No error while generating ldif
+ 2. No error while importing the ldif file
+ 3. No keepalive entrie should exists on any masters
+ 4. No error while initializing master2
+ 5. All keepalive entries should exist on every masters
+
+ """
+
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+ m1 = topo_m2.ms["master1"]
+ m2 = topo_m2.ms["master2"]
+ # Step 1: Generate ldif without replication data
+ m1.stop()
+ m2.stop()
+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=ldif_file, encrypt=False)
+ # Remove replication metadata that are still in the ldif
+ _remove_replication_data(ldif_file)
+
+ # Step 2: Init both masters from that ldif
+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m1.start()
+ m2.start()
+
+ """ Replica state is now as if CLI setup has been done using:
+ dsconf master1 replication enable --suffix "${SUFFIX}" --role master
+ dsconf master2 replication enable --suffix "${SUFFIX}" --role master
+ dsconf master1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+ dsconf master2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+ dsconf master1 repl-agmt create --suffix "${SUFFIX}"
+ dsconf master2 repl-agmt create --suffix "${SUFFIX}"
+ """
+
+ # Step 3: No keepalive entrie should exists on any masters
+ verify_keepalive_entries(topo_m2, False)
+
+ # Step 4: Perform on line init of master2 from master1
+ agmt = Agreements(m1).list()[0]
+ agmt.begin_reinit()
+ (done, error) = agmt.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 5: All keepalive entries should exists on every masters
+ # Verify the keep alive entry once replication is in sync
+ # (that is the step that fails when bug is not fixed)
+ repl.wait_for_ruv(m2,m1)
+ verify_keepalive_entries(topo_m2, True);
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index f01782330..f0ea0f8ef 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -373,6 +373,20 @@ replica_destroy(void **arg)
slapi_ch_free((void **)arg);
}
+/******************************************************************************
+ ******************** REPLICATION KEEP ALIVE ENTRIES **************************
+ ******************************************************************************
+ * They are subentries of the replicated suffix and there is one per master. *
+ * These entries exist only to trigger a change that get replicated over the *
+ * topology. *
+ * Their main purpose is to generate records in the changelog and they are *
+ * updated from time to time by fractional replication to insure that at *
+ * least a change must be replicated by FR after a great number of not *
+ * replicated changes are found in the changelog. The interest is that the *
+ * fractional RUV get then updated so less changes need to be walked in the *
+ * changelog when searching for the first change to send *
+ ******************************************************************************/
+
#define KEEP_ALIVE_ATTR "keepalivetimestamp"
#define KEEP_ALIVE_ENTRY "repl keep alive"
#define KEEP_ALIVE_DN_FORMAT "cn=%s %d,%s"
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
index 14c8e0bcc..af486f730 100644
--- a/ldap/servers/plugins/replication/repl_extop.c
+++ b/ldap/servers/plugins/replication/repl_extop.c
@@ -1173,6 +1173,10 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
*/
if (cl5GetState() == CL5_STATE_OPEN) {
replica_log_ruv_elements(r);
+ /* now that the changelog is open and started, we can alos cretae the
+ * keep alive entry without risk that db and cl will not match
+ */
+ replica_subentry_check(replica_get_root(r), replica_get_rid(r));
}
/* ONREPL code that dealt with new RUV, etc was moved into the code
--
2.26.2
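
The keep-alive patch makes a consumer create its own "cn=repl keep alive <rid>" subentry once the changelog is known to be open, so the entry exists after the first incoming replication session. Below is a hedged sketch of the idempotent create-if-missing step, with an in-memory dict standing in for the replicated suffix; the objectclass and attribute values are schematic.

    def keepalive_dn(rid, suffix):
        """DN format used by the patch: cn=repl keep alive <rid>,<suffix>."""
        return "cn=repl keep alive %d,%s" % (rid, suffix)

    def ensure_keepalive_entry(directory, rid, suffix):
        """Create the consumer's keep-alive subentry if it does not exist yet."""
        dn = keepalive_dn(rid, suffix)
        if dn in directory:                      # already created by an earlier session
            return False
        directory[dn] = {
            "objectclass": ["top", "ldapsubentry", "extensibleObject"],
            "cn": ["repl keep alive %d" % rid],
            "keepalivetimestamp": [],            # refreshed later by fractional replication
        }
        return True

    if __name__ == "__main__":
        suffix_db = {}                            # stand-in for the replicated suffix
        print(ensure_keepalive_entry(suffix_db, 2, "dc=example,dc=com"))  # True: created
        print(ensure_keepalive_entry(suffix_db, 2, "dc=example,dc=com"))  # False: already there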

@ -1,513 +0,0 @@
From 9d25d8bc3262bfaeeda2992538f649bf1a1b33de Mon Sep 17 00:00:00 2001
From: progier389 <72748589+progier389@users.noreply.github.com>
Date: Thu, 12 Nov 2020 18:50:04 +0100
Subject: [PATCH 6/8] do not add referrals for masters with different data
generation #2054 (#4427)
Bug description:
The problem is that some operations that are mandatory in the usual case are
also performed when replication cannot take place because the
database sets are different (i.e. the RUV generation ids are different).
One issue is that the csn generator state is updated when
starting a replication session (a problem when trying to
reset the time skew, as freshly reinstalled replicas get infected
by the old ones).
A second issue is that the RUV gets updated when ending a replication session,
which may add a replica that does not share the same data set;
update operations on a consumer then return referrals towards the wrong masters.
Fix description:
The fix checks the RUV generation ids before updating the csn generator
and before updating the RUV.
Reviewed by: mreynolds
firstyear
vashirov
Platforms tested: F32
---
.../suites/replication/regression_test.py | 290 ++++++++++++++++++
ldap/servers/plugins/replication/repl5.h | 1 +
.../plugins/replication/repl5_inc_protocol.c | 20 +-
.../plugins/replication/repl5_replica.c | 39 ++-
src/lib389/lib389/dseldif.py | 37 +++
5 files changed, 368 insertions(+), 19 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
index 14b9d6a44..a72af6b30 100644
--- a/dirsrvtests/tests/suites/replication/regression_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_test.py
@@ -13,6 +13,7 @@ from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts
from lib389.pwpolicy import PwPolicyManager
from lib389.utils import *
from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db, topology_i2 as topo_i2
+from lib389.topologies import topology_m2c2 as topo_m2c2
from lib389._constants import *
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.idm.user import UserAccount
@@ -22,6 +23,7 @@ from lib389.idm.directorymanager import DirectoryManager
from lib389.replica import Replicas, ReplicationManager, Changelog5, BootstrapReplicationManager
from lib389.agreement import Agreements
from lib389 import pid_from_file
+from lib389.dseldif import *
pytestmark = pytest.mark.tier1
@@ -1027,6 +1029,294 @@ def test_online_init_should_create_keepalive_entries(topo_m2):
verify_keepalive_entries(topo_m2, True);
+def get_agreement(agmts, consumer):
+ # Get agreement towards consumer among the agremment list
+ for agmt in agmts.list():
+ if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and
+ agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host):
+ return agmt
+ return None;
+
+
+def test_ruv_url_not_added_if_different_uuid(topo_m2c2):
+ """Check that RUV url is not updated if RUV generation uuid are different
+
+ :id: 7cc30a4e-0ffd-4758-8f00-e500279af344
+ :setup: Two masters + two consumers replication setup
+ :steps:
+ 1. Generate ldif without replication data
+ 2. Init both masters from that ldif
+ (to clear the ruvs and generates different generation uuid)
+ 3. Perform on line init from master1 to consumer1
+ and from master2 to consumer2
+ 4. Perform update on both masters
+ 5. Check that c1 RUV does not contains URL towards m2
+ 6. Check that c2 RUV does contains URL towards m2
+ 7. Perform on line init from master1 to master2
+ 8. Perform update on master2
+ 9. Check that c1 RUV does contains URL towards m2
+ :expectedresults:
+ 1. No error while generating ldif
+ 2. No error while importing the ldif file
+ 3. No error and Initialization done.
+ 4. No error
+ 5. master2 replicaid should not be in the consumer1 RUV
+ 6. master2 replicaid should be in the consumer2 RUV
+ 7. No error and Initialization done.
+ 8. No error
+ 9. master2 replicaid should be in the consumer1 RUV
+
+ """
+
+ # Variables initialization
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+
+ m1 = topo_m2c2.ms["master1"]
+ m2 = topo_m2c2.ms["master2"]
+ c1 = topo_m2c2.cs["consumer1"]
+ c2 = topo_m2c2.cs["consumer2"]
+
+ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
+ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
+ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
+ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)
+
+ replicid_m2 = replica_m2.get_rid()
+
+ agmts_m1 = Agreements(m1, replica_m1.dn)
+ agmts_m2 = Agreements(m2, replica_m2.dn)
+
+ m1_m2 = get_agreement(agmts_m1, m2)
+ m1_c1 = get_agreement(agmts_m1, c1)
+ m1_c2 = get_agreement(agmts_m1, c2)
+ m2_m1 = get_agreement(agmts_m2, m1)
+ m2_c1 = get_agreement(agmts_m2, c1)
+ m2_c2 = get_agreement(agmts_m2, c2)
+
+ # Step 1: Generate ldif without replication data
+ m1.stop()
+ m2.stop()
+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=ldif_file, encrypt=False)
+ # Remove replication metadata that are still in the ldif
+ # _remove_replication_data(ldif_file)
+
+ # Step 2: Init both masters from that ldif
+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m1.start()
+ m2.start()
+
+ # Step 3: Perform on line init from master1 to consumer1
+ # and from master2 to consumer2
+ m1_c1.begin_reinit()
+ m2_c2.begin_reinit()
+ (done, error) = m1_c1.wait_reinit()
+ assert done is True
+ assert error is False
+ (done, error) = m2_c2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 4: Perform update on both masters
+ repl.test_replication(m1, c1)
+ repl.test_replication(m2, c2)
+
+ # Step 5: Check that c1 RUV does not contains URL towards m2
+ ruv = replica_c1.get_ruv()
+ log.debug(f"c1 RUV: {ruv}")
+ url=ruv._rid_url.get(replica_m2.get_rid())
+ if (url == None):
+ log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV");
+ else:
+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}");
+ log.error(f"URL for RID {replica_m2.get_rid()} found in RUV")
+ #Note: this assertion fails if issue 2054 is not fixed.
+ assert False
+
+ # Step 6: Check that c2 RUV does contains URL towards m2
+ ruv = replica_c2.get_ruv()
+ log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
+ url=ruv._rid_url.get(replica_m2.get_rid())
+ if (url == None):
+ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV");
+ assert False
+ else:
+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}");
+
+
+ # Step 7: Perform on line init from master1 to master2
+ m1_m2.begin_reinit()
+ (done, error) = m1_m2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 8: Perform update on master2
+ repl.test_replication(m2, c1)
+
+ # Step 9: Check that c1 RUV does contains URL towards m2
+ ruv = replica_c1.get_ruv()
+ log.debug(f"c1 RUV: {ruv} {ruv._rids} ")
+ url=ruv._rid_url.get(replica_m2.get_rid())
+ if (url == None):
+ log.error(f"No URL for RID {replica_m2.get_rid()} in RUV");
+ assert False
+ else:
+ log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}");
+
+
+def test_csngen_state_not_updated_if_different_uuid(topo_m2c2):
+ """Check that csngen remote offset is not updated if RUV generation uuid are different
+
+ :id: 77694b8e-22ae-11eb-89b2-482ae39447e5
+ :setup: Two masters + two consumers replication setup
+ :steps:
+ 1. Disable m1<->m2 agreement to avoid propagate timeSkew
+ 2. Generate ldif without replication data
+ 3. Increase time skew on master2
+ 4. Init both masters from that ldif
+ (to clear the ruvs and generates different generation uuid)
+ 5. Perform on line init from master1 to consumer1 and master2 to consumer2
+ 6. Perform update on both masters
+ 7: Check that c1 has no time skew
+ 8: Check that c2 has time skew
+ 9. Init master2 from master1
+ 10. Perform update on master2
+ 11. Check that c1 has time skew
+ :expectedresults:
+ 1. No error
+ 2. No error while generating ldif
+ 3. No error
+ 4. No error while importing the ldif file
+ 5. No error and Initialization done.
+ 6. No error
+ 7. c1 time skew should be lesser than threshold
+ 8. c2 time skew should be higher than threshold
+ 9. No error and Initialization done.
+ 10. No error
+ 11. c1 time skew should be higher than threshold
+
+ """
+
+ # Variables initialization
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+
+ m1 = topo_m2c2.ms["master1"]
+ m2 = topo_m2c2.ms["master2"]
+ c1 = topo_m2c2.cs["consumer1"]
+ c2 = topo_m2c2.cs["consumer2"]
+
+ replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
+ replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
+ replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX)
+ replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX)
+
+ replicid_m2 = replica_m2.get_rid()
+
+ agmts_m1 = Agreements(m1, replica_m1.dn)
+ agmts_m2 = Agreements(m2, replica_m2.dn)
+
+ m1_m2 = get_agreement(agmts_m1, m2)
+ m1_c1 = get_agreement(agmts_m1, c1)
+ m1_c2 = get_agreement(agmts_m1, c2)
+ m2_m1 = get_agreement(agmts_m2, m1)
+ m2_c1 = get_agreement(agmts_m2, c1)
+ m2_c2 = get_agreement(agmts_m2, c2)
+
+ # Step 1: Disable m1<->m2 agreement to avoid propagate timeSkew
+ m1_m2.pause()
+ m2_m1.pause()
+
+ # Step 2: Generate ldif without replication data
+ m1.stop()
+ m2.stop()
+ ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+ m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+ excludeSuffixes=None, repl_data=False,
+ outputfile=ldif_file, encrypt=False)
+ # Remove replication metadata that are still in the ldif
+ # _remove_replication_data(ldif_file)
+
+ # Step 3: Increase time skew on master2
+    timeSkew = 6 * 3600
+    # We can modify master2 time skew,
+    # but the time skew on the consumer may be smaller
+    # depending on when the csngen generation time is updated
+    # and when the first csn gets replicated.
+    # Since we use timeSkew as the threshold value to detect
+    # whether there is time skew or not,
+    # let's add a significant margin (longer than the test duration)
+    # to avoid any risk of erroneous failure.
+ timeSkewMargin = 300
+ DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew+timeSkewMargin)
+
+ # Step 4: Init both masters from that ldif
+ m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+ m1.start()
+ m2.start()
+
+    # Step 5: Perform online init from master1 to consumer1
+ # and from master2 to consumer2
+ m1_c1.begin_reinit()
+ m2_c2.begin_reinit()
+ (done, error) = m1_c1.wait_reinit()
+ assert done is True
+ assert error is False
+ (done, error) = m2_c2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 6: Perform update on both masters
+ repl.test_replication(m1, c1)
+ repl.test_replication(m2, c2)
+
+ # Step 7: Check that c1 has no time skew
+    # Stop server to ensure that dse.ldif is up to date
+ c1.stop()
+ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
+ c1_timeSkew = int(c1_nsState['time_skew'])
+ log.debug(f"c1 time skew: {c1_timeSkew}")
+ if (c1_timeSkew >= timeSkew):
+ log.error(f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}")
+ assert False
+ c1.start()
+
+ # Step 8: Check that c2 has time skew
+    # Stop server to ensure that dse.ldif is up to date
+ c2.stop()
+ c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0]
+ c2_timeSkew = int(c2_nsState['time_skew'])
+ log.debug(f"c2 time skew: {c2_timeSkew}")
+ if (c2_timeSkew < timeSkew):
+ log.error(f"c2 csngen state has not been synchronized with m2: time skew {c2_timeSkew}")
+ assert False
+ c2.start()
+
+    # Step 9: Perform online init from master1 to master2
+ m1_c1.pause()
+ m1_m2.resume()
+ m1_m2.begin_reinit()
+ (done, error) = m1_m2.wait_reinit()
+ assert done is True
+ assert error is False
+
+ # Step 10: Perform update on master2
+ repl.test_replication(m2, c1)
+
+ # Step 11: Check that c1 has time skew
+    # Stop server to ensure that dse.ldif is up to date
+ c1.stop()
+ c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0]
+ c1_timeSkew = int(c1_nsState['time_skew'])
+ log.debug(f"c1 time skew: {c1_timeSkew}")
+ if (c1_timeSkew < timeSkew):
+ log.error(f"c1 csngen state has not been synchronized with m2: time skew {c1_timeSkew}")
+ assert False
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 638471744..b2605011a 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -698,6 +698,7 @@ void replica_dump(Replica *r);
void replica_set_enabled(Replica *r, PRBool enable);
Replica *replica_get_replica_from_dn(const Slapi_DN *dn);
Replica *replica_get_replica_from_root(const char *repl_root);
+int replica_check_generation(Replica *r, const RUV *remote_ruv);
int replica_update_ruv(Replica *replica, const CSN *csn, const char *replica_purl);
Replica *replica_get_replica_for_op(Slapi_PBlock *pb);
/* the functions below manipulate replica hash */
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index 29b1fb073..af5e5897c 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -2161,26 +2161,12 @@ examine_update_vector(Private_Repl_Protocol *prp, RUV *remote_ruv)
} else if (NULL == remote_ruv) {
return_value = EXAMINE_RUV_PRISTINE_REPLICA;
} else {
- char *local_gen = NULL;
- char *remote_gen = ruv_get_replica_generation(remote_ruv);
- Object *local_ruv_obj;
- RUV *local_ruv;
-
PR_ASSERT(NULL != prp->replica);
- local_ruv_obj = replica_get_ruv(prp->replica);
- if (NULL != local_ruv_obj) {
- local_ruv = (RUV *)object_get_data(local_ruv_obj);
- PR_ASSERT(local_ruv);
- local_gen = ruv_get_replica_generation(local_ruv);
- object_release(local_ruv_obj);
- }
- if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) {
- return_value = EXAMINE_RUV_GENERATION_MISMATCH;
- } else {
+ if (replica_check_generation(prp->replica, remote_ruv)) {
return_value = EXAMINE_RUV_OK;
+ } else {
+ return_value = EXAMINE_RUV_GENERATION_MISMATCH;
}
- slapi_ch_free((void **)&remote_gen);
- slapi_ch_free((void **)&local_gen);
}
return return_value;
}
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index f0ea0f8ef..7e56d6557 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -812,6 +812,36 @@ replica_set_ruv(Replica *r, RUV *ruv)
replica_unlock(r->repl_lock);
}
+/*
+ * Check if the replica generation is the same as the remote ruv one
+ */
+int
+replica_check_generation(Replica *r, const RUV *remote_ruv)
+{
+ int return_value;
+ char *local_gen = NULL;
+ char *remote_gen = ruv_get_replica_generation(remote_ruv);
+ Object *local_ruv_obj;
+ RUV *local_ruv;
+
+ PR_ASSERT(NULL != r);
+ local_ruv_obj = replica_get_ruv(r);
+ if (NULL != local_ruv_obj) {
+ local_ruv = (RUV *)object_get_data(local_ruv_obj);
+ PR_ASSERT(local_ruv);
+ local_gen = ruv_get_replica_generation(local_ruv);
+ object_release(local_ruv_obj);
+ }
+ if (NULL == remote_gen || NULL == local_gen || strcmp(remote_gen, local_gen) != 0) {
+ return_value = PR_FALSE;
+ } else {
+ return_value = PR_TRUE;
+ }
+ slapi_ch_free_string(&remote_gen);
+ slapi_ch_free_string(&local_gen);
+ return return_value;
+}
+
/*
* Update one particular CSN in an RUV. This is meant to be called
* whenever (a) the server has processed a client operation and
@@ -1298,6 +1328,11 @@ replica_update_csngen_state_ext(Replica *r, const RUV *ruv, const CSN *extracsn)
PR_ASSERT(r && ruv);
+ if (!replica_check_generation(r, ruv)) /* ruv has wrong generation - we are done */
+ {
+ return 0;
+ }
+
rc = ruv_get_max_csn(ruv, &csn);
if (rc != RUV_SUCCESS) {
return -1;
@@ -3713,8 +3748,8 @@ replica_update_ruv_consumer(Replica *r, RUV *supplier_ruv)
replica_lock(r->repl_lock);
local_ruv = (RUV *)object_get_data(r->repl_ruv);
-
- if (is_cleaned_rid(supplier_id) || local_ruv == NULL) {
+ if (is_cleaned_rid(supplier_id) || local_ruv == NULL ||
+ !replica_check_generation(r, supplier_ruv)) {
replica_unlock(r->repl_lock);
return;
}
diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py
index f2725add9..6e6be7cd2 100644
--- a/src/lib389/lib389/dseldif.py
+++ b/src/lib389/lib389/dseldif.py
@@ -316,6 +316,43 @@ class DSEldif(DSLint):
return states
+ def _increaseTimeSkew(self, suffix, timeSkew):
+ # Increase csngen state local_offset by timeSkew
+ # Warning: instance must be stopped before calling this function
+ assert (timeSkew >= 0)
+ nsState = self.readNsState(suffix)[0]
+ self._instance.log.debug(f'_increaseTimeSkew nsState is {nsState}')
+ oldNsState = self.get(nsState['dn'], 'nsState', True)
+ self._instance.log.debug(f'oldNsState is {oldNsState}')
+
+        # Let's re-encode the new nsState
+ from lib389.utils import print_nice_time
+ if pack('<h', 1) == pack('=h',1):
+ end = '<'
+ elif pack('>h', 1) == pack('=h',1):
+ end = '>'
+ else:
+ raise ValueError("Unknown endian, unable to proceed")
+
+ thelen = len(oldNsState)
+ if thelen <= 20:
+ pad = 2 # padding for short H values
+ timefmt = 'I' # timevals are unsigned 32-bit int
+ else:
+ pad = 6 # padding for short H values
+ timefmt = 'Q' # timevals are unsigned 64-bit int
+ fmtstr = "%sH%dx3%sH%dx" % (end, pad, timefmt, pad)
+ newNsState = base64.b64encode(pack(fmtstr, int(nsState['rid']),
+ int(nsState['gen_time']), int(nsState['local_offset'])+timeSkew,
+ int(nsState['remote_offset']), int(nsState['seq_num'])))
+ newNsState = newNsState.decode('utf-8')
+ self._instance.log.debug(f'newNsState is {newNsState}')
+        # Let's replace the value.
+ (entry_dn_i, attr_data) = self._find_attr(nsState['dn'], 'nsState')
+ attr_i = next(iter(attr_data))
+ self._contents[entry_dn_i + attr_i] = f"nsState:: {newNsState}"
+ self._update()
+
class FSChecks(DSLint):
"""This is for the healthcheck feature, check commonly used system config files the
--
2.26.2
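The test above drives the lib389 helper added by this patch directly; a short usage sketch of the pieces it relies on (this is only a sketch, assuming an existing instance object `inst`, and remembering that dse.ldif may only be edited while the server is stopped):

    from lib389._constants import DEFAULT_SUFFIX
    from lib389.dseldif import DSEldif

    inst.stop()                                    # dse.ldif must not be in use
    state = DSEldif(inst).readNsState(DEFAULT_SUFFIX)[0]
    print(state['time_skew'])                      # current csngen skew, in seconds
    DSEldif(inst)._increaseTimeSkew(DEFAULT_SUFFIX, 6 * 3600)   # helper added above
    inst.start()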

View File

@ -1,159 +0,0 @@
From 0b0147cdaad0f1fc54451c23b6e5d70da178736f Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 11 Nov 2020 08:59:18 -0500
Subject: [PATCH 7/8] Issue 4383 - Do not normalize escaped spaces in a DN
Bug Description: Adding an entry with an escaped leading space leads to many
problems. Mainly id2entry can get corrupted during an
import of such an entry, and the entryrdn index is not
updated correctly
Fix Description: In slapi_dn_normalize_ext() leave an escaped space intact.
Relates: https://github.com/389ds/389-ds-base/issues/4383
Reviewed by: firstyear, progier, and tbordaz (Thanks!!!)
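The behavioural change is easiest to see on the escape handling itself: the dn.c hunk below keeps \20 (space) escaped during normalization, the same way \00 was already kept. A rough, hypothetical Python model of that decision (not the server code; the helper name is made up):

    # Hypothetical sketch of the rule added to slapi_dn_normalize_ext()
    def keep_hex_escape(hexpair):
        n = int(hexpair, 16)
        # \00 was already preserved; this patch preserves \20 (space) as well
        return n in (0x00, 0x20)

    assert keep_hex_escape('20') is True    # "cn=\20leadingSpace" stays escaped
    assert keep_hex_escape('41') is False   # "\41" is still decoded to "A"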
---
.../tests/suites/syntax/acceptance_test.py | 75 ++++++++++++++++++-
ldap/servers/slapd/dn.c | 8 +-
2 files changed, 77 insertions(+), 6 deletions(-)
diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py
index 543718689..7939a99a7 100644
--- a/dirsrvtests/tests/suites/syntax/acceptance_test.py
+++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2019 Red Hat, Inc.
+# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -7,13 +7,12 @@
# --- END COPYRIGHT BLOCK ---
import ldap
-import logging
import pytest
import os
from lib389.schema import Schema
from lib389.config import Config
from lib389.idm.user import UserAccounts
-from lib389.idm.group import Groups
+from lib389.idm.group import Group, Groups
from lib389._constants import DEFAULT_SUFFIX
from lib389.topologies import log, topology_st as topo
@@ -127,7 +126,7 @@ def test_invalid_dn_syntax_crash(topo):
4. Success
"""
- # Create group
+ # Create group
groups = Groups(topo.standalone, DEFAULT_SUFFIX)
group = groups.create(properties={'cn': ' test'})
@@ -145,6 +144,74 @@ def test_invalid_dn_syntax_crash(topo):
groups.list()
+@pytest.mark.parametrize("props, rawdn", [
+ ({'cn': ' leadingSpace'}, "cn=\\20leadingSpace,ou=Groups,dc=example,dc=com"),
+ ({'cn': 'trailingSpace '}, "cn=trailingSpace\\20,ou=Groups,dc=example,dc=com")])
+def test_dn_syntax_spaces_delete(topo, props, rawdn):
+ """Test that an entry with a space as the first character in the DN can be
+ deleted without error. We also want to make sure the indexes are properly
+ updated by repeatedly adding and deleting the entry, and that the entry cache
+ is properly maintained.
+
+ :id: b993f37c-c2b0-4312-992c-a9048ff98965
+ :parametrized: yes
+ :setup: Standalone Instance
+ :steps:
+ 1. Create a group with a DN that has a space as the first/last
+ character.
+ 2. Delete group
+ 3. Add group
+ 4. Modify group
+ 5. Restart server and modify entry
+ 6. Delete group
+ 7. Add group back
+ 8. Delete group using specific DN
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ """
+
+ # Create group
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties=props.copy())
+
+ # Delete group (verifies DN/RDN parsing works and cache is correct)
+ group.delete()
+
+ # Add group again (verifies entryrdn index was properly updated)
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties=props.copy())
+
+ # Modify the group (verifies dn/rdn parsing is correct)
+ group.replace('description', 'escaped space group')
+
+ # Restart the server. This will pull the entry from the database and
+    # convert it into a cache entry, which is different from how a client
+    # adds an entry: it is put into the cache before being written to
+ # disk.
+ topo.standalone.restart()
+
+ # Make sure we can modify the entry (verifies cache entry was created
+ # correctly)
+ group.replace('description', 'escaped space group after restart')
+
+ # Make sure it can still be deleted (verifies cache again).
+ group.delete()
+
+ # Add it back so we can delete it using a specific DN (sanity test to verify
+ # another DN/RDN parsing variation).
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties=props.copy())
+ group = Group(topo.standalone, dn=rawdn)
+ group.delete()
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c
index 2af3f38fc..3980b897f 100644
--- a/ldap/servers/slapd/dn.c
+++ b/ldap/servers/slapd/dn.c
@@ -894,8 +894,7 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len)
s++;
}
}
- } else if (s + 2 < ends &&
- isxdigit(*(s + 1)) && isxdigit(*(s + 2))) {
+ } else if (s + 2 < ends && isxdigit(*(s + 1)) && isxdigit(*(s + 2))) {
/* esc hexpair ==> real character */
int n = slapi_hexchar2int(*(s + 1));
int n2 = slapi_hexchar2int(*(s + 2));
@@ -903,6 +902,11 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len)
if (n == 0) { /* don't change \00 */
*d++ = *++s;
*d++ = *++s;
+ } else if (n == 32) { /* leave \20 (space) intact */
+ *d++ = *s;
+ *d++ = *++s;
+ *d++ = *++s;
+ s++;
} else {
*d++ = n;
s += 3;
--
2.26.2

View File

@ -1,560 +0,0 @@
From 220dbafa048269105b3f7958a5d5bfd1d988da26 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 30 Jun 2020 15:39:30 +0200
Subject: [PATCH 8/8] Issue 49300 - entryUSN is duplicated after memberOf
operation
Bug Description: When we assign a member to a group we have two
operations - group modification and user modification.
As a result, they both have the same entryUSN because the USN Plugin
assigns the entryUSN value in the bepreop but increments the counter
in the postop, and a lot of things can happen in between.
Fix Description: Increment the counter in the bepreop together with
the entryUSN assignment. Also, decrement the counter in the bepostop
if a failure has happened.
Add a test suite to cover the change.
https://pagure.io/389-ds-base/issue/49300
Reviewed by: tbordaz (Thanks!)
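A minimal way to observe the symptom with lib389 (a sketch only, assuming a running instance object `inst` with the USN and memberOf plugins enabled; the entry names are made up):

    from lib389._constants import DEFAULT_SUFFIX
    from lib389.idm.group import Groups
    from lib389.idm.user import UserAccounts

    user = UserAccounts(inst, DEFAULT_SUFFIX).create_test_user(uid=1)
    group = Groups(inst, DEFAULT_SUFFIX).create(properties={'cn': 'usn_demo'})
    group.replace('member', user.dn)   # memberOf updates the user as a side effect
    # Without the fix both values can come back identical; with it they differ
    print(group.get_attr_val_int('entryusn'), user.get_attr_val_int('entryusn'))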
---
.../tests/suites/plugins/entryusn_test.py | 240 ++++++++++++++++++
ldap/servers/plugins/usn/usn.c | 109 ++++----
ldap/servers/slapd/pblock.c | 14 +-
ldap/servers/slapd/pblock_v3.h | 1 +
ldap/servers/slapd/slapi-plugin.h | 3 +
5 files changed, 322 insertions(+), 45 deletions(-)
create mode 100644 dirsrvtests/tests/suites/plugins/entryusn_test.py
diff --git a/dirsrvtests/tests/suites/plugins/entryusn_test.py b/dirsrvtests/tests/suites/plugins/entryusn_test.py
new file mode 100644
index 000000000..721315419
--- /dev/null
+++ b/dirsrvtests/tests/suites/plugins/entryusn_test.py
@@ -0,0 +1,240 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import ldap
+import logging
+import os
+import pytest
+from lib389._constants import DEFAULT_SUFFIX
+from lib389.config import Config
+from lib389.plugins import USNPlugin, MemberOfPlugin
+from lib389.idm.group import Groups
+from lib389.idm.user import UserAccounts
+from lib389.idm.organizationalunit import OrganizationalUnit
+from lib389.tombstone import Tombstones
+from lib389.rootdse import RootDSE
+from lib389.topologies import topology_st, topology_m2
+
+log = logging.getLogger(__name__)
+
+USER_NUM = 10
+GROUP_NUM = 3
+
+
+def check_entryusn_no_duplicates(entryusn_list):
+ """Check that all values in the list are unique"""
+
+ if len(entryusn_list) > len(set(entryusn_list)):
+        raise AssertionError("EntryUSN values have duplicates, please check the logs")
+
+
+def check_lastusn_after_restart(inst):
+ """Check that last usn is the same after restart"""
+
+ root_dse = RootDSE(inst)
+ last_usn_before = root_dse.get_attr_val_int("lastusn;userroot")
+ inst.restart()
+ last_usn_after = root_dse.get_attr_val_int("lastusn;userroot")
+ assert last_usn_after == last_usn_before
+
+
+@pytest.fixture(scope="module")
+def setup(topology_st, request):
+ """
+ Enable USN plug-in
+ Enable MEMBEROF plugin
+ Add test entries
+ """
+
+ inst = topology_st.standalone
+
+ log.info("Enable the USN plugin...")
+ plugin = USNPlugin(inst)
+ plugin.enable()
+
+ log.info("Enable the MEMBEROF plugin...")
+ plugin = MemberOfPlugin(inst)
+ plugin.enable()
+
+ inst.restart()
+
+ users_list = []
+ log.info("Adding test entries...")
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+ for id in range(USER_NUM):
+ user = users.create_test_user(uid=id)
+ users_list.append(user)
+
+ groups_list = []
+ log.info("Adding test groups...")
+ groups = Groups(inst, DEFAULT_SUFFIX)
+ for id in range(GROUP_NUM):
+ group = groups.create(properties={'cn': f'test_group{id}'})
+ groups_list.append(group)
+
+ def fin():
+ for user in users_list:
+ try:
+ user.delete()
+ except ldap.NO_SUCH_OBJECT:
+ pass
+ for group in groups_list:
+ try:
+ group.delete()
+ except ldap.NO_SUCH_OBJECT:
+ pass
+ request.addfinalizer(fin)
+
+ return {"users": users_list,
+ "groups": groups_list}
+
+
+def test_entryusn_no_duplicates(topology_st, setup):
+ """Verify that entryUSN is not duplicated after memberOf operation
+
+ :id: 1a7d382d-1214-4d56-b9c2-9c4ed57d1683
+ :setup: Standalone instance, Groups and Users, USN and memberOf are enabled
+ :steps:
+ 1. Add a member to group 1
+ 2. Add a member to group 1 and 2
+ 3. Check that entryUSNs are different
+ 4. Check that lastusn before and after a restart are the same
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ inst = topology_st.standalone
+ config = Config(inst)
+ config.replace('nsslapd-accesslog-level', '260') # Internal op
+ config.replace('nsslapd-errorlog-level', '65536')
+ config.replace('nsslapd-plugin-logging', 'on')
+ entryusn_list = []
+
+ users = setup["users"]
+ groups = setup["groups"]
+
+ groups[0].replace('member', users[0].dn)
+ entryusn_list.append(users[0].get_attr_val_int('entryusn'))
+ log.info(f"{users[0].dn}_1: {entryusn_list[-1:]}")
+ entryusn_list.append(groups[0].get_attr_val_int('entryusn'))
+ log.info(f"{groups[0].dn}_1: {entryusn_list[-1:]}")
+ check_entryusn_no_duplicates(entryusn_list)
+
+ groups[1].replace('member', [users[0].dn, users[1].dn])
+ entryusn_list.append(users[0].get_attr_val_int('entryusn'))
+ log.info(f"{users[0].dn}_2: {entryusn_list[-1:]}")
+ entryusn_list.append(users[1].get_attr_val_int('entryusn'))
+ log.info(f"{users[1].dn}_2: {entryusn_list[-1:]}")
+ entryusn_list.append(groups[1].get_attr_val_int('entryusn'))
+ log.info(f"{groups[1].dn}_2: {entryusn_list[-1:]}")
+ check_entryusn_no_duplicates(entryusn_list)
+
+ check_lastusn_after_restart(inst)
+
+
+def test_entryusn_is_same_after_failure(topology_st, setup):
+ """Verify that entryUSN is the same after failed operation
+
+ :id: 1f227533-370a-48c1-b920-9b3b0bcfc32e
+ :setup: Standalone instance, Groups and Users, USN and memberOf are enabled
+ :steps:
+ 1. Get current group's entryUSN value
+ 2. Try to modify the group with an invalid syntax
+ 3. Get new group's entryUSN value and compare with old
+ 4. Check that lastusn before and after a restart are the same
+ :expectedresults:
+ 1. Success
+ 2. Invalid Syntax error
+ 3. Should be the same
+ 4. Success
+ """
+
+ inst = topology_st.standalone
+ users = setup["users"]
+
+    # We need this update so we get the latest USN pointing to our entry
+ users[0].replace('description', 'update')
+
+ entryusn_before = users[0].get_attr_val_int('entryusn')
+ users[0].replace('description', 'update')
+ try:
+ users[0].replace('uid', 'invalid update')
+ except ldap.NOT_ALLOWED_ON_RDN:
+ pass
+ users[0].replace('description', 'second update')
+ entryusn_after = users[0].get_attr_val_int('entryusn')
+
+ # entryUSN should be OLD + 2 (only two user updates)
+ assert entryusn_after == (entryusn_before + 2)
+
+ check_lastusn_after_restart(inst)
+
+
+def test_entryusn_after_repl_delete(topology_m2):
+ """Verify that entryUSN is incremented on 1 after delete operation which creates a tombstone
+
+ :id: 1704cf65-41bc-4347-bdaf-20fc2431b218
+ :setup: An instance with replication, Users, USN enabled
+ :steps:
+ 1. Try to delete a user
+ 2. Check the tombstone has the incremented USN
+ 3. Try to delete ou=People with users
+        4. Check that the entry's entryUSN is not incremented
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Should fail with Not Allowed On Non-leaf error
+ 4. Success
+ """
+
+ inst = topology_m2.ms["master1"]
+ plugin = USNPlugin(inst)
+ plugin.enable()
+ inst.restart()
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+
+ try:
+ user_1 = users.create_test_user()
+ user_rdn = user_1.rdn
+ tombstones = Tombstones(inst, DEFAULT_SUFFIX)
+
+ user_1.replace('description', 'update_ts')
+ user_usn = user_1.get_attr_val_int('entryusn')
+
+ user_1.delete()
+
+ ts = tombstones.get(user_rdn)
+ ts_usn = ts.get_attr_val_int('entryusn')
+
+ assert (user_usn + 1) == ts_usn
+
+ user_1 = users.create_test_user()
+ org = OrganizationalUnit(inst, f"ou=People,{DEFAULT_SUFFIX}")
+ org.replace('description', 'update_ts')
+ ou_usn_before = org.get_attr_val_int('entryusn')
+ try:
+ org.delete()
+ except ldap.NOT_ALLOWED_ON_NONLEAF:
+ pass
+ ou_usn_after = org.get_attr_val_int('entryusn')
+ assert ou_usn_before == ou_usn_after
+
+ finally:
+ try:
+ user_1.delete()
+ except ldap.NO_SUCH_OBJECT:
+ pass
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/ldap/servers/plugins/usn/usn.c b/ldap/servers/plugins/usn/usn.c
index 12ba040c6..f2cc8a62c 100644
--- a/ldap/servers/plugins/usn/usn.c
+++ b/ldap/servers/plugins/usn/usn.c
@@ -333,6 +333,12 @@ _usn_add_next_usn(Slapi_Entry *e, Slapi_Backend *be)
}
slapi_ch_free_string(&usn_berval.bv_val);
+ /*
+ * increment the counter now and decrement in the bepostop
+ * if the operation will fail
+ */
+ slapi_counter_increment(be->be_usn_counter);
+
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"<-- _usn_add_next_usn\n");
@@ -370,6 +376,12 @@ _usn_mod_next_usn(LDAPMod ***mods, Slapi_Backend *be)
*mods = slapi_mods_get_ldapmods_passout(&smods);
+ /*
+ * increment the counter now and decrement in the bepostop
+ * if the operation will fail
+ */
+ slapi_counter_increment(be->be_usn_counter);
+
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"<-- _usn_mod_next_usn\n");
return LDAP_SUCCESS;
@@ -420,6 +432,7 @@ usn_betxnpreop_delete(Slapi_PBlock *pb)
{
Slapi_Entry *e = NULL;
Slapi_Backend *be = NULL;
+ int32_t tombstone_incremented = 0;
int rc = SLAPI_PLUGIN_SUCCESS;
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
@@ -441,7 +454,9 @@ usn_betxnpreop_delete(Slapi_PBlock *pb)
goto bail;
}
_usn_add_next_usn(e, be);
+ tombstone_incremented = 1;
bail:
+ slapi_pblock_set(pb, SLAPI_USN_INCREMENT_FOR_TOMBSTONE, &tombstone_incremented);
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"<-- usn_betxnpreop_delete\n");
@@ -483,7 +498,7 @@ bail:
return rc;
}
-/* count up the counter */
+/* count down the counter */
static int
usn_bepostop(Slapi_PBlock *pb)
{
@@ -493,25 +508,24 @@ usn_bepostop(Slapi_PBlock *pb)
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"--> usn_bepostop\n");
- /* if op is not successful, don't increment the counter */
+ /* if op is not successful, decrement the counter, else - do nothing */
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc);
if (LDAP_SUCCESS != rc) {
- /* no plugin failure */
- rc = SLAPI_PLUGIN_SUCCESS;
- goto bail;
- }
+ slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+ if (NULL == be) {
+ rc = LDAP_PARAM_ERROR;
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
+ rc = SLAPI_PLUGIN_FAILURE;
+ goto bail;
+ }
- slapi_pblock_get(pb, SLAPI_BACKEND, &be);
- if (NULL == be) {
- rc = LDAP_PARAM_ERROR;
- slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
- rc = SLAPI_PLUGIN_FAILURE;
- goto bail;
+ if (be->be_usn_counter) {
+ slapi_counter_decrement(be->be_usn_counter);
+ }
}
- if (be->be_usn_counter) {
- slapi_counter_increment(be->be_usn_counter);
- }
+ /* no plugin failure */
+ rc = SLAPI_PLUGIN_SUCCESS;
bail:
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"<-- usn_bepostop\n");
@@ -519,13 +533,14 @@ bail:
return rc;
}
-/* count up the counter */
+/* count down the counter on a failure and mod ignore */
static int
usn_bepostop_modify(Slapi_PBlock *pb)
{
int rc = SLAPI_PLUGIN_FAILURE;
Slapi_Backend *be = NULL;
LDAPMod **mods = NULL;
+ int32_t do_decrement = 0;
int i;
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
@@ -534,9 +549,7 @@ usn_bepostop_modify(Slapi_PBlock *pb)
/* if op is not successful, don't increment the counter */
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc);
if (LDAP_SUCCESS != rc) {
- /* no plugin failure */
- rc = SLAPI_PLUGIN_SUCCESS;
- goto bail;
+ do_decrement = 1;
}
slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods);
@@ -545,25 +558,29 @@ usn_bepostop_modify(Slapi_PBlock *pb)
if (mods[i]->mod_op & LDAP_MOD_IGNORE) {
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"usn_bepostop_modify - MOD_IGNORE detected\n");
- goto bail; /* conflict occurred.
- skip incrementing the counter. */
+ do_decrement = 1; /* conflict occurred.
+                                 decrement the counter. */
} else {
break;
}
}
}
- slapi_pblock_get(pb, SLAPI_BACKEND, &be);
- if (NULL == be) {
- rc = LDAP_PARAM_ERROR;
- slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
- rc = SLAPI_PLUGIN_FAILURE;
- goto bail;
+ if (do_decrement) {
+ slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+ if (NULL == be) {
+ rc = LDAP_PARAM_ERROR;
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
+ rc = SLAPI_PLUGIN_FAILURE;
+ goto bail;
+ }
+ if (be->be_usn_counter) {
+ slapi_counter_decrement(be->be_usn_counter);
+ }
}
- if (be->be_usn_counter) {
- slapi_counter_increment(be->be_usn_counter);
- }
+ /* no plugin failure */
+ rc = SLAPI_PLUGIN_SUCCESS;
bail:
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"<-- usn_bepostop_modify\n");
@@ -573,34 +590,38 @@ bail:
/* count up the counter */
/* if the op is delete and the op was not successful, remove preventryusn */
+/* this function is executed at the TXN level */
static int
usn_bepostop_delete(Slapi_PBlock *pb)
{
int rc = SLAPI_PLUGIN_FAILURE;
Slapi_Backend *be = NULL;
+ int32_t tombstone_incremented = 0;
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"--> usn_bepostop_delete\n");
- /* if op is not successful, don't increment the counter */
+ /* if op is not successful and it is a tombstone entry, decrement the counter */
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc);
if (LDAP_SUCCESS != rc) {
- /* no plugin failure */
- rc = SLAPI_PLUGIN_SUCCESS;
- goto bail;
- }
+ slapi_pblock_get(pb, SLAPI_USN_INCREMENT_FOR_TOMBSTONE, &tombstone_incremented);
+ if (tombstone_incremented) {
+ slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+ if (NULL == be) {
+ rc = LDAP_PARAM_ERROR;
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
+ rc = SLAPI_PLUGIN_FAILURE;
+ goto bail;
+ }
- slapi_pblock_get(pb, SLAPI_BACKEND, &be);
- if (NULL == be) {
- rc = LDAP_PARAM_ERROR;
- slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
- rc = SLAPI_PLUGIN_FAILURE;
- goto bail;
+ if (be->be_usn_counter) {
+ slapi_counter_decrement(be->be_usn_counter);
+ }
+ }
}
- if (be->be_usn_counter) {
- slapi_counter_increment(be->be_usn_counter);
- }
+ /* no plugin failure */
+ rc = SLAPI_PLUGIN_SUCCESS;
bail:
slapi_log_err(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
"<-- usn_bepostop_delete\n");
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index cb562e938..454ea9cc3 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -2436,7 +2436,7 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value)
(*(char **)value) = NULL;
}
break;
-
+
case SLAPI_SEARCH_CTRLS:
if (pblock->pb_intop != NULL) {
(*(LDAPControl ***)value) = pblock->pb_intop->pb_search_ctrls;
@@ -2479,6 +2479,14 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value)
}
break;
+ case SLAPI_USN_INCREMENT_FOR_TOMBSTONE:
+ if (pblock->pb_intop != NULL) {
+ (*(int32_t *)value) = pblock->pb_intop->pb_usn_tombstone_incremented;
+ } else {
+ (*(int32_t *)value) = 0;
+ }
+ break;
+
/* ACI Target Check */
case SLAPI_ACI_TARGET_CHECK:
if (pblock->pb_misc != NULL) {
@@ -4156,6 +4164,10 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value)
pblock->pb_intop->pb_paged_results_cookie = *(int *)value;
break;
+ case SLAPI_USN_INCREMENT_FOR_TOMBSTONE:
+ pblock->pb_intop->pb_usn_tombstone_incremented = *((int32_t *)value);
+ break;
+
/* ACI Target Check */
case SLAPI_ACI_TARGET_CHECK:
_pblock_assert_pb_misc(pblock);
diff --git a/ldap/servers/slapd/pblock_v3.h b/ldap/servers/slapd/pblock_v3.h
index 7ec2f37d6..90498c0b0 100644
--- a/ldap/servers/slapd/pblock_v3.h
+++ b/ldap/servers/slapd/pblock_v3.h
@@ -161,6 +161,7 @@ typedef struct _slapi_pblock_intop
int pb_paged_results_index; /* stash SLAPI_PAGED_RESULTS_INDEX */
int pb_paged_results_cookie; /* stash SLAPI_PAGED_RESULTS_COOKIE */
+    int32_t pb_usn_tombstone_incremented; /* stash SLAPI_USN_INCREMENT_FOR_TOMBSTONE */
} slapi_pblock_intop;
/* Stuff that is rarely used, but still present */
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 04c02cf7c..589830bb4 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -7483,6 +7483,9 @@ typedef enum _slapi_op_note_t {
#define SLAPI_PAGED_RESULTS_INDEX 1945
#define SLAPI_PAGED_RESULTS_COOKIE 1949
+/* USN Plugin flag for tombstone entries */
+#define SLAPI_USN_INCREMENT_FOR_TOMBSTONE 1950
+
/* ACI Target Check */
#define SLAPI_ACI_TARGET_CHECK 1946
--
2.26.2

View File

@ -1,95 +0,0 @@
From 340b81a59cee365e7300e57c1ca5f4866373954c Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 16 Dec 2020 16:30:28 +0100
Subject: [PATCH 1/4] Issue 4480 - Unexpected info returned to ldap request
(#4491)
Bug description:
If the bind entry does not exist, the bind result info
reports 'No such entry'. It should not give any
information about whether the target entry exists or not.
Fix description:
Do not return any additional information during a bind.
relates: https://github.com/389ds/389-ds-base/issues/4480
Reviewed by: William Brown, Viktor Ashirov, Mark Reynolds (thank you all)
Platforms tested: F31
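The client-visible effect, sketched with python-ldap (the URL and DN below are placeholders):

    import ldap

    conn = ldap.initialize('ldap://localhost:389')
    try:
        conn.simple_bind_s('cn=does_not_exist,dc=example,dc=com', 'password')
    except ldap.INVALID_CREDENTIALS as e:
        # Previously e.args[0] could also carry info: 'No such entry';
        # with this fix only desc: 'Invalid credentials' is returned.
        assert 'info' not in e.args[0]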
---
dirsrvtests/tests/suites/basic/basic_test.py | 30 ++++++++++++++++++++
ldap/servers/slapd/back-ldbm/ldbm_config.c | 2 +-
ldap/servers/slapd/result.c | 2 +-
3 files changed, 32 insertions(+), 2 deletions(-)
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 120207321..e9afa1e7e 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -1400,6 +1400,36 @@ def test_dscreate_multiple_dashes_name(dscreate_long_instance):
assert not dscreate_long_instance.exists()
+def test_bind_invalid_entry(topology_st):
+ """Test the failing bind does not return information about the entry
+
+ :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f
+
+ :setup: Standalone instance
+
+ :steps:
+ 1: bind as non existing entry
+ 2: check that bind info does not report 'No such entry'
+
+ :expectedresults:
+ 1: pass
+ 2: pass
+ """
+
+ topology_st.standalone.restart()
+    INVALID_ENTRY = "cn=foooo,%s" % DEFAULT_SUFFIX
+ try:
+ topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD)
+ except ldap.LDAPError as e:
+ log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY)
+ log.info('exception description: ' + e.args[0]['desc'])
+ if 'info' in e.args[0]:
+ log.info('exception info: ' + e.args[0]['info'])
+ assert e.args[0]['desc'] == 'Invalid credentials'
+ assert 'info' not in e.args[0]
+ pass
+
+ log.info('test_bind_invalid_entry: PASSED')
if __name__ == '__main__':
# Run isolated
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
index 88c186359..dee5fc088 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
@@ -1266,7 +1266,7 @@ ldbm_config_search_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
if (attrs) {
for (size_t i = 0; attrs[i]; i++) {
if (ldbm_config_moved_attr(attrs[i])) {
- slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry");
+ slapi_pblock_set(pb, SLAPI_RESULT_TEXT, "at least one required attribute has been moved to the BDB scecific configuration entry");
break;
}
}
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index 61efb6f8d..40c5dcc57 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -355,7 +355,7 @@ send_ldap_result_ext(
if (text) {
pbtext = text;
} else {
- slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &pbtext);
+ slapi_pblock_get(pb, SLAPI_RESULT_TEXT, &pbtext);
}
if (operation == NULL) {
--
2.26.2

View File

@ -1,782 +0,0 @@
From 2923940ffa0db88df986dd00d74ad812ccd71188 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 20 Jan 2021 16:42:15 -0500
Subject: [PATCH 2/4] Issue 5442 - Search results are different between RHDS10
and RHDS11
Bug Description: In 1.4.x we introduced a change that was overly strict about
how a search on a non-existent subtree returned its error code.
It was changed from returning an error 32 to an error 0 with
zero entries returned.
Fix Description: When finding the entry and processing ACLs, make sure to
gather the ACIs that match the resource even if the resource
does not exist. This requires some extra checks when processing
the target attribute.
relates: https://github.com/389ds/389-ds-base/issues/4542
Reviewed by: firstyear, elkris, and tbordaz (Thanks!)
Apply Thierry's changes
round 2
Apply more suggestions from Thierry
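The restored behaviour, sketched with python-ldap (host, bind DN and base DN are placeholders; it assumes an ACI granting read/search on the suffix, as in the test below):

    import ldap

    conn = ldap.initialize('ldap://localhost:389')
    conn.simple_bind_s('uid=test,ou=people,dc=example,dc=com', 'password')
    try:
        conn.search_s('ou=does_not_exist,dc=example,dc=com',
                      ldap.SCOPE_SUBTREE, '(objectClass=*)')
    except ldap.NO_SUCH_OBJECT:
        pass    # 1.4.x returned err=0 with no entries; err=32 is back, as in RHDS 10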
---
dirsrvtests/tests/suites/acl/misc_test.py | 108 +++++++-
ldap/servers/plugins/acl/acl.c | 296 ++++++++++------------
ldap/servers/slapd/back-ldbm/findentry.c | 6 +-
src/lib389/lib389/_mapped_object.py | 4 +-
4 files changed, 239 insertions(+), 175 deletions(-)
diff --git a/dirsrvtests/tests/suites/acl/misc_test.py b/dirsrvtests/tests/suites/acl/misc_test.py
index 8f122b7a7..b64961c0c 100644
--- a/dirsrvtests/tests/suites/acl/misc_test.py
+++ b/dirsrvtests/tests/suites/acl/misc_test.py
@@ -11,7 +11,7 @@
import os
import pytest
-from lib389._constants import DEFAULT_SUFFIX, PW_DM
+from lib389._constants import DEFAULT_SUFFIX, PW_DM, DN_DM
from lib389.idm.user import UserAccount, UserAccounts
from lib389._mapped_object import DSLdapObject
from lib389.idm.account import Accounts, Anonymous
@@ -399,14 +399,112 @@ def test_do_bind_as_201_distinct_users(topo, clean, aci_of_user):
user = uas.create_test_user(uid=i, gid=i)
user.set('userPassword', PW_DM)
- for i in range(len(uas.list())):
- uas.list()[i].bind(PW_DM)
+ users = uas.list()
+ for user in users:
+ user.bind(PW_DM)
ACLPlugin(topo.standalone).replace("nsslapd-aclpb-max-selected-acls", '220')
topo.standalone.restart()
- for i in range(len(uas.list())):
- uas.list()[i].bind(PW_DM)
+ users = uas.list()
+ for user in users:
+ user.bind(PW_DM)
+
+
+def test_info_disclosure(request, topo):
+ """Test that a search returns 32 when base entry does not exist
+
+ :id: f6dec4c2-65a3-41e4-a4c0-146196863333
+ :setup: Standalone Instance
+ :steps:
+ 1. Add aci
+ 2. Add test user
+ 3. Bind as user and search for non-existent entry
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Error 32 is returned
+ """
+
+ ACI_TARGET = "(targetattr = \"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX)
+ ACI_ALLOW = "(version 3.0; acl \"Read/Search permission for all users\"; allow (read,search)"
+ ACI_SUBJECT = "(userdn=\"ldap:///all\");)"
+ ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
+
+    # Get the current ACIs so we can restore them when we are done
+ suffix = Domain(topo.standalone, DEFAULT_SUFFIX)
+ preserved_acis = suffix.get_attr_vals_utf8('aci')
+
+ def finofaci():
+ domain = Domain(topo.standalone, DEFAULT_SUFFIX)
+ try:
+ domain.remove_all('aci')
+ domain.replace_values('aci', preserved_acis)
+ except:
+ pass
+ request.addfinalizer(finofaci)
+
+ # Remove aci's
+ suffix.remove_all('aci')
+
+ # Add test user
+ USER_DN = "uid=test,ou=people," + DEFAULT_SUFFIX
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
+ users.create(properties={
+ 'uid': 'test',
+ 'cn': 'test',
+ 'sn': 'test',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/test',
+ 'userPassword': PW_DM
+ })
+
+ # bind as user
+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM)
+
+    # Search for an existing base DN
+ test = Domain(conn, DEFAULT_SUFFIX)
+ try:
+ test.get_attr_vals_utf8_l('dc')
+ assert False
+ except IndexError:
+ pass
+
+    # Search for non-existent bases
+ subtree = Domain(conn, "ou=does_not_exist," + DEFAULT_SUFFIX)
+ try:
+ subtree.get_attr_vals_utf8_l('objectclass')
+ except IndexError:
+ pass
+ subtree = Domain(conn, "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX)
+ try:
+ subtree.get_attr_vals_utf8_l('objectclass')
+ except IndexError:
+ pass
+ # Try ONE level search instead of BASE
+ try:
+ Accounts(conn, "ou=does_not_exist," + DEFAULT_SUFFIX).filter("(objectclass=top)", ldap.SCOPE_ONELEVEL)
+ except IndexError:
+ pass
+
+ # add aci
+ suffix.add('aci', ACI)
+
+    # Search for a non-existent entry, which should raise an exception
+ with pytest.raises(ldap.NO_SUCH_OBJECT):
+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM)
+ subtree = Domain(conn, "ou=does_not_exist," + DEFAULT_SUFFIX)
+ subtree.get_attr_vals_utf8_l('objectclass')
+ with pytest.raises(ldap.NO_SUCH_OBJECT):
+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM)
+ subtree = Domain(conn, "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX)
+ subtree.get_attr_vals_utf8_l('objectclass')
+ with pytest.raises(ldap.NO_SUCH_OBJECT):
+ conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM)
+ DN = "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX
+ Accounts(conn, DN).filter("(objectclass=top)", ldap.SCOPE_ONELEVEL, strict=True)
+
if __name__ == "__main__":
diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c
index 41a909a18..4e811f73a 100644
--- a/ldap/servers/plugins/acl/acl.c
+++ b/ldap/servers/plugins/acl/acl.c
@@ -2111,10 +2111,11 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
aci_right = aci->aci_access;
res_right = aclpb->aclpb_access;
if (!(aci_right & res_right)) {
- /* If we are looking for read/search and the acl has read/search
- ** then go further because if targets match we may keep that
- ** acl in the entry cache list.
- */
+ /*
+ * If we are looking for read/search and the acl has read/search
+ * then go further because if targets match we may keep that
+ * acl in the entry cache list.
+ */
if (!((res_right & (SLAPI_ACL_SEARCH | SLAPI_ACL_READ)) &&
(aci_right & (SLAPI_ACL_SEARCH | SLAPI_ACL_READ)))) {
matches = ACL_FALSE;
@@ -2122,30 +2123,29 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
}
}
-
- /* first Let's see if the entry is under the subtree where the
- ** ACL resides. We can't let somebody affect a target beyond the
- ** scope of where the ACL resides
- ** Example: ACL is located in "ou=engineering, o=ace industry, c=us
- ** but if the target is "o=ace industry, c=us", then we are in trouble.
- **
- ** If the aci is in the rootdse and the entry is not, then we do not
- ** match--ie. acis in the rootdse do NOT apply below...for the moment.
- **
- */
+ /*
+ * First Let's see if the entry is under the subtree where the
+ * ACL resides. We can't let somebody affect a target beyond the
+ * scope of where the ACL resides
+ * Example: ACL is located in "ou=engineering, o=ace industry, c=us
+ * but if the target is "o=ace industry, c=us", then we are in trouble.
+ *
+ * If the aci is in the rootdse and the entry is not, then we do not
+ * match--ie. acis in the rootdse do NOT apply below...for the moment.
+ */
res_ndn = slapi_sdn_get_ndn(aclpb->aclpb_curr_entry_sdn);
aci_ndn = slapi_sdn_get_ndn(aci->aci_sdn);
- if (!slapi_sdn_issuffix(aclpb->aclpb_curr_entry_sdn, aci->aci_sdn) || (!slapi_is_rootdse(res_ndn) && slapi_is_rootdse(aci_ndn))) {
-
- /* cant' poke around */
+ if (!slapi_sdn_issuffix(aclpb->aclpb_curr_entry_sdn, aci->aci_sdn) ||
+ (!slapi_is_rootdse(res_ndn) && slapi_is_rootdse(aci_ndn)))
+ {
+ /* can't poke around */
matches = ACL_FALSE;
goto acl__resource_match_aci_EXIT;
}
/*
- ** We have a single ACI which we need to find if it applies to
- ** the resource or not.
- */
+ * We have a single ACI which we need to find if it applies to the resource or not.
+ */
if ((aci->aci_type & ACI_TARGET_DN) && (aclpb->aclpb_curr_entry_sdn)) {
char *avaType;
struct berval *avaValue;
@@ -2173,25 +2173,23 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
char *avaType;
struct berval *avaValue;
char logbuf[1024];
-
- /* We are evaluating the moddn permission.
- * The aci contains target_to and target_from
- *
- * target_to filter must be checked against the resource ndn that was stored in
- * aclpb->aclpb_curr_entry_sdn
- *
- * target_from filter must be check against the entry ndn that is in aclpb->aclpb_moddn_source_sdn
- * (sdn was stored in the pblock)
- */
+ /*
+ * We are evaluating the moddn permission.
+ * The aci contains target_to and target_from
+ *
+ * target_to filter must be checked against the resource ndn that was stored in
+ * aclpb->aclpb_curr_entry_sdn
+ *
+ * target_from filter must be check against the entry ndn that is in aclpb->aclpb_moddn_source_sdn
+ * (sdn was stored in the pblock)
+ */
if (aci->target_to) {
f = aci->target_to;
dn_matched = ACL_TRUE;
/* Now check if the filter is a simple or substring filter */
if (aci->aci_type & ACI_TARGET_MODDN_TO_PATTERN) {
- /* This is a filter with substring
- * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com
- */
+ /* This is a filter with substring e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com */
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_to substring: %s\n",
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
if ((rv = acl_match_substring(f, (char *)res_ndn, 0 /* match suffix */)) != ACL_TRUE) {
@@ -2204,9 +2202,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
}
}
} else {
- /* This is a filter without substring
- * e.g. ldap:///cn=accounts,dc=example,dc=com
- */
+ /* This is a filter without substring e.g. ldap:///cn=accounts,dc=example,dc=com */
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_to: %s\n",
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
slapi_filter_get_ava(f, &avaType, &avaValue);
@@ -2230,8 +2226,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
/* Now check if the filter is a simple or substring filter */
if (aci->aci_type & ACI_TARGET_MODDN_FROM_PATTERN) {
/* This is a filter with substring
- * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com
- */
+ * e.g. ldap:///uid=*,cn=accounts,dc=example,dc=com
+ */
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_from substring: %s\n",
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
if ((rv = acl_match_substring(f, (char *)slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn), 0 /* match suffix */)) != ACL_TRUE) {
@@ -2243,11 +2239,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
goto acl__resource_match_aci_EXIT;
}
}
-
} else {
- /* This is a filter without substring
- * e.g. ldap:///cn=accounts,dc=example,dc=com
- */
+ /* This is a filter without substring e.g. ldap:///cn=accounts,dc=example,dc=com */
slapi_log_err(SLAPI_LOG_ACL, plugin_name, "acl__resource_match_aci - moddn target_from: %s\n",
slapi_filter_to_string(f, logbuf, sizeof(logbuf)));
if (!slapi_dn_issuffix(slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn), avaValue->bv_val)) {
@@ -2269,10 +2262,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
}
if (aci->aci_type & ACI_TARGET_PATTERN) {
-
f = aci->target;
dn_matched = ACL_TRUE;
-
if ((rv = acl_match_substring(f, (char *)res_ndn, 0 /* match suffux */)) != ACL_TRUE) {
dn_matched = ACL_FALSE;
if (rv == ACL_ERR) {
@@ -2296,7 +2287,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
/*
* Is it a (target="ldap://cn=*,($dn),o=sun.com") kind of thing.
- */
+ */
if (aci->aci_type & ACI_TARGET_MACRO_DN) {
/*
* See if the ($dn) component matches the string and
@@ -2306,8 +2297,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
* entry is the same one don't recalculate it--
* this flag only works for search right now, could
* also optimise for mods by making it work for mods.
- */
-
+ */
if ((aclpb->aclpb_res_type & ACLPB_NEW_ENTRY) == 0) {
/*
* Here same entry so just look up the matched value,
@@ -2356,8 +2346,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
* If there is already an entry for this aci in this
* aclpb then remove it--it's an old value for a
* different entry.
- */
-
+ */
acl_ht_add_and_freeOld(aclpb->aclpb_macro_ht,
(PLHashNumber)aci->aci_index,
matched_val);
@@ -2381,30 +2370,27 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
}
/*
- ** Here, if there's a targetfilter field, see if it matches.
- **
- ** The commented out code below was an erroneous attempt to skip
- ** this test. It is wrong because: 1. you need to store
- ** whether the last test matched or not (you cannot just assume it did)
- ** and 2. It may not be the same aci, so the previous matched
- ** value is a function of the aci.
- ** May be interesting to build such a cache...but no evidence for
- ** for that right now. See Bug 383424.
- **
- **
- ** && ((aclpb->aclpb_state & ACLPB_SEARCH_BASED_ON_LIST) ||
- ** (aclpb->aclpb_res_type & ACLPB_NEW_ENTRY))
- */
+ * Here, if there's a targetfilter field, see if it matches.
+ *
+ * The commented out code below was an erroneous attempt to skip
+ * this test. It is wrong because: 1. you need to store
+ * whether the last test matched or not (you cannot just assume it did)
+ * and 2. It may not be the same aci, so the previous matched
+ * value is a function of the aci.
+ * May be interesting to build such a cache...but no evidence for
+ * for that right now. See Bug 383424.
+ *
+ *
+ * && ((aclpb->aclpb_state & ACLPB_SEARCH_BASED_ON_LIST) ||
+ * (aclpb->aclpb_res_type & ACLPB_NEW_ENTRY))
+ */
if (aci->aci_type & ACI_TARGET_FILTER) {
int filter_matched = ACL_TRUE;
-
/*
* Check for macros.
* For targetfilter we need to fake the lasinfo structure--it's
* created "naturally" for subjects but not targets.
- */
-
-
+ */
if (aci->aci_type & ACI_TARGET_FILTER_MACRO_DN) {
lasInfo *lasinfo = NULL;
@@ -2419,11 +2405,9 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
ACL_EVAL_TARGET_FILTER);
slapi_ch_free((void **)&lasinfo);
} else {
-
-
if (slapi_vattr_filter_test(NULL, aclpb->aclpb_curr_entry,
aci->targetFilter,
- 0 /*don't do acess chk*/) != 0) {
+ 0 /*don't do access check*/) != 0) {
filter_matched = ACL_FALSE;
}
}
@@ -2450,7 +2434,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
* Check to see if we need to evaluate any targetattrfilters.
* They look as follows:
* (targetattrfilters="add=sn:(sn=rob) && gn:(gn!=byrne),
- * del=sn:(sn=rob) && gn:(gn=byrne)")
+ * del=sn:(sn=rob) && gn:(gn=byrne)")
*
* For ADD/DELETE:
* If theres's a targetattrfilter then each add/del filter
@@ -2458,29 +2442,25 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
* by each value of the attribute in the entry.
*
* For MODIFY:
- * If there's a targetattrfilter then the add/del filter
+ * If there's a targetattrfilter then the add/del filter
* must be satisfied by the attribute to be added/deleted.
* (MODIFY acl is evaluated one value at a time).
*
*
- */
-
+ */
if (((aclpb->aclpb_access & SLAPI_ACL_ADD) &&
(aci->aci_type & ACI_TARGET_ATTR_ADD_FILTERS)) ||
((aclpb->aclpb_access & SLAPI_ACL_DELETE) &&
- (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS))) {
-
+ (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS)))
+ {
Targetattrfilter **attrFilterArray = NULL;
-
Targetattrfilter *attrFilter = NULL;
-
Slapi_Attr *attr_ptr = NULL;
Slapi_Value *sval;
const struct berval *attrVal;
int k;
int done;
-
if ((aclpb->aclpb_access & SLAPI_ACL_ADD) &&
(aci->aci_type & ACI_TARGET_ATTR_ADD_FILTERS)) {
@@ -2497,28 +2477,20 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
while (attrFilterArray && attrFilterArray[num_attrs] && attr_matched) {
attrFilter = attrFilterArray[num_attrs];
-
/*
- * If this filter applies to an attribute in the entry,
- * apply it to the entry.
- * Otherwise just ignore it.
- *
- */
-
- if (slapi_entry_attr_find(aclpb->aclpb_curr_entry,
- attrFilter->attr_str,
- &attr_ptr) == 0) {
-
+ * If this filter applies to an attribute in the entry,
+ * apply it to the entry.
+ * Otherwise just ignore it.
+ *
+ */
+ if (slapi_entry_attr_find(aclpb->aclpb_curr_entry, attrFilter->attr_str, &attr_ptr) == 0) {
/*
- * This is an applicable filter.
- * The filter is to be appplied to the entry being added
- * or deleted.
- * The filter needs to be satisfied by _each_ occurence
- * of the attribute in the entry--otherwise you
- * could satisfy the filter and then put loads of other
- * values in on the back of it.
- */
-
+ * This is an applicable filter.
+ * The filter is to be applied to the entry being added or deleted.
+ * The filter needs to be satisfied by _each_ occurrence of the
+ * attribute in the entry--otherwise you could satisfy the filter
+ * and then put loads of other values in on the back of it.
+ */
sval = NULL;
attrVal = NULL;
k = slapi_attr_first_value(attr_ptr, &sval);
@@ -2528,12 +2500,11 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
if (acl__make_filter_test_entry(&aclpb->aclpb_filter_test_entry,
attrFilter->attr_str,
- (struct berval *)attrVal) == LDAP_SUCCESS) {
-
+ (struct berval *)attrVal) == LDAP_SUCCESS)
+ {
attr_matched = acl__test_filter(aclpb->aclpb_filter_test_entry,
attrFilter->filter,
- 1 /* Do filter sense evaluation below */
- );
+ 1 /* Do filter sense evaluation below */);
done = !attr_matched;
slapi_entry_free(aclpb->aclpb_filter_test_entry);
}
@@ -2542,19 +2513,19 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
} /* while */
/*
- * Here, we applied an applicable filter to the entry.
- * So if attr_matched is ACL_TRUE then every value
- * of the attribute in the entry satisfied the filter.
- * Otherwise, attr_matched is ACL_FALSE and not every
- * value satisfied the filter, so we will teminate the
- * scan of the filter list.
- */
+ * Here, we applied an applicable filter to the entry.
+ * So if attr_matched is ACL_TRUE then every value
+ * of the attribute in the entry satisfied the filter.
+ * Otherwise, attr_matched is ACL_FALSE and not every
+ * value satisfied the filter, so we will terminate the
+ * scan of the filter list.
+ */
}
num_attrs++;
} /* while */
-/*
+ /*
* Here, we've applied all the applicable filters to the entry.
* Each one must have been satisfied by all the values of the attribute.
* The result of this is stored in attr_matched.
@@ -2585,7 +2556,8 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
} else if (((aclpb->aclpb_access & ACLPB_SLAPI_ACL_WRITE_ADD) &&
(aci->aci_type & ACI_TARGET_ATTR_ADD_FILTERS)) ||
((aclpb->aclpb_access & ACLPB_SLAPI_ACL_WRITE_DEL) &&
- (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS))) {
+ (aci->aci_type & ACI_TARGET_ATTR_DEL_FILTERS)))
+ {
/*
* Here, it's a modify add/del and we have attr filters.
* So, we need to scan the add/del filter list to find the filter
@@ -2629,11 +2601,10 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
* Otherwise, ignore the targetattrfilters.
*/
if (found) {
-
if (acl__make_filter_test_entry(&aclpb->aclpb_filter_test_entry,
aclpb->aclpb_curr_attrEval->attrEval_name,
- aclpb->aclpb_curr_attrVal) == LDAP_SUCCESS) {
-
+ aclpb->aclpb_curr_attrVal) == LDAP_SUCCESS)
+ {
attr_matched = acl__test_filter(aclpb->aclpb_filter_test_entry,
attrFilter->filter,
1 /* Do filter sense evaluation below */
@@ -2651,20 +2622,21 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
* Here this attribute appeared and was matched in a
* targetattrfilters list, so record this fact so we do
* not have to scan the targetattr list for the attribute.
- */
+ */
attr_matched_in_targetattrfilters = 1;
}
} /* targetvaluefilters */
- /* There are 3 cases by which acis are selected.
- ** 1) By scanning the whole list and picking based on the resource.
- ** 2) By picking a subset of the list which will be used for the whole
- ** acl evaluation.
- ** 3) A finer granularity, i.e, a selected list of acls which will be
- ** used for only that entry's evaluation.
- */
+ /*
+ * There are 3 cases by which acis are selected.
+ * 1) By scanning the whole list and picking based on the resource.
+ * 2) By picking a subset of the list which will be used for the whole
+ * acl evaluation.
+ * 3) A finer granularity, i.e, a selected list of acls which will be
+ * used for only that entry's evaluation.
+ */
if (!(skip_attrEval) && (aclpb->aclpb_state & ACLPB_SEARCH_BASED_ON_ENTRY_LIST) &&
(res_right & SLAPI_ACL_SEARCH) &&
((aci->aci_access & SLAPI_ACL_READ) || (aci->aci_access & SLAPI_ACL_SEARCH))) {
@@ -2680,7 +2652,6 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
}
}
-
/* If we are suppose to skip attr eval, then let's skip it */
if ((aclpb->aclpb_access & SLAPI_ACL_SEARCH) && (!skip_attrEval) &&
(aclpb->aclpb_res_type & ACLPB_NEW_ENTRY)) {
@@ -2697,9 +2668,10 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
goto acl__resource_match_aci_EXIT;
}
- /* We need to check again because we don't want to select this handle
- ** if the right doesn't match for now.
- */
+ /*
+ * We need to check again because we don't want to select this handle
+ * if the right doesn't match for now.
+ */
if (!(aci_right & res_right)) {
matches = ACL_FALSE;
goto acl__resource_match_aci_EXIT;
@@ -2718,20 +2690,16 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
* rbyrneXXX if we had a proper permission for modrdn eg SLAPI_ACL_MODRDN
* then we would not need this crappy way of telling it was a MODRDN
* request ie. SLAPI_ACL_WRITE && !(c_attrEval).
- */
-
+ */
c_attrEval = aclpb->aclpb_curr_attrEval;
/*
* If we've already matched on targattrfilter then do not
* bother to look at the attrlist.
- */
-
+ */
if (!attr_matched_in_targetattrfilters) {
-
/* match target attr */
- if ((c_attrEval) &&
- (aci->aci_type & ACI_TARGET_ATTR)) {
+ if ((c_attrEval) && (aci->aci_type & ACI_TARGET_ATTR)) {
/* there is a target ATTR */
Targetattr **attrArray = aci->targetAttr;
Targetattr *attr = NULL;
@@ -2773,46 +2741,43 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
matches = (attr_matched ? ACL_TRUE : ACL_FALSE);
}
-
aclpb->aclpb_state &= ~ACLPB_ATTR_STAR_MATCHED;
/* figure out how it matched, i.e star matched */
- if (matches && star_matched && num_attrs == 1 &&
- !(aclpb->aclpb_state & ACLPB_FOUND_ATTR_RULE))
+ if (matches && star_matched && num_attrs == 1 && !(aclpb->aclpb_state & ACLPB_FOUND_ATTR_RULE)) {
aclpb->aclpb_state |= ACLPB_ATTR_STAR_MATCHED;
- else {
+ } else {
/* we are here means that there is a specific
- ** attr in the rule for this resource.
- ** We need to avoid this case
- ** Rule 1: (targetattr = "uid")
- ** Rule 2: (targetattr = "*")
- ** we cannot use STAR optimization
- */
+ * attr in the rule for this resource.
+ * We need to avoid this case
+ * Rule 1: (targetattr = "uid")
+ * Rule 2: (targetattr = "*")
+ * we cannot use STAR optimization
+ */
aclpb->aclpb_state |= ACLPB_FOUND_ATTR_RULE;
aclpb->aclpb_state &= ~ACLPB_ATTR_STAR_MATCHED;
}
- } else if ((c_attrEval) ||
- (aci->aci_type & ACI_TARGET_ATTR)) {
+ } else if ((c_attrEval) || (aci->aci_type & ACI_TARGET_ATTR)) {
if ((aci_right & ACL_RIGHTS_TARGETATTR_NOT_NEEDED) &&
(aclpb->aclpb_access & ACL_RIGHTS_TARGETATTR_NOT_NEEDED)) {
/*
- ** Targetattr rule doesn't make any sense
- ** in this case. So select this rule
- ** default: matches = ACL_TRUE;
- */
+ * Targetattr rule doesn't make any sense
+ * in this case. So select this rule
+ * default: matches = ACL_TRUE;
+ */
;
- } else if (aci_right & SLAPI_ACL_WRITE &&
+ } else if ((aci_right & SLAPI_ACL_WRITE) &&
(aci->aci_type & ACI_TARGET_ATTR) &&
!(c_attrEval) &&
(aci->aci_type & ACI_HAS_ALLOW_RULE)) {
/* We need to handle modrdn operation. Modrdn doesn't
- ** change any attrs but changes the RDN and so (attr=NULL).
- ** Here we found an acl which has a targetattr but
- ** the resource doesn't need one. In that case, we should
- ** consider this acl.
- ** the opposite is true if it is a deny rule, only a deny without
- ** any targetattr should deny modrdn
- ** default: matches = ACL_TRUE;
- */
+ * change any attrs but changes the RDN and so (attr=NULL).
+ * Here we found an acl which has a targetattr but
+ * the resource doesn't need one. In that case, we should
+ * consider this acl.
+ * the opposite is true if it is a deny rule, only a deny without
+ * any targetattr should deny modrdn
+ * default: matches = ACL_TRUE;
+ */
;
} else {
matches = ACL_FALSE;
@@ -2821,16 +2786,16 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a
} /* !attr_matched_in_targetattrfilters */
/*
- ** Here we are testing if we find a entry test rule (which should
- ** be rare). In that case, just remember it. An entry test rule
- ** doesn't have "(targetattr)".
- */
+ * Here we are testing if we find a entry test rule (which should
+ * be rare). In that case, just remember it. An entry test rule
+ * doesn't have "(targetattr)".
+ */
if ((aclpb->aclpb_state & ACLPB_EVALUATING_FIRST_ATTR) &&
(!(aci->aci_type & ACI_TARGET_ATTR))) {
aclpb->aclpb_state |= ACLPB_FOUND_A_ENTRY_TEST_RULE;
}
-/*
+ /*
* Generic exit point for this routine:
* matches is ACL_TRUE if the aci matches the target of the resource,
* ACL_FALSE otherwise.
@@ -2853,6 +2818,7 @@ acl__resource_match_aci_EXIT:
return (matches);
}
+
/* Macro to determine if the cached result is valid or not. */
#define ACL_CACHED_RESULT_VALID(result) \
(((result & ACLPB_CACHE_READ_RES_ALLOW) && \
diff --git a/ldap/servers/slapd/back-ldbm/findentry.c b/ldap/servers/slapd/back-ldbm/findentry.c
index 6e53a0aea..bff751c88 100644
--- a/ldap/servers/slapd/back-ldbm/findentry.c
+++ b/ldap/servers/slapd/back-ldbm/findentry.c
@@ -93,7 +93,6 @@ find_entry_internal_dn(
size_t tries = 0;
int isroot = 0;
int op_type;
- char *errbuf = NULL;
/* get the managedsait ldap message control */
slapi_pblock_get(pb, SLAPI_MANAGEDSAIT, &managedsait);
@@ -207,8 +206,8 @@ find_entry_internal_dn(
break;
}
if (acl_type > 0) {
- err = plugin_call_acl_plugin(pb, me->ep_entry, NULL, NULL, acl_type,
- ACLPLUGIN_ACCESS_DEFAULT, &errbuf);
+ char *dummy_attr = "1.1";
+ err = slapi_access_allowed(pb, me->ep_entry, dummy_attr, NULL, acl_type);
}
if (((acl_type > 0) && err) || (op_type == SLAPI_OPERATION_BIND)) {
/*
@@ -237,7 +236,6 @@ find_entry_internal_dn(
CACHE_RETURN(&inst->inst_cache, &me);
}
- slapi_ch_free_string(&errbuf);
slapi_log_err(SLAPI_LOG_TRACE, "find_entry_internal_dn", "<= Not found (%s)\n",
slapi_sdn_get_dn(sdn));
return (NULL);
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
index c60837601..ca6ea6ef8 100644
--- a/src/lib389/lib389/_mapped_object.py
+++ b/src/lib389/lib389/_mapped_object.py
@@ -1190,7 +1190,7 @@ class DSLdapObjects(DSLogging, DSLints):
# Now actually commit the creation req
return co.ensure_state(rdn, properties, self._basedn)
- def filter(self, search, scope=None):
+ def filter(self, search, scope=None, strict=False):
# This will yield and & filter for objectClass with as many terms as needed.
if search:
search_filter = _gen_and([self._get_objectclass_filter(), search])
@@ -1211,5 +1211,7 @@ class DSLdapObjects(DSLogging, DSLints):
insts = [self._entry_to_instance(dn=r.dn, entry=r) for r in results]
except ldap.NO_SUCH_OBJECT:
# There are no objects to select from, so we return an empty array
+ if strict:
+ raise ldap.NO_SUCH_OBJECT
insts = []
return insts
--
2.26.2
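
The lib389 hunk above adds an opt-in strict mode to DSLdapObjects.filter(). A minimal usage sketch, assuming an already-open DirSrv connection (inst) and an example suffix; both are placeholders, not values taken from this patch:

    import ldap
    from lib389.idm.user import UserAccounts

    def list_uids(inst, suffix="dc=example,dc=com"):
        # inst is assumed to be an already-open lib389 DirSrv connection.
        users = UserAccounts(inst, suffix)
        # Default behaviour: a missing search base silently yields an empty list.
        lenient = users.filter("(uid=*)")
        # strict=True surfaces the missing base as ldap.NO_SUCH_OBJECT instead.
        try:
            strict = users.filter("(uid=*)", strict=True)
        except ldap.NO_SUCH_OBJECT:
            strict = []
        return lenient, strict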

View File

@ -1,145 +0,0 @@
From 4fb3023a55529c9d5332e3425ae8da590a8ebb69 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 1 Feb 2021 09:28:25 +0100
Subject: [PATCH 3/4] Issue 4581 - A failed re-indexing leaves the database in
broken state (#4582)
Bug description:
During reindex the numsubordinates attribute is not updated in parent entries.
The consequence is that the internal counter job->numsubordinates==0.
Later when indexing the ancestorid, the server can show the progression of this
indexing with a ratio using job->numsubordinates==0.
Division with 0 -> SIGFPE
Fix description:
if the numsubordinates is NULL, log a message without a division.
relates: https://github.com/389ds/389-ds-base/issues/4581
Reviewed by: Pierre Rogier, Mark Reynolds, Simon Pichugin, Teko Mihinto (thanks !!)
Platforms tested: F31
---
.../slapd/back-ldbm/db-bdb/bdb_import.c | 72 ++++++++++++++-----
1 file changed, 54 insertions(+), 18 deletions(-)
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
index 15574e60f..9713b52f6 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
@@ -468,18 +468,30 @@ bdb_get_nonleaf_ids(backend *be, DB_TXN *txn, IDList **idl, ImportJob *job)
}
key_count++;
if (!(key_count % PROGRESS_INTERVAL)) {
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
- "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
+ "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
+ "Gathering ancestorid non-leaf IDs: processed %d ancestors...",
+ key_count);
+ }
started_progress_logging = 1;
}
} while (ret == 0 && !(job->flags & FLAG_ABORT));
if (started_progress_logging) {
/* finish what we started logging */
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
- "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
+ "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
+ "Gathering ancestorid non-leaf IDs: processed %d ancestors",
+ key_count);
+ }
}
import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
"Finished gathering ancestorid non-leaf IDs.");
@@ -660,9 +672,15 @@ bdb_ancestorid_default_create_index(backend *be, ImportJob *job)
key_count++;
if (!(key_count % PROGRESS_INTERVAL)) {
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
- "Creating ancestorid index: processed %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+ "Creating ancestorid index: processed %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+ "Creating ancestorid index: processed %d ancestors...",
+ key_count);
+ }
started_progress_logging = 1;
}
@@ -743,9 +761,15 @@ out:
if (ret == 0) {
if (started_progress_logging) {
/* finish what we started logging */
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
- "Creating ancestorid index: processed %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+ "Creating ancestorid index: processed %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+ "Creating ancestorid index: processed %d ancestors",
+ key_count);
+ }
}
import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
"Created ancestorid index (old idl).");
@@ -869,9 +893,15 @@ bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job)
key_count++;
if (!(key_count % PROGRESS_INTERVAL)) {
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
- "Creating ancestorid index: progress %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+ "Creating ancestorid index: progress %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+ "Creating ancestorid index: progress %d ancestors...",
+ key_count);
+ }
started_progress_logging = 1;
}
@@ -932,9 +962,15 @@ out:
if (ret == 0) {
if (started_progress_logging) {
/* finish what we started logging */
- import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
- "Creating ancestorid index: processed %d%% (ID count %d)",
- (key_count * 100 / job->numsubordinates), key_count);
+ if (job->numsubordinates) {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+ "Creating ancestorid index: processed %d%% (ID count %d)",
+ (key_count * 100 / job->numsubordinates), key_count);
+ } else {
+ import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+ "Creating ancestorid index: processed %d ancestors",
+ key_count);
+ }
}
import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
"Created ancestorid index (new idl).");
--
2.26.2
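
The fix above repeats one small guard at four call sites: compute the percentage only when job->numsubordinates is non-zero, otherwise log a plain count. A compact sketch of that guard (Python, illustrative only; the shipped code is the C shown above):

    def log_progress(key_count, numsubordinates, log=print):
        # Percentage form only when the denominator is known to be non-zero,
        # so a zeroed numsubordinates can no longer cause a divide-by-zero (SIGFPE).
        if numsubordinates:
            log("Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)"
                % (key_count * 100 // numsubordinates, key_count))
        else:
            log("Gathering ancestorid non-leaf IDs: processed %d ancestors..." % key_count)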

View File

@ -1,163 +0,0 @@
From 861f17d2cb50fc649feee004be1ce08d2e3873f8 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 9 Feb 2021 14:02:59 -0500
Subject: [PATCH 4/4] Issue 4609 - CVE - info disclosure when authenticating
Description: If you bind as a user that does not exist, error 49 is returned
instead of error 32, as error 32 would disclose that the entry does
not exist. When you bind as an entry that does not have userpassword
set, then error 48 (inappropriate auth) is returned, but this
discloses that the entry does indeed exist. Instead we should
always return error 49, even if the password is not set in the
entry. This way we do not disclose to an attacker whether the Bind
DN exists or not.
Relates: https://github.com/389ds/389-ds-base/issues/4609
Reviewed by: tbordaz(Thanks!)
---
dirsrvtests/tests/suites/basic/basic_test.py | 72 +++++++++++++++++++-
ldap/servers/slapd/back-ldbm/ldbm_bind.c | 4 +-
ldap/servers/slapd/dse.c | 7 +-
3 files changed, 78 insertions(+), 5 deletions(-)
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index e9afa1e7e..6244782fa 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -13,7 +13,7 @@
from subprocess import check_output, PIPE, run
from lib389 import DirSrv
-from lib389.idm.user import UserAccounts
+from lib389.idm.user import UserAccount, UserAccounts
import pytest
from lib389.tasks import *
from lib389.utils import *
@@ -1062,6 +1062,76 @@ def test_search_ou(topology_st):
assert len(entries) == 0
+def test_bind_invalid_entry(topology_st):
+ """Test the failing bind does not return information about the entry
+
+ :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f
+ :customerscenario: True
+ :setup: Standalone instance
+ :steps:
+ 1: bind as non existing entry
+ 2: check that bind info does not report 'No such entry'
+ :expectedresults:
+ 1: pass
+ 2: pass
+ """
+
+ topology_st.standalone.restart()
+ INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX
+ try:
+ topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD)
+ except ldap.LDAPError as e:
+ log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY)
+ log.info('exception description: ' + e.args[0]['desc'])
+ if 'info' in e.args[0]:
+ log.info('exception info: ' + e.args[0]['info'])
+ assert e.args[0]['desc'] == 'Invalid credentials'
+ assert 'info' not in e.args[0]
+ pass
+
+ log.info('test_bind_invalid_entry: PASSED')
+
+ # reset credentials
+ topology_st.standalone.simple_bind_s(DN_DM, PW_DM)
+
+
+def test_bind_entry_missing_passwd(topology_st):
+ """
+ :id: af209149-8fb8-48cb-93ea-3e82dd7119d2
+ :setup: Standalone Instance
+ :steps:
+ 1. Bind as database entry that does not have userpassword set
+ 2. Bind as database entry that does not exist
+ 1. Bind as cn=config entry that does not have userpassword set
+ 2. Bind as cn=config entry that does not exist
+ :expectedresults:
+ 1. Fails with error 49
+ 2. Fails with error 49
+ 3. Fails with error 49
+ 4. Fails with error 49
+ """
+ user = UserAccount(topology_st.standalone, DEFAULT_SUFFIX)
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ # Bind as the suffix root entry which does not have a userpassword
+ user.bind("some_password")
+
+ user = UserAccount(topology_st.standalone, "cn=not here," + DEFAULT_SUFFIX)
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ # Bind as the entry which does not exist
+ user.bind("some_password")
+
+ # Test cn=config since it has its own code path
+ user = UserAccount(topology_st.standalone, "cn=config")
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ # Bind as the config entry which does not have a userpassword
+ user.bind("some_password")
+
+ user = UserAccount(topology_st.standalone, "cn=does not exist,cn=config")
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ # Bind as an entry under cn=config that does not exist
+ user.bind("some_password")
+
+
@pytest.mark.bz1044135
@pytest.mark.ds47319
def test_connection_buffer_size(topology_st):
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_bind.c b/ldap/servers/slapd/back-ldbm/ldbm_bind.c
index fa450ecd5..38d115a32 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_bind.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_bind.c
@@ -76,8 +76,8 @@ ldbm_back_bind(Slapi_PBlock *pb)
case LDAP_AUTH_SIMPLE: {
Slapi_Value cv;
if (slapi_entry_attr_find(e->ep_entry, "userpassword", &attr) != 0) {
- slapi_send_ldap_result(pb, LDAP_INAPPROPRIATE_AUTH, NULL,
- NULL, 0, NULL);
+ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "Entry does not have userpassword set");
+ slapi_send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL, NULL, 0, NULL);
CACHE_RETURN(&inst->inst_cache, &e);
rc = SLAPI_BIND_FAIL;
goto bail;
diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
index 0e22d3cec..0d3268046 100644
--- a/ldap/servers/slapd/dse.c
+++ b/ldap/servers/slapd/dse.c
@@ -1443,7 +1443,8 @@ dse_bind(Slapi_PBlock *pb) /* JCM There should only be one exit point from this
ec = dse_get_entry_copy(pdse, sdn, DSE_USE_LOCK);
if (ec == NULL) {
- slapi_send_ldap_result(pb, LDAP_NO_SUCH_OBJECT, NULL, NULL, 0, NULL);
+ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "Entry does not exist");
+ slapi_send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL, NULL, 0, NULL);
return (SLAPI_BIND_FAIL);
}
@@ -1451,7 +1452,8 @@ dse_bind(Slapi_PBlock *pb) /* JCM There should only be one exit point from this
case LDAP_AUTH_SIMPLE: {
Slapi_Value cv;
if (slapi_entry_attr_find(ec, "userpassword", &attr) != 0) {
- slapi_send_ldap_result(pb, LDAP_INAPPROPRIATE_AUTH, NULL, NULL, 0, NULL);
+ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "Entry does not have userpassword set");
+ slapi_send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL, NULL, 0, NULL);
slapi_entry_free(ec);
return SLAPI_BIND_FAIL;
}
@@ -1459,6 +1461,7 @@ dse_bind(Slapi_PBlock *pb) /* JCM There should only be one exit point from this
slapi_value_init_berval(&cv, cred);
if (slapi_pw_find_sv(bvals, &cv) != 0) {
+ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, "Invalid credentials");
slapi_send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL, NULL, 0, NULL);
slapi_entry_free(ec);
value_done(&cv);
--
2.26.2
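
The same behaviour can also be checked from a plain python-ldap client, outside of the lib389 test added above; the URI and DN below are placeholders:

    import ldap

    def bind_is_opaque(uri="ldap://localhost:389",
                       dn="cn=does not exist,dc=example,dc=com"):
        conn = ldap.initialize(uri)
        try:
            conn.simple_bind_s(dn, "some_password")
        except ldap.INVALID_CREDENTIALS:
            # err=49 for both a missing entry and an entry without userpassword:
            # the server no longer reveals whether the bind DN exists.
            return True
        return False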

View File

@ -1,97 +0,0 @@
From 82db41ae6f76464a6ee3cbfdca8019bc809b3cf3 Mon Sep 17 00:00:00 2001
From: William Brown <william@blackhats.net.au>
Date: Thu, 26 Nov 2020 09:08:13 +1000
Subject: [PATCH] Issue 4460 - BUG - lib389 should use system tls policy
Bug Description: Due to some changes in dsrc for tlsreqcert
and how def open was structured in lib389, the system ldap.conf
policy was ignored.
Fix Description: Default to using the system ldap.conf policy
if undefined in lib389 or the tls_reqcert param in dsrc.
fixes: #4460
Author: William Brown <william@blackhats.net.au>
Review by: ???
---
src/lib389/lib389/__init__.py | 11 +++++++----
src/lib389/lib389/cli_base/dsrc.py | 16 +++++++++-------
2 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 63d44b60a..dc18b2bfe 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -962,7 +962,7 @@ class DirSrv(SimpleLDAPObject, object):
# Now, we are still an allocated ds object so we can be re-installed
self.state = DIRSRV_STATE_ALLOCATED
- def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=ldap.OPT_X_TLS_HARD,
+ def open(self, uri=None, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=None,
usercert=None, userkey=None):
'''
It opens a ldap bound connection to dirsrv so that online
@@ -1025,9 +1025,12 @@ class DirSrv(SimpleLDAPObject, object):
try:
# Note this sets LDAP.OPT not SELF. Because once self has opened
# it can NOT change opts on reused (ie restart)
- self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert)
- self.log.debug("Using certificate policy %s", reqcert)
- self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", reqcert)
+ if reqcert is not None:
+ self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert)
+ self.log.debug("Using lib389 certificate policy %s", reqcert)
+ else:
+ self.log.debug("Using /etc/openldap/ldap.conf certificate policy")
+ self.log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s", self.get_option(ldap.OPT_X_TLS_REQUIRE_CERT))
except ldap.LDAPError as e:
self.log.fatal('TLS negotiation failed: %s', e)
raise e
diff --git a/src/lib389/lib389/cli_base/dsrc.py b/src/lib389/lib389/cli_base/dsrc.py
index 9cad23437..8a4a2a55d 100644
--- a/src/lib389/lib389/cli_base/dsrc.py
+++ b/src/lib389/lib389/cli_base/dsrc.py
@@ -45,7 +45,7 @@ def dsrc_arg_concat(args, dsrc_inst):
'tls_cacertdir': None,
'tls_cert': None,
'tls_key': None,
- 'tls_reqcert': ldap.OPT_X_TLS_HARD,
+ 'tls_reqcert': None,
'starttls': args.starttls,
'prompt': False,
'pwdfile': None,
@@ -134,21 +134,23 @@ def dsrc_to_ldap(path, instance_name, log):
dsrc_inst['binddn'] = config.get(instance_name, 'binddn', fallback=None)
dsrc_inst['saslmech'] = config.get(instance_name, 'saslmech', fallback=None)
if dsrc_inst['saslmech'] is not None and dsrc_inst['saslmech'] not in ['EXTERNAL', 'PLAIN']:
- raise Exception("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name))
+ raise ValueError("%s [%s] saslmech must be one of EXTERNAL or PLAIN" % (path, instance_name))
dsrc_inst['tls_cacertdir'] = config.get(instance_name, 'tls_cacertdir', fallback=None)
dsrc_inst['tls_cert'] = config.get(instance_name, 'tls_cert', fallback=None)
dsrc_inst['tls_key'] = config.get(instance_name, 'tls_key', fallback=None)
- dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback='hard')
- if dsrc_inst['tls_reqcert'] not in ['never', 'allow', 'hard']:
- raise Exception("dsrc tls_reqcert value invalid. %s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name,
- path))
+ dsrc_inst['tls_reqcert'] = config.get(instance_name, 'tls_reqcert', fallback=None)
if dsrc_inst['tls_reqcert'] == 'never':
dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_NEVER
elif dsrc_inst['tls_reqcert'] == 'allow':
dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_ALLOW
- else:
+ elif dsrc_inst['tls_reqcert'] == 'hard':
dsrc_inst['tls_reqcert'] = ldap.OPT_X_TLS_HARD
+ elif dsrc_inst['tls_reqcert'] is None:
+ # Use system value
+ pass
+ else:
+ raise ValueError("dsrc tls_reqcert value invalid. %s [%s] tls_reqcert should be one of never, allow or hard" % (instance_name, path))
dsrc_inst['starttls'] = config.getboolean(instance_name, 'starttls', fallback=False)
dsrc_inst['pwdfile'] = None
dsrc_inst['prompt'] = False
--
2.26.2
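
For reference, a hypothetical ~/.dsrc entry matching the new default: with tls_reqcert left unset, the TLS_REQCERT policy from /etc/openldap/ldap.conf applies, and setting it to never, allow or hard overrides that policy. The section name, URI and DNs below are illustrative placeholders, not values from this patch:

    [localhost]
    uri = ldaps://ldap.example.com
    basedn = dc=example,dc=com
    binddn = cn=Directory Manager
    # tls_reqcert = hard   (uncomment only to override the system ldap.conf policy)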

View File

@ -1,39 +0,0 @@
From 2d6ca042adcf0dc2bbf9b898d698bbf62514c4a5 Mon Sep 17 00:00:00 2001
From: Firstyear <william@blackhats.net.au>
Date: Fri, 4 Dec 2020 10:14:33 +1000
Subject: [PATCH] Issue 4460 - BUG - add machine name to subject alt names in
SSCA (#4472)
Bug Description: During SSCA creation, the server cert did not have
the machine name, which meant that the cert would not work without
reqcert = never.
Fix Description: Add the machine name as an alt name during SSCA
creation. It is not guaranteed this value is correct, but it
is better than nothing.
relates: https://github.com/389ds/389-ds-base/issues/4460
Author: William Brown <william@blackhats.net.au>
Review by: mreynolds389, droideck
---
src/lib389/lib389/instance/setup.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index 45c7dfdd4..21260ee20 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -870,7 +870,7 @@ class SetupDs(object):
tlsdb_inst = NssSsl(dbpath=os.path.join(etc_dirsrv_path, dir))
tlsdb_inst.import_rsa_crt(ca)
- csr = tlsdb.create_rsa_key_and_csr()
+ csr = tlsdb.create_rsa_key_and_csr(alt_names=[general['full_machine_name']])
(ca, crt) = ssca.rsa_ca_sign_csr(csr)
tlsdb.import_rsa_crt(ca, crt)
if general['selinux']:
--
2.26.2
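
A short sketch of the call flow this one-line change affects, built only from the calls visible in the hunk; the NssSsl import path and the use of the local hostname are assumptions:

    import socket
    from lib389.nss_ssl import NssSsl

    def issue_server_cert(tlsdb: NssSsl, ssca, fqdn=None):
        # Carry the machine name as a subject alt name so the issued certificate
        # can verify without relaxing tls_reqcert to "never".
        fqdn = fqdn or socket.gethostname()
        csr = tlsdb.create_rsa_key_and_csr(alt_names=[fqdn])
        (ca, crt) = ssca.rsa_ca_sign_csr(csr)
        tlsdb.import_rsa_crt(ca, crt)
        return crt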

View File

@ -0,0 +1,933 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "addr2line"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
dependencies = [
"gimli",
]
[[package]]
name = "adler"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "ahash"
version = "0.7.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd"
dependencies = [
"getrandom",
"once_cell",
"version_check",
]
[[package]]
name = "ansi_term"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
dependencies = [
"winapi",
]
[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
"hermit-abi",
"libc",
"winapi",
]
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "backtrace"
version = "0.3.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837"
dependencies = [
"addr2line",
"cc",
"cfg-if",
"libc",
"miniz_oxide",
"object",
"rustc-demangle",
]
[[package]]
name = "base64"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07"
[[package]]
name = "byteorder"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "cbindgen"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
dependencies = [
"clap",
"log",
"proc-macro2",
"quote",
"serde",
"serde_json",
"syn 1.0.109",
"tempfile",
"toml",
]
[[package]]
name = "cc"
version = "1.0.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
dependencies = [
"jobserver",
"libc",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "clap"
version = "2.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
dependencies = [
"ansi_term",
"atty",
"bitflags 1.3.2",
"strsim",
"textwrap",
"unicode-width",
"vec_map",
]
[[package]]
name = "concread"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcc9816f5ac93ebd51c37f7f9a6bf2b40dfcd42978ad2aea5d542016e9244cf6"
dependencies = [
"ahash",
"crossbeam",
"crossbeam-epoch",
"crossbeam-utils",
"lru",
"parking_lot",
"rand",
"smallvec",
"tokio",
]
[[package]]
name = "crossbeam"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8"
dependencies = [
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-epoch",
"crossbeam-queue",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-queue"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"
[[package]]
name = "entryuuid"
version = "0.1.0"
dependencies = [
"cc",
"libc",
"paste",
"slapi_r_plugin",
"uuid",
]
[[package]]
name = "entryuuid_syntax"
version = "0.1.0"
dependencies = [
"cc",
"libc",
"paste",
"slapi_r_plugin",
"uuid",
]
[[package]]
name = "errno"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245"
dependencies = [
"libc",
"windows-sys",
]
[[package]]
name = "fastrand"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5"
[[package]]
name = "fernet"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
dependencies = [
"base64",
"byteorder",
"getrandom",
"openssl",
"zeroize",
]
[[package]]
name = "foreign-types"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
dependencies = [
"foreign-types-shared",
]
[[package]]
name = "foreign-types-shared"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "getrandom"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "gimli"
version = "0.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
[[package]]
name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
dependencies = [
"ahash",
]
[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
"libc",
]
[[package]]
name = "instant"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
dependencies = [
"cfg-if",
]
[[package]]
name = "itoa"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
[[package]]
name = "jobserver"
version = "0.1.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d"
dependencies = [
"libc",
]
[[package]]
name = "libc"
version = "0.2.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7"
[[package]]
name = "librnsslapd"
version = "0.1.0"
dependencies = [
"cbindgen",
"libc",
"slapd",
]
[[package]]
name = "librslapd"
version = "0.1.0"
dependencies = [
"cbindgen",
"concread",
"libc",
"slapd",
]
[[package]]
name = "linux-raw-sys"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456"
[[package]]
name = "lock_api"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
[[package]]
name = "lru"
version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a"
dependencies = [
"hashbrown",
]
[[package]]
name = "memchr"
version = "2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149"
[[package]]
name = "miniz_oxide"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
dependencies = [
"adler",
]
[[package]]
name = "object"
version = "0.32.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441"
dependencies = [
"memchr",
]
[[package]]
name = "once_cell"
version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
[[package]]
name = "openssl"
version = "0.10.62"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671"
dependencies = [
"bitflags 2.4.1",
"cfg-if",
"foreign-types",
"libc",
"once_cell",
"openssl-macros",
"openssl-sys",
]
[[package]]
name = "openssl-macros"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.48",
]
[[package]]
name = "openssl-sys"
version = "0.9.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7"
dependencies = [
"cc",
"libc",
"pkg-config",
"vcpkg",
]
[[package]]
name = "parking_lot"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
dependencies = [
"instant",
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
dependencies = [
"cfg-if",
"instant",
"libc",
"redox_syscall 0.2.16",
"smallvec",
"winapi",
]
[[package]]
name = "paste"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
dependencies = [
"paste-impl",
"proc-macro-hack",
]
[[package]]
name = "paste-impl"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
dependencies = [
"proc-macro-hack",
]
[[package]]
name = "pin-project-lite"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
[[package]]
name = "pkg-config"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a"
[[package]]
name = "ppv-lite86"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "proc-macro-hack"
version = "0.5.20+deprecated"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
[[package]]
name = "proc-macro2"
version = "1.0.76"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pwdchan"
version = "0.1.0"
dependencies = [
"base64",
"cc",
"libc",
"openssl",
"paste",
"slapi_r_plugin",
"uuid",
]
[[package]]
name = "quote"
version = "1.0.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom",
]
[[package]]
name = "redox_syscall"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
"bitflags 1.3.2",
]
[[package]]
name = "redox_syscall"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa"
dependencies = [
"bitflags 1.3.2",
]
[[package]]
name = "rsds"
version = "0.1.0"
[[package]]
name = "rustc-demangle"
version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
[[package]]
name = "rustix"
version = "0.38.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca"
dependencies = [
"bitflags 2.4.1",
"errno",
"libc",
"linux-raw-sys",
"windows-sys",
]
[[package]]
name = "ryu"
version = "1.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c"
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "serde"
version = "1.0.195"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.195"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.48",
]
[[package]]
name = "serde_json"
version = "1.0.111"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "slapd"
version = "0.1.0"
dependencies = [
"fernet",
]
[[package]]
name = "slapi_r_plugin"
version = "0.1.0"
dependencies = [
"libc",
"paste",
"uuid",
]
[[package]]
name = "smallvec"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2593d31f82ead8df961d8bd23a64c2ccf2eb5dd34b0a34bfb4dd54011c72009e"
[[package]]
name = "strsim"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.48"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "tempfile"
version = "3.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa"
dependencies = [
"cfg-if",
"fastrand",
"redox_syscall 0.4.1",
"rustix",
"windows-sys",
]
[[package]]
name = "textwrap"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
dependencies = [
"unicode-width",
]
[[package]]
name = "tokio"
version = "1.35.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104"
dependencies = [
"backtrace",
"pin-project-lite",
"tokio-macros",
]
[[package]]
name = "tokio-macros"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.48",
]
[[package]]
name = "toml"
version = "0.5.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234"
dependencies = [
"serde",
]
[[package]]
name = "unicode-ident"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "unicode-width"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85"
[[package]]
name = "uuid"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
dependencies = [
"getrandom",
]
[[package]]
name = "vcpkg"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
[[package]]
name = "vec_map"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows-sys"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
[[package]]
name = "windows_i686_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
[[package]]
name = "windows_i686_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
[[package]]
name = "zeroize"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d"
dependencies = [
"zeroize_derive",
]
[[package]]
name = "zeroize_derive"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.48",
]

View File

@ -16,7 +16,7 @@ ExcludeArch: i686
%global use_Socket6 0
%global use_asan 0
%global use_rust 0
%global use_rust 1
%global use_legacy 1
%global bundle_jemalloc 1
%if %{use_asan}
@ -25,7 +25,7 @@ ExcludeArch: i686
%if %{bundle_jemalloc}
%global jemalloc_name jemalloc
%global jemalloc_ver 5.2.1
%global jemalloc_ver 5.3.0
%global __provides_exclude ^libjemalloc\\.so.*$
%endif
@ -42,11 +42,14 @@ ExcludeArch: i686
# set PIE flag
%global _hardened_build 1
# Filter argparse-manpage from autogenerated package Requires
%global __requires_exclude ^python.*argparse-manpage
Summary: 389 Directory Server (base)
Name: 389-ds-base
Version: 1.4.3.8
Release: %{?relprefix}7%{?prerel}%{?dist}
License: GPLv3+
Version: 1.4.3.39
Release: %{?relprefix}3%{?prerel}%{?dist}
License: GPLv3+ and (ASL 2.0 or MIT)
URL: https://www.port389.org
Group: System Environment/Daemons
Conflicts: selinux-policy-base < 3.9.8
@ -54,8 +57,117 @@ Conflicts: freeipa-server < 4.0.3
Obsoletes: %{name} <= 1.4.0.9
Provides: ldif2ldbm >= 0
BuildRequires: nspr-devel
BuildRequires: nss-devel >= 3.34
##### Bundled cargo crates list - START #####
Provides: bundled(crate(addr2line)) = 0.21.0
Provides: bundled(crate(adler)) = 1.0.2
Provides: bundled(crate(ahash)) = 0.7.7
Provides: bundled(crate(ansi_term)) = 0.12.1
Provides: bundled(crate(atty)) = 0.2.14
Provides: bundled(crate(autocfg)) = 1.1.0
Provides: bundled(crate(backtrace)) = 0.3.69
Provides: bundled(crate(base64)) = 0.13.1
Provides: bundled(crate(bitflags)) = 1.3.2
Provides: bundled(crate(bitflags)) = 2.4.1
Provides: bundled(crate(byteorder)) = 1.5.0
Provides: bundled(crate(cbindgen)) = 0.9.1
Provides: bundled(crate(cc)) = 1.0.83
Provides: bundled(crate(cfg-if)) = 1.0.0
Provides: bundled(crate(clap)) = 2.34.0
Provides: bundled(crate(concread)) = 0.2.21
Provides: bundled(crate(crossbeam)) = 0.8.4
Provides: bundled(crate(crossbeam-channel)) = 0.5.11
Provides: bundled(crate(crossbeam-deque)) = 0.8.5
Provides: bundled(crate(crossbeam-epoch)) = 0.9.18
Provides: bundled(crate(crossbeam-queue)) = 0.3.11
Provides: bundled(crate(crossbeam-utils)) = 0.8.19
Provides: bundled(crate(entryuuid)) = 0.1.0
Provides: bundled(crate(entryuuid_syntax)) = 0.1.0
Provides: bundled(crate(errno)) = 0.3.8
Provides: bundled(crate(fastrand)) = 2.0.1
Provides: bundled(crate(fernet)) = 0.1.4
Provides: bundled(crate(foreign-types)) = 0.3.2
Provides: bundled(crate(foreign-types-shared)) = 0.1.1
Provides: bundled(crate(getrandom)) = 0.2.12
Provides: bundled(crate(gimli)) = 0.28.1
Provides: bundled(crate(hashbrown)) = 0.12.3
Provides: bundled(crate(hermit-abi)) = 0.1.19
Provides: bundled(crate(instant)) = 0.1.12
Provides: bundled(crate(itoa)) = 1.0.10
Provides: bundled(crate(jobserver)) = 0.1.27
Provides: bundled(crate(libc)) = 0.2.152
Provides: bundled(crate(librnsslapd)) = 0.1.0
Provides: bundled(crate(librslapd)) = 0.1.0
Provides: bundled(crate(linux-raw-sys)) = 0.4.12
Provides: bundled(crate(lock_api)) = 0.4.11
Provides: bundled(crate(log)) = 0.4.20
Provides: bundled(crate(lru)) = 0.7.8
Provides: bundled(crate(memchr)) = 2.7.1
Provides: bundled(crate(miniz_oxide)) = 0.7.1
Provides: bundled(crate(object)) = 0.32.2
Provides: bundled(crate(once_cell)) = 1.19.0
Provides: bundled(crate(openssl)) = 0.10.62
Provides: bundled(crate(openssl-macros)) = 0.1.1
Provides: bundled(crate(openssl-sys)) = 0.9.98
Provides: bundled(crate(parking_lot)) = 0.11.2
Provides: bundled(crate(parking_lot_core)) = 0.8.6
Provides: bundled(crate(paste)) = 0.1.18
Provides: bundled(crate(paste-impl)) = 0.1.18
Provides: bundled(crate(pin-project-lite)) = 0.2.13
Provides: bundled(crate(pkg-config)) = 0.3.28
Provides: bundled(crate(ppv-lite86)) = 0.2.17
Provides: bundled(crate(proc-macro-hack)) = 0.5.20+deprecated
Provides: bundled(crate(proc-macro2)) = 1.0.76
Provides: bundled(crate(pwdchan)) = 0.1.0
Provides: bundled(crate(quote)) = 1.0.35
Provides: bundled(crate(rand)) = 0.8.5
Provides: bundled(crate(rand_chacha)) = 0.3.1
Provides: bundled(crate(rand_core)) = 0.6.4
Provides: bundled(crate(redox_syscall)) = 0.2.16
Provides: bundled(crate(redox_syscall)) = 0.4.1
Provides: bundled(crate(rsds)) = 0.1.0
Provides: bundled(crate(rustc-demangle)) = 0.1.23
Provides: bundled(crate(rustix)) = 0.38.30
Provides: bundled(crate(ryu)) = 1.0.16
Provides: bundled(crate(scopeguard)) = 1.2.0
Provides: bundled(crate(serde)) = 1.0.195
Provides: bundled(crate(serde_derive)) = 1.0.195
Provides: bundled(crate(serde_json)) = 1.0.111
Provides: bundled(crate(slapd)) = 0.1.0
Provides: bundled(crate(slapi_r_plugin)) = 0.1.0
Provides: bundled(crate(smallvec)) = 1.12.0
Provides: bundled(crate(strsim)) = 0.8.0
Provides: bundled(crate(syn)) = 1.0.109
Provides: bundled(crate(syn)) = 2.0.48
Provides: bundled(crate(tempfile)) = 3.9.0
Provides: bundled(crate(textwrap)) = 0.11.0
Provides: bundled(crate(tokio)) = 1.35.1
Provides: bundled(crate(tokio-macros)) = 2.2.0
Provides: bundled(crate(toml)) = 0.5.11
Provides: bundled(crate(unicode-ident)) = 1.0.12
Provides: bundled(crate(unicode-width)) = 0.1.11
Provides: bundled(crate(uuid)) = 0.8.2
Provides: bundled(crate(vcpkg)) = 0.2.15
Provides: bundled(crate(vec_map)) = 0.8.2
Provides: bundled(crate(version_check)) = 0.9.4
Provides: bundled(crate(wasi)) = 0.11.0+wasi_snapshot_preview1
Provides: bundled(crate(winapi)) = 0.3.9
Provides: bundled(crate(winapi-i686-pc-windows-gnu)) = 0.4.0
Provides: bundled(crate(winapi-x86_64-pc-windows-gnu)) = 0.4.0
Provides: bundled(crate(windows-sys)) = 0.52.0
Provides: bundled(crate(windows-targets)) = 0.52.0
Provides: bundled(crate(windows_aarch64_gnullvm)) = 0.52.0
Provides: bundled(crate(windows_aarch64_msvc)) = 0.52.0
Provides: bundled(crate(windows_i686_gnu)) = 0.52.0
Provides: bundled(crate(windows_i686_msvc)) = 0.52.0
Provides: bundled(crate(windows_x86_64_gnu)) = 0.52.0
Provides: bundled(crate(windows_x86_64_gnullvm)) = 0.52.0
Provides: bundled(crate(windows_x86_64_msvc)) = 0.52.0
Provides: bundled(crate(zeroize)) = 1.7.0
Provides: bundled(crate(zeroize_derive)) = 1.4.2
##### Bundled cargo crates list - END #####
BuildRequires: nspr-devel >= 4.32
BuildRequires: nss-devel >= 3.67.0-7
BuildRequires: perl-generators
BuildRequires: openldap-devel
BuildRequires: libdb-devel
@ -115,6 +227,7 @@ BuildRequires: python%{python3_pkgversion}-argcomplete
BuildRequires: python%{python3_pkgversion}-argparse-manpage
BuildRequires: python%{python3_pkgversion}-policycoreutils
BuildRequires: python%{python3_pkgversion}-libselinux
BuildRequires: python%{python3_pkgversion}-cryptography
# For cockpit
BuildRequires: rsync
@ -137,7 +250,8 @@ Requires: python%{python3_pkgversion}-ldap
# this is needed to setup SSL if you are not using the
# administration server package
Requires: nss-tools
Requires: nss >= 3.34
Requires: nspr >= 4.32
Requires: nss >= 3.67.0-7
# these are not found by the auto-dependency method
# they are required to support the mandatory LDAP SASL mechs
@ -174,45 +288,16 @@ Source2: %{name}-devel.README
%if %{bundle_jemalloc}
Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2
%endif
Patch01: 0001-Issue-51076-prevent-unnecessarily-duplication-of-the.patch
Patch02: 0002-Ticket-51082-abort-when-a-empty-valueset-is-freed.patch
Patch03: 0003-Issue-51091-healthcheck-json-report-fails-when-mappi.patch
Patch04: 0004-Issue-51076-remove-unnecessary-slapi-entry-dups.patch
Patch05: 0005-Issue-51086-Improve-dscreate-instance-name-validatio.patch
Patch06: 0006-Issue-51102-RFE-ds-replcheck-make-online-timeout-con.patch
Patch07: 0007-Issue-51110-Fix-ASAN-ODR-warnings.patch
Patch08: 0008-Issue-51095-abort-operation-if-CSN-can-not-be-genera.patch
Patch09: 0009-Issue-51113-Allow-using-uid-for-replication-manager-.patch
Patch10: 0010-Issue-50931-RFE-AD-filter-rewriter-for-ObjectCategor.patch
Patch11: 0011-Issue-50746-Add-option-to-healthcheck-to-list-all-th.patch
Patch12: 0012-Issue-50984-Memory-leaks-in-disk-monitoring.patch
Patch13: 0013-Issue-50984-Memory-leaks-in-disk-monitoring.patch
Patch14: 0014-Issue-50201-nsIndexIDListScanLimit-accepts-any-value.patch
Patch15: 0015-Issue-51157-Reindex-task-may-create-abandoned-index-.patch
Patch16: 0016-Issue-51165-add-new-access-log-keywords-for-wtime-an.patch
Patch17: 0017-Issue-50912-pwdReset-can-be-modified-by-a-user.patch
Patch18: 0018-Issue-50791-Healthcheck-should-look-for-notes-A-F-in.patch
Patch19: 0019-Issue-51144-dsctl-fails-with-instance-names-that-con.patch
Patch20: 0020-Ticket-49859-A-distinguished-value-can-be-missing-in.patch
Patch21: 0021-Issue-49256-log-warning-when-thread-number-is-very-d.patch
Patch22: 0022-Issue-51188-db2ldif-crashes-when-LDIF-file-can-t-be-.patch
Patch23: 0023-Issue-51086-Fix-instance-name-length-for-interactive.patch
Patch24: 0024-Issue-51129-SSL-alert-The-value-of-sslVersionMax-TLS.patch
Patch25: 0025-Issue-50984-Memory-leaks-in-disk-monitoring.patch
Patch26: 0026-Issue-4297-On-ADD-replication-URP-issue-internal-sea.patch
Patch27: 0027-Issue-4297-2nd-fix-for-on-ADD-replication-URP-issue-.patch
Patch28: 0028-Issue-51233-ds-replcheck-crashes-in-offline-mode.patch
Patch29: 0029-Issue-4429-NULL-dereference-in-revert_cache.patch
Patch30: 0030-ticket-2058-Add-keep-alive-entry-after-on-line-initi.patch
Patch31: 0031-do-not-add-referrals-for-masters-with-different-data.patch
Patch32: 0032-Issue-4383-Do-not-normalize-escaped-spaces-in-a-DN.patch
Patch33: 0033-Issue-49300-entryUSN-is-duplicated-after-memberOf-op.patch
Patch34: 0034-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch
Patch35: 0035-Issue-5442-Search-results-are-different-between-RHDS.patch
Patch36: 0036-Issue-4581-A-failed-re-indexing-leaves-the-database-.patch
Patch37: 0037-Issue-4609-CVE-info-disclosure-when-authenticating.patch
Patch38: 0038-Issue-4460-BUG-lib389-should-use-system-tls-policy.patch
Patch39: 0039-Issue-4460-BUG-add-machine-name-to-subject-alt-names.patch
%if %{use_rust}
Source4: vendor-%{version}-1.tar.gz
Source5: Cargo-%{version}-1.lock
%endif
Patch01: 0001-issue-5647-covscan-memory-leak-in-audit-log-when-add.patch
Patch02: 0002-Issue-5647-Fix-unused-variable-warning-from-previous.patch
Patch03: 0003-Issue-5407-sync_repl-crashes-if-enabled-while-dynami.patch
Patch04: 0004-Issue-5547-automember-plugin-improvements.patch
Patch05: 0001-Issue-3527-Support-HAProxy-and-Instance-on-the-same-.patch
%description
389 Directory Server is an LDAPv3 compliant server. The base package includes
@ -226,8 +311,8 @@ Please see http://seclists.org/oss-sec/2016/q1/363 for more information.
%package libs
Summary: Core libraries for 389 Directory Server
Group: System Environment/Daemons
BuildRequires: nspr-devel
BuildRequires: nss-devel >= 3.34
BuildRequires: nspr-devel >= 4.32
BuildRequires: nss-devel >= 3.67.0-7
BuildRequires: openldap-devel
BuildRequires: libdb-devel
BuildRequires: cyrus-sasl-devel
@ -280,8 +365,8 @@ Summary: Development libraries for 389 Directory Server
Group: Development/Libraries
Requires: %{name}-libs = %{version}-%{release}
Requires: pkgconfig
Requires: nspr-devel
Requires: nss-devel >= 3.34
Requires: nspr-devel >= 4.32
Requires: nss-devel >= 3.67.0-7
Requires: openldap-devel
Requires: libtalloc
Requires: libevent
@ -308,6 +393,7 @@ SNMP Agent for the 389 Directory Server base package.
Summary: A library for accessing, testing, and configuring the 389 Directory Server
BuildArch: noarch
Group: Development/Libraries
Requires: 389-ds-base
Requires: openssl
Requires: iproute
Requires: platform-python
@ -321,6 +407,7 @@ Requires: python%{python3_pkgversion}-argcomplete
Requires: python%{python3_pkgversion}-libselinux
Requires: python%{python3_pkgversion}-setuptools
Requires: python%{python3_pkgversion}-distro
Requires: python%{python3_pkgversion}-cryptography
%{?python_provide:%python_provide python%{python3_pkgversion}-lib389}
%description -n python%{python3_pkgversion}-lib389
@ -339,6 +426,10 @@ A cockpit UI Plugin for configuring and administering the 389 Directory Server
%prep
%autosetup -p1 -v -n %{name}-%{version}%{?prerel}
%if %{use_rust}
tar xvzf %{SOURCE4}
cp %{SOURCE5} src/Cargo.lock
%endif
%if %{bundle_jemalloc}
%setup -q -n %{name}-%{version}%{?prerel} -T -D -b 3
%endif
@ -356,7 +447,7 @@ ASAN_FLAGS="--enable-asan --enable-debug"
%endif
%if %{use_rust}
RUST_FLAGS="--enable-rust"
RUST_FLAGS="--enable-rust --enable-rust-offline"
%endif
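The Rust pieces build without network access: %prep unpacks the vendored crate sources from Source4 and copies the pinned Cargo.lock from Source5 into place, and --enable-rust-offline tells the build to consume them rather than fetch from crates.io. A minimal sketch of the generic cargo vendoring workflow this relies on, assuming a ./vendor directory and stock cargo tooling (the exact wiring in this package is handled by the configure switch, so the paths and tarball contents below are illustrative only):

# Illustrative sketch only; paths and tarball contents are assumptions,
# not copied from this spec.
# On a host with network access, pre-fetch every crate and save the
# source-replacement config that `cargo vendor` prints:
mkdir -p .cargo
cargo vendor vendor/ > .cargo/config
tar czf vendor-1.4.3.39-1.tar.gz vendor/ .cargo/config
# On the build host, unpack next to Cargo.toml and build with networking
# disabled; the saved config redirects crates.io lookups to the local
# vendor/ tree:
tar xzf vendor-1.4.3.39-1.tar.gz
cargo build --release --offline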
%if %{use_legacy}
@ -636,6 +727,7 @@ exit 0
%{_sbindir}/ns-slapd
%{_mandir}/man8/ns-slapd.8.gz
%{_libexecdir}/%{pkgname}/ds_systemd_ask_password_acl
%{_libexecdir}/%{pkgname}/ds_selinux_restorecon.sh
%{_mandir}/man5/99user.ldif.5.gz
%{_mandir}/man5/certmap.conf.5.gz
%{_mandir}/man5/slapd-collations.conf.5.gz
@ -690,9 +782,6 @@ exit 0
%if %{bundle_jemalloc}
%{_libdir}/%{pkgname}/lib/libjemalloc.so.2
%endif
%if %{use_rust}
%{_libdir}/%{pkgname}/librsds.so
%endif
%if %{use_legacy}
%files legacy-tools
@ -830,270 +919,55 @@ exit 0
%doc README.md
%changelog
* Thu Mar 11 2021 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-7
- Bump version to 1.4.3.8-7
- Resolves: Bug 1908705 - CVE-2020-35518 389-ds:1.4/389-ds-base: information disclosure during the binding of a DN
- Resolves: Bug 1936461 - A failed re-indexing leaves the database in broken state.
- Resolves: Bug 1912481 - Server-Cert.crt created using dscreate has Subject:CN =localhost instead of hostname.
* Thu Mar 14 2024 Simon Pichugin <spichugi@redhat.com> - 1.4.3.39-3
- Bump version to 1.4.3.39-3
- Resolves: RHEL-19240 - RFE Add PROXY protocol support to 389-ds-base via configuration item - similar to Postfix
* Thu Dec 3 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-6
- Bump version to 1.4.3.8-6
- Resolves: Bug 1904348 - Duplicate entryUSN numbers for different LDAP entries in the same backend
- Resolves: Bug 1904349 - group rdn with leading space char and add fails error 21 invalid syntax and delete fails error 32
- Resolves: Bug 1904350 - do not add referrals for masters with different data generation
- Resolves: Bug 1904351 - create keep alive entry after on line init
- Resolves: Bug 1904352 - NULL dereference in revert_cache()
- Resolves: Bug 1904353 - ds-replcheck crashes in offline mode
- Resolves: Bug 1904347 - Entries conflict not resolved by replication
* Mon Feb 05 2024 Thierry Bordaz <tbordaz@redhat.com> - 1.4.3.39-2
- Bump version to 1.4.3.39-2
- Resolves: RHEL-23209 - CVE-2024-1062 389-ds:1.4/389-ds-base: a heap overflow leading to denial-of-service while writing a value larger than 256 chars (in log_entry_attr)
- Resolves: RHEL-5390 - schema-compat-plugin expensive with automember rebuild
- Resolves: RHEL-5135 - crash in sync_update_persist_op() of content sync plugin
* Wed Aug 5 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-5
- Bump version to 1.4.3.8-5
- Resolves: Bug 1841086 - SSL alert: The value of sslVersionMax "TLS1.3" is higher than the supported version
- Resolves: Bug 1800529 - Memory leaks in disk monitoring
- Resolves: Bug 1748227 - Instance name length is not enforced
- Resolves: Bug 1849418 - python3-lib389 pulls unnecessary bash-completion package
* Tue Jan 16 2024 Simon Pichugin <spichugi@redhat.com> - 1.4.3.39-1
- Bump version to 1.4.3.39-1
- Resolves: RHEL-19028 - Rebase 389-ds-base in RHEL 8.10 to 1.4.3.39
- Resolves: RHEL-19240 - [RFE] Add PROXY protocol support to 389-ds-base
- Resolves: RHEL-5143 - SELinux labeling for dirsrv files seen during ipa install/uninstall should be moved to DEBUG.
- Resolves: RHEL-5107 - bdb_start - Detected Disorderly Shutdown directory server is not starting
- Resolves: RHEL-16338 - ns-slapd crash in slapi_attr_basetype
- Resolves: RHEL-14025 - After an upgrade the LDAP server won't start if nsslapd-conntablesize is present in the dse.ldif file.
* Fri Jun 26 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-4
- Bump version to 1.4.3.8-4
- Resolves: Bug 1806978 - ns-slapd crashes during db2ldif
- Resolves: Bug 1450863 - Log warning when tuning of nsslapd-threadnumber above or below the optimal value
- Resolves: Bug 1647017 - A distinguished value of a single valued attribute can be missing in an entry
- Resolves: Bug 1806573 - Dsctl healthcheck doesn't work when using instance name with 'slapd-'
- Resolves: Bug 1807773 - dsctl healthcheck : typo in DSREPLLE0002 Lint error suggested resolution commands
- Resolves: Bug 1843567 - Healthcheck to find notes=F
- Resolves: Bug 1845094 - User/Directory Manager can modify Password Policy attribute "pwdReset"
- Resolves: Bug 1850275 - Add new access log keywords for time spent in work queue and actual operation time
- Resolves: Bug 1442386 - Recreating an index while changing case will create an indexfile with the old name (different case) and after restart the indexfile is abandoned
- Resolves: Bug 1672574 - nsIndexIDListScanLimit accepts any value
- Resolves: Bug 1800529 - Memory leaks in disk monitoring
* Fri Jun 5 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-3
- Bump version to 1.4.3.8-3
- Resolves: Bug 1835619 - Healthcheck with --json option reports "Object of type 'bytes' is not JSON serializable" when mapping tree is deleted
- Resolves: Bug 1836428 - Directory Server ds-replcheck RFE to add a timeout command-line arg/value to wait longer when connecting to a replica server
- Resolves: Bug 1843090 - abort when an empty valueset is freed
- Resolves: Bug 1843156 - Prevent unnecessarily duplication of the target entry
- Resolves: Bug 1843157 - Check for clock errors and time skew
- Resolves: Bug 1843159 - RFE AD filter rewriter for ObjectCategory
- Resolves: Bug 1843162 - Creating Replication Manager fails if uid=repman is used
- Resolves: Bug 1816851 - Add option to healthcheck to list all the lint reports
- Resolves: Bug 1748227 - Instance name length is not enforced
- Resolves: Bug 1748244 - dscreate doesn't sanitize instance name
* Fri Dec 08 2023 James Chapman <jachapma@redhat.com> - 1.4.3.38-1
- Bump version to 1.4.3.38-1
- Resolves: RHEL-19028 - Rebase 389-ds-base in RHEL 8.10 to 1.4.3.38
* Mon May 11 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-2
- Bump version to 1.4.3.8-2
- Resolves: Bug 1833350 - Remove cockpit dependencies that are breaking builds
* Wed Aug 16 2023 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.37-1
- Bump version to 1.4.3.37-1
- Resolves: rhbz#2224505 - Paged search impacts performance
- Resolves: rhbz#2220890 - healthcheck tool needs to be updated for new default password storage scheme
- Resolves: rhbz#2218235 - python3-lib389: Python tarfile extraction needs change to avoid a warning
- Resolves: rhbz#2210491 - dtablesize being set to soft maxfiledescriptor limit causing massive slowdown in large environments.
- Resolves: rhbz#2149967 - SELinux labeling for dirsrv files seen during ipa install/uninstall should be moved to DEBUG
* Mon May 11 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-1
- Bump version to 1.4.3.8-1
- Resolves: Bug 1833350 - Rebase 389-ds-base for RHEL 8.3
- Resolves: Bug 1728943 - [RFE] Advance options in RHDS Disk Monitoring Framework
- Resolves: Bug 1775285 - [RFE] Implement the Password Policy attribute "pwdReset"
- Resolves: Bug 1638875 - [RFE] extract key/certs pem file into a private namespace
- Resolves: Bug 1758478 - AddressSanitizer: heap-buffer-overflow in ldap_utf8prev
- Resolves: Bug 1795943 - Port dbmon.sh from legacy tools package
- Resolves: Bug 1798394 - Port dbgen from legacy tools package
- Resolves: Bug 1800529 - Memory leaks in disk monitoring
- Resolves: Bug 1807419 - Unable to create a suffix with countryName either via dscreate or the admin console
- Resolves: Bug 1816848 - Database links: get_monitor() takes 1 positional argument but 2 were given
- Resolves: Bug 1816854 - Setting nsslapd-allowed-sasl-mechanisms truncates the value
- Resolves: Bug 1816857 - Searches on cn=config takes values with spaces and makes multiple attributes out of them
- Resolves: Bug 1816859 - lib389 - Replace exec() with setattr()
- Resolves: Bug 1816862 - Memory leak in indirect COS
- Resolves: Bug 1829071 - Installation of RHDS 11 fails on RHEL8 server with IPv6 disabled
- Resolves: Bug 1833515 - set 'nsslapd-enable-upgrade-hash: off' as this raises warnings in IPA
- Resolves: Bug 1790986 - cenotaph errors on modrdn operations
- Resolves: Bug 1769734 - Heavy StartTLS connection load can randomly fail with err=1
- Resolves: Bug 1758501 - LeakSanitizer: detected memory leaks in changelog5_init and perfctrs_init
* Tue Jul 11 2023 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.36-2
- Bump version to 1.4.3.36-2
- Resolves: rhbz#2220890 - healthcheck tool needs to be updated for new default password storage scheme
* Fri May 8 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.8-0
- Bump version to 1.4.3.8-0
- Issue 51078 - Add nsslapd-enable-upgrade-hash to the schema
- Issue 51054 - Revise ACI target syntax checking
- Issue 51068 - deadlock when updating the schema
- Issue 51060 - unable to set sslVersionMin to TLS1.0
- Issue 51064 - Unable to install server where IPv6 is disabled
- Issue 51051 - CLI fix consistency issues with confirmations
- Issue 49731 - undo db_home_dir under /dev/shm/dirsrv for now
- Issue 51054 - AddressSanitizer: heap-buffer-overflow in ldap_utf8prev
- Issue 51047 - React deprecating ComponentWillMount
- Issue 50499 - fix npm audit issues
- Issue 50545 - Port dbgen.pl to dsctl
* Wed Jun 14 2023 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.36-1
- Bump version to 1.4.3.36-1
- Resolves: rhbz#2188628 - Rebase 389-ds-base in RHEL 8.9 to 1.4.3.36
* Wed Apr 22 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.7-1
- Bump version to 1.4.3.7
- Issue 51024 - syncrepl_entry callback does not contain attributes added by postoperation plugins
- Issue 51035 - Heavy StartTLS connection load can randomly fail with err=1
- Issue 49731 - undo db_home_dir under /dev/shm/dirsrv for now
- Issue 51031 - UI - transition between two instances needs improvement
* Mon May 22 2023 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.35-1
- Bump version to 1.4.3.35-1
- Resolves: rhbz#2188628 - Rebase 389-ds-base in RHEL 8.9 to 1.4.3.35
* Thu Apr 16 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.6-1
- Bump version to 1.4.3.6
- Issue 50933 - 10rfc2307compat.ldif is not ready to be used by default
- Issue 50931 - RFE AD filter rewriter for ObjectCategory
- Issue 51016 - Fix memory leaks in changelog5_init and perfctrs_init
- Issue 50980 - RFE extend usability for slapi_compute_add_search_rewriter and slapi_compute_add_evaluator
- Issue 51008 - dbhome in containers
- Issue 50875 - Refactor passwordUserAttributes's and passwordBadWords's code
- Issue 51014 - slapi_pal.c possible static buffer overflow
- Issue 50545 - remove dbmon "incr" option from arg parser
- Issue 50545 - Port dbmon.sh to dsconf
- Issue 51005 - AttributeUniqueness plugin's DN parameter should not have a default value
- Issue 49731 - Fix additional issues with setting db home directory by default
- Issue 50337 - Replace exec() with setattr()
- Issue 50905 - intermittent SSL hang with rhds
- Issue 50952 - SSCA lacks basicConstraint:CA
- Issue 50640 - Database links: get_monitor() takes 1 positional argument but 2 were given
- Issue 50869 - Setting nsslapd-allowed-sasl-mechanisms truncates the value
* Tue Nov 15 2022 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.32-1
- Bump version to 1.4.3.32-1
- Resolves: Bug 2098138 - broken nsslapd-subtree-rename-switch option in rhds11
- Resolves: Bug 2119063 - entryuuid fixup tasks fails because entryUUID is not mutable
- Resolves: Bug 2136610 - [RFE] Add 'cn' attribute to IPA audit logs
- Resolves: Bug 2142638 - pam mutex lock causing high etimes, affecting red hat internal sso
- Resolves: Bug 2096795 - [RFE] Support ECDSA private keys for TLS
* Wed Apr 1 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.5-1
- Bump version to 1.4.3.5
- Issue 50994 - Fix latest UI bugs found by QE
- Issue 50933 - rfc2307compat.ldif
- Issue 50337 - Replace exec() with setattr()
- Issue 50984 - Memory leaks in disk monitoring
- Issue 50984 - Memory leaks in disk monitoring
- Issue 49731 - dscreate fails in silent mode because of db_home_dir
- Issue 50975 - Revise UI branding with new minimized build
- Issue 49437 - Fix memory leak with indirect COS
- Issue 49731 - Do not add db_home_dir to template-dse.ldif
- Issue 49731 - set and use db_home_directory by default
- Issue 50971 - fix BSD_SOURCE
- Issue 50744 - -n option of dbverify does not work
- Issue 50952 - SSCA lacks basicConstraint:CA
- Issue 50976 - Clean up Web UI source directory from unused files
- Issue 50955 - Fix memory leaks in chaining plugin(part 2)
- Issue 50966 - UI - Database indexes not using typeAhead correctly
- Issue 50974 - UI - wrong title in "Delete Suffix" popup
- Issue 50972 - Fix cockpit plugin build
- Issue 49761 - Fix CI test suite issues
- Issue 50971 - Support building on FreeBSD.
- Issue 50960 - [RFE] Advance options in RHDS Disk Monitoring Framework
- Issue 50800 - wildcards in rootdn-allow-ip attribute are not accepted
- Issue 50963 - We should bundle *.min.js files of Console
- Issue 50860 - Port Password Policy test cases from TET to python3 Password grace limit section.
- Issue 50860 - Port Password Policy test cases from TET to python3 series of bugs Port final
- Issue 50954 - buildnum.py - fix date formatting issue
* Mon Mar 16 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.4-1
- Bump version to 1.4.3.4
- Issue 50954 - Port buildnum.pl to python(part 2)
- Issue 50955 - Fix memory leaks in chaining plugin
- Issue 50954 - Port buildnum.pl to python
- Issue 50947 - change 00core.ldif objectClasses for openldap migration
- Issue 50755 - setting nsslapd-db-home-directory is overriding db_directory
- Issue 50937 - Update CLI for new backend split configuration
- Issue 50860 - Port Password Policy test cases from TET to python3 pwp.sh
- Issue 50945 - givenname alias of gn from openldap
- Issue 50935 - systemd override in lib389 for dscontainer
- Issue 50499 - Fix npm audit issues
- Issue 49761 - Fix CI test suite issues
- Issue 50618 - clean compiler warning and log level
- Issue 50889 - fix compiler issues
- Issue 50884 - Health check tool DSEldif check fails
- Issue 50926 - Remove dual spinner and other UI fixes
- Issue 50928 - Unable to create a suffix with countryName
- Issue 50758 - Only Recommend bash-completion, not Require
- Issue 50923 - Fix a test regression
- Issue 50904 - Connect All React Components And Refactor the Main Navigation Tab Code
- Issue 50920 - cl-dump exit code is 0 even if command fails with invalid arguments
- Issue 50923 - Add test - dsctl fails to remove instances with dashes in the name
- Issue 50919 - Backend delete fails using dsconf
- Issue 50872 - dsconf can't create GSSAPI replication agreements
- Issue 50912 - RFE - add password policy attribute pwdReset
- Issue 50914 - No error returned when adding an entry matching filters for a non existing automember group
- Issue 50889 - Extract pem files into a private namespace
- Issue 50909 - nsDS5ReplicaId can't be set to the old value it had before
- Issue 50686 - Port fractional replication test cases from TET to python3 final
- Issue 49845 - Remove pkgconfig check for libasan
- Issue 50860 - Port Password Policy test cases from TET to python3 bug624080
- Issue 50860 - Port Password Policy test cases from TET to python3 series of bugs
- Issue 50786 - connection table freelist
- Issue 50618 - support cgroupv2
- Issue 50900 - Fix cargo offline build
- Issue 50898 - ldclt core dumped when run with -e genldif option
* Mon Feb 17 2020 Matus Honek <mhonek@redhat.com> - 1.4.3.3-3
- Bring back the necessary c_rehash util (#1803370)
* Fri Feb 14 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.3-2
- Bump version to 1.4.3.3-2
- Remove unneeded perl dependencies
- Change bash-completion to "Recommends" instead of "Requires"
* Thu Feb 13 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.3-1
- Bump version to 1.4.3.3
- Issue 50855 - remove unused file from UI
- Issue 50855 - UI: Port Server Tab to React
- Issue 49845 - README does not contain complete information on building
- Issue 50686 - Port fractional replication test cases from TET to python3 part 1
- Issue 49623 - cont cenotaph errors on modrdn operations
- Issue 50882 - Fix healthcheck errors for instances that do not have TLS enabled
- Issue 50886 - Typo in the replication debug message
- Issue 50873 - Fix healthcheck and virtual attr check
- Issue 50873 - Fix issues with healthcheck tool
- Issue 50028 - Add a new CI test case
- Issue 49946 - Add a new CI test case
- Issue 50117 - Add a new CI test case
- Issue 50787 - fix implementation of attr unique
- Issue 50859 - support running only with ldaps socket
- Issue 50823 - dsctl doesn't work with 'slapd-' in the instance name
- Issue 49624 - cont - DB Deadlock on modrdn appears to corrupt database and entry cache
- Issue 50867 - Fix minor buildsys issues
- Issue 50737 - Allow building with rust online without vendoring
- Issue 50831 - add cargo.lock to allow offline builds
- Issue 50694 - import PEM certs on startup
- Issue 50857 - Memory leak in ACI using IP subject
- Issue 49761 - Fix CI test suite issues
- Issue 50853 - Fix NULL pointer deref in config setting
- Issue 50850 - Fix dsctl healthcheck for python36
- Issue 49990 - Need to enforce a hard maximum limit for file descriptors
- Issue 48707 - ldapssotoken for authentication
* Tue Jan 28 2020 Fedora Release Engineering <releng@fedoraproject.org> - 1.4.3.2-1.1
- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild
* Thu Jan 23 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.2-1
- Bump version to 1.4.3.2
- Issue 49254 - Fix compiler failures and warnings
- Issue 50741 - cont bdb_start - Detected Disorderly Shutdown
- Issue 50836 - Port Schema UI tab to React
- Issue 50842 - Decrease 389-console Cockpit component size
- Issue 50790 - Add result text when filter is invalid
- Issue 50627 - Add ASAN logs to HTML report
- Issue 50834 - Incorrectly setting the NSS default SSL version max
- Issue 50829 - Disk monitoring rotated log cleanup causes heap-use-after-free
- Issue 50709 - (cont) Several memory leaks reported by Valgrind for 389-ds 1.3.9.1-10
- Issue 50784 - performance testing scripts
- Issue 50599 - Fix memory leak when removing db region files
- Issue 49395 - Set the default TLS version min to TLS1.2
- Issue 50818 - dsconf pwdpolicy get error
- Issue 50824 - dsctl remove fails with "name 'ensure_str' is not defined"
- Issue 50599 - Remove db region files prior to db recovery
- Issue 50812 - dscontainer executable should be placed under /usr/libexec/dirsrv/
- Issue 50816 - dsconf allows the root password to be set to nothing
- Issue 50798 - incorrect bytes in format string (fix import issue)
* Thu Jan 16 2020 Adam Williamson <awilliam@redhat.com> - 1.4.3.1-3
- Backport two more import/missing function fixes
* Wed Jan 15 2020 Adam Williamson <awilliam@redhat.com> - 1.4.3.1-2
- Backport 828aad0 to fix missing imports from 1.4.3.1
* Mon Jan 13 2020 Mark Reynolds <mreynolds@redhat.com> - 1.4.3.1-1
- Bump version to 1.4.3.1
- Issue 50798 - incorrect bytes in format string
- Issue 50545 - Add the new replication monitor functionality to UI
- Issue 50806 - Fix minor issues in lib389 health checks
- Issue 50690 - Port Password Storage test cases from TET to python3 part 1
- Issue 49761 - Fix CI test suite issues
- Issue 49761 - Fix CI test suite issues
- Issue 50754 - Add Restore Change Log option to CLI
- Issue 48055 - CI test - automember_plugin(part2)
- Issue 50667 - dsctl -l did not respect PREFIX
- Issue 50780 - More CLI fixes
- Issue 50649 - lib389 without defaults.inf
- Issue 50780 - Fix UI issues
- Issue 50727 - correct mistaken options in filter validation patch
- Issue 50779 - lib389 - conflict compare fails for DN's with spaces
- Set branch version to 1.4.3.0