diff --git a/SOURCES/0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch b/SOURCES/0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch new file mode 100644 index 0000000..fb3211a --- /dev/null +++ b/SOURCES/0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch @@ -0,0 +1,132 @@ +From 48b30739f33d1eb526dbdd45c820129c4a4c4bcb Mon Sep 17 00:00:00 2001 +From: progier389 <72748589+progier389@users.noreply.github.com> +Date: Tue, 12 Jan 2021 11:06:24 +0100 +Subject: [PATCH] Issue 4504 - Insure ldapi is enabled in repl_monitor_test.py + (Needed on RHEL) (#4527) + +(cherry picked from commit 279556bc78ed743d7a053069621d999ec045866f) +--- + .../tests/suites/clu/repl_monitor_test.py | 67 +++++++++---------- + 1 file changed, 31 insertions(+), 36 deletions(-) + +diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +index eb18d2da2..b2cb840b3 100644 +--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py ++++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +@@ -9,7 +9,6 @@ + import time + import subprocess + import pytest +-import re + + from lib389.cli_conf.replication import get_repl_monitor_info + from lib389.tasks import * +@@ -18,6 +17,8 @@ from lib389.topologies import topology_m2 + from lib389.cli_base import FakeArgs + from lib389.cli_base.dsrc import dsrc_arg_concat + from lib389.cli_base import connect_instance ++from lib389.replica import Replicas ++ + + pytestmark = pytest.mark.tier0 + +@@ -68,25 +69,6 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No + log.info('Reset log file') + f.truncate(0) + +-def get_hostnames_from_log(port1, port2): +- # Get the supplier host names as displayed in replication monitor output +- with open(LOG_FILE, 'r') as logfile: +- logtext = logfile.read() +- # search for Supplier :hostname:port +- # and use \D to insure there is no more number is after +- # the matched port (i.e that 10 is not matching 101) +- regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)' +- match=re.search(regexp, logtext) +- host_m1 = 'localhost.localdomain' +- if (match is not None): +- host_m1 = match.group(2) +- # Same for master 2 +- regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)' +- match=re.search(regexp, logtext) +- host_m2 = 'localhost.localdomain' +- if (match is not None): +- host_m2 = match.group(2) +- return (host_m1, host_m2) + + @pytest.mark.ds50545 + @pytest.mark.bz1739718 +@@ -115,6 +97,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + m1 = topology_m2.ms["master1"] + m2 = topology_m2.ms["master2"] + ++ # Enable ldapi if not already done. ++ for inst in [topology_m2.ms["master1"], topology_m2.ms["master2"]]: ++ if not inst.can_autobind(): ++ # Update ns-slapd instance ++ inst.config.set('nsslapd-ldapilisten', 'on') ++ inst.config.set('nsslapd-ldapiautobind', 'on') ++ inst.restart() ++ # Ensure that updates have been sent both ways. 
++ replicas = Replicas(m1) ++ replica = replicas.get(DEFAULT_SUFFIX) ++ replica.test_replication([m2]) ++ replicas = Replicas(m2) ++ replica = replicas.get(DEFAULT_SUFFIX) ++ replica.test_replication([m1]) ++ ++ alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')', ++ 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'] ++ + connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port) + content_list = ['Replica Root: dc=example,dc=com', + 'Replica ID: 1', +@@ -177,9 +177,20 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + '001', + m1.host + ':' + str(m1.port)] + ++ dsrc_content = '[repl-monitor-connections]\n' \ ++ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ '\n' \ ++ '[repl-monitor-aliases]\n' \ ++ 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ ++ 'M2 = ' + m2.host + ':' + str(m2.port) ++ + connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] + ++ aliases = ['M1=' + m1.host + ':' + str(m1.port), ++ 'M2=' + m2.host + ':' + str(m2.port)] ++ + args = FakeArgs() + args.connections = connections + args.aliases = None +@@ -187,24 +198,8 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + + log.info('Run replication monitor with connections option') + get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) +- (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port) + check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) + +- # Prepare the data for next tests +- aliases = ['M1=' + host_m1 + ':' + str(m1.port), +- 'M2=' + host_m2 + ':' + str(m2.port)] +- +- alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')', +- 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')'] +- +- dsrc_content = '[repl-monitor-connections]\n' \ +- 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ +- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ +- '\n' \ +- '[repl-monitor-aliases]\n' \ +- 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \ +- 'M2 = ' + host_m2 + ':' + str(m2.port) +- + log.info('Run replication monitor with aliases option') + args.aliases = aliases + get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) +-- +2.26.2 + diff --git a/SOURCES/0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch b/SOURCES/0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch new file mode 100644 index 0000000..44636c8 --- /dev/null +++ b/SOURCES/0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch @@ -0,0 +1,51 @@ +From f84e75de9176218d3b47a447d07fe8fb7ca3d72f Mon Sep 17 00:00:00 2001 +From: Barbora Simonova +Date: Mon, 11 Jan 2021 15:51:24 +0100 +Subject: [PATCH] Issue 4315 - performance search rate: nagle triggers high + rate of setsocketopt + +Description: +The config value of nsslapd-nagle is now set to 'off' by default. +Added a test case, that checks the value. + +Relates: https://github.com/389ds/389-ds-base/issues/4315 + +Reviewed by: droideck (Thanks!) 
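
Illustrative aside, not part of the patch series: nsslapd-nagle controls whether Nagle's algorithm stays enabled on client connections, which at the socket level comes down to the TCP_NODELAY option. The standalone sketch below (identifiers and flow are illustrative, not taken from 389-ds-base) shows what setting and reading back that option looks like; with the server default now 'off', small responses go out without Nagle's delay.

    /* Minimal sketch: toggle Nagle's algorithm on a TCP socket via TCP_NODELAY.
     * Not 389-ds-base code; names and flow are illustrative only. */
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int
    main(void)
    {
        int sock = socket(AF_INET, SOCK_STREAM, 0);
        int flag = 1; /* 1 = set TCP_NODELAY, i.e. disable Nagle */
        socklen_t len = sizeof(flag);

        if (sock < 0) {
            perror("socket");
            return 1;
        }
        if (setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, &flag, sizeof(flag)) != 0) {
            perror("setsockopt(TCP_NODELAY)");
        }
        flag = 0;
        getsockopt(sock, IPPROTO_TCP, TCP_NODELAY, &flag, &len);
        printf("TCP_NODELAY is %s\n", flag ? "on (Nagle disabled)" : "off (Nagle enabled)");
        close(sock);
        return 0;
    }
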
+--- + .../tests/suites/config/config_test.py | 20 +++++++++++++++++++ + 1 file changed, 20 insertions(+) + +diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py +index 38d1ed9ac..fda16a530 100644 +--- a/dirsrvtests/tests/suites/config/config_test.py ++++ b/dirsrvtests/tests/suites/config/config_test.py +@@ -41,6 +41,26 @@ def big_file(): + return TEMP_BIG_FILE + + ++@pytest.mark.bz1897248 ++@pytest.mark.ds4315 ++@pytest.mark.skipif(ds_is_older('1.4.3.16'), reason="This config setting exists in 1.4.3.16 and higher") ++def test_nagle_default_value(topo): ++ """Test that nsslapd-nagle attribute is off by default ++ ++ :id: 00361f5d-d638-4d39-8231-66fa52637203 ++ :setup: Standalone instance ++ :steps: ++ 1. Create instance ++ 2. Check the value of nsslapd-nagle ++ :expectedresults: ++ 1. Success ++ 2. The value of nsslapd-nagle should be off ++ """ ++ ++ log.info('Check the value of nsslapd-nagle attribute is off by default') ++ assert topo.standalone.config.get_attr_val_utf8('nsslapd-nagle') == 'off' ++ ++ + def test_maxbersize_repl(topology_m2, big_file): + """maxbersize is ignored in the replicated operations. + +-- +2.26.2 + diff --git a/SOURCES/0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch b/SOURCES/0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch new file mode 100644 index 0000000..ba8f9d2 --- /dev/null +++ b/SOURCES/0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch @@ -0,0 +1,98 @@ +From 00ccec335792e3fa44712427463c64eb1ff9c5be Mon Sep 17 00:00:00 2001 +From: progier389 +Date: Tue, 12 Jan 2021 17:45:41 +0100 +Subject: [PATCH] Issue 4504 - insure that repl_monitor_test use ldapi (for + RHEL) - fix merge issue (#4533) + +(cherry picked from commit a880fddc192414d6283ea6832491b7349e5471dc) +--- + .../tests/suites/clu/repl_monitor_test.py | 47 ++++++++++++++----- + 1 file changed, 36 insertions(+), 11 deletions(-) + +diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +index b2cb840b3..caf6a9099 100644 +--- a/dirsrvtests/tests/suites/clu/repl_monitor_test.py ++++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py +@@ -9,6 +9,7 @@ + import time + import subprocess + import pytest ++import re + + from lib389.cli_conf.replication import get_repl_monitor_info + from lib389.tasks import * +@@ -69,6 +70,25 @@ def check_value_in_log_and_reset(content_list, second_list=None, single_value=No + log.info('Reset log file') + f.truncate(0) + ++def get_hostnames_from_log(port1, port2): ++ # Get the supplier host names as displayed in replication monitor output ++ with open(LOG_FILE, 'r') as logfile: ++ logtext = logfile.read() ++ # search for Supplier :hostname:port ++ # and use \D to insure there is no more number is after ++ # the matched port (i.e that 10 is not matching 101) ++ regexp = '(Supplier: )([^:]*)(:' + str(port1) + '\D)' ++ match=re.search(regexp, logtext) ++ host_m1 = 'localhost.localdomain' ++ if (match is not None): ++ host_m1 = match.group(2) ++ # Same for master 2 ++ regexp = '(Supplier: )([^:]*)(:' + str(port2) + '\D)' ++ match=re.search(regexp, logtext) ++ host_m2 = 'localhost.localdomain' ++ if (match is not None): ++ host_m2 = match.group(2) ++ return (host_m1, host_m2) + + @pytest.mark.ds50545 + @pytest.mark.bz1739718 +@@ -177,20 +197,9 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + '001', + m1.host + ':' + str(m1.port)] + +- dsrc_content = '[repl-monitor-connections]\n' \ +- 'connection1 = ' + 
m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ +- 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ +- '\n' \ +- '[repl-monitor-aliases]\n' \ +- 'M1 = ' + m1.host + ':' + str(m1.port) + '\n' \ +- 'M2 = ' + m2.host + ':' + str(m2.port) +- + connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] + +- aliases = ['M1=' + m1.host + ':' + str(m1.port), +- 'M2=' + m2.host + ':' + str(m2.port)] +- + args = FakeArgs() + args.connections = connections + args.aliases = None +@@ -198,8 +207,24 @@ def test_dsconf_replication_monitor(topology_m2, set_log_file): + + log.info('Run replication monitor with connections option') + get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) ++ (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port) + check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) + ++ # Prepare the data for next tests ++ aliases = ['M1=' + host_m1 + ':' + str(m1.port), ++ 'M2=' + host_m2 + ':' + str(m2.port)] ++ ++ alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')', ++ 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')'] ++ ++ dsrc_content = '[repl-monitor-connections]\n' \ ++ 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ ++ '\n' \ ++ '[repl-monitor-aliases]\n' \ ++ 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \ ++ 'M2 = ' + host_m2 + ':' + str(m2.port) ++ + log.info('Run replication monitor with aliases option') + args.aliases = aliases + get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) +-- +2.26.2 + diff --git a/SOURCES/0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch b/SOURCES/0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch new file mode 100644 index 0000000..593e2cd --- /dev/null +++ b/SOURCES/0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch @@ -0,0 +1,70 @@ +From 2afc65fd1750afcb1667545da5625f5a932aacdd Mon Sep 17 00:00:00 2001 +From: Simon Pichugin +Date: Wed, 13 Jan 2021 15:16:08 +0100 +Subject: [PATCH] Issue 4528 - Fix cn=monitor SCOPE_ONE search (#4529) + +Bug Description: While doing a ldapsearch on "cn=monitor" is +throwing err=32 with -s one. + +Fix Description: 'cn=monitor' is not a real entry so we should not +trying to check if the searched suffix (cm=monitor or its children) +belongs to the searched backend. + +Fixes: #4528 + +Reviewed by: @mreynolds389 @Firstyear @tbordaz (Thanks!) 
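
Illustrative aside, not part of the patch series: the hunk below short-circuits the backend suffix handling whenever the search base is cn=monitor or one of its children, using slapi_sdn_init_dn_byref() and slapi_sdn_issuffix(). The standalone sketch that follows mimics only the suffix test with a plain case-insensitive string comparison; real DN matching goes through the SLAPI DN API, which also handles normalization and escaping, so treat this as the shape of the check only.

    /* Simplified stand-in for "is this base DN at or under cn=monitor?".
     * Not SLAPI code: a plain trailing-string comparison for illustration. */
    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    static int
    dn_is_under_monitor(const char *basedn)
    {
        const char *suffix = "cn=monitor";
        size_t blen = strlen(basedn);
        size_t slen = strlen(suffix);

        if (blen < slen) {
            return 0;
        }
        /* match the trailing RDNs, ignoring case */
        return strcasecmp(basedn + (blen - slen), suffix) == 0;
    }

    int
    main(void)
    {
        printf("%d\n", dn_is_under_monitor("cn=monitor"));         /* 1 */
        printf("%d\n", dn_is_under_monitor("cn=snmp,cn=monitor")); /* 1 */
        printf("%d\n", dn_is_under_monitor("dc=example,dc=com"));  /* 0 */
        return 0;
    }
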
+--- + ldap/servers/slapd/opshared.c | 15 ++++++++++----- + 1 file changed, 10 insertions(+), 5 deletions(-) + +diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c +index c0bc5dcd0..f5ed71144 100644 +--- a/ldap/servers/slapd/opshared.c ++++ b/ldap/servers/slapd/opshared.c +@@ -240,6 +240,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) + int rc = 0; + int internal_op; + Slapi_DN *basesdn = NULL; ++ Slapi_DN monitorsdn = {0}; + Slapi_DN *sdn = NULL; + Slapi_Operation *operation = NULL; + Slapi_Entry *referral = NULL; +@@ -765,9 +766,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result) + } + } else { + /* be_suffix null means that we are searching the default backend +- * -> don't change the search parameters in pblock +- */ +- if (be_suffix != NULL) { ++ * -> don't change the search parameters in pblock ++ * Also, we skip this block for 'cn=monitor' search and its subsearches ++ * as they are done by callbacks from monitor.c */ ++ slapi_sdn_init_dn_byref(&monitorsdn, "cn=monitor"); ++ if (!((be_suffix == NULL) || slapi_sdn_issuffix(basesdn, &monitorsdn))) { + if ((be_name == NULL) && (scope == LDAP_SCOPE_ONELEVEL)) { + /* one level searches + * - depending on the suffix of the backend we might have to +@@ -789,8 +792,10 @@ op_shared_search(Slapi_PBlock *pb, int send_result) + } else if (slapi_sdn_issuffix(basesdn, be_suffix)) { + int tmp_scope = LDAP_SCOPE_ONELEVEL; + slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &tmp_scope); +- } else ++ } else { ++ slapi_sdn_done(&monitorsdn); + goto next_be; ++ } + } + + /* subtree searches : +@@ -811,7 +816,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) + } + } + } +- ++ slapi_sdn_done(&monitorsdn); + slapi_pblock_set(pb, SLAPI_BACKEND, be); + slapi_pblock_set(pb, SLAPI_PLUGIN, be->be_database); + slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, NULL); +-- +2.26.2 + diff --git a/SOURCES/0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch b/SOURCES/0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch new file mode 100644 index 0000000..7133049 --- /dev/null +++ b/SOURCES/0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch @@ -0,0 +1,3866 @@ +From 6969181628f2c664d5f82c89c15bbc0a2487e21f Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 19 Nov 2020 15:46:19 -0500 +Subject: [PATCH 1/2] Issue 4384 - Use MONOTONIC clock for all timing events + and conditions + +Bug Description: All of the server's event handling and replication were + based on REALTIME clocks, which can be influenced by the + system changing. This could causes massive delays, and + simply cause unexpected behavior. + +Fix Description: Move all condition variables to use pthread instead of NSPR + functions. Also make sure we use MONOTONIC clocks when we + get the current time when checking for timeouts and other + timed events. + +Relates: https://github.com/389ds/389-ds-base/issues/4384 + +Reviewed by: elkris, firstyear, and tbordaz (Thanks!!!) 
+ +Apply firstyear's sugestions + +Apply Firstyear's other suggestions + +Apply Thierry's suggestions +--- + Makefile.am | 2 +- + .../tests/suites/plugins/entryusn_test.py | 3 + + ldap/servers/plugins/chainingdb/cb_add.c | 2 +- + ldap/servers/plugins/chainingdb/cb_compare.c | 2 +- + .../plugins/chainingdb/cb_conn_stateless.c | 16 +- + ldap/servers/plugins/chainingdb/cb_delete.c | 2 +- + ldap/servers/plugins/chainingdb/cb_instance.c | 3 +- + ldap/servers/plugins/chainingdb/cb_modify.c | 2 +- + ldap/servers/plugins/chainingdb/cb_modrdn.c | 2 +- + ldap/servers/plugins/chainingdb/cb_search.c | 8 +- + ldap/servers/plugins/cos/cos_cache.c | 4 +- + ldap/servers/plugins/dna/dna.c | 2 +- + ldap/servers/plugins/passthru/ptconn.c | 2 +- + ldap/servers/plugins/referint/referint.c | 85 +++++--- + ldap/servers/plugins/replication/repl5.h | 3 +- + .../plugins/replication/repl5_backoff.c | 4 +- + .../plugins/replication/repl5_connection.c | 12 +- + .../plugins/replication/repl5_inc_protocol.c | 91 ++++---- + .../plugins/replication/repl5_mtnode_ext.c | 3 +- + .../plugins/replication/repl5_prot_private.h | 6 +- + .../plugins/replication/repl5_replica.c | 10 +- + .../replication/repl5_replica_config.c | 197 +++++++++++------- + .../plugins/replication/repl5_tot_protocol.c | 71 ++++--- + ldap/servers/plugins/replication/repl_extop.c | 4 +- + .../plugins/replication/windows_connection.c | 2 +- + .../replication/windows_inc_protocol.c | 82 +++++--- + .../replication/windows_tot_protocol.c | 24 ++- + ldap/servers/plugins/retrocl/retrocl_trim.c | 2 +- + ldap/servers/plugins/roles/roles_cache.c | 4 +- + ldap/servers/plugins/sync/sync.h | 4 +- + ldap/servers/plugins/sync/sync_persist.c | 54 +++-- + .../slapd/back-ldbm/db-bdb/bdb_import.c | 49 ++--- + .../back-ldbm/db-bdb/bdb_import_threads.c | 29 +-- + .../back-ldbm/db-bdb/bdb_instance_config.c | 8 +- + .../slapd/back-ldbm/db-bdb/bdb_layer.c | 129 +++++++----- + .../slapd/back-ldbm/db-bdb/bdb_layer.h | 10 +- + ldap/servers/slapd/back-ldbm/import.h | 6 +- + ldap/servers/slapd/connection.c | 88 ++++---- + ldap/servers/slapd/daemon.c | 64 ++++-- + ldap/servers/slapd/eventq.c | 132 ++++++++---- + ldap/servers/slapd/house.c | 58 ++++-- + ldap/servers/slapd/libmakefile | 2 +- + ldap/servers/slapd/psearch.c | 63 +++--- + ldap/servers/slapd/regex.c | 2 +- + ldap/servers/slapd/slapi-plugin.h | 7 + + .../slapd/{slapi2nspr.c => slapi2runtime.c} | 87 +++++--- + ldap/servers/slapd/task.c | 4 +- + ldap/servers/slapd/time.c | 10 +- + 48 files changed, 877 insertions(+), 579 deletions(-) + rename ldap/servers/slapd/{slapi2nspr.c => slapi2runtime.c} (69%) + +diff --git a/Makefile.am b/Makefile.am +index 0e5f04f91..f7bf1c44c 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -1455,7 +1455,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \ + ldap/servers/slapd/security_wrappers.c \ + ldap/servers/slapd/slapd_plhash.c \ + ldap/servers/slapd/slapi_counter.c \ +- ldap/servers/slapd/slapi2nspr.c \ ++ ldap/servers/slapd/slapi2runtime.c \ + ldap/servers/slapd/snmp_collator.c \ + ldap/servers/slapd/sort.c \ + ldap/servers/slapd/ssl.c \ +diff --git a/dirsrvtests/tests/suites/plugins/entryusn_test.py b/dirsrvtests/tests/suites/plugins/entryusn_test.py +index ad3d7f209..da0538f74 100644 +--- a/dirsrvtests/tests/suites/plugins/entryusn_test.py ++++ b/dirsrvtests/tests/suites/plugins/entryusn_test.py +@@ -6,9 +6,11 @@ + # See LICENSE for details. 
+ # --- END COPYRIGHT BLOCK --- + # ++import os + import ldap + import logging + import pytest ++import time + from lib389._constants import DEFAULT_SUFFIX + from lib389.config import Config + from lib389.plugins import USNPlugin, MemberOfPlugin +@@ -211,6 +213,7 @@ def test_entryusn_after_repl_delete(topology_m2): + user_usn = user_1.get_attr_val_int('entryusn') + + user_1.delete() ++ time.sleep(1) # Gives a little time for tombstone creation to complete + + ts = tombstones.get(user_rdn) + ts_usn = ts.get_attr_val_int('entryusn') +diff --git a/ldap/servers/plugins/chainingdb/cb_add.c b/ldap/servers/plugins/chainingdb/cb_add.c +index a9f9c0f87..b7ae7267d 100644 +--- a/ldap/servers/plugins/chainingdb/cb_add.c ++++ b/ldap/servers/plugins/chainingdb/cb_add.c +@@ -130,7 +130,7 @@ chaining_back_add(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + /* Send LDAP operation to the remote host */ +diff --git a/ldap/servers/plugins/chainingdb/cb_compare.c b/ldap/servers/plugins/chainingdb/cb_compare.c +index 25dfa87b5..8d7fdd06b 100644 +--- a/ldap/servers/plugins/chainingdb/cb_compare.c ++++ b/ldap/servers/plugins/chainingdb/cb_compare.c +@@ -126,7 +126,7 @@ chaining_back_compare(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + /* +diff --git a/ldap/servers/plugins/chainingdb/cb_conn_stateless.c b/ldap/servers/plugins/chainingdb/cb_conn_stateless.c +index 9beb459ef..a2003221e 100644 +--- a/ldap/servers/plugins/chainingdb/cb_conn_stateless.c ++++ b/ldap/servers/plugins/chainingdb/cb_conn_stateless.c +@@ -453,7 +453,7 @@ cb_get_connection(cb_conn_pool *pool, + conn->ld = ld; + conn->status = CB_CONNSTATUS_OK; + conn->refcount = 0; /* incremented below */ +- conn->opentime = slapi_current_utc_time(); ++ conn->opentime = slapi_current_rel_time_t(); + conn->ThreadId = PR_MyThreadId(); /* store the thread id */ + conn->next = NULL; + if (secure) { +@@ -488,7 +488,7 @@ cb_get_connection(cb_conn_pool *pool, + } + + if (!secure) +- slapi_wait_condvar(pool->conn.conn_list_cv, NULL); ++ slapi_wait_condvar_pt(pool->conn.conn_list_cv, pool->conn.conn_list_mutex, NULL); + + if (cb_debug_on()) { + slapi_log_err(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM, +@@ -639,7 +639,7 @@ cb_check_for_stale_connections(cb_conn_pool *pool) + slapi_lock_mutex(pool->conn.conn_list_mutex); + + if (connlifetime > 0) +- curtime = slapi_current_utc_time(); ++ curtime = slapi_current_rel_time_t(); + + if (pool->secure) { + myself = PR_ThreadSelf(); +@@ -860,7 +860,7 @@ cb_ping_farm(cb_backend_instance *cb, cb_outgoing_conn *cnx, time_t end_time) + if (cnx && (cnx->status != CB_CONNSTATUS_OK)) /* Known problem */ + return LDAP_SERVER_DOWN; + +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + if (end_time && ((now <= end_time) || (end_time < 0))) + return LDAP_SUCCESS; + +@@ -905,7 +905,7 @@ cb_update_failed_conn_cpt(cb_backend_instance *cb) + slapi_unlock_mutex(cb->monitor_availability.cpt_lock); + if (cb->monitor_availability.cpt >= CB_NUM_CONN_BEFORE_UNAVAILABILITY) { + /* we reach the limit of authorized failed connections => we setup the chaining BE state to unavailable */ +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); 
+ cb->monitor_availability.unavailableTimeLimit = now + CB_UNAVAILABLE_PERIOD; + slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); +@@ -938,7 +938,7 @@ cb_check_availability(cb_backend_instance *cb, Slapi_PBlock *pb) + time_t now; + if (cb->monitor_availability.farmserver_state == FARMSERVER_UNAVAILABLE) { + slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + if (now >= cb->monitor_availability.unavailableTimeLimit) { + cb->monitor_availability.unavailableTimeLimit = now + CB_INFINITE_TIME; /* to be sure only one thread can do the test */ + slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); +@@ -951,7 +951,7 @@ cb_check_availability(cb_backend_instance *cb, Slapi_PBlock *pb) + "cb_check_availability - ping the farm server and check if it's still unavailable"); + if (cb_ping_farm(cb, NULL, 0) != LDAP_SUCCESS) { /* farm still unavailable... Just change the timelimit */ + slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + cb->monitor_availability.unavailableTimeLimit = now + CB_UNAVAILABLE_PERIOD; + slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); + cb_send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, "FARM SERVER TEMPORARY UNAVAILABLE", 0, NULL); +@@ -961,7 +961,7 @@ cb_check_availability(cb_backend_instance *cb, Slapi_PBlock *pb) + } else { + /* farm is back !*/ + slapi_lock_mutex(cb->monitor_availability.lock_timeLimit); +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + cb->monitor_availability.unavailableTimeLimit = now; /* the unavailable period is finished */ + slapi_unlock_mutex(cb->monitor_availability.lock_timeLimit); + /* The farmer server state backs to FARMSERVER_AVAILABLE, but this already done in cb_ping_farm, and also the reset of cpt*/ +diff --git a/ldap/servers/plugins/chainingdb/cb_delete.c b/ldap/servers/plugins/chainingdb/cb_delete.c +index e76fb6b95..94f84b55d 100644 +--- a/ldap/servers/plugins/chainingdb/cb_delete.c ++++ b/ldap/servers/plugins/chainingdb/cb_delete.c +@@ -117,7 +117,7 @@ chaining_back_delete(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + /* +diff --git a/ldap/servers/plugins/chainingdb/cb_instance.c b/ldap/servers/plugins/chainingdb/cb_instance.c +index cd5abb834..bc1864c1a 100644 +--- a/ldap/servers/plugins/chainingdb/cb_instance.c ++++ b/ldap/servers/plugins/chainingdb/cb_instance.c +@@ -1947,7 +1947,8 @@ cb_instance_add_config_callback(Slapi_PBlock *pb __attribute__((unused)), + * we can't call recursively into the DSE to do more adds, they'll + * silently fail. instead, schedule the adds to happen in 1 second. 
+ */ +- inst->eq_ctx = slapi_eq_once(cb_instance_add_monitor_later, (void *)inst, time(NULL) + 1); ++ inst->eq_ctx = slapi_eq_once(cb_instance_add_monitor_later, (void *)inst, ++ slapi_current_rel_time_t() + 1); + } + + /* Get the list of operational attrs defined in the schema */ +diff --git a/ldap/servers/plugins/chainingdb/cb_modify.c b/ldap/servers/plugins/chainingdb/cb_modify.c +index f81edf4a6..e53da9e40 100644 +--- a/ldap/servers/plugins/chainingdb/cb_modify.c ++++ b/ldap/servers/plugins/chainingdb/cb_modify.c +@@ -125,7 +125,7 @@ chaining_back_modify(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + /* +diff --git a/ldap/servers/plugins/chainingdb/cb_modrdn.c b/ldap/servers/plugins/chainingdb/cb_modrdn.c +index 95a068be7..d648253c7 100644 +--- a/ldap/servers/plugins/chainingdb/cb_modrdn.c ++++ b/ldap/servers/plugins/chainingdb/cb_modrdn.c +@@ -129,7 +129,7 @@ chaining_back_modrdn(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + /* +diff --git a/ldap/servers/plugins/chainingdb/cb_search.c b/ldap/servers/plugins/chainingdb/cb_search.c +index d47cbc8e4..ffc8f56f8 100644 +--- a/ldap/servers/plugins/chainingdb/cb_search.c ++++ b/ldap/servers/plugins/chainingdb/cb_search.c +@@ -236,7 +236,7 @@ chainingdb_build_candidate_list(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + rc = ldap_search_ext(ld, target, scope, filter, attrs, attrsonly, +@@ -503,7 +503,7 @@ chainingdb_next_search_entry(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + while (1) { +@@ -579,7 +579,7 @@ chainingdb_next_search_entry(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + /* The server sent one of the entries found by the search */ +@@ -611,7 +611,7 @@ chainingdb_next_search_entry(Slapi_PBlock *pb) + + /* heart-beat management */ + if (cb->max_idle_time > 0) { +- endtime = slapi_current_utc_time() + cb->max_idle_time; ++ endtime = slapi_current_rel_time_t() + cb->max_idle_time; + } + + parse_rc = ldap_parse_reference(ctx->ld, res, &referrals, NULL, 1); +diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c +index eb9bd77f9..d404ff901 100644 +--- a/ldap/servers/plugins/cos/cos_cache.c ++++ b/ldap/servers/plugins/cos/cos_cache.c +@@ -346,7 +346,7 @@ cos_cache_init(void) + if (ret == 0) { + slapi_lock_mutex(start_lock); + while (!started) { +- while (slapi_wait_condvar(start_cond, NULL) == 0) ++ while (slapi_wait_condvar_pt(start_cond, start_lock, NULL) == 0) + ; + } + slapi_unlock_mutex(start_lock); +@@ -401,7 +401,7 @@ cos_cache_wait_on_change(void *arg __attribute__((unused))) + * thread notifies our condvar, and so we will not miss any + * notifications, including the shutdown notification. 
+ */ +- slapi_wait_condvar(something_changed, NULL); ++ slapi_wait_condvar_pt(something_changed, change_lock, NULL); + } else { + /* Something to do...do it below */ + } +diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c +index 16c625bb0..1cb54580b 100644 +--- a/ldap/servers/plugins/dna/dna.c ++++ b/ldap/servers/plugins/dna/dna.c +@@ -907,7 +907,7 @@ dna_load_plugin_config(Slapi_PBlock *pb, int use_eventq) + * performing the operation at this point when + * starting up would cause the change to not + * get changelogged. */ +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + eq_ctx = slapi_eq_once(dna_update_config_event, NULL, now + 30); + } else { + dna_update_config_event(0, NULL); +diff --git a/ldap/servers/plugins/passthru/ptconn.c b/ldap/servers/plugins/passthru/ptconn.c +index 49040f651..637d33843 100644 +--- a/ldap/servers/plugins/passthru/ptconn.c ++++ b/ldap/servers/plugins/passthru/ptconn.c +@@ -233,7 +233,7 @@ passthru_get_connection(PassThruServer *srvr, LDAP **ldp) + slapi_log_err(SLAPI_LOG_PLUGIN, PASSTHRU_PLUGIN_SUBSYSTEM, + "... passthru_get_connection waiting for conn to free up\n"); + #endif +- slapi_wait_condvar(srvr->ptsrvr_connlist_cv, NULL); ++ slapi_wait_condvar_pt(srvr->ptsrvr_connlist_cv, srvr->ptsrvr_connlist_mutex, NULL); + + #ifdef PASSTHRU_VERBOSE_LOGGING + slapi_log_err(SLAPI_LOG_PLUGIN, PASSTHRU_PLUGIN_SUBSYSTEM, +diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c +index eb4b089fb..fd5356d72 100644 +--- a/ldap/servers/plugins/referint/referint.c ++++ b/ldap/servers/plugins/referint/referint.c +@@ -71,8 +71,9 @@ void referint_get_config(int *delay, char **logfile); + /* global thread control stuff */ + static PRLock *referint_mutex = NULL; + static PRThread *referint_tid = NULL; +-static PRLock *keeprunning_mutex = NULL; +-static PRCondVar *keeprunning_cv = NULL; ++static pthread_mutex_t keeprunning_mutex; ++static pthread_cond_t keeprunning_cv; ++ + static int keeprunning = 0; + static referint_config *config = NULL; + static Slapi_DN *_ConfigAreaDN = NULL; +@@ -1302,12 +1303,38 @@ referint_postop_start(Slapi_PBlock *pb) + * -1 = integrity off + */ + if (referint_get_delay() > 0) { ++ pthread_condattr_t condAttr; ++ + /* initialize the cv and lock */ + if (!use_txn && (NULL == referint_mutex)) { + referint_mutex = PR_NewLock(); + } +- keeprunning_mutex = PR_NewLock(); +- keeprunning_cv = PR_NewCondVar(keeprunning_mutex); ++ if ((rc = pthread_mutex_init(&keeprunning_mutex, NULL)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", ++ "cannot create new lock. error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(1); ++ } ++ if ((rc = pthread_condattr_init(&condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", ++ "cannot create new condition attribute variable. error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(1); ++ } ++ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", ++ "cannot set condition attr clock. error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(1); ++ } ++ if ((rc = pthread_cond_init(&keeprunning_cv, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "referint_postop_start", ++ "cannot create new condition variable. 
error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(1); ++ } ++ pthread_condattr_destroy(&condAttr); /* no longer needed */ ++ + keeprunning = 1; + + referint_tid = PR_CreateThread(PR_USER_THREAD, +@@ -1337,13 +1364,11 @@ int + referint_postop_close(Slapi_PBlock *pb __attribute__((unused))) + { + /* signal the thread to exit */ +- if (NULL != keeprunning_mutex) { +- PR_Lock(keeprunning_mutex); ++ if (referint_get_delay() > 0) { ++ pthread_mutex_lock(&keeprunning_mutex); + keeprunning = 0; +- if (NULL != keeprunning_cv) { +- PR_NotifyCondVar(keeprunning_cv); +- } +- PR_Unlock(keeprunning_mutex); ++ pthread_cond_signal(&keeprunning_cv); ++ pthread_mutex_unlock(&keeprunning_mutex); + } + + slapi_destroy_rwlock(config_rwlock); +@@ -1369,6 +1394,7 @@ referint_thread_func(void *arg __attribute__((unused))) + char *iter = NULL; + Slapi_DN *sdn = NULL; + Slapi_DN *tmpsuperior = NULL; ++ struct timespec current_time = {0}; + int delay; + int no_changes; + +@@ -1383,20 +1409,22 @@ referint_thread_func(void *arg __attribute__((unused))) + no_changes = 1; + while (no_changes) { + +- PR_Lock(keeprunning_mutex); ++ pthread_mutex_lock(&keeprunning_mutex); + if (keeprunning == 0) { +- PR_Unlock(keeprunning_mutex); ++ pthread_mutex_unlock(&keeprunning_mutex); + break; + } +- PR_Unlock(keeprunning_mutex); ++ pthread_mutex_unlock(&keeprunning_mutex); + + referint_lock(); + if ((prfd = PR_Open(logfilename, PR_RDONLY, REFERINT_DEFAULT_FILE_MODE)) == NULL) { + referint_unlock(); + /* go back to sleep and wait for this file */ +- PR_Lock(keeprunning_mutex); +- PR_WaitCondVar(keeprunning_cv, PR_SecondsToInterval(delay)); +- PR_Unlock(keeprunning_mutex); ++ pthread_mutex_lock(&keeprunning_mutex); ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += delay; ++ pthread_cond_timedwait(&keeprunning_cv, &keeprunning_mutex, ¤t_time); ++ pthread_mutex_unlock(&keeprunning_mutex); + } else { + no_changes = 0; + } +@@ -1407,12 +1435,12 @@ referint_thread_func(void *arg __attribute__((unused))) + * loop before trying to do the changes. 
The server + * will pick them up on next startup as file still exists + */ +- PR_Lock(keeprunning_mutex); ++ pthread_mutex_lock(&keeprunning_mutex); + if (keeprunning == 0) { +- PR_Unlock(keeprunning_mutex); ++ pthread_mutex_unlock(&keeprunning_mutex); + break; + } +- PR_Unlock(keeprunning_mutex); ++ pthread_mutex_unlock(&keeprunning_mutex); + + while (GetNextLine(thisline, MAX_LINE, prfd)) { + ptoken = ldap_utf8strtok_r(thisline, delimiter, &iter); +@@ -1459,21 +1487,16 @@ referint_thread_func(void *arg __attribute__((unused))) + referint_unlock(); + + /* wait on condition here */ +- PR_Lock(keeprunning_mutex); +- PR_WaitCondVar(keeprunning_cv, PR_SecondsToInterval(delay)); +- PR_Unlock(keeprunning_mutex); ++ pthread_mutex_lock(&keeprunning_mutex); ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += delay; ++ pthread_cond_timedwait(&keeprunning_cv, &keeprunning_mutex, ¤t_time); ++ pthread_mutex_unlock(&keeprunning_mutex); + } + + /* cleanup resources allocated in start */ +- if (NULL != keeprunning_mutex) { +- PR_DestroyLock(keeprunning_mutex); +- } +- if (NULL != referint_mutex) { +- PR_DestroyLock(referint_mutex); +- } +- if (NULL != keeprunning_cv) { +- PR_DestroyCondVar(keeprunning_cv); +- } ++ pthread_mutex_destroy(&keeprunning_mutex); ++ pthread_cond_destroy(&keeprunning_cv); + slapi_ch_free_string(&logfilename); + } + +diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h +index f1c596a3f..06e747811 100644 +--- a/ldap/servers/plugins/replication/repl5.h ++++ b/ldap/servers/plugins/replication/repl5.h +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2010 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. + * All rights reserved. + * +@@ -28,6 +28,7 @@ + #include "llist.h" + #include "repl5_ruv.h" + #include "plstr.h" ++#include + + #define START_UPDATE_DELAY 2 /* 2 second */ + #define REPLICA_TYPE_WINDOWS 1 +diff --git a/ldap/servers/plugins/replication/repl5_backoff.c b/ldap/servers/plugins/replication/repl5_backoff.c +index 40848b96d..40ec75dd7 100644 +--- a/ldap/servers/plugins/replication/repl5_backoff.c ++++ b/ldap/servers/plugins/replication/repl5_backoff.c +@@ -110,7 +110,7 @@ backoff_reset(Backoff_Timer *bt, slapi_eq_fn_t callback, void *callback_data) + bt->next_interval = bt->initial_interval; + } + /* Schedule the callback */ +- bt->last_fire_time = slapi_current_utc_time(); ++ bt->last_fire_time = slapi_current_rel_time_t(); + return_value = bt->last_fire_time + bt->next_interval; + bt->pending_event = slapi_eq_once(bt->callback, bt->callback_arg, + return_value); +@@ -177,7 +177,7 @@ backoff_expired(Backoff_Timer *bt, int margin) + + PR_ASSERT(NULL != bt); + PR_Lock(bt->lock); +- return_value = (slapi_current_utc_time() >= (bt->last_fire_time + bt->next_interval + margin)); ++ return_value = (slapi_current_rel_time_t() >= (bt->last_fire_time + bt->next_interval + margin)); + PR_Unlock(bt->lock); + return return_value; + } +diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c +index cf57c2156..bc9ca424b 100644 +--- a/ldap/servers/plugins/replication/repl5_connection.c ++++ b/ldap/servers/plugins/replication/repl5_connection.c +@@ -402,7 +402,7 @@ conn_read_result_ex(Repl_Connection *conn, char **retoidp, struct berval **retda + } + if (block) { + /* Did the connection's timeout expire ? 
*/ +- time_now = slapi_current_utc_time(); ++ time_now = slapi_current_rel_time_t(); + if (conn->timeout.tv_sec <= (time_now - start_time)) { + /* We timed out */ + rc = 0; +@@ -676,7 +676,7 @@ conn_is_available(Repl_Connection *conn) + { + time_t poll_timeout_sec = 1; /* Polling for 1sec */ + time_t yield_delay_msec = 100; /* Delay to wait */ +- time_t start_time = slapi_current_utc_time(); ++ time_t start_time = slapi_current_rel_time_t(); + time_t time_now; + ConnResult return_value = CONN_OPERATION_SUCCESS; + +@@ -686,7 +686,7 @@ conn_is_available(Repl_Connection *conn) + /* in case of timeout we return CONN_TIMEOUT only + * if the RA.timeout is exceeded + */ +- time_now = slapi_current_utc_time(); ++ time_now = slapi_current_rel_time_t(); + if (conn->timeout.tv_sec <= (time_now - start_time)) { + break; + } else { +@@ -1010,7 +1010,7 @@ linger_timeout(time_t event_time __attribute__((unused)), void *arg) + void + conn_start_linger(Repl_Connection *conn) + { +- time_t now; ++ time_t now = slapi_current_rel_time_t(); + + PR_ASSERT(NULL != conn); + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, +@@ -1022,7 +1022,7 @@ conn_start_linger(Repl_Connection *conn) + agmt_get_long_name(conn->agmt)); + return; + } +- now = slapi_current_utc_time(); ++ + PR_Lock(conn->lock); + if (conn->linger_active) { + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, +@@ -1989,7 +1989,7 @@ repl5_start_debug_timeout(int *setlevel) + { + Slapi_Eq_Context eqctx = 0; + if (s_debug_timeout && s_debug_level) { +- time_t now = slapi_current_utc_time(); ++ time_t now = slapi_current_rel_time_t(); + eqctx = slapi_eq_once(repl5_debug_timeout_callback, setlevel, + s_debug_timeout + now); + } +diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c +index af5e5897c..4bb384882 100644 +--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c ++++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). 
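
Illustrative aside, not part of the patch series: the repl5_inc_protocol.c hunks below replace the protocol's NSPR lock/condvar pair with pthread equivalents created against CLOCK_MONOTONIC, so the timed wait in protocol_sleep() can no longer be stretched or cut short by a wall-clock change. A minimal self-contained sketch of that create-and-timed-wait pattern (names and the two-second interval are illustrative):

    /* Sketch: pthread condition variable bound to CLOCK_MONOTONIC and a
     * relative timed wait built from clock_gettime(CLOCK_MONOTONIC). */
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cvar;

    int
    main(void)
    {
        pthread_condattr_t attr;
        struct timespec deadline = {0};

        pthread_condattr_init(&attr);
        pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
        pthread_cond_init(&cvar, &attr);
        pthread_condattr_destroy(&attr); /* no longer needed once the cv exists */

        pthread_mutex_lock(&lock);
        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec += 2; /* wait at most 2 seconds */
        /* nothing signals cvar here, so this returns ETIMEDOUT after ~2s */
        pthread_cond_timedwait(&cvar, &lock, &deadline);
        pthread_mutex_unlock(&lock);

        pthread_cond_destroy(&cvar);
        printf("timed wait finished\n");
        return 0;
    }

Because the deadline is computed from the monotonic clock, stepping the system time neither delays the waiter nor wakes it early, which is the behavior the patch is after.
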
+@@ -129,7 +129,7 @@ typedef struct result_data + * don't see any updates for a period equal to this interval, + * we go ahead and start a replication session, just to be safe + */ +-#define MAX_WAIT_BETWEEN_SESSIONS PR_SecondsToInterval(60 * 5) /* 5 minutes */ ++#define MAX_WAIT_BETWEEN_SESSIONS 300 /* 5 minutes */ + + /* + * tests if the protocol has been shutdown and we need to quit +@@ -145,7 +145,7 @@ typedef struct result_data + /* Forward declarations */ + static PRUint32 event_occurred(Private_Repl_Protocol *prp, PRUint32 event); + static void reset_events(Private_Repl_Protocol *prp); +-static void protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration); ++static void protocol_sleep(Private_Repl_Protocol *prp, int32_t duration); + static int send_updates(Private_Repl_Protocol *prp, RUV *ruv, PRUint32 *num_changes_sent); + static void repl5_inc_backoff_expired(time_t timer_fire_time, void *arg); + static int examine_update_vector(Private_Repl_Protocol *prp, RUV *ruv); +@@ -253,7 +253,7 @@ repl5_inc_result_threadmain(void *param) + char *uniqueid = NULL; + char *ldap_error_string = NULL; + time_t time_now = 0; +- time_t start_time = slapi_current_utc_time(); ++ time_t start_time = slapi_current_rel_time_t(); + int connection_error = 0; + int operation_code = 0; + int backoff_time = 1; +@@ -275,7 +275,7 @@ repl5_inc_result_threadmain(void *param) + /* We need to a) check that the 'real' timeout hasn't expired and + * b) implement a backoff sleep to avoid spinning */ + /* Did the connection's timeout expire ? */ +- time_now = slapi_current_utc_time(); ++ time_now = slapi_current_rel_time_t(); + if (conn_get_timeout(conn) <= (time_now - start_time)) { + /* We timed out */ + conres = CONN_TIMEOUT; +@@ -358,7 +358,7 @@ repl5_inc_result_threadmain(void *param) + /* Should we stop ? */ + PR_Lock(rd->lock); + if (!finished && yield_session && rd->abort != SESSION_ABORTED && rd->abort_time == 0) { +- rd->abort_time = slapi_current_utc_time(); ++ rd->abort_time = slapi_current_rel_time_t(); + rd->abort = SESSION_ABORTED; /* only set the abort time once */ + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "repl5_inc_result_threadmain - " + "Abort control detected, setting abort time...(%s)\n", +@@ -532,13 +532,11 @@ repl5_inc_delete(Private_Repl_Protocol **prpp) + (*prpp)->stop(*prpp); + } + /* Then, delete all resources used by the protocol */ +- if ((*prpp)->lock) { +- PR_DestroyLock((*prpp)->lock); +- (*prpp)->lock = NULL; ++ if (&((*prpp)->lock)) { ++ pthread_mutex_destroy(&((*prpp)->lock)); + } +- if ((*prpp)->cvar) { +- PR_DestroyCondVar((*prpp)->cvar); +- (*prpp)->cvar = NULL; ++ if (&((*prpp)->cvar)) { ++ pthread_cond_destroy(&(*prpp)->cvar); + } + slapi_ch_free((void **)&(*prpp)->private); + slapi_ch_free((void **)prpp); +@@ -712,7 +710,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) + conn_set_agmt_changed(prp->conn); + } else if (event_occurred(prp, EVENT_TRIGGERING_CRITERIA_MET)) { /* change available */ + /* just ignore it and go to sleep */ +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } else if ((e1 = event_occurred(prp, EVENT_WINDOW_CLOSED)) || + event_occurred(prp, EVENT_BACKOFF_EXPIRED)) { + /* this events - should not occur - log a warning and go to sleep */ +@@ -720,13 +718,13 @@ repl5_inc_run(Private_Repl_Protocol *prp) + "repl5_inc_run - %s: " + "Event %s should not occur in state %s; going to sleep\n", + agmt_get_long_name(prp->agmt), e1 ? 
event2name(EVENT_WINDOW_CLOSED) : event2name(EVENT_BACKOFF_EXPIRED), state2name(current_state)); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } else { + /* wait until window opens or an event occurs */ + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "repl5_inc_run - %s: Waiting for update window to open\n", + agmt_get_long_name(prp->agmt)); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } + break; + +@@ -850,7 +848,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) + } + next_state = STATE_BACKOFF; + backoff_reset(prp_priv->backoff, repl5_inc_backoff_expired, (void *)prp); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + use_busy_backoff_timer = PR_FALSE; + } + break; +@@ -899,13 +897,13 @@ repl5_inc_run(Private_Repl_Protocol *prp) + */ + if (STATE_BACKOFF == next_state) { + /* Step the backoff timer */ +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + next_fire_time = backoff_step(prp_priv->backoff); + /* And go back to sleep */ + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "repl5_inc_run - %s: Replication session backing off for %ld seconds\n", + agmt_get_long_name(prp->agmt), next_fire_time - now); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } else { + /* Destroy the backoff timer, since we won't need it anymore */ + backoff_delete(&prp_priv->backoff); +@@ -923,7 +921,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) + next_state = STATE_READY_TO_ACQUIRE; + } else { + /* ignore changes and go to sleep */ +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } + } else if (event_occurred(prp, EVENT_WINDOW_OPENED)) { + /* this should never happen - log an error and go to sleep */ +@@ -931,7 +929,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) + "Event %s should not occur in state %s; going to sleep\n", + agmt_get_long_name(prp->agmt), event2name(EVENT_WINDOW_OPENED), + state2name(current_state)); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } + break; + +@@ -1178,7 +1176,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) + reset_events(prp); + } + +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + break; + + case STATE_STOP_NORMAL_TERMINATION: +@@ -1209,20 +1207,28 @@ repl5_inc_run(Private_Repl_Protocol *prp) + * Go to sleep until awakened. + */ + static void +-protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration) ++protocol_sleep(Private_Repl_Protocol *prp, int32_t duration) + { + PR_ASSERT(NULL != prp); +- PR_Lock(prp->lock); ++ pthread_mutex_lock(&(prp->lock)); + /* we should not go to sleep if there are events available to be processed. 
+ Otherwise, we can miss the event that suppose to wake us up */ +- if (prp->eventbits == 0) +- PR_WaitCondVar(prp->cvar, duration); +- else { ++ if (prp->eventbits == 0) { ++ if (duration > 0) { ++ struct timespec current_time = {0}; ++ /* get the current monotonic time and add our interval */ ++ clock_gettime(CLOCK_MONOTONIC, ¤t_time); ++ current_time.tv_sec += duration; ++ pthread_cond_timedwait(&(prp->cvar), &(prp->lock), ¤t_time); ++ } else { ++ pthread_cond_wait(&(prp->cvar), &(prp->lock)); ++ } ++ } else { + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "protocol_sleep - %s: Can't go to sleep: event bits - %x\n", + agmt_get_long_name(prp->agmt), prp->eventbits); + } +- PR_Unlock(prp->lock); ++ pthread_mutex_unlock(&(prp->lock)); + } + + /* +@@ -1235,10 +1241,10 @@ static void + event_notify(Private_Repl_Protocol *prp, PRUint32 event) + { + PR_ASSERT(NULL != prp); +- PR_Lock(prp->lock); ++ pthread_mutex_lock(&(prp->lock)); + prp->eventbits |= event; +- PR_NotifyCondVar(prp->cvar); +- PR_Unlock(prp->lock); ++ pthread_cond_signal(&(prp->cvar)); ++ pthread_mutex_unlock(&(prp->lock)); + } + + /* +@@ -1250,10 +1256,10 @@ event_occurred(Private_Repl_Protocol *prp, PRUint32 event) + { + PRUint32 return_value; + PR_ASSERT(NULL != prp); +- PR_Lock(prp->lock); ++ pthread_mutex_lock(&(prp->lock)); + return_value = (prp->eventbits & event); + prp->eventbits &= ~event; /* Clear event */ +- PR_Unlock(prp->lock); ++ pthread_mutex_unlock(&(prp->lock)); + return return_value; + } + +@@ -1261,9 +1267,9 @@ static void + reset_events(Private_Repl_Protocol *prp) + { + PR_ASSERT(NULL != prp); +- PR_Lock(prp->lock); ++ pthread_mutex_lock(&(prp->lock)); + prp->eventbits = 0; +- PR_Unlock(prp->lock); ++ pthread_mutex_unlock(&(prp->lock)); + } + + /* +@@ -1882,7 +1888,7 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu + /* See if the result thread has hit a problem */ + + if (!finished && rd->abort_time) { +- time_t current_time = slapi_current_utc_time(); ++ time_t current_time = slapi_current_rel_time_t(); + if ((current_time - rd->abort_time) >= release_timeout) { + rd->result = UPDATE_YIELD; + return_value = UPDATE_YIELD; +@@ -2088,7 +2094,9 @@ Private_Repl_Protocol * + Repl_5_Inc_Protocol_new(Repl_Protocol *rp) + { + repl5_inc_private *rip = NULL; +- Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_malloc(sizeof(Private_Repl_Protocol)); ++ pthread_condattr_t cattr; /* the pthread condition attr */ ++ Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol)); ++ + prp->delete = repl5_inc_delete; + prp->run = repl5_inc_run; + prp->stop = repl5_inc_stop; +@@ -2099,12 +2107,19 @@ Repl_5_Inc_Protocol_new(Repl_Protocol *rp) + prp->notify_window_closed = repl5_inc_notify_window_closed; + prp->update_now = repl5_inc_update_now; + prp->replica = prot_get_replica(rp); +- if ((prp->lock = PR_NewLock()) == NULL) { ++ if (pthread_mutex_init(&(prp->lock), NULL) != 0) { ++ goto loser; ++ } ++ if (pthread_condattr_init(&cattr) != 0) { ++ goto loser; ++ } ++ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) { + goto loser; + } +- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) { ++ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) { + goto loser; + } ++ pthread_condattr_destroy(&cattr); + prp->stopped = 0; + prp->terminate = 0; + prp->eventbits = 0; +diff --git a/ldap/servers/plugins/replication/repl5_mtnode_ext.c b/ldap/servers/plugins/replication/repl5_mtnode_ext.c +index 08a58613b..82e230958 100644 +--- 
a/ldap/servers/plugins/replication/repl5_mtnode_ext.c ++++ b/ldap/servers/plugins/replication/repl5_mtnode_ext.c +@@ -82,7 +82,8 @@ multimaster_mtnode_construct_replicas() + } + } + /* Wait a few seconds for everything to startup before resuming any replication tasks */ +- slapi_eq_once(replica_check_for_tasks, (void *)replica_get_root(r), time(NULL) + 5); ++ slapi_eq_once(replica_check_for_tasks, (void *)replica_get_root(r), ++ slapi_current_rel_time_t() + 5); + } + } + } +diff --git a/ldap/servers/plugins/replication/repl5_prot_private.h b/ldap/servers/plugins/replication/repl5_prot_private.h +index 5b2e1b3ca..0673f1978 100644 +--- a/ldap/servers/plugins/replication/repl5_prot_private.h ++++ b/ldap/servers/plugins/replication/repl5_prot_private.h +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -32,8 +32,6 @@ typedef struct private_repl_protocol + void (*notify_window_opened)(struct private_repl_protocol *); + void (*notify_window_closed)(struct private_repl_protocol *); + void (*update_now)(struct private_repl_protocol *); +- PRLock *lock; +- PRCondVar *cvar; + int stopped; + int terminate; + PRUint32 eventbits; +@@ -46,6 +44,8 @@ typedef struct private_repl_protocol + int repl50consumer; /* Flag to tell us if this is a 5.0-style consumer we're talking to */ + int repl71consumer; /* Flag to tell us if this is a 7.1-style consumer we're talking to */ + int repl90consumer; /* Flag to tell us if this is a 9.0-style consumer we're talking to */ ++ pthread_mutex_t lock; ++ pthread_cond_t cvar; + } Private_Repl_Protocol; + + extern Private_Repl_Protocol *Repl_5_Inc_Protocol_new(Repl_Protocol *rp); +diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c +index 7e56d6557..c1d376c72 100644 +--- a/ldap/servers/plugins/replication/repl5_replica.c ++++ b/ldap/servers/plugins/replication/repl5_replica.c +@@ -232,7 +232,7 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, + In that case the updated would fail but nothing bad would happen. The next + scheduled update would save the state */ + r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, +- slapi_current_utc_time() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); ++ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); + + if (r->tombstone_reap_interval > 0) { + /* +@@ -240,7 +240,7 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, + * This will allow the server to fully start before consuming resources. 
+ */ + r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, +- slapi_current_utc_time() + r->tombstone_reap_interval, ++ slapi_current_rel_time_t() + r->tombstone_reap_interval, + 1000 * r->tombstone_reap_interval); + } + +@@ -1088,7 +1088,7 @@ replica_is_updatedn(Replica *r, const Slapi_DN *sdn) + if (r->groupdn_list) { + /* check and rebuild groupdns */ + if (r->updatedn_group_check_interval > -1) { +- time_t now = slapi_current_utc_time(); ++ time_t now = slapi_current_rel_time_t(); + if (now - r->updatedn_group_last_check > r->updatedn_group_check_interval) { + Slapi_ValueSet *updatedn_groups_copy = NULL; + ReplicaUpdateDNList groupdn_list = replica_updatedn_list_new(NULL); +@@ -1512,7 +1512,7 @@ replica_set_enabled(Replica *r, PRBool enable) + if (r->repl_eqcxt_rs == NULL) /* event is not already registered */ + { + r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, +- slapi_current_utc_time() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); ++ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); + } + } else /* disable */ + { +@@ -3637,7 +3637,7 @@ replica_set_tombstone_reap_interval(Replica *r, long interval) + r->tombstone_reap_interval = interval; + if (interval > 0 && r->repl_eqcxt_tr == NULL) { + r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, +- slapi_current_utc_time() + r->tombstone_reap_interval, ++ slapi_current_rel_time_t() + r->tombstone_reap_interval, + 1000 * r->tombstone_reap_interval); + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "replica_set_tombstone_reap_interval - tombstone_reap event (interval=%" PRId64 ") was %s\n", +diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c +index d64d4bf45..a969ef82f 100644 +--- a/ldap/servers/plugins/replication/repl5_replica_config.c ++++ b/ldap/servers/plugins/replication/repl5_replica_config.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). 
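
Illustrative aside, not part of the patch series: in the replica_cleanallruv_thread() hunks below, the wait between retry passes doubles each time and is clamped to CLEANALLRUV_MAX_WAIT (7200 seconds, replacing the earlier 14400-second cap). A tiny standalone sketch of that capped doubling, with an illustrative starting interval:

    /* Sketch of the capped exponential retry interval used between passes. */
    #include <stdio.h>

    #define MAX_WAIT 7200 /* matches CLEANALLRUV_MAX_WAIT in the patch */

    int
    main(void)
    {
        int interval = 10; /* illustrative starting interval */

        for (int pass = 0; pass < 12; pass++) {
            printf("pass %d: wait %d seconds\n", pass, interval);
            interval *= 2;
            if (interval >= MAX_WAIT) {
                interval = MAX_WAIT;
            }
        }
        return 0;
    }
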
+@@ -31,14 +31,17 @@ + #define CLEANALLRUVLEN 11 + #define REPLICA_RDN "cn=replica" + ++#define CLEANALLRUV_MAX_WAIT 7200 /* 2 hours */ ++#define CLEANALLRUV_SLEEP 5 ++ + int slapi_log_urp = SLAPI_LOG_REPL; + static ReplicaId cleaned_rids[CLEANRID_BUFSIZ] = {0}; + static ReplicaId pre_cleaned_rids[CLEANRID_BUFSIZ] = {0}; + static ReplicaId aborted_rids[CLEANRID_BUFSIZ] = {0}; + static PRLock *rid_lock = NULL; + static PRLock *abort_rid_lock = NULL; +-static PRLock *notify_lock = NULL; +-static PRCondVar *notify_cvar = NULL; ++static pthread_mutex_t notify_lock; ++static pthread_cond_t notify_cvar; + static PRLock *task_count_lock = NULL; + static int32_t clean_task_count = 0; + static int32_t abort_task_count = 0; +@@ -105,6 +108,9 @@ dont_allow_that(Slapi_PBlock *pb __attribute__((unused)), + int + replica_config_init() + { ++ int rc = 0; ++ pthread_condattr_t condAttr; ++ + s_configLock = PR_NewLock(); + + if (s_configLock == NULL) { +@@ -134,18 +140,31 @@ replica_config_init() + PR_GetError()); + return -1; + } +- if ((notify_lock = PR_NewLock()) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - " +- "Failed to create notify lock; NSPR error - %d\n", +- PR_GetError()); ++ if ((rc = pthread_mutex_init(¬ify_lock, NULL)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", ++ "Failed to create notify lock: error %d (%s)\n", ++ rc, strerror(rc)); + return -1; + } +- if ((notify_cvar = PR_NewCondVar(notify_lock)) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - " +- "Failed to create notify cond var; NSPR error - %d\n", +- PR_GetError()); ++ if ((rc = pthread_condattr_init(&condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", ++ "Failed to create notify new condition attribute variable. error %d (%s)\n", ++ rc, strerror(rc)); + return -1; + } ++ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", ++ "Cannot set condition attr clock. error %d (%s)\n", ++ rc, strerror(rc)); ++ return -1; ++ } ++ if ((rc = pthread_cond_init(¬ify_cvar, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "replica_config_init", ++ "Failed to create new condition variable. error %d (%s)\n", ++ rc, strerror(rc)); ++ return -1; ++ } ++ pthread_condattr_destroy(&condAttr); + + /* config DSE must be initialized before we get here */ + slapi_config_register_callback(SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, CONFIG_BASE, LDAP_SCOPE_SUBTREE, +@@ -1674,9 +1693,13 @@ replica_cleanallruv_thread(void *arg) + * to startup timing issues, we need to wait before grabbing the replica obj, as + * the backends might not be online yet. 
+ */
+- PR_Lock(notify_lock);
+- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(10));
+- PR_Unlock(notify_lock);
++ struct timespec current_time = {0};
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += 10;
++
++ pthread_mutex_lock(&notify_lock);
++ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time);
++ pthread_mutex_unlock(&notify_lock);
+ data->replica = replica_get_replica_from_dn(data->sdn);
+ if (data->replica == NULL) {
+ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Unable to retrieve repl object from dn(%s).", data->sdn);
+@@ -1720,15 +1743,18 @@ replica_cleanallruv_thread(void *arg)
+ ruv_obj = replica_get_ruv(data->replica);
+ ruv = object_get_data(ruv_obj);
+ while (data->maxcsn && !is_task_aborted(data->rid) && !is_cleaned_rid(data->rid) && !slapi_is_shutting_down()) {
++ struct timespec current_time = {0};
+ if (csn_get_replicaid(data->maxcsn) == 0 ||
+ ruv_covers_csn_cleanallruv(ruv, data->maxcsn) ||
+ strcasecmp(data->force, "yes") == 0) {
+ /* We are caught up, now we can clean the ruv's */
+ break;
+ }
+- PR_Lock(notify_lock);
+- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(5));
+- PR_Unlock(notify_lock);
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += CLEANALLRUV_SLEEP;
++ pthread_mutex_lock(&notify_lock);
++ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time);
++ pthread_mutex_unlock(&notify_lock);
+ }
+ object_release(ruv_obj);
+ /*
+@@ -1796,18 +1822,20 @@ replica_cleanallruv_thread(void *arg)
+ /*
+ * need to sleep between passes
+ */
+- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Not all replicas have received the "
+- "cleanallruv extended op, retrying in %d seconds",
++ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE,
++ "Not all replicas have received the cleanallruv extended op, retrying in %d seconds",
+ interval);
+ if (!slapi_is_shutting_down()) {
+- PR_Lock(notify_lock);
+- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval));
+- PR_Unlock(notify_lock);
++ struct timespec current_time = {0};
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += interval;
++ pthread_mutex_lock(&notify_lock);
++ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time);
++ pthread_mutex_unlock(&notify_lock);
+ }
+- if (interval < 14400) { /* 4 hour max */
+- interval = interval * 2;
+- } else {
+- interval = 14400;
++ interval *= 2;
++ if (interval >= CLEANALLRUV_MAX_WAIT) {
++ interval = CLEANALLRUV_MAX_WAIT;
+ }
+ }
+ /*
+@@ -1857,18 +1885,19 @@ replica_cleanallruv_thread(void *arg)
+ * Need to sleep between passes unless we are shutting down
+ */
+ if (!slapi_is_shutting_down()) {
+- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Replicas have not been cleaned yet, "
+- "retrying in %d seconds",
++ struct timespec current_time = {0};
++ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE,
++ "Replicas have not been cleaned yet, retrying in %d seconds",
+ interval);
+- PR_Lock(notify_lock);
+- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval));
+- PR_Unlock(notify_lock);
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += interval;
++ pthread_mutex_lock(&notify_lock);
++ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time);
++ pthread_mutex_unlock(&notify_lock);
+ }
+-
+- if (interval < 14400) { /* 4 hour max */
+- interval = interval * 2;
+- } else {
+- interval = 14400;
++ interval *= 2;
++ if (interval >= CLEANALLRUV_MAX_WAIT) {
++ interval = CLEANALLRUV_MAX_WAIT;
+ }
+ } /* while */
+
+@@
-2081,15 +2110,17 @@ check_replicas_are_done_cleaning(cleanruv_data *data)
+ "Not all replicas finished cleaning, retrying in %d seconds",
+ interval);
+ if (!slapi_is_shutting_down()) {
+- PR_Lock(notify_lock);
+- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval));
+- PR_Unlock(notify_lock);
++ struct timespec current_time = {0};
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += interval;
++ pthread_mutex_lock(&notify_lock);
++ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time);
++ pthread_mutex_unlock(&notify_lock);
+ }
+
+- if (interval < 14400) { /* 4 hour max */
+- interval = interval * 2;
+- } else {
+- interval = 14400;
++ interval *= 2;
++ if (interval >= CLEANALLRUV_MAX_WAIT) {
++ interval = CLEANALLRUV_MAX_WAIT;
+ }
+ }
+ slapi_ch_free_string(&filter);
+@@ -2190,14 +2221,16 @@ check_replicas_are_done_aborting(cleanruv_data *data)
+ cleanruv_log(data->task, data->rid, ABORT_CLEANALLRUV_ID, SLAPI_LOG_NOTICE,
+ "Not all replicas finished aborting, retrying in %d seconds", interval);
+ if (!slapi_is_shutting_down()) {
+- PR_Lock(notify_lock);
+- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval));
+- PR_Unlock(notify_lock);
++ struct timespec current_time = {0};
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += interval;
++ pthread_mutex_lock(&notify_lock);
++ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time);
++ pthread_mutex_unlock(&notify_lock);
+ }
+- if (interval < 14400) { /* 4 hour max */
+- interval = interval * 2;
+- } else {
+- interval = 14400;
++ interval *= 2;
++ if (interval >= CLEANALLRUV_MAX_WAIT) {
++ interval = CLEANALLRUV_MAX_WAIT;
+ }
+ }
+ slapi_ch_free_string(&filter);
+@@ -2248,14 +2281,16 @@ check_agmts_are_caught_up(cleanruv_data *data, char *maxcsn)
+ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_NOTICE,
+ "Not all replicas caught up, retrying in %d seconds", interval);
+ if (!slapi_is_shutting_down()) {
+- PR_Lock(notify_lock);
+- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval));
+- PR_Unlock(notify_lock);
++ struct timespec current_time = {0};
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += interval;
++ pthread_mutex_lock(&notify_lock);
++ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time);
++ pthread_mutex_unlock(&notify_lock);
+ }
+- if (interval < 14400) { /* 4 hour max */
+- interval = interval * 2;
+- } else {
+- interval = 14400;
++ interval *= 2;
++ if (interval >= CLEANALLRUV_MAX_WAIT) {
++ interval = CLEANALLRUV_MAX_WAIT;
+ }
+ }
+ slapi_ch_free_string(&rid_text);
+@@ -2310,14 +2345,16 @@ check_agmts_are_alive(Replica *replica, ReplicaId rid, Slapi_Task *task)
+ interval);
+
+ if (!slapi_is_shutting_down()) {
+- PR_Lock(notify_lock);
+- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval));
+- PR_Unlock(notify_lock);
++ struct timespec current_time = {0};
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += interval;
++ pthread_mutex_lock(&notify_lock);
++ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time);
++ pthread_mutex_unlock(&notify_lock);
+ }
+- if (interval < 14400) { /* 4 hour max */
+- interval = interval * 2;
+- } else {
+- interval = 14400;
++ interval *= 2;
++ if (interval >= CLEANALLRUV_MAX_WAIT) {
++ interval = CLEANALLRUV_MAX_WAIT;
+ }
+ }
+ if (is_task_aborted(rid)) {
+@@ -3093,16 +3130,18 @@ replica_abort_task_thread(void *arg)
+ * Need to sleep between passes.
unless we are shutting down
+ */
+ if (!slapi_is_shutting_down()) {
++ struct timespec current_time = {0};
+ cleanruv_log(data->task, data->rid, ABORT_CLEANALLRUV_ID, SLAPI_LOG_NOTICE, "Retrying in %d seconds", interval);
+- PR_Lock(notify_lock);
+- PR_WaitCondVar(notify_cvar, PR_SecondsToInterval(interval));
+- PR_Unlock(notify_lock);
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += interval;
++ pthread_mutex_lock(&notify_lock);
++ pthread_cond_timedwait(&notify_cvar, &notify_lock, &current_time);
++ pthread_mutex_unlock(&notify_lock);
+ }
+
+- if (interval < 14400) { /* 4 hour max */
+- interval = interval * 2;
+- } else {
+- interval = 14400;
++ interval *= 2;
++ if (interval >= CLEANALLRUV_MAX_WAIT) {
++ interval = CLEANALLRUV_MAX_WAIT;
+ }
+ } /* while */
+
+@@ -3536,10 +3575,10 @@ check_and_set_abort_cleanruv_task_count(void)
+
+ PR_Lock(task_count_lock);
+ if (abort_task_count > CLEANRIDSIZ) {
+- rc = -1;
+- } else {
+- abort_task_count++;
+- }
++ rc = -1;
++ } else {
++ abort_task_count++;
++ }
+ PR_Unlock(task_count_lock);
+
+ return rc;
+@@ -3551,11 +3590,9 @@ check_and_set_abort_cleanruv_task_count(void)
+ void
+ stop_ruv_cleaning()
+ {
+- if (notify_lock) {
+- PR_Lock(notify_lock);
+- PR_NotifyCondVar(notify_cvar);
+- PR_Unlock(notify_lock);
+- }
++ pthread_mutex_lock(&notify_lock);
++ pthread_cond_signal(&notify_cvar);
++ pthread_mutex_unlock(&notify_lock);
+ }
+
+ /*
+diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
+index a25839f21..f67263c3e 100644
+--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
++++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
+@@ -1,6 +1,6 @@
+ /** BEGIN COPYRIGHT BLOCK
+ * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
+- * Copyright (C) 2005 Red Hat, Inc.
++ * Copyright (C) 2020 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+@@ -45,7 +45,7 @@ typedef struct callback_data
+ unsigned long num_entries;
+ time_t sleep_on_busy;
+ time_t last_busy;
+- PRLock *lock; /* Lock to protect access to this structure, the message id list and to force memory barriers */
++ pthread_mutex_t lock; /* Lock to protect access to this structure, the message id list and to force memory barriers */
+ PRThread *result_tid; /* The async result thread */
+ operation_id_list_item *message_id_list; /* List of IDs for outstanding operations */
+ int abort; /* Flag used to tell the sending thread asyncronously that it should abort (because an error came up in a result) */
+@@ -113,7 +113,7 @@ repl5_tot_result_threadmain(void *param)
+ while (!finished) {
+ int message_id = 0;
+ time_t time_now = 0;
+- time_t start_time = slapi_current_utc_time();
++ time_t start_time = slapi_current_rel_time_t();
+ int backoff_time = 1;
+
+ /* Read the next result */
+@@ -130,7 +130,7 @@ repl5_tot_result_threadmain(void *param)
+ /* We need to a) check that the 'real' timeout hasn't expired and
+ * b) implement a backoff sleep to avoid spinning */
+ /* Did the connection's timeout expire ? */
+- time_now = slapi_current_utc_time();
++ time_now = slapi_current_rel_time_t();
+ if (conn_get_timeout(conn) <= (time_now - start_time)) {
+ /* We timed out */
+ conres = CONN_TIMEOUT;
+@@ -142,11 +142,11 @@ repl5_tot_result_threadmain(void *param)
+ backoff_time <<= 1;
+ }
+ /* Should we stop ?
*/ +- PR_Lock(cb->lock); ++ pthread_mutex_lock(&(cb->lock)); + if (cb->stop_result_thread) { + finished = 1; + } +- PR_Unlock(cb->lock); ++ pthread_mutex_unlock(&(cb->lock)); + } else { + /* Something other than a timeout, so we exit the loop */ + break; +@@ -164,21 +164,21 @@ repl5_tot_result_threadmain(void *param) + /* Was the result itself an error ? */ + if (0 != conres) { + /* If so then we need to take steps to abort the update process */ +- PR_Lock(cb->lock); ++ pthread_mutex_lock(&(cb->lock)); + cb->abort = 1; + if (conres == CONN_NOT_CONNECTED) { + cb->rc = LDAP_CONNECT_ERROR; + } +- PR_Unlock(cb->lock); ++ pthread_mutex_unlock(&(cb->lock)); + } + /* Should we stop ? */ +- PR_Lock(cb->lock); ++ pthread_mutex_lock(&(cb->lock)); + /* if the connection is not connected, then we cannot read any more + results - we are finished */ + if (cb->stop_result_thread || (conres == CONN_NOT_CONNECTED)) { + finished = 1; + } +- PR_Unlock(cb->lock); ++ pthread_mutex_unlock(&(cb->lock)); + } + } + +@@ -209,9 +209,9 @@ repl5_tot_destroy_async_result_thread(callback_data *cb_data) + int retval = 0; + PRThread *tid = cb_data->result_tid; + if (tid) { +- PR_Lock(cb_data->lock); ++ pthread_mutex_lock(&(cb_data->lock)); + cb_data->stop_result_thread = 1; +- PR_Unlock(cb_data->lock); ++ pthread_mutex_unlock(&(cb_data->lock)); + (void)PR_JoinThread(tid); + } + return retval; +@@ -248,7 +248,7 @@ repl5_tot_waitfor_async_results(callback_data *cb_data) + /* Keep pulling results off the LDAP connection until we catch up to the last message id stored in the rd */ + while (!done) { + /* Lock the structure to force memory barrier */ +- PR_Lock(cb_data->lock); ++ pthread_mutex_lock(&(cb_data->lock)); + /* Are we caught up ? */ + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "repl5_tot_waitfor_async_results - %d %d\n", +@@ -260,7 +260,7 @@ repl5_tot_waitfor_async_results(callback_data *cb_data) + if (cb_data->abort && LOST_CONN_ERR(cb_data->rc)) { + done = 1; /* no connection == no more results */ + } +- PR_Unlock(cb_data->lock); ++ pthread_mutex_unlock(&(cb_data->lock)); + /* If not then sleep a bit */ + DS_Sleep(PR_SecondsToInterval(1)); + loops++; +@@ -482,9 +482,9 @@ retry: + cb_data.rc = 0; + cb_data.num_entries = 1UL; + cb_data.sleep_on_busy = 0UL; +- cb_data.last_busy = slapi_current_utc_time(); ++ cb_data.last_busy = slapi_current_rel_time_t(); + cb_data.flowcontrol_detection = 0; +- cb_data.lock = PR_NewLock(); ++ pthread_mutex_init(&(cb_data.lock), NULL); + + /* This allows during perform_operation to check the callback data + * especially to do flow contol on delta send msgid / recv msgid +@@ -541,9 +541,9 @@ retry: + cb_data.rc = 0; + cb_data.num_entries = 0UL; + cb_data.sleep_on_busy = 0UL; +- cb_data.last_busy = slapi_current_utc_time(); ++ cb_data.last_busy = slapi_current_rel_time_t(); + cb_data.flowcontrol_detection = 0; +- cb_data.lock = PR_NewLock(); ++ pthread_mutex_init(&(cb_data.lock), NULL); + + /* This allows during perform_operation to check the callback data + * especially to do flow contol on delta send msgid / recv msgid +@@ -633,9 +633,7 @@ done: + type_nsds5ReplicaFlowControlWindow); + } + conn_set_tot_update_cb(prp->conn, NULL); +- if (cb_data.lock) { +- PR_DestroyLock(cb_data.lock); +- } ++ pthread_mutex_destroy(&(cb_data.lock)); + prp->stopped = 1; + } + +@@ -700,7 +698,9 @@ Private_Repl_Protocol * + Repl_5_Tot_Protocol_new(Repl_Protocol *rp) + { + repl5_tot_private *rip = NULL; +- Private_Repl_Protocol *prp = (Private_Repl_Protocol 
*)slapi_ch_malloc(sizeof(Private_Repl_Protocol)); ++ pthread_condattr_t cattr; ++ Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol)); ++ + prp->delete = repl5_tot_delete; + prp->run = repl5_tot_run; + prp->stop = repl5_tot_stop; +@@ -710,12 +710,19 @@ Repl_5_Tot_Protocol_new(Repl_Protocol *rp) + prp->notify_window_opened = repl5_tot_noop; + prp->notify_window_closed = repl5_tot_noop; + prp->update_now = repl5_tot_noop; +- if ((prp->lock = PR_NewLock()) == NULL) { ++ if (pthread_mutex_init(&(prp->lock), NULL) != 0) { ++ goto loser; ++ } ++ if (pthread_condattr_init(&cattr) != 0) { ++ goto loser; ++ } ++ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) { + goto loser; + } +- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) { ++ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) { + goto loser; + } ++ pthread_condattr_destroy(&cattr); + prp->stopped = 1; + prp->terminate = 0; + prp->eventbits = 0; +@@ -744,13 +751,11 @@ repl5_tot_delete(Private_Repl_Protocol **prpp) + (*prpp)->stop(*prpp); + } + /* Then, delete all resources used by the protocol */ +- if ((*prpp)->lock) { +- PR_DestroyLock((*prpp)->lock); +- (*prpp)->lock = NULL; ++ if (&((*prpp)->lock)) { ++ pthread_mutex_destroy(&((*prpp)->lock)); + } +- if ((*prpp)->cvar) { +- PR_DestroyCondVar((*prpp)->cvar); +- (*prpp)->cvar = NULL; ++ if (&((*prpp)->cvar)) { ++ pthread_cond_destroy(&(*prpp)->cvar); + } + slapi_ch_free((void **)&(*prpp)->private); + slapi_ch_free((void **)prpp); +@@ -824,9 +829,9 @@ send_entry(Slapi_Entry *e, void *cb_data) + + /* see if the result reader thread encountered + a fatal error */ +- PR_Lock(((callback_data *)cb_data)->lock); ++ pthread_mutex_lock((&((callback_data *)cb_data)->lock)); + rc = ((callback_data *)cb_data)->abort; +- PR_Unlock(((callback_data *)cb_data)->lock); ++ pthread_mutex_unlock((&((callback_data *)cb_data)->lock)); + if (rc) { + conn_disconnect(prp->conn); + ((callback_data *)cb_data)->rc = -1; +@@ -889,7 +894,7 @@ send_entry(Slapi_Entry *e, void *cb_data) + } + + if (rc == CONN_BUSY) { +- time_t now = slapi_current_utc_time(); ++ time_t now = slapi_current_rel_time_t(); + if ((now - *last_busyp) < (*sleep_on_busyp + 10)) { + *sleep_on_busyp += 5; + } else { +diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c +index af486f730..ef2025dd9 100644 +--- a/ldap/servers/plugins/replication/repl_extop.c ++++ b/ldap/servers/plugins/replication/repl_extop.c +@@ -1176,7 +1176,7 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb) + /* now that the changelog is open and started, we can alos cretae the + * keep alive entry without risk that db and cl will not match + */ +- replica_subentry_check(replica_get_root(r), replica_get_rid(r)); ++ replica_subentry_check((Slapi_DN *)replica_get_root(r), replica_get_rid(r)); + } + + /* ONREPL code that dealt with new RUV, etc was moved into the code +@@ -1474,7 +1474,7 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb) + * Launch the cleanruv monitoring thread. 
Once all the replicas are cleaned it will release the rid + */ + +- cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Launching cleanAllRUV thread...\n"); ++ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Launching cleanAllRUV thread..."); + data = (cleanruv_data *)slapi_ch_calloc(1, sizeof(cleanruv_data)); + if (data == NULL) { + slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "multimaster_extop_cleanruv - CleanAllRUV Task - Failed to allocate " +diff --git a/ldap/servers/plugins/replication/windows_connection.c b/ldap/servers/plugins/replication/windows_connection.c +index 011b328bf..ce0662544 100644 +--- a/ldap/servers/plugins/replication/windows_connection.c ++++ b/ldap/servers/plugins/replication/windows_connection.c +@@ -1121,7 +1121,7 @@ windows_conn_start_linger(Repl_Connection *conn) + agmt_get_long_name(conn->agmt)); + return; + } +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + PR_Lock(conn->lock); + if (conn->linger_active) { + slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, +diff --git a/ldap/servers/plugins/replication/windows_inc_protocol.c b/ldap/servers/plugins/replication/windows_inc_protocol.c +index 1c07534e3..3d548e5ed 100644 +--- a/ldap/servers/plugins/replication/windows_inc_protocol.c ++++ b/ldap/servers/plugins/replication/windows_inc_protocol.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -48,7 +48,7 @@ typedef struct windows_inc_private + char *ruv; /* RUV on remote replica (use diff type for this? - ggood */ + Backoff_Timer *backoff; + Repl_Protocol *rp; +- PRLock *lock; ++ pthread_mutex_t *lock; + PRUint32 eventbits; + } windows_inc_private; + +@@ -96,7 +96,7 @@ typedef struct windows_inc_private + * don't see any updates for a period equal to this interval, + * we go ahead and start a replication session, just to be safe + */ +-#define MAX_WAIT_BETWEEN_SESSIONS PR_SecondsToInterval(60 * 5) /* 5 minutes */ ++#define MAX_WAIT_BETWEEN_SESSIONS 300 /* 5 minutes */ + /* + * tests if the protocol has been shutdown and we need to quit + * event_occurred resets the bits in the bit flag, so whoever tests for shutdown +@@ -108,7 +108,7 @@ typedef struct windows_inc_private + /* Forward declarations */ + static PRUint32 event_occurred(Private_Repl_Protocol *prp, PRUint32 event); + static void reset_events(Private_Repl_Protocol *prp); +-static void protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration); ++static void protocol_sleep(Private_Repl_Protocol *prp, int32_t duration); + static int send_updates(Private_Repl_Protocol *prp, RUV *ruv, PRUint32 *num_changes_sent, int do_send); + static void windows_inc_backoff_expired(time_t timer_fire_time, void *arg); + static int windows_examine_update_vector(Private_Repl_Protocol *prp, RUV *ruv); +@@ -143,13 +143,11 @@ windows_inc_delete(Private_Repl_Protocol **prpp) + (*prpp)->stopped = 1; + (*prpp)->stop(*prpp); + } +- if ((*prpp)->lock) { +- PR_DestroyLock((*prpp)->lock); +- (*prpp)->lock = NULL; ++ if (&((*prpp)->lock)) { ++ pthread_mutex_destroy(&((*prpp)->lock)); + } +- if ((*prpp)->cvar) { +- PR_DestroyCondVar((*prpp)->cvar); +- (*prpp)->cvar = NULL; ++ if (&((*prpp)->cvar)) { ++ pthread_cond_destroy(&(*prpp)->cvar); + } + slapi_ch_free((void **)&(*prpp)->private); + slapi_ch_free((void **)prpp); +@@ -360,7 +358,7 @@ 
windows_inc_run(Private_Repl_Protocol *prp) + } else if (event_occurred(prp, EVENT_TRIGGERING_CRITERIA_MET)) /* change available */ + { + /* just ignore it and go to sleep */ +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } else if ((e1 = event_occurred(prp, EVENT_WINDOW_CLOSED)) || + event_occurred(prp, EVENT_BACKOFF_EXPIRED)) { + /* this events - should not occur - log a warning and go to sleep */ +@@ -370,18 +368,18 @@ windows_inc_run(Private_Repl_Protocol *prp) + agmt_get_long_name(prp->agmt), + e1 ? event2name(EVENT_WINDOW_CLOSED) : event2name(EVENT_BACKOFF_EXPIRED), + state2name(current_state)); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } else if (event_occurred(prp, EVENT_RUN_DIRSYNC)) /* periodic_dirsync */ + { + /* just ignore it and go to sleep */ +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } else { + /* wait until window opens or an event occurs */ + slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, + "windows_inc_run - %s: " + "Waiting for update window to open\n", + agmt_get_long_name(prp->agmt)); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } + break; + +@@ -536,7 +534,7 @@ windows_inc_run(Private_Repl_Protocol *prp) + } + next_state = STATE_BACKOFF; + backoff_reset(prp_priv->backoff, windows_inc_backoff_expired, (void *)prp); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + use_busy_backoff_timer = PR_FALSE; + } + break; +@@ -605,7 +603,7 @@ windows_inc_run(Private_Repl_Protocol *prp) + agmt_get_long_name(prp->agmt), + next_fire_time - now); + +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } else { + /* Destroy the backoff timer, since we won't need it anymore */ + backoff_delete(&prp_priv->backoff); +@@ -624,7 +622,7 @@ windows_inc_run(Private_Repl_Protocol *prp) + next_state = STATE_READY_TO_ACQUIRE; + } else { + /* ignore changes and go to sleep */ +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } + } else if (event_occurred(prp, EVENT_WINDOW_OPENED)) { + /* this should never happen - log an error and go to sleep */ +@@ -632,7 +630,7 @@ windows_inc_run(Private_Repl_Protocol *prp) + "event %s should not occur in state %s; going to sleep\n", + agmt_get_long_name(prp->agmt), + event2name(EVENT_WINDOW_OPENED), state2name(current_state)); +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + } + break; + case STATE_SENDING_UPDATES: +@@ -856,7 +854,7 @@ windows_inc_run(Private_Repl_Protocol *prp) + reset_events(prp); + } + +- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT); ++ protocol_sleep(prp, 0); + break; + + case STATE_STOP_NORMAL_TERMINATION: +@@ -891,21 +889,29 @@ windows_inc_run(Private_Repl_Protocol *prp) + * Go to sleep until awakened. + */ + static void +-protocol_sleep(Private_Repl_Protocol *prp, PRIntervalTime duration) ++protocol_sleep(Private_Repl_Protocol *prp, int32_t duration) + { + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> protocol_sleep\n"); + PR_ASSERT(NULL != prp); +- PR_Lock(prp->lock); ++ pthread_mutex_lock(&(prp->lock)); + /* we should not go to sleep if there are events available to be processed. 
+ Otherwise, we can miss the event that suppose to wake us up */
+- if (prp->eventbits == 0)
+- PR_WaitCondVar(prp->cvar, duration);
+- else {
++ if (prp->eventbits == 0) {
++ if (duration > 0) {
++ struct timespec current_time = {0};
++ /* get the current monotonic time and add our interval */
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += duration;
++ pthread_cond_timedwait(&(prp->cvar), &(prp->lock), &current_time);
++ } else {
++ pthread_cond_wait(&(prp->cvar), &(prp->lock));
++ }
++ } else {
+ slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name,
+ "protocol_sleep - %s: Can't go to sleep: event bits - %x\n",
+ agmt_get_long_name(prp->agmt), prp->eventbits);
+ }
+- PR_Unlock(prp->lock);
++ pthread_mutex_unlock(&(prp->lock));
+ slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= protocol_sleep\n");
+ }
+
+@@ -921,10 +927,10 @@ event_notify(Private_Repl_Protocol *prp, PRUint32 event)
+ {
+ slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> event_notify\n");
+ PR_ASSERT(NULL != prp);
+- PR_Lock(prp->lock);
++ pthread_mutex_lock(&(prp->lock));
+ prp->eventbits |= event;
+- PR_NotifyCondVar(prp->cvar);
+- PR_Unlock(prp->lock);
++ pthread_cond_signal(&(prp->cvar));
++ pthread_mutex_unlock(&(prp->lock));
+ slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= event_notify\n");
+ }
+
+@@ -941,10 +947,10 @@ event_occurred(Private_Repl_Protocol *prp, PRUint32 event)
+ slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> event_occurred\n");
+
+ PR_ASSERT(NULL != prp);
+- PR_Lock(prp->lock);
++ pthread_mutex_lock(&(prp->lock));
+ return_value = (prp->eventbits & event);
+ prp->eventbits &= ~event; /* Clear event */
+- PR_Unlock(prp->lock);
++ pthread_mutex_unlock(&(prp->lock));
+ slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= event_occurred\n");
+ return return_value;
+ }
+@@ -954,9 +960,9 @@ reset_events(Private_Repl_Protocol *prp)
+ {
+ slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> reset_events\n");
+ PR_ASSERT(NULL != prp);
+- PR_Lock(prp->lock);
++ pthread_mutex_lock(&(prp->lock));
+ prp->eventbits = 0;
+- PR_Unlock(prp->lock);
++ pthread_mutex_unlock(&(prp->lock));
+ slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= reset_events\n");
+ }
+
+@@ -1416,6 +1422,7 @@ Windows_Inc_Protocol_new(Repl_Protocol *rp)
+ {
+ windows_inc_private *rip = NULL;
+ Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol));
++ pthread_condattr_t cattr;
+
+ slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> Windows_Inc_Protocol_new\n");
+
+@@ -1429,12 +1436,19 @@ Windows_Inc_Protocol_new(Repl_Protocol *rp)
+ prp->notify_window_closed = windows_inc_notify_window_closed;
+ prp->update_now = windows_inc_update_now;
+ prp->replica = prot_get_replica(rp);
+- if ((prp->lock = PR_NewLock()) == NULL) {
++ if (pthread_mutex_init(&(prp->lock), NULL) != 0) {
++ goto loser;
++ }
++ if (pthread_condattr_init(&cattr) != 0) {
++ goto loser;
++ }
++ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) {
+ goto loser;
+ }
+- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) {
++ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) {
+ goto loser;
+ }
++ pthread_condattr_destroy(&cattr); /* no longer needed */
+ prp->stopped = 0;
+ prp->terminate = 0;
+ prp->eventbits = 0;
+diff --git a/ldap/servers/plugins/replication/windows_tot_protocol.c b/ldap/servers/plugins/replication/windows_tot_protocol.c
+index da244c166..f67e4dbd2 100644
+---
a/ldap/servers/plugins/replication/windows_tot_protocol.c ++++ b/ldap/servers/plugins/replication/windows_tot_protocol.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -326,6 +326,7 @@ Windows_Tot_Protocol_new(Repl_Protocol *rp) + { + windows_tot_private *rip = NULL; + Private_Repl_Protocol *prp = (Private_Repl_Protocol *)slapi_ch_calloc(1, sizeof(Private_Repl_Protocol)); ++ pthread_condattr_t cattr; + + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> Windows_Tot_Protocol_new\n"); + +@@ -339,12 +340,19 @@ Windows_Tot_Protocol_new(Repl_Protocol *rp) + prp->notify_window_closed = windows_tot_noop; + prp->replica = prot_get_replica(rp); + prp->update_now = windows_tot_noop; +- if ((prp->lock = PR_NewLock()) == NULL) { ++ if (pthread_mutex_init(&(prp->lock), NULL) != 0) { + goto loser; + } +- if ((prp->cvar = PR_NewCondVar(prp->lock)) == NULL) { ++ if (pthread_condattr_init(&cattr) != 0) { + goto loser; + } ++ if (pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC) != 0) { ++ goto loser; ++ } ++ if (pthread_cond_init(&(prp->cvar), &cattr) != 0) { ++ goto loser; ++ } ++ pthread_condattr_destroy(&cattr); + prp->stopped = 1; + prp->terminate = 0; + prp->eventbits = 0; +@@ -373,13 +381,11 @@ windows_tot_delete(Private_Repl_Protocol **prpp) + (*prpp)->stop(*prpp); + } + /* Then, delete all resources used by the protocol */ +- if ((*prpp)->lock) { +- PR_DestroyLock((*prpp)->lock); +- (*prpp)->lock = NULL; ++ if (&((*prpp)->lock)) { ++ pthread_mutex_destroy(&((*prpp)->lock)); + } +- if ((*prpp)->cvar) { +- PR_DestroyCondVar((*prpp)->cvar); +- (*prpp)->cvar = NULL; ++ if (&((*prpp)->cvar)) { ++ pthread_cond_destroy(&(*prpp)->cvar); + } + slapi_ch_free((void **)&(*prpp)->private); + slapi_ch_free((void **)prpp); +diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c +index d031dc3f8..a3e16c4e1 100644 +--- a/ldap/servers/plugins/retrocl/retrocl_trim.c ++++ b/ldap/servers/plugins/retrocl/retrocl_trim.c +@@ -241,7 +241,7 @@ trim_changelog(void) + int me, lt; + + +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + + PR_Lock(ts.ts_s_trim_mutex); + me = ts.ts_c_max_age; +diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c +index de99ba233..3d076a4cb 100644 +--- a/ldap/servers/plugins/roles/roles_cache.c ++++ b/ldap/servers/plugins/roles/roles_cache.c +@@ -343,7 +343,7 @@ roles_cache_create_suffix(Slapi_DN *sdn) + + slapi_lock_mutex(new_suffix->create_lock); + if (new_suffix->is_ready != 1) { +- slapi_wait_condvar(new_suffix->suffix_created, NULL); ++ slapi_wait_condvar_pt(new_suffix->suffix_created, new_suffix->create_lock, NULL); + } + slapi_unlock_mutex(new_suffix->create_lock); + +@@ -384,7 +384,7 @@ roles_cache_wait_on_change(void *arg) + test roles_def->keeprunning before + going to sleep. 
+ */ +- slapi_wait_condvar(roles_def->something_changed, NULL); ++ slapi_wait_condvar_pt(roles_def->something_changed, roles_def->change_lock, NULL); + + slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, "roles_cache_wait_on_change - notified\n"); + +diff --git a/ldap/servers/plugins/sync/sync.h b/ldap/servers/plugins/sync/sync.h +index 51d0da6e0..7241fddbf 100644 +--- a/ldap/servers/plugins/sync/sync.h ++++ b/ldap/servers/plugins/sync/sync.h +@@ -201,8 +201,8 @@ typedef struct sync_request_list + { + Slapi_RWLock *sync_req_rwlock; /* R/W lock struct to serialize access */ + SyncRequest *sync_req_head; /* Head of list */ +- PRLock *sync_req_cvarlock; /* Lock for cvar */ +- PRCondVar *sync_req_cvar; /* ps threads sleep on this */ ++ pthread_mutex_t sync_req_cvarlock; /* Lock for cvar */ ++ pthread_cond_t sync_req_cvar; /* ps threads sleep on this */ + int sync_req_max_persist; + int sync_req_cur_persist; + } SyncRequestList; +diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c +index 598c6868d..d13f142b0 100644 +--- a/ldap/servers/plugins/sync/sync_persist.c ++++ b/ldap/servers/plugins/sync/sync_persist.c +@@ -463,19 +463,40 @@ int + sync_persist_initialize(int argc, char **argv) + { + if (!SYNC_IS_INITIALIZED()) { ++ pthread_condattr_t sync_req_condAttr; /* cond var attribute */ ++ int rc = 0; ++ + sync_request_list = (SyncRequestList *)slapi_ch_calloc(1, sizeof(SyncRequestList)); + if ((sync_request_list->sync_req_rwlock = slapi_new_rwlock()) == NULL) { + slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_initialize - Cannot initialize lock structure(1).\n"); + return (-1); + } +- if ((sync_request_list->sync_req_cvarlock = PR_NewLock()) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_initialize - Cannot initialize lock structure(2).\n"); ++ if (pthread_mutex_init(&(sync_request_list->sync_req_cvarlock), NULL) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", ++ "Failed to create lock: error %d (%s)\n", ++ rc, strerror(rc)); ++ return (-1); ++ } ++ if ((rc = pthread_condattr_init(&sync_req_condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", ++ "Failed to create new condition attribute variable. error %d (%s)\n", ++ rc, strerror(rc)); + return (-1); + } +- if ((sync_request_list->sync_req_cvar = PR_NewCondVar(sync_request_list->sync_req_cvarlock)) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, SYNC_PLUGIN_SUBSYSTEM, "sync_persist_initialize - Cannot initialize condition variable.\n"); ++ if ((rc = pthread_condattr_setclock(&sync_req_condAttr, CLOCK_MONOTONIC)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", ++ "Cannot set condition attr clock. error %d (%s)\n", ++ rc, strerror(rc)); + return (-1); + } ++ if ((rc = pthread_cond_init(&(sync_request_list->sync_req_cvar), &sync_req_condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "sync_persist_initialize", ++ "Failed to create new condition variable. 
error %d (%s)\n",
++ rc, strerror(rc));
++ return (-1);
++ }
++ pthread_condattr_destroy(&sync_req_condAttr); /* no longer needed */
++
+ sync_request_list->sync_req_head = NULL;
+ sync_request_list->sync_req_cur_persist = 0;
+ sync_request_list->sync_req_max_persist = SYNC_MAX_CONCURRENT;
+@@ -617,8 +638,8 @@ sync_persist_terminate_all()
+ }
+
+ slapi_destroy_rwlock(sync_request_list->sync_req_rwlock);
+- PR_DestroyLock(sync_request_list->sync_req_cvarlock);
+- PR_DestroyCondVar(sync_request_list->sync_req_cvar);
++ pthread_mutex_destroy(&(sync_request_list->sync_req_cvarlock));
++ pthread_cond_destroy(&(sync_request_list->sync_req_cvar));
+
+ /* it frees the structures, just in case it remained connected sync_repl client */
+ for (req = sync_request_list->sync_req_head; NULL != req; req = next) {
+@@ -725,9 +746,9 @@ static void
+ sync_request_wakeup_all(void)
+ {
+ if (SYNC_IS_INITIALIZED()) {
+- PR_Lock(sync_request_list->sync_req_cvarlock);
+- PR_NotifyAllCondVar(sync_request_list->sync_req_cvar);
+- PR_Unlock(sync_request_list->sync_req_cvarlock);
++ pthread_mutex_lock(&(sync_request_list->sync_req_cvarlock));
++ pthread_cond_broadcast(&(sync_request_list->sync_req_cvar));
++ pthread_mutex_unlock(&(sync_request_list->sync_req_cvarlock));
+ }
+ }
+
+@@ -817,7 +838,7 @@ sync_send_results(void *arg)
+ goto done;
+ }
+
+- PR_Lock(sync_request_list->sync_req_cvarlock);
++ pthread_mutex_lock(&(sync_request_list->sync_req_cvarlock));
+
+ while ((conn_acq_flag == 0) && !req->req_complete && !plugin_closing) {
+ /* Check for an abandoned operation */
+@@ -833,7 +854,12 @@ sync_send_results(void *arg)
+ * connection code. Wake up every second to check if thread
+ * should terminate.
+ */
+- PR_WaitCondVar(sync_request_list->sync_req_cvar, PR_SecondsToInterval(1));
++ struct timespec current_time = {0};
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += 1;
++ pthread_cond_timedwait(&(sync_request_list->sync_req_cvar),
++ &(sync_request_list->sync_req_cvarlock),
++ &current_time);
+ } else {
+ /* dequeue the item */
+ int attrsonly;
+@@ -864,7 +890,7 @@ sync_send_results(void *arg)
+ * Send the result. Since send_ldap_search_entry can block for
+ * up to 30 minutes, we relinquish all locks before calling it.
+ */
+- PR_Unlock(sync_request_list->sync_req_cvarlock);
++ pthread_mutex_unlock(&(sync_request_list->sync_req_cvarlock));
+
+ /*
+ * The entry is in the right scope and matches the filter
+@@ -910,13 +936,13 @@ sync_send_results(void *arg)
+ ldap_controls_free(ectrls);
+ slapi_ch_array_free(noattrs);
+ }
+- PR_Lock(sync_request_list->sync_req_cvarlock);
++ pthread_mutex_lock(&(sync_request_list->sync_req_cvarlock));
+
+ /* Deallocate our wrapper for this entry */
+ sync_node_free(&qnode);
+ }
+ }
+- PR_Unlock(sync_request_list->sync_req_cvarlock);
++ pthread_mutex_unlock(&(sync_request_list->sync_req_cvarlock));
+
+ /* indicate the end of search */
+ sync_release_connection(req->req_pblock, conn, op, conn_acq_flag == 0);
+diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
+index 1e4830e99..ba783ee59 100644
+--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
+@@ -1,5 +1,5 @@
+ /** BEGIN COPYRIGHT BLOCK
+- * Copyright (C) 2019 Red Hat, Inc.
++ * Copyright (C) 2020 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+@@ -1429,21 +1429,22 @@ import_free_job(ImportJob *job) + * To avoid freeing fifo queue under bulk_import_queue use + * job lock to synchronize + */ +- if (job->wire_lock) +- PR_Lock(job->wire_lock); ++ if (&job->wire_lock) { ++ pthread_mutex_lock(&job->wire_lock); ++ } + + import_fifo_destroy(job); + +- if (job->wire_lock) +- PR_Unlock(job->wire_lock); ++ if (&job->wire_lock) { ++ pthread_mutex_unlock(&job->wire_lock); ++ } + } + +- if (NULL != job->uuid_namespace) ++ if (NULL != job->uuid_namespace) { + slapi_ch_free((void **)&job->uuid_namespace); +- if (job->wire_lock) +- PR_DestroyLock(job->wire_lock); +- if (job->wire_cv) +- PR_DestroyCondVar(job->wire_cv); ++ } ++ pthread_mutex_destroy(&job->wire_lock); ++ pthread_cond_destroy(&job->wire_cv); + slapi_ch_free((void **)&job->task_status); + } + +@@ -1777,7 +1778,7 @@ import_monitor_threads(ImportJob *job, int *status) + goto error_abort; + } + +- last_time = slapi_current_utc_time(); ++ last_time = slapi_current_rel_time_t(); + job->start_time = last_time; + import_clear_progress_history(job); + +@@ -1789,7 +1790,7 @@ import_monitor_threads(ImportJob *job, int *status) + + /* First calculate the time interval since last reported */ + if (0 == (count % display_interval)) { +- time_now = slapi_current_utc_time(); ++ time_now = slapi_current_rel_time_t(); + time_interval = time_now - last_time; + last_time = time_now; + /* Now calculate our rate of progress overall for this chunk */ +@@ -2232,7 +2233,7 @@ bdb_import_main(void *arg) + opstr = "Reindexing"; + } + PR_ASSERT(inst != NULL); +- beginning = slapi_current_utc_time(); ++ beginning = slapi_current_rel_time_t(); + + /* Decide which indexes are needed */ + if (job->flags & FLAG_INDEX_ATTRS) { +@@ -2251,9 +2252,9 @@ bdb_import_main(void *arg) + ret = import_fifo_init(job); + if (ret) { + if (!(job->flags & FLAG_USE_FILES)) { +- PR_Lock(job->wire_lock); +- PR_NotifyCondVar(job->wire_cv); +- PR_Unlock(job->wire_lock); ++ pthread_mutex_lock(&job->wire_lock); ++ pthread_cond_signal(&job->wire_cv); ++ pthread_mutex_unlock(&job->wire_lock); + } + goto error; + } +@@ -2315,9 +2316,9 @@ bdb_import_main(void *arg) + } else { + /* release the startup lock and let the entries start queueing up + * in for import */ +- PR_Lock(job->wire_lock); +- PR_NotifyCondVar(job->wire_cv); +- PR_Unlock(job->wire_lock); ++ pthread_mutex_lock(&job->wire_lock); ++ pthread_cond_signal(&job->wire_cv); ++ pthread_mutex_unlock(&job->wire_lock); + } + + /* Run as many passes as we need to complete the job or die honourably in +@@ -2499,7 +2500,7 @@ error: + import_log_notice(job, SLAPI_LOG_WARNING, "bdb_import_main", "Failed to close database"); + } + } +- end = slapi_current_utc_time(); ++ end = slapi_current_rel_time_t(); + if (verbose && (0 == ret)) { + int seconds_to_import = end - beginning; + size_t entries_processed = job->lead_ID - (job->starting_ID - 1); +@@ -3393,7 +3394,7 @@ import_mega_merge(ImportJob *job) + passes, (long unsigned int)job->number_indexers); + } + +- beginning = slapi_current_utc_time(); ++ beginning = slapi_current_rel_time_t(); + /* Iterate over the files */ + for (current_worker = job->worker_list; + (ret == 0) && (current_worker != NULL); +@@ -3405,9 +3406,9 @@ import_mega_merge(ImportJob *job) + time_t file_end = 0; + int key_count = 0; + +- file_beginning = slapi_current_utc_time(); ++ file_beginning = slapi_current_rel_time_t(); + ret = import_merge_one_file(current_worker, passes, &key_count); +- file_end = slapi_current_utc_time(); ++ file_end = slapi_current_rel_time_t(); + if 
(key_count == 0) { + import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "No files to merge for \"%s\".", + current_worker->index_info->name); +@@ -3426,7 +3427,7 @@ import_mega_merge(ImportJob *job) + } + } + +- end = slapi_current_utc_time(); ++ end = slapi_current_rel_time_t(); + if (0 == ret) { + int seconds_to_merge = end - beginning; + import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "Merging completed in %d seconds.", +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c +index 5c7d9c8f7..905a84e74 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c +@@ -1,5 +1,5 @@ + /** BEGIN COPYRIGHT BLOCK +- * Copyright (C) 2019 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -3151,8 +3151,9 @@ bulk_import_start(Slapi_PBlock *pb) + (1024 * 1024); + } + import_subcount_stuff_init(job->mothers); +- job->wire_lock = PR_NewLock(); +- job->wire_cv = PR_NewCondVar(job->wire_lock); ++ ++ pthread_mutex_init(&job->wire_lock, NULL); ++ pthread_cond_init(&job->wire_cv, NULL); + + /* COPIED from ldif2ldbm.c : */ + +@@ -3175,7 +3176,7 @@ bulk_import_start(Slapi_PBlock *pb) + + /* END OF COPIED SECTION */ + +- PR_Lock(job->wire_lock); ++ pthread_mutex_lock(&job->wire_lock); + vlv_init(job->inst); + + /* create thread for import_main, so we can return */ +@@ -3188,7 +3189,7 @@ bulk_import_start(Slapi_PBlock *pb) + slapi_log_err(SLAPI_LOG_ERR, "bulk_import_start", + "Unable to spawn import thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", + prerr, slapd_pr_strerror(prerr)); +- PR_Unlock(job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + ret = -2; + goto fail; + } +@@ -3204,8 +3205,8 @@ bulk_import_start(Slapi_PBlock *pb) + /* (don't want to send the success code back to the LDAP client until + * we're ready for the adds to start rolling in) + */ +- PR_WaitCondVar(job->wire_cv, PR_INTERVAL_NO_TIMEOUT); +- PR_Unlock(job->wire_lock); ++ pthread_cond_wait(&job->wire_cv, &job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + + return 0; + +@@ -3243,13 +3244,13 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) + return -1; + } + +- PR_Lock(job->wire_lock); ++ pthread_mutex_lock(&job->wire_lock); + /* Let's do this inside the lock !*/ + id = job->lead_ID + 1; + /* generate uniqueid if necessary */ + if (import_generate_uniqueid(job, entry) != UID_SUCCESS) { + import_abort_all(job, 1); +- PR_Unlock(job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + return -1; + } + +@@ -3258,7 +3259,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) + if ((ep == NULL) || (ep->ep_entry == NULL)) { + import_abort_all(job, 1); + backentry_free(&ep); /* release the backend wrapper, here */ +- PR_Unlock(job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + return -1; + } + +@@ -3304,7 +3305,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) + if (job->flags & FLAG_ABORT) { + backentry_clear_entry(ep); /* entry is released in the frontend on failure*/ + backentry_free(&ep); /* release the backend wrapper, here */ +- PR_Unlock(job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + return -2; + } + +@@ -3342,7 +3343,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) + /* entry is released in the frontend on failure*/ + backentry_clear_entry(ep); + backentry_free(&ep); /* release the backend wrapper */ +- 
PR_Unlock(job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + return -1; + } + sepp = PL_strchr(sepp + 1, ','); +@@ -3368,7 +3369,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) + (long unsigned int)newesize, (long unsigned int)job->fifo.bsize); + backentry_clear_entry(ep); /* entry is released in the frontend on failure*/ + backentry_free(&ep); /* release the backend wrapper, here */ +- PR_Unlock(job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + return -1; + } + /* Now check if fifo has enough space for the new entry */ +@@ -3394,7 +3395,7 @@ bulk_import_queue(ImportJob *job, Slapi_Entry *entry) + job->trailing_ID = id - job->fifo.size; + } + +- PR_Unlock(job->wire_lock); ++ pthread_mutex_unlock(&job->wire_lock); + return 0; + } + +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c +index 0ac3694b6..5d6010f46 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c +@@ -1,5 +1,5 @@ + /** BEGIN COPYRIGHT BLOCK +- * Copyright (C) 2019 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -270,10 +270,8 @@ bdb_instance_cleanup(struct ldbm_instance *inst) + slapi_ch_free_string(&inst_dirp); + } + slapi_destroy_rwlock(inst_env->bdb_env_lock); +- PR_DestroyCondVar(inst_env->bdb_thread_count_cv); +- inst_env->bdb_thread_count_cv = NULL; +- PR_DestroyLock(inst_env->bdb_thread_count_lock); +- inst_env->bdb_thread_count_lock = NULL; ++ pthread_mutex_destroy(&(inst_env->bdb_thread_count_lock)); ++ pthread_cond_destroy(&(inst_env->bdb_thread_count_cv)); + slapi_ch_free((void **)&inst->inst_db); + /* + slapi_destroy_rwlock(((bdb_db_env *)inst->inst_db)->bdb_env_lock); +diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +index 464f89f4d..6cccad8e6 100644 +--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c ++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +@@ -1,5 +1,5 @@ + /** BEGIN COPYRIGHT BLOCK +- * Copyright (C) 2019 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -52,16 +52,16 @@ + return. + */ + #define INCR_THREAD_COUNT(pEnv) \ +- PR_Lock(pEnv->bdb_thread_count_lock); \ ++ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); \ + ++pEnv->bdb_thread_count; \ +- PR_Unlock(pEnv->bdb_thread_count_lock) ++ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock) + + #define DECR_THREAD_COUNT(pEnv) \ +- PR_Lock(pEnv->bdb_thread_count_lock); \ ++ pthread_mutex_lock(&pEnv->bdb_thread_count_lock); \ + if (--pEnv->bdb_thread_count == 0) { \ +- PR_NotifyCondVar(pEnv->bdb_thread_count_cv); \ ++ pthread_cond_broadcast(&pEnv->bdb_thread_count_cv); \ + } \ +- PR_Unlock(pEnv->bdb_thread_count_lock) ++ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock) + + #define NEWDIR_MODE 0755 + #define DB_REGION_PREFIX "__db." 
+@@ -91,9 +91,12 @@ static int trans_batch_txn_max_sleep = 50; + static PRBool log_flush_thread = PR_FALSE; + static int txn_in_progress_count = 0; + static int *txn_log_flush_pending = NULL; +-static PRLock *sync_txn_log_flush = NULL; +-static PRCondVar *sync_txn_log_flush_done = NULL; +-static PRCondVar *sync_txn_log_do_flush = NULL; ++ ++static pthread_mutex_t sync_txn_log_flush; ++static pthread_cond_t sync_txn_log_flush_done; ++static pthread_cond_t sync_txn_log_do_flush; ++ ++ + static int bdb_db_remove_ex(bdb_db_env *env, char const path[], char const dbName[], PRBool use_lock); + static int bdb_restore_file_check(struct ldbminfo *li); + +@@ -181,12 +184,12 @@ bdb_set_batch_transactions(void *arg __attribute__((unused)), void *value, char + } else { + if (val == 0) { + if (log_flush_thread) { +- PR_Lock(sync_txn_log_flush); ++ pthread_mutex_lock(&sync_txn_log_flush); + } + trans_batch_limit = FLUSH_REMOTEOFF; + if (log_flush_thread) { + log_flush_thread = PR_FALSE; +- PR_Unlock(sync_txn_log_flush); ++ pthread_mutex_unlock(&sync_txn_log_flush); + } + } else if (val > 0) { + if (trans_batch_limit == FLUSH_REMOTEOFF) { +@@ -217,12 +220,12 @@ bdb_set_batch_txn_min_sleep(void *arg __attribute__((unused)), void *value, char + } else { + if (val == 0) { + if (log_flush_thread) { +- PR_Lock(sync_txn_log_flush); ++ pthread_mutex_lock(&sync_txn_log_flush); + } + trans_batch_txn_min_sleep = FLUSH_REMOTEOFF; + if (log_flush_thread) { + log_flush_thread = PR_FALSE; +- PR_Unlock(sync_txn_log_flush); ++ pthread_mutex_unlock(&sync_txn_log_flush); + } + } else if (val > 0) { + if (trans_batch_txn_min_sleep == FLUSH_REMOTEOFF || !log_flush_thread) { +@@ -249,12 +252,12 @@ bdb_set_batch_txn_max_sleep(void *arg __attribute__((unused)), void *value, char + } else { + if (val == 0) { + if (log_flush_thread) { +- PR_Lock(sync_txn_log_flush); ++ pthread_mutex_lock(&sync_txn_log_flush); + } + trans_batch_txn_max_sleep = FLUSH_REMOTEOFF; + if (log_flush_thread) { + log_flush_thread = PR_FALSE; +- PR_Unlock(sync_txn_log_flush); ++ pthread_mutex_unlock(&sync_txn_log_flush); + } + } else if (val > 0) { + if (trans_batch_txn_max_sleep == FLUSH_REMOTEOFF || !log_flush_thread) { +@@ -725,10 +728,9 @@ bdb_free_env(void **arg) + slapi_destroy_rwlock((*env)->bdb_env_lock); + (*env)->bdb_env_lock = NULL; + } +- PR_DestroyCondVar((*env)->bdb_thread_count_cv); +- (*env)->bdb_thread_count_cv = NULL; +- PR_DestroyLock((*env)->bdb_thread_count_lock); +- (*env)->bdb_thread_count_lock = NULL; ++ pthread_mutex_destroy(&((*env)->bdb_thread_count_lock)); ++ pthread_cond_destroy(&((*env)->bdb_thread_count_cv)); ++ + slapi_ch_free((void **)env); + return; + } +@@ -746,11 +748,15 @@ bdb_make_env(bdb_db_env **env, struct ldbminfo *li) + int ret; + Object *inst_obj; + ldbm_instance *inst = NULL; ++ pthread_condattr_t condAttr; + + pEnv = (bdb_db_env *)slapi_ch_calloc(1, sizeof(bdb_db_env)); + +- pEnv->bdb_thread_count_lock = PR_NewLock(); +- pEnv->bdb_thread_count_cv = PR_NewCondVar(pEnv->bdb_thread_count_lock); ++ pthread_mutex_init(&pEnv->bdb_thread_count_lock, NULL); ++ pthread_condattr_init(&condAttr); ++ pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC); ++ pthread_cond_init(&pEnv->bdb_thread_count_cv, &condAttr); ++ pthread_condattr_destroy(&condAttr); /* no longer needed */ + + if ((ret = db_env_create(&pEnv->bdb_DB_ENV, 0)) != 0) { + slapi_log_err(SLAPI_LOG_ERR, +@@ -2013,9 +2019,9 @@ bdb_pre_close(struct ldbminfo *li) + return; + + /* first, see if there are any housekeeping threads running */ +- 
PR_Lock(pEnv->bdb_thread_count_lock);
++ pthread_mutex_lock(&pEnv->bdb_thread_count_lock);
+ threadcount = pEnv->bdb_thread_count;
+- PR_Unlock(pEnv->bdb_thread_count_lock);
++ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock);
+
+ if (threadcount) {
+ PRIntervalTime cvwaittime = PR_MillisecondsToInterval(DBLAYER_SLEEP_INTERVAL * 100);
+@@ -2023,7 +2029,7 @@ bdb_pre_close(struct ldbminfo *li)
+ /* Print handy-dandy log message */
+ slapi_log_err(SLAPI_LOG_INFO, "bdb_pre_close", "Waiting for %d database threads to stop\n",
+ threadcount);
+- PR_Lock(pEnv->bdb_thread_count_lock);
++ pthread_mutex_lock(&pEnv->bdb_thread_count_lock);
+ /* Tell them to stop - we wait until the last possible moment to invoke
+ this. If we do this much sooner than this, we could find ourselves
+ in a situation where the threads see the stop_threads and exit before
+@@ -2034,6 +2040,7 @@ bdb_pre_close(struct ldbminfo *li)
+ conf->bdb_stop_threads = 1;
+ /* Wait for them to exit */
+ while (pEnv->bdb_thread_count > 0) {
++ struct timespec current_time = {0};
+ PRIntervalTime before = PR_IntervalNow();
+ /* There are 3 ways to wake up from this WaitCondVar:
+ 1) The last database thread exits and calls NotifyCondVar - thread_count
+@@ -2041,7 +2048,9 @@
+ 2) Timeout - in this case, thread_count will be > 0 - bad
+ 3) A bad error occurs - bad - will be reported as a timeout
+ */
+- PR_WaitCondVar(pEnv->bdb_thread_count_cv, cvwaittime);
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += DBLAYER_SLEEP_INTERVAL / 10; /* cvwaittime but in seconds */
++ pthread_cond_timedwait(&pEnv->bdb_thread_count_cv, &pEnv->bdb_thread_count_lock, &current_time);
+ if (pEnv->bdb_thread_count > 0) {
+ /* still at least 1 thread running - see if this is a timeout */
+ if ((PR_IntervalNow() - before) >= cvwaittime) {
+@@ -2052,7 +2061,7 @@ bdb_pre_close(struct ldbminfo *li)
+ /* else just a spurious interrupt */
+ }
+ }
+- PR_Unlock(pEnv->bdb_thread_count_lock);
++ pthread_mutex_unlock(&pEnv->bdb_thread_count_lock);
+ if (timedout) {
+ slapi_log_err(SLAPI_LOG_ERR,
+ "bdb_pre_close", "Timeout after [%d] milliseconds; leave %d database thread(s)...\n",
+@@ -2645,12 +2654,12 @@ bdb_txn_begin(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool
+ and new parent for any nested transactions created */
+ if (use_lock && log_flush_thread) {
+ int txn_id = new_txn.back_txn_txn->id(new_txn.back_txn_txn);
+- PR_Lock(sync_txn_log_flush);
++ pthread_mutex_lock(&sync_txn_log_flush);
+ txn_in_progress_count++;
+ slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_begin_ext",
+ "Batchcount: %d, txn_in_progress: %d, curr_txn: %x\n",
+ trans_batch_count, txn_in_progress_count, txn_id);
+- PR_Unlock(sync_txn_log_flush);
++ pthread_mutex_unlock(&sync_txn_log_flush);
+ }
+ dblayer_push_pvt_txn(&new_txn);
+ if (txn) {
+@@ -2717,11 +2726,11 @@ bdb_txn_commit(struct ldbminfo *li, back_txn *txn, PRBool use_lock)
+ if ((conf->bdb_durable_transactions) && use_lock) {
+ if (trans_batch_limit > 0 && log_flush_thread) {
+ /* let log_flush thread do the flushing */
+- PR_Lock(sync_txn_log_flush);
++ pthread_mutex_lock(&sync_txn_log_flush);
+ txn_batch_slot = trans_batch_count++;
+ txn_log_flush_pending[txn_batch_slot] = txn_id;
+- slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", "(before notify): batchcount: %d, "
+- "txn_in_progress: %d, curr_txn: %x\n",
++ slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext",
++ "(before notify): batchcount: %d, txn_in_progress: %d, curr_txn: %x\n",
+
trans_batch_count, + txn_in_progress_count, txn_id); + /* +@@ -2731,8 +2740,9 @@ bdb_txn_commit(struct ldbminfo *li, back_txn *txn, PRBool use_lock) + * - there is no other outstanding txn + */ + if (trans_batch_count > trans_batch_limit || +- trans_batch_count == txn_in_progress_count) { +- PR_NotifyCondVar(sync_txn_log_do_flush); ++ trans_batch_count == txn_in_progress_count) ++ { ++ pthread_cond_signal(&sync_txn_log_do_flush); + } + /* + * We need to wait until the txn has been flushed before continuing +@@ -2740,14 +2750,14 @@ bdb_txn_commit(struct ldbminfo *li, back_txn *txn, PRBool use_lock) + * PR_WaitCondvar releases and reaquires the lock + */ + while (txn_log_flush_pending[txn_batch_slot] == txn_id) { +- PR_WaitCondVar(sync_txn_log_flush_done, PR_INTERVAL_NO_TIMEOUT); ++ pthread_cond_wait(&sync_txn_log_flush_done, &sync_txn_log_flush); + } + txn_in_progress_count--; +- slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", "(before unlock): batchcount: %d, " +- "txn_in_progress: %d, curr_txn %x\n", ++ slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", ++ "(before unlock): batchcount: %d, txn_in_progress: %d, curr_txn %x\n", + trans_batch_count, + txn_in_progress_count, txn_id); +- PR_Unlock(sync_txn_log_flush); ++ pthread_mutex_unlock(&sync_txn_log_flush); + } else if (trans_batch_limit == FLUSH_REMOTEOFF) { /* user remotely turned batching off */ + LOG_FLUSH(pEnv->bdb_DB_ENV, 0); + } +@@ -2799,9 +2809,9 @@ bdb_txn_abort(struct ldbminfo *li, back_txn *txn, PRBool use_lock) + int txn_id = db_txn->id(db_txn); + bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env; + if (use_lock && log_flush_thread) { +- PR_Lock(sync_txn_log_flush); ++ pthread_mutex_lock(&sync_txn_log_flush); + txn_in_progress_count--; +- PR_Unlock(sync_txn_log_flush); ++ pthread_mutex_unlock(&sync_txn_log_flush); + slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_abort_ext", + "Batchcount: %d, txn_in_progress: %d, curr_txn: %x\n", + trans_batch_count, txn_in_progress_count, txn_id); +@@ -3420,11 +3430,18 @@ bdb_start_log_flush_thread(struct ldbminfo *li) + int max_threads = config_get_threadnumber(); + + if ((BDB_CONFIG(li)->bdb_durable_transactions) && +- (BDB_CONFIG(li)->bdb_enable_transactions) && (trans_batch_limit > 0)) { ++ (BDB_CONFIG(li)->bdb_enable_transactions) && (trans_batch_limit > 0)) ++ { + /* initialize the synchronization objects for the log_flush and worker threads */ +- sync_txn_log_flush = PR_NewLock(); +- sync_txn_log_flush_done = PR_NewCondVar(sync_txn_log_flush); +- sync_txn_log_do_flush = PR_NewCondVar(sync_txn_log_flush); ++ pthread_condattr_t condAttr; ++ ++ pthread_mutex_init(&sync_txn_log_flush, NULL); ++ pthread_condattr_init(&condAttr); ++ pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC); ++ pthread_cond_init(&sync_txn_log_do_flush, &condAttr); ++ pthread_cond_init(&sync_txn_log_flush_done, NULL); ++ pthread_condattr_destroy(&condAttr); /* no longer needed */ ++ + txn_log_flush_pending = (int *)slapi_ch_malloc(max_threads * sizeof(int)); + log_flush_thread = PR_TRUE; + if (NULL == PR_CreateThread(PR_USER_THREAD, +@@ -3451,7 +3468,7 @@ bdb_start_log_flush_thread(struct ldbminfo *li) + static int + log_flush_threadmain(void *param) + { +- PRIntervalTime interval_wait, interval_flush, interval_def; ++ PRIntervalTime interval_flush, interval_def; + PRIntervalTime last_flush = 0; + int i; + int do_flush = 0; +@@ -3464,7 +3481,6 @@ log_flush_threadmain(void *param) + INCR_THREAD_COUNT(pEnv); + + interval_flush = PR_MillisecondsToInterval(trans_batch_txn_min_sleep); +- 
interval_wait = PR_MillisecondsToInterval(trans_batch_txn_max_sleep);
+ interval_def = PR_MillisecondsToInterval(300); /*used while no txn or txn batching */
+ /* LK this is only needed if online change of
+ * of txn config is supported ???
+@@ -3473,10 +3489,10 @@ log_flush_threadmain(void *param)
+ if (BDB_CONFIG(li)->bdb_enable_transactions) {
+ if (trans_batch_limit > 0) {
+ /* synchronize flushing thread with workers */
+- PR_Lock(sync_txn_log_flush);
++ pthread_mutex_lock(&sync_txn_log_flush);
+ if (!log_flush_thread) {
+ /* batch transactions was disabled while waiting for the lock */
+- PR_Unlock(sync_txn_log_flush);
++ pthread_mutex_unlock(&sync_txn_log_flush);
+ break;
+ }
+ slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(in loop): batchcount: %d, "
+@@ -3502,20 +3518,31 @@ log_flush_threadmain(void *param)
+ slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(before notify): batchcount: %d, "
+ "txn_in_progress: %d\n",
+ trans_batch_count, txn_in_progress_count);
+- PR_NotifyAllCondVar(sync_txn_log_flush_done);
++ pthread_cond_broadcast(&sync_txn_log_flush_done);
+ }
+ /* wait until flushing conditions are met */
+ while ((trans_batch_count == 0) ||
+- (trans_batch_count < trans_batch_limit && trans_batch_count < txn_in_progress_count)) {
++ (trans_batch_count < trans_batch_limit && trans_batch_count < txn_in_progress_count))
++ {
++ struct timespec current_time = {0};
++ /* convert milliseconds to nano seconds */
++ int32_t nano_sec_sleep = trans_batch_txn_max_sleep * 1000000;
+ if (BDB_CONFIG(li)->bdb_stop_threads)
+ break;
+ if (PR_IntervalNow() - last_flush > interval_flush) {
+ do_flush = 1;
+ break;
+ }
+- PR_WaitCondVar(sync_txn_log_do_flush, interval_wait);
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ if (current_time.tv_nsec + nano_sec_sleep > 1000000000) {
++ /* nano sec will overflow, just bump the seconds */
++ current_time.tv_sec++;
++ } else {
++ current_time.tv_nsec += nano_sec_sleep;
++ }
++ pthread_cond_timedwait(&sync_txn_log_do_flush, &sync_txn_log_flush, &current_time);
+ }
+- PR_Unlock(sync_txn_log_flush);
++ pthread_mutex_unlock(&sync_txn_log_flush);
+ slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(wakeup): batchcount: %d, "
+ "txn_in_progress: %d\n",
+ trans_batch_count, txn_in_progress_count);
+diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h
+index bf00d2e9a..6bb04d21a 100644
+--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h
++++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h
+@@ -1,5 +1,5 @@
+ /** BEGIN COPYRIGHT BLOCK
+- * Copyright (C) 2019 Red Hat, Inc.
++ * Copyright (C) 2020 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+@@ -18,10 +18,10 @@ typedef struct bdb_db_env + Slapi_RWLock *bdb_env_lock; + int bdb_openflags; + int bdb_priv_flags; +- PRLock *bdb_thread_count_lock; /* lock for thread_count_cv */ +- PRCondVar *bdb_thread_count_cv; /* condition variable for housekeeping thread shutdown */ +- PRInt32 bdb_thread_count; /* Tells us how many threads are running, +- * used to figure out when they're all stopped */ ++ pthread_mutex_t bdb_thread_count_lock; /* lock for thread_count_cv */ ++ pthread_cond_t bdb_thread_count_cv; /* condition variable for housekeeping thread shutdown */ ++ PRInt32 bdb_thread_count; /* Tells us how many threads are running, ++ * used to figure out when they're all stopped */ + } bdb_db_env; + + /* structure which holds our stuff */ +diff --git a/ldap/servers/slapd/back-ldbm/import.h b/ldap/servers/slapd/back-ldbm/import.h +index db77a602b..bfa74ed49 100644 +--- a/ldap/servers/slapd/back-ldbm/import.h ++++ b/ldap/servers/slapd/back-ldbm/import.h +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -130,8 +130,8 @@ typedef struct + char **exclude_subtrees; /* list of subtrees to NOT import */ + Fifo fifo; /* entry fifo for indexing */ + char *task_status; /* transient state info for the end-user */ +- PRLock *wire_lock; /* lock for serializing wire imports */ +- PRCondVar *wire_cv; /* ... and ordering the startup */ ++ pthread_mutex_t wire_lock; /* lock for serializing wire imports */ ++ pthread_cond_t wire_cv; /* ... and ordering the startup */ + PRThread *main_thread; /* for FRI: import_main() thread id */ + int encrypt; + Slapi_Value *usn_value; /* entryusn for import */ +diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c +index 88b7dc3be..1883fe711 100644 +--- a/ldap/servers/slapd/connection.c ++++ b/ldap/servers/slapd/connection.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -64,8 +64,10 @@ struct Slapi_work_q + + static struct Slapi_work_q *head_work_q = NULL; /* global work queue head */ + static struct Slapi_work_q *tail_work_q = NULL; /* global work queue tail */ +-static PRLock *work_q_lock = NULL; /* protects head_conn_q and tail_conn_q */ +-static PRCondVar *work_q_cv; /* used by operation threads to wait for work - when there is a conn in the queue waiting to be processed */ ++static pthread_mutex_t work_q_lock; /* protects head_conn_q and tail_conn_q */ ++static pthread_cond_t work_q_cv; /* used by operation threads to wait for work - ++ * when there is a conn in the queue waiting ++ * to be processed */ + static PRInt32 work_q_size; /* size of conn_q */ + static PRInt32 work_q_size_max; /* high water mark of work_q_size */ + #define WORK_Q_EMPTY (work_q_size == 0) +@@ -409,7 +411,7 @@ connection_reset(Connection *conn, int ns, PRNetAddr *from, int fromLen __attrib + + /* initialize the remaining connection fields */ + conn->c_ldapversion = LDAP_VERSION3; +- conn->c_starttime = slapi_current_utc_time(); ++ conn->c_starttime = slapi_current_rel_time_t(); + conn->c_idlesince = conn->c_starttime; + conn->c_flags = is_SSL ? 
CONN_FLAG_SSL : 0; + conn->c_authtype = slapi_ch_strdup(SLAPD_AUTH_NONE); +@@ -424,32 +426,40 @@ connection_reset(Connection *conn, int ns, PRNetAddr *from, int fromLen __attrib + void + init_op_threads() + { +- int i; +- PRErrorCode errorCode; +- int max_threads = config_get_threadnumber(); +- /* Initialize the locks and cv */ ++ pthread_condattr_t condAttr; ++ int32_t max_threads = config_get_threadnumber(); ++ int32_t rc; + +- if ((work_q_lock = PR_NewLock()) == NULL) { +- errorCode = PR_GetError(); +- slapi_log_err(SLAPI_LOG_ERR, +- "init_op_threads", "PR_NewLock failed for work_q_lock, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", +- errorCode, slapd_pr_strerror(errorCode)); ++ /* Initialize the locks and cv */ ++ if ((rc = pthread_mutex_init(&work_q_lock, NULL)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", ++ "Cannot create new lock. error %d (%s)\n", ++ rc, strerror(rc)); + exit(-1); + } +- +- if ((work_q_cv = PR_NewCondVar(work_q_lock)) == NULL) { +- errorCode = PR_GetError(); +- slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", "PR_NewCondVar failed for work_q_cv, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", +- errorCode, slapd_pr_strerror(errorCode)); ++ if ((rc = pthread_condattr_init(&condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", ++ "Cannot create new condition attribute variable. error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(-1); ++ } else if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", ++ "Cannot set condition attr clock. error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(-1); ++ } else if ((rc = pthread_cond_init(&work_q_cv, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", ++ "Cannot create new condition variable. 
error %d (%s)\n",
++ rc, strerror(rc));
+ exit(-1);
+ }
++ pthread_condattr_destroy(&condAttr); /* no longer needed */
+
+ work_q_stack = PR_CreateStack("connection_work_q");
+-
+ op_stack = PR_CreateStack("connection_operation");
+
+ /* start the operation threads */
+- for (i = 0; i < max_threads; i++) {
++ for (size_t i = 0; i < max_threads; i++) {
+ PR_SetConcurrency(4);
+ if (PR_CreateThread(PR_USER_THREAD,
+ (VFP)(void *)connection_threadmain, NULL,
+@@ -457,7 +467,8 @@ init_op_threads()
+ PR_UNJOINABLE_THREAD,
+ SLAPD_DEFAULT_THREAD_STACKSIZE) == NULL) {
+ int prerr = PR_GetError();
+- slapi_log_err(SLAPI_LOG_ERR, "init_op_threads", "PR_CreateThread failed, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
++ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads",
++ "PR_CreateThread failed, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+ prerr, slapd_pr_strerror(prerr));
+ } else {
+ g_incr_active_threadcnt();
+@@ -949,16 +960,23 @@ connection_make_new_pb(Slapi_PBlock *pb, Connection *conn)
+ }
+
+ int
+-connection_wait_for_new_work(Slapi_PBlock *pb, PRIntervalTime interval)
++connection_wait_for_new_work(Slapi_PBlock *pb, int32_t interval)
+ {
+ int ret = CONN_FOUND_WORK_TO_DO;
+ work_q_item *wqitem = NULL;
+ struct Slapi_op_stack *op_stack_obj = NULL;
+
+- PR_Lock(work_q_lock);
++ pthread_mutex_lock(&work_q_lock);
+
+ while (!op_shutdown && WORK_Q_EMPTY) {
+- PR_WaitCondVar(work_q_cv, interval);
++ if (interval == 0 ) {
++ pthread_cond_wait(&work_q_cv, &work_q_lock);
++ } else {
++ struct timespec current_time = {0};
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += interval;
++ pthread_cond_timedwait(&work_q_cv, &work_q_lock, &current_time);
++ }
+ }
+
+ if (op_shutdown) {
+@@ -975,7 +993,7 @@ connection_wait_for_new_work(Slapi_PBlock *pb, PRIntervalTime interval)
+ slapi_pblock_set(pb, SLAPI_OPERATION, op_stack_obj->op);
+ }
+
+- PR_Unlock(work_q_lock);
++ pthread_mutex_unlock(&work_q_lock);
+ return ret;
+ }
+
+@@ -1353,7 +1371,7 @@ connection_check_activity_level(Connection *conn)
+ /* store current count in the previous count slot */
+ conn->c_private->previous_op_count = current_count;
+ /* update the last checked time */
+- conn->c_private->previous_count_check_time = slapi_current_utc_time();
++ conn->c_private->previous_count_check_time = slapi_current_rel_time_t();
+ pthread_mutex_unlock(&(conn->c_mutex));
+ slapi_log_err(SLAPI_LOG_CONNS, "connection_check_activity_level", "conn %" PRIu64 " activity level = %d\n", conn->c_connid, delta_count);
+ }
+@@ -1463,7 +1481,7 @@ connection_threadmain()
+ {
+ Slapi_PBlock *pb = slapi_pblock_new();
+ /* wait forever for new pb until one is available or shutdown */
+- PRIntervalTime interval = PR_INTERVAL_NO_TIMEOUT; /* PR_SecondsToInterval(10); */
++ int32_t interval = 0; /* used be 10 seconds */
+ Connection *conn = NULL;
+ Operation *op;
+ ber_tag_t tag = 0;
+@@ -1503,7 +1521,7 @@ connection_threadmain()
+
+ switch (ret) {
+ case CONN_NOWORK:
+- PR_ASSERT(interval != PR_INTERVAL_NO_TIMEOUT); /* this should never happen with PR_INTERVAL_NO_TIMEOUT */
++ PR_ASSERT(interval != 0); /* this should never happen */
+ continue;
+ case CONN_SHUTDOWN:
+ slapi_log_err(SLAPI_LOG_TRACE, "connection_threadmain",
+@@ -1610,7 +1628,7 @@ connection_threadmain()
+ conn->c_opsinitiated, conn->c_refcnt, conn->c_flags);
+ }
+
+- curtime = slapi_current_utc_time();
++ curtime = slapi_current_rel_time_t();
+ #define DB_PERF_TURBO 1
+ #if defined(DB_PERF_TURBO)
+ /* If it's been a while since we last did it ...
*/
+@@ -1914,7 +1932,7 @@ add_work_q(work_q_item *wqitem, struct Slapi_op_stack *op_stack_obj)
+ new_work_q->op_stack_obj = op_stack_obj;
+ new_work_q->next_work_item = NULL;
+
+- PR_Lock(work_q_lock);
++ pthread_mutex_lock(&work_q_lock);
+ if (tail_work_q == NULL) {
+ tail_work_q = new_work_q;
+ head_work_q = new_work_q;
+@@ -1926,8 +1944,8 @@ add_work_q(work_q_item *wqitem, struct Slapi_op_stack *op_stack_obj)
+ if (work_q_size > work_q_size_max) {
+ work_q_size_max = work_q_size;
+ }
+- PR_NotifyCondVar(work_q_cv); /* notify waiters in connection_wait_for_new_work */
+- PR_Unlock(work_q_lock);
++ pthread_cond_signal(&work_q_cv); /* notify waiters in connection_wait_for_new_work */
++ pthread_mutex_unlock(&work_q_lock);
+ }
+
+ /* get_work_q(): will get a work_q_item from the beginning of the work queue, return NULL if
+@@ -1975,9 +1993,9 @@ op_thread_cleanup()
+ op_stack_size, work_q_size_max, work_q_stack_size_max);
+
+ PR_AtomicIncrement(&op_shutdown);
+- PR_Lock(work_q_lock);
+- PR_NotifyAllCondVar(work_q_cv); /* tell any thread waiting in connection_wait_for_new_work to shutdown */
+- PR_Unlock(work_q_lock);
++ pthread_mutex_lock(&work_q_lock);
++ pthread_cond_broadcast(&work_q_cv); /* tell any thread waiting in connection_wait_for_new_work to shutdown */
++ pthread_mutex_unlock(&work_q_lock);
+ }
+
+ /* do this after all worker threads have terminated */
+diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
+index bfd965263..0071ed86a 100644
+--- a/ldap/servers/slapd/daemon.c
++++ b/ldap/servers/slapd/daemon.c
+@@ -1,6 +1,6 @@
+ /** BEGIN COPYRIGHT BLOCK
+ * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
+- * Copyright (C) 2005 Red Hat, Inc.
++ * Copyright (C) 2020 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+@@ -81,8 +81,9 @@ static int readsignalpipe = SLAPD_INVALID_SOCKET;
+ #define FDS_SIGNAL_PIPE 0
+
+ static PRThread *disk_thread_p = NULL;
+-static PRCondVar *diskmon_cvar = NULL;
+-static PRLock *diskmon_mutex = NULL;
++static pthread_cond_t diskmon_cvar;
++static pthread_mutex_t diskmon_mutex;
++
+ void disk_monitoring_stop(void);
+
+ typedef struct listener_info
+@@ -441,9 +442,13 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
+
+ while (!g_get_shutdown()) {
+ if (!first_pass) {
+- PR_Lock(diskmon_mutex);
+- PR_WaitCondVar(diskmon_cvar, PR_SecondsToInterval(10));
+- PR_Unlock(diskmon_mutex);
++ struct timespec current_time = {0};
++
++ pthread_mutex_lock(&diskmon_mutex);
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += 10;
++ pthread_cond_timedwait(&diskmon_cvar, &diskmon_mutex, &current_time);
++ pthread_mutex_unlock(&diskmon_mutex);
+ /*
+ * We need to subtract from disk_space to account for the
+ * logging we just did, it doesn't hurt if we subtract a
+@@ -622,7 +627,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused)))
+ "Disk space on (%s) is too far below the threshold(%" PRIu64 " bytes). 
" + "Waiting %d minutes for disk space to be cleaned up before shutting slapd down...\n", + dirstr, threshold, (grace_period / 60)); +- start = slapi_current_utc_time(); ++ start = slapi_current_rel_time_t(); + now = start; + while ((now - start) < grace_period) { + if (g_get_shutdown()) { +@@ -685,7 +690,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) + immediate_shutdown = 1; + goto cleanup; + } +- now = slapi_current_utc_time(); ++ now = slapi_current_rel_time_t(); + } + + if (ok_now) { +@@ -1005,21 +1010,34 @@ slapd_daemon(daemon_ports_t *ports) + * and the monitoring thread. + */ + if (config_get_disk_monitoring()) { +- if ((diskmon_mutex = PR_NewLock()) == NULL) { ++ pthread_condattr_t condAttr; ++ int rc = 0; ++ ++ if ((rc = pthread_mutex_init(&diskmon_mutex, NULL)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", "cannot create new lock. error %d (%s)\n", ++ rc, strerror(rc)); ++ g_set_shutdown(SLAPI_SHUTDOWN_EXIT); ++ } ++ if ((rc = pthread_condattr_init(&condAttr)) != 0) { + slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", +- "Cannot create new lock for disk space monitoring. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", +- PR_GetError(), slapd_pr_strerror(PR_GetError())); ++ "cannot create new condition attribute variable. error %d (%s)\n", ++ rc, strerror(rc)); + g_set_shutdown(SLAPI_SHUTDOWN_EXIT); + } +- if (diskmon_mutex) { +- if ((diskmon_cvar = PR_NewCondVar(diskmon_mutex)) == NULL) { +- slapi_log_err(SLAPI_LOG_EMERG, "slapd_daemon", +- "Cannot create new condition variable for disk space monitoring. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", +- PR_GetError(), slapd_pr_strerror(PR_GetError())); +- g_set_shutdown(SLAPI_SHUTDOWN_EXIT); +- } ++ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", ++ "cannot set condition attr clock. error %d (%s)\n", ++ rc, strerror(rc)); ++ g_set_shutdown(SLAPI_SHUTDOWN_EXIT); ++ } ++ if ((rc = pthread_cond_init(&diskmon_cvar, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "slapd_daemon", ++ "cannot create new condition variable. error %d (%s)\n", ++ rc, strerror(rc)); ++ g_set_shutdown(SLAPI_SHUTDOWN_EXIT); + } +- if (diskmon_mutex && diskmon_cvar) { ++ pthread_condattr_destroy(&condAttr); ++ if (rc == 0) { + disk_thread_p = PR_CreateThread(PR_SYSTEM_THREAD, + (VFP)(void *)disk_monitoring_thread, NULL, + PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, +@@ -1508,7 +1526,7 @@ static void + handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused))) + { + Connection *c; +- time_t curtime = slapi_current_utc_time(); ++ time_t curtime = slapi_current_rel_time_t(); + + #if LDAP_ERROR_LOGGING + if (slapd_ldap_debug & LDAP_DEBUG_CONNS) { +@@ -2884,8 +2902,8 @@ void + disk_monitoring_stop(void) + { + if (disk_thread_p) { +- PR_Lock(diskmon_mutex); +- PR_NotifyCondVar(diskmon_cvar); +- PR_Unlock(diskmon_mutex); ++ pthread_mutex_lock(&diskmon_mutex); ++ pthread_cond_signal(&diskmon_cvar); ++ pthread_mutex_unlock(&diskmon_mutex); + } + } +diff --git a/ldap/servers/slapd/eventq.c b/ldap/servers/slapd/eventq.c +index a491acd0a..e1900724f 100644 +--- a/ldap/servers/slapd/eventq.c ++++ b/ldap/servers/slapd/eventq.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). 
+@@ -52,8 +52,8 @@ typedef struct _slapi_eq_context
+ */
+ typedef struct _event_queue
+ {
+- PRLock *eq_lock;
+- PRCondVar *eq_cv;
++ pthread_mutex_t eq_lock;
++ pthread_cond_t eq_cv;
+ slapi_eq_context *eq_queue;
+ } event_queue;
+
+@@ -74,8 +74,8 @@ static PRThread *eq_loop_tid = NULL;
+ static int eq_running = 0;
+ static int eq_stopped = 0;
+ static int eq_initialized = 0;
+-PRLock *ss_lock = NULL;
+-PRCondVar *ss_cv = NULL;
++static pthread_mutex_t ss_lock;
++static pthread_cond_t ss_cv;
+ PRCallOnceType init_once = {0};
+
+ /* Forward declarations */
+@@ -170,7 +170,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx)
+
+ PR_ASSERT(eq_initialized);
+ if (!eq_stopped) {
+- PR_Lock(eq->eq_lock);
++ pthread_mutex_lock(&(eq->eq_lock));
+ p = &(eq->eq_queue);
+ while (!found && *p != NULL) {
+ if ((*p)->ec_id == ctx) {
+@@ -182,7 +182,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx)
+ p = &((*p)->ec_next);
+ }
+ }
+- PR_Unlock(eq->eq_lock);
++ pthread_mutex_unlock(&(eq->eq_lock));
+ }
+ slapi_log_err(SLAPI_LOG_HOUSE, NULL,
+ "cancellation of event id %p requested: %s\n",
+@@ -223,7 +223,7 @@ eq_enqueue(slapi_eq_context *newec)
+ slapi_eq_context **p;
+
+ PR_ASSERT(NULL != newec);
+- PR_Lock(eq->eq_lock);
++ pthread_mutex_lock(&(eq->eq_lock));
+ /* Insert in order (sorted by start time) in the list */
+ for (p = &(eq->eq_queue); *p != NULL; p = &((*p)->ec_next)) {
+ if ((*p)->ec_when > newec->ec_when) {
+@@ -236,8 +236,8 @@ eq_enqueue(slapi_eq_context *newec)
+ newec->ec_next = NULL;
+ }
+ *p = newec;
+- PR_NotifyCondVar(eq->eq_cv); /* wake up scheduler thread */
+- PR_Unlock(eq->eq_lock);
++ pthread_cond_signal(&(eq->eq_cv)); /* wake up scheduler thread */
++ pthread_mutex_unlock(&(eq->eq_lock));
+ }
+
+
+@@ -251,12 +251,12 @@ eq_dequeue(time_t now)
+ {
+ slapi_eq_context *retptr = NULL;
+
+- PR_Lock(eq->eq_lock);
++ pthread_mutex_lock(&(eq->eq_lock));
+ if (NULL != eq->eq_queue && eq->eq_queue->ec_when <= now) {
+ retptr = eq->eq_queue;
+ eq->eq_queue = retptr->ec_next;
+ }
+- PR_Unlock(eq->eq_lock);
++ pthread_mutex_unlock(&(eq->eq_lock));
+ return retptr;
+ }
+
+@@ -271,7 +271,7 @@ static void
+ eq_call_all(void)
+ {
+ slapi_eq_context *p;
+- time_t curtime = slapi_current_utc_time();
++ time_t curtime = slapi_current_rel_time_t();
+
+ while ((p = eq_dequeue(curtime)) != NULL) {
+ /* Call the scheduled function */
+@@ -299,34 +299,35 @@ static void
+ eq_loop(void *arg __attribute__((unused)))
+ {
+ while (eq_running) {
+- time_t curtime = slapi_current_utc_time();
+- PRIntervalTime timeout;
++ time_t curtime = slapi_current_rel_time_t();
+ int until;
+- PR_Lock(eq->eq_lock);
++
++ pthread_mutex_lock(&(eq->eq_lock));
+ while (!((NULL != eq->eq_queue) && (eq->eq_queue->ec_when <= curtime))) {
+ if (!eq_running) {
+- PR_Unlock(eq->eq_lock);
++ pthread_mutex_unlock(&(eq->eq_lock));
+ goto bye;
+ }
+ /* Compute new timeout */
+ if (NULL != eq->eq_queue) {
++ struct timespec current_time = slapi_current_rel_time_hr();
+ until = eq->eq_queue->ec_when - curtime;
+- timeout = PR_SecondsToInterval(until);
++ current_time.tv_sec += until;
++ pthread_cond_timedwait(&eq->eq_cv, &eq->eq_lock, &current_time);
+ } else {
+- timeout = PR_INTERVAL_NO_TIMEOUT;
++ pthread_cond_wait(&eq->eq_cv, &eq->eq_lock);
+ }
+- PR_WaitCondVar(eq->eq_cv, timeout);
+- curtime = slapi_current_utc_time();
++ curtime = slapi_current_rel_time_t();
+ }
+ /* There is some work to do */
+- PR_Unlock(eq->eq_lock);
++ pthread_mutex_unlock(&(eq->eq_lock));
+ eq_call_all();
+ }
+ bye:
+ eq_stopped = 1;
+- PR_Lock(ss_lock);
+- 
PR_NotifyAllCondVar(ss_cv);
+- PR_Unlock(ss_lock);
++ pthread_mutex_lock(&ss_lock);
++ pthread_cond_broadcast(&ss_cv);
++ pthread_mutex_unlock(&ss_lock);
+ }
+
+
+@@ -336,23 +337,50 @@ bye:
+ static PRStatus
+ eq_create(void)
+ {
+- PR_ASSERT(NULL == eq->eq_lock);
+- if ((eq->eq_lock = PR_NewLock()) == NULL) {
+- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n");
++ pthread_condattr_t condAttr;
++ int rc = 0;
++
++ /* Init the eventq mutex and cond var */
++ if (pthread_mutex_init(&eq->eq_lock, NULL) != 0) {
++ slapi_log_err(SLAPI_LOG_ERR, "eq_create",
++ "Failed to create lock: error %d (%s)\n",
++ rc, strerror(rc));
+ exit(1);
+ }
+- if ((eq->eq_cv = PR_NewCondVar(eq->eq_lock)) == NULL) {
+- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n");
++ if ((rc = pthread_condattr_init(&condAttr)) != 0) {
++ slapi_log_err(SLAPI_LOG_ERR, "eq_create",
++ "Failed to create new condition attribute variable. error %d (%s)\n",
++ rc, strerror(rc));
+ exit(1);
+ }
+- if ((ss_lock = PR_NewLock()) == NULL) {
+- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n");
++ if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) {
++ slapi_log_err(SLAPI_LOG_ERR, "eq_create",
++ "Cannot set condition attr clock. error %d (%s)\n",
++ rc, strerror(rc));
+ exit(1);
+ }
+- if ((ss_cv = PR_NewCondVar(ss_lock)) == NULL) {
+- slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n");
++ if ((rc = pthread_cond_init(&eq->eq_cv, &condAttr)) != 0) {
++ slapi_log_err(SLAPI_LOG_ERR, "eq_create",
++ "Failed to create new condition variable. error %d (%s)\n",
++ rc, strerror(rc));
+ exit(1);
+ }
++
++ /* Init the "ss" mutex and condition var */
++ if (pthread_mutex_init(&ss_lock, NULL) != 0) {
++ slapi_log_err(SLAPI_LOG_ERR, "eq_create",
++ "Failed to create ss lock: error %d (%s)\n",
++ rc, strerror(rc));
++ exit(1);
++ }
++ if ((rc = pthread_cond_init(&ss_cv, &condAttr)) != 0) {
++ slapi_log_err(SLAPI_LOG_ERR, "eq_create",
++ "Failed to create new ss condition variable. error %d (%s)\n",
++ rc, strerror(rc));
++ exit(1);
++ }
++ pthread_condattr_destroy(&condAttr); /* no longer needed */
++
+ eq->eq_queue = NULL;
+ eq_initialized = 1;
+ return PR_SUCCESS;
+@@ -411,7 +439,7 @@ eq_stop()
+ {
+ slapi_eq_context *p, *q;
+
+- if (NULL == eq || NULL == eq->eq_lock) { /* never started */
++ if (NULL == eq) { /* never started */
+ eq_stopped = 1;
+ return;
+ }
+@@ -423,12 +451,24 @@ eq_stop()
+ * it acknowledges by setting eq_stopped.
+ */
+ while (!eq_stopped) {
+- PR_Lock(eq->eq_lock);
+- PR_NotifyAllCondVar(eq->eq_cv);
+- PR_Unlock(eq->eq_lock);
+- PR_Lock(ss_lock);
+- PR_WaitCondVar(ss_cv, PR_MillisecondsToInterval(100));
+- PR_Unlock(ss_lock);
++ struct timespec current_time = {0};
++
++ pthread_mutex_lock(&(eq->eq_lock));
++ pthread_cond_broadcast(&(eq->eq_cv));
++ pthread_mutex_unlock(&(eq->eq_lock));
++
++ pthread_mutex_lock(&ss_lock);
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ if (current_time.tv_nsec + 100000000 > 1000000000) {
++ /* nanoseconds will overflow, adjust the seconds and nanoseconds */
++ current_time.tv_sec++;
++ /* Add the remainder to nanoseconds */
++ current_time.tv_nsec = (current_time.tv_nsec + 100000000) - 1000000000;
++ } else {
++ current_time.tv_nsec += 100000000; /* 100 ms */
++ }
++ pthread_cond_timedwait(&ss_cv, &ss_lock, &current_time);
++ pthread_mutex_unlock(&ss_lock);
+ }
+ (void)PR_JoinThread(eq_loop_tid);
+ /*
+@@ -438,7 +478,7 @@ eq_stop()
+ * The downside is that the event queue can't be stopped and restarted
+ * easily. 
+ */
+- PR_Lock(eq->eq_lock);
++ pthread_mutex_lock(&(eq->eq_lock));
+ p = eq->eq_queue;
+ while (p != NULL) {
+ q = p->ec_next;
+@@ -449,7 +489,7 @@ eq_stop()
+ */
+ p = q;
+ }
+- PR_Unlock(eq->eq_lock);
++ pthread_mutex_unlock(&(eq->eq_lock));
+ slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have shut down\n");
+ }
+
+@@ -463,17 +503,17 @@ slapi_eq_get_arg(Slapi_Eq_Context ctx)
+
+ PR_ASSERT(eq_initialized);
+ if (eq && !eq_stopped) {
+- PR_Lock(eq->eq_lock);
++ pthread_mutex_lock(&(eq->eq_lock));
+ p = &(eq->eq_queue);
+ while (p && *p != NULL) {
+ if ((*p)->ec_id == ctx) {
+- PR_Unlock(eq->eq_lock);
++ pthread_mutex_unlock(&(eq->eq_lock));
+ return (*p)->ec_arg;
+ } else {
+ p = &((*p)->ec_next);
+ }
+ }
+- PR_Unlock(eq->eq_lock);
++ pthread_mutex_unlock(&(eq->eq_lock));
+ }
+ return NULL;
+ }
+diff --git a/ldap/servers/slapd/house.c b/ldap/servers/slapd/house.c
+index ff139a4a5..ac1d94f26 100644
+--- a/ldap/servers/slapd/house.c
++++ b/ldap/servers/slapd/house.c
+@@ -1,6 +1,6 @@
+ /** BEGIN COPYRIGHT BLOCK
+ * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
+- * Copyright (C) 2005 Red Hat, Inc.
++ * Copyright (C) 2020 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+@@ -23,17 +23,15 @@
+ #define SLAPD_HOUSEKEEPING_INTERVAL 30 /* seconds */
+
+ static PRThread *housekeeping_tid = NULL;
+-static PRLock *housekeeping_mutex = NULL;
+-static PRCondVar *housekeeping_cvar = NULL;
++static pthread_mutex_t housekeeping_mutex;
++static pthread_cond_t housekeeping_cvar;
+
+
+ static void
+ housecleaning(void *cur_time __attribute__((unused)))
+ {
+- int interval;
+-
+- interval = PR_SecondsToInterval(SLAPD_HOUSEKEEPING_INTERVAL);
+ while (!g_get_shutdown()) {
++ struct timespec current_time = {0};
+ /*
+ * Looks simple, but could potentially take a long time.
+ */
+@@ -42,9 +40,15 @@ housecleaning(void *cur_time __attribute__((unused)))
+ if (g_get_shutdown()) {
+ break;
+ }
+- PR_Lock(housekeeping_mutex);
+- PR_WaitCondVar(housekeeping_cvar, interval);
+- PR_Unlock(housekeeping_mutex);
++
++ /* get the current monotonic time and add our interval */
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += SLAPD_HOUSEKEEPING_INTERVAL;
++
++ /* Now we wait... */
++ pthread_mutex_lock(&housekeeping_mutex);
++ pthread_cond_timedwait(&housekeeping_cvar, &housekeeping_mutex, &current_time);
++ pthread_mutex_unlock(&housekeeping_mutex);
+ }
+ }
+
+@@ -52,20 +56,31 @@ PRThread *
+ housekeeping_start(time_t cur_time, void *arg __attribute__((unused)))
+ {
+ static time_t thread_start_time;
++ pthread_condattr_t condAttr;
++ int rc = 0;
+
+ if (housekeeping_tid) {
+ return housekeeping_tid;
+ }
+
+- if ((housekeeping_mutex = PR_NewLock()) == NULL) {
++ if ((rc = pthread_mutex_init(&housekeeping_mutex, NULL)) != 0) {
++ slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start",
++ "housekeeping cannot create new lock. error %d (%s)\n",
++ rc, strerror(rc));
++ } else if ((rc = pthread_condattr_init(&condAttr)) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start",
+- "housekeeping cannot create new lock. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+- PR_GetError(), slapd_pr_strerror(PR_GetError()));
+- } else if ((housekeeping_cvar = PR_NewCondVar(housekeeping_mutex)) == NULL) {
++ "housekeeping cannot create new condition attribute variable. 
error %d (%s)\n", ++ rc, strerror(rc)); ++ } else if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { + slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", +- "housekeeping cannot create new condition variable. " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", +- PR_GetError(), slapd_pr_strerror(PR_GetError())); ++ "housekeeping cannot set condition attr clock. error %d (%s)\n", ++ rc, strerror(rc)); ++ } else if ((rc = pthread_cond_init(&housekeeping_cvar, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", ++ "housekeeping cannot create new condition variable. error %d (%s)\n", ++ rc, strerror(rc)); + } else { ++ pthread_condattr_destroy(&condAttr); /* no longer needed */ + thread_start_time = cur_time; + if ((housekeeping_tid = PR_CreateThread(PR_USER_THREAD, + (VFP)housecleaning, (void *)&thread_start_time, +@@ -84,9 +99,16 @@ void + housekeeping_stop() + { + if (housekeeping_tid) { +- PR_Lock(housekeeping_mutex); +- PR_NotifyCondVar(housekeeping_cvar); +- PR_Unlock(housekeeping_mutex); ++ /* Notify the thread */ ++ pthread_mutex_lock(&housekeeping_mutex); ++ pthread_cond_signal(&housekeeping_cvar); ++ pthread_mutex_unlock(&housekeeping_mutex); ++ ++ /* Wait for the thread to finish */ + (void)PR_JoinThread(housekeeping_tid); ++ ++ /* Clean it all up */ ++ pthread_mutex_destroy(&housekeeping_mutex); ++ pthread_cond_destroy(&housekeeping_cvar); + } + } +diff --git a/ldap/servers/slapd/libmakefile b/ldap/servers/slapd/libmakefile +index b3ecabc29..3559c0104 100644 +--- a/ldap/servers/slapd/libmakefile ++++ b/ldap/servers/slapd/libmakefile +@@ -46,7 +46,7 @@ LIBSLAPD_OBJS=plugin_role.o getfilelist.o libglobs.o log.o ch_malloc.o entry.o p + filter.o filtercmp.o filterentry.o operation.o schemaparse.o pw.o \ + backend.o defbackend.o ava.o charray.o regex.o \ + str2filter.o dynalib.o plugin.o plugin_syntax.o plugin_mr.o \ +- slapi2nspr.o rwlock.o control.o plugin_internal_op.o \ ++ slapi2runtime.o rwlock.o control.o plugin_internal_op.o \ + result.o pw_retry.o agtmmap.o referral.o snmp_collator.o util.o \ + dse.o errormap.o computed.o match.o fileio.o \ + generation.o localhost.o ssl.o factory.o auditlog.o \ +diff --git a/ldap/servers/slapd/psearch.c b/ldap/servers/slapd/psearch.c +index 6820a5d75..c60e6a8ed 100644 +--- a/ldap/servers/slapd/psearch.c ++++ b/ldap/servers/slapd/psearch.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). +@@ -59,10 +59,10 @@ typedef struct _psearch + */ + typedef struct _psearch_list + { +- Slapi_RWLock *pl_rwlock; /* R/W lock struct to serialize access */ +- PSearch *pl_head; /* Head of list */ +- PRLock *pl_cvarlock; /* Lock for cvar */ +- PRCondVar *pl_cvar; /* ps threads sleep on this */ ++ Slapi_RWLock *pl_rwlock; /* R/W lock struct to serialize access */ ++ PSearch *pl_head; /* Head of list */ ++ pthread_mutex_t pl_cvarlock; /* Lock for cvar */ ++ pthread_cond_t pl_cvar; /* ps threads sleep on this */ + } PSearch_List; + + /* +@@ -101,21 +101,26 @@ void + ps_init_psearch_system() + { + if (!PS_IS_INITIALIZED()) { ++ int32_t rc = 0; ++ + psearch_list = (PSearch_List *)slapi_ch_calloc(1, sizeof(PSearch_List)); + if ((psearch_list->pl_rwlock = slapi_new_rwlock()) == NULL) { + slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", "Cannot initialize lock structure. 
" + "The server is terminating.\n"); + exit(-1); + } +- if ((psearch_list->pl_cvarlock = PR_NewLock()) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", "Cannot create new lock. " +- "The server is terminating.\n"); +- exit(-1); ++ ++ if ((rc = pthread_mutex_init(&(psearch_list->pl_cvarlock), NULL)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", ++ "Cannot create new lock. error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(1); + } +- if ((psearch_list->pl_cvar = PR_NewCondVar(psearch_list->pl_cvarlock)) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, "ps_init_psearch_system", "Cannot create new condition variable. " +- "The server is terminating.\n"); +- exit(-1); ++ if ((rc = pthread_cond_init(&(psearch_list->pl_cvar), NULL)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "housekeeping_start", ++ "housekeeping cannot create new condition variable. error %d (%s)\n", ++ rc, strerror(rc)); ++ exit(1); + } + psearch_list->pl_head = NULL; + } +@@ -288,7 +293,7 @@ ps_send_results(void *arg) + pb_conn->c_connid, pb_op ? pb_op->o_opid : -1); + } + +- PR_Lock(psearch_list->pl_cvarlock); ++ pthread_mutex_lock(&(psearch_list->pl_cvarlock)); + + while ((conn_acq_flag == 0) && slapi_atomic_load_64(&(ps->ps_complete), __ATOMIC_ACQUIRE) == 0) { + /* Check for an abandoned operation */ +@@ -300,7 +305,7 @@ ps_send_results(void *arg) + } + if (NULL == ps->ps_eq_head) { + /* Nothing to do */ +- PR_WaitCondVar(psearch_list->pl_cvar, PR_INTERVAL_NO_TIMEOUT); ++ pthread_cond_wait(&(psearch_list->pl_cvar), &(psearch_list->pl_cvarlock)); + } else { + /* dequeue the item */ + int attrsonly; +@@ -330,17 +335,17 @@ ps_send_results(void *arg) + } + + /* +- * Send the result. Since send_ldap_search_entry can block for +- * up to 30 minutes, we relinquish all locks before calling it. +- */ +- PR_Unlock(psearch_list->pl_cvarlock); ++ * Send the result. Since send_ldap_search_entry can block for ++ * up to 30 minutes, we relinquish all locks before calling it. ++ */ ++ pthread_mutex_unlock(&(psearch_list->pl_cvarlock)); + + /* +- * The entry is in the right scope and matches the filter +- * but we need to redo the filter test here to check access +- * controls. See the comments at the slapi_filter_test() +- * call in ps_service_persistent_searches(). +- */ ++ * The entry is in the right scope and matches the filter ++ * but we need to redo the filter test here to check access ++ * controls. See the comments at the slapi_filter_test() ++ * call in ps_service_persistent_searches(). 
++ */ + slapi_pblock_get(ps->ps_pblock, SLAPI_SEARCH_FILTER, &f); + + /* See if the entry meets the filter and ACL criteria */ +@@ -358,13 +363,13 @@ ps_send_results(void *arg) + } + } + +- PR_Lock(psearch_list->pl_cvarlock); ++ pthread_mutex_lock(&(psearch_list->pl_cvarlock)); + + /* Deallocate our wrapper for this entry */ + pe_ch_free(&peq); + } + } +- PR_Unlock(psearch_list->pl_cvarlock); ++ pthread_mutex_unlock(&(psearch_list->pl_cvarlock)); + ps_remove(ps); + + /* indicate the end of search */ +@@ -474,9 +479,9 @@ void + ps_wakeup_all() + { + if (PS_IS_INITIALIZED()) { +- PR_Lock(psearch_list->pl_cvarlock); +- PR_NotifyAllCondVar(psearch_list->pl_cvar); +- PR_Unlock(psearch_list->pl_cvarlock); ++ pthread_mutex_lock(&(psearch_list->pl_cvarlock)); ++ pthread_cond_broadcast(&(psearch_list->pl_cvar)); ++ pthread_mutex_unlock(&(psearch_list->pl_cvarlock)); + } + } + +diff --git a/ldap/servers/slapd/regex.c b/ldap/servers/slapd/regex.c +index 97249a4c5..a17c354fd 100644 +--- a/ldap/servers/slapd/regex.c ++++ b/ldap/servers/slapd/regex.c +@@ -72,7 +72,7 @@ int + slapi_re_exec(Slapi_Regex *re_handle, const char *subject, time_t time_up) + { + int rc; +- time_t curtime = slapi_current_utc_time(); ++ time_t curtime = slapi_current_rel_time_t(); + + if (NULL == re_handle || NULL == re_handle->re_pcre || NULL == subject) { + return LDAP_PARAM_ERROR; +diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h +index f9ac8b46c..55ded5eb8 100644 +--- a/ldap/servers/slapd/slapi-plugin.h ++++ b/ldap/servers/slapd/slapi-plugin.h +@@ -6086,6 +6086,7 @@ Slapi_CondVar *slapi_new_condvar(Slapi_Mutex *mutex); + void slapi_destroy_condvar(Slapi_CondVar *cvar); + int slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout); + int slapi_notify_condvar(Slapi_CondVar *cvar, int notify_all); ++int slapi_wait_condvar_pt(Slapi_CondVar *cvar, Slapi_Mutex *mutex, struct timeval *timeout); + + /** + * Creates a new read/write lock +@@ -6777,6 +6778,12 @@ struct timespec slapi_current_time_hr(void); + * \return timespec of the current monotonic time. + */ + struct timespec slapi_current_rel_time_hr(void); ++/** ++ * Returns the current system time as a hr clock ++ * ++ * \return time_t of the current monotonic time. ++ */ ++time_t slapi_current_rel_time_t(void); + /** + * Returns the current system time as a hr clock in UTC timezone. + * This clock adjusts with ntp steps, and should NOT be +diff --git a/ldap/servers/slapd/slapi2nspr.c b/ldap/servers/slapd/slapi2runtime.c +similarity index 69% +rename from ldap/servers/slapd/slapi2nspr.c +rename to ldap/servers/slapd/slapi2runtime.c +index 232d1599e..85dc4c9a8 100644 +--- a/ldap/servers/slapd/slapi2nspr.c ++++ b/ldap/servers/slapd/slapi2runtime.c +@@ -1,6 +1,6 @@ + /** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. +- * Copyright (C) 2005 Red Hat, Inc. ++ * Copyright (C) 2020 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). 
+@@ -14,6 +14,8 @@ + /* + * slapi2nspr.c - expose a subset of the NSPR20/21 API to SLAPI plugin writers + * ++ * Also include slapi2pthread functions ++ * + */ + + #include "slap.h" +@@ -44,47 +46,50 @@ + Slapi_Mutex * + slapi_new_mutex(void) + { +- return ((Slapi_Mutex *)PR_NewLock()); ++ pthread_mutex_t *new_mutex = (pthread_mutex_t *)slapi_ch_calloc(1, sizeof(pthread_mutex_t)); ++ pthread_mutex_init(new_mutex, NULL); ++ return ((Slapi_Mutex *)new_mutex); + } + +- + /* + * Function: slapi_destroy_mutex +- * Description: behaves just like PR_DestroyLock(). ++ * Description: behaves just like pthread_mutex_destroy(). + */ + void + slapi_destroy_mutex(Slapi_Mutex *mutex) + { + if (mutex != NULL) { +- PR_DestroyLock((PRLock *)mutex); ++ pthread_mutex_destroy((pthread_mutex_t *)mutex); ++ slapi_ch_free((void **)&mutex); + } + } + + + /* + * Function: slapi_lock_mutex +- * Description: behaves just like PR_Lock(). ++ * Description: behaves just like pthread_mutex_lock(). + */ +-void ++inline void __attribute__((always_inline)) + slapi_lock_mutex(Slapi_Mutex *mutex) + { + if (mutex != NULL) { +- PR_Lock((PRLock *)mutex); ++ pthread_mutex_lock((pthread_mutex_t *)mutex); + } + } + + + /* + * Function: slapi_unlock_mutex +- * Description: behaves just like PR_Unlock(). ++ * Description: behaves just like pthread_mutex_unlock(). + * Returns: + * non-zero if mutex was successfully unlocked. + * 0 if mutex is NULL or is not locked by the calling thread. + */ +-int ++inline int __attribute__((always_inline)) + slapi_unlock_mutex(Slapi_Mutex *mutex) + { +- if (mutex == NULL || PR_Unlock((PRLock *)mutex) == PR_FAILURE) { ++ PR_ASSERT(mutex != NULL); ++ if (mutex == NULL || pthread_mutex_unlock((pthread_mutex_t *)mutex) != 0) { + return (0); + } else { + return (1); +@@ -98,13 +103,18 @@ slapi_unlock_mutex(Slapi_Mutex *mutex) + * Returns: pointer to a new condition variable (NULL if one can't be created). 
+ */
+ Slapi_CondVar *
+-slapi_new_condvar(Slapi_Mutex *mutex)
++slapi_new_condvar(Slapi_Mutex *mutex __attribute__((unused)))
+ {
+- if (mutex == NULL) {
+- return (NULL);
+- }
++ pthread_cond_t *new_cv = (pthread_cond_t *)slapi_ch_calloc(1, sizeof(pthread_cond_t));
++ pthread_condattr_t condAttr;
++
++ pthread_condattr_init(&condAttr);
++ pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC);
++ pthread_cond_init(new_cv, &condAttr);
++ /* Done with the cond attr, it's safe to destroy it */
++ pthread_condattr_destroy(&condAttr);
+
+- return ((Slapi_CondVar *)PR_NewCondVar((PRLock *)mutex));
++ return (Slapi_CondVar *)new_cv;
+ }
+
+
+@@ -116,7 +126,8 @@ void
+ slapi_destroy_condvar(Slapi_CondVar *cvar)
+ {
+ if (cvar != NULL) {
+- PR_DestroyCondVar((PRCondVar *)cvar);
++ pthread_cond_destroy((pthread_cond_t *)cvar);
++ slapi_ch_free((void **)&cvar);
+ }
+ }
+
+@@ -134,23 +145,35 @@ slapi_destroy_condvar(Slapi_CondVar *cvar)
+ int
+ slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout)
+ {
+- PRIntervalTime prit;
++ /* deprecated in favor of slapi_wait_condvar_pt() which requires that the
++ * mutex be passed in */
++ return (0);
++}
++
++int
++slapi_wait_condvar_pt(Slapi_CondVar *cvar, Slapi_Mutex *mutex, struct timeval *timeout)
++{
++ int32_t rc = 1;
+
+ if (cvar == NULL) {
+- return (0);
++ return 0;
+ }
+
+ if (timeout == NULL) {
+- prit = PR_INTERVAL_NO_TIMEOUT;
++ rc = pthread_cond_wait((pthread_cond_t *)cvar, (pthread_mutex_t *)mutex);
+ } else {
+- prit = PR_SecondsToInterval(timeout->tv_sec) + PR_MicrosecondsToInterval(timeout->tv_usec);
++ struct timespec current_time = {0};
++ clock_gettime(CLOCK_MONOTONIC, &current_time);
++ current_time.tv_sec += (timeout->tv_sec + PR_MicrosecondsToInterval(timeout->tv_usec));
++ rc = pthread_cond_timedwait((pthread_cond_t *)cvar, (pthread_mutex_t *)mutex, &current_time);
+ }
+
+- if (PR_WaitCondVar((PRCondVar *)cvar, prit) != PR_SUCCESS) {
+- return (0);
++ if (rc != 0) {
++ /* something went wrong */
++ return 0;
+ }
+
+- return (1);
++ return 1; /* success */
+ }
+
+
+@@ -166,19 +189,19 @@ slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout)
+ int
+ slapi_notify_condvar(Slapi_CondVar *cvar, int notify_all)
+ {
+- PRStatus prrc;
++ int32_t rc;
+
+ if (cvar == NULL) {
+- return (0);
++ return 0;
+ }
+
+ if (notify_all) {
+- prrc = PR_NotifyAllCondVar((PRCondVar *)cvar);
++ rc = pthread_cond_broadcast((pthread_cond_t *)cvar);
+ } else {
+- prrc = PR_NotifyCondVar((PRCondVar *)cvar);
++ rc = pthread_cond_signal((pthread_cond_t *)cvar);
+ }
+
+- return (prrc == PR_SUCCESS ? 1 : 0);
++ return (rc == 0 ? 
1 : 0); + } + + Slapi_RWLock * +@@ -236,7 +259,7 @@ slapi_destroy_rwlock(Slapi_RWLock *rwlock) + } + } + +-int ++inline int __attribute__((always_inline)) + slapi_rwlock_rdlock(Slapi_RWLock *rwlock) + { + int ret = 0; +@@ -252,7 +275,7 @@ slapi_rwlock_rdlock(Slapi_RWLock *rwlock) + return ret; + } + +-int ++inline int __attribute__((always_inline)) + slapi_rwlock_wrlock(Slapi_RWLock *rwlock) + { + int ret = 0; +@@ -268,7 +291,7 @@ slapi_rwlock_wrlock(Slapi_RWLock *rwlock) + return ret; + } + +-int ++inline int __attribute__((always_inline)) + slapi_rwlock_unlock(Slapi_RWLock *rwlock) + { + int ret = 0; +diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c +index 806077a16..26f281cba 100644 +--- a/ldap/servers/slapd/task.c ++++ b/ldap/servers/slapd/task.c +@@ -380,16 +380,14 @@ slapi_task_status_changed(Slapi_Task *task) + Slapi_PBlock *pb = slapi_pblock_new(); + Slapi_Entry *e; + int ttl; +- time_t expire; + + if ((e = get_internal_entry(pb, task->task_dn))) { + ttl = atoi(slapi_fetch_attr(e, "ttl", DEFAULT_TTL)); + if (ttl > (24*3600)) + ttl = (24*3600); /* be reasonable, allow to check task status not longer than one day */ +- expire = time(NULL) + ttl; + task->task_flags |= SLAPI_TASK_DESTROYING; + /* queue an event to destroy the state info */ +- slapi_eq_once(destroy_task, (void *)task, expire); ++ slapi_eq_once(destroy_task, (void *)task, slapi_current_rel_time_t() + ttl); + } + slapi_free_search_results_internal(pb); + slapi_pblock_destroy(pb); +diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c +index 545538404..0406c3689 100644 +--- a/ldap/servers/slapd/time.c ++++ b/ldap/servers/slapd/time.c +@@ -107,6 +107,14 @@ slapi_current_rel_time_hr(void) + return now; + } + ++time_t ++slapi_current_rel_time_t(void) ++{ ++ struct timespec now = {0}; ++ clock_gettime(CLOCK_MONOTONIC, &now); ++ return now.tv_sec; ++} ++ + struct timespec + slapi_current_utc_time_hr(void) + { +@@ -292,7 +300,7 @@ slapi_timer_result + slapi_timespec_expire_check(struct timespec *expire) + { + /* +- * Check this first, as it makes no timeout virutally free. ++ * Check this first, as it makes no timeout virtually free. + */ + if (expire->tv_sec == 0 && expire->tv_nsec == 0) { + return TIMER_CONTINUE; +-- +2.26.2 + diff --git a/SOURCES/0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch b/SOURCES/0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch new file mode 100644 index 0000000..66a40e8 --- /dev/null +++ b/SOURCES/0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch @@ -0,0 +1,1748 @@ +From 69af412d42acccac660037e1f4026a6a6717634c Mon Sep 17 00:00:00 2001 +From: Mark Reynolds +Date: Thu, 17 Dec 2020 15:25:42 -0500 +Subject: [PATCH 2/2] Issue 4384 - Separate eventq into REALTIME and MONOTONIC + +Description: The recent changes to the eventq "when" time changed + internally from REALTIME to MONOTONIC, and this broke + the API. Create a new API for MONOTONIC clocks, and + keep the original API intact for REALTIME clocks. + +Relates: https://github.com/389ds/389-ds-base/issues/4384 + +Reviewed by: firstyear(Thanks!) 
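+
+For illustration, a minimal sketch of the two deadline styles this patch
+separates (the names m, cv, wait_10s_monotonic and the 10 second deadline are
+assumptions made for the example, not code taken from this patch):
+
+    #include <pthread.h>
+    #include <time.h>
+
+    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
+    static pthread_cond_t cv;
+
+    static void wait_10s_monotonic(void)
+    {
+        pthread_condattr_t attr;
+        struct timespec ts = {0};
+
+        /* a _rel style wait needs a condvar bound to CLOCK_MONOTONIC */
+        pthread_condattr_init(&attr);
+        pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
+        pthread_cond_init(&cv, &attr);
+        pthread_condattr_destroy(&attr);
+
+        /* the deadline is absolute on the monotonic clock */
+        clock_gettime(CLOCK_MONOTONIC, &ts);
+        ts.tv_sec += 10;
+
+        pthread_mutex_lock(&m);
+        pthread_cond_timedwait(&cv, &m, &ts);
+        pthread_mutex_unlock(&m);
+    }
+
+Callers of the unchanged REALTIME API keep passing absolute wall-clock times
+(for example time(NULL) + ttl), while the new *_rel() calls take deadlines
+derived from slapi_current_rel_time_t(), i.e. CLOCK_MONOTONIC seconds as above.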
+--- + Makefile.am | 1 + + docs/slapi.doxy.in | 1 - + ldap/servers/plugins/chainingdb/cb_instance.c | 6 +- + ldap/servers/plugins/dna/dna.c | 4 +- + .../plugins/replication/repl5_backoff.c | 12 +- + .../plugins/replication/repl5_connection.c | 10 +- + .../plugins/replication/repl5_mtnode_ext.c | 4 +- + .../plugins/replication/repl5_replica.c | 24 +- + .../plugins/replication/repl5_schedule.c | 4 +- + .../plugins/replication/windows_connection.c | 12 +- + .../replication/windows_inc_protocol.c | 7 +- + ldap/servers/plugins/retrocl/retrocl_trim.c | 10 +- + ldap/servers/slapd/daemon.c | 3 +- + ldap/servers/slapd/eventq-deprecated.c | 483 ++++++++++++++++++ + ldap/servers/slapd/eventq.c | 236 ++++----- + ldap/servers/slapd/main.c | 18 +- + ldap/servers/slapd/proto-slap.h | 6 +- + ldap/servers/slapd/slapi-plugin.h | 62 ++- + ldap/servers/slapd/slapi2runtime.c | 23 +- + ldap/servers/slapd/snmp_collator.c | 7 +- + ldap/servers/slapd/task.c | 2 +- + ldap/servers/slapd/uuid.c | 3 +- + 22 files changed, 750 insertions(+), 188 deletions(-) + create mode 100644 ldap/servers/slapd/eventq-deprecated.c + +diff --git a/Makefile.am b/Makefile.am +index f7bf1c44c..ece1ad41a 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -1408,6 +1408,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \ + ldap/servers/slapd/entrywsi.c \ + ldap/servers/slapd/errormap.c \ + ldap/servers/slapd/eventq.c \ ++ ldap/servers/slapd/eventq-deprecated.c \ + ldap/servers/slapd/factory.c \ + ldap/servers/slapd/features.c \ + ldap/servers/slapd/fileio.c \ +diff --git a/docs/slapi.doxy.in b/docs/slapi.doxy.in +index b1e4810ab..1cafc50ce 100644 +--- a/docs/slapi.doxy.in ++++ b/docs/slapi.doxy.in +@@ -759,7 +759,6 @@ WARN_LOGFILE = + # Note: If this tag is empty the current directory is searched. + + INPUT = src/libsds/include/sds.h \ +- docs/job-safety.md \ + # ldap/servers/slapd/slapi-plugin.h \ + + # This tag can be used to specify the character encoding of the source files +diff --git a/ldap/servers/plugins/chainingdb/cb_instance.c b/ldap/servers/plugins/chainingdb/cb_instance.c +index bc1864c1a..7fd85deb0 100644 +--- a/ldap/servers/plugins/chainingdb/cb_instance.c ++++ b/ldap/servers/plugins/chainingdb/cb_instance.c +@@ -217,7 +217,7 @@ cb_instance_free(cb_backend_instance *inst) + slapi_rwlock_wrlock(inst->rwl_config_lock); + + if (inst->eq_ctx != NULL) { +- slapi_eq_cancel(inst->eq_ctx); ++ slapi_eq_cancel_rel(inst->eq_ctx); + inst->eq_ctx = NULL; + } + +@@ -1947,8 +1947,8 @@ cb_instance_add_config_callback(Slapi_PBlock *pb __attribute__((unused)), + * we can't call recursively into the DSE to do more adds, they'll + * silently fail. instead, schedule the adds to happen in 1 second. 
+ */ +- inst->eq_ctx = slapi_eq_once(cb_instance_add_monitor_later, (void *)inst, +- slapi_current_rel_time_t() + 1); ++ inst->eq_ctx = slapi_eq_once_rel(cb_instance_add_monitor_later, (void *)inst, ++ slapi_current_rel_time_t() + 1); + } + + /* Get the list of operational attrs defined in the schema */ +diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c +index 1cb54580b..b46edfcbb 100644 +--- a/ldap/servers/plugins/dna/dna.c ++++ b/ldap/servers/plugins/dna/dna.c +@@ -688,7 +688,7 @@ dna_close(Slapi_PBlock *pb __attribute__((unused))) + slapi_log_err(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, + "--> dna_close\n"); + +- slapi_eq_cancel(eq_ctx); ++ slapi_eq_cancel_rel(eq_ctx); + dna_delete_config(NULL); + slapi_ch_free((void **)&dna_global_config); + slapi_destroy_rwlock(g_dna_cache_lock); +@@ -908,7 +908,7 @@ dna_load_plugin_config(Slapi_PBlock *pb, int use_eventq) + * starting up would cause the change to not + * get changelogged. */ + now = slapi_current_rel_time_t(); +- eq_ctx = slapi_eq_once(dna_update_config_event, NULL, now + 30); ++ eq_ctx = slapi_eq_once_rel(dna_update_config_event, NULL, now + 30); + } else { + dna_update_config_event(0, NULL); + } +diff --git a/ldap/servers/plugins/replication/repl5_backoff.c b/ldap/servers/plugins/replication/repl5_backoff.c +index 40ec75dd7..8c851beb2 100644 +--- a/ldap/servers/plugins/replication/repl5_backoff.c ++++ b/ldap/servers/plugins/replication/repl5_backoff.c +@@ -99,7 +99,7 @@ backoff_reset(Backoff_Timer *bt, slapi_eq_fn_t callback, void *callback_data) + bt->callback_arg = callback_data; + /* Cancel any pending events in the event queue */ + if (NULL != bt->pending_event) { +- slapi_eq_cancel(bt->pending_event); ++ slapi_eq_cancel_rel(bt->pending_event); + bt->pending_event = NULL; + } + /* Compute the first fire time */ +@@ -112,8 +112,8 @@ backoff_reset(Backoff_Timer *bt, slapi_eq_fn_t callback, void *callback_data) + /* Schedule the callback */ + bt->last_fire_time = slapi_current_rel_time_t(); + return_value = bt->last_fire_time + bt->next_interval; +- bt->pending_event = slapi_eq_once(bt->callback, bt->callback_arg, +- return_value); ++ bt->pending_event = slapi_eq_once_rel(bt->callback, bt->callback_arg, ++ return_value); + PR_Unlock(bt->lock); + return return_value; + } +@@ -159,8 +159,8 @@ backoff_step(Backoff_Timer *bt) + /* Schedule the callback, if any */ + bt->last_fire_time += previous_interval; + return_value = bt->last_fire_time + bt->next_interval; +- bt->pending_event = slapi_eq_once(bt->callback, bt->callback_arg, +- return_value); ++ bt->pending_event = slapi_eq_once_rel(bt->callback, bt->callback_arg, ++ return_value); + } + PR_Unlock(bt->lock); + return return_value; +@@ -196,7 +196,7 @@ backoff_delete(Backoff_Timer **btp) + PR_Lock(bt->lock); + /* Cancel any pending events in the event queue */ + if (NULL != bt->pending_event) { +- slapi_eq_cancel(bt->pending_event); ++ slapi_eq_cancel_rel(bt->pending_event); + } + PR_Unlock(bt->lock); + PR_DestroyLock(bt->lock); +diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c +index bc9ca424b..2dd74f9e7 100644 +--- a/ldap/servers/plugins/replication/repl5_connection.c ++++ b/ldap/servers/plugins/replication/repl5_connection.c +@@ -272,7 +272,7 @@ conn_delete(Repl_Connection *conn) + PR_ASSERT(NULL != conn); + PR_Lock(conn->lock); + if (conn->linger_active) { +- if (slapi_eq_cancel(conn->linger_event) == 1) { ++ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { + /* Event was found and 
cancelled. Destroy the connection object. */ + destroy_it = PR_TRUE; + } else { +@@ -961,7 +961,7 @@ conn_cancel_linger(Repl_Connection *conn) + "conn_cancel_linger - %s - Canceling linger on the connection\n", + agmt_get_long_name(conn->agmt)); + conn->linger_active = PR_FALSE; +- if (slapi_eq_cancel(conn->linger_event) == 1) { ++ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { + conn->refcnt--; + } + conn->linger_event = NULL; +@@ -1030,7 +1030,7 @@ conn_start_linger(Repl_Connection *conn) + agmt_get_long_name(conn->agmt)); + } else { + conn->linger_active = PR_TRUE; +- conn->linger_event = slapi_eq_once(linger_timeout, conn, now + conn->linger_time); ++ conn->linger_event = slapi_eq_once_rel(linger_timeout, conn, now + conn->linger_time); + conn->status = STATUS_LINGERING; + } + PR_Unlock(conn->lock); +@@ -1990,7 +1990,7 @@ repl5_start_debug_timeout(int *setlevel) + Slapi_Eq_Context eqctx = 0; + if (s_debug_timeout && s_debug_level) { + time_t now = slapi_current_rel_time_t(); +- eqctx = slapi_eq_once(repl5_debug_timeout_callback, setlevel, ++ eqctx = slapi_eq_once_rel(repl5_debug_timeout_callback, setlevel, + s_debug_timeout + now); + } + return eqctx; +@@ -2002,7 +2002,7 @@ repl5_stop_debug_timeout(Slapi_Eq_Context eqctx, int *setlevel) + char buf[20]; + + if (eqctx && !*setlevel) { +- (void)slapi_eq_cancel(eqctx); ++ (void)slapi_eq_cancel_rel(eqctx); + } + + if (s_debug_timeout && s_debug_level && *setlevel) { +diff --git a/ldap/servers/plugins/replication/repl5_mtnode_ext.c b/ldap/servers/plugins/replication/repl5_mtnode_ext.c +index 82e230958..2967a47f8 100644 +--- a/ldap/servers/plugins/replication/repl5_mtnode_ext.c ++++ b/ldap/servers/plugins/replication/repl5_mtnode_ext.c +@@ -82,8 +82,8 @@ multimaster_mtnode_construct_replicas() + } + } + /* Wait a few seconds for everything to startup before resuming any replication tasks */ +- slapi_eq_once(replica_check_for_tasks, (void *)replica_get_root(r), +- slapi_current_rel_time_t() + 5); ++ slapi_eq_once_rel(replica_check_for_tasks, (void *)replica_get_root(r), ++ slapi_current_rel_time_t() + 5); + } + } + } +diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c +index c1d376c72..7102e0606 100644 +--- a/ldap/servers/plugins/replication/repl5_replica.c ++++ b/ldap/servers/plugins/replication/repl5_replica.c +@@ -231,17 +231,17 @@ replica_new_from_entry(Slapi_Entry *e, char *errortext, PRBool is_add_operation, + /* ONREPL - the state update can occur before the entry is added to the DIT. + In that case the updated would fail but nothing bad would happen. The next + scheduled update would save the state */ +- r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, +- slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); ++ r->repl_eqcxt_rs = slapi_eq_repeat_rel(replica_update_state, r->repl_name, ++ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); + + if (r->tombstone_reap_interval > 0) { + /* + * Reap Tombstone should be started some time after the plugin started. + * This will allow the server to fully start before consuming resources. 
+ */ +- r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, +- slapi_current_rel_time_t() + r->tombstone_reap_interval, +- 1000 * r->tombstone_reap_interval); ++ r->repl_eqcxt_tr = slapi_eq_repeat_rel(eq_cb_reap_tombstones, r->repl_name, ++ slapi_current_rel_time_t() + r->tombstone_reap_interval, ++ 1000 * r->tombstone_reap_interval); + } + + done: +@@ -303,12 +303,12 @@ replica_destroy(void **arg) + */ + + if (r->repl_eqcxt_rs) { +- slapi_eq_cancel(r->repl_eqcxt_rs); ++ slapi_eq_cancel_rel(r->repl_eqcxt_rs); + r->repl_eqcxt_rs = NULL; + } + + if (r->repl_eqcxt_tr) { +- slapi_eq_cancel(r->repl_eqcxt_tr); ++ slapi_eq_cancel_rel(r->repl_eqcxt_tr); + r->repl_eqcxt_tr = NULL; + } + +@@ -1511,14 +1511,14 @@ replica_set_enabled(Replica *r, PRBool enable) + if (enable) { + if (r->repl_eqcxt_rs == NULL) /* event is not already registered */ + { +- r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name, +- slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); ++ r->repl_eqcxt_rs = slapi_eq_repeat_rel(replica_update_state, r->repl_name, ++ slapi_current_rel_time_t() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); + } + } else /* disable */ + { + if (r->repl_eqcxt_rs) /* event is still registerd */ + { +- slapi_eq_cancel(r->repl_eqcxt_rs); ++ slapi_eq_cancel_rel(r->repl_eqcxt_rs); + r->repl_eqcxt_rs = NULL; + } + } +@@ -3628,7 +3628,7 @@ replica_set_tombstone_reap_interval(Replica *r, long interval) + if (interval > 0 && r->repl_eqcxt_tr && r->tombstone_reap_interval != interval) { + int found; + +- found = slapi_eq_cancel(r->repl_eqcxt_tr); ++ found = slapi_eq_cancel_rel(r->repl_eqcxt_tr); + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "replica_set_tombstone_reap_interval - tombstone_reap event (interval=%" PRId64 ") was %s\n", + r->tombstone_reap_interval, (found ? 
"cancelled" : "not found")); +@@ -3636,7 +3636,7 @@ replica_set_tombstone_reap_interval(Replica *r, long interval) + } + r->tombstone_reap_interval = interval; + if (interval > 0 && r->repl_eqcxt_tr == NULL) { +- r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name, ++ r->repl_eqcxt_tr = slapi_eq_repeat_rel(eq_cb_reap_tombstones, r->repl_name, + slapi_current_rel_time_t() + r->tombstone_reap_interval, + 1000 * r->tombstone_reap_interval); + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, +diff --git a/ldap/servers/plugins/replication/repl5_schedule.c b/ldap/servers/plugins/replication/repl5_schedule.c +index 9539f4031..ca42df561 100644 +--- a/ldap/servers/plugins/replication/repl5_schedule.c ++++ b/ldap/servers/plugins/replication/repl5_schedule.c +@@ -550,7 +550,7 @@ schedule_window_state_change_event(Schedule *sch) + wakeup_time = PRTime2time_t(tm); + + /* schedule the event */ +- sch->pending_event = slapi_eq_once(window_state_changed, sch, wakeup_time); ++ sch->pending_event = slapi_eq_once_rel(window_state_changed, sch, wakeup_time); + + timestr = get_timestring(&wakeup_time); + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, "%s: Update window will %s at %s\n", +@@ -593,7 +593,7 @@ static void + unschedule_window_state_change_event(Schedule *sch) + { + if (sch->pending_event) { +- slapi_eq_cancel(sch->pending_event); ++ slapi_eq_cancel_rel(sch->pending_event); + sch->pending_event = NULL; + } + } +diff --git a/ldap/servers/plugins/replication/windows_connection.c b/ldap/servers/plugins/replication/windows_connection.c +index ce0662544..5eca5fad1 100644 +--- a/ldap/servers/plugins/replication/windows_connection.c ++++ b/ldap/servers/plugins/replication/windows_connection.c +@@ -204,7 +204,7 @@ windows_conn_delete(Repl_Connection *conn) + PR_ASSERT(NULL != conn); + PR_Lock(conn->lock); + if (conn->linger_active) { +- if (slapi_eq_cancel(conn->linger_event) == 1) { ++ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { + /* Event was found and cancelled. Destroy the connection object. 
*/ + PR_Unlock(conn->lock); + destroy_it = PR_TRUE; +@@ -1052,7 +1052,7 @@ windows_conn_cancel_linger(Repl_Connection *conn) + "windows_conn_cancel_linger - %s: Cancelling linger on the connection\n", + agmt_get_long_name(conn->agmt)); + conn->linger_active = PR_FALSE; +- if (slapi_eq_cancel(conn->linger_event) == 1) { ++ if (slapi_eq_cancel_rel(conn->linger_event) == 1) { + conn->refcnt--; + } + conn->linger_event = NULL; +@@ -1129,7 +1129,7 @@ windows_conn_start_linger(Repl_Connection *conn) + agmt_get_long_name(conn->agmt)); + } else { + conn->linger_active = PR_TRUE; +- conn->linger_event = slapi_eq_once(linger_timeout, conn, now + conn->linger_time); ++ conn->linger_event = slapi_eq_once_rel(linger_timeout, conn, now + conn->linger_time); + conn->status = STATUS_LINGERING; + } + PR_Unlock(conn->lock); +@@ -1822,8 +1822,8 @@ repl5_start_debug_timeout(int *setlevel) + + if (s_debug_timeout && s_debug_level) { + time_t now = time(NULL); +- eqctx = slapi_eq_once(repl5_debug_timeout_callback, setlevel, +- s_debug_timeout + now); ++ eqctx = slapi_eq_once_rel(repl5_debug_timeout_callback, setlevel, ++ s_debug_timeout + now); + } + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "<= repl5_start_debug_timeout\n"); + return eqctx; +@@ -1837,7 +1837,7 @@ repl5_stop_debug_timeout(Slapi_Eq_Context eqctx, int *setlevel) + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> repl5_stop_debug_timeout\n"); + + if (eqctx && !*setlevel) { +- (void)slapi_eq_cancel(eqctx); ++ (void)slapi_eq_cancel_rel(eqctx); + } + + if (s_debug_timeout && s_debug_level && *setlevel) { +diff --git a/ldap/servers/plugins/replication/windows_inc_protocol.c b/ldap/servers/plugins/replication/windows_inc_protocol.c +index 3d548e5ed..c07a8180a 100644 +--- a/ldap/servers/plugins/replication/windows_inc_protocol.c ++++ b/ldap/servers/plugins/replication/windows_inc_protocol.c +@@ -132,7 +132,7 @@ windows_inc_delete(Private_Repl_Protocol **prpp) + slapi_log_err(SLAPI_LOG_TRACE, windows_repl_plugin_name, "=> windows_inc_delete\n"); + /* First, stop the protocol if it isn't already stopped */ + /* Then, delete all resources used by the protocol */ +- rc = slapi_eq_cancel(dirsync); ++ rc = slapi_eq_cancel_rel(dirsync); + slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, + "windows_inc_delete - dirsync: %p, rval: %d\n", dirsync, rc); + /* if backoff is set, delete it (from EQ, as well) */ +@@ -324,12 +324,13 @@ windows_inc_run(Private_Repl_Protocol *prp) + if (interval != current_interval) { + current_interval = interval; + if (dirsync) { +- int rc = slapi_eq_cancel(dirsync); ++ int rc = slapi_eq_cancel_rel(dirsync); + slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, + "windows_inc_run - Cancelled dirsync: %p, rval: %d\n", + dirsync, rc); + } +- dirsync = slapi_eq_repeat(periodic_dirsync, (void *)prp, (time_t)0, interval); ++ dirsync = slapi_eq_repeat_rel(periodic_dirsync, (void *)prp, ++ slapi_current_rel_time_t(), interval); + slapi_log_err(SLAPI_LOG_REPL, windows_repl_plugin_name, + "windows_inc_run - New dirsync: %p\n", dirsync); + } +diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c +index a3e16c4e1..12a395210 100644 +--- a/ldap/servers/plugins/retrocl/retrocl_trim.c ++++ b/ldap/servers/plugins/retrocl/retrocl_trim.c +@@ -460,10 +460,10 @@ retrocl_init_trimming(void) + ts.ts_s_initialized = 1; + retrocl_trimming = 1; + +- retrocl_trim_ctx = slapi_eq_repeat(retrocl_housekeeping, +- NULL, (time_t)0, +- /* in milliseconds */ +- trim_interval * 1000); 
++ retrocl_trim_ctx = slapi_eq_repeat_rel(retrocl_housekeeping, ++ NULL, (time_t)0, ++ /* in milliseconds */ ++ trim_interval * 1000); + } + + /* +@@ -487,7 +487,7 @@ retrocl_stop_trimming(void) + */ + retrocl_trimming = 0; + if (retrocl_trim_ctx) { +- slapi_eq_cancel(retrocl_trim_ctx); ++ slapi_eq_cancel_rel(retrocl_trim_ctx); + retrocl_trim_ctx = NULL; + } + PR_DestroyLock(ts.ts_s_trim_mutex); +diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c +index 0071ed86a..7681e88ea 100644 +--- a/ldap/servers/slapd/daemon.c ++++ b/ldap/servers/slapd/daemon.c +@@ -1240,7 +1240,8 @@ slapd_daemon(daemon_ports_t *ports) + slapi_log_err(SLAPI_LOG_TRACE, "slapd_daemon", + "slapd shutting down - waiting for backends to close down\n"); + +- eq_stop(); ++ eq_stop(); /* deprecated */ ++ eq_stop_rel(); + if (!in_referral_mode) { + task_shutdown(); + uniqueIDGenCleanup(); +diff --git a/ldap/servers/slapd/eventq-deprecated.c b/ldap/servers/slapd/eventq-deprecated.c +new file mode 100644 +index 000000000..71a7bf8f5 +--- /dev/null ++++ b/ldap/servers/slapd/eventq-deprecated.c +@@ -0,0 +1,483 @@ ++/** BEGIN COPYRIGHT BLOCK ++ * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. ++ * Copyright (C) 2020 Red Hat, Inc. ++ * All rights reserved. ++ * ++ * License: GPL (version 3 or any later version). ++ * See LICENSE for details. ++ * END COPYRIGHT BLOCK **/ ++ ++#ifdef HAVE_CONFIG_H ++#include ++#endif ++ ++ ++/* ******************************************************** ++eventq-deprecated.c - Event queue/scheduling system. ++ ++There are 3 publicly-accessible entry points: ++ ++slapi_eq_once(): cause an event to happen exactly once ++slapi_eq_repeat(): cause an event to happen repeatedly ++slapi_eq_cancel(): cancel a pending event ++ ++There is also an initialization point which must be ++called by the server to initialize the event queue system: ++eq_start(), and an entry point used to shut down the system: ++eq_stop(). ++ ++These functions are now deprecated in favor of the functions ++in eventq.c which use MONOTONIC clocks instead of REALTIME ++clocks. ++*********************************************************** */ ++ ++#include "slap.h" ++#include "prlock.h" ++#include "prcvar.h" ++#include "prinit.h" ++ ++/* ++ * Private definition of slapi_eq_context. Only this ++ * module (eventq.c) should know about the layout of ++ * this structure. ++ */ ++typedef struct _slapi_eq_context ++{ ++ time_t ec_when; ++ time_t ec_interval; ++ slapi_eq_fn_t ec_fn; ++ void *ec_arg; ++ Slapi_Eq_Context ec_id; ++ struct _slapi_eq_context *ec_next; ++} slapi_eq_context; ++ ++/* ++ * Definition of the event queue. ++ */ ++typedef struct _event_queue ++{ ++ PRLock *eq_lock; ++ PRCondVar *eq_cv; ++ slapi_eq_context *eq_queue; ++} event_queue; ++ ++/* ++ * The event queue itself. 
++ */ ++static event_queue eqs = {0}; ++static event_queue *eq = &eqs; ++ ++/* ++ * Thread ID of the main thread loop ++ */ ++static PRThread *eq_loop_tid = NULL; ++ ++/* ++ * Flags used to control startup/shutdown of the event queue ++ */ ++static int eq_running = 0; ++static int eq_stopped = 0; ++static int eq_initialized = 0; ++PRLock *ss_lock = NULL; ++PRCondVar *ss_cv = NULL; ++PRCallOnceType init_once = {0}; ++ ++/* Forward declarations */ ++static slapi_eq_context *eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); ++static void eq_enqueue(slapi_eq_context *newec); ++static slapi_eq_context *eq_dequeue(time_t now); ++static PRStatus eq_create(void); ++ ++ ++/* ******************************************************** */ ++ ++ ++/* ++ * slapi_eq_once: cause an event to happen exactly once. ++ * ++ * Arguments: ++ * fn: the function to call ++ * arg: an argument to pass to the called function ++ * when: the time that the function should be called ++ * Returns: ++ * slapi_eq_context - a handle to an opaque object which ++ * the caller can use to refer to this particular scheduled ++ * event. ++ */ ++Slapi_Eq_Context ++slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) ++{ ++ slapi_eq_context *tmp; ++ PR_ASSERT(eq_initialized); ++ if (!eq_stopped) { ++ ++ Slapi_Eq_Context id; ++ ++ tmp = eq_new(fn, arg, when, 0UL); ++ id = tmp->ec_id; ++ ++ eq_enqueue(tmp); ++ ++ /* After this point, may have */ ++ /* been freed, depending on the thread */ ++ /* scheduling. Too bad */ ++ ++ slapi_log_err(SLAPI_LOG_HOUSE, NULL, ++ "added one-time event id %p at time %ld\n", ++ id, when); ++ return (id); ++ } ++ return NULL; /* JCM - Not sure if this should be 0 or something else. */ ++} ++ ++ ++/* ++ * slapi_eq_repeat: cause an event to happen repeatedly. ++ * ++ * Arguments: ++ * fn: the function to call ++ * arg: an argument to pass to the called function ++ * when: the time that the function should first be called ++ * interval: the amount of time (in milliseconds) between ++ * successive calls to the function ++ * Returns: ++ * slapi_eq_context - a handle to an opaque object which ++ * the caller can use to refer to this particular scheduled ++ */ ++Slapi_Eq_Context ++slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) ++{ ++ slapi_eq_context *tmp; ++ PR_ASSERT(eq_initialized); ++ if (!eq_stopped) { ++ tmp = eq_new(fn, arg, when, interval); ++ eq_enqueue(tmp); ++ slapi_log_err(SLAPI_LOG_HOUSE, NULL, ++ "added repeating event id %p at time %ld, interval %lu\n", ++ tmp->ec_id, when, interval); ++ return (tmp->ec_id); ++ } ++ return NULL; /* JCM - Not sure if this should be 0 or something else. */ ++} ++ ++ ++/* ++ * slapi_eq_cancel: cancel a pending event. ++ * Arguments: ++ * ctx: the context of the event which should be de-scheduled ++ */ ++int ++slapi_eq_cancel(Slapi_Eq_Context ctx) ++{ ++ slapi_eq_context **p, *tmp = NULL; ++ int found = 0; ++ ++ PR_ASSERT(eq_initialized); ++ if (!eq_stopped) { ++ PR_Lock(eq->eq_lock); ++ p = &(eq->eq_queue); ++ while (!found && *p != NULL) { ++ if ((*p)->ec_id == ctx) { ++ tmp = *p; ++ *p = (*p)->ec_next; ++ slapi_ch_free((void **)&tmp); ++ found = 1; ++ } else { ++ p = &((*p)->ec_next); ++ } ++ } ++ PR_Unlock(eq->eq_lock); ++ } ++ slapi_log_err(SLAPI_LOG_HOUSE, NULL, ++ "cancellation of event id %p requested: %s\n", ++ ctx, found ? 
"cancellation succeeded" : "event not found"); ++ return found; ++} ++ ++ ++/* ++ * Construct a new ec structure ++ */ ++static slapi_eq_context * ++eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) ++{ ++ slapi_eq_context *retptr = (slapi_eq_context *)slapi_ch_calloc(1, sizeof(slapi_eq_context)); ++ ++ retptr->ec_fn = fn; ++ retptr->ec_arg = arg; ++ /* ++ * retptr->ec_when = when < now ? now : when; ++ * we used to amke this check, but it make no sense: when queued, if when ++ * has expired, we'll be executed anyway. save the cycles, and just set ++ * ec_when. ++ */ ++ retptr->ec_when = when; ++ retptr->ec_interval = interval == 0UL ? 0UL : (interval + 999) / 1000; ++ retptr->ec_id = (Slapi_Eq_Context)retptr; ++ return retptr; ++} ++ ++ ++/* ++ * Add a new event to the event queue. ++ */ ++static void ++eq_enqueue(slapi_eq_context *newec) ++{ ++ slapi_eq_context **p; ++ ++ PR_ASSERT(NULL != newec); ++ PR_Lock(eq->eq_lock); ++ /* Insert in order (sorted by start time) in the list */ ++ for (p = &(eq->eq_queue); *p != NULL; p = &((*p)->ec_next)) { ++ if ((*p)->ec_when > newec->ec_when) { ++ break; ++ } ++ } ++ if (NULL != *p) { ++ newec->ec_next = *p; ++ } else { ++ newec->ec_next = NULL; ++ } ++ *p = newec; ++ PR_NotifyCondVar(eq->eq_cv); /* wake up scheduler thread */ ++ PR_Unlock(eq->eq_lock); ++} ++ ++ ++/* ++ * If there is an event in the queue scheduled at time ++ * or before, dequeue it and return a pointer ++ * to it. Otherwise, return NULL. ++ */ ++static slapi_eq_context * ++eq_dequeue(time_t now) ++{ ++ slapi_eq_context *retptr = NULL; ++ ++ PR_Lock(eq->eq_lock); ++ if (NULL != eq->eq_queue && eq->eq_queue->ec_when <= now) { ++ retptr = eq->eq_queue; ++ eq->eq_queue = retptr->ec_next; ++ } ++ PR_Unlock(eq->eq_lock); ++ return retptr; ++} ++ ++ ++/* ++ * Call all events which are due to run. ++ * Note that if we've missed a schedule ++ * opportunity, we don't try to catch up ++ * by calling the function repeatedly. ++ */ ++static void ++eq_call_all(void) ++{ ++ slapi_eq_context *p; ++ time_t curtime = slapi_current_utc_time(); ++ ++ while ((p = eq_dequeue(curtime)) != NULL) { ++ /* Call the scheduled function */ ++ p->ec_fn(p->ec_when, p->ec_arg); ++ slapi_log_err(SLAPI_LOG_HOUSE, NULL, ++ "Event id %p called at %ld (scheduled for %ld)\n", ++ p->ec_id, curtime, p->ec_when); ++ if (0UL != p->ec_interval) { ++ /* This is a repeating event. Requeue it. */ ++ do { ++ p->ec_when += p->ec_interval; ++ } while (p->ec_when < curtime); ++ eq_enqueue(p); ++ } else { ++ slapi_ch_free((void **)&p); ++ } ++ } ++} ++ ++ ++/* ++ * The main event queue loop. ++ */ ++static void ++eq_loop(void *arg __attribute__((unused))) ++{ ++ while (eq_running) { ++ time_t curtime = slapi_current_utc_time(); ++ PRIntervalTime timeout; ++ int until; ++ PR_Lock(eq->eq_lock); ++ while (!((NULL != eq->eq_queue) && (eq->eq_queue->ec_when <= curtime))) { ++ if (!eq_running) { ++ PR_Unlock(eq->eq_lock); ++ goto bye; ++ } ++ /* Compute new timeout */ ++ if (NULL != eq->eq_queue) { ++ until = eq->eq_queue->ec_when - curtime; ++ timeout = PR_SecondsToInterval(until); ++ } else { ++ timeout = PR_INTERVAL_NO_TIMEOUT; ++ } ++ PR_WaitCondVar(eq->eq_cv, timeout); ++ curtime = slapi_current_utc_time(); ++ } ++ /* There is some work to do */ ++ PR_Unlock(eq->eq_lock); ++ eq_call_all(); ++ } ++bye: ++ eq_stopped = 1; ++ PR_Lock(ss_lock); ++ PR_NotifyAllCondVar(ss_cv); ++ PR_Unlock(ss_lock); ++} ++ ++ ++/* ++ * Allocate and initialize the event queue structures. 
++ */ ++static PRStatus ++eq_create(void) ++{ ++ PR_ASSERT(NULL == eq->eq_lock); ++ if ((eq->eq_lock = PR_NewLock()) == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n"); ++ exit(1); ++ } ++ if ((eq->eq_cv = PR_NewCondVar(eq->eq_lock)) == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n"); ++ exit(1); ++ } ++ if ((ss_lock = PR_NewLock()) == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewLock failed\n"); ++ exit(1); ++ } ++ if ((ss_cv = PR_NewCondVar(ss_lock)) == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create", "PR_NewCondVar failed\n"); ++ exit(1); ++ } ++ eq->eq_queue = NULL; ++ eq_initialized = 1; ++ return PR_SUCCESS; ++} ++ ++ ++/* ++ * eq_start: start the event queue system. ++ * ++ * This should be called exactly once. It will start a ++ * thread which wakes up periodically and schedules events. ++ */ ++void ++eq_start() ++{ ++ PR_ASSERT(eq_initialized); ++ eq_running = 1; ++ if ((eq_loop_tid = PR_CreateThread(PR_USER_THREAD, (VFP)eq_loop, ++ NULL, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, ++ SLAPD_DEFAULT_THREAD_STACKSIZE)) == NULL) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_start", "eq_loop PR_CreateThread failed\n"); ++ exit(1); ++ } ++ slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have started\n"); ++} ++ ++ ++/* ++ * eq_init: initialize the event queue system. ++ * ++ * This function should be called early in server startup. ++ * Once it has been called, the event queue will queue ++ * events, but will not fire any events. Once all of the ++ * server plugins have been started, the eq_start() ++ * function should be called, and events will then start ++ * to fire. ++ */ ++void ++eq_init() ++{ ++ if (!eq_initialized) { ++ if (PR_SUCCESS != PR_CallOnce(&init_once, eq_create)) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_init", "eq_create failed\n"); ++ } ++ } ++} ++ ++ ++/* ++ * eq_stop: shut down the event queue system. ++ * Does not return until event queue is fully ++ * shut down. ++ */ ++void ++eq_stop() ++{ ++ slapi_eq_context *p, *q; ++ ++ if (NULL == eq || NULL == eq->eq_lock) { /* never started */ ++ eq_stopped = 1; ++ return; ++ } ++ ++ eq_stopped = 0; ++ eq_running = 0; ++ /* ++ * Signal the eq thread function to stop, and wait until ++ * it acknowledges by setting eq_stopped. ++ */ ++ while (!eq_stopped) { ++ PR_Lock(eq->eq_lock); ++ PR_NotifyAllCondVar(eq->eq_cv); ++ PR_Unlock(eq->eq_lock); ++ PR_Lock(ss_lock); ++ PR_WaitCondVar(ss_cv, PR_MillisecondsToInterval(100)); ++ PR_Unlock(ss_lock); ++ } ++ (void)PR_JoinThread(eq_loop_tid); ++ /* ++ * XXXggood we don't free the actual event queue data structures. ++ * This is intentional, to allow enqueueing/cancellation of events ++ * even after event queue services have shut down (these are no-ops). ++ * The downside is that the event queue can't be stopped and restarted ++ * easily. ++ */ ++ PR_Lock(eq->eq_lock); ++ p = eq->eq_queue; ++ while (p != NULL) { ++ q = p->ec_next; ++ slapi_ch_free((void **)&p); ++ /* Some ec_arg could get leaked here in shutdown (e.g., replica_name) ++ * This can be fixed by specifying a flag when the context is queued. 
++ * [After 6.2] ++ */ ++ p = q; ++ } ++ PR_Unlock(eq->eq_lock); ++ slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have shut down\n"); ++} ++ ++/* ++ * return arg (ec_arg) only if the context is in the event queue ++ */ ++void * ++slapi_eq_get_arg(Slapi_Eq_Context ctx) ++{ ++ slapi_eq_context **p; ++ ++ PR_ASSERT(eq_initialized); ++ if (eq && !eq_stopped) { ++ PR_Lock(eq->eq_lock); ++ p = &(eq->eq_queue); ++ while (p && *p != NULL) { ++ if ((*p)->ec_id == ctx) { ++ PR_Unlock(eq->eq_lock); ++ return (*p)->ec_arg; ++ } else { ++ p = &((*p)->ec_next); ++ } ++ } ++ PR_Unlock(eq->eq_lock); ++ } ++ return NULL; ++} +diff --git a/ldap/servers/slapd/eventq.c b/ldap/servers/slapd/eventq.c +index e1900724f..4c39e08cf 100644 +--- a/ldap/servers/slapd/eventq.c ++++ b/ldap/servers/slapd/eventq.c +@@ -17,14 +17,14 @@ eventq.c - Event queue/scheduling system. + + There are 3 publicly-accessible entry points: + +-slapi_eq_once(): cause an event to happen exactly once +-slapi_eq_repeat(): cause an event to happen repeatedly +-slapi_eq_cancel(): cancel a pending event ++slapi_eq_once_rel(): cause an event to happen exactly once ++slapi_eq_repeat_rel(): cause an event to happen repeatedly ++slapi_eq_cancel_rel(): cancel a pending event + + There is also an initialization point which must be + called by the server to initialize the event queue system: +-eq_start(), and an entry point used to shut down the system: +-eq_stop(). ++eq_start_rel(), and an entry point used to shut down the system: ++eq_stop_rel(). + *********************************************************** */ + + #include "slap.h" +@@ -60,36 +60,36 @@ typedef struct _event_queue + /* + * The event queue itself. + */ +-static event_queue eqs = {0}; +-static event_queue *eq = &eqs; ++static event_queue eqs_rel = {0}; ++static event_queue *eq_rel = &eqs_rel; + + /* + * Thread ID of the main thread loop + */ +-static PRThread *eq_loop_tid = NULL; ++static PRThread *eq_loop_rel_tid = NULL; + + /* + * Flags used to control startup/shutdown of the event queue + */ +-static int eq_running = 0; +-static int eq_stopped = 0; +-static int eq_initialized = 0; +-static pthread_mutex_t ss_lock; +-static pthread_cond_t ss_cv; +-PRCallOnceType init_once = {0}; ++static int eq_rel_running = 0; ++static int eq_rel_stopped = 0; ++static int eq_rel_initialized = 0; ++static pthread_mutex_t ss_rel_lock; ++static pthread_cond_t ss_rel_cv; ++PRCallOnceType init_once_rel = {0}; + + /* Forward declarations */ +-static slapi_eq_context *eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); +-static void eq_enqueue(slapi_eq_context *newec); +-static slapi_eq_context *eq_dequeue(time_t now); +-static PRStatus eq_create(void); ++static slapi_eq_context *eq_new_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); ++static void eq_enqueue_rel(slapi_eq_context *newec); ++static slapi_eq_context *eq_dequeue_rel(time_t now); ++static PRStatus eq_create_rel(void); + + + /* ******************************************************** */ + + + /* +- * slapi_eq_once: cause an event to happen exactly once. ++ * slapi_eq_once_rel: cause an event to happen exactly once. + * + * Arguments: + * fn: the function to call +@@ -101,18 +101,18 @@ static PRStatus eq_create(void); + * event. 
+ */ + Slapi_Eq_Context +-slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) ++slapi_eq_once_rel(slapi_eq_fn_t fn, void *arg, time_t when) + { + slapi_eq_context *tmp; +- PR_ASSERT(eq_initialized); +- if (!eq_stopped) { ++ PR_ASSERT(eq_rel_initialized); ++ if (!eq_rel_stopped) { + + Slapi_Eq_Context id; + +- tmp = eq_new(fn, arg, when, 0UL); ++ tmp = eq_new_rel(fn, arg, when, 0UL); + id = tmp->ec_id; + +- eq_enqueue(tmp); ++ eq_enqueue_rel(tmp); + + /* After this point, may have */ + /* been freed, depending on the thread */ +@@ -128,7 +128,7 @@ slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) + + + /* +- * slapi_eq_repeat: cause an event to happen repeatedly. ++ * slapi_eq_repeat_rel: cause an event to happen repeatedly. + * + * Arguments: + * fn: the function to call +@@ -141,13 +141,13 @@ slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) + * the caller can use to refer to this particular scheduled + */ + Slapi_Eq_Context +-slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) ++slapi_eq_repeat_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) + { + slapi_eq_context *tmp; +- PR_ASSERT(eq_initialized); +- if (!eq_stopped) { +- tmp = eq_new(fn, arg, when, interval); +- eq_enqueue(tmp); ++ PR_ASSERT(eq_rel_initialized); ++ if (!eq_rel_stopped) { ++ tmp = eq_new_rel(fn, arg, when, interval); ++ eq_enqueue_rel(tmp); + slapi_log_err(SLAPI_LOG_HOUSE, NULL, + "added repeating event id %p at time %ld, interval %lu\n", + tmp->ec_id, when, interval); +@@ -158,20 +158,20 @@ slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval + + + /* +- * slapi_eq_cancel: cancel a pending event. ++ * slapi_eq_cancel_rel: cancel a pending event. + * Arguments: + * ctx: the context of the event which should be de-scheduled + */ + int +-slapi_eq_cancel(Slapi_Eq_Context ctx) ++slapi_eq_cancel_rel(Slapi_Eq_Context ctx) + { + slapi_eq_context **p, *tmp = NULL; + int found = 0; + +- PR_ASSERT(eq_initialized); +- if (!eq_stopped) { +- pthread_mutex_lock(&(eq->eq_lock)); +- p = &(eq->eq_queue); ++ PR_ASSERT(eq_rel_initialized); ++ if (!eq_rel_stopped) { ++ pthread_mutex_lock(&(eq_rel->eq_lock)); ++ p = &(eq_rel->eq_queue); + while (!found && *p != NULL) { + if ((*p)->ec_id == ctx) { + tmp = *p; +@@ -182,7 +182,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx) + p = &((*p)->ec_next); + } + } +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + } + slapi_log_err(SLAPI_LOG_HOUSE, NULL, + "cancellation of event id %p requested: %s\n", +@@ -195,7 +195,7 @@ slapi_eq_cancel(Slapi_Eq_Context ctx) + * Construct a new ec structure + */ + static slapi_eq_context * +-eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) ++eq_new_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) + { + slapi_eq_context *retptr = (slapi_eq_context *)slapi_ch_calloc(1, sizeof(slapi_eq_context)); + +@@ -218,14 +218,14 @@ eq_new(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) + * Add a new event to the event queue. 
+ */ + static void +-eq_enqueue(slapi_eq_context *newec) ++eq_enqueue_rel(slapi_eq_context *newec) + { + slapi_eq_context **p; + + PR_ASSERT(NULL != newec); +- pthread_mutex_lock(&(eq->eq_lock)); ++ pthread_mutex_lock(&(eq_rel->eq_lock)); + /* Insert in order (sorted by start time) in the list */ +- for (p = &(eq->eq_queue); *p != NULL; p = &((*p)->ec_next)) { ++ for (p = &(eq_rel->eq_queue); *p != NULL; p = &((*p)->ec_next)) { + if ((*p)->ec_when > newec->ec_when) { + break; + } +@@ -236,8 +236,8 @@ eq_enqueue(slapi_eq_context *newec) + newec->ec_next = NULL; + } + *p = newec; +- pthread_cond_signal(&(eq->eq_cv)); /* wake up scheduler thread */ +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_cond_signal(&(eq_rel->eq_cv)); /* wake up scheduler thread */ ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + } + + +@@ -247,16 +247,16 @@ eq_enqueue(slapi_eq_context *newec) + * to it. Otherwise, return NULL. + */ + static slapi_eq_context * +-eq_dequeue(time_t now) ++eq_dequeue_rel(time_t now) + { + slapi_eq_context *retptr = NULL; + +- pthread_mutex_lock(&(eq->eq_lock)); +- if (NULL != eq->eq_queue && eq->eq_queue->ec_when <= now) { +- retptr = eq->eq_queue; +- eq->eq_queue = retptr->ec_next; ++ pthread_mutex_lock(&(eq_rel->eq_lock)); ++ if (NULL != eq_rel->eq_queue && eq_rel->eq_queue->ec_when <= now) { ++ retptr = eq_rel->eq_queue; ++ eq_rel->eq_queue = retptr->ec_next; + } +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + return retptr; + } + +@@ -268,12 +268,12 @@ eq_dequeue(time_t now) + * by calling the function repeatedly. + */ + static void +-eq_call_all(void) ++eq_call_all_rel(void) + { + slapi_eq_context *p; + time_t curtime = slapi_current_rel_time_t(); + +- while ((p = eq_dequeue(curtime)) != NULL) { ++ while ((p = eq_dequeue_rel(curtime)) != NULL) { + /* Call the scheduled function */ + p->ec_fn(p->ec_when, p->ec_arg); + slapi_log_err(SLAPI_LOG_HOUSE, NULL, +@@ -284,7 +284,7 @@ eq_call_all(void) + do { + p->ec_when += p->ec_interval; + } while (p->ec_when < curtime); +- eq_enqueue(p); ++ eq_enqueue_rel(p); + } else { + slapi_ch_free((void **)&p); + } +@@ -296,38 +296,38 @@ eq_call_all(void) + * The main event queue loop. 
+ */ + static void +-eq_loop(void *arg __attribute__((unused))) ++eq_loop_rel(void *arg __attribute__((unused))) + { +- while (eq_running) { ++ while (eq_rel_running) { + time_t curtime = slapi_current_rel_time_t(); + int until; + +- pthread_mutex_lock(&(eq->eq_lock)); +- while (!((NULL != eq->eq_queue) && (eq->eq_queue->ec_when <= curtime))) { +- if (!eq_running) { +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_mutex_lock(&(eq_rel->eq_lock)); ++ while (!((NULL != eq_rel->eq_queue) && (eq_rel->eq_queue->ec_when <= curtime))) { ++ if (!eq_rel_running) { ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + goto bye; + } + /* Compute new timeout */ +- if (NULL != eq->eq_queue) { ++ if (NULL != eq_rel->eq_queue) { + struct timespec current_time = slapi_current_rel_time_hr(); +- until = eq->eq_queue->ec_when - curtime; ++ until = eq_rel->eq_queue->ec_when - curtime; + current_time.tv_sec += until; +- pthread_cond_timedwait(&eq->eq_cv, &eq->eq_lock, ¤t_time); ++ pthread_cond_timedwait(&eq_rel->eq_cv, &eq_rel->eq_lock, ¤t_time); + } else { +- pthread_cond_wait(&eq->eq_cv, &eq->eq_lock); ++ pthread_cond_wait(&eq_rel->eq_cv, &eq_rel->eq_lock); + } + curtime = slapi_current_rel_time_t(); + } + /* There is some work to do */ +- pthread_mutex_unlock(&(eq->eq_lock)); +- eq_call_all(); ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); ++ eq_call_all_rel(); + } + bye: +- eq_stopped = 1; +- pthread_mutex_lock(&ss_lock); +- pthread_cond_broadcast(&ss_cv); +- pthread_mutex_unlock(&ss_lock); ++ eq_rel_stopped = 1; ++ pthread_mutex_lock(&ss_rel_lock); ++ pthread_cond_broadcast(&ss_rel_cv); ++ pthread_mutex_unlock(&ss_rel_lock); + } + + +@@ -335,73 +335,73 @@ bye: + * Allocate and initialize the event queue structures. + */ + static PRStatus +-eq_create(void) ++eq_create_rel(void) + { + pthread_condattr_t condAttr; + int rc = 0; + + /* Init the eventq mutex and cond var */ +- if (pthread_mutex_init(&eq->eq_lock, NULL) != 0) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ if (pthread_mutex_init(&eq_rel->eq_lock, NULL) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", + "Failed to create lock: error %d (%s)\n", + rc, strerror(rc)); + exit(1); + } + if ((rc = pthread_condattr_init(&condAttr)) != 0) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", + "Failed to create new condition attribute variable. error %d (%s)\n", + rc, strerror(rc)); + exit(1); + } + if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", + "Cannot set condition attr clock. error %d (%s)\n", + rc, strerror(rc)); + exit(1); + } +- if ((rc = pthread_cond_init(&eq->eq_cv, &condAttr)) != 0) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ if ((rc = pthread_cond_init(&eq_rel->eq_cv, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", + "Failed to create new condition variable. 
error %d (%s)\n", + rc, strerror(rc)); + exit(1); + } + + /* Init the "ss" mutex and condition var */ +- if (pthread_mutex_init(&ss_lock, NULL) != 0) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ if (pthread_mutex_init(&ss_rel_lock, NULL) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", + "Failed to create ss lock: error %d (%s)\n", + rc, strerror(rc)); + exit(1); + } +- if ((rc = pthread_cond_init(&ss_cv, &condAttr)) != 0) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_create", ++ if ((rc = pthread_cond_init(&ss_rel_cv, &condAttr)) != 0) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_create_rel", + "Failed to create new ss condition variable. error %d (%s)\n", + rc, strerror(rc)); + exit(1); + } + pthread_condattr_destroy(&condAttr); /* no longer needed */ + +- eq->eq_queue = NULL; +- eq_initialized = 1; ++ eq_rel->eq_queue = NULL; ++ eq_rel_initialized = 1; + return PR_SUCCESS; + } + + + /* +- * eq_start: start the event queue system. ++ * eq_start_rel: start the event queue system. + * + * This should be called exactly once. It will start a + * thread which wakes up periodically and schedules events. + */ + void +-eq_start() ++eq_start_rel() + { +- PR_ASSERT(eq_initialized); +- eq_running = 1; +- if ((eq_loop_tid = PR_CreateThread(PR_USER_THREAD, (VFP)eq_loop, ++ PR_ASSERT(eq_rel_initialized); ++ eq_rel_running = 1; ++ if ((eq_loop_rel_tid = PR_CreateThread(PR_USER_THREAD, (VFP)eq_loop_rel, + NULL, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, + SLAPD_DEFAULT_THREAD_STACKSIZE)) == NULL) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_start", "eq_loop PR_CreateThread failed\n"); ++ slapi_log_err(SLAPI_LOG_ERR, "eq_start_rel", "eq_loop_rel PR_CreateThread failed\n"); + exit(1); + } + slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have started\n"); +@@ -409,55 +409,55 @@ eq_start() + + + /* +- * eq_init: initialize the event queue system. ++ * eq_init_rel: initialize the event queue system. + * + * This function should be called early in server startup. + * Once it has been called, the event queue will queue + * events, but will not fire any events. Once all of the +- * server plugins have been started, the eq_start() ++ * server plugins have been started, the eq_start_rel() + * function should be called, and events will then start + * to fire. + */ + void +-eq_init() ++eq_init_rel() + { +- if (!eq_initialized) { +- if (PR_SUCCESS != PR_CallOnce(&init_once, eq_create)) { +- slapi_log_err(SLAPI_LOG_ERR, "eq_init", "eq_create failed\n"); ++ if (!eq_rel_initialized) { ++ if (PR_SUCCESS != PR_CallOnce(&init_once_rel, eq_create_rel)) { ++ slapi_log_err(SLAPI_LOG_ERR, "eq_init_rel", "eq_create_rel failed\n"); + } + } + } + + + /* +- * eq_stop: shut down the event queue system. ++ * eq_stop_rel: shut down the event queue system. + * Does not return until event queue is fully + * shut down. + */ + void +-eq_stop() ++eq_stop_rel() + { + slapi_eq_context *p, *q; + +- if (NULL == eq) { /* never started */ +- eq_stopped = 1; ++ if (NULL == eq_rel) { /* never started */ ++ eq_rel_stopped = 1; + return; + } + +- eq_stopped = 0; +- eq_running = 0; ++ eq_rel_stopped = 0; ++ eq_rel_running = 0; + /* + * Signal the eq thread function to stop, and wait until +- * it acknowledges by setting eq_stopped. ++ * it acknowledges by setting eq_rel_stopped. 
+ */ +- while (!eq_stopped) { ++ while (!eq_rel_stopped) { + struct timespec current_time = {0}; + +- pthread_mutex_lock(&(eq->eq_lock)); +- pthread_cond_broadcast(&(eq->eq_cv)); +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_mutex_lock(&(eq_rel->eq_lock)); ++ pthread_cond_broadcast(&(eq_rel->eq_cv)); ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + +- pthread_mutex_lock(&ss_lock); ++ pthread_mutex_lock(&ss_rel_lock); + clock_gettime(CLOCK_MONOTONIC, ¤t_time); + if (current_time.tv_nsec + 100000000 > 1000000000) { + /* nanoseconds will overflow, adjust the seconds and nanoseconds */ +@@ -467,10 +467,10 @@ eq_stop() + } else { + current_time.tv_nsec += 100000000; /* 100 ms */ + } +- pthread_cond_timedwait(&ss_cv, &ss_lock, ¤t_time); +- pthread_mutex_unlock(&ss_lock); ++ pthread_cond_timedwait(&ss_rel_cv, &ss_rel_lock, ¤t_time); ++ pthread_mutex_unlock(&ss_rel_lock); + } +- (void)PR_JoinThread(eq_loop_tid); ++ (void)PR_JoinThread(eq_loop_rel_tid); + /* + * XXXggood we don't free the actual event queue data structures. + * This is intentional, to allow enqueueing/cancellation of events +@@ -478,8 +478,8 @@ eq_stop() + * The downside is that the event queue can't be stopped and restarted + * easily. + */ +- pthread_mutex_lock(&(eq->eq_lock)); +- p = eq->eq_queue; ++ pthread_mutex_lock(&(eq_rel->eq_lock)); ++ p = eq_rel->eq_queue; + while (p != NULL) { + q = p->ec_next; + slapi_ch_free((void **)&p); +@@ -489,7 +489,7 @@ eq_stop() + */ + p = q; + } +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + slapi_log_err(SLAPI_LOG_HOUSE, NULL, "event queue services have shut down\n"); + } + +@@ -497,23 +497,23 @@ eq_stop() + * return arg (ec_arg) only if the context is in the event queue + */ + void * +-slapi_eq_get_arg(Slapi_Eq_Context ctx) ++slapi_eq_get_arg_rel(Slapi_Eq_Context ctx) + { + slapi_eq_context **p; + +- PR_ASSERT(eq_initialized); +- if (eq && !eq_stopped) { +- pthread_mutex_lock(&(eq->eq_lock)); +- p = &(eq->eq_queue); ++ PR_ASSERT(eq_rel_initialized); ++ if (eq_rel && !eq_rel_stopped) { ++ pthread_mutex_lock(&(eq_rel->eq_lock)); ++ p = &(eq_rel->eq_queue); + while (p && *p != NULL) { + if ((*p)->ec_id == ctx) { +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + return (*p)->ec_arg; + } else { + p = &((*p)->ec_next); + } + } +- pthread_mutex_unlock(&(eq->eq_lock)); ++ pthread_mutex_unlock(&(eq_rel->eq_lock)); + } + return NULL; + } +diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c +index 104f6826c..dbc8cec15 100644 +--- a/ldap/servers/slapd/main.c ++++ b/ldap/servers/slapd/main.c +@@ -979,7 +979,8 @@ main(int argc, char **argv) + fedse_create_startOK(DSE_FILENAME, DSE_STARTOKFILE, + slapdFrontendConfig->configdir); + +- eq_init(); /* must be done before plugins started */ ++ eq_init(); /* DEPRECATED */ ++ eq_init_rel(); /* must be done before plugins started */ + + /* Start the SNMP collator if counters are enabled. 
*/ + if (config_get_slapi_counters()) { +@@ -1035,7 +1036,8 @@ main(int argc, char **argv) + goto cleanup; + } + +- eq_start(); /* must be done after plugins started */ ++ eq_start(); /* must be done after plugins started - DEPRECATED */ ++ eq_start_rel(); /* must be done after plugins started */ + + #ifdef HPUX10 + /* HPUX linker voodoo */ +@@ -2205,10 +2207,13 @@ slapd_exemode_db2ldif(int argc, char **argv, struct main_config *mcfg) + */ + plugin_get_plugin_dependencies(repl_plg_name, &plugin_list); + +- eq_init(); /* must be done before plugins started */ ++ eq_init(); /* must be done before plugins started - DEPRECATED */ ++ eq_init_rel(); /* must be done before plugins started */ ++ + ps_init_psearch_system(); /* must come before plugin_startall() */ + plugin_startall(argc, argv, plugin_list); +- eq_start(); /* must be done after plugins started */ ++ eq_start(); /* must be done after plugins started - DEPRECATED*/ ++ eq_start_rel(); /* must be done after plugins started */ + charray_free(plugin_list); + } + +@@ -2263,8 +2268,9 @@ slapd_exemode_db2ldif(int argc, char **argv, struct main_config *mcfg) + charray_free(mcfg->cmd_line_instance_names); + charray_free(mcfg->db2ldif_include); + if (mcfg->db2ldif_dump_replica) { +- eq_stop(); /* event queue should be shutdown before closing +- all plugins (especailly, replication plugin) */ ++ eq_stop(); /* DEPRECATED*/ ++ eq_stop_rel(); /* event queue should be shutdown before closing ++ all plugins (especially, replication plugin) */ + plugin_closeall(1 /* Close Backends */, 1 /* Close Globals */); + } + return (return_value); +diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h +index 3acc24f03..87080dd82 100644 +--- a/ldap/servers/slapd/proto-slap.h ++++ b/ldap/servers/slapd/proto-slap.h +@@ -1322,7 +1322,6 @@ void factory_destroy_extension(int type, void *object, void *parent, void **exte + /* + * auditlog.c + */ +- + void write_audit_log_entry(Slapi_PBlock *pb); + void auditlog_hide_unhashed_pw(void); + void auditlog_expose_unhashed_pw(void); +@@ -1334,10 +1333,15 @@ void auditfaillog_expose_unhashed_pw(void); + /* + * eventq.c + */ ++void eq_init_rel(void); ++void eq_start_rel(void); ++void eq_stop_rel(void); ++/* Deprecated eventq that uses REALTIME clock instead of MONOTONIC */ + void eq_init(void); + void eq_start(void); + void eq_stop(void); + ++ + /* + * uniqueidgen.c + */ +diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h +index 55ded5eb8..f76b86e3c 100644 +--- a/ldap/servers/slapd/slapi-plugin.h ++++ b/ldap/servers/slapd/slapi-plugin.h +@@ -6084,7 +6084,7 @@ void slapi_lock_mutex(Slapi_Mutex *mutex); + int slapi_unlock_mutex(Slapi_Mutex *mutex); + Slapi_CondVar *slapi_new_condvar(Slapi_Mutex *mutex); + void slapi_destroy_condvar(Slapi_CondVar *cvar); +-int slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout); ++int slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout) __attribute__((deprecated)); + int slapi_notify_condvar(Slapi_CondVar *cvar, int notify_all); + int slapi_wait_condvar_pt(Slapi_CondVar *cvar, Slapi_Mutex *mutex, struct timeval *timeout); + +@@ -8059,24 +8059,24 @@ typedef void (*slapi_eq_fn_t)(time_t when, void *arg); + * + * \param fn The function to call when the event is triggered. + * \param arg An argument to pass to the called function. +- * \param when The time that the function should be called. ++ * \param when The time that the function should be called(MONOTONIC clock). 
+ * + * \return slapi_eq_context + */ +-Slapi_Eq_Context slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when); ++Slapi_Eq_Context slapi_eq_once_rel(slapi_eq_fn_t fn, void *arg, time_t when); + + /** + * Cause an event to happen repeatedly. + * + * \param fn The function to call when the vent is triggered. + * \param arg An argument to pass to the called function. +- * \param when The time that the function should be called. ++ * \param when The time that the function should be called(MONOTONIC clock). + * \param interval The amount of time (in milliseconds) between + * successive calls to the function. + * + * \return slapi_eq_context + */ +-Slapi_Eq_Context slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); ++Slapi_Eq_Context slapi_eq_repeat_rel(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval); + + /** + * Cause a scheduled event to be canceled. +@@ -8086,7 +8086,7 @@ Slapi_Eq_Context slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsig + * \return 1 If event was found and canceled. + * \return 0 If event was not found in the queue. + */ +-int slapi_eq_cancel(Slapi_Eq_Context ctx); ++int slapi_eq_cancel_rel(Slapi_Eq_Context ctx); + + /** + * Return the event's argument. +@@ -8095,7 +8095,55 @@ int slapi_eq_cancel(Slapi_Eq_Context ctx); + * + * \return A pointer to the event argument. + */ +-void *slapi_eq_get_arg(Slapi_Eq_Context ctx); ++void *slapi_eq_get_arg_rel(Slapi_Eq_Context ctx); ++ ++/* ++ * These event queue functions are now DEPRECATED as they REALTIME clocks ++ * instead of the preferred MONOTONIC clocks. ++ */ ++ ++/** ++ * Cause an event to happen exactly once. ++ * ++ * \param fn The function to call when the event is triggered. ++ * \param arg An argument to pass to the called function. ++ * \param when The time that the function should be called(REALTIME clock). ++ * ++ * \return slapi_eq_context ++ */ ++Slapi_Eq_Context slapi_eq_once(slapi_eq_fn_t fn, void *arg, time_t when) __attribute__((deprecated)); ++ ++/** ++ * Cause an event to happen repeatedly. ++ * ++ * \param fn The function to call when the vent is triggered. ++ * \param arg An argument to pass to the called function. ++ * \param when The time that the function should be called(REALTIME clock). ++ * \param interval The amount of time (in milliseconds) between ++ * successive calls to the function. ++ * ++ * \return slapi_eq_context ++ */ ++Slapi_Eq_Context slapi_eq_repeat(slapi_eq_fn_t fn, void *arg, time_t when, unsigned long interval) __attribute__((deprecated)); ++ ++/** ++ * Cause a scheduled event to be canceled. ++ * ++ * \param ctx The event object to cancel ++ * ++ * \return 1 If event was found and canceled. ++ * \return 0 If event was not found in the queue. ++ */ ++int slapi_eq_cancel(Slapi_Eq_Context ctx) __attribute__((deprecated)); ++ ++/** ++ * Return the event's argument. ++ * ++ * \param ctx The event object ++ * ++ * \return A pointer to the event argument. ++ */ ++void *slapi_eq_get_arg(Slapi_Eq_Context ctx) __attribute__((deprecated)); + + /** + * Construct a full path and name of a plugin. 
+diff --git a/ldap/servers/slapd/slapi2runtime.c b/ldap/servers/slapd/slapi2runtime.c +index 85dc4c9a8..53927934a 100644 +--- a/ldap/servers/slapd/slapi2runtime.c ++++ b/ldap/servers/slapd/slapi2runtime.c +@@ -133,7 +133,7 @@ slapi_destroy_condvar(Slapi_CondVar *cvar) + + + /* +- * Function: slapi_wait_condvar ++ * Function: slapi_wait_condvar (DEPRECATED) + * Description: behaves just like PR_WaitCondVar() except timeout is + * in seconds and microseconds instead of PRIntervalTime units. + * If timeout is NULL, this call blocks indefinitely. +@@ -145,9 +145,26 @@ slapi_destroy_condvar(Slapi_CondVar *cvar) + int + slapi_wait_condvar(Slapi_CondVar *cvar, struct timeval *timeout) + { +- /* deprecated in favor of slapi_wait_condvar_pt() which requires that the ++ /* Deprecated in favor of slapi_wait_condvar_pt() which requires that the + * mutex be passed in */ +- return (0); ++ PRIntervalTime prit; ++ ++ if (cvar == NULL) { ++ return (0); ++ } ++ ++ if (timeout == NULL) { ++ prit = PR_INTERVAL_NO_TIMEOUT; ++ } else { ++ prit = PR_SecondsToInterval(timeout->tv_sec) + PR_MicrosecondsToInterval(timeout->tv_usec); ++ } ++ ++ if (PR_WaitCondVar((PRCondVar *)cvar, prit) != PR_SUCCESS) { ++ return (0); ++ } ++ ++ return (1); ++ + } + + int +diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c +index 3dd3af657..d760515f4 100644 +--- a/ldap/servers/slapd/snmp_collator.c ++++ b/ldap/servers/slapd/snmp_collator.c +@@ -385,8 +385,9 @@ snmp_collator_start() + snmp_collator_init(); + + /* Arrange to be called back periodically to update the mmap'd stats file. */ +- snmp_eq_ctx = slapi_eq_repeat(snmp_collator_update, NULL, (time_t)0, +- SLAPD_SNMP_UPDATE_INTERVAL); ++ snmp_eq_ctx = slapi_eq_repeat_rel(snmp_collator_update, NULL, ++ slapi_current_rel_time_t(), ++ SLAPD_SNMP_UPDATE_INTERVAL); + return 0; + } + +@@ -411,7 +412,7 @@ snmp_collator_stop() + } + + /* Abort any pending events */ +- slapi_eq_cancel(snmp_eq_ctx); ++ slapi_eq_cancel_rel(snmp_eq_ctx); + snmp_collator_stopped = 1; + + /* acquire the semaphore */ +diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c +index 26f281cba..bded287c6 100644 +--- a/ldap/servers/slapd/task.c ++++ b/ldap/servers/slapd/task.c +@@ -387,7 +387,7 @@ slapi_task_status_changed(Slapi_Task *task) + ttl = (24*3600); /* be reasonable, allow to check task status not longer than one day */ + task->task_flags |= SLAPI_TASK_DESTROYING; + /* queue an event to destroy the state info */ +- slapi_eq_once(destroy_task, (void *)task, slapi_current_rel_time_t() + ttl); ++ slapi_eq_once_rel(destroy_task, (void *)task, slapi_current_rel_time_t() + ttl); + } + slapi_free_search_results_internal(pb); + slapi_pblock_destroy(pb); +diff --git a/ldap/servers/slapd/uuid.c b/ldap/servers/slapd/uuid.c +index a8bd6ee6c..31384a544 100644 +--- a/ldap/servers/slapd/uuid.c ++++ b/ldap/servers/slapd/uuid.c +@@ -186,7 +186,8 @@ uuid_init(const char *configDir, const Slapi_DN *configDN, PRBool mtGen) + + /* schedule update task for multithreaded generation */ + if (_state.mtGen) +- slapi_eq_repeat(uuid_update_state, NULL, (time_t)0, UPDATE_INTERVAL); ++ slapi_eq_repeat_rel(uuid_update_state, NULL, slapi_current_rel_time_t(), ++ UPDATE_INTERVAL); + + _state.initialized = PR_TRUE; + return UUID_SUCCESS; +-- +2.26.2 + diff --git a/SPECS/389-ds-base.spec b/SPECS/389-ds-base.spec index 45a2ce3..be63f08 100644 --- a/SPECS/389-ds-base.spec +++ b/SPECS/389-ds-base.spec @@ -45,7 +45,7 @@ ExcludeArch: i686 Summary: 389 Directory Server (base) Name: 389-ds-base 
Version: 1.4.3.16 -Release: %{?relprefix}7%{?prerel}%{?dist} +Release: %{?relprefix}8%{?prerel}%{?dist} License: GPLv3+ URL: https://www.port389.org Group: System Environment/Daemons @@ -199,6 +199,12 @@ Patch22: 0022-Fix-cherry-pick-erorr.patch Patch23: 0023-Issue-4419-Warn-users-of-skipped-entries-during-ldif.patch Patch24: 0024-Issue-4480-Unexpected-info-returned-to-ldap-request-.patch Patch25: 0025-Issue-4414-disk-monitoring-prevent-division-by-zero-.patch +Patch26: 0026-Issue-4504-Insure-ldapi-is-enabled-in-repl_monitor_t.patch +Patch27: 0027-Issue-4315-performance-search-rate-nagle-triggers-hi.patch +Patch28: 0028-Issue-4504-insure-that-repl_monitor_test-use-ldapi-f.patch +Patch29: 0029-Issue-4528-Fix-cn-monitor-SCOPE_ONE-search-4529.patch +Patch30: 0030-Issue-4384-Use-MONOTONIC-clock-for-all-timing-events.patch +Patch31: 0031-Issue-4384-Separate-eventq-into-REALTIME-and-MONOTON.patch %description 389 Directory Server is an LDAPv3 compliant server. The base package includes @@ -816,6 +822,11 @@ exit 0 %doc README.md %changelog +* Wed Jan 13 2021 Mark Reynolds - 1.4.3.16-8 +- Bump version to 1.4.3.16-8 +- Resolves: Bug 1903539 - cn=monitor is throwing err=32 with scope: -s one +- Resolves: Bug 1893870 - PR_WaitCondVar() issue causes replication delay when clock jumps backwards + * Thu Jan 7 2021 Mark Reynolds - 1.4.3.16-7 - Bump version to 1.4.3.16-7 - Resolves: Bug 1890118 - SIGFPE crash in rhds disk monitoring routine
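
Note (illustrative sketch, not part of the patch series above): the eventq rework in 0030/0031 rests on the standard POSIX pattern of binding a condition variable to CLOCK_MONOTONIC so that timed waits cannot be delayed or fired early by wall-clock steps, which is the behaviour the Bug 1893870 changelog entry refers to. The following minimal, self-contained C program demonstrates that pattern with portable pthread and clock_gettime calls only; all file and variable names here are hypothetical and are not 389-ds-base identifiers.

/*
 * monotonic_wait.c - hypothetical demo of a CLOCK_MONOTONIC condvar wait.
 * The condvar is created with a MONOTONIC clock attribute and the absolute
 * deadline is computed from clock_gettime(CLOCK_MONOTONIC), so the timeout
 * is unaffected by changes to the system (REALTIME) clock.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv;

int
main(void)
{
    pthread_condattr_t attr;
    struct timespec deadline;

    /* Bind the condvar to the MONOTONIC clock before creating it. */
    pthread_condattr_init(&attr);
    pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
    pthread_cond_init(&cv, &attr);
    pthread_condattr_destroy(&attr);

    /* Deadline = "now" on the MONOTONIC clock plus two seconds. */
    clock_gettime(CLOCK_MONOTONIC, &deadline);
    deadline.tv_sec += 2;

    pthread_mutex_lock(&lock);
    /* With no signaller, this returns ETIMEDOUT after ~2s of monotonic
     * time, even if the wall clock is stepped while we are waiting. */
    int rc = pthread_cond_timedwait(&cv, &lock, &deadline);
    pthread_mutex_unlock(&lock);

    printf("timedwait returned %s\n",
           rc == ETIMEDOUT ? "ETIMEDOUT" : "woken by signal");
    return 0;
}

Build with "cc monotonic_wait.c -o monotonic_wait -lpthread"; the wait expires after roughly two seconds regardless of any system-time adjustment made while it sleeps, which is why the relative ("_rel") event queue schedules against this clock instead of the REALTIME one.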