1449 lines
		
	
	
		
			56 KiB
		
	
	
	
		
			Diff
		
	
	
	
	
	
			
		
		
	
	
			1449 lines
		
	
	
		
			56 KiB
		
	
	
	
		
			Diff
		
	
	
	
	
	
| From a3bb421391a65699a91343b35e7c0cf7f07a9add Mon Sep 17 00:00:00 2001
 | |
| From: progier389 <progier@redhat.com>
 | |
| Date: Thu, 21 Aug 2025 17:30:00 +0200
 | |
| Subject: [PATCH] =?UTF-8?q?Issue=206919=20-=20numSubordinates/tombstoneNum?=
 | |
|  =?UTF-8?q?Subordinates=20are=20inconsisten=E2=80=A6=20(#6920)?=
 | |
| MIME-Version: 1.0
 | |
| Content-Type: text/plain; charset=UTF-8
 | |
| Content-Transfer-Encoding: 8bit
 | |
| 
 | |
| * Issue 6919 - numSubordinates/tombstoneNumSubordinates are inconsistent after import
 | |
| 
 | |
| Problem was that the number of tombstones was not properly computed.
 | |
| With bdb: tombstoneNumSubordinates was not computed:
 | |
| With lmdb: numSubordinates was also including the tombstones
 | |
| Fixed the numSubordinates/tombstoneNumSubordinates computation during import by:
 | |
| walking the entryrdn C keys (because parentid does not contain tombstones on bdb)
 | |
| checking if the children entry is a tombstone by looking in objectclass index
 | |
| and increasing numSubordinates/tombstoneNumSubordinates subcount accordingly
 | |
| performed some code cleanup.
 | |
| - removed the job->mother containing the hashtable of non leaf entry ids
 | |
| - moved the function that replaces the numSubordinates/tombstoneNumSubordinates
 | |
| attribute in an entry back in export.c (rather than duplicating it in
 | |
| bdb_import.c and mdb_import.c)
 | |
| - changed a PR_ASSERT that is no longer true.
 | |
| 
 | |
| Notes:
 | |
| Not using the parentid index because it does not contain the tombstones on bdb
 | |
| (although it does on lmdb)
 | |
| The new subcount computation algorithm was not possible when the code was originally written
 | |
| because it requires the entryrdn index and having all the keys (i.e: no ALLIDs in the indexes)
 | |
| That was why a hash table of ids and an idlist was used. (I removed that code because it could generate a serious
 | |
| overhead if there is a large number of non leaf entries (typically if user entries have children)
 | |
| 
 | |
| Issue: #6919
 | |
| 
 | |
| Reviewed by: @mreynolds389 (Thanks!)
 | |
| 
 | |
| (cherry picked from commit ea44136753b011be3ee7517883e7ec273e2416bb)
 | |
| ---
 | |
|  .../numsubordinates_replication_test.py       | 124 ++++-
 | |
|  ldap/servers/plugins/replication/cl5_api.c    |   2 +-
 | |
|  .../slapd/back-ldbm/db-bdb/bdb_import.c       | 492 ++++++++----------
 | |
|  .../back-ldbm/db-bdb/bdb_import_threads.c     |  13 -
 | |
|  .../slapd/back-ldbm/db-bdb/bdb_layer.h        |   3 +-
 | |
|  .../slapd/back-ldbm/db-mdb/mdb_import.c       | 264 ++++++----
 | |
|  .../back-ldbm/db-mdb/mdb_import_threads.c     |   1 +
 | |
|  .../slapd/back-ldbm/db-mdb/mdb_instance.c     |   2 +-
 | |
|  .../slapd/back-ldbm/db-mdb/mdb_layer.h        |   2 -
 | |
|  ldap/servers/slapd/back-ldbm/import.c         |  59 +++
 | |
|  ldap/servers/slapd/back-ldbm/import.h         |   2 +-
 | |
|  ldap/servers/slapd/back-ldbm/ldif2ldbm.c      |  43 --
 | |
|  src/lib389/lib389/__init__.py                 |   2 +-
 | |
|  src/lib389/lib389/_mapped_object.py           |   2 +-
 | |
|  14 files changed, 571 insertions(+), 440 deletions(-)
 | |
| 
 | |
| diff --git a/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py b/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py
 | |
| index 9ba10657d..2624b2144 100644
 | |
| --- a/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py
 | |
| +++ b/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py
 | |
| @@ -9,12 +9,15 @@
 | |
|  import os
 | |
|  import logging
 | |
|  import pytest
 | |
| +import re
 | |
|  from lib389._constants import DEFAULT_SUFFIX
 | |
| -from lib389.replica import ReplicationManager
 | |
|  from lib389.idm.organizationalunit import OrganizationalUnits
 | |
|  from lib389.idm.user import UserAccounts
 | |
| -from lib389.topologies import topology_i2 as topo_i2
 | |
| -
 | |
| +from lib389.replica import ReplicationManager
 | |
| +from lib389.tasks import *
 | |
| +from lib389.tombstone import Tombstones
 | |
| +from lib389.topologies import topology_i2 as topo_i2, topology_m2 as topo_m2
 | |
| +from lib389.utils import get_default_db_lib
 | |
|  
 | |
|  pytestmark = pytest.mark.tier1
 | |
|  
 | |
| @@ -26,6 +29,61 @@ else:
 | |
|  log = logging.getLogger(__name__)
 | |
|  
 | |
|  
 | |
| +def get_test_name():
 | |
| +    full_testname = os.getenv('PYTEST_CURRENT_TEST')
 | |
| +    res = re.match('.*::([^ ]+) .*', full_testname)
 | |
| +    assert res
 | |
| +    return res.group(1)
 | |
| +
 | |
| +
 | |
| +@pytest.fixture(scope="function")
 | |
| +def with_container(topo_m2, request):
 | |
| +    # Creates a organizational unit container with proper cleanup
 | |
| +    testname = get_test_name()
 | |
| +    ou = f'test_container_{testname}'
 | |
| +    S1 = topo_m2.ms["supplier1"]
 | |
| +    S2 = topo_m2.ms["supplier2"]
 | |
| +    repl = ReplicationManager(DEFAULT_SUFFIX)
 | |
| +
 | |
| +    log.info(f"Create container ou={ou},{DEFAULT_SUFFIX} on {S1.serverid}")
 | |
| +    ous1 = OrganizationalUnits(S1, DEFAULT_SUFFIX)
 | |
| +    container = ous1.create(properties={
 | |
| +         'ou': ou,
 | |
| +         'description': f'Test container for {testname} test'
 | |
| +    })
 | |
| +
 | |
| +    def fin():
 | |
| +        container.delete(recursive=True)
 | |
| +        repl.wait_for_replication(S1, S2)
 | |
| +    
 | |
| +    if not DEBUGGING:
 | |
| +        request.addfinalizer(fin)
 | |
| +    repl.wait_for_replication(S1, S2)
 | |
| +
 | |
| +    return container
 | |
| +
 | |
| +
 | |
| +def verify_value_against_entries(container, attr, entries, msg):
 | |
| +    # Check that container attr value match the number of entries
 | |
| +    num = container.get_attr_val_int(attr)
 | |
| +    num_entries = len(entries)
 | |
| +    dns = [ e.dn for e in entries ]
 | |
| +    log.debug(f"[{msg}] {attr}: entries: {entries}")
 | |
| +    log.info(f"[{msg}] container is {container}")
 | |
| +    log.info(f"[{msg}] {attr}: {num} (Expecting: {num_entries})")
 | |
| +    assert num == num_entries, (
 | |
| +        f"{attr} attribute has wrong value: {num} {msg}, was expecting: {num_entries}",
 | |
| +        f"entries are {dns}" )
 | |
| +
 | |
| +
 | |
| +def verify_subordinates(inst, container, msg):
 | |
| +    log.info(f"Verify numSubordinates and tombstoneNumSubordinates {msg}")
 | |
| +    tombstones = Tombstones(inst, container.dn).list()
 | |
| +    entries = container.search(scope='one')
 | |
| +    verify_value_against_entries(container, 'numSubordinates', entries, msg)
 | |
| +    verify_value_against_entries(container, 'tombstoneNumSubordinates', tombstones, msg)
 | |
| +
 | |
| +
 | |
|  def test_numsubordinates_tombstone_replication_mismatch(topo_i2):
 | |
|      """Test that numSubordinates values match between replicas after tombstone creation
 | |
|  
 | |
| @@ -136,9 +194,67 @@ def test_numsubordinates_tombstone_replication_mismatch(topo_i2):
 | |
|          f"instance2 has {tombstone_numsubordinates_instance2}. "
 | |
|      )
 | |
|  
 | |
| +def test_numsubordinates_tombstone_after_import(topo_m2, with_container):
 | |
| +    """Test that numSubordinates values are the expected one after an import
 | |
| +
 | |
| +    :id: 67bec454-6bb3-11f0-b9ae-c85309d5c3e3
 | |
| +    :setup: Two suppliers instances with an ou container
 | |
| +    :steps:
 | |
| +        1. Create a container (organizational unit) on the first instance
 | |
| +        2. Create a user object in that container
 | |
| +        3. Delete the user object (this creates a tombstone)
 | |
| +        4. Set up replication between the two instances
 | |
| +        5. Wait for replication to complete
 | |
| +        6. Check numSubordinates on both instances
 | |
| +        7. Check tombstoneNumSubordinates on both instances
 | |
| +        8. Verify that numSubordinates values match on both instances
 | |
| +    :expectedresults:
 | |
| +        1. Container should be created successfully
 | |
| +        2. User object should be created successfully
 | |
| +        3. User object should be deleted successfully
 | |
| +        4. Replication should be set up successfully
 | |
| +        5. Replication should complete successfully
 | |
| +        6. numSubordinates should be accessible on both instances
 | |
| +        7. tombstoneNumSubordinates should be accessible on both instances
 | |
| +        8. numSubordinates values should match on both instances
 | |
| +    """
 | |
| +
 | |
| +    S1 = topo_m2.ms["supplier1"]
 | |
| +    S2 = topo_m2.ms["supplier2"]
 | |
| +    container = with_container
 | |
| +    repl = ReplicationManager(DEFAULT_SUFFIX)
 | |
| +    tasks = Tasks(S1)
 | |
| +
 | |
| +    log.info("Create some user objects in that container")
 | |
| +    users1 = UserAccounts(S1, DEFAULT_SUFFIX, rdn=f"ou={container.rdn}")
 | |
| +    users = {}
 | |
| +    for uid in range(1001,1010):
 | |
| +        users[uid] = users1.create_test_user(uid=uid)
 | |
| +        log.info(f"Created user: {users[uid].dn}")
 | |
| +
 | |
| +    for uid in range(1002,1007,2):
 | |
| +        users[uid].delete()
 | |
| +        log.info(f"Removing user: {users[uid].dn}")
 | |
| +    repl.wait_for_replication(S1, S2)
 | |
| +
 | |
| +    ldif_file = f"{S1.get_ldif_dir()}/export.ldif"
 | |
| +    log.info(f"Export into {ldif_file}")
 | |
| +    args = {EXPORT_REPL_INFO: True,
 | |
| +            TASK_WAIT: True}
 | |
| +    tasks.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
 | |
| +
 | |
| +    verify_subordinates(S1, container, "before importing")
 | |
| +
 | |
| +    # import the ldif file
 | |
| +    log.info(f"Import from {ldif_file}")
 | |
| +    args = {TASK_WAIT: True}
 | |
| +    tasks.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
 | |
| +
 | |
| +    verify_subordinates(S1, container, "after importing")
 | |
| +
 | |
|  
 | |
|  if __name__ == '__main__':
 | |
|      # Run isolated
 | |
|      # -s for DEBUG mode
 | |
|      CURRENT_FILE = os.path.realpath(__file__)
 | |
| -    pytest.main("-s %s" % CURRENT_FILE)
 | |
| \ No newline at end of file
 | |
| +    pytest.main("-s %s" % CURRENT_FILE)
 | |
| diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
 | |
| index 5dbf38054..d88ce7d98 100644
 | |
| --- a/ldap/servers/plugins/replication/cl5_api.c
 | |
| +++ b/ldap/servers/plugins/replication/cl5_api.c
 | |
| @@ -3211,7 +3211,7 @@ _cl5EnumConsumerRUV(const ruv_enum_data *element, void *arg)
 | |
|      RUV *ruv;
 | |
|      CSN *csn = NULL;
 | |
|  
 | |
| -    PR_ASSERT(element && element->csn && arg);
 | |
| +    PR_ASSERT(element && arg);
 | |
|  
 | |
|      ruv = (RUV *)arg;
 | |
|  
 | |
| diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
 | |
| index bc4ae5e2b..39edb7d0e 100644
 | |
| --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
 | |
| +++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
 | |
| @@ -30,6 +30,18 @@ static int bdb_ancestorid_create_index(backend *be, ImportJob *job);
 | |
|  static int bdb_ancestorid_default_create_index(backend *be, ImportJob *job);
 | |
|  static int bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job);
 | |
|  
 | |
| +/* Helper struct used to compute numsubordinates */
 | |
| +
 | |
| +typedef struct {
 | |
| +    backend *be;
 | |
| +    DB_TXN *txn;
 | |
| +    const char *attrname;
 | |
| +    struct attrinfo *ai;
 | |
| +    dbi_db_t *db;
 | |
| +    DBC *dbc;
 | |
| +} subcount_cursor_info_t;
 | |
| +
 | |
| +
 | |
|  /* Start of definitions for a simple cache using a hash table */
 | |
|  
 | |
|  typedef struct id2idl
 | |
| @@ -56,6 +68,69 @@ static int bdb_parentid(backend *be, DB_TXN *txn, ID id, ID *ppid);
 | |
|  static int bdb_check_cache(id2idl_hash *ht);
 | |
|  static IDList *bdb_idl_union_allids(backend *be, struct attrinfo *ai, IDList *a, IDList *b);
 | |
|  
 | |
| +
 | |
| +/********** Code to debug numsubordinates/tombstonenumsubordinates computation **********/
 | |
| +
 | |
| +#ifdef DEBUG_SUBCOUNT
 | |
| +#define DEBUG_SUBCOUNT_MSG(msg, ...) { debug_subcount(__FUNCTION__, __LINE__, (msg), __VA_ARGS__); }
 | |
| +#define DUMP_SUBCOUNT_KEY(msg, key, ret) { debug_subcount(__FUNCTION__, __LINE__, "ret=%d size=%u ulen=%u doff=%u dlen=%u", \
 | |
| +                                               ret, (key).size, (key).ulen, (key).doff, (key).dlen); \
 | |
| +                                           if (ret == 0) hexadump(msg, (key).data, 0, (key).size); \
 | |
| +                                           else if (ret == DB_BUFFER_SMALL) \
 | |
| +                                                hexadump(msg, (key).data, 0, (key).ulen); }
 | |
| +
 | |
| +static void
 | |
| +debug_subcount(const char *funcname, int line, char *msg, ...)
 | |
| +{
 | |
| +    va_list ap;
 | |
| +    char buff[1024];
 | |
| +    va_start(ap, msg);
 | |
| +    PR_vsnprintf(buff, (sizeof buff), msg, ap);
 | |
| +    va_end(ap);
 | |
| +    slapi_log_err(SLAPI_LOG_INFO, (char*)funcname, "DEBUG SUBCOUNT [%d] %s\n", line, buff);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Dump a memory buffer in hexa and ascii in error log
 | |
| + *
 | |
| + * addr - The memory buffer address.
 | |
| + * len - The memory buffer length.
 | |
| + */
 | |
| +static void
 | |
| +hexadump(char *msg, const void *addr, size_t offset, size_t len)
 | |
| +{
 | |
| +#define  HEXADUMP_TAB 4
 | |
| +/* 4 characters per bytes:  2 hexa digits, 1 space and the ascii  */
 | |
| +#define  HEXADUMP_BUF_SIZE (4*16+HEXADUMP_TAB)
 | |
| +    char hexdigit[] = "0123456789ABCDEF";
 | |
| +
 | |
| +    const unsigned char *pt = addr;
 | |
| +    char buff[HEXADUMP_BUF_SIZE+1];
 | |
| +    memset (buff, ' ', HEXADUMP_BUF_SIZE);
 | |
| +    buff[HEXADUMP_BUF_SIZE] = '\0';
 | |
| +    while (len > 0) {
 | |
| +        int dpl;
 | |
| +        for (dpl = 0; dpl < 16 && len>0; dpl++, len--) {
 | |
| +           buff[3*dpl] = hexdigit[((*pt) >> 4) & 0xf];
 | |
| +           buff[3*dpl+1] = hexdigit[(*pt) & 0xf];
 | |
| +           buff[3*16+HEXADUMP_TAB+dpl] = (*pt>=0x20 && *pt<0x7f) ? *pt : '.';
 | |
| +           pt++;
 | |
| +        }
 | |
| +        for (;dpl < 16; dpl++) {
 | |
| +           buff[3*dpl] = ' ';
 | |
| +           buff[3*dpl+1] = ' ';
 | |
| +           buff[3*16+HEXADUMP_TAB+dpl] = ' ';
 | |
| +        }
 | |
| +        slapi_log_err(SLAPI_LOG_INFO, msg, "[0x%08lx]  %s\n", offset, buff);
 | |
| +        offset += 16;
 | |
| +    }
 | |
| +}
 | |
| +#else
 | |
| +#define DEBUG_SUBCOUNT_MSG(msg, ...)
 | |
| +#define DUMP_SUBCOUNT_KEY(msg, key, ret)
 | |
| +#endif
 | |
| +
 | |
| +
 | |
|  /********** routines to manipulate the entry fifo **********/
 | |
|  
 | |
|  /* this is pretty bogus -- could be a HUGE amount of memory */
 | |
| @@ -994,173 +1069,85 @@ out:
 | |
|  
 | |
|      return ret;
 | |
|  }
 | |
| -/* Update subordinate count in a hint list, given the parent's ID */
 | |
| -int
 | |
| -bdb_import_subcount_mother_init(import_subcount_stuff *mothers, ID parent_id, size_t count)
 | |
| -{
 | |
| -    PR_ASSERT(NULL == PL_HashTableLookup(mothers->hashtable, (void *)((uintptr_t)parent_id)));
 | |
| -    PL_HashTableAdd(mothers->hashtable, (void *)((uintptr_t)parent_id), (void *)count);
 | |
| -    return 0;
 | |
| -}
 | |
|  
 | |
| -/* Look for a subordinate count in a hint list, given the parent's ID */
 | |
| -static int
 | |
| -bdb_import_subcount_mothers_lookup(import_subcount_stuff *mothers,
 | |
| -                               ID parent_id,
 | |
| -                               size_t *count)
 | |
| +static void
 | |
| +bdb_close_subcount_cursor(subcount_cursor_info_t *info)
 | |
|  {
 | |
| -    size_t stored_count = 0;
 | |
| -
 | |
| -    *count = 0;
 | |
| -    /* Lookup hash table for ID */
 | |
| -    stored_count = (size_t)PL_HashTableLookup(mothers->hashtable,
 | |
| -                                              (void *)((uintptr_t)parent_id));
 | |
| -    /* If present, return the count found */
 | |
| -    if (0 != stored_count) {
 | |
| -        *count = stored_count;
 | |
| -        return 0;
 | |
| +    if  (info->dbc) {
 | |
| +        int ret = info->dbc->c_close(info->dbc);
 | |
| +        if (ret) {
 | |
| +            char errfunc[60];
 | |
| +            snprintf(errfunc, (sizeof errfunc), "%s[%s]", __FUNCTION__, info->attrname);
 | |
| +            ldbm_nasty(errfunc, sourcefile, 73, ret);
 | |
| +        }
 | |
| +        info->dbc = NULL;
 | |
| +    }
 | |
| +    if (info->db) {
 | |
| +        dblayer_release_index_file(info->be, info->ai, info->db);
 | |
| +        info->db = NULL;
 | |
| +        info->ai = NULL;
 | |
|      }
 | |
| -    return -1;
 | |
| -}
 | |
| -
 | |
| -/* Update subordinate count in a hint list, given the parent's ID */
 | |
| -int
 | |
| -bdb_import_subcount_mother_count(import_subcount_stuff *mothers, ID parent_id)
 | |
| -{
 | |
| -    size_t stored_count = 0;
 | |
| -
 | |
| -    /* Lookup the hash table for the target ID */
 | |
| -    stored_count = (size_t)PL_HashTableLookup(mothers->hashtable,
 | |
| -                                              (void *)((uintptr_t)parent_id));
 | |
| -    PR_ASSERT(0 != stored_count);
 | |
| -    /* Increment the count */
 | |
| -    stored_count++;
 | |
| -    PL_HashTableAdd(mothers->hashtable, (void *)((uintptr_t)parent_id), (void *)stored_count);
 | |
| -    return 0;
 | |
|  }
 | |
|  
 | |
|  static int
 | |
| -bdb_import_update_entry_subcount(backend *be, ID parentid, size_t sub_count, int isencrypted)
 | |
| +bdb_open_subcount_cursor(backend *be, const char *attrname, DB_TXN *txn, subcount_cursor_info_t *info)
 | |
|  {
 | |
| -    ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
 | |
| +    char errfunc[60];
 | |
| +    DB *db = NULL;
 | |
|      int ret = 0;
 | |
| -    modify_context mc = {0};
 | |
| -    char value_buffer[22] = {0}; /* enough digits for 2^64 children */
 | |
| -    struct backentry *e = NULL;
 | |
| -    int isreplace = 0;
 | |
| -    char *numsub_str = numsubordinates;
 | |
| -
 | |
| -    /* Get hold of the parent */
 | |
| -    e = id2entry(be, parentid, NULL, &ret);
 | |
| -    if ((NULL == e) || (0 != ret)) {
 | |
| -        ldbm_nasty("bdb_import_update_entry_subcount", sourcefile, 5, ret);
 | |
| -        return (0 == ret) ? -1 : ret;
 | |
| -    }
 | |
| -    /* Lock it (not really required since we're single-threaded here, but
 | |
| -     * let's do it so we can reuse the modify routines) */
 | |
| -    cache_lock_entry(&inst->inst_cache, e);
 | |
| -    modify_init(&mc, e);
 | |
| -    mc.attr_encrypt = isencrypted;
 | |
| -    sprintf(value_buffer, "%lu", (long unsigned int)sub_count);
 | |
| -    /* If it is a tombstone entry, add tombstonesubordinates instead of
 | |
| -     * numsubordinates. */
 | |
| -    if (slapi_entry_flag_is_set(e->ep_entry, SLAPI_ENTRY_FLAG_TOMBSTONE)) {
 | |
| -        numsub_str = tombstone_numsubordinates;
 | |
| -    }
 | |
| -    /* attr numsubordinates/tombstonenumsubordinates could already exist in
 | |
| -     * the entry, let's check whether it's already there or not */
 | |
| -    isreplace = (attrlist_find(e->ep_entry->e_attrs, numsub_str) != NULL);
 | |
| -    {
 | |
| -        int op = isreplace ? LDAP_MOD_REPLACE : LDAP_MOD_ADD;
 | |
| -        Slapi_Mods *smods = slapi_mods_new();
 | |
|  
 | |
| -        slapi_mods_add(smods, op | LDAP_MOD_BVALUES, numsub_str,
 | |
| -                       strlen(value_buffer), value_buffer);
 | |
| -        ret = modify_apply_mods(&mc, smods); /* smods passed in */
 | |
| -    }
 | |
| -    if (0 == ret || LDAP_TYPE_OR_VALUE_EXISTS == ret) {
 | |
| -        /* This will correctly index subordinatecount: */
 | |
| -        ret = modify_update_all(be, NULL, &mc, NULL);
 | |
| -        if (0 == ret) {
 | |
| -            modify_switch_entries(&mc, be);
 | |
| -        }
 | |
| +    snprintf(errfunc, (sizeof errfunc), "%s[%s]", __FUNCTION__, attrname);
 | |
| +    info->attrname = attrname;
 | |
| +    info->txn = txn;
 | |
| +    info->be = be;
 | |
| +
 | |
| +    /* Lets get the attrinfo */
 | |
| +    ainfo_get(be, (char*)attrname, &info->ai);
 | |
| +    PR_ASSERT(info->ai);
 | |
| +    /* Lets get the db instance */
 | |
| +    if ((ret = dblayer_get_index_file(be, info->ai, &info->db, 0)) != 0) {
 | |
| +        if (ret == DBI_RC_NOTFOUND) {
 | |
| +            bdb_close_subcount_cursor(info);
 | |
| +            return 0;
 | |
| +        }
 | |
| +        ldbm_nasty(errfunc, sourcefile, 70, ret);
 | |
| +        bdb_close_subcount_cursor(info);
 | |
| +        return ret;
 | |
|      }
 | |
| -    /* entry is unlocked and returned to the cache in modify_term */
 | |
| -    modify_term(&mc, be);
 | |
| -    return ret;
 | |
| -}
 | |
| -struct _import_subcount_trawl_info
 | |
| -{
 | |
| -    struct _import_subcount_trawl_info *next;
 | |
| -    ID id;
 | |
| -    size_t sub_count;
 | |
| -};
 | |
| -typedef struct _import_subcount_trawl_info import_subcount_trawl_info;
 | |
| -
 | |
| -static void
 | |
| -bdb_import_subcount_trawl_add(import_subcount_trawl_info **list, ID id)
 | |
| -{
 | |
| -    import_subcount_trawl_info *new_info = CALLOC(import_subcount_trawl_info);
 | |
|  
 | |
| -    new_info->next = *list;
 | |
| -    new_info->id = id;
 | |
| -    *list = new_info;
 | |
| +    /* Lets get the cursor */
 | |
| +    db = (DB*)(info->db);
 | |
| +    if ((ret = db->cursor(db, info->txn, &info->dbc, 0)) != 0) {
 | |
| +        ldbm_nasty(errfunc, sourcefile, 71, ret);
 | |
| +        bdb_close_subcount_cursor(info);
 | |
| +        ret = bdb_map_error(__FUNCTION__, ret);
 | |
| +    }
 | |
| +    return 0;
 | |
|  }
 | |
|  
 | |
| -static int
 | |
| -bdb_import_subcount_trawl(backend *be,
 | |
| -                      import_subcount_trawl_info *trawl_list,
 | |
| -                      int isencrypted)
 | |
| +static bool
 | |
| +bdb_subcount_is_tombstone(subcount_cursor_info_t *info, DBT *id)
 | |
|  {
 | |
| -    ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
 | |
| -    ID id = 1;
 | |
| -    int ret = 0;
 | |
| -    import_subcount_trawl_info *current = NULL;
 | |
| -    char value_buffer[20]; /* enough digits for 2^64 children */
 | |
| -
 | |
| -    /* OK, we do */
 | |
| -    /* We open id2entry and iterate through it */
 | |
| -    /* Foreach entry, we check to see if its parentID matches any of the
 | |
| -     * values in the trawl list . If so, we bump the sub count for that
 | |
| -     * parent in the list.
 | |
| +    /*
 | |
| +     * Check if record =nstombstone ==> id exists in objectclass index
 | |
|       */
 | |
| -    while (1) {
 | |
| -        struct backentry *e = NULL;
 | |
| -
 | |
| -        /* Get the next entry */
 | |
| -        e = id2entry(be, id, NULL, &ret);
 | |
| -        if ((NULL == e) || (0 != ret)) {
 | |
| -            if (DB_NOTFOUND == ret) {
 | |
| -                break;
 | |
| -            } else {
 | |
| -                ldbm_nasty("bdb_import_subcount_trawl", sourcefile, 8, ret);
 | |
| -                return ret;
 | |
| -            }
 | |
| -        }
 | |
| -        for (current = trawl_list; current != NULL; current = current->next) {
 | |
| -            sprintf(value_buffer, "%lu", (u_long)current->id);
 | |
| -            if (slapi_entry_attr_hasvalue(e->ep_entry, LDBM_PARENTID_STR, value_buffer)) {
 | |
| -                /* If this entry's parent ID matches one we're trawling for,
 | |
| -                 * bump its count */
 | |
| -                current->sub_count++;
 | |
| -            }
 | |
| -        }
 | |
| -        /* Free the entry */
 | |
| -        CACHE_REMOVE(&inst->inst_cache, e);
 | |
| -        CACHE_RETURN(&inst->inst_cache, &e);
 | |
| -        id++;
 | |
| -    }
 | |
| -    /* Now update the parent entries from the list */
 | |
| -    for (current = trawl_list; current != NULL; current = current->next) {
 | |
| -        /* Update the parent entry with the correctly counted subcount */
 | |
| -        ret = bdb_import_update_entry_subcount(be, current->id,
 | |
| -                                           current->sub_count, isencrypted);
 | |
| -        if (0 != ret) {
 | |
| -            ldbm_nasty("bdb_import_subcount_trawl", sourcefile, 10, ret);
 | |
| -            break;
 | |
| -        }
 | |
| +    DBT key = {0};
 | |
| +    DBC *dbc = info->dbc;
 | |
| +    int ret;
 | |
| +    key.flags = DB_DBT_USERMEM;
 | |
| +    key.data = "=nstombstone" ;
 | |
| +    key.size = key.ulen = 13;
 | |
| +    ret = dbc->c_get(dbc, &key, id, DB_GET_BOTH);
 | |
| +
 | |
| +    switch (ret) {
 | |
| +        case 0:
 | |
| +            return true;
 | |
| +        case DB_NOTFOUND:
 | |
| +            return false;
 | |
| +        default:
 | |
| +            ldbm_nasty((char*)__FUNCTION__, sourcefile, 72, ret);
 | |
| +            return false;
 | |
|      }
 | |
| -    return ret;
 | |
|  }
 | |
|  
 | |
|  /*
 | |
| @@ -1172,65 +1159,69 @@ bdb_import_subcount_trawl(backend *be,
 | |
|  static int
 | |
|  bdb_update_subordinatecounts(backend *be, ImportJob *job, DB_TXN *txn)
 | |
|  {
 | |
| -    import_subcount_stuff *mothers = job->mothers;
 | |
| -    int isencrypted = job->encrypt;
 | |
| +    subcount_cursor_info_t c_objectclass = {0};
 | |
| +    subcount_cursor_info_t c_entryrdn = {0};
 | |
|      int started_progress_logging = 0;
 | |
| +    int isencrypted = job->encrypt;
 | |
| +    DBT data = {0};
 | |
| +    DBT key = {0};
 | |
|      int key_count = 0;
 | |
| +    char tmp[11];
 | |
| +    char oldkey[11];
 | |
| +    ID data_data;
 | |
| +    int ret2 = 0;
 | |
|      int ret = 0;
 | |
| -    DB *db = NULL;
 | |
| -    DBC *dbc = NULL;
 | |
| -    struct attrinfo *ai = NULL;
 | |
| -    DBT key = {0};
 | |
| -    dbi_val_t dbikey = {0};
 | |
| -    DBT data = {0};
 | |
| -    import_subcount_trawl_info *trawl_list = NULL;
 | |
| -
 | |
| -    /* Open the parentid index */
 | |
| -    ainfo_get(be, LDBM_PARENTID_STR, &ai);
 | |
|  
 | |
| -    /* Open the parentid index file */
 | |
| -    if ((ret = dblayer_get_index_file(be, ai, (dbi_db_t**)&db, DBOPEN_CREATE)) != 0) {
 | |
| -        ldbm_nasty("bdb_update_subordinatecounts", sourcefile, 67, ret);
 | |
| -        return (ret);
 | |
| +    /* Open cursor on the objectclass index */
 | |
| +    ret = bdb_open_subcount_cursor(be, SLAPI_ATTR_OBJECTCLASS, txn, &c_objectclass);
 | |
| +    if (ret) {
 | |
| +        if (ret != DBI_RC_NOTFOUND) {
 | |
| +            /* No database ==> There is nothing to do. */
 | |
| +            ldbm_nasty((char*)__FUNCTION__, sourcefile, 61, ret);
 | |
| +        }
 | |
| +        return ret;
 | |
|      }
 | |
| -    /* Get a cursor so we can walk through the parentid */
 | |
| -    ret = db->cursor(db, txn, &dbc, 0);
 | |
| -    if (ret != 0) {
 | |
| -        ldbm_nasty("bdb_update_subordinatecounts", sourcefile, 68, ret);
 | |
| -        dblayer_release_index_file(be, ai, db);
 | |
| +    /* Open entryrdn index */
 | |
| +    /* Open cursor on the entryrdn index */
 | |
| +    ret = bdb_open_subcount_cursor(be, LDBM_ENTRYRDN_STR, txn, &c_entryrdn);
 | |
| +    if (ret) {
 | |
| +        ldbm_nasty((char*)__FUNCTION__, sourcefile, 62, ret);
 | |
| +        bdb_close_subcount_cursor(&c_objectclass);
 | |
|          return ret;
 | |
|      }
 | |
|  
 | |
| -    /* Walk along the index */
 | |
| -    while (1) {
 | |
| +    key.flags = DB_DBT_USERMEM;
 | |
| +    key.ulen = sizeof tmp;
 | |
| +    key.data = tmp;
 | |
| +    /* Only the first 4 bytes of the data record interest us */
 | |
| +    data.flags = DB_DBT_USERMEM | DB_DBT_PARTIAL;
 | |
| +    data.ulen = sizeof data_data;
 | |
| +    data.data = &data_data;
 | |
| +    data.dlen = sizeof (ID);
 | |
| +    data.doff = 0;
 | |
| +
 | |
| +    /* Walk along C* keys (usually starting at C1) */
 | |
| +    strcpy(tmp, "C");
 | |
| +    key.size = 1;
 | |
| +    ret = c_entryrdn.dbc->c_get(c_entryrdn.dbc, &key, &data, DB_SET_RANGE);
 | |
| +
 | |
| +    while (ret == 0) {
 | |
|          size_t sub_count = 0;
 | |
| -        int found_count = 1;
 | |
| +        size_t t_sub_count = 0;
 | |
|          ID parentid = 0;
 | |
|  
 | |
| -        /* Foreach key which is an equality key : */
 | |
| -        data.flags = DB_DBT_MALLOC;
 | |
| -        key.flags = DB_DBT_MALLOC;
 | |
| -        ret = dbc->c_get(dbc, &key, &data, DB_NEXT_NODUP);
 | |
| -        if (NULL != data.data) {
 | |
| -            slapi_ch_free(&(data.data));
 | |
| -            data.data = NULL;
 | |
| -        }
 | |
| -        if (0 != ret) {
 | |
| -            if (ret != DB_NOTFOUND) {
 | |
| -                ldbm_nasty("bdb_update_subordinatecounts", sourcefile, 62, ret);
 | |
| -            }
 | |
| -            if (NULL != key.data) {
 | |
| -                slapi_ch_free(&(key.data));
 | |
| -                key.data = NULL;
 | |
| -            }
 | |
| -            break;
 | |
| -        }
 | |
| +        DUMP_SUBCOUNT_KEY("key:", key, ret);
 | |
| +        DUMP_SUBCOUNT_KEY("data:", data, ret);
 | |
|          /* check if we need to abort */
 | |
|          if (job->flags & FLAG_ABORT) {
 | |
|              import_log_notice(job, SLAPI_LOG_ERR, "bdb_update_subordinatecounts",
 | |
|                                "numsubordinate generation aborted.");
 | |
|              break;
 | |
|          }
 | |
| +        if (0 != ret) {
 | |
| +            ldbm_nasty("bdb_update_subordinatecounts", sourcefile, 63, ret);
 | |
| +            break;
 | |
| +        }
 | |
|          /*
 | |
|           * Do an update count
 | |
|           */
 | |
| @@ -1241,57 +1232,47 @@ bdb_update_subordinatecounts(backend *be, ImportJob *job, DB_TXN *txn)
 | |
|                                key_count);
 | |
|              started_progress_logging = 1;
 | |
|          }
 | |
| +        if (key.size == 0 || *(char *)key.data != 'C') {
 | |
| +            /* No more children */
 | |
| +            break;
 | |
| +        }
 | |
|  
 | |
| -        if (*(char *)key.data == EQ_PREFIX) {
 | |
| -            char *idptr = NULL;
 | |
| -
 | |
| -            /* construct the parent's ID from the key */
 | |
| -            /* Look for the ID in the hint list supplied by the caller */
 | |
| -            /* If its there, we know the answer already */
 | |
| -            idptr = (((char *)key.data) + 1);
 | |
| -            parentid = (ID)atol(idptr);
 | |
| -            PR_ASSERT(0 != parentid);
 | |
| -            ret = bdb_import_subcount_mothers_lookup(mothers, parentid, &sub_count);
 | |
| -            if (0 != ret) {
 | |
| -                IDList *idl = NULL;
 | |
| -
 | |
| -                /* If it's not, we need to compute it ourselves: */
 | |
| -                /* Load the IDL matching the key */
 | |
| -                key.flags = DB_DBT_REALLOC;
 | |
| -                ret = NEW_IDL_NO_ALLID;
 | |
| -                bdb_dbt2dbival(&key, &dbikey, PR_FALSE);
 | |
| -                idl = idl_fetch(be, db, &dbikey, NULL, NULL, &ret);
 | |
| -                bdb_dbival2dbt(&dbikey, &key, PR_TRUE);
 | |
| -                dblayer_value_protect_data(be, &dbikey);
 | |
| -                if ((NULL == idl) || (0 != ret)) {
 | |
| -                    ldbm_nasty("bdb_update_subordinatecounts", sourcefile, 4, ret);
 | |
| -                    dblayer_release_index_file(be, ai, db);
 | |
| -                    return (0 == ret) ? -1 : ret;
 | |
| -                }
 | |
| -                /* The number of IDs in the IDL tells us the number of
 | |
| -                 * subordinates for the entry */
 | |
| -                /* Except, the number might be above the allidsthreshold,
 | |
| -                 * in which case */
 | |
| -                if (ALLIDS(idl)) {
 | |
| -                    /* We add this ID to the list for which to trawl */
 | |
| -                    bdb_import_subcount_trawl_add(&trawl_list, parentid);
 | |
| -                    found_count = 0;
 | |
| -                } else {
 | |
| -                    /* We get the count from the IDL */
 | |
| -                    sub_count = idl->b_nids;
 | |
| -                }
 | |
| -                idl_free(&idl);
 | |
| +        /* construct the parent's ID from the key */
 | |
| +        if (key.size >= sizeof tmp) {
 | |
| +            ldbm_nasty("bdb_update_subordinatecounts", sourcefile, 64, ret);
 | |
| +            break;
 | |
| +        }
 | |
| +        /* Generate expected value for parentid */
 | |
| +        tmp[key.size] = 0;
 | |
| +        parentid = (ID)atol(tmp+1);
 | |
| +        PR_ASSERT(0 != parentid);
 | |
| +        strcpy(oldkey,tmp);
 | |
| +        /* Walk the entries having same key and check if they are tombstone */
 | |
| +        do {
 | |
| +            /* Reorder data_data */
 | |
| +            ID old_data_data = data_data;
 | |
| +            id_internal_to_stored(old_data_data, (char*)&data_data);
 | |
| +            if (!bdb_subcount_is_tombstone(&c_objectclass, &data)) {
 | |
| +                sub_count++;
 | |
| +            } else {
 | |
| +                t_sub_count++;
 | |
|              }
 | |
| -            /* Did we get the count ? */
 | |
| -            if (found_count) {
 | |
| -                PR_ASSERT(0 != sub_count);
 | |
| -                /* If so, update the parent now */
 | |
| -                bdb_import_update_entry_subcount(be, parentid, sub_count, isencrypted);
 | |
| +            DUMP_SUBCOUNT_KEY("key:", key, ret);
 | |
| +            DUMP_SUBCOUNT_KEY("data:", data, ret);
 | |
| +            ret = c_entryrdn.dbc->c_get(c_entryrdn.dbc, &key, &data, DB_NEXT);
 | |
| +            DUMP_SUBCOUNT_KEY("key:", key, ret);
 | |
| +            DUMP_SUBCOUNT_KEY("data:", data, ret);
 | |
| +            if (ret == 0 && key.size < sizeof tmp) {
 | |
| +                tmp[key.size] = 0;
 | |
| +            } else {
 | |
| +                break;
 | |
|              }
 | |
| -        }
 | |
| -        if (NULL != key.data) {
 | |
| -            slapi_ch_free(&(key.data));
 | |
| -            key.data = NULL;
 | |
| +        } while (strcmp(key.data, oldkey) == 0);
 | |
| +        ret2 = import_update_entry_subcount(be, parentid, sub_count, t_sub_count, isencrypted, (dbi_txn_t*)txn);
 | |
| +        if (ret2) {
 | |
| +            ret = ret2;
 | |
| +            ldbm_nasty("bdb_update_subordinatecounts", sourcefile, 65, ret);
 | |
| +            break;
 | |
|          }
 | |
|      }
 | |
|      if (started_progress_logging) {
 | |
| @@ -1301,22 +1282,15 @@ bdb_update_subordinatecounts(backend *be, ImportJob *job, DB_TXN *txn)
 | |
|                            key_count);
 | |
|          job->numsubordinates = key_count;
 | |
|      }
 | |
| -
 | |
| -    ret = dbc->c_close(dbc);
 | |
| -    if (0 != ret) {
 | |
| -        ldbm_nasty("bdb_update_subordinatecounts", sourcefile, 6, ret);
 | |
| -    }
 | |
| -    dblayer_release_index_file(be, ai, db);
 | |
| -
 | |
| -    /* Now see if we need to go trawling through id2entry for the info
 | |
| -     * we need */
 | |
| -    if (NULL != trawl_list) {
 | |
| -        ret = bdb_import_subcount_trawl(be, trawl_list, isencrypted);
 | |
| -        if (0 != ret) {
 | |
| -            ldbm_nasty("bdb_update_subordinatecounts", sourcefile, 7, ret);
 | |
| -        }
 | |
| +    if (ret == DB_NOTFOUND || ret == DB_BUFFER_SMALL) {
 | |
| +        /* No more records or record is the suffix dn
 | |
| +         * ==> there is no more children to look at
 | |
| +         */
 | |
| +        ret = 0;
 | |
|      }
 | |
| -    return (ret);
 | |
| +    bdb_close_subcount_cursor(&c_entryrdn);
 | |
| +    bdb_close_subcount_cursor(&c_objectclass);
 | |
| +    return ret;
 | |
|  }
 | |
|  
 | |
|  /* Function used to gather a list of indexed attrs */
 | |
| @@ -1451,10 +1425,6 @@ bdb_import_free_job(ImportJob *job)
 | |
|          slapi_ch_free((void **)&asabird);
 | |
|      }
 | |
|      job->index_list = NULL;
 | |
| -    if (NULL != job->mothers) {
 | |
| -        import_subcount_stuff_term(job->mothers);
 | |
| -        slapi_ch_free((void **)&job->mothers);
 | |
| -    }
 | |
|  
 | |
|      bdb_back_free_incl_excl(job->include_subtrees, job->exclude_subtrees);
 | |
|      charray_free(job->input_filenames);
 | |
| @@ -2720,7 +2690,6 @@ bdb_back_ldif2db(Slapi_PBlock *pb)
 | |
|      }
 | |
|      job->starting_ID = 1;
 | |
|      job->first_ID = 1;
 | |
| -    job->mothers = CALLOC(import_subcount_stuff);
 | |
|  
 | |
|      /* how much space should we allocate to index buffering? */
 | |
|      job->job_index_buffer_size = bdb_import_get_index_buffer_size();
 | |
| @@ -2731,7 +2700,6 @@ bdb_back_ldif2db(Slapi_PBlock *pb)
 | |
|              (job->inst->inst_li->li_import_cachesize / 10) + (1024 * 1024);
 | |
|          PR_Unlock(job->inst->inst_li->li_config_mutex);
 | |
|      }
 | |
| -    import_subcount_stuff_init(job->mothers);
 | |
|  
 | |
|      if (job->task != NULL) {
 | |
|          /* count files, use that to track "progress" in cn=tasks */
 | |
| diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
 | |
| index 762267dd9..12dfb419b 100644
 | |
| --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
 | |
| +++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
 | |
| @@ -2275,17 +2275,6 @@ bdb_foreman_do_parentid(ImportJob *job, FifoItem *fi, struct attrinfo *parentid_
 | |
|          ret = index_addordel_values_ext_sv(be, LDBM_PARENTID_STR, svals, NULL,
 | |
|                                             entry->ep_id, BE_INDEX_ADD,
 | |
|                                             NULL, &idl_disposition, NULL);
 | |
| -        if (idl_disposition != IDL_INSERT_NORMAL) {
 | |
| -            char *attr_value = slapi_value_get_berval(svals[0])->bv_val;
 | |
| -            ID parent_id = atol(attr_value);
 | |
| -
 | |
| -            if (idl_disposition == IDL_INSERT_NOW_ALLIDS) {
 | |
| -                bdb_import_subcount_mother_init(job->mothers, parent_id,
 | |
| -                                            idl_get_allidslimit(parentid_ai, 0) + 1);
 | |
| -            } else if (idl_disposition == IDL_INSERT_ALLIDS) {
 | |
| -                bdb_import_subcount_mother_count(job->mothers, parent_id);
 | |
| -            }
 | |
| -        }
 | |
|          if (ret != 0) {
 | |
|              import_log_notice(job, SLAPI_LOG_ERR, "bdb_foreman_do_parentid",
 | |
|                                "Can't update parentid index (error %d)", ret);
 | |
| @@ -3158,7 +3147,6 @@ bdb_bulk_import_start(Slapi_PBlock *pb)
 | |
|      job->starting_ID = 1;
 | |
|      job->first_ID = 1;
 | |
|  
 | |
| -    job->mothers = CALLOC(import_subcount_stuff);
 | |
|      /* how much space should we allocate to index buffering? */
 | |
|      job->job_index_buffer_size = bdb_import_get_index_buffer_size();
 | |
|      if (job->job_index_buffer_size == 0) {
 | |
| @@ -3166,7 +3154,6 @@ bdb_bulk_import_start(Slapi_PBlock *pb)
 | |
|          job->job_index_buffer_size = (job->inst->inst_li->li_dbcachesize / 10) +
 | |
|                                       (1024 * 1024);
 | |
|      }
 | |
| -    import_subcount_stuff_init(job->mothers);
 | |
|  
 | |
|      pthread_mutex_init(&job->wire_lock, NULL);
 | |
|      pthread_cond_init(&job->wire_cv, NULL);
 | |
| diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h
 | |
| index 0be6cab49..f66640d2e 100644
 | |
| --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h
 | |
| +++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h
 | |
| @@ -210,8 +210,6 @@ void bdb_restore_file_update(struct ldbminfo *li, const char *directory);
 | |
|  int bdb_import_file_init(ldbm_instance *inst);
 | |
|  void bdb_import_file_update(ldbm_instance *inst);
 | |
|  int bdb_import_file_check(ldbm_instance *inst);
 | |
| -int bdb_import_subcount_mother_init(import_subcount_stuff *mothers, ID parent_id, size_t count);
 | |
| -int bdb_import_subcount_mother_count(import_subcount_stuff *mothers, ID parent_id);
 | |
|  void bdb_import_configure_index_buffer_size(size_t size);
 | |
|  size_t bdb_import_get_index_buffer_size(void);
 | |
|  int bdb_ldbm_back_wire_import(Slapi_PBlock *pb);
 | |
| @@ -230,6 +228,7 @@ int bdb_public_in_import(ldbm_instance *inst);
 | |
|  int bdb_dblayer_cursor_iterate(dbi_cursor_t *cursor,
 | |
|                             int (*action_cb)(dbi_val_t *key, dbi_val_t *data, void *ctx),
 | |
|                             const dbi_val_t *startingkey, void *ctx);
 | |
| +dbi_error_t bdb_map_error(const char *funcname, int err);
 | |
|  
 | |
|  
 | |
|  /* dbimpl helpers */
 | |
| diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c
 | |
| index ce2151174..d0efc1bca 100644
 | |
| --- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c
 | |
| +++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c
 | |
| @@ -26,7 +26,16 @@
 | |
|  
 | |
|  static char *sourcefile = "dbmdb_import.c";
 | |
|  
 | |
| -static int dbmdb_import_update_entry_subcount(backend *be, ID parentid, size_t sub_count, int isencrypted, back_txn *txn);
 | |
| +/* Helper struct used to compute numsubordinates */
 | |
| +
 | |
| +typedef struct {
 | |
| +    backend *be;
 | |
| +    dbi_txn_t *txn;
 | |
| +    const char *attrname;
 | |
| +    struct attrinfo *ai;
 | |
| +    dbi_db_t *db;
 | |
| +    MDB_cursor *dbc;
 | |
| +} subcount_cursor_info_t;
 | |
|  
 | |
|  /********** routines to manipulate the entry fifo **********/
 | |
|  
 | |
| @@ -126,57 +135,74 @@ dbmdb_import_task_abort(Slapi_Task *task)
 | |
|  
 | |
|  /********** helper functions for importing **********/
 | |
|  
 | |
| +static void
 | |
| +dbmdb_close_subcount_cursor(subcount_cursor_info_t *info)
 | |
| +{
 | |
| +    if  (info->dbc) {
 | |
| +        MDB_CURSOR_CLOSE(info->dbc);
 | |
| +        info->dbc = NULL;
 | |
| +    }
 | |
| +    if (info->db) {
 | |
| +        dblayer_release_index_file(info->be, info->ai, info->db);
 | |
| +        info->db = NULL;
 | |
| +        info->ai = NULL;
 | |
| +    }
 | |
| +}
 | |
| +
 | |
|  static int
 | |
| -dbmdb_import_update_entry_subcount(backend *be, ID parentid, size_t sub_count, int isencrypted, back_txn *txn)
 | |
| +dbmdb_open_subcount_cursor(backend *be, const char *attrname, dbi_txn_t *txn, subcount_cursor_info_t *info)
 | |
|  {
 | |
| -    ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
 | |
| +    char errfunc[60];
 | |
|      int ret = 0;
 | |
| -    modify_context mc = {0};
 | |
| -    char value_buffer[22] = {0}; /* enough digits for 2^64 children */
 | |
| -    struct backentry *e = NULL;
 | |
| -    int isreplace = 0;
 | |
| -    char *numsub_str = numsubordinates;
 | |
| -
 | |
| -    /* Get hold of the parent */
 | |
| -    e = id2entry(be, parentid, txn, &ret);
 | |
| -    if ((NULL == e) || (0 != ret)) {
 | |
| -        slapi_log_err(SLAPI_LOG_ERR, "dbmdb_import_update_entry_subcount", "failed to read entry with ID %d ret=%d\n",
 | |
| -                parentid, ret);
 | |
| -        ldbm_nasty("dbmdb_import_update_entry_subcount", sourcefile, 5, ret);
 | |
| -        return (0 == ret) ? -1 : ret;
 | |
| -    }
 | |
| -    /* Lock it (not really required since we're single-threaded here, but
 | |
| -     * let's do it so we can reuse the modify routines) */
 | |
| -    cache_lock_entry(&inst->inst_cache, e);
 | |
| -    modify_init(&mc, e);
 | |
| -    mc.attr_encrypt = isencrypted;
 | |
| -    sprintf(value_buffer, "%lu", (long unsigned int)sub_count);
 | |
| -    /* If it is a tombstone entry, add tombstonesubordinates instead of
 | |
| -     * numsubordinates. */
 | |
| -    if (slapi_entry_flag_is_set(e->ep_entry, SLAPI_ENTRY_FLAG_TOMBSTONE)) {
 | |
| -        numsub_str = tombstone_numsubordinates;
 | |
| -    }
 | |
| -    /* attr numsubordinates/tombstonenumsubordinates could already exist in
 | |
| -     * the entry, let's check whether it's already there or not */
 | |
| -    isreplace = (attrlist_find(e->ep_entry->e_attrs, numsub_str) != NULL);
 | |
| -    {
 | |
| -        int op = isreplace ? LDAP_MOD_REPLACE : LDAP_MOD_ADD;
 | |
| -        Slapi_Mods *smods = slapi_mods_new();
 | |
|  
 | |
| -        slapi_mods_add(smods, op | LDAP_MOD_BVALUES, numsub_str,
 | |
| -                       strlen(value_buffer), value_buffer);
 | |
| -        ret = modify_apply_mods(&mc, smods); /* smods passed in */
 | |
| -    }
 | |
| -    if (0 == ret || LDAP_TYPE_OR_VALUE_EXISTS == ret) {
 | |
| -        /* This will correctly index subordinatecount: */
 | |
| -        ret = modify_update_all(be, NULL, &mc, txn);
 | |
| -        if (0 == ret) {
 | |
| -            modify_switch_entries(&mc, be);
 | |
| +    snprintf(errfunc, (sizeof errfunc), "%s[%s]", __FUNCTION__, attrname);
 | |
| +    info->attrname = attrname;
 | |
| +    info->txn = txn;
 | |
| +    info->be = be;
 | |
| +
 | |
| +    /* Lets get the attrinfo */
 | |
| +    ainfo_get(be, (char*)attrname, &info->ai);
 | |
| +    PR_ASSERT(info->ai);
 | |
| +    /* Lets get the db instance */
 | |
| +    if ((ret = dblayer_get_index_file(be, info->ai, &info->db, 0)) != 0) {
 | |
| +        if (ret == DBI_RC_NOTFOUND) {
 | |
| +            dbmdb_close_subcount_cursor(info);
 | |
| +            return 0;
 | |
|          }
 | |
| +        ldbm_nasty(errfunc, sourcefile, 70, ret);
 | |
| +        dbmdb_close_subcount_cursor(info);
 | |
| +        return ret;
 | |
| +    }
 | |
| +
 | |
| +    /* Lets get the cursor */
 | |
| +    if ((ret = MDB_CURSOR_OPEN(TXN(info->txn), DB(info->db), &info->dbc)) != 0) {
 | |
| +        ldbm_nasty(errfunc, sourcefile, 71, ret);
 | |
| +        dbmdb_close_subcount_cursor(info);
 | |
| +        ret = dbmdb_map_error(__FUNCTION__, ret);
 | |
| +    }
 | |
| +    return 0;
 | |
| +}
 | |
| +
 | |
| +static bool
 | |
| +dbmdb_subcount_is_tombstone(subcount_cursor_info_t *info, MDB_val *id)
 | |
| +{
 | |
| +    /*
 | |
| +     * Check if record =nstombstone ==> id exists in objectclass index
 | |
| +     */
 | |
| +    MDB_val key = {0};
 | |
| +    int ret;
 | |
| +    key.mv_data = "=nstombstone" ;
 | |
| +    key.mv_size = 13;
 | |
| +    ret = MDB_CURSOR_GET(info->dbc, &key, id, MDB_GET_BOTH);
 | |
| +    switch (ret) {
 | |
| +        case 0:
 | |
| +            return true;
 | |
| +        case MDB_NOTFOUND:
 | |
| +            return false;
 | |
| +        default:
 | |
| +            ldbm_nasty((char*)__FUNCTION__, sourcefile, 72, ret);
 | |
| +            return false;
 | |
|      }
 | |
| -    /* entry is unlocked and returned to the cache in modify_term */
 | |
| -    modify_term(&mc, be);
 | |
| -    return ret;
 | |
|  }
 | |
|  
 | |
|  /*
 | |
| @@ -188,47 +214,56 @@ dbmdb_import_update_entry_subcount(backend *be, ID parentid, size_t sub_count, i
 | |
|  static int
 | |
|  dbmdb_update_subordinatecounts(backend *be, ImportJob *job, dbi_txn_t *txn)
 | |
|  {
 | |
| -    int isencrypted = job->encrypt;
 | |
| +    subcount_cursor_info_t c_objectclass = {0};
 | |
| +    subcount_cursor_info_t c_entryrdn = {0};
 | |
|      int started_progress_logging = 0;
 | |
| +    int isencrypted = job->encrypt;
 | |
| +    MDB_val data = {0};
 | |
| +    MDB_val key = {0};
 | |
| +    back_txn btxn = {0};
 | |
|      int key_count = 0;
 | |
| +    char tmp[11];
 | |
| +    int ret2 = 0;
 | |
|      int ret = 0;
 | |
| -    dbmdb_dbi_t*db = NULL;
 | |
| -    MDB_cursor *dbc = NULL;
 | |
| -    struct attrinfo *ai = NULL;
 | |
| -    MDB_val key = {0};
 | |
| -    MDB_val data = {0};
 | |
| -    dbmdb_cursor_t cursor = {0};
 | |
| -    struct ldbminfo *li = (struct ldbminfo*)be->be_database->plg_private;
 | |
| -	back_txn btxn = {0};
 | |
| -
 | |
| -    /* Open the parentid index */
 | |
| -    ainfo_get(be, LDBM_PARENTID_STR, &ai);
 | |
|  
 | |
| -    /* Open the parentid index file */
 | |
| -    if ((ret = dblayer_get_index_file(be, ai, (dbi_db_t**)&db, DBOPEN_CREATE)) != 0) {
 | |
| -        ldbm_nasty("dbmdb_update_subordinatecounts", sourcefile, 67, ret);
 | |
| -        return (ret);
 | |
| -    }
 | |
| -    /* Get a cursor with r/w txn so we can walk through the parentid */
 | |
| -    ret = dbmdb_open_cursor(&cursor, MDB_CONFIG(li), db, 0);
 | |
| -    if (ret != 0) {
 | |
| -        ldbm_nasty("dbmdb_update_subordinatecounts", sourcefile, 68, ret);
 | |
| -        dblayer_release_index_file(be, ai, db);
 | |
| -        return ret;
 | |
| +    PR_ASSERT(txn == NULL); /* Apparently always called with null txn */
 | |
| +    /* Need txn / should be rw to update id2entry */
 | |
| +    ret = START_TXN(&txn, NULL, 0);
 | |
| +    if (ret) {
 | |
| +        ldbm_nasty((char*)__FUNCTION__, sourcefile, 60, ret);
 | |
| +        return dbmdb_map_error(__FUNCTION__, ret);
 | |
|      }
 | |
| -    dbc = cursor.cur;
 | |
| -    txn = cursor.txn;
 | |
|      btxn.back_txn_txn = txn;
 | |
| -    ret = MDB_CURSOR_GET(dbc, &key, &data, MDB_FIRST);
 | |
| +    /* Open cursor on the objectclass index */
 | |
| +    ret = dbmdb_open_subcount_cursor(be, SLAPI_ATTR_OBJECTCLASS, txn, &c_objectclass);
 | |
| +    if (ret) {
 | |
| +        if (ret != DBI_RC_NOTFOUND) {
 | |
| +            /* No database ==> There is nothing to do. */
 | |
| +            ldbm_nasty((char*)__FUNCTION__, sourcefile, 61, ret);
 | |
| +        }
 | |
| +        return END_TXN(&txn, ret);
 | |
| +    }
 | |
| +			/* Open cursor on the entryrdn index */
 | |
| +    ret = dbmdb_open_subcount_cursor(be, LDBM_ENTRYRDN_STR, txn, &c_entryrdn);
 | |
| +    if (ret) {
 | |
| +        ldbm_nasty((char*)__FUNCTION__, sourcefile, 62, ret);
 | |
| +        dbmdb_close_subcount_cursor(&c_objectclass);
 | |
| +        return END_TXN(&txn, ret);
 | |
| +    }
 | |
|  
 | |
| -    /* Walk along the index */
 | |
| -    while (ret != MDB_NOTFOUND) {
 | |
| +    /* Walk along C* keys (usually starting at C1) */
 | |
| +    key.mv_data = "C";
 | |
| +    key.mv_size = 1;
 | |
| +    ret = MDB_CURSOR_GET(c_entryrdn.dbc, &key, &data, MDB_SET_RANGE);
 | |
| +    while (ret == 0) {
 | |
|          size_t sub_count = 0;
 | |
| +        size_t t_sub_count = 0;
 | |
| +        MDB_val oldkey = key;
 | |
|          ID parentid = 0;
 | |
|  
 | |
|          if (0 != ret) {
 | |
|              key.mv_data=NULL;
 | |
| -            ldbm_nasty("dbmdb_update_subordinatecounts", sourcefile, 62, ret);
 | |
| +            ldbm_nasty("dbmdb_update_subordinatecounts", sourcefile, 63, ret);
 | |
|              break;
 | |
|          }
 | |
|          /* check if we need to abort */
 | |
| @@ -247,33 +282,50 @@ dbmdb_update_subordinatecounts(backend *be, ImportJob *job, dbi_txn_t *txn)
 | |
|                                key_count);
 | |
|              started_progress_logging = 1;
 | |
|          }
 | |
| +        if (!key.mv_data || *(char *)key.mv_data != 'C') {
 | |
| +            /* No more children */
 | |
| +            break;
 | |
| +        }
 | |
|  
 | |
| -        if (*(char *)key.mv_data == EQ_PREFIX) {
 | |
| -            char tmp[11];
 | |
| -
 | |
| -            /* construct the parent's ID from the key */
 | |
| -            if (key.mv_size >= sizeof tmp) {
 | |
| +        /* construct the parent's ID from the key */
 | |
| +        if (key.mv_size >= sizeof tmp) {
 | |
| +            ldbm_nasty("dbmdb_update_subordinatecounts", sourcefile, 64, ret);
 | |
| +            ret = DBI_RC_INVALID;
 | |
| +            break;
 | |
| +        }
 | |
| +        /* Generate expected value for parentid */
 | |
| +        memcpy(tmp, key.mv_data, key.mv_size);
 | |
| +        tmp[key.mv_size] = 0;
 | |
| +        parentid = (ID)atol(tmp+1);
 | |
| +        PR_ASSERT(0 != parentid);
 | |
| +        oldkey = key;
 | |
| +        /* Walk the entries having same key and check if they are tombstone */
 | |
| +        do {
 | |
| +            /* Reorder data */
 | |
| +            ID old_data, new_data;
 | |
| +            if (data.mv_size < sizeof old_data) {
 | |
|                  ldbm_nasty("dbmdb_update_subordinatecounts", sourcefile, 66, ret);
 | |
| +                ret = DBI_RC_INVALID;
 | |
|                  break;
 | |
|              }
 | |
| -            memcpy(tmp, key.mv_data, key.mv_size);
 | |
| -            tmp[key.mv_size] = 0;
 | |
| -            parentid = (ID)atol(tmp+1);
 | |
| -            PR_ASSERT(0 != parentid);
 | |
| -            /* Get number of records having the same key */
 | |
| -            ret = mdb_cursor_count(dbc, &sub_count);
 | |
| -            if (ret) {
 | |
| -                ldbm_nasty("dbmdb_update_subordinatecounts", sourcefile, 63, ret);
 | |
| -                break;
 | |
| -            }
 | |
| -            PR_ASSERT(0 != sub_count);
 | |
| -            ret = dbmdb_import_update_entry_subcount(be, parentid, sub_count, isencrypted, &btxn);
 | |
| -            if (ret) {
 | |
| -                ldbm_nasty("dbmdb_update_subordinatecounts", sourcefile, 64, ret);
 | |
| -                break;
 | |
| +            memcpy(&old_data, data.mv_data, sizeof old_data);
 | |
| +            id_internal_to_stored(old_data, (char*)&new_data);
 | |
| +            data.mv_data = &new_data;
 | |
| +            data.mv_size = sizeof new_data;
 | |
| +            if (!dbmdb_subcount_is_tombstone(&c_objectclass, &data)) {
 | |
| +                sub_count++;
 | |
| +            } else {
 | |
| +                t_sub_count++;
 | |
|              }
 | |
| +            ret = MDB_CURSOR_GET(c_entryrdn.dbc, &key, &data, MDB_NEXT);
 | |
| +        } while (ret == 0 && key.mv_size == oldkey.mv_size &&
 | |
| +             memcmp(key.mv_data, oldkey.mv_data, key.mv_size) == 0);
 | |
| +        ret2 = import_update_entry_subcount(be, parentid, sub_count, t_sub_count, isencrypted, &btxn);
 | |
| +        if (ret2) {
 | |
| +            ret = ret2;
 | |
| +            ldbm_nasty("dbmdb_update_subordinatecounts", sourcefile, 65, ret);
 | |
| +            break;
 | |
|          }
 | |
| -        ret = MDB_CURSOR_GET(dbc, &key, &data, MDB_NEXT_NODUP);
 | |
|      }
 | |
|      if (started_progress_logging) {
 | |
|          /* Finish what we started... */
 | |
| @@ -285,11 +337,13 @@ dbmdb_update_subordinatecounts(backend *be, ImportJob *job, dbi_txn_t *txn)
 | |
|      if (ret == MDB_NOTFOUND) {
 | |
|          ret = 0;
 | |
|      }
 | |
| -
 | |
| -    dbmdb_close_cursor(&cursor, ret);
 | |
| -    dblayer_release_index_file(be, ai, db);
 | |
| -
 | |
| -    return (ret);
 | |
| +    dbmdb_close_subcount_cursor(&c_entryrdn);
 | |
| +    dbmdb_close_subcount_cursor(&c_objectclass);
 | |
| +    if (txn) {
 | |
| +        return END_TXN(&txn, ret);
 | |
| +    } else {
 | |
| +        return ret;
 | |
| +    }
 | |
|  }
 | |
|  
 | |
|  /* Function used to gather a list of indexed attrs */
 | |
| @@ -362,10 +416,6 @@ dbmdb_import_free_job(ImportJob *job)
 | |
|          slapi_ch_free((void **)&asabird);
 | |
|      }
 | |
|      job->index_list = NULL;
 | |
| -    if (NULL != job->mothers) {
 | |
| -        import_subcount_stuff_term(job->mothers);
 | |
| -        slapi_ch_free((void **)&job->mothers);
 | |
| -    }
 | |
|  
 | |
|      dbmdb_back_free_incl_excl(job->include_subtrees, job->exclude_subtrees);
 | |
|  
 | |
| @@ -1244,7 +1294,6 @@ dbmdb_run_ldif2db(Slapi_PBlock *pb)
 | |
|      }
 | |
|      job->starting_ID = 1;
 | |
|      job->first_ID = 1;
 | |
| -    job->mothers = CALLOC(import_subcount_stuff);
 | |
|  
 | |
|      /* how much space should we allocate to index buffering? */
 | |
|      job->job_index_buffer_size = dbmdb_import_get_index_buffer_size();
 | |
| @@ -1255,7 +1304,6 @@ dbmdb_run_ldif2db(Slapi_PBlock *pb)
 | |
|              (job->inst->inst_li->li_import_cachesize / 10) + (1024 * 1024);
 | |
|          PR_Unlock(job->inst->inst_li->li_config_mutex);
 | |
|      }
 | |
| -    import_subcount_stuff_init(job->mothers);
 | |
|  
 | |
|      if (job->task != NULL) {
 | |
|          /* count files, use that to track "progress" in cn=tasks */
 | |
| @@ -1382,7 +1430,6 @@ dbmdb_bulk_import_start(Slapi_PBlock *pb)
 | |
|      job->starting_ID = 1;
 | |
|      job->first_ID = 1;
 | |
|  
 | |
| -    job->mothers = CALLOC(import_subcount_stuff);
 | |
|      /* how much space should we allocate to index buffering? */
 | |
|      job->job_index_buffer_size = dbmdb_import_get_index_buffer_size();
 | |
|      if (job->job_index_buffer_size == 0) {
 | |
| @@ -1390,7 +1437,6 @@ dbmdb_bulk_import_start(Slapi_PBlock *pb)
 | |
|          job->job_index_buffer_size = (job->inst->inst_li->li_dbcachesize / 10) +
 | |
|                                       (1024 * 1024);
 | |
|      }
 | |
| -    import_subcount_stuff_init(job->mothers);
 | |
|      dbmdb_import_init_writer(job, IM_BULKIMPORT);
 | |
|  
 | |
|      pthread_mutex_init(&job->wire_lock, NULL);
 | |
| diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
 | |
| index 39d2b06f7..6978bf5e3 100644
 | |
| --- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
 | |
| +++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
 | |
| @@ -2928,6 +2928,7 @@ dbmdb_add_op_attrs(ImportJob *job, struct backentry *ep, ID pid)
 | |
|      /* Get rid of attributes you're not allowed to specify yourself */
 | |
|      slapi_entry_delete_values(ep->ep_entry, hassubordinates, NULL);
 | |
|      slapi_entry_delete_values(ep->ep_entry, numsubordinates, NULL);
 | |
| +    slapi_entry_delete_values(ep->ep_entry, tombstone_numsubordinates, NULL);
 | |
|  
 | |
|      /* Upgrade DN format only */
 | |
|      /* Set current parentid to e_aux_attrs to remove it from the index file. */
 | |
| diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
 | |
| index 1b161a578..682fd70e2 100644
 | |
| --- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
 | |
| +++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c
 | |
| @@ -1336,7 +1336,7 @@ int dbmdb_open_cursor(dbmdb_cursor_t *dbicur, dbmdb_ctx_t *ctx, dbmdb_dbi_t *dbi
 | |
|      dbicur->dbi = dbi;
 | |
|      if (ctx->readonly)
 | |
|          flags |= MDB_RDONLY;
 | |
| -    rc = START_TXN(&dbicur->txn, NULL, 0);
 | |
| +    rc = START_TXN(&dbicur->txn, NULL, ((flags&MDB_RDONLY) ? TXNFL_RDONLY : 0));
 | |
|      if (rc) {
 | |
|          return rc;
 | |
|      }
 | |
| diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.h b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.h
 | |
| index fe230d60e..9cecf3edb 100644
 | |
| --- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.h
 | |
| +++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.h
 | |
| @@ -394,8 +394,6 @@ void dbmdb_restore_file_update(struct ldbminfo *li, const char *directory);
 | |
|  int dbmdb_import_file_init(ldbm_instance *inst);
 | |
|  void dbmdb_import_file_update(ldbm_instance *inst);
 | |
|  int dbmdb_import_file_check(ldbm_instance *inst);
 | |
| -int dbmdb_import_subcount_mother_init(import_subcount_stuff *mothers, ID parent_id, size_t count);
 | |
| -int dbmdb_import_subcount_mother_count(import_subcount_stuff *mothers, ID parent_id);
 | |
|  void dbmdb_import_configure_index_buffer_size(size_t size);
 | |
|  size_t dbmdb_import_get_index_buffer_size(void);
 | |
|  int dbmdb_ldbm_back_wire_import(Slapi_PBlock *pb);
 | |
| diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
 | |
| index 5a03bb533..f9a20051a 100644
 | |
| --- a/ldap/servers/slapd/back-ldbm/import.c
 | |
| +++ b/ldap/servers/slapd/back-ldbm/import.c
 | |
| @@ -241,3 +241,62 @@ wait_for_ref_count(Slapi_Counter *inst_ref_count)
 | |
|      /* Done waiting, return the current ref count */
 | |
|      return slapi_counter_get_value(inst_ref_count);
 | |
|  }
 | |
| +
 | |
| +/********** helper functions for importing **********/
 | |
| +
 | |
| +int
 | |
| +import_update_entry_subcount(backend *be, ID parentid, size_t sub_count, size_t t_sub_count, int isencrypted, back_txn *txn)
 | |
| +{
 | |
| +    ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
 | |
| +    int ret = 0;
 | |
| +    modify_context mc = {0};
 | |
| +    char value_buffer[22] = {0}; /* enough digits for 2^64 children */
 | |
| +    char t_value_buffer[22] = {0}; /* enough digits for 2^64 children */
 | |
| +    struct backentry *e = NULL;
 | |
| +    char *numsub_str = numsubordinates;
 | |
| +    Slapi_Mods *smods = NULL;
 | |
| +    static char *sourcefile = "import.c";
 | |
| +
 | |
| +    /* Get hold of the parent */
 | |
| +    e = id2entry(be, parentid, txn, &ret);
 | |
| +    if ((NULL == e) || (0 != ret)) {
 | |
| +        slapi_log_err(SLAPI_LOG_ERR, "import_update_entry_subcount", "failed to read entry with ID %d ret=%d\n",
 | |
| +                parentid, ret);
 | |
| +        ldbm_nasty("import_update_entry_subcount", sourcefile, 5, ret);
 | |
| +        return (0 == ret) ? -1 : ret;
 | |
| +    }
 | |
| +    /* Lock it (not really required since we're single-threaded here, but
 | |
| +     * let's do it so we can reuse the modify routines) */
 | |
| +    cache_lock_entry(&inst->inst_cache, e);
 | |
| +    modify_init(&mc, e);
 | |
| +    mc.attr_encrypt = isencrypted;
 | |
| +    sprintf(value_buffer, "%lu", (long unsigned int)sub_count);
 | |
| +    sprintf(t_value_buffer, "%lu", (long unsigned int)t_sub_count);
 | |
| +    smods = slapi_mods_new();
 | |
| +    if (sub_count) {
 | |
| +        slapi_mods_add(smods, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES, numsub_str,
 | |
| +                       strlen(value_buffer), value_buffer);
 | |
| +    } else {
 | |
| +        /* Make sure that the attribute is deleted */
 | |
| +        slapi_mods_add_mod_values(smods, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES, numsub_str, NULL);
 | |
| +    }
 | |
| +    if (t_sub_count) {
 | |
| +        slapi_mods_add(smods, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES, LDBM_TOMBSTONE_NUMSUBORDINATES_STR,
 | |
| +                       strlen(t_value_buffer), t_value_buffer);
 | |
| +    } else {
 | |
| +        /* Make sure that the attribute is deleted */
 | |
| +        slapi_mods_add_mod_values(smods, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES, LDBM_TOMBSTONE_NUMSUBORDINATES_STR, NULL);
 | |
| +    }
 | |
| +    ret = modify_apply_mods(&mc, smods); /* smods passed in */
 | |
| +    if (0 == ret) {
 | |
| +        /* This will correctly index subordinatecount: */
 | |
| +        ret = modify_update_all(be, NULL, &mc, txn);
 | |
| +        if (0 == ret) {
 | |
| +            modify_switch_entries(&mc, be);
 | |
| +        }
 | |
| +    }
 | |
| +    /* entry is unlocked and returned to the cache in modify_term */
 | |
| +    modify_term(&mc, be);
 | |
| +    return ret;
 | |
| +}
 | |
| +
 | |
| diff --git a/ldap/servers/slapd/back-ldbm/import.h b/ldap/servers/slapd/back-ldbm/import.h
 | |
| index b3f6b7493..e066f4195 100644
 | |
| --- a/ldap/servers/slapd/back-ldbm/import.h
 | |
| +++ b/ldap/servers/slapd/back-ldbm/import.h
 | |
| @@ -117,7 +117,6 @@ typedef struct _ImportJob
 | |
|                                      * another pass */
 | |
|      int uuid_gen_type;             /* kind of uuid to generate */
 | |
|      char *uuid_namespace;          /* namespace for name-generated uuid */
 | |
| -    import_subcount_stuff *mothers;
 | |
|      double average_progress_rate;
 | |
|      double recent_progress_rate;
 | |
|      double cache_hit_ratio;
 | |
| @@ -209,6 +208,7 @@ struct _import_worker_info
 | |
|  /* import.c */
 | |
|  void import_log_notice(ImportJob *job, int log_level, char *subsystem, char *format, ...);
 | |
|  int import_main_offline(void *arg);
 | |
| +int import_update_entry_subcount(backend *be, ID parentid, size_t sub_count, size_t t_sub_count, int isencrypted, back_txn *txn);
 | |
|  
 | |
|  /* ldif2ldbm.c */
 | |
|  void reset_progress(void);
 | |
| diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
 | |
| index 403ce6ae8..8b0386489 100644
 | |
| --- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
 | |
| +++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
 | |
| @@ -54,49 +54,6 @@ typedef struct _export_args
 | |
|  /* static functions */
 | |
|  
 | |
|  
 | |
| -/**********  common routines for classic/deluxe import code **********/
 | |
| -
 | |
| -static PRIntn
 | |
| -import_subcount_hash_compare_keys(const void *v1, const void *v2)
 | |
| -{
 | |
| -    return (((ID)((uintptr_t)v1) == (ID)((uintptr_t)v2)) ? 1 : 0);
 | |
| -}
 | |
| -
 | |
| -static PRIntn
 | |
| -import_subcount_hash_compare_values(const void *v1, const void *v2)
 | |
| -{
 | |
| -    return (((size_t)v1 == (size_t)v2) ? 1 : 0);
 | |
| -}
 | |
| -
 | |
| -static PLHashNumber
 | |
| -import_subcount_hash_fn(const void *id)
 | |
| -{
 | |
| -    return (PLHashNumber)((uintptr_t)id);
 | |
| -}
 | |
| -
 | |
| -void
 | |
| -import_subcount_stuff_init(import_subcount_stuff *stuff)
 | |
| -{
 | |
| -    stuff->hashtable = PL_NewHashTable(IMPORT_SUBCOUNT_HASHTABLE_SIZE,
 | |
| -                                       import_subcount_hash_fn, import_subcount_hash_compare_keys,
 | |
| -                                       import_subcount_hash_compare_values, NULL, NULL);
 | |
| -}
 | |
| -
 | |
| -void
 | |
| -import_subcount_stuff_term(import_subcount_stuff *stuff)
 | |
| -{
 | |
| -    if (stuff != NULL && stuff->hashtable != NULL) {
 | |
| -        PL_HashTableDestroy(stuff->hashtable);
 | |
| -    }
 | |
| -}
 | |
| -
 | |
| -
 | |
| -
 | |
| -/**********  functions for maintaining the subordinate count **********/
 | |
| -
 | |
| -
 | |
| -
 | |
| -
 | |
|  /**********  ldif2db entry point  **********/
 | |
|  
 | |
|  /*
 | |
| diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
 | |
| index e6f9273eb..39a2852e5 100644
 | |
| --- a/src/lib389/lib389/__init__.py
 | |
| +++ b/src/lib389/lib389/__init__.py
 | |
| @@ -1835,7 +1835,7 @@ class DirSrv(SimpleLDAPObject, object):
 | |
|             one entry.
 | |
|              @param  - entry dn
 | |
|              @param  - search scope, in ldap.SCOPE_BASE (default),
 | |
| -                      ldap.SCOPE_SUB, ldap.SCOPE_ONE
 | |
| +                      ldap.SCOPE_SUB, ldap.SCOPE_ONELEVEL
 | |
|              @param filterstr - filterstr, default '(objectClass=*)' from
 | |
|                                 SimpleLDAPObject
 | |
|              @param attrlist - list of attributes to retrieve. eg ['cn', 'uid']
 | |
| diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
 | |
| index 1f9f1556f..37277296d 100644
 | |
| --- a/src/lib389/lib389/_mapped_object.py
 | |
| +++ b/src/lib389/lib389/_mapped_object.py
 | |
| @@ -200,7 +200,7 @@ class DSLdapObject(DSLogging, DSLint):
 | |
|          if scope == 'base':
 | |
|              search_scope = ldap.SCOPE_BASE
 | |
|          elif scope == 'one':
 | |
| -            search_scope = ldap.SCOPE_ONE
 | |
| +            search_scope = ldap.SCOPE_ONELEVEL
 | |
|          elif scope == 'subtree':
 | |
|              search_scope = ldap.SCOPE_SUBTREE
 | |
|          return _search_ext_s(self._instance,self._dn, search_scope, filter,
 | |
| -- 
 | |
| 2.49.0
 | |
| 
 |