Compare commits
2 Commits
c8-stream-... c10-beta

| Author | SHA1 | Date |
|---|---|---|
|  | 2760ce3ca2 |  |
|  | 89b7694df5 |  |
@@ -1,3 +0,0 @@
-bd9aab32d9cbf9231058d585479813f3420dc872 SOURCES/389-ds-base-1.4.3.39.tar.bz2
-1c8f2d0dfbf39fa8cd86363bf3314351ab21f8d4 SOURCES/jemalloc-5.3.0.tar.bz2
-8d3275209f2f8e1a69053340930ad1fb037d61fb SOURCES/vendor-1.4.3.39-3.tar.gz
7 .gitignore vendored
@@ -1,3 +1,4 @@
-SOURCES/389-ds-base-1.4.3.39.tar.bz2
-SOURCES/jemalloc-5.3.0.tar.bz2
-SOURCES/vendor-1.4.3.39-3.tar.gz
+389-ds-base-3.1.3.tar.bz2
+jemalloc-5.3.0.tar.bz2
+libdb-5.3.28-59.tar.bz2
+vendor-3.1.3-1.tar.gz
488 0001-Issue-6822-Backend-creation-cleanup-and-Database-UI-.patch Normal file
@@ -0,0 +1,488 @@
From 8f68c90b69bb09563ad8aa8c365bff534e133419 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Fri, 27 Jun 2025 18:43:39 -0700
Subject: [PATCH] Issue 6822 - Backend creation cleanup and Database UI tab
 error handling (#6823)

Description: Add rollback functionality when mapping tree creation fails
during backend creation, to prevent orphaned backends.
Improve error handling in the Database, Replication and Monitoring UI tabs
to gracefully handle backend get-tree command failures.

Fixes: https://github.com/389ds/389-ds-base/issues/6822

Reviewed by: @mreynolds389 (Thanks!)
---
 src/cockpit/389-console/src/database.jsx    | 119 ++++++++------
 src/cockpit/389-console/src/monitor.jsx     | 172 +++++++++++---------
 src/cockpit/389-console/src/replication.jsx |  55 ++++---
 src/lib389/lib389/backend.py                |  18 +-
 4 files changed, 210 insertions(+), 154 deletions(-)

diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx
|
||||
index c0c4be414..276125dfc 100644
|
||||
--- a/src/cockpit/389-console/src/database.jsx
|
||||
+++ b/src/cockpit/389-console/src/database.jsx
|
||||
@@ -478,6 +478,59 @@ export class Database extends React.Component {
|
||||
}
|
||||
|
||||
loadSuffixTree(fullReset) {
|
||||
+ const treeData = [
|
||||
+ {
|
||||
+ name: _("Global Database Configuration"),
|
||||
+ icon: <CogIcon />,
|
||||
+ id: "dbconfig",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Chaining Configuration"),
|
||||
+ icon: <ExternalLinkAltIcon />,
|
||||
+ id: "chaining-config",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Backups & LDIFs"),
|
||||
+ icon: <CopyIcon />,
|
||||
+ id: "backups",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Password Policies"),
|
||||
+ id: "pwp",
|
||||
+ icon: <KeyIcon />,
|
||||
+ children: [
|
||||
+ {
|
||||
+ name: _("Global Policy"),
|
||||
+ icon: <HomeIcon />,
|
||||
+ id: "pwpolicy",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Local Policies"),
|
||||
+ icon: <UsersIcon />,
|
||||
+ id: "localpwpolicy",
|
||||
+ },
|
||||
+ ],
|
||||
+ defaultExpanded: true
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Suffixes"),
|
||||
+ icon: <CatalogIcon />,
|
||||
+ id: "suffixes-tree",
|
||||
+ children: [],
|
||||
+ defaultExpanded: true,
|
||||
+ action: (
|
||||
+ <Button
|
||||
+ onClick={this.handleShowSuffixModal}
|
||||
+ variant="plain"
|
||||
+ aria-label="Create new suffix"
|
||||
+ title={_("Create new suffix")}
|
||||
+ >
|
||||
+ <PlusIcon />
|
||||
+ </Button>
|
||||
+ ),
|
||||
+ }
|
||||
+ ];
|
||||
+
|
||||
const cmd = [
|
||||
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
"backend", "get-tree",
|
||||
@@ -491,58 +544,20 @@ export class Database extends React.Component {
|
||||
suffixData = JSON.parse(content);
|
||||
this.processTree(suffixData);
|
||||
}
|
||||
- const treeData = [
|
||||
- {
|
||||
- name: _("Global Database Configuration"),
|
||||
- icon: <CogIcon />,
|
||||
- id: "dbconfig",
|
||||
- },
|
||||
- {
|
||||
- name: _("Chaining Configuration"),
|
||||
- icon: <ExternalLinkAltIcon />,
|
||||
- id: "chaining-config",
|
||||
- },
|
||||
- {
|
||||
- name: _("Backups & LDIFs"),
|
||||
- icon: <CopyIcon />,
|
||||
- id: "backups",
|
||||
- },
|
||||
- {
|
||||
- name: _("Password Policies"),
|
||||
- id: "pwp",
|
||||
- icon: <KeyIcon />,
|
||||
- children: [
|
||||
- {
|
||||
- name: _("Global Policy"),
|
||||
- icon: <HomeIcon />,
|
||||
- id: "pwpolicy",
|
||||
- },
|
||||
- {
|
||||
- name: _("Local Policies"),
|
||||
- icon: <UsersIcon />,
|
||||
- id: "localpwpolicy",
|
||||
- },
|
||||
- ],
|
||||
- defaultExpanded: true
|
||||
- },
|
||||
- {
|
||||
- name: _("Suffixes"),
|
||||
- icon: <CatalogIcon />,
|
||||
- id: "suffixes-tree",
|
||||
- children: suffixData,
|
||||
- defaultExpanded: true,
|
||||
- action: (
|
||||
- <Button
|
||||
- onClick={this.handleShowSuffixModal}
|
||||
- variant="plain"
|
||||
- aria-label="Create new suffix"
|
||||
- title={_("Create new suffix")}
|
||||
- >
|
||||
- <PlusIcon />
|
||||
- </Button>
|
||||
- ),
|
||||
- }
|
||||
- ];
|
||||
+
|
||||
+ let current_node = this.state.node_name;
|
||||
+ if (fullReset) {
|
||||
+ current_node = DB_CONFIG;
|
||||
+ }
|
||||
+
|
||||
+ treeData[4].children = suffixData; // suffixes node
|
||||
+ this.setState(() => ({
|
||||
+ nodes: treeData,
|
||||
+ node_name: current_node,
|
||||
+ }), this.loadAttrs);
|
||||
+ })
|
||||
+ .fail(err => {
|
||||
+ // Handle backend get-tree failure gracefully
|
||||
let current_node = this.state.node_name;
|
||||
if (fullReset) {
|
||||
current_node = DB_CONFIG;
|
||||
diff --git a/src/cockpit/389-console/src/monitor.jsx b/src/cockpit/389-console/src/monitor.jsx
|
||||
index ad48d1f87..91a8e3e37 100644
|
||||
--- a/src/cockpit/389-console/src/monitor.jsx
|
||||
+++ b/src/cockpit/389-console/src/monitor.jsx
|
||||
@@ -200,6 +200,84 @@ export class Monitor extends React.Component {
|
||||
}
|
||||
|
||||
loadSuffixTree(fullReset) {
|
||||
+ const basicData = [
|
||||
+ {
|
||||
+ name: _("Server Statistics"),
|
||||
+ icon: <ClusterIcon />,
|
||||
+ id: "server-monitor",
|
||||
+ type: "server",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Replication"),
|
||||
+ icon: <TopologyIcon />,
|
||||
+ id: "replication-monitor",
|
||||
+ type: "replication",
|
||||
+ defaultExpanded: true,
|
||||
+ children: [
|
||||
+ {
|
||||
+ name: _("Synchronization Report"),
|
||||
+ icon: <MonitoringIcon />,
|
||||
+ id: "sync-report",
|
||||
+ item: "sync-report",
|
||||
+ type: "repl-mon",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Log Analysis"),
|
||||
+ icon: <MonitoringIcon />,
|
||||
+ id: "log-analysis",
|
||||
+ item: "log-analysis",
|
||||
+ type: "repl-mon",
|
||||
+ }
|
||||
+ ],
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Database"),
|
||||
+ icon: <DatabaseIcon />,
|
||||
+ id: "database-monitor",
|
||||
+ type: "database",
|
||||
+ children: [], // Will be populated with treeData on success
|
||||
+ defaultExpanded: true,
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Logging"),
|
||||
+ icon: <CatalogIcon />,
|
||||
+ id: "log-monitor",
|
||||
+ defaultExpanded: true,
|
||||
+ children: [
|
||||
+ {
|
||||
+ name: _("Access Log"),
|
||||
+ icon: <BookIcon size="sm" />,
|
||||
+ id: "access-log-monitor",
|
||||
+ type: "log",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Audit Log"),
|
||||
+ icon: <BookIcon size="sm" />,
|
||||
+ id: "audit-log-monitor",
|
||||
+ type: "log",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Audit Failure Log"),
|
||||
+ icon: <BookIcon size="sm" />,
|
||||
+ id: "auditfail-log-monitor",
|
||||
+ type: "log",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Errors Log"),
|
||||
+ icon: <BookIcon size="sm" />,
|
||||
+ id: "error-log-monitor",
|
||||
+ type: "log",
|
||||
+ },
|
||||
+ {
|
||||
+ name: _("Security Log"),
|
||||
+ icon: <BookIcon size="sm" />,
|
||||
+ id: "security-log-monitor",
|
||||
+ type: "log",
|
||||
+ },
|
||||
+ ]
|
||||
+ },
|
||||
+ ];
|
||||
+
|
||||
const cmd = [
|
||||
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
"backend", "get-tree",
|
||||
@@ -210,83 +288,7 @@ export class Monitor extends React.Component {
|
||||
.done(content => {
|
||||
const treeData = JSON.parse(content);
|
||||
this.processTree(treeData);
|
||||
- const basicData = [
|
||||
- {
|
||||
- name: _("Server Statistics"),
|
||||
- icon: <ClusterIcon />,
|
||||
- id: "server-monitor",
|
||||
- type: "server",
|
||||
- },
|
||||
- {
|
||||
- name: _("Replication"),
|
||||
- icon: <TopologyIcon />,
|
||||
- id: "replication-monitor",
|
||||
- type: "replication",
|
||||
- defaultExpanded: true,
|
||||
- children: [
|
||||
- {
|
||||
- name: _("Synchronization Report"),
|
||||
- icon: <MonitoringIcon />,
|
||||
- id: "sync-report",
|
||||
- item: "sync-report",
|
||||
- type: "repl-mon",
|
||||
- },
|
||||
- {
|
||||
- name: _("Log Analysis"),
|
||||
- icon: <MonitoringIcon />,
|
||||
- id: "log-analysis",
|
||||
- item: "log-analysis",
|
||||
- type: "repl-mon",
|
||||
- }
|
||||
- ],
|
||||
- },
|
||||
- {
|
||||
- name: _("Database"),
|
||||
- icon: <DatabaseIcon />,
|
||||
- id: "database-monitor",
|
||||
- type: "database",
|
||||
- children: [],
|
||||
- defaultExpanded: true,
|
||||
- },
|
||||
- {
|
||||
- name: _("Logging"),
|
||||
- icon: <CatalogIcon />,
|
||||
- id: "log-monitor",
|
||||
- defaultExpanded: true,
|
||||
- children: [
|
||||
- {
|
||||
- name: _("Access Log"),
|
||||
- icon: <BookIcon size="sm" />,
|
||||
- id: "access-log-monitor",
|
||||
- type: "log",
|
||||
- },
|
||||
- {
|
||||
- name: _("Audit Log"),
|
||||
- icon: <BookIcon size="sm" />,
|
||||
- id: "audit-log-monitor",
|
||||
- type: "log",
|
||||
- },
|
||||
- {
|
||||
- name: _("Audit Failure Log"),
|
||||
- icon: <BookIcon size="sm" />,
|
||||
- id: "auditfail-log-monitor",
|
||||
- type: "log",
|
||||
- },
|
||||
- {
|
||||
- name: _("Errors Log"),
|
||||
- icon: <BookIcon size="sm" />,
|
||||
- id: "error-log-monitor",
|
||||
- type: "log",
|
||||
- },
|
||||
- {
|
||||
- name: _("Security Log"),
|
||||
- icon: <BookIcon size="sm" />,
|
||||
- id: "security-log-monitor",
|
||||
- type: "log",
|
||||
- },
|
||||
- ]
|
||||
- },
|
||||
- ];
|
||||
+
|
||||
let current_node = this.state.node_name;
|
||||
let type = this.state.node_type;
|
||||
if (fullReset) {
|
||||
@@ -296,6 +298,22 @@ export class Monitor extends React.Component {
|
||||
basicData[2].children = treeData; // database node
|
||||
this.processReplSuffixes(basicData[1].children);
|
||||
|
||||
+ this.setState(() => ({
|
||||
+ nodes: basicData,
|
||||
+ node_name: current_node,
|
||||
+ node_type: type,
|
||||
+ }), this.update_tree_nodes);
|
||||
+ })
|
||||
+ .fail(err => {
|
||||
+ // Handle backend get-tree failure gracefully
|
||||
+ let current_node = this.state.node_name;
|
||||
+ let type = this.state.node_type;
|
||||
+ if (fullReset) {
|
||||
+ current_node = "server-monitor";
|
||||
+ type = "server";
|
||||
+ }
|
||||
+ this.processReplSuffixes(basicData[1].children);
|
||||
+
|
||||
this.setState(() => ({
|
||||
nodes: basicData,
|
||||
node_name: current_node,
|
||||
diff --git a/src/cockpit/389-console/src/replication.jsx b/src/cockpit/389-console/src/replication.jsx
|
||||
index fa492fd2a..aa535bfc7 100644
|
||||
--- a/src/cockpit/389-console/src/replication.jsx
|
||||
+++ b/src/cockpit/389-console/src/replication.jsx
|
||||
@@ -177,6 +177,16 @@ export class Replication extends React.Component {
|
||||
loaded: false
|
||||
});
|
||||
|
||||
+ const basicData = [
|
||||
+ {
|
||||
+ name: _("Suffixes"),
|
||||
+ icon: <TopologyIcon />,
|
||||
+ id: "repl-suffixes",
|
||||
+ children: [],
|
||||
+ defaultExpanded: true
|
||||
+ }
|
||||
+ ];
|
||||
+
|
||||
const cmd = [
|
||||
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
|
||||
"backend", "get-tree",
|
||||
@@ -199,15 +209,7 @@ export class Replication extends React.Component {
|
||||
}
|
||||
}
|
||||
}
|
||||
- const basicData = [
|
||||
- {
|
||||
- name: _("Suffixes"),
|
||||
- icon: <TopologyIcon />,
|
||||
- id: "repl-suffixes",
|
||||
- children: [],
|
||||
- defaultExpanded: true
|
||||
- }
|
||||
- ];
|
||||
+
|
||||
let current_node = this.state.node_name;
|
||||
let current_type = this.state.node_type;
|
||||
let replicated = this.state.node_replicated;
|
||||
@@ -258,6 +260,19 @@ export class Replication extends React.Component {
|
||||
}
|
||||
|
||||
basicData[0].children = treeData;
|
||||
+ this.setState({
|
||||
+ nodes: basicData,
|
||||
+ node_name: current_node,
|
||||
+ node_type: current_type,
|
||||
+ node_replicated: replicated,
|
||||
+ }, () => { this.update_tree_nodes() });
|
||||
+ })
|
||||
+ .fail(err => {
|
||||
+ // Handle backend get-tree failure gracefully
|
||||
+ let current_node = this.state.node_name;
|
||||
+ let current_type = this.state.node_type;
|
||||
+ let replicated = this.state.node_replicated;
|
||||
+
|
||||
this.setState({
|
||||
nodes: basicData,
|
||||
node_name: current_node,
|
||||
@@ -905,18 +920,18 @@ export class Replication extends React.Component {
|
||||
disableTree: false
|
||||
});
|
||||
});
|
||||
- })
|
||||
- .fail(err => {
|
||||
- const errMsg = JSON.parse(err);
|
||||
- this.props.addNotification(
|
||||
- "error",
|
||||
- cockpit.format(_("Error loading replication agreements configuration - $0"), errMsg.desc)
|
||||
- );
|
||||
- this.setState({
|
||||
- suffixLoading: false,
|
||||
- disableTree: false
|
||||
+ })
|
||||
+ .fail(err => {
|
||||
+ const errMsg = JSON.parse(err);
|
||||
+ this.props.addNotification(
|
||||
+ "error",
|
||||
+ cockpit.format(_("Error loading replication agreements configuration - $0"), errMsg.desc)
|
||||
+ );
|
||||
+ this.setState({
|
||||
+ suffixLoading: false,
|
||||
+ disableTree: false
|
||||
+ });
|
||||
});
|
||||
- });
|
||||
})
|
||||
.fail(err => {
|
||||
// changelog failure
|
||||
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
|
||||
index 1d000ed66..53f15b6b0 100644
|
||||
--- a/src/lib389/lib389/backend.py
|
||||
+++ b/src/lib389/lib389/backend.py
|
||||
@@ -694,24 +694,32 @@ class Backend(DSLdapObject):
|
||||
parent_suffix = properties.pop('parent', False)
|
||||
|
||||
# Okay, now try to make the backend.
|
||||
- super(Backend, self).create(dn, properties, basedn)
|
||||
+ backend_obj = super(Backend, self).create(dn, properties, basedn)
|
||||
|
||||
# We check if the mapping tree exists in create, so do this *after*
|
||||
if create_mapping_tree is True:
|
||||
- properties = {
|
||||
+ mapping_tree_properties = {
|
||||
'cn': self._nprops_stash['nsslapd-suffix'],
|
||||
'nsslapd-state': 'backend',
|
||||
'nsslapd-backend': self._nprops_stash['cn'],
|
||||
}
|
||||
if parent_suffix:
|
||||
# This is a subsuffix, set the parent suffix
|
||||
- properties['nsslapd-parent-suffix'] = parent_suffix
|
||||
- self._mts.create(properties=properties)
|
||||
+ mapping_tree_properties['nsslapd-parent-suffix'] = parent_suffix
|
||||
+
|
||||
+ try:
|
||||
+ self._mts.create(properties=mapping_tree_properties)
|
||||
+ except Exception as e:
|
||||
+ try:
|
||||
+ backend_obj.delete()
|
||||
+ except Exception as cleanup_error:
|
||||
+ self._instance.log.error(f"Failed to cleanup backend after mapping tree creation failure: {cleanup_error}")
|
||||
+ raise e
|
||||
|
||||
# We can't create the sample entries unless a mapping tree was installed.
|
||||
if sample_entries is not False and create_mapping_tree is True:
|
||||
self.create_sample_entries(sample_entries)
|
||||
- return self
|
||||
+ return backend_obj
|
||||
|
||||
def delete(self):
|
||||
"""Deletes the backend, it's mapping tree and all related indices.
|
||||
--
|
||||
2.49.0
|
||||
|
||||
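The backend.py hunk in this patch follows a create-then-roll-back pattern: the backend is created first, and if the mapping tree cannot be created the new backend is deleted so no orphan is left behind. A minimal standalone sketch of that pattern in Python; the function names and callables are illustrative stand-ins, not the real lib389 API:

    # Sketch only: shows the rollback idea from the patch, not actual lib389 code.
    def create_backend_with_mapping_tree(create_backend, create_mapping_tree):
        """Both arguments are caller-supplied callables (hypothetical helpers)."""
        backend = create_backend()
        try:
            create_mapping_tree(backend)
        except Exception as err:
            # Roll back the backend so a failed mapping tree does not leave an orphan.
            try:
                backend.delete()
            except Exception as cleanup_error:
                print(f"Failed to clean up backend after mapping tree failure: {cleanup_error}")
            raise err
        return backend

The real patch does the same thing around self._mts.create(), deleting the freshly created backend object and logging cleanup failures through the instance logger before re-raising the original error.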
56 0002-Issue-6852-Move-ds-CLI-tools-back-to-sbin.patch Normal file
@@ -0,0 +1,56 @@
From 6ed6a67f142fec393cd254df38b9750a14848528 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Tue, 8 Jul 2025 19:09:34 +0200
Subject: [PATCH] Issue 6852 - Move ds* CLI tools back to /sbin

Bug Description:
After #6767, the ds* CLI tools are packaged in /bin instead of /sbin. Even
though Fedora 42 has unified /bin and /sbin, some tools (ipa-backup) and
our tests still rely on hardcoded paths.

Fix Description:
Move the ds* tools back to /sbin.

Fixes: https://github.com/389ds/389-ds-base/issues/6852

Reviewed by: @droideck (Thanks!)
---
 rpm/389-ds-base.spec.in | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index 2f1df63c9..101771574 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -486,6 +486,11 @@ cp -r %{_builddir}/%{name}-%{version}/man/man3 $RPM_BUILD_ROOT/%{_mandir}/man3
# lib389
pushd src/lib389
%pyproject_install
+%if 0%{?fedora} <= 41 || (0%{?rhel} && 0%{?rhel} <= 10)
+for clitool in dsconf dscreate dsctl dsidm openldap_to_ds; do
+ mv %{buildroot}%{_bindir}/$clitool %{buildroot}%{_sbindir}/
+done
+%endif
%pyproject_save_files -l lib389
popd

@@ -743,11 +748,11 @@ fi
%doc src/lib389/README.md
%license LICENSE LICENSE.GPLv3+
# Binaries
-%{_bindir}/dsconf
-%{_bindir}/dscreate
-%{_bindir}/dsctl
-%{_bindir}/dsidm
-%{_bindir}/openldap_to_ds
+%{_sbindir}/dsconf
+%{_sbindir}/dscreate
+%{_sbindir}/dsctl
+%{_sbindir}/dsidm
+%{_sbindir}/openldap_to_ds
%{_libexecdir}/%{pkgname}/dscontainer
# Man pages
%{_mandir}/man8/dsconf.8.gz
--
2.49.0

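The spec change only affects where the files land; anything that hardcodes /usr/sbin/dsconf (as the test suites mentioned in the commit message do) breaks when the tools are installed in /usr/bin instead. A small, purely illustrative Python lookup that tolerates either location, not code from this repository:

    import shutil

    def find_ds_tool(name="dsconf"):
        """Return the full path of a ds* CLI tool from /usr/sbin or /usr/bin."""
        path = shutil.which(name, path="/usr/sbin:/usr/bin")
        if path is None:
            raise FileNotFoundError(f"{name} not found in /usr/sbin or /usr/bin")
        return path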
380 0003-Issue-6663-Fix-NULL-subsystem-crash-in-JSON-error-lo.patch Normal file
@@ -0,0 +1,380 @@
From bd9ab54f64148d467e022c59ee8e5aed16f0c385 Mon Sep 17 00:00:00 2001
From: Akshay Adhikari <aadhikar@redhat.com>
Date: Mon, 28 Jul 2025 18:14:15 +0530
Subject: [PATCH] Issue 6663 - Fix NULL subsystem crash in JSON error logging
 (#6883)

Description: Fixes a crash in JSON error logging when the subsystem is NULL.
Parametrizes the test case for better debugging.

Relates: https://github.com/389ds/389-ds-base/issues/6663

Reviewed by: @mreynolds389
---
 .../tests/suites/clu/dsconf_logging.py      | 168 ------------------
 .../tests/suites/clu/dsconf_logging_test.py | 164 +++++++++++++++++
 ldap/servers/slapd/log.c                    |   2 +-
 3 files changed, 165 insertions(+), 169 deletions(-)
 delete mode 100644 dirsrvtests/tests/suites/clu/dsconf_logging.py
 create mode 100644 dirsrvtests/tests/suites/clu/dsconf_logging_test.py

diff --git a/dirsrvtests/tests/suites/clu/dsconf_logging.py b/dirsrvtests/tests/suites/clu/dsconf_logging.py
|
||||
deleted file mode 100644
|
||||
index 1c2f7fc2e..000000000
|
||||
--- a/dirsrvtests/tests/suites/clu/dsconf_logging.py
|
||||
+++ /dev/null
|
||||
@@ -1,168 +0,0 @@
|
||||
-# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2025 Red Hat, Inc.
|
||||
-# All rights reserved.
|
||||
-#
|
||||
-# License: GPL (version 3 or any later version).
|
||||
-# See LICENSE for details.
|
||||
-# --- END COPYRIGHT BLOCK ---
|
||||
-#
|
||||
-import json
|
||||
-import subprocess
|
||||
-import logging
|
||||
-import pytest
|
||||
-from lib389._constants import DN_DM
|
||||
-from lib389.topologies import topology_st as topo
|
||||
-
|
||||
-pytestmark = pytest.mark.tier1
|
||||
-
|
||||
-log = logging.getLogger(__name__)
|
||||
-
|
||||
-SETTINGS = [
|
||||
- ('logging-enabled', None),
|
||||
- ('logging-disabled', None),
|
||||
- ('mode', '700'),
|
||||
- ('compress-enabled', None),
|
||||
- ('compress-disabled', None),
|
||||
- ('buffering-enabled', None),
|
||||
- ('buffering-disabled', None),
|
||||
- ('max-logs', '4'),
|
||||
- ('max-logsize', '7'),
|
||||
- ('rotation-interval', '2'),
|
||||
- ('rotation-interval-unit', 'week'),
|
||||
- ('rotation-tod-enabled', None),
|
||||
- ('rotation-tod-disabled', None),
|
||||
- ('rotation-tod-hour', '12'),
|
||||
- ('rotation-tod-minute', '20'),
|
||||
- ('deletion-interval', '3'),
|
||||
- ('deletion-interval-unit', 'day'),
|
||||
- ('max-disk-space', '20'),
|
||||
- ('free-disk-space', '2'),
|
||||
-]
|
||||
-
|
||||
-DEFAULT_TIME_FORMAT = "%FT%TZ"
|
||||
-
|
||||
-
|
||||
-def execute_dsconf_command(dsconf_cmd, subcommands):
|
||||
- """Execute dsconf command and return output and return code"""
|
||||
-
|
||||
- cmdline = dsconf_cmd + subcommands
|
||||
- proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
|
||||
- out, _ = proc.communicate()
|
||||
- return out.decode('utf-8'), proc.returncode
|
||||
-
|
||||
-
|
||||
-def get_dsconf_base_cmd(topo):
|
||||
- """Return base dsconf command list"""
|
||||
- return ['/usr/sbin/dsconf', topo.standalone.serverid,
|
||||
- '-j', '-D', DN_DM, '-w', 'password', 'logging']
|
||||
-
|
||||
-
|
||||
-def test_log_settings(topo):
|
||||
- """Test each log setting can be set successfully
|
||||
-
|
||||
- :id: b800fd03-37f5-4e74-9af8-eeb07030eb52
|
||||
- :setup: Standalone DS instance
|
||||
- :steps:
|
||||
- 1. Test each log's settings
|
||||
- :expectedresults:
|
||||
- 1. Success
|
||||
- """
|
||||
-
|
||||
- dsconf_cmd = get_dsconf_base_cmd(topo)
|
||||
- for log_type in ['access', 'audit', 'auditfail', 'error', 'security']:
|
||||
- # Test "get" command
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd, [log_type, 'get'])
|
||||
- assert rc == 0
|
||||
- json_result = json.loads(output)
|
||||
- default_location = json_result['Log name and location']
|
||||
-
|
||||
- # Log location
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd, [log_type, 'set',
|
||||
- 'location',
|
||||
- f'/tmp/{log_type}'])
|
||||
- assert rc == 0
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd, [log_type, 'set',
|
||||
- 'location',
|
||||
- default_location])
|
||||
- assert rc == 0
|
||||
-
|
||||
- # Log levels
|
||||
- if log_type == "access":
|
||||
- # List levels
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
- [log_type, 'list-levels'])
|
||||
- assert rc == 0
|
||||
-
|
||||
- # Set levels
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
- [log_type, 'set', 'level',
|
||||
- 'internal'])
|
||||
- assert rc == 0
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
- [log_type, 'set', 'level',
|
||||
- 'internal', 'entry'])
|
||||
- assert rc == 0
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
- [log_type, 'set', 'level',
|
||||
- 'internal', 'default'])
|
||||
- assert rc == 0
|
||||
-
|
||||
- if log_type == "error":
|
||||
- # List levels
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
- [log_type, 'list-levels'])
|
||||
- assert rc == 0
|
||||
-
|
||||
- # Set levels
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
- [log_type, 'set', 'level',
|
||||
- 'plugin', 'replication'])
|
||||
- assert rc == 0
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
- [log_type, 'set', 'level',
|
||||
- 'default'])
|
||||
- assert rc == 0
|
||||
-
|
||||
- # Log formats
|
||||
- if log_type in ["access", "audit", "error"]:
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
- [log_type, 'set',
|
||||
- 'time-format', '%D'])
|
||||
- assert rc == 0
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
- [log_type, 'set',
|
||||
- 'time-format',
|
||||
- DEFAULT_TIME_FORMAT])
|
||||
- assert rc == 0
|
||||
-
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
- [log_type, 'set',
|
||||
- 'log-format',
|
||||
- 'json'])
|
||||
- assert rc == 0
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
- [log_type, 'set',
|
||||
- 'log-format',
|
||||
- 'default'])
|
||||
- assert rc == 0
|
||||
-
|
||||
- # Audit log display attrs
|
||||
- if log_type == "audit":
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
- [log_type, 'set',
|
||||
- 'display-attrs', 'cn'])
|
||||
- assert rc == 0
|
||||
-
|
||||
- # Common settings
|
||||
- for attr, value in SETTINGS:
|
||||
- if log_type == "auditfail" and attr.startswith("buffer"):
|
||||
- # auditfail doesn't have a buffering settings
|
||||
- continue
|
||||
-
|
||||
- if value is None:
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd, [log_type,
|
||||
- 'set', attr])
|
||||
- else:
|
||||
- output, rc = execute_dsconf_command(dsconf_cmd, [log_type,
|
||||
- 'set', attr, value])
|
||||
- assert rc == 0
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dsconf_logging_test.py b/dirsrvtests/tests/suites/clu/dsconf_logging_test.py
|
||||
new file mode 100644
|
||||
index 000000000..ca3f71997
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/clu/dsconf_logging_test.py
|
||||
@@ -0,0 +1,164 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import json
|
||||
+import subprocess
|
||||
+import logging
|
||||
+import pytest
|
||||
+from lib389._constants import DN_DM
|
||||
+from lib389.topologies import topology_st as topo
|
||||
+
|
||||
+pytestmark = pytest.mark.tier1
|
||||
+
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+SETTINGS = [
|
||||
+ ('logging-enabled', None),
|
||||
+ ('logging-disabled', None),
|
||||
+ ('mode', '700'),
|
||||
+ ('compress-enabled', None),
|
||||
+ ('compress-disabled', None),
|
||||
+ ('buffering-enabled', None),
|
||||
+ ('buffering-disabled', None),
|
||||
+ ('max-logs', '4'),
|
||||
+ ('max-logsize', '7'),
|
||||
+ ('rotation-interval', '2'),
|
||||
+ ('rotation-interval-unit', 'week'),
|
||||
+ ('rotation-tod-enabled', None),
|
||||
+ ('rotation-tod-disabled', None),
|
||||
+ ('rotation-tod-hour', '12'),
|
||||
+ ('rotation-tod-minute', '20'),
|
||||
+ ('deletion-interval', '3'),
|
||||
+ ('deletion-interval-unit', 'day'),
|
||||
+ ('max-disk-space', '20'),
|
||||
+ ('free-disk-space', '2'),
|
||||
+]
|
||||
+
|
||||
+DEFAULT_TIME_FORMAT = "%FT%TZ"
|
||||
+
|
||||
+
|
||||
+def execute_dsconf_command(dsconf_cmd, subcommands):
|
||||
+ """Execute dsconf command and return output and return code"""
|
||||
+
|
||||
+ cmdline = dsconf_cmd + subcommands
|
||||
+ proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
+ out, err = proc.communicate()
|
||||
+
|
||||
+ if proc.returncode != 0 and err:
|
||||
+ log.error(f"Command failed: {' '.join(cmdline)}")
|
||||
+ log.error(f"Stderr: {err.decode('utf-8')}")
|
||||
+
|
||||
+ return out.decode('utf-8'), proc.returncode
|
||||
+
|
||||
+
|
||||
+def get_dsconf_base_cmd(topo):
|
||||
+ """Return base dsconf command list"""
|
||||
+ return ['/usr/sbin/dsconf', topo.standalone.serverid,
|
||||
+ '-j', '-D', DN_DM, '-w', 'password', 'logging']
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("log_type", ['access', 'audit', 'auditfail', 'error', 'security'])
|
||||
+def test_log_settings(topo, log_type):
|
||||
+ """Test each log setting can be set successfully
|
||||
+
|
||||
+ :id: b800fd03-37f5-4e74-9af8-eeb07030eb52
|
||||
+ :setup: Standalone DS instance
|
||||
+ :steps:
|
||||
+ 1. Test each log's settings
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ """
|
||||
+
|
||||
+ dsconf_cmd = get_dsconf_base_cmd(topo)
|
||||
+
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd, [log_type, 'get'])
|
||||
+ assert rc == 0
|
||||
+ json_result = json.loads(output)
|
||||
+ default_location = json_result['Log name and location']
|
||||
+
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd, [log_type, 'set',
|
||||
+ 'location',
|
||||
+ f'/tmp/{log_type}'])
|
||||
+ assert rc == 0
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd, [log_type, 'set',
|
||||
+ 'location',
|
||||
+ default_location])
|
||||
+ assert rc == 0
|
||||
+
|
||||
+ if log_type == "access":
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
+ [log_type, 'list-levels'])
|
||||
+ assert rc == 0
|
||||
+
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
+ [log_type, 'set', 'level',
|
||||
+ 'internal'])
|
||||
+ assert rc == 0
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
+ [log_type, 'set', 'level',
|
||||
+ 'internal', 'entry'])
|
||||
+ assert rc == 0
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
+ [log_type, 'set', 'level',
|
||||
+ 'internal', 'default'])
|
||||
+ assert rc == 0
|
||||
+
|
||||
+ if log_type == "error":
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
+ [log_type, 'list-levels'])
|
||||
+ assert rc == 0
|
||||
+
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
+ [log_type, 'set', 'level',
|
||||
+ 'plugin', 'replication'])
|
||||
+ assert rc == 0
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
+ [log_type, 'set', 'level',
|
||||
+ 'default'])
|
||||
+ assert rc == 0
|
||||
+
|
||||
+ if log_type in ["access", "audit", "error"]:
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
+ [log_type, 'set',
|
||||
+ 'time-format', '%D'])
|
||||
+ assert rc == 0
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
+ [log_type, 'set',
|
||||
+ 'time-format',
|
||||
+ DEFAULT_TIME_FORMAT])
|
||||
+ assert rc == 0
|
||||
+
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
+ [log_type, 'set',
|
||||
+ 'log-format',
|
||||
+ 'json'])
|
||||
+ assert rc == 0, f"Failed to set {log_type} log-format to json: {output}"
|
||||
+
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
+ [log_type, 'set',
|
||||
+ 'log-format',
|
||||
+ 'default'])
|
||||
+ assert rc == 0, f"Failed to set {log_type} log-format to default: {output}"
|
||||
+
|
||||
+ if log_type == "audit":
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd,
|
||||
+ [log_type, 'set',
|
||||
+ 'display-attrs', 'cn'])
|
||||
+ assert rc == 0
|
||||
+
|
||||
+ for attr, value in SETTINGS:
|
||||
+ if log_type == "auditfail" and attr.startswith("buffer"):
|
||||
+ continue
|
||||
+
|
||||
+ if value is None:
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd, [log_type,
|
||||
+ 'set', attr])
|
||||
+ else:
|
||||
+ output, rc = execute_dsconf_command(dsconf_cmd, [log_type,
|
||||
+ 'set', attr, value])
|
||||
+ assert rc == 0
|
||||
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
|
||||
index 06dae4d0b..e859682fe 100644
|
||||
--- a/ldap/servers/slapd/log.c
|
||||
+++ b/ldap/servers/slapd/log.c
|
||||
@@ -2937,7 +2937,7 @@ vslapd_log_error(
|
||||
json_obj = json_object_new_object();
|
||||
json_object_object_add(json_obj, "local_time", json_object_new_string(local_time));
|
||||
json_object_object_add(json_obj, "severity", json_object_new_string(get_log_sev_name(sev_level, sev_name)));
|
||||
- json_object_object_add(json_obj, "subsystem", json_object_new_string(subsystem));
|
||||
+ json_object_object_add(json_obj, "subsystem", json_object_new_string(subsystem ? subsystem : ""));
|
||||
json_object_object_add(json_obj, "msg", json_object_new_string(vbuf));
|
||||
|
||||
PR_snprintf(buffer, sizeof(buffer), "%s\n",
|
||||
--
|
||||
2.49.0
|
||||
|
||||
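The test rewrite in this patch replaces one long loop over log types with @pytest.mark.parametrize, so each log type becomes its own test case and a failure names the exact log type that broke. A tiny self-contained example of the same pattern; configure_log is a hypothetical stand-in for the dsconf calls:

    import pytest

    LOG_TYPES = ['access', 'audit', 'auditfail', 'error', 'security']

    def configure_log(log_type):
        # Stand-in for the real dsconf invocation; returns 0 on "success".
        return 0 if log_type in LOG_TYPES else 1

    @pytest.mark.parametrize("log_type", LOG_TYPES)
    def test_configure_log(log_type):
        # pytest generates five independent test cases, one per log type.
        assert configure_log(log_type) == 0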
205 0004-Issue-6829-Update-parametrized-docstring-for-tests.patch Normal file
@@ -0,0 +1,205 @@
From 23e0b93c3bbe96a365357a3af11bc86172102c05 Mon Sep 17 00:00:00 2001
From: Barbora Simonova <bsmejkal@redhat.com>
Date: Wed, 25 Jun 2025 10:43:37 +0200
Subject: [PATCH] Issue 6829 - Update parametrized docstring for tests

Description:
Add the missing :parametrized: value to the docstrings of some tests.

Fixes: https://github.com/389ds/389-ds-base/issues/6829

Reviewed by: @vashirov (Thanks!)
---
 dirsrvtests/tests/suites/basic/basic_test.py             | 2 +-
 dirsrvtests/tests/suites/clu/dsconf_config_test.py       | 8 ++++++++
 dirsrvtests/tests/suites/clu/schema_test.py              | 5 +++++
 dirsrvtests/tests/suites/mapping_tree/regression_test.py | 1 +
 dirsrvtests/tests/suites/password/password_test.py       | 1 +
 .../tests/suites/replication/regression_m2_test.py       | 1 +
 dirsrvtests/tests/suites/vlv/regression_test.py          | 2 ++
 7 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
|
||||
index 8f5de91aa..db6bfab56 100644
|
||||
--- a/dirsrvtests/tests/suites/basic/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
|
||||
@@ -961,7 +961,7 @@ def test_basic_search_lookthroughlimit(topology_st, limit, resp, import_example_
|
||||
Tests normal search with lookthroughlimit set high and low.
|
||||
|
||||
:id: b5119970-6c9f-41b7-9649-de9233226fec
|
||||
-
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone instance, add example.ldif to the database, search filter (uid=*).
|
||||
|
||||
:steps:
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dsconf_config_test.py b/dirsrvtests/tests/suites/clu/dsconf_config_test.py
|
||||
index d67679adf..232018097 100644
|
||||
--- a/dirsrvtests/tests/suites/clu/dsconf_config_test.py
|
||||
+++ b/dirsrvtests/tests/suites/clu/dsconf_config_test.py
|
||||
@@ -58,6 +58,7 @@ def test_single_value_add(topology_st, attr_name, values_dict):
|
||||
"""Test adding a single value to an attribute
|
||||
|
||||
:id: ffc912a6-c188-413d-9c35-7f4b3774d946
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone DS instance
|
||||
:steps:
|
||||
1. Add a single value to the specified attribute
|
||||
@@ -89,6 +90,7 @@ def test_single_value_replace(topology_st, attr_name, values_dict):
|
||||
"""Test replacing a single value in configuration attributes
|
||||
|
||||
:id: 112e3e5e-8db8-4974-9ea4-ed789c2d02f2
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone DS instance
|
||||
:steps:
|
||||
1. Add initial value to the specified attribute
|
||||
@@ -127,6 +129,7 @@ def test_multi_value_batch_add(topology_st, attr_name, values_dict):
|
||||
"""Test adding multiple values in a single batch command
|
||||
|
||||
:id: 4c34c7f8-16cc-4ab6-938a-967537be5470
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone DS instance
|
||||
:steps:
|
||||
1. Add multiple values to the attribute in a single command
|
||||
@@ -157,6 +160,7 @@ def test_multi_value_batch_replace(topology_st, attr_name, values_dict):
|
||||
"""Test replacing with multiple values in a single batch command
|
||||
|
||||
:id: 05cf28b8-000e-4856-a10b-7e1df012737d
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone DS instance
|
||||
:steps:
|
||||
1. Add initial single value
|
||||
@@ -194,6 +198,7 @@ def test_multi_value_specific_delete(topology_st, attr_name, values_dict):
|
||||
"""Test deleting specific values from multi-valued attribute
|
||||
|
||||
:id: bb325c9a-eae8-438a-b577-bd63540b91cb
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone DS instance
|
||||
:steps:
|
||||
1. Add multiple values to the attribute
|
||||
@@ -232,6 +237,7 @@ def test_multi_value_batch_delete(topology_st, attr_name, values_dict):
|
||||
"""Test deleting multiple values in a single batch command
|
||||
|
||||
:id: 4b105824-b060-4f83-97d7-001a01dba1a5
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone DS instance
|
||||
:steps:
|
||||
1. Add multiple values to the attribute
|
||||
@@ -269,6 +275,7 @@ def test_single_value_persists_after_restart(topology_st, attr_name, values_dict
|
||||
"""Test single value persists after server restart
|
||||
|
||||
:id: be1a7e3d-a9ca-48a1-a3bc-062990d4f3e9
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone DS instance
|
||||
:steps:
|
||||
1. Add single value to the attribute
|
||||
@@ -310,6 +317,7 @@ def test_multi_value_batch_persists_after_restart(topology_st, attr_name, values
|
||||
"""Test multiple values added in batch persist after server restart
|
||||
|
||||
:id: fd0435e2-90b1-465a-8968-d3a375c8fb22
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone DS instance
|
||||
:steps:
|
||||
1. Add multiple values in a single batch command
|
||||
diff --git a/dirsrvtests/tests/suites/clu/schema_test.py b/dirsrvtests/tests/suites/clu/schema_test.py
|
||||
index 19ec032bc..5709471cf 100644
|
||||
--- a/dirsrvtests/tests/suites/clu/schema_test.py
|
||||
+++ b/dirsrvtests/tests/suites/clu/schema_test.py
|
||||
@@ -100,6 +100,7 @@ def test_origins(create_attribute):
|
||||
"""Test the various possibilities of x-origin
|
||||
|
||||
:id: 3229f6f8-67c1-4558-9be5-71434283086a
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone Instance
|
||||
:steps:
|
||||
1. Add an attribute with different x-origin values/types
|
||||
@@ -116,6 +117,7 @@ def test_mrs(create_attribute):
|
||||
"""Test an attribute can be added with a matching rule
|
||||
|
||||
:id: e4eb06e0-7f80-41fe-8868-08c2bafc7590
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone Instance
|
||||
:steps:
|
||||
1. Add an attribute with a matching rule
|
||||
@@ -132,6 +134,7 @@ def test_edit_attributetype(create_attribute):
|
||||
"""Test editing an attribute type in the schema
|
||||
|
||||
:id: 07c98f6a-89f8-44e5-9cc1-353d1f7bccf4
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone Instance
|
||||
:steps:
|
||||
1. Add a new attribute type
|
||||
@@ -209,6 +212,7 @@ def test_edit_attributetype_remove_superior(create_attribute):
|
||||
"""Test editing an attribute type to remove a parameter from it
|
||||
|
||||
:id: bd6ae89f-9617-4620-adc2-465884ca568b
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone Instance
|
||||
:steps:
|
||||
1. Add a new attribute type with a superior
|
||||
@@ -244,6 +248,7 @@ def test_edit_attribute_keep_custom_values(create_attribute):
|
||||
"""Test editing a custom schema attribute keeps all custom values
|
||||
|
||||
:id: 5b1e2e8b-28c2-4f77-9c03-07eff20f763d
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone Instance
|
||||
:steps:
|
||||
1. Create a custom attribute
|
||||
diff --git a/dirsrvtests/tests/suites/mapping_tree/regression_test.py b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
index 51c687059..2c57c2973 100644
|
||||
--- a/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
@@ -111,6 +111,7 @@ def test_sub_suffixes(topo, orphan_param):
|
||||
""" check the entries found on suffix/sub-suffix
|
||||
|
||||
:id: 5b4421c2-d851-11ec-a760-482ae39447e5
|
||||
+ :parametrized: yes
|
||||
:feature: mapping-tree
|
||||
:setup: Standalone instance with 3 additional backends:
|
||||
dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py
|
||||
index 2d4aef028..94a23e669 100644
|
||||
--- a/dirsrvtests/tests/suites/password/password_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/password_test.py
|
||||
@@ -156,6 +156,7 @@ def test_pwd_scheme_no_upgrade_on_bind(topology_st, crypt_scheme, request, no_up
|
||||
the current hash is in nsslapd-scheme-list-no-upgrade-hash
|
||||
|
||||
:id: b4d2c525-a239-4ca6-a168-5126da7abedd
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone instance
|
||||
:steps:
|
||||
1. Create a user with userpassword stored as CRYPT
|
||||
diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
index 10a5fa419..68966ac49 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
@@ -991,6 +991,7 @@ def test_change_repl_passwd(topo_m2, request, bind_cn):
|
||||
Testing when agmt bind group are used.
|
||||
|
||||
:id: a305913a-cc76-11ec-b324-482ae39447e5
|
||||
+ :parametrized: yes
|
||||
:setup: 2 Supplier Instances
|
||||
:steps:
|
||||
1. Insure agmt from supplier1 to supplier2 is properly set to use bind group
|
||||
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
index d9d1cb444..f7847ac74 100644
|
||||
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
@@ -775,6 +775,7 @@ def test_vlv_reindex(topology_st, prefix, basedn):
|
||||
"""Test VLV reindexing.
|
||||
|
||||
:id: d5dc0d8e-cbe6-11ee-95b1-482ae39447e5
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone instance.
|
||||
:steps:
|
||||
1. Cleanup leftover from previous tests
|
||||
@@ -830,6 +831,7 @@ def test_vlv_offline_import(topology_st, prefix, basedn):
|
||||
"""Test VLV after off line import.
|
||||
|
||||
:id: 8732d7a8-e851-11ee-9d63-482ae39447e5
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone instance.
|
||||
:steps:
|
||||
1. Cleanup leftover from previous tests
|
||||
--
|
||||
2.49.0
|
||||
|
||||
127 0005-Issue-6782-Improve-paged-result-locking.patch Normal file
@@ -0,0 +1,127 @@
From 4f81f696e85dc7c50555df2d410222214c8ac883 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 15 May 2025 10:35:27 -0400
Subject: [PATCH] Issue 6782 - Improve paged result locking

Description:

When cleaning a slot, instead of memsetting everything to zero and restoring
the mutex, manually reset all the values, leaving the mutex pointer
intact.

There is also a deadlock possibility when checking for an abandoned paged
result search in opshared.c, and we were checking a flag value outside of
the per-connection lock.

Relates: https://github.com/389ds/389-ds-base/issues/6782

Reviewed by: progier & spichugi (Thanks!!)
---
 ldap/servers/slapd/opshared.c     | 10 +++++++++-
 ldap/servers/slapd/pagedresults.c | 27 +++++++++++++++++----------
 2 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
|
||||
index 5ea919e2d..545518748 100644
|
||||
--- a/ldap/servers/slapd/opshared.c
|
||||
+++ b/ldap/servers/slapd/opshared.c
|
||||
@@ -619,6 +619,14 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
int32_t tlimit;
|
||||
slapi_pblock_get(pb, SLAPI_SEARCH_TIMELIMIT, &tlimit);
|
||||
pagedresults_set_timelimit(pb_conn, operation, (time_t)tlimit, pr_idx);
|
||||
+ /* When using this mutex in conjunction with the main paged
|
||||
+ * result lock, you must do so in this order:
|
||||
+ *
|
||||
+ * --> pagedresults_lock()
|
||||
+ * --> pagedresults_mutex
|
||||
+ * <-- pagedresults_mutex
|
||||
+ * <-- pagedresults_unlock()
|
||||
+ */
|
||||
pagedresults_mutex = pageresult_lock_get_addr(pb_conn);
|
||||
}
|
||||
|
||||
@@ -744,11 +752,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
pr_search_result = pagedresults_get_search_result(pb_conn, operation, 1 /*locked*/, pr_idx);
|
||||
if (pr_search_result) {
|
||||
if (pagedresults_is_abandoned_or_notavailable(pb_conn, 1 /*locked*/, pr_idx)) {
|
||||
+ pthread_mutex_unlock(pagedresults_mutex);
|
||||
pagedresults_unlock(pb_conn, pr_idx);
|
||||
/* Previous operation was abandoned and the simplepaged object is not in use. */
|
||||
send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL);
|
||||
rc = LDAP_SUCCESS;
|
||||
- pthread_mutex_unlock(pagedresults_mutex);
|
||||
goto free_and_return;
|
||||
} else {
|
||||
slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, pr_search_result);
|
||||
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
|
||||
index 642aefb3d..c3f3aae01 100644
|
||||
--- a/ldap/servers/slapd/pagedresults.c
|
||||
+++ b/ldap/servers/slapd/pagedresults.c
|
||||
@@ -48,7 +48,6 @@ pageresult_lock_get_addr(Connection *conn)
|
||||
static void
|
||||
_pr_cleanup_one_slot(PagedResults *prp)
|
||||
{
|
||||
- PRLock *prmutex = NULL;
|
||||
if (!prp) {
|
||||
return;
|
||||
}
|
||||
@@ -56,13 +55,17 @@ _pr_cleanup_one_slot(PagedResults *prp)
|
||||
/* sr is left; release it. */
|
||||
prp->pr_current_be->be_search_results_release(&(prp->pr_search_result_set));
|
||||
}
|
||||
- /* clean up the slot */
|
||||
- if (prp->pr_mutex) {
|
||||
- /* pr_mutex is reused; back it up and reset it. */
|
||||
- prmutex = prp->pr_mutex;
|
||||
- }
|
||||
- memset(prp, '\0', sizeof(PagedResults));
|
||||
- prp->pr_mutex = prmutex;
|
||||
+
|
||||
+ /* clean up the slot except the mutex */
|
||||
+ prp->pr_current_be = NULL;
|
||||
+ prp->pr_search_result_set = NULL;
|
||||
+ prp->pr_search_result_count = 0;
|
||||
+ prp->pr_search_result_set_size_estimate = 0;
|
||||
+ prp->pr_sort_result_code = 0;
|
||||
+ prp->pr_timelimit_hr.tv_sec = 0;
|
||||
+ prp->pr_timelimit_hr.tv_nsec = 0;
|
||||
+ prp->pr_flags = 0;
|
||||
+ prp->pr_msgid = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1007,7 +1010,8 @@ op_set_pagedresults(Operation *op)
|
||||
|
||||
/*
|
||||
* pagedresults_lock/unlock -- introduced to protect search results for the
|
||||
- * asynchronous searches.
|
||||
+ * asynchronous searches. Do not call these functions while the PR conn lock
|
||||
+ * is held (e.g. pageresult_lock_get_addr(conn))
|
||||
*/
|
||||
void
|
||||
pagedresults_lock(Connection *conn, int index)
|
||||
@@ -1045,6 +1049,8 @@ int
|
||||
pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int index)
|
||||
{
|
||||
PagedResults *prp;
|
||||
+ int32_t result;
|
||||
+
|
||||
if (!conn || (index < 0) || (index >= conn->c_pagedresults.prl_maxlen)) {
|
||||
return 1; /* not abandoned, but do not want to proceed paged results op. */
|
||||
}
|
||||
@@ -1052,10 +1058,11 @@ pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int inde
|
||||
pthread_mutex_lock(pageresult_lock_get_addr(conn));
|
||||
}
|
||||
prp = conn->c_pagedresults.prl_list + index;
|
||||
+ result = prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED;
|
||||
if (!locked) {
|
||||
pthread_mutex_unlock(pageresult_lock_get_addr(conn));
|
||||
}
|
||||
- return prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED;
|
||||
+ return result;
|
||||
}
|
||||
|
||||
int
|
||||
--
|
||||
2.49.0
|
||||
|
||||
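The comment added in opshared.c pins down a lock order (pagedresults_lock() is taken before pagedresults_mutex), and the fix releases the inner mutex before the outer lock and reads the abandoned flag only while the per-connection lock is held. A short Python sketch of that discipline; the lock names are illustrative, not the server's API:

    import threading

    pr_lock = threading.Lock()        # outer: plays the role of pagedresults_lock()
    per_conn_lock = threading.Lock()  # inner: plays the role of pagedresults_mutex

    def read_abandoned_flag():
        # Stand-in for reading pr_flags under the per-connection lock.
        return False

    def handle_paged_search():
        # Acquire outer before inner, release inner before outer; never read the
        # flag outside the inner lock, and never re-take the outer lock while
        # still holding the inner one. That ordering is what avoids the deadlock.
        with pr_lock:
            with per_conn_lock:
                abandoned = read_abandoned_flag()
            if abandoned:
                return "abandoned"
        return "ok"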
@@ -0,0 +1,37 @@
From 0ab37e0848e6f1c4e46068bee46bd91c3bb3d22d Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Tue, 1 Jul 2025 12:44:04 +0200
Subject: [PATCH] Issue 6838 - lib389/replica.py is using nonexistent
 datetime.UTC in Python 3.9

Bug Description:
389-ds-base-2.x is supposed to be used with Python 3.9,
but lib389/replica.py is using `datetime.UTC`, an alias for
`datetime.timezone.utc` that was added only in Python 3.11.

Fix Description:
Use `datetime.timezone.utc` instead.

Fixes: https://github.com/389ds/389-ds-base/issues/6838

Reviewed by: @mreynolds389 (Thanks!)
---
 src/lib389/lib389/replica.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index 18ce1b1d5..59be00a33 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -917,7 +917,7 @@ class RUV(object):
ValueError("Wrong CSN value was supplied")

timestamp = int(csn[:8], 16)
- time_str = datetime.datetime.fromtimestamp(timestamp, datetime.UTC).strftime('%Y-%m-%d %H:%M:%S')
+ time_str = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
# We are parsing shorter CSN which contains only timestamp
if len(csn) == 8:
return time_str
--
2.49.0

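For context on the one-line fix above: datetime.UTC only exists from Python 3.11, while datetime.timezone.utc works on Python 3.9 as well, so the portable spelling is the one the patch switches to. A quick standalone check of the portable form:

    import datetime

    # Render an epoch timestamp in UTC using the spelling available on Python 3.9+.
    timestamp = 1751000000  # illustrative value, seconds since the epoch
    time_str = datetime.datetime.fromtimestamp(
        timestamp, datetime.timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
    print(time_str)  # prints the timestamp rendered in UTC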
515 0007-Issue-6753-Add-add_exclude_subtree-and-remove_exclud.patch Normal file
@@ -0,0 +1,515 @@
From 8984550568737142dd22020e1b9efd87cc0e42f8 Mon Sep 17 00:00:00 2001
From: Lenka Doudova <lryznaro@redhat.com>
Date: Mon, 9 Jun 2025 15:15:04 +0200
Subject: [PATCH] Issue 6753 - Add 'add_exclude_subtree' and
 'remove_exclude_subtree' methods to Attribute uniqueness plugin

Description:
Add 'add_exclude_subtree' and 'remove_exclude_subtree' methods to
AttributeUniquenessPlugin so that an exclude subtree can easily be added or removed.
Port the ticket 47927 test to
dirsrvtests/tests/suites/plugins/attruniq_test.py.

Relates: #6753

Author: Lenka Doudova

Reviewers: Simon Pichugin, Mark Reynolds
---
 .../tests/suites/plugins/attruniq_test.py     | 171 +++++++++++
 dirsrvtests/tests/tickets/ticket47927_test.py | 267 ------------------
 src/lib389/lib389/plugins.py                  |  10 +
 3 files changed, 181 insertions(+), 267 deletions(-)
 delete mode 100644 dirsrvtests/tests/tickets/ticket47927_test.py

diff --git a/dirsrvtests/tests/suites/plugins/attruniq_test.py b/dirsrvtests/tests/suites/plugins/attruniq_test.py
|
||||
index c1ccad9ae..aac659c29 100644
|
||||
--- a/dirsrvtests/tests/suites/plugins/attruniq_test.py
|
||||
+++ b/dirsrvtests/tests/suites/plugins/attruniq_test.py
|
||||
@@ -10,6 +10,7 @@ import pytest
|
||||
import ldap
|
||||
import logging
|
||||
from lib389.plugins import AttributeUniquenessPlugin
|
||||
+from lib389.idm.nscontainer import nsContainers
|
||||
from lib389.idm.user import UserAccounts
|
||||
from lib389.idm.group import Groups
|
||||
from lib389._constants import DEFAULT_SUFFIX
|
||||
@@ -22,6 +23,19 @@ log = logging.getLogger(__name__)
|
||||
MAIL_ATTR_VALUE = 'non-uniq@value.net'
|
||||
MAIL_ATTR_VALUE_ALT = 'alt-mail@value.net'
|
||||
|
||||
+EXCLUDED_CONTAINER_CN = "excluded_container"
|
||||
+EXCLUDED_CONTAINER_DN = "cn={},{}".format(EXCLUDED_CONTAINER_CN, DEFAULT_SUFFIX)
|
||||
+
|
||||
+EXCLUDED_BIS_CONTAINER_CN = "excluded_bis_container"
|
||||
+EXCLUDED_BIS_CONTAINER_DN = "cn={},{}".format(EXCLUDED_BIS_CONTAINER_CN, DEFAULT_SUFFIX)
|
||||
+
|
||||
+ENFORCED_CONTAINER_CN = "enforced_container"
|
||||
+
|
||||
+USER_1_CN = "test_1"
|
||||
+USER_2_CN = "test_2"
|
||||
+USER_3_CN = "test_3"
|
||||
+USER_4_CN = "test_4"
|
||||
+
|
||||
|
||||
def test_modrdn_attr_uniqueness(topology_st):
|
||||
"""Test that we can not add two entries that have the same attr value that is
|
||||
@@ -154,3 +168,160 @@ def test_multiple_attr_uniqueness(topology_st):
|
||||
testuser2.delete()
|
||||
attruniq.disable()
|
||||
attruniq.delete()
|
||||
+
|
||||
+
|
||||
+def test_exclude_subtrees(topology_st):
|
||||
+ """ Test attribute uniqueness with exclude scope
|
||||
+
|
||||
+ :id: 43d29a60-40e1-4ebd-b897-6ef9f20e9f27
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Setup and enable attribute uniqueness plugin for telephonenumber unique attribute
|
||||
+ 2. Create subtrees and test users
|
||||
+ 3. Add a unique attribute to a user within uniqueness scope
|
||||
+ 4. Add exclude subtree
|
||||
+ 5. Try to add existing value attribute to an entry within uniqueness scope
|
||||
+ 6. Try to add existing value attribute to an entry within exclude scope
|
||||
+ 7. Remove the attribute from affected entries
|
||||
+ 8. Add a unique attribute to a user within exclude scope
|
||||
+ 9. Try to add existing value attribute to an entry within uniqueness scope
|
||||
+ 10. Try to add existing value attribute to another entry within uniqueness scope
|
||||
+ 11. Remove the attribute from affected entries
|
||||
+ 12. Add another exclude subtree
|
||||
+ 13. Add a unique attribute to a user within uniqueness scope
|
||||
+ 14. Try to add existing value attribute to an entry within uniqueness scope
|
||||
+ 15. Try to add existing value attribute to an entry within exclude scope
|
||||
+ 16. Try to add existing value attribute to an entry within another exclude scope
|
||||
+ 17. Clean up entries
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Should raise CONSTRAINT_VIOLATION
|
||||
+ 6. Success
|
||||
+ 7. Success
|
||||
+ 8. Success
|
||||
+ 9. Success
|
||||
+ 10. Should raise CONSTRAINT_VIOLATION
|
||||
+ 11. Success
|
||||
+ 12. Success
|
||||
+ 13. Success
|
||||
+ 14. Should raise CONSTRAINT_VIOLATION
|
||||
+ 15. Success
|
||||
+ 16. Success
|
||||
+ 17. Success
|
||||
+ """
|
||||
+ log.info('Setup attribute uniqueness plugin')
|
||||
+ attruniq = AttributeUniquenessPlugin(topology_st.standalone, dn="cn=attruniq,cn=plugins,cn=config")
|
||||
+ attruniq.create(properties={'cn': 'attruniq'})
|
||||
+ attruniq.add_unique_attribute('telephonenumber')
|
||||
+ attruniq.add_unique_subtree(DEFAULT_SUFFIX)
|
||||
+ attruniq.enable_all_subtrees()
|
||||
+ attruniq.enable()
|
||||
+ topology_st.standalone.restart()
|
||||
+
|
||||
+ log.info('Create subtrees container')
|
||||
+ containers = nsContainers(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
+ cont1 = containers.create(properties={'cn': EXCLUDED_CONTAINER_CN})
|
||||
+ cont2 = containers.create(properties={'cn': EXCLUDED_BIS_CONTAINER_CN})
|
||||
+ cont3 = containers.create(properties={'cn': ENFORCED_CONTAINER_CN})
|
||||
+
|
||||
+ log.info('Create test users')
|
||||
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
|
||||
+ rdn='cn={}'.format(ENFORCED_CONTAINER_CN))
|
||||
+ users_excluded = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
|
||||
+ rdn='cn={}'.format(EXCLUDED_CONTAINER_CN))
|
||||
+ users_excluded2 = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
|
||||
+ rdn='cn={}'.format(EXCLUDED_BIS_CONTAINER_CN))
|
||||
+
|
||||
+ user1 = users.create(properties={'cn': USER_1_CN,
|
||||
+ 'uid': USER_1_CN,
|
||||
+ 'sn': USER_1_CN,
|
||||
+ 'uidNumber': '1',
|
||||
+ 'gidNumber': '11',
|
||||
+ 'homeDirectory': '/home/{}'.format(USER_1_CN)})
|
||||
+ user2 = users.create(properties={'cn': USER_2_CN,
|
||||
+ 'uid': USER_2_CN,
|
||||
+ 'sn': USER_2_CN,
|
||||
+ 'uidNumber': '2',
|
||||
+ 'gidNumber': '22',
|
||||
+ 'homeDirectory': '/home/{}'.format(USER_2_CN)})
|
||||
+ user3 = users_excluded.create(properties={'cn': USER_3_CN,
|
||||
+ 'uid': USER_3_CN,
|
||||
+ 'sn': USER_3_CN,
|
||||
+ 'uidNumber': '3',
|
||||
+ 'gidNumber': '33',
|
||||
+ 'homeDirectory': '/home/{}'.format(USER_3_CN)})
|
||||
+ user4 = users_excluded2.create(properties={'cn': USER_4_CN,
|
||||
+ 'uid': USER_4_CN,
|
||||
+ 'sn': USER_4_CN,
|
||||
+ 'uidNumber': '4',
|
||||
+ 'gidNumber': '44',
|
||||
+ 'homeDirectory': '/home/{}'.format(USER_4_CN)})
|
||||
+
|
||||
+ UNIQUE_VALUE = '1234'
|
||||
+
|
||||
+ try:
|
||||
+ log.info('Create user with unique attribute')
|
||||
+ user1.add('telephonenumber', UNIQUE_VALUE)
|
||||
+ assert user1.present('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ log.info('Add exclude subtree')
|
||||
+ attruniq.add_exclude_subtree(EXCLUDED_CONTAINER_DN)
|
||||
+ topology_st.standalone.restart()
|
||||
+
|
||||
+ log.info('Verify an already used attribute value cannot be added within the same subtree')
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ user2.add('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ log.info('Verify an entry with same attribute value can be added within exclude subtree')
|
||||
+ user3.add('telephonenumber', UNIQUE_VALUE)
|
||||
+ assert user3.present('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ log.info('Cleanup unique attribute values')
|
||||
+ user1.remove_all('telephonenumber')
|
||||
+ user3.remove_all('telephonenumber')
|
||||
+
|
||||
+ log.info('Add a unique value to an entry in excluded scope')
|
||||
+ user3.add('telephonenumber', UNIQUE_VALUE)
|
||||
+ assert user3.present('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ log.info('Verify the same value can be added to an entry within uniqueness scope')
|
||||
+ user1.add('telephonenumber', UNIQUE_VALUE)
|
||||
+ assert user1.present('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ log.info('Verify that yet another same value cannot be added to another entry within uniqueness scope')
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ user2.add('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ log.info('Cleanup unique attribute values')
|
||||
+ user1.remove_all('telephonenumber')
|
||||
+ user3.remove_all('telephonenumber')
|
||||
+
|
||||
+ log.info('Add another exclude subtree')
|
||||
+ attruniq.add_exclude_subtree(EXCLUDED_BIS_CONTAINER_DN)
|
||||
+ topology_st.standalone.restart()
|
||||
+
|
||||
+ user1.add('telephonenumber', UNIQUE_VALUE)
|
||||
+ log.info('Verify an already used attribute value cannot be added within the same subtree')
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ user2.add('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ log.info('Verify an already used attribute can be added to an entry in exclude scope')
|
||||
+ user3.add('telephonenumber', UNIQUE_VALUE)
|
||||
+ assert user3.present('telephonenumber', UNIQUE_VALUE)
|
||||
+ user4.add('telephonenumber', UNIQUE_VALUE)
|
||||
+ assert user4.present('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ finally:
|
||||
+ log.info('Clean up users, containers and attribute uniqueness plugin')
|
||||
+ user1.delete()
|
||||
+ user2.delete()
|
||||
+ user3.delete()
|
||||
+ user4.delete()
|
||||
+ cont1.delete()
|
||||
+ cont2.delete()
|
||||
+ cont3.delete()
|
||||
+ attruniq.disable()
|
||||
+ attruniq.delete()
|
||||
\ No newline at end of file
|
||||
diff --git a/dirsrvtests/tests/tickets/ticket47927_test.py b/dirsrvtests/tests/tickets/ticket47927_test.py
|
||||
deleted file mode 100644
|
||||
index 887fe1af4..000000000
|
||||
--- a/dirsrvtests/tests/tickets/ticket47927_test.py
|
||||
+++ /dev/null
|
||||
@@ -1,267 +0,0 @@
|
||||
-# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2016 Red Hat, Inc.
|
||||
-# All rights reserved.
|
||||
-#
|
||||
-# License: GPL (version 3 or any later version).
|
||||
-# See LICENSE for details.
|
||||
-# --- END COPYRIGHT BLOCK ---
|
||||
-#
|
||||
-import pytest
|
||||
-from lib389.tasks import *
|
||||
-from lib389.utils import *
|
||||
-from lib389.topologies import topology_st
|
||||
-
|
||||
-from lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_ATTR_UNIQUENESS
|
||||
-
|
||||
-# Skip on older versions
|
||||
-pytestmark = [pytest.mark.tier2,
|
||||
- pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")]
|
||||
-
|
||||
-logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
-log = logging.getLogger(__name__)
|
||||
-
|
||||
-EXCLUDED_CONTAINER_CN = "excluded_container"
|
||||
-EXCLUDED_CONTAINER_DN = "cn=%s,%s" % (EXCLUDED_CONTAINER_CN, SUFFIX)
|
||||
-
|
||||
-EXCLUDED_BIS_CONTAINER_CN = "excluded_bis_container"
|
||||
-EXCLUDED_BIS_CONTAINER_DN = "cn=%s,%s" % (EXCLUDED_BIS_CONTAINER_CN, SUFFIX)
|
||||
-
|
||||
-ENFORCED_CONTAINER_CN = "enforced_container"
|
||||
-ENFORCED_CONTAINER_DN = "cn=%s,%s" % (ENFORCED_CONTAINER_CN, SUFFIX)
|
||||
-
|
||||
-USER_1_CN = "test_1"
|
||||
-USER_1_DN = "cn=%s,%s" % (USER_1_CN, ENFORCED_CONTAINER_DN)
|
||||
-USER_2_CN = "test_2"
|
||||
-USER_2_DN = "cn=%s,%s" % (USER_2_CN, ENFORCED_CONTAINER_DN)
|
||||
-USER_3_CN = "test_3"
|
||||
-USER_3_DN = "cn=%s,%s" % (USER_3_CN, EXCLUDED_CONTAINER_DN)
|
||||
-USER_4_CN = "test_4"
|
||||
-USER_4_DN = "cn=%s,%s" % (USER_4_CN, EXCLUDED_BIS_CONTAINER_DN)
|
||||
-
|
||||
-
|
||||
-def test_ticket47927_init(topology_st):
|
||||
- topology_st.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
|
||||
- try:
|
||||
- topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
|
||||
- [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', b'telephonenumber'),
|
||||
- (ldap.MOD_REPLACE, 'uniqueness-subtrees', ensure_bytes(DEFAULT_SUFFIX)),
|
||||
- ])
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal('test_ticket47927: Failed to configure plugin for "telephonenumber": error ' + e.args[0]['desc'])
|
||||
- assert False
|
||||
- topology_st.standalone.restart(timeout=120)
|
||||
-
|
||||
- topology_st.standalone.add_s(Entry((EXCLUDED_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
|
||||
- 'cn': EXCLUDED_CONTAINER_CN})))
|
||||
- topology_st.standalone.add_s(Entry((EXCLUDED_BIS_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
|
||||
- 'cn': EXCLUDED_BIS_CONTAINER_CN})))
|
||||
- topology_st.standalone.add_s(Entry((ENFORCED_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
|
||||
- 'cn': ENFORCED_CONTAINER_CN})))
|
||||
-
|
||||
- # adding an entry on a stage with a different 'cn'
|
||||
- topology_st.standalone.add_s(Entry((USER_1_DN, {
|
||||
- 'objectclass': "top person".split(),
|
||||
- 'sn': USER_1_CN,
|
||||
- 'cn': USER_1_CN})))
|
||||
- # adding an entry on a stage with a different 'cn'
|
||||
- topology_st.standalone.add_s(Entry((USER_2_DN, {
|
||||
- 'objectclass': "top person".split(),
|
||||
- 'sn': USER_2_CN,
|
||||
- 'cn': USER_2_CN})))
|
||||
- topology_st.standalone.add_s(Entry((USER_3_DN, {
|
||||
- 'objectclass': "top person".split(),
|
||||
- 'sn': USER_3_CN,
|
||||
- 'cn': USER_3_CN})))
|
||||
- topology_st.standalone.add_s(Entry((USER_4_DN, {
|
||||
- 'objectclass': "top person".split(),
|
||||
- 'sn': USER_4_CN,
|
||||
- 'cn': USER_4_CN})))
|
||||
-
|
||||
-
|
||||
-def test_ticket47927_one(topology_st):
|
||||
- '''
|
||||
- Check that uniqueness is enforce on all SUFFIX
|
||||
- '''
|
||||
- UNIQUE_VALUE = b'1234'
|
||||
- try:
|
||||
- topology_st.standalone.modify_s(USER_1_DN,
|
||||
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal('test_ticket47927_one: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.args[0]['desc']))
|
||||
- assert False
|
||||
-
|
||||
- # we expect to fail because user1 is in the scope of the plugin
|
||||
- try:
|
||||
- topology_st.standalone.modify_s(USER_2_DN,
|
||||
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
|
||||
- log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
|
||||
- assert False
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (
|
||||
- USER_2_DN, e.args[0]['desc']))
|
||||
- pass
|
||||
-
|
||||
- # we expect to fail because user1 is in the scope of the plugin
|
||||
- try:
|
||||
- topology_st.standalone.modify_s(USER_3_DN,
|
||||
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
|
||||
- log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_3_DN))
|
||||
- assert False
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (
|
||||
- USER_3_DN, e.args[0]['desc']))
|
||||
- pass
|
||||
-
|
||||
-
|
||||
-def test_ticket47927_two(topology_st):
|
||||
- '''
|
||||
- Exclude the EXCLUDED_CONTAINER_DN from the uniqueness plugin
|
||||
- '''
|
||||
- try:
|
||||
- topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
|
||||
- [(ldap.MOD_REPLACE, 'uniqueness-exclude-subtrees', ensure_bytes(EXCLUDED_CONTAINER_DN))])
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal('test_ticket47927_two: Failed to configure plugin for to exclude %s: error %s' % (
|
||||
- EXCLUDED_CONTAINER_DN, e.args[0]['desc']))
|
||||
- assert False
|
||||
- topology_st.standalone.restart(timeout=120)
|
||||
-
|
||||
-
|
||||
-def test_ticket47927_three(topology_st):
|
||||
- '''
|
||||
- Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
|
||||
- First case: it exists an entry (with the same attribute value) in the scope
|
||||
- of the plugin and we set the value in an entry that is in an excluded scope
|
||||
- '''
|
||||
- UNIQUE_VALUE = b'9876'
|
||||
- try:
|
||||
- topology_st.standalone.modify_s(USER_1_DN,
|
||||
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal('test_ticket47927_three: Failed to set the telephonenumber ' + e.args[0]['desc'])
|
||||
- assert False
|
||||
-
|
||||
- # we should not be allowed to set this value (because user1 is in the scope)
|
||||
- try:
|
||||
- topology_st.standalone.modify_s(USER_2_DN,
|
||||
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
|
||||
- log.fatal('test_ticket47927_three: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
|
||||
- assert False
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal('test_ticket47927_three: Failed (expected) to set the telephonenumber for %s: %s' % (
|
||||
- USER_2_DN, e.args[0]['desc']))
|
||||
-
|
||||
- # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
|
||||
- try:
|
||||
- topology_st.standalone.modify_s(USER_3_DN,
|
||||
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
|
||||
- log.fatal('test_ticket47927_three: success to set the telephonenumber for %s' % (USER_3_DN))
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal('test_ticket47927_three: Failed (unexpected) to set the telephonenumber for %s: %s' % (
|
||||
- USER_3_DN, e.args[0]['desc']))
|
||||
- assert False
|
||||
-
|
||||
-
|
||||
-def test_ticket47927_four(topology_st):
|
||||
- '''
|
||||
- Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
|
||||
- Second case: it exists an entry (with the same attribute value) in an excluded scope
|
||||
- of the plugin and we set the value in an entry is in the scope
|
||||
- '''
|
||||
- UNIQUE_VALUE = b'1111'
|
||||
- # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
|
||||
- try:
|
||||
- topology_st.standalone.modify_s(USER_3_DN,
|
||||
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
|
||||
- log.fatal('test_ticket47927_four: success to set the telephonenumber for %s' % USER_3_DN)
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal('test_ticket47927_four: Failed (unexpected) to set the telephonenumber for %s: %s' % (
|
||||
- USER_3_DN, e.args[0]['desc']))
|
||||
- assert False
|
||||
-
|
||||
- # we should be allowed to set this value (because user3 is excluded from scope)
|
||||
- try:
|
||||
- topology_st.standalone.modify_s(USER_1_DN,
|
||||
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal(
|
||||
- 'test_ticket47927_four: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.args[0]['desc']))
|
||||
- assert False
|
||||
-
|
||||
- # we should not be allowed to set this value (because user1 is in the scope)
|
||||
- try:
|
||||
- topology_st.standalone.modify_s(USER_2_DN,
|
||||
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
|
||||
- log.fatal('test_ticket47927_four: unexpected success to set the telephonenumber %s' % USER_2_DN)
|
||||
- assert False
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal('test_ticket47927_four: Failed (expected) to set the telephonenumber for %s: %s' % (
|
||||
- USER_2_DN, e.args[0]['desc']))
|
||||
- pass
|
||||
-
|
||||
-
|
||||
-def test_ticket47927_five(topology_st):
|
||||
- '''
|
||||
- Exclude the EXCLUDED_BIS_CONTAINER_DN from the uniqueness plugin
|
||||
- '''
|
||||
- try:
|
||||
- topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
|
||||
- [(ldap.MOD_ADD, 'uniqueness-exclude-subtrees', ensure_bytes(EXCLUDED_BIS_CONTAINER_DN))])
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal('test_ticket47927_five: Failed to configure plugin for to exclude %s: error %s' % (
|
||||
- EXCLUDED_BIS_CONTAINER_DN, e.args[0]['desc']))
|
||||
- assert False
|
||||
- topology_st.standalone.restart(timeout=120)
|
||||
- topology_st.standalone.getEntry('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', ldap.SCOPE_BASE)
|
||||
-
|
||||
-
|
||||
-def test_ticket47927_six(topology_st):
|
||||
- '''
|
||||
- Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
|
||||
- and EXCLUDED_BIS_CONTAINER_DN
|
||||
- First case: it exists an entry (with the same attribute value) in the scope
|
||||
- of the plugin and we set the value in an entry that is in an excluded scope
|
||||
- '''
|
||||
- UNIQUE_VALUE = b'222'
|
||||
- try:
|
||||
- topology_st.standalone.modify_s(USER_1_DN,
|
||||
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal('test_ticket47927_six: Failed to set the telephonenumber ' + e.args[0]['desc'])
|
||||
- assert False
|
||||
-
|
||||
- # we should not be allowed to set this value (because user1 is in the scope)
|
||||
- try:
|
||||
- topology_st.standalone.modify_s(USER_2_DN,
|
||||
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
|
||||
- log.fatal('test_ticket47927_six: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
|
||||
- assert False
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal('test_ticket47927_six: Failed (expected) to set the telephonenumber for %s: %s' % (
|
||||
- USER_2_DN, e.args[0]['desc']))
|
||||
-
|
||||
- # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
|
||||
- try:
|
||||
- topology_st.standalone.modify_s(USER_3_DN,
|
||||
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
|
||||
- log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_3_DN))
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (
|
||||
- USER_3_DN, e.args[0]['desc']))
|
||||
- assert False
|
||||
- # USER_4_DN is in EXCLUDED_CONTAINER_DN so update should be successful
|
||||
- try:
|
||||
- topology_st.standalone.modify_s(USER_4_DN,
|
||||
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
|
||||
- log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_4_DN))
|
||||
- except ldap.LDAPError as e:
|
||||
- log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (
|
||||
- USER_4_DN, e.args[0]['desc']))
|
||||
- assert False
|
||||
-
|
||||
-
|
||||
-if __name__ == '__main__':
|
||||
- # Run isolated
|
||||
- # -s for DEBUG mode
|
||||
- CURRENT_FILE = os.path.realpath(__file__)
|
||||
- pytest.main("-s %s" % CURRENT_FILE)
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 31bbfa502..977091726 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -175,6 +175,16 @@ class AttributeUniquenessPlugin(Plugin):
|
||||
|
||||
self.set('uniqueness-across-all-subtrees', 'off')
|
||||
|
||||
+ def add_exclude_subtree(self, basedn):
|
||||
+ """Add a uniqueness-exclude-subtrees attribute"""
|
||||
+
|
||||
+ self.add('uniqueness-exclude-subtrees', basedn)
|
||||
+
|
||||
+ def remove_exclude_subtree(self, basedn):
|
||||
+ """Remove a uniqueness-exclude-subtrees attribute"""
|
||||
+
|
||||
+ self.remove('uniqueness-exclude-subtrees', basedn)
|
||||
+
|
||||
|
||||
class AttributeUniquenessPlugins(DSLdapObjects):
|
||||
"""A DSLdapObjects entity which represents Attribute Uniqueness plugin instances
|
||||
--
|
||||
2.49.0
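The lib389 helpers added above (`add_exclude_subtree()` / `remove_exclude_subtree()`) can also be used outside the test suite. A minimal sketch, assuming a standard lib389 topology fixture provides the instance handle and using an illustrative container DN:

```python
# Sketch only: drives the new exclude-subtree helpers from this patch.
# 'topology_st' is the usual lib389 standalone fixture (assumption);
# the excluded container DN is a hypothetical example.
from lib389.plugins import AttributeUniquenessPlugin
from lib389._constants import DEFAULT_SUFFIX

inst = topology_st.standalone

attruniq = AttributeUniquenessPlugin(inst, dn="cn=attribute uniqueness,cn=plugins,cn=config")
attruniq.add_unique_attribute('telephonenumber')
attruniq.enable_all_subtrees()
attruniq.enable()

# Uniqueness is now enforced suffix-wide, except under the excluded container
attruniq.add_exclude_subtree(f"cn=excluded_container,{DEFAULT_SUFFIX}")
inst.restart()

# Later, re-enable enforcement for that container
attruniq.remove_exclude_subtree(f"cn=excluded_container,{DEFAULT_SUFFIX}")
inst.restart()
```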
@ -1,4 +1,4 @@
|
||||
From 2b73c3596e724f314b0e09cf6209e0151260f7e5 Mon Sep 17 00:00:00 2001
|
||||
From 4be22be50dfdf0a5ddd27dc8f9d9618b941c8be8 Mon Sep 17 00:00:00 2001
|
||||
From: Alexander Bokovoy <abokovoy@redhat.com>
|
||||
Date: Wed, 9 Jul 2025 12:08:09 +0300
|
||||
Subject: [PATCH] Issue 6857 - uiduniq: allow specifying match rules in the
|
||||
@ -22,10 +22,10 @@ Signed-off-by: Alexander Bokovoy <abokovoy@redhat.com>
|
||||
1 file changed, 7 insertions(+)
|
||||
|
||||
diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
|
||||
index 5b763b551..15cf88477 100644
|
||||
index 053af4f9d..887e79d78 100644
|
||||
--- a/ldap/servers/plugins/uiduniq/uid.c
|
||||
+++ b/ldap/servers/plugins/uiduniq/uid.c
|
||||
@@ -1031,7 +1031,14 @@ preop_add(Slapi_PBlock *pb)
|
||||
@@ -1030,7 +1030,14 @@ preop_add(Slapi_PBlock *pb)
|
||||
}
|
||||
|
||||
for (i = 0; attrNames && attrNames[i]; i++) {
|
||||
1201
0009-Issue-6756-CLI-UI-Properly-handle-disabled-NDN-cache.patch
Normal file
File diff suppressed because it is too large
2237
0010-Issue-6854-Refactor-for-improved-data-management-685.patch
Normal file
File diff suppressed because it is too large
@ -0,0 +1,65 @@
|
||||
From ad2a06cb64156c55d81b7a1647f9bec7071df9f4 Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Mon, 7 Jul 2025 23:11:17 +0200
|
||||
Subject: [PATCH] Issue 6850 - AddressSanitizer: memory leak in mdb_init
|
||||
|
||||
Bug Description:
|
||||
`dbmdb_componentid` can be allocated multiple times. To avoid a memory
|
||||
leak, allocate it only once, and free at the cleanup.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6850
|
||||
|
||||
Reviewed by: @mreynolds389, @tbordaz (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c | 4 +++-
|
||||
ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c | 2 +-
|
||||
ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c | 5 +++++
|
||||
3 files changed, 9 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
|
||||
index 447f3c70a..54ca03b0b 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
|
||||
@@ -146,7 +146,9 @@ dbmdb_compute_limits(struct ldbminfo *li)
|
||||
int mdb_init(struct ldbminfo *li, config_info *config_array)
|
||||
{
|
||||
dbmdb_ctx_t *conf = (dbmdb_ctx_t *)slapi_ch_calloc(1, sizeof(dbmdb_ctx_t));
|
||||
- dbmdb_componentid = generate_componentid(NULL, "db-mdb");
|
||||
+ if (dbmdb_componentid == NULL) {
|
||||
+ dbmdb_componentid = generate_componentid(NULL, "db-mdb");
|
||||
+ }
|
||||
|
||||
li->li_dblayer_config = conf;
|
||||
strncpy(conf->home, li->li_directory, MAXPATHLEN-1);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
|
||||
index c4e87987f..ed17f979f 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
|
||||
@@ -19,7 +19,7 @@
|
||||
#include <prclist.h>
|
||||
#include <glob.h>
|
||||
|
||||
-Slapi_ComponentId *dbmdb_componentid;
|
||||
+Slapi_ComponentId *dbmdb_componentid = NULL;
|
||||
|
||||
#define BULKOP_MAX_RECORDS 100 /* Max records handled by a single bulk operations */
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c
|
||||
index 2d07db9b5..ae10ac7cf 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c
|
||||
@@ -49,6 +49,11 @@ dbmdb_cleanup(struct ldbminfo *li)
|
||||
}
|
||||
slapi_ch_free((void **)&(li->li_dblayer_config));
|
||||
|
||||
+ if (dbmdb_componentid != NULL) {
|
||||
+ release_componentid(dbmdb_componentid);
|
||||
+ dbmdb_componentid = NULL;
|
||||
+ }
|
||||
+
|
||||
return 0;
|
||||
}
|
||||
|
||||
--
|
||||
2.49.0
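The fix above boils down to an allocate-once / free-once ownership discipline for the component ID. A rough, purely illustrative Python analogue of that discipline (the names here are hypothetical and not part of lib389 or slapd):

```python
# Illustrative analogue of the allocate-once / free-at-cleanup pattern used in
# the C fix: init may be called repeatedly, but the handle is created once and
# released exactly once during cleanup.
_component_handle = None

def backend_init():
    global _component_handle
    if _component_handle is None:      # allocate only on the first init
        _component_handle = object()   # stand-in for generate_componentid()
    return _component_handle

def backend_cleanup():
    global _component_handle
    if _component_handle is not None:  # release exactly once
        _component_handle = None
```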
58
0012-Issue-6848-AddressSanitizer-leak-in-do_search.patch
Normal file
@ -0,0 +1,58 @@
|
||||
From 98a83bb00255f77467a370d3347a8428b6659463 Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Mon, 7 Jul 2025 22:01:09 +0200
|
||||
Subject: [PATCH] Issue 6848 - AddressSanitizer: leak in do_search
|
||||
|
||||
Bug Description:
|
||||
When there's a BER decoding error and the function goes to
|
||||
`free_and_return`, the `attrs` variable is not being freed because it's
|
||||
only freed if `!psearch || rc != 0 || err != 0`, but `err` is still 0 at
|
||||
that point.
|
||||
|
||||
If we reach `free_and_return` from the `ber_scanf` error path, `attrs`
|
||||
was never set in the pblock with `slapi_pblock_set()`, so the
|
||||
`slapi_pblock_get()` call will not retrieve the potentially partially
|
||||
allocated `attrs` from the BER decoding.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6848
|
||||
|
||||
Reviewed by: @tbordaz, @droideck (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/search.c | 14 ++++++++++++--
|
||||
1 file changed, 12 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/search.c b/ldap/servers/slapd/search.c
|
||||
index e9b2c3670..f9d03c090 100644
|
||||
--- a/ldap/servers/slapd/search.c
|
||||
+++ b/ldap/servers/slapd/search.c
|
||||
@@ -235,6 +235,7 @@ do_search(Slapi_PBlock *pb)
|
||||
log_search_access(pb, base, scope, fstr, "decoding error");
|
||||
send_ldap_result(pb, LDAP_PROTOCOL_ERROR, NULL, NULL, 0,
|
||||
NULL);
|
||||
+ err = 1; /* Make sure we free everything */
|
||||
goto free_and_return;
|
||||
}
|
||||
|
||||
@@ -420,8 +421,17 @@ free_and_return:
|
||||
if (!psearch || rc != 0 || err != 0) {
|
||||
slapi_ch_free_string(&fstr);
|
||||
slapi_filter_free(filter, 1);
|
||||
- slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &attrs);
|
||||
- charray_free(attrs); /* passing NULL is fine */
|
||||
+
|
||||
+ /* Get attrs from pblock if it was set there, otherwise use local attrs */
|
||||
+ char **pblock_attrs = NULL;
|
||||
+ slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &pblock_attrs);
|
||||
+ if (pblock_attrs != NULL) {
|
||||
+ charray_free(pblock_attrs); /* Free attrs from pblock */
|
||||
+ slapi_pblock_set(pb, SLAPI_SEARCH_ATTRS, NULL);
|
||||
+ } else if (attrs != NULL) {
|
||||
+ /* Free attrs that were allocated but never put in pblock */
|
||||
+ charray_free(attrs);
|
||||
+ }
|
||||
charray_free(gerattrs); /* passing NULL is fine */
|
||||
/*
|
||||
* Fix for defect 526719 / 553356 : Persistent search op failed.
|
||||
--
|
||||
2.49.0
@ -0,0 +1,58 @@
|
||||
From e89a5acbc1bcc1b460683aa498005d6f0ce7054e Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Fri, 11 Jul 2025 12:32:38 +0200
|
||||
Subject: [PATCH] Issue 6865 - AddressSanitizer: leak in
|
||||
agmt_update_init_status
|
||||
|
||||
Bug Description:
|
||||
We allocate an array of `LDAPMod *` pointers, but never free it:
|
||||
|
||||
```
|
||||
=================================================================
|
||||
==2748356==ERROR: LeakSanitizer: detected memory leaks
|
||||
|
||||
Direct leak of 24 byte(s) in 1 object(s) allocated from:
|
||||
#0 0x7f05e8cb4a07 in __interceptor_malloc (/lib64/libasan.so.6+0xb4a07)
|
||||
#1 0x7f05e85c0138 in slapi_ch_malloc (/usr/lib64/dirsrv/libslapd.so.0+0x1c0138)
|
||||
#2 0x7f05e109e481 in agmt_update_init_status ldap/servers/plugins/replication/repl5_agmt.c:2583
|
||||
#3 0x7f05e10a0aa5 in agmtlist_shutdown ldap/servers/plugins/replication/repl5_agmtlist.c:789
|
||||
#4 0x7f05e10ab6bc in multisupplier_stop ldap/servers/plugins/replication/repl5_init.c:844
|
||||
#5 0x7f05e10ab6bc in multisupplier_stop ldap/servers/plugins/replication/repl5_init.c:837
|
||||
#6 0x7f05e862507d in plugin_call_func ldap/servers/slapd/plugin.c:2001
|
||||
#7 0x7f05e8625be1 in plugin_call_one ldap/servers/slapd/plugin.c:1950
|
||||
#8 0x7f05e8625be1 in plugin_dependency_closeall ldap/servers/slapd/plugin.c:1844
|
||||
#9 0x55e1a7ff9815 in slapd_daemon ldap/servers/slapd/daemon.c:1275
|
||||
#10 0x55e1a7fd36ef in main (/usr/sbin/ns-slapd+0x3e6ef)
|
||||
#11 0x7f05e80295cf in __libc_start_call_main (/lib64/libc.so.6+0x295cf)
|
||||
#12 0x7f05e802967f in __libc_start_main_alias_2 (/lib64/libc.so.6+0x2967f)
|
||||
#13 0x55e1a7fd74a4 in _start (/usr/sbin/ns-slapd+0x424a4)
|
||||
|
||||
SUMMARY: AddressSanitizer: 24 byte(s) leaked in 1 allocation(s).
|
||||
```
|
||||
|
||||
Fix Description:
|
||||
Ensure `mods` is freed in the cleanup code.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6865
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6470
|
||||
|
||||
Reviewed by: @mreynolds389 (Thanks!)
|
||||
---
|
||||
ldap/servers/plugins/replication/repl5_agmt.c | 1 +
|
||||
1 file changed, 1 insertion(+)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
index c818c5857..0a81167b7 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
@@ -2743,6 +2743,7 @@ agmt_update_init_status(Repl_Agmt *ra)
|
||||
} else {
|
||||
PR_Unlock(ra->lock);
|
||||
}
|
||||
+ slapi_ch_free((void **)&mods);
|
||||
slapi_mod_done(&smod_start_time);
|
||||
slapi_mod_done(&smod_end_time);
|
||||
slapi_mod_done(&smod_status);
|
||||
--
|
||||
2.49.0
169
0014-Issue-6859-str2filter-is-not-fully-applying-matching.patch
Normal file
@ -0,0 +1,169 @@
|
||||
From bf58cba210cd785d3abe6ffbbf174481258dcf5e Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 9 Jul 2025 14:18:50 -0400
|
||||
Subject: [PATCH] Issue 6859 - str2filter is not fully applying matching rules
|
||||
|
||||
Description:
|
||||
|
||||
When we have an extended filter, one with a MR applied, it is ignored during
|
||||
internal searches:
|
||||
|
||||
"(cn:CaseExactMatch:=Value)"
|
||||
|
||||
For internal searches we use str2filter() and it doesn't fully apply extended
|
||||
search filter matching rules
|
||||
|
||||
Also needed to update attr uniqueness plugin to apply this change for mod
|
||||
operations (previously only Adds were correctly handling these attribute
|
||||
filters)
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6857
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6859
|
||||
|
||||
Reviewed by: spichugi & tbordaz(Thanks!!)
|
||||
---
|
||||
.../tests/suites/plugins/attruniq_test.py | 65 ++++++++++++++++++-
|
||||
ldap/servers/plugins/uiduniq/uid.c | 7 ++
|
||||
ldap/servers/slapd/plugin_mr.c | 2 +-
|
||||
ldap/servers/slapd/str2filter.c | 8 +++
|
||||
4 files changed, 79 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/plugins/attruniq_test.py b/dirsrvtests/tests/suites/plugins/attruniq_test.py
|
||||
index aac659c29..046952df3 100644
|
||||
--- a/dirsrvtests/tests/suites/plugins/attruniq_test.py
|
||||
+++ b/dirsrvtests/tests/suites/plugins/attruniq_test.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2021 Red Hat, Inc.
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -324,4 +324,65 @@ def test_exclude_subtrees(topology_st):
|
||||
cont2.delete()
|
||||
cont3.delete()
|
||||
attruniq.disable()
|
||||
- attruniq.delete()
|
||||
\ No newline at end of file
|
||||
+ attruniq.delete()
|
||||
+
|
||||
+
|
||||
+def test_matchingrule_attr(topology_st):
|
||||
+ """ Test list extension MR attribute. Check for "cn" using CES (versus it
|
||||
+ being defined as CIS)
|
||||
+
|
||||
+ :id: 5cde4342-6fa3-4225-b23d-0af918981075
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Setup and enable attribute uniqueness plugin to use CN attribute
|
||||
+ with a matching rule of CaseExactMatch.
|
||||
+ 2. Add user with CN value is lowercase
|
||||
+ 3. Add second user with same lowercase CN which should be rejected
|
||||
+ 4. Add second user with same CN value but with mixed case
|
||||
+ 5. Modify second user replacing CN value to lc which should be rejected
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+
|
||||
+ attruniq = AttributeUniquenessPlugin(inst,
|
||||
+ dn="cn=attribute uniqueness,cn=plugins,cn=config")
|
||||
+ attruniq.add_unique_attribute('cn:CaseExactMatch:')
|
||||
+ attruniq.enable_all_subtrees()
|
||||
+ attruniq.enable()
|
||||
+ inst.restart()
|
||||
+
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ users.create(properties={'cn': "common_name",
|
||||
+ 'uid': "uid_name",
|
||||
+ 'sn': "uid_name",
|
||||
+ 'uidNumber': '1',
|
||||
+ 'gidNumber': '11',
|
||||
+ 'homeDirectory': '/home/uid_name'})
|
||||
+
|
||||
+ log.info('Add entry with the exact CN value which should be rejected')
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ users.create(properties={'cn': "common_name",
|
||||
+ 'uid': "uid_name2",
|
||||
+ 'sn': "uid_name2",
|
||||
+ 'uidNumber': '11',
|
||||
+ 'gidNumber': '111',
|
||||
+ 'homeDirectory': '/home/uid_name2'})
|
||||
+
|
||||
+ log.info('Add entry with the mixed case CN value which should be allowed')
|
||||
+ user = users.create(properties={'cn': "Common_Name",
|
||||
+ 'uid': "uid_name2",
|
||||
+ 'sn': "uid_name2",
|
||||
+ 'uidNumber': '11',
|
||||
+ 'gidNumber': '111',
|
||||
+ 'homeDirectory': '/home/uid_name2'})
|
||||
+
|
||||
+ log.info('Mod entry with exact case CN value which should be rejected')
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ user.replace('cn', 'common_name')
|
||||
diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
|
||||
index 887e79d78..fdb1404a0 100644
|
||||
--- a/ldap/servers/plugins/uiduniq/uid.c
|
||||
+++ b/ldap/servers/plugins/uiduniq/uid.c
|
||||
@@ -1178,6 +1178,10 @@ preop_modify(Slapi_PBlock *pb)
|
||||
for (; mods && *mods; mods++) {
|
||||
mod = *mods;
|
||||
for (i = 0; attrNames && attrNames[i]; i++) {
|
||||
+ char *attr_match = strchr(attrNames[i], ':');
|
||||
+ if (attr_match != NULL) {
|
||||
+ attr_match[0] = '\0';
|
||||
+ }
|
||||
if ((slapi_attr_type_cmp(mod->mod_type, attrNames[i], 1) == 0) && /* mod contains target attr */
|
||||
(mod->mod_op & LDAP_MOD_BVALUES) && /* mod is bval encoded (not string val) */
|
||||
(mod->mod_bvalues && mod->mod_bvalues[0]) && /* mod actually contains some values */
|
||||
@@ -1186,6 +1190,9 @@ preop_modify(Slapi_PBlock *pb)
|
||||
{
|
||||
addMod(&checkmods, &checkmodsCapacity, &modcount, mod);
|
||||
}
|
||||
+ if (attr_match != NULL) {
|
||||
+ attr_match[0] = ':';
|
||||
+ }
|
||||
}
|
||||
}
|
||||
if (modcount == 0) {
|
||||
diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c
|
||||
index 9809a4374..757355dbc 100644
|
||||
--- a/ldap/servers/slapd/plugin_mr.c
|
||||
+++ b/ldap/servers/slapd/plugin_mr.c
|
||||
@@ -625,7 +625,7 @@ attempt_mr_filter_create(mr_filter_t *f, struct slapdplugin *mrp, Slapi_PBlock *
|
||||
int rc;
|
||||
int32_t (*mrf_create)(Slapi_PBlock *) = NULL;
|
||||
f->mrf_match = NULL;
|
||||
- pblock_init(pb);
|
||||
+ slapi_pblock_init(pb);
|
||||
if (!(rc = slapi_pblock_set(pb, SLAPI_PLUGIN, mrp)) &&
|
||||
!(rc = slapi_pblock_get(pb, SLAPI_PLUGIN_MR_FILTER_CREATE_FN, &mrf_create)) &&
|
||||
mrf_create != NULL &&
|
||||
diff --git a/ldap/servers/slapd/str2filter.c b/ldap/servers/slapd/str2filter.c
|
||||
index 9fdc500f7..5620b7439 100644
|
||||
--- a/ldap/servers/slapd/str2filter.c
|
||||
+++ b/ldap/servers/slapd/str2filter.c
|
||||
@@ -344,6 +344,14 @@ str2simple(char *str, int unescape_filter)
|
||||
return NULL; /* error */
|
||||
} else {
|
||||
f->f_choice = LDAP_FILTER_EXTENDED;
|
||||
+ if (f->f_mr_oid) {
|
||||
+ /* apply the MR indexers */
|
||||
+ rc = plugin_mr_filter_create(&f->f_mr);
|
||||
+ if (rc) {
|
||||
+ slapi_filter_free(f, 1);
|
||||
+ return NULL; /* error */
|
||||
+ }
|
||||
+ }
|
||||
}
|
||||
} else if (str_find_star(value) == NULL) {
|
||||
f->f_choice = LDAP_FILTER_EQUALITY;
|
||||
--
|
||||
2.49.0
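For context, the matching-rule (extended match) filter syntax that str2filter() now honors internally is the same syntax an LDAP client can send. A small illustrative python-ldap search; the host and credentials are placeholders, not part of the patch:

```python
# Illustrative only: an extended-match filter as a client would send it.
# ':caseExactMatch:' forces case-sensitive comparison on 'cn', which is
# otherwise a case-insensitive attribute.
import ldap

conn = ldap.initialize("ldap://localhost:389")            # placeholder host
conn.simple_bind_s("cn=Directory Manager", "password")    # placeholder creds

entries = conn.search_s("dc=example,dc=com", ldap.SCOPE_SUBTREE,
                        "(cn:caseExactMatch:=Common_Name)")
# Only entries whose cn matches the exact case are returned; this is the same
# kind of filter the uniqueness plugin builds for
# 'uniqueness-attribute-name: cn:CaseExactMatch:'.
```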
163
0015-Issue-6872-compressed-log-rotation-creates-files-wit.patch
Normal file
@ -0,0 +1,163 @@
|
||||
From 4719e7df7ba0eb5e26830812ab9ead51f1e0c5f5 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Tue, 15 Jul 2025 17:56:18 -0400
|
||||
Subject: [PATCH] Issue 6872 - compressed log rotation creates files with world
|
||||
readable permission
|
||||
|
||||
Description:
|
||||
|
||||
When compressing a log file, first create the empty file using open()
|
||||
so we can set the correct permissions right from the start. gzopen()
|
||||
always uses permission 644 and that is not safe. So after creating it
|
||||
with open(), with the correct permissions, then pass the FD to gzdopen()
|
||||
and write the compressed content.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/6872
|
||||
|
||||
Reviewed by: progier(Thanks!)
|
||||
---
|
||||
.../logging/logging_compression_test.py | 15 ++++++++--
|
||||
ldap/servers/slapd/log.c | 28 +++++++++++++------
|
||||
ldap/servers/slapd/schema.c | 2 +-
|
||||
3 files changed, 33 insertions(+), 12 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/logging/logging_compression_test.py b/dirsrvtests/tests/suites/logging/logging_compression_test.py
|
||||
index e30874cc0..3a987d62c 100644
|
||||
--- a/dirsrvtests/tests/suites/logging/logging_compression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/logging/logging_compression_test.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2022 Red Hat, Inc.
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -22,12 +22,21 @@ log = logging.getLogger(__name__)
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
+
|
||||
def log_rotated_count(log_type, log_dir, check_compressed=False):
|
||||
- # Check if the log was rotated
|
||||
+ """
|
||||
+ Check if the log was rotated and has the correct permissions
|
||||
+ """
|
||||
log_file = f'{log_dir}/{log_type}.2*'
|
||||
if check_compressed:
|
||||
log_file += ".gz"
|
||||
- return len(glob.glob(log_file))
|
||||
+ log_files = glob.glob(log_file)
|
||||
+ for logf in log_files:
|
||||
+ # Check permissions
|
||||
+ st = os.stat(logf)
|
||||
+ assert oct(st.st_mode) == '0o100600' # 0600
|
||||
+
|
||||
+ return len(log_files)
|
||||
|
||||
|
||||
def update_and_sleep(inst, suffix, sleep=True):
|
||||
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
|
||||
index e859682fe..f535011ab 100644
|
||||
--- a/ldap/servers/slapd/log.c
|
||||
+++ b/ldap/servers/slapd/log.c
|
||||
@@ -174,17 +174,28 @@ get_syslog_loglevel(int loglevel)
|
||||
}
|
||||
|
||||
static int
|
||||
-compress_log_file(char *log_name)
|
||||
+compress_log_file(char *log_name, int32_t mode)
|
||||
{
|
||||
char gzip_log[BUFSIZ] = {0};
|
||||
char buf[LOG_CHUNK] = {0};
|
||||
size_t bytes_read = 0;
|
||||
gzFile outfile = NULL;
|
||||
FILE *source = NULL;
|
||||
+ int fd = 0;
|
||||
|
||||
PR_snprintf(gzip_log, sizeof(gzip_log), "%s.gz", log_name);
|
||||
- if ((outfile = gzopen(gzip_log,"wb")) == NULL) {
|
||||
- /* Failed to open new gzip file */
|
||||
+
|
||||
+ /*
|
||||
+ * Try to open the file as we may have an incorrect path. We also need to
|
||||
+ * set the permissions using open() as gzopen() creates the file with
|
||||
+ * 644 permissions (world readable - bad). So we create an empty file with
|
||||
+ * the correct permissions, then we pass the FD to gzdopen() to write the
|
||||
+ * compressed content.
|
||||
+ */
|
||||
+ if ((fd = open(gzip_log, O_WRONLY|O_CREAT|O_TRUNC, mode)) >= 0) {
|
||||
+ /* FIle successfully created, now pass the FD to gzdopen() */
|
||||
+ outfile = gzdopen(fd, "ab");
|
||||
+ } else {
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -193,6 +204,7 @@ compress_log_file(char *log_name)
|
||||
gzclose(outfile);
|
||||
return -1;
|
||||
}
|
||||
+
|
||||
bytes_read = fread(buf, 1, LOG_CHUNK, source);
|
||||
while (bytes_read > 0) {
|
||||
int bytes_written = gzwrite(outfile, buf, bytes_read);
|
||||
@@ -3402,7 +3414,7 @@ log__open_accesslogfile(int logfile_state, int locked)
|
||||
return LOG_UNABLE_TO_OPENFILE;
|
||||
}
|
||||
} else if (loginfo.log_access_compress) {
|
||||
- if (compress_log_file(newfile) != 0) {
|
||||
+ if (compress_log_file(newfile, loginfo.log_access_mode) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile",
|
||||
"failed to compress rotated access log (%s)\n",
|
||||
newfile);
|
||||
@@ -3570,7 +3582,7 @@ log__open_securitylogfile(int logfile_state, int locked)
|
||||
return LOG_UNABLE_TO_OPENFILE;
|
||||
}
|
||||
} else if (loginfo.log_security_compress) {
|
||||
- if (compress_log_file(newfile) != 0) {
|
||||
+ if (compress_log_file(newfile, loginfo.log_security_mode) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "log__open_securitylogfile",
|
||||
"failed to compress rotated security audit log (%s)\n",
|
||||
newfile);
|
||||
@@ -6288,7 +6300,7 @@ log__open_errorlogfile(int logfile_state, int locked)
|
||||
return LOG_UNABLE_TO_OPENFILE;
|
||||
}
|
||||
} else if (loginfo.log_error_compress) {
|
||||
- if (compress_log_file(newfile) != 0) {
|
||||
+ if (compress_log_file(newfile, loginfo.log_error_mode) != 0) {
|
||||
PR_snprintf(buffer, sizeof(buffer), "Failed to compress errors log file (%s)\n", newfile);
|
||||
log__error_emergency(buffer, 1, 1);
|
||||
} else {
|
||||
@@ -6476,7 +6488,7 @@ log__open_auditlogfile(int logfile_state, int locked)
|
||||
return LOG_UNABLE_TO_OPENFILE;
|
||||
}
|
||||
} else if (loginfo.log_audit_compress) {
|
||||
- if (compress_log_file(newfile) != 0) {
|
||||
+ if (compress_log_file(newfile, loginfo.log_audit_mode) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile",
|
||||
"failed to compress rotated audit log (%s)\n",
|
||||
newfile);
|
||||
@@ -6641,7 +6653,7 @@ log__open_auditfaillogfile(int logfile_state, int locked)
|
||||
return LOG_UNABLE_TO_OPENFILE;
|
||||
}
|
||||
} else if (loginfo.log_auditfail_compress) {
|
||||
- if (compress_log_file(newfile) != 0) {
|
||||
+ if (compress_log_file(newfile, loginfo.log_auditfail_mode) != 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile",
|
||||
"failed to compress rotated auditfail log (%s)\n",
|
||||
newfile);
|
||||
diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c
|
||||
index a8e6b1210..9ef4ee4bf 100644
|
||||
--- a/ldap/servers/slapd/schema.c
|
||||
+++ b/ldap/servers/slapd/schema.c
|
||||
@@ -903,7 +903,7 @@ oc_check_allowed_sv(Slapi_PBlock *pb, Slapi_Entry *e, const char *type, struct o
|
||||
|
||||
if (pb) {
|
||||
PR_snprintf(errtext, sizeof(errtext),
|
||||
- "attribute \"%s\" not allowed\n",
|
||||
+ "attribute \"%s\" not allowed",
|
||||
escape_string(type, ebuf));
|
||||
slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, errtext);
|
||||
}
|
||||
--
|
||||
2.49.0
|
||||
|
||||
116
0016-Issue-6878-Prevent-repeated-disconnect-logs-during-s.patch
Normal file
@ -0,0 +1,116 @@
|
||||
From 1cc3715130650d9e778430792c5e2c2e9690cc72 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Fri, 18 Jul 2025 18:50:33 -0700
|
||||
Subject: [PATCH] Issue 6878 - Prevent repeated disconnect logs during shutdown
|
||||
(#6879)
|
||||
|
||||
Description: Avoid logging non-active initialized connections via CONN in disconnect_server_nomutex_ext by adding a check to skip invalid conn=0 with invalid sockets, preventing excessive repeated messages.
|
||||
|
||||
Update ds_logs_test.py by adding test_no_repeated_disconnect_messages to verify the fix.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6878
|
||||
|
||||
Reviewed by: @mreynolds389 (Thanks!)
|
||||
---
|
||||
.../tests/suites/ds_logs/ds_logs_test.py | 51 ++++++++++++++++++-
|
||||
ldap/servers/slapd/connection.c | 15 +++---
|
||||
2 files changed, 59 insertions(+), 7 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 209d63b5d..6fd790c18 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -24,7 +24,7 @@ from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, Aut
|
||||
from lib389.idm.user import UserAccounts, UserAccount
|
||||
from lib389.idm.group import Groups
|
||||
from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
-from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, PASSWORD
|
||||
+from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, PASSWORD, ErrorLog
|
||||
from lib389.utils import ds_is_older, ds_is_newer
|
||||
from lib389.config import RSA
|
||||
from lib389.dseldif import DSEldif
|
||||
@@ -1410,6 +1410,55 @@ def test_errorlog_buffering(topology_st, request):
|
||||
assert inst.ds_error_log.match(".*slapd_daemon - slapd started.*")
|
||||
|
||||
|
||||
+def test_no_repeated_disconnect_messages(topology_st):
|
||||
+ """Test that there are no repeated "Not setting conn 0 to be disconnected: socket is invalid" messages on restart
|
||||
+
|
||||
+ :id: 72b5e1ce-2db8-458f-b2cd-0a0b6525f51f
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Set error log level to CONNECTION
|
||||
+ 2. Clear existing error logs
|
||||
+ 3. Restart the server with 30 second timeout
|
||||
+ 4. Check error log for repeated disconnect messages
|
||||
+ 5. Verify there are no more than 10 occurrences of the disconnect message
|
||||
+ :expectedresults:
|
||||
+ 1. Error log level should be set successfully
|
||||
+ 2. Error logs should be cleared
|
||||
+ 3. Server should restart successfully within 30 seconds
|
||||
+ 4. Error log should be accessible
|
||||
+ 5. There should be no more than 10 repeated disconnect messages
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+
|
||||
+ log.info('Set error log level to CONNECTION')
|
||||
+ inst.config.loglevel([ErrorLog.CONNECT])
|
||||
+ current_level = inst.config.get_attr_val_int('nsslapd-errorlog-level')
|
||||
+ log.info(f'Error log level set to: {current_level}')
|
||||
+
|
||||
+ log.info('Clear existing error logs')
|
||||
+ inst.deleteErrorLogs()
|
||||
+
|
||||
+ log.info('Restart the server with 30 second timeout')
|
||||
+ inst.restart(timeout=30)
|
||||
+
|
||||
+ log.info('Check error log for repeated disconnect messages')
|
||||
+ disconnect_message = "Not setting conn 0 to be disconnected: socket is invalid"
|
||||
+
|
||||
+ # Count occurrences of the disconnect message
|
||||
+ error_log_lines = inst.ds_error_log.readlines()
|
||||
+ disconnect_count = 0
|
||||
+
|
||||
+ for line in error_log_lines:
|
||||
+ if disconnect_message in line:
|
||||
+ disconnect_count += 1
|
||||
+
|
||||
+ log.info(f'Found {disconnect_count} occurrences of disconnect message')
|
||||
+
|
||||
+ log.info('Verify there are no more than 10 occurrences')
|
||||
+ assert disconnect_count <= 10, f"Found {disconnect_count} repeated disconnect messages, expected <= 10"
|
||||
+
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
|
||||
index 9f3c374cf..b3ca2e773 100644
|
||||
--- a/ldap/servers/slapd/connection.c
|
||||
+++ b/ldap/servers/slapd/connection.c
|
||||
@@ -2556,12 +2556,15 @@ disconnect_server_nomutex_ext(Connection *conn, PRUint64 opconnid, int opid, PRE
|
||||
}
|
||||
}
|
||||
} else {
|
||||
- slapi_log_err(SLAPI_LOG_CONNS, "disconnect_server_nomutex_ext",
|
||||
- "Not setting conn %d to be disconnected: %s\n",
|
||||
- conn->c_sd,
|
||||
- (conn->c_sd == SLAPD_INVALID_SOCKET) ? "socket is invalid" :
|
||||
- ((conn->c_connid != opconnid) ? "conn id does not match op conn id" :
|
||||
- ((conn->c_flags & CONN_FLAG_CLOSING) ? "conn is closing" : "unknown")));
|
||||
+ /* We avoid logging an invalid conn=0 connection as it is not a real connection. */
|
||||
+ if (!(conn->c_sd == SLAPD_INVALID_SOCKET && conn->c_connid == 0)) {
|
||||
+ slapi_log_err(SLAPI_LOG_CONNS, "disconnect_server_nomutex_ext",
|
||||
+ "Not setting conn %d to be disconnected: %s\n",
|
||||
+ conn->c_sd,
|
||||
+ (conn->c_sd == SLAPD_INVALID_SOCKET) ? "socket is invalid" :
|
||||
+ ((conn->c_connid != opconnid) ? "conn id does not match op conn id" :
|
||||
+ ((conn->c_flags & CONN_FLAG_CLOSING) ? "conn is closing" : "unknown")));
|
||||
+ }
|
||||
}
|
||||
}
|
||||
|
||||
--
|
||||
2.49.0
|
||||
|
||||
590
0017-Issue-6888-Missing-access-JSON-logging-for-TLS-Clien.patch
Normal file
@ -0,0 +1,590 @@
|
||||
From 5ff051f936a5bc4f5e1edb17ee1c3149b66644a2 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 16 Jul 2025 20:54:48 -0400
|
||||
Subject: [PATCH] Issue 6888 - Missing access JSON logging for TLS/Client auth
|
||||
|
||||
Description:
|
||||
|
||||
TLS/Client auth logging was not converted to JSON (auth.c got missed)
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6888
|
||||
|
||||
Reviewed by: spichugi(Thanks!)
|
||||
---
|
||||
.../logging/access_json_logging_test.py | 96 ++++++++-
|
||||
ldap/servers/slapd/accesslog.c | 114 +++++++++++
|
||||
ldap/servers/slapd/auth.c | 182 +++++++++++++-----
|
||||
ldap/servers/slapd/log.c | 2 +
|
||||
ldap/servers/slapd/slapi-private.h | 10 +
|
||||
5 files changed, 353 insertions(+), 51 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/logging/access_json_logging_test.py b/dirsrvtests/tests/suites/logging/access_json_logging_test.py
|
||||
index ae91dc487..f0dc861a7 100644
|
||||
--- a/dirsrvtests/tests/suites/logging/access_json_logging_test.py
|
||||
+++ b/dirsrvtests/tests/suites/logging/access_json_logging_test.py
|
||||
@@ -19,6 +19,8 @@ from lib389.idm.user import UserAccounts
|
||||
from lib389.dirsrv_log import DirsrvAccessJSONLog
|
||||
from lib389.index import VLVSearch, VLVIndex
|
||||
from lib389.tasks import Tasks
|
||||
+from lib389.config import CertmapLegacy
|
||||
+from lib389.nss_ssl import NssSsl
|
||||
from ldap.controls.vlv import VLVRequestControl
|
||||
from ldap.controls.sss import SSSRequestControl
|
||||
from ldap.controls import SimplePagedResultsControl
|
||||
@@ -67,11 +69,11 @@ def get_log_event(inst, op, key=None, val=None, key2=None, val2=None):
|
||||
if val == str(event[key]).lower() and \
|
||||
val2 == str(event[key2]).lower():
|
||||
return event
|
||||
-
|
||||
- elif key is not None and key in event:
|
||||
- val = str(val).lower()
|
||||
- if val == str(event[key]).lower():
|
||||
- return event
|
||||
+ elif key is not None:
|
||||
+ if key in event:
|
||||
+ val = str(val).lower()
|
||||
+ if val == str(event[key]).lower():
|
||||
+ return event
|
||||
else:
|
||||
return event
|
||||
|
||||
@@ -163,6 +165,7 @@ def test_access_json_format(topo_m2, setup_test):
|
||||
14. Test PAGED SEARCH is logged correctly
|
||||
15. Test PERSISTENT SEARCH is logged correctly
|
||||
16. Test EXTENDED OP
|
||||
+ 17. Test TLS_INFO is logged correctly
|
||||
:expectedresults:
|
||||
1. Success
|
||||
2. Success
|
||||
@@ -180,6 +183,7 @@ def test_access_json_format(topo_m2, setup_test):
|
||||
14. Success
|
||||
15. Success
|
||||
16. Success
|
||||
+ 17. Success
|
||||
"""
|
||||
|
||||
inst = topo_m2.ms["supplier1"]
|
||||
@@ -560,6 +564,88 @@ def test_access_json_format(topo_m2, setup_test):
|
||||
assert event['oid_name'] == "REPL_END_NSDS50_REPLICATION_REQUEST_OID"
|
||||
assert event['name'] == "replication-multisupplier-extop"
|
||||
|
||||
+ #
|
||||
+ # TLS INFO/TLS CLIENT INFO
|
||||
+ #
|
||||
+ RDN_TEST_USER = 'testuser'
|
||||
+ RDN_TEST_USER_WRONG = 'testuser_wrong'
|
||||
+ inst.enable_tls()
|
||||
+ inst.restart()
|
||||
+
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ user = users.create(properties={
|
||||
+ 'uid': RDN_TEST_USER,
|
||||
+ 'cn': RDN_TEST_USER,
|
||||
+ 'sn': RDN_TEST_USER,
|
||||
+ 'uidNumber': '1000',
|
||||
+ 'gidNumber': '2000',
|
||||
+ 'homeDirectory': f'/home/{RDN_TEST_USER}'
|
||||
+ })
|
||||
+
|
||||
+ ssca_dir = inst.get_ssca_dir()
|
||||
+ ssca = NssSsl(dbpath=ssca_dir)
|
||||
+ ssca.create_rsa_user(RDN_TEST_USER)
|
||||
+ ssca.create_rsa_user(RDN_TEST_USER_WRONG)
|
||||
+
|
||||
+ # Get the details of where the key and crt are.
|
||||
+ tls_locs = ssca.get_rsa_user(RDN_TEST_USER)
|
||||
+ tls_locs_wrong = ssca.get_rsa_user(RDN_TEST_USER_WRONG)
|
||||
+
|
||||
+ user.enroll_certificate(tls_locs['crt_der_path'])
|
||||
+
|
||||
+ # Turn on the certmap.
|
||||
+ cm = CertmapLegacy(inst)
|
||||
+ certmaps = cm.list()
|
||||
+ certmaps['default']['DNComps'] = ''
|
||||
+ certmaps['default']['FilterComps'] = ['cn']
|
||||
+ certmaps['default']['VerifyCert'] = 'off'
|
||||
+ cm.set(certmaps)
|
||||
+
|
||||
+ # Check that EXTERNAL is listed in supported mechns.
|
||||
+ assert (inst.rootdse.supports_sasl_external())
|
||||
+
|
||||
+ # Restart to allow certmaps to be re-read: Note, we CAN NOT use post_open
|
||||
+ # here, it breaks on auth. see lib389/__init__.py
|
||||
+ inst.restart(post_open=False)
|
||||
+
|
||||
+ # Attempt a bind with TLS external
|
||||
+ inst.open(saslmethod='EXTERNAL', connOnly=True, certdir=ssca_dir,
|
||||
+ userkey=tls_locs['key'], usercert=tls_locs['crt'])
|
||||
+ inst.restart()
|
||||
+
|
||||
+ event = get_log_event(inst, "TLS_INFO")
|
||||
+ assert event is not None
|
||||
+ assert 'tls_version' in event
|
||||
+ assert 'keysize' in event
|
||||
+ assert 'cipher' in event
|
||||
+
|
||||
+ event = get_log_event(inst, "TLS_CLIENT_INFO",
|
||||
+ "subject",
|
||||
+ "CN=testuser,O=testing,L=389ds,ST=Queensland,C=AU")
|
||||
+ assert event is not None
|
||||
+ assert 'tls_version' in event
|
||||
+ assert 'keysize' in event
|
||||
+ assert 'issuer' in event
|
||||
+
|
||||
+ event = get_log_event(inst, "TLS_CLIENT_INFO",
|
||||
+ "client_dn",
|
||||
+ "uid=testuser,ou=People,dc=example,dc=com")
|
||||
+ assert event is not None
|
||||
+ assert 'tls_version' in event
|
||||
+ assert event['msg'] == "client bound"
|
||||
+
|
||||
+ # Check for failed certmap error
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ inst.open(saslmethod='EXTERNAL', connOnly=True, certdir=ssca_dir,
|
||||
+ userkey=tls_locs_wrong['key'],
|
||||
+ usercert=tls_locs_wrong['crt'])
|
||||
+
|
||||
+ event = get_log_event(inst, "TLS_CLIENT_INFO", "err", -185)
|
||||
+ assert event is not None
|
||||
+ assert 'tls_version' in event
|
||||
+ assert event['msg'] == "failed to map client certificate to LDAP DN"
|
||||
+ assert event['err_msg'] == "Certificate couldn't be mapped to an ldap entry"
|
||||
+
|
||||
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
diff --git a/ldap/servers/slapd/accesslog.c b/ldap/servers/slapd/accesslog.c
|
||||
index 68022fe38..072ace203 100644
|
||||
--- a/ldap/servers/slapd/accesslog.c
|
||||
+++ b/ldap/servers/slapd/accesslog.c
|
||||
@@ -1147,3 +1147,117 @@ slapd_log_access_sort(slapd_log_pblock *logpb)
|
||||
|
||||
return rc;
|
||||
}
|
||||
+
|
||||
+/*
|
||||
+ * TLS connection
|
||||
+ *
|
||||
+ * int32_t log_format
|
||||
+ * time_t conn_time
|
||||
+ * uint64_t conn_id
|
||||
+ * const char *msg
|
||||
+ * const char *tls_version
|
||||
+ * int32_t keysize
|
||||
+ * const char *cipher
|
||||
+ * int32_t err
|
||||
+ * const char *err_str
|
||||
+ */
|
||||
+int32_t
|
||||
+slapd_log_access_tls(slapd_log_pblock *logpb)
|
||||
+{
|
||||
+ int32_t rc = 0;
|
||||
+ char *msg = NULL;
|
||||
+ json_object *json_obj = NULL;
|
||||
+
|
||||
+ if ((json_obj = build_base_obj(logpb, "TLS_INFO")) == NULL) {
|
||||
+ return rc;
|
||||
+ }
|
||||
+
|
||||
+ if (logpb->msg) {
|
||||
+ json_object_object_add(json_obj, "msg", json_obj_add_str(logpb->msg));
|
||||
+ }
|
||||
+ if (logpb->tls_version) {
|
||||
+ json_object_object_add(json_obj, "tls_version", json_obj_add_str(logpb->tls_version));
|
||||
+ }
|
||||
+ if (logpb->cipher) {
|
||||
+ json_object_object_add(json_obj, "cipher", json_obj_add_str(logpb->cipher));
|
||||
+ }
|
||||
+ if (logpb->keysize) {
|
||||
+ json_object_object_add(json_obj, "keysize", json_object_new_int(logpb->keysize));
|
||||
+ }
|
||||
+ if (logpb->err_str) {
|
||||
+ json_object_object_add(json_obj, "err", json_object_new_int(logpb->err));
|
||||
+ json_object_object_add(json_obj, "err_msg", json_obj_add_str(logpb->err_str));
|
||||
+ }
|
||||
+
|
||||
+ /* Convert json object to string and log it */
|
||||
+ msg = (char *)json_object_to_json_string_ext(json_obj, logpb->log_format);
|
||||
+ rc = slapd_log_access_json(msg);
|
||||
+
|
||||
+ /* Done with JSON object - free it */
|
||||
+ json_object_put(json_obj);
|
||||
+
|
||||
+ return rc;
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * TLS client auth
|
||||
+ *
|
||||
+ * int32_t log_format
|
||||
+ * time_t conn_time
|
||||
+ * uint64_t conn_id
|
||||
+ * const char* tls_version
|
||||
+ * const char* keysize
|
||||
+ * const char* cipher
|
||||
+ * const char* msg
|
||||
+ * const char* subject
|
||||
+ * const char* issuer
|
||||
+ * int32_t err
|
||||
+ * const char* err_str
|
||||
+ * const char *client_dn
|
||||
+ */
|
||||
+int32_t
|
||||
+slapd_log_access_tls_client_auth(slapd_log_pblock *logpb)
|
||||
+{
|
||||
+ int32_t rc = 0;
|
||||
+ char *msg = NULL;
|
||||
+ json_object *json_obj = NULL;
|
||||
+
|
||||
+ if ((json_obj = build_base_obj(logpb, "TLS_CLIENT_INFO")) == NULL) {
|
||||
+ return rc;
|
||||
+ }
|
||||
+
|
||||
+ if (logpb->tls_version) {
|
||||
+ json_object_object_add(json_obj, "tls_version", json_obj_add_str(logpb->tls_version));
|
||||
+ }
|
||||
+ if (logpb->cipher) {
|
||||
+ json_object_object_add(json_obj, "cipher", json_obj_add_str(logpb->cipher));
|
||||
+ }
|
||||
+ if (logpb->keysize) {
|
||||
+ json_object_object_add(json_obj, "keysize", json_object_new_int(logpb->keysize));
|
||||
+ }
|
||||
+ if (logpb->subject) {
|
||||
+ json_object_object_add(json_obj, "subject", json_obj_add_str(logpb->subject));
|
||||
+ }
|
||||
+ if (logpb->issuer) {
|
||||
+ json_object_object_add(json_obj, "issuer", json_obj_add_str(logpb->issuer));
|
||||
+ }
|
||||
+ if (logpb->client_dn) {
|
||||
+ json_object_object_add(json_obj, "client_dn", json_obj_add_str(logpb->client_dn));
|
||||
+ }
|
||||
+ if (logpb->msg) {
|
||||
+ json_object_object_add(json_obj, "msg", json_obj_add_str(logpb->msg));
|
||||
+ }
|
||||
+ if (logpb->err_str) {
|
||||
+ json_object_object_add(json_obj, "err", json_object_new_int(logpb->err));
|
||||
+ json_object_object_add(json_obj, "err_msg", json_obj_add_str(logpb->err_str));
|
||||
+ }
|
||||
+
|
||||
+ /* Convert json object to string and log it */
|
||||
+ msg = (char *)json_object_to_json_string_ext(json_obj, logpb->log_format);
|
||||
+ rc = slapd_log_access_json(msg);
|
||||
+
|
||||
+ /* Done with JSON object - free it */
|
||||
+ json_object_put(json_obj);
|
||||
+
|
||||
+ return rc;
|
||||
+}
|
||||
diff --git a/ldap/servers/slapd/auth.c b/ldap/servers/slapd/auth.c
|
||||
index e4231bf45..48e4b7129 100644
|
||||
--- a/ldap/servers/slapd/auth.c
|
||||
+++ b/ldap/servers/slapd/auth.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -363,19 +363,32 @@ handle_bad_certificate(void *clientData, PRFileDesc *prfd)
|
||||
char sbuf[BUFSIZ], ibuf[BUFSIZ];
|
||||
Connection *conn = (Connection *)clientData;
|
||||
CERTCertificate *clientCert = slapd_ssl_peerCertificate(prfd);
|
||||
-
|
||||
PRErrorCode errorCode = PR_GetError();
|
||||
char *subject = subject_of(clientCert);
|
||||
char *issuer = issuer_of(clientCert);
|
||||
- slapi_log_access(LDAP_DEBUG_STATS,
|
||||
- "conn=%" PRIu64 " " SLAPI_COMPONENT_NAME_NSPR " error %i (%s); unauthenticated client %s; issuer %s\n",
|
||||
- conn->c_connid, errorCode, slapd_pr_strerror(errorCode),
|
||||
- subject ? escape_string(subject, sbuf) : "NULL",
|
||||
- issuer ? escape_string(issuer, ibuf) : "NULL");
|
||||
+ int32_t log_format = config_get_accesslog_log_format();
|
||||
+ slapd_log_pblock logpb = {0};
|
||||
+
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.msg = "unauthenticated client";
|
||||
+ logpb.subject = subject ? escape_string(subject, sbuf) : "NULL";
|
||||
+ logpb.issuer = issuer ? escape_string(issuer, ibuf) : "NULL";
|
||||
+ logpb.err = errorCode;
|
||||
+ logpb.err_str = slapd_pr_strerror(errorCode);
|
||||
+ slapd_log_access_tls_client_auth(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " " SLAPI_COMPONENT_NAME_NSPR " error %i (%s); unauthenticated client %s; issuer %s\n",
|
||||
+ conn->c_connid, errorCode, slapd_pr_strerror(errorCode),
|
||||
+ subject ? escape_string(subject, sbuf) : "NULL",
|
||||
+ issuer ? escape_string(issuer, ibuf) : "NULL");
|
||||
+ }
|
||||
if (issuer)
|
||||
- free(issuer);
|
||||
+ slapi_ch_free_string(&issuer);
|
||||
if (subject)
|
||||
- free(subject);
|
||||
+ slapi_ch_free_string(&subject);
|
||||
if (clientCert)
|
||||
CERT_DestroyCertificate(clientCert);
|
||||
return -1; /* non-zero means reject this certificate */
|
||||
@@ -394,7 +407,8 @@ handle_handshake_done(PRFileDesc *prfd, void *clientData)
|
||||
{
|
||||
Connection *conn = (Connection *)clientData;
|
||||
CERTCertificate *clientCert = slapd_ssl_peerCertificate(prfd);
|
||||
-
|
||||
+ int32_t log_format = config_get_accesslog_log_format();
|
||||
+ slapd_log_pblock logpb = {0};
|
||||
char *clientDN = NULL;
|
||||
int keySize = 0;
|
||||
char *cipher = NULL;
|
||||
@@ -403,19 +417,39 @@ handle_handshake_done(PRFileDesc *prfd, void *clientData)
|
||||
SSLCipherSuiteInfo cipherInfo;
|
||||
char *subject = NULL;
|
||||
char sslversion[64];
|
||||
+ int err = 0;
|
||||
|
||||
if ((slapd_ssl_getChannelInfo(prfd, &channelInfo, sizeof(channelInfo))) != SECSuccess) {
|
||||
PRErrorCode errorCode = PR_GetError();
|
||||
- slapi_log_access(LDAP_DEBUG_STATS,
|
||||
- "conn=%" PRIu64 " SSL failed to obtain channel info; " SLAPI_COMPONENT_NAME_NSPR " error %i (%s)\n",
|
||||
- conn->c_connid, errorCode, slapd_pr_strerror(errorCode));
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.err = errorCode;
|
||||
+ logpb.err_str = slapd_pr_strerror(errorCode);
|
||||
+ logpb.msg = "SSL failed to obtain channel info; " SLAPI_COMPONENT_NAME_NSPR;
|
||||
+ slapd_log_access_tls(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " SSL failed to obtain channel info; " SLAPI_COMPONENT_NAME_NSPR " error %i (%s)\n",
|
||||
+ conn->c_connid, errorCode, slapd_pr_strerror(errorCode));
|
||||
+ }
|
||||
goto done;
|
||||
}
|
||||
+
|
||||
if ((slapd_ssl_getCipherSuiteInfo(channelInfo.cipherSuite, &cipherInfo, sizeof(cipherInfo))) != SECSuccess) {
|
||||
PRErrorCode errorCode = PR_GetError();
|
||||
- slapi_log_access(LDAP_DEBUG_STATS,
|
||||
- "conn=%" PRIu64 " SSL failed to obtain cipher info; " SLAPI_COMPONENT_NAME_NSPR " error %i (%s)\n",
|
||||
- conn->c_connid, errorCode, slapd_pr_strerror(errorCode));
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.err = errorCode;
|
||||
+ logpb.err_str = slapd_pr_strerror(errorCode);
|
||||
+ logpb.msg = "SSL failed to obtain cipher info; " SLAPI_COMPONENT_NAME_NSPR;
|
||||
+ slapd_log_access_tls(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " SSL failed to obtain cipher info; " SLAPI_COMPONENT_NAME_NSPR " error %i (%s)\n",
|
||||
+ conn->c_connid, errorCode, slapd_pr_strerror(errorCode));
|
||||
+ }
|
||||
goto done;
|
||||
}
|
||||
|
||||
@@ -434,47 +468,84 @@ handle_handshake_done(PRFileDesc *prfd, void *clientData)
|
||||
|
||||
if (config_get_SSLclientAuth() == SLAPD_SSLCLIENTAUTH_OFF) {
|
||||
(void)slapi_getSSLVersion_str(channelInfo.protocolVersion, sslversion, sizeof(sslversion));
|
||||
- slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " %s %i-bit %s\n",
|
||||
- conn->c_connid,
|
||||
- sslversion, keySize, cipher ? cipher : "NULL");
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.tls_version = sslversion;
|
||||
+ logpb.keysize = keySize;
|
||||
+ logpb.cipher = cipher ? cipher : "NULL";
|
||||
+ slapd_log_access_tls(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " %s %i-bit %s\n",
|
||||
+ conn->c_connid,
|
||||
+ sslversion, keySize, cipher ? cipher : "NULL");
|
||||
+ }
|
||||
goto done;
|
||||
}
|
||||
if (clientCert == NULL) {
|
||||
(void)slapi_getSSLVersion_str(channelInfo.protocolVersion, sslversion, sizeof(sslversion));
|
||||
- slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " %s %i-bit %s\n",
|
||||
- conn->c_connid,
|
||||
- sslversion, keySize, cipher ? cipher : "NULL");
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.tls_version = sslversion;
|
||||
+ logpb.keysize = keySize;
|
||||
+ logpb.cipher = cipher ? cipher : "NULL";
|
||||
+ slapd_log_access_tls(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " %s %i-bit %s\n",
|
||||
+ conn->c_connid,
|
||||
+ sslversion, keySize, cipher ? cipher : "NULL");
|
||||
+ }
|
||||
} else {
|
||||
subject = subject_of(clientCert);
|
||||
if (!subject) {
|
||||
(void)slapi_getSSLVersion_str(channelInfo.protocolVersion,
|
||||
sslversion, sizeof(sslversion));
|
||||
- slapi_log_access(LDAP_DEBUG_STATS,
|
||||
- "conn=%" PRIu64 " %s %i-bit %s; missing subject\n",
|
||||
- conn->c_connid,
|
||||
- sslversion, keySize, cipher ? cipher : "NULL");
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.msg = "missing subject";
|
||||
+ logpb.tls_version = sslversion;
|
||||
+ logpb.keysize = keySize;
|
||||
+ logpb.cipher = cipher ? cipher : "NULL";
|
||||
+ slapd_log_access_tls_client_auth(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " %s %i-bit %s; missing subject\n",
|
||||
+ conn->c_connid,
|
||||
+ sslversion, keySize, cipher ? cipher : "NULL");
|
||||
+ }
|
||||
goto done;
|
||||
- }
|
||||
- {
|
||||
+ } else {
|
||||
char *issuer = issuer_of(clientCert);
|
||||
char sbuf[BUFSIZ], ibuf[BUFSIZ];
|
||||
(void)slapi_getSSLVersion_str(channelInfo.protocolVersion,
|
||||
sslversion, sizeof(sslversion));
|
||||
- slapi_log_access(LDAP_DEBUG_STATS,
|
||||
- "conn=%" PRIu64 " %s %i-bit %s; client %s; issuer %s\n",
|
||||
- conn->c_connid,
|
||||
- sslversion, keySize,
|
||||
- cipher ? cipher : "NULL",
|
||||
- escape_string(subject, sbuf),
|
||||
- issuer ? escape_string(issuer, ibuf) : "NULL");
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.tls_version = sslversion;
|
||||
+ logpb.keysize = keySize;
|
||||
+ logpb.cipher = cipher ? cipher : "NULL";
|
||||
+ logpb.subject = escape_string(subject, sbuf);
|
||||
+ logpb.issuer = issuer ? escape_string(issuer, ibuf) : "NULL";
|
||||
+ slapd_log_access_tls_client_auth(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " %s %i-bit %s; client %s; issuer %s\n",
|
||||
+ conn->c_connid,
|
||||
+ sslversion, keySize,
|
||||
+ cipher ? cipher : "NULL",
|
||||
+ escape_string(subject, sbuf),
|
||||
+ issuer ? escape_string(issuer, ibuf) : "NULL");
|
||||
+ }
|
||||
if (issuer)
|
||||
- free(issuer);
|
||||
+ slapi_ch_free_string(&issuer);
|
||||
}
|
||||
slapi_dn_normalize(subject);
|
||||
{
|
||||
LDAPMessage *chain = NULL;
|
||||
char *basedn = config_get_basedn();
|
||||
- int err;
|
||||
|
||||
err = ldapu_cert_to_ldap_entry(clientCert, internal_ld, basedn ? basedn : "" /*baseDN*/, &chain);
|
||||
if (err == LDAPU_SUCCESS && chain) {
|
||||
@@ -505,18 +576,37 @@ handle_handshake_done(PRFileDesc *prfd, void *clientData)
|
||||
slapi_sdn_free(&sdn);
|
||||
(void)slapi_getSSLVersion_str(channelInfo.protocolVersion,
|
||||
sslversion, sizeof(sslversion));
|
||||
- slapi_log_access(LDAP_DEBUG_STATS,
|
||||
- "conn=%" PRIu64 " %s client bound as %s\n",
|
||||
- conn->c_connid,
|
||||
- sslversion, clientDN);
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.msg = "client bound";
|
||||
+ logpb.tls_version = sslversion;
|
||||
+ logpb.client_dn = clientDN;
|
||||
+ slapd_log_access_tls_client_auth(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " %s client bound as %s\n",
|
||||
+ conn->c_connid,
|
||||
+ sslversion, clientDN);
|
||||
+ }
|
||||
} else if (clientCert != NULL) {
|
||||
(void)slapi_getSSLVersion_str(channelInfo.protocolVersion,
|
||||
sslversion, sizeof(sslversion));
|
||||
- slapi_log_access(LDAP_DEBUG_STATS,
|
||||
- "conn=%" PRIu64 " %s failed to map client "
|
||||
- "certificate to LDAP DN (%s)\n",
|
||||
- conn->c_connid,
|
||||
- sslversion, extraErrorMsg);
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.msg = "failed to map client certificate to LDAP DN";
|
||||
+ logpb.tls_version = sslversion;
|
||||
+ logpb.err = err;
|
||||
+ logpb.err_str = extraErrorMsg;
|
||||
+ slapd_log_access_tls_client_auth(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " %s failed to map client "
|
||||
+ "certificate to LDAP DN (%s)\n",
|
||||
+ conn->c_connid,
|
||||
+ sslversion, extraErrorMsg);
|
||||
+ }
|
||||
}
|
||||
|
||||
/*
|
||||
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
|
||||
index f535011ab..91ba23047 100644
|
||||
--- a/ldap/servers/slapd/log.c
|
||||
+++ b/ldap/servers/slapd/log.c
|
||||
@@ -7270,6 +7270,8 @@ slapd_log_pblock_init(slapd_log_pblock *logpb, int32_t log_format, Slapi_PBlock
|
||||
slapi_pblock_get(pb, SLAPI_CONNECTION, &conn);
|
||||
}
|
||||
|
||||
+ memset(logpb, 0, sizeof(slapd_log_pblock));
|
||||
+
|
||||
logpb->loginfo = &loginfo;
|
||||
logpb->level = 256; /* default log level */
|
||||
logpb->log_format = log_format;
|
||||
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
|
||||
index 6438a81fe..da232ae2f 100644
|
||||
--- a/ldap/servers/slapd/slapi-private.h
|
||||
+++ b/ldap/servers/slapd/slapi-private.h
|
||||
@@ -1549,6 +1549,13 @@ typedef struct slapd_log_pblock {
|
||||
PRBool using_tls;
|
||||
PRBool haproxied;
|
||||
const char *bind_dn;
|
||||
+ /* TLS */
|
||||
+ const char *tls_version;
|
||||
+ int32_t keysize;
|
||||
+ const char *cipher;
|
||||
+ const char *subject;
|
||||
+ const char *issuer;
|
||||
+ const char *client_dn;
|
||||
/* Close connection */
|
||||
const char *close_error;
|
||||
const char *close_reason;
|
||||
@@ -1619,6 +1626,7 @@ typedef struct slapd_log_pblock {
|
||||
const char *oid;
|
||||
const char *msg;
|
||||
const char *name;
|
||||
+ const char *err_str;
|
||||
LDAPControl **request_controls;
|
||||
LDAPControl **response_controls;
|
||||
} slapd_log_pblock;
|
||||
@@ -1645,6 +1653,8 @@ int32_t slapd_log_access_entry(slapd_log_pblock *logpb);
|
||||
int32_t slapd_log_access_referral(slapd_log_pblock *logpb);
|
||||
int32_t slapd_log_access_extop(slapd_log_pblock *logpb);
|
||||
int32_t slapd_log_access_sort(slapd_log_pblock *logpb);
|
||||
+int32_t slapd_log_access_tls(slapd_log_pblock *logpb);
|
||||
+int32_t slapd_log_access_tls_client_auth(slapd_log_pblock *logpb);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
--
|
||||
2.49.0
|
||||
|
||||
@ -0,0 +1,43 @@
From 091016df4680e1f9ffc3f78292583800626153c2 Mon Sep 17 00:00:00 2001
From: Barbora Simonova <bsmejkal@redhat.com>
Date: Thu, 17 Jul 2025 16:46:57 +0200
Subject: [PATCH] Issue 6829 - Update parametrized docstring for tests

Description:
Update the rest of missing parametrized values

Relates: https://github.com/389ds/389-ds-base/issues/6829

Reviewed by: @droideck (Thanks!)
---
 dirsrvtests/tests/suites/logging/error_json_logging_test.py | 1 +
 .../tests/suites/schema/schema_replication_origin_test.py | 1 +
 2 files changed, 2 insertions(+)

diff --git a/dirsrvtests/tests/suites/logging/error_json_logging_test.py b/dirsrvtests/tests/suites/logging/error_json_logging_test.py
|
||||
index 87e1840a6..e0b3d7317 100644
|
||||
--- a/dirsrvtests/tests/suites/logging/error_json_logging_test.py
|
||||
+++ b/dirsrvtests/tests/suites/logging/error_json_logging_test.py
|
||||
@@ -29,6 +29,7 @@ def test_error_json_format(topo, log_format):
|
||||
"""Test error log is in JSON
|
||||
|
||||
:id: c9afb295-43de-4581-af8b-ec8f25a06d75
|
||||
+ :parametrized: yes
|
||||
:setup: Standalone
|
||||
:steps:
|
||||
1. Check error log has json and the expected data is present
|
||||
diff --git a/dirsrvtests/tests/suites/schema/schema_replication_origin_test.py b/dirsrvtests/tests/suites/schema/schema_replication_origin_test.py
|
||||
index 9e4ce498c..e93dddad0 100644
|
||||
--- a/dirsrvtests/tests/suites/schema/schema_replication_origin_test.py
|
||||
+++ b/dirsrvtests/tests/suites/schema/schema_replication_origin_test.py
|
||||
@@ -157,6 +157,7 @@ def test_schema_xorigin_repl(topology, schema_replication_init, xorigin):
|
||||
schema is pushed and there is a message in the error log
|
||||
|
||||
:id: 2b29823b-3e83-4b25-954a-8a081dbc15ee
|
||||
+ :parametrized: yes
|
||||
:setup: Supplier and consumer topology, with one user entry;
|
||||
Supplier, hub and consumer topology, with one user entry
|
||||
:steps:
|
||||
--
|
||||
2.49.0
|
||||
|
||||
@ -0,0 +1,67 @@
|
||||
From 863c244cc137376ee8da0f007fc9b9da88d8dbc0 Mon Sep 17 00:00:00 2001
From: Anuar Beisembayev <111912342+abeisemb@users.noreply.github.com>
Date: Wed, 23 Jul 2025 23:48:11 -0400
Subject: [PATCH] Issue 6772 - dsconf - Replicas with the "consumer" role allow
 for viewing and modification of their changelog. (#6773)

dsconf currently allows users to set and retrieve changelogs in consumer replicas, which do not have officially supported changelogs. This can lead to undefined behavior and confusion.
This commit prints a warning message if the user tries to interact with a changelog on a consumer replica.

Resolves: https://github.com/389ds/389-ds-base/issues/6772

Reviewed by: @droideck
---
 src/lib389/lib389/cli_conf/replication.py | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
|
||||
index 6f77f34ca..a18bf83ca 100644
|
||||
--- a/src/lib389/lib389/cli_conf/replication.py
|
||||
+++ b/src/lib389/lib389/cli_conf/replication.py
|
||||
@@ -686,6 +686,9 @@ def set_per_backend_cl(inst, basedn, log, args):
|
||||
replace_list = []
|
||||
did_something = False
|
||||
|
||||
+ if (is_replica_role_consumer(inst, suffix)):
|
||||
+ log.info("Warning: Changelogs are not supported for consumer replicas. You may run into undefined behavior.")
|
||||
+
|
||||
if args.encrypt:
|
||||
cl.replace('nsslapd-encryptionalgorithm', 'AES')
|
||||
del args.encrypt
|
||||
@@ -715,6 +718,10 @@ def set_per_backend_cl(inst, basedn, log, args):
|
||||
# that means there is a changelog config entry per backend (aka suffix)
|
||||
def get_per_backend_cl(inst, basedn, log, args):
|
||||
suffix = args.suffix
|
||||
+
|
||||
+ if (is_replica_role_consumer(inst, suffix)):
|
||||
+ log.info("Warning: Changelogs are not supported for consumer replicas. You may run into undefined behavior.")
|
||||
+
|
||||
cl = Changelog(inst, suffix)
|
||||
if args and args.json:
|
||||
log.info(cl.get_all_attrs_json())
|
||||
@@ -822,6 +829,22 @@ def del_repl_manager(inst, basedn, log, args):
|
||||
|
||||
log.info("Successfully deleted replication manager: " + manager_dn)
|
||||
|
||||
+def is_replica_role_consumer(inst, suffix):
|
||||
+ """Helper function for get_per_backend_cl and set_per_backend_cl.
|
||||
+ Makes sure the instance in question is not a consumer, which is a role that
|
||||
+ does not support changelogs.
|
||||
+ """
|
||||
+ replicas = Replicas(inst)
|
||||
+ try:
|
||||
+ replica = replicas.get(suffix)
|
||||
+ role = replica.get_role()
|
||||
+ except ldap.NO_SUCH_OBJECT:
|
||||
+ raise ValueError(f"Backend \"{suffix}\" is not enabled for replication")
|
||||
+
|
||||
+ if role == ReplicaRole.CONSUMER:
|
||||
+ return True
|
||||
+ else:
|
||||
+ return False
|
||||
|
||||
#
|
||||
# Agreements
|
||||
--
|
||||
2.49.0
|
||||
|
||||
360
0020-Issue-6893-Log-user-that-is-updated-during-password-.patch
Normal file
360
0020-Issue-6893-Log-user-that-is-updated-during-password-.patch
Normal file
@ -0,0 +1,360 @@
|
||||
From 79b68019ff4b17c4b80fd2c6c725071a050559ca Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 21 Jul 2025 18:07:21 -0400
Subject: [PATCH] Issue 6893 - Log user that is updated during password modify
 extended operation

Description:

When a user's password is updated via an extended operation (password modify
plugin) we only log the bind DN and not which user was updated. While "internal
operation" logging will display the user, it should be logged at the default
logging level.

Add access logging using "EXT_INFO" for the old logging format, and
"EXTENDED_OP_INFO" for json logging where we display the bind dn, target
dn, and message.

Relates: https://github.com/389ds/389-ds-base/issues/6893

Reviewed by: spichugi & tbordaz(Thanks!!)
---
 .../logging/access_json_logging_test.py | 98 +++++++++++++++----
 ldap/servers/slapd/accesslog.c | 47 +++++++++
 ldap/servers/slapd/passwd_extop.c | 69 +++++++------
 ldap/servers/slapd/slapi-private.h | 1 +
 4 files changed, 169 insertions(+), 46 deletions(-)

diff --git a/dirsrvtests/tests/suites/logging/access_json_logging_test.py b/dirsrvtests/tests/suites/logging/access_json_logging_test.py
|
||||
index f0dc861a7..699bd8c4d 100644
|
||||
--- a/dirsrvtests/tests/suites/logging/access_json_logging_test.py
|
||||
+++ b/dirsrvtests/tests/suites/logging/access_json_logging_test.py
|
||||
@@ -11,7 +11,7 @@ import os
|
||||
import time
|
||||
import ldap
|
||||
import pytest
|
||||
-from lib389._constants import DEFAULT_SUFFIX, PASSWORD, LOG_ACCESS_LEVEL
|
||||
+from lib389._constants import DEFAULT_SUFFIX, PASSWORD, LOG_ACCESS_LEVEL, DN_DM
|
||||
from lib389.properties import TASK_WAIT
|
||||
from lib389.topologies import topology_m2 as topo_m2
|
||||
from lib389.idm.group import Groups
|
||||
@@ -548,22 +548,6 @@ def test_access_json_format(topo_m2, setup_test):
|
||||
"2.16.840.1.113730.3.4.3",
|
||||
"LDAP_CONTROL_PERSISTENTSEARCH")
|
||||
|
||||
- #
|
||||
- # Extended op
|
||||
- #
|
||||
- log.info("Test EXTENDED_OP")
|
||||
- event = get_log_event(inst, "EXTENDED_OP", "oid",
|
||||
- "2.16.840.1.113730.3.5.12")
|
||||
- assert event is not None
|
||||
- assert event['oid_name'] == "REPL_START_NSDS90_REPLICATION_REQUEST_OID"
|
||||
- assert event['name'] == "replication-multisupplier-extop"
|
||||
-
|
||||
- event = get_log_event(inst, "EXTENDED_OP", "oid",
|
||||
- "2.16.840.1.113730.3.5.5")
|
||||
- assert event is not None
|
||||
- assert event['oid_name'] == "REPL_END_NSDS50_REPLICATION_REQUEST_OID"
|
||||
- assert event['name'] == "replication-multisupplier-extop"
|
||||
-
|
||||
#
|
||||
# TLS INFO/TLS CLIENT INFO
|
||||
#
|
||||
@@ -579,7 +563,8 @@ def test_access_json_format(topo_m2, setup_test):
|
||||
'sn': RDN_TEST_USER,
|
||||
'uidNumber': '1000',
|
||||
'gidNumber': '2000',
|
||||
- 'homeDirectory': f'/home/{RDN_TEST_USER}'
|
||||
+ 'homeDirectory': f'/home/{RDN_TEST_USER}',
|
||||
+ 'userpassword': 'password'
|
||||
})
|
||||
|
||||
ssca_dir = inst.get_ssca_dir()
|
||||
@@ -646,6 +631,83 @@ def test_access_json_format(topo_m2, setup_test):
|
||||
assert event['msg'] == "failed to map client certificate to LDAP DN"
|
||||
assert event['err_msg'] == "Certificate couldn't be mapped to an ldap entry"
|
||||
|
||||
+ #
|
||||
+ # Extended op
|
||||
+ #
|
||||
+ log.info("Test EXTENDED_OP")
|
||||
+ event = get_log_event(inst, "EXTENDED_OP", "oid",
|
||||
+ "2.16.840.1.113730.3.5.12")
|
||||
+ assert event is not None
|
||||
+ assert event['oid_name'] == "REPL_START_NSDS90_REPLICATION_REQUEST_OID"
|
||||
+ assert event['name'] == "replication-multisupplier-extop"
|
||||
+
|
||||
+ event = get_log_event(inst, "EXTENDED_OP", "oid",
|
||||
+ "2.16.840.1.113730.3.5.5")
|
||||
+ assert event is not None
|
||||
+ assert event['oid_name'] == "REPL_END_NSDS50_REPLICATION_REQUEST_OID"
|
||||
+ assert event['name'] == "replication-multisupplier-extop"
|
||||
+
|
||||
+ #
|
||||
+ # Extended op info
|
||||
+ #
|
||||
+ log.info("Test EXTENDED_OP_INFO")
|
||||
+ OLD_PASSWD = 'password'
|
||||
+ NEW_PASSWD = 'newpassword'
|
||||
+
|
||||
+ assert inst.simple_bind_s(DN_DM, PASSWORD)
|
||||
+
|
||||
+ assert inst.passwd_s(user.dn, OLD_PASSWD, NEW_PASSWD)
|
||||
+ event = get_log_event(inst, "EXTENDED_OP_INFO", "name",
|
||||
+ "passwd_modify_plugin")
|
||||
+ assert event is not None
|
||||
+ assert event['bind_dn'] == "cn=directory manager"
|
||||
+ assert event['target_dn'] == user.dn.lower()
|
||||
+ assert event['msg'] == "success"
|
||||
+
|
||||
+ # Test no such object
|
||||
+ BAD_DN = user.dn + ",dc=not"
|
||||
+ with pytest.raises(ldap.NO_SUCH_OBJECT):
|
||||
+ inst.passwd_s(BAD_DN, OLD_PASSWD, NEW_PASSWD)
|
||||
+
|
||||
+ event = get_log_event(inst, "EXTENDED_OP_INFO", "target_dn", BAD_DN)
|
||||
+ assert event is not None
|
||||
+ assert event['bind_dn'] == "cn=directory manager"
|
||||
+ assert event['target_dn'] == BAD_DN.lower()
|
||||
+ assert event['msg'] == "No such entry exists."
|
||||
+
|
||||
+ # Test invalid old password
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ inst.passwd_s(user.dn, "not_the_old_pw", NEW_PASSWD)
|
||||
+ event = get_log_event(inst, "EXTENDED_OP_INFO", "err", 49)
|
||||
+ assert event is not None
|
||||
+ assert event['bind_dn'] == "cn=directory manager"
|
||||
+ assert event['target_dn'] == user.dn.lower()
|
||||
+ assert event['msg'] == "Invalid oldPasswd value."
|
||||
+
|
||||
+ # Test user without permissions
|
||||
+ user2 = users.create(properties={
|
||||
+ 'uid': RDN_TEST_USER + "2",
|
||||
+ 'cn': RDN_TEST_USER + "2",
|
||||
+ 'sn': RDN_TEST_USER + "2",
|
||||
+ 'uidNumber': '1001',
|
||||
+ 'gidNumber': '2001',
|
||||
+ 'homeDirectory': f'/home/{RDN_TEST_USER + "2"}',
|
||||
+ 'userpassword': 'password'
|
||||
+ })
|
||||
+ inst.simple_bind_s(user2.dn, 'password')
|
||||
+ with pytest.raises(ldap.INSUFFICIENT_ACCESS):
|
||||
+ inst.passwd_s(user.dn, NEW_PASSWD, OLD_PASSWD)
|
||||
+ event = get_log_event(inst, "EXTENDED_OP_INFO", "err", 50)
|
||||
+ assert event is not None
|
||||
+ assert event['bind_dn'] == user2.dn.lower()
|
||||
+ assert event['target_dn'] == user.dn.lower()
|
||||
+ assert event['msg'] == "Insufficient access rights"
|
||||
+
|
||||
+
|
||||
+ # Reset bind
|
||||
+ inst.simple_bind_s(DN_DM, PASSWORD)
|
||||
+
|
||||
+
|
||||
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
diff --git a/ldap/servers/slapd/accesslog.c b/ldap/servers/slapd/accesslog.c
|
||||
index 072ace203..46228d4a1 100644
|
||||
--- a/ldap/servers/slapd/accesslog.c
|
||||
+++ b/ldap/servers/slapd/accesslog.c
|
||||
@@ -1113,6 +1113,53 @@ slapd_log_access_extop(slapd_log_pblock *logpb)
|
||||
return rc;
|
||||
}
|
||||
|
||||
+/*
|
||||
+ * Extended operation information
|
||||
+ *
|
||||
+ * int32_t log_format
|
||||
+ * time_t conn_time
|
||||
+ * uint64_t conn_id
|
||||
+ * int32_t op_id
|
||||
+ * const char *name
|
||||
+ * const char *bind_dn
|
||||
+ * const char *target_dn
|
||||
+ * const char *msg
|
||||
+ */
|
||||
+int32_t
|
||||
+slapd_log_access_extop_info(slapd_log_pblock *logpb)
|
||||
+{
|
||||
+ int32_t rc = 0;
|
||||
+ char *msg = NULL;
|
||||
+ json_object *json_obj = NULL;
|
||||
+
|
||||
+ if ((json_obj = build_base_obj(logpb, "EXTENDED_OP_INFO")) == NULL) {
|
||||
+ return rc;
|
||||
+ }
|
||||
+
|
||||
+ if (logpb->name) {
|
||||
+ json_object_object_add(json_obj, "name", json_obj_add_str(logpb->name));
|
||||
+ }
|
||||
+ if (logpb->target_dn) {
|
||||
+ json_object_object_add(json_obj, "target_dn", json_obj_add_str(logpb->target_dn));
|
||||
+ }
|
||||
+ if (logpb->bind_dn) {
|
||||
+ json_object_object_add(json_obj, "bind_dn", json_obj_add_str(logpb->bind_dn));
|
||||
+ }
|
||||
+ if (logpb->msg) {
|
||||
+ json_object_object_add(json_obj, "msg", json_obj_add_str(logpb->msg));
|
||||
+ }
|
||||
+ json_object_object_add(json_obj, "err", json_object_new_int(logpb->err));
|
||||
+
|
||||
+ /* Convert json object to string and log it */
|
||||
+ msg = (char *)json_object_to_json_string_ext(json_obj, logpb->log_format);
|
||||
+ rc = slapd_log_access_json(msg);
|
||||
+
|
||||
+ /* Done with JSON object - free it */
|
||||
+ json_object_put(json_obj);
|
||||
+
|
||||
+ return rc;
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Sort
|
||||
*
|
||||
diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c
|
||||
index 4bb60afd6..69bb3494c 100644
|
||||
--- a/ldap/servers/slapd/passwd_extop.c
|
||||
+++ b/ldap/servers/slapd/passwd_extop.c
|
||||
@@ -465,12 +465,14 @@ passwd_modify_extop(Slapi_PBlock *pb)
|
||||
BerElement *response_ber = NULL;
|
||||
Slapi_Entry *targetEntry = NULL;
|
||||
Connection *conn = NULL;
|
||||
+ Operation *pb_op = NULL;
|
||||
LDAPControl **req_controls = NULL;
|
||||
LDAPControl **resp_controls = NULL;
|
||||
passwdPolicy *pwpolicy = NULL;
|
||||
Slapi_DN *target_sdn = NULL;
|
||||
Slapi_Entry *referrals = NULL;
|
||||
- /* Slapi_DN sdn; */
|
||||
+ Slapi_Backend *be = NULL;
|
||||
+ int32_t log_format = config_get_accesslog_log_format();
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "passwd_modify_extop", "=>\n");
|
||||
|
||||
@@ -647,7 +649,7 @@ parse_req_done:
|
||||
}
|
||||
dn = slapi_sdn_get_ndn(target_sdn);
|
||||
if (dn == NULL || *dn == '\0') {
|
||||
- /* Refuse the operation because they're bound anonymously */
|
||||
+ /* Invalid DN - refuse the operation */
|
||||
errMesg = "Invalid dn.";
|
||||
rc = LDAP_INVALID_DN_SYNTAX;
|
||||
goto free_and_return;
|
||||
@@ -724,14 +726,19 @@ parse_req_done:
|
||||
ber_free(response_ber, 1);
|
||||
}
|
||||
|
||||
- slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET, (void *)dn);
|
||||
+ slapi_pblock_get(pb, SLAPI_OPERATION, &pb_op);
|
||||
+ if (pb_op == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "pb_op is NULL\n");
|
||||
+ goto free_and_return;
|
||||
+ }
|
||||
|
||||
+ slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET, (void *)dn);
|
||||
/* Now we have the DN, look for the entry */
|
||||
ret = passwd_modify_getEntry(dn, &targetEntry);
|
||||
/* If we can't find the entry, then that's an error */
|
||||
if (ret) {
|
||||
/* Couldn't find the entry, fail */
|
||||
- errMesg = "No such Entry exists.";
|
||||
+ errMesg = "No such entry exists.";
|
||||
rc = LDAP_NO_SUCH_OBJECT;
|
||||
goto free_and_return;
|
||||
}
|
||||
@@ -742,30 +749,18 @@ parse_req_done:
|
||||
leak any useful information to the client such as current password
|
||||
wrong, etc.
|
||||
*/
|
||||
- Operation *pb_op = NULL;
|
||||
- slapi_pblock_get(pb, SLAPI_OPERATION, &pb_op);
|
||||
- if (pb_op == NULL) {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "pb_op is NULL\n");
|
||||
- goto free_and_return;
|
||||
- }
|
||||
-
|
||||
operation_set_target_spec(pb_op, slapi_entry_get_sdn(targetEntry));
|
||||
slapi_pblock_set(pb, SLAPI_REQUESTOR_ISROOT, &pb_op->o_isroot);
|
||||
|
||||
- /* In order to perform the access control check , we need to select a backend (even though
|
||||
- * we don't actually need it otherwise).
|
||||
- */
|
||||
- {
|
||||
- Slapi_Backend *be = NULL;
|
||||
-
|
||||
- be = slapi_mapping_tree_find_backend_for_sdn(slapi_entry_get_sdn(targetEntry));
|
||||
- if (NULL == be) {
|
||||
- errMesg = "Failed to find backend for target entry";
|
||||
- rc = LDAP_OPERATIONS_ERROR;
|
||||
- goto free_and_return;
|
||||
- }
|
||||
- slapi_pblock_set(pb, SLAPI_BACKEND, be);
|
||||
+ /* In order to perform the access control check, we need to select a backend (even though
|
||||
+ * we don't actually need it otherwise). */
|
||||
+ be = slapi_mapping_tree_find_backend_for_sdn(slapi_entry_get_sdn(targetEntry));
|
||||
+ if (NULL == be) {
|
||||
+ errMesg = "Failed to find backend for target entry";
|
||||
+ rc = LDAP_NO_SUCH_OBJECT;
|
||||
+ goto free_and_return;
|
||||
}
|
||||
+ slapi_pblock_set(pb, SLAPI_BACKEND, be);
|
||||
|
||||
/* Check if the pwpolicy control is present */
|
||||
slapi_pblock_get(pb, SLAPI_PWPOLICY, &need_pwpolicy_ctrl);
|
||||
@@ -797,10 +792,7 @@ parse_req_done:
|
||||
/* Check if password policy allows users to change their passwords. We need to do
|
||||
* this here since the normal modify code doesn't perform this check for
|
||||
* internal operations. */
|
||||
-
|
||||
- Connection *pb_conn;
|
||||
- slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn);
|
||||
- if (!pb_op->o_isroot && !pb_conn->c_needpw && !pwpolicy->pw_change) {
|
||||
+ if (!pb_op->o_isroot && !conn->c_needpw && !pwpolicy->pw_change) {
|
||||
if (NULL == bindSDN) {
|
||||
bindSDN = slapi_sdn_new_normdn_byref(bindDN);
|
||||
}
|
||||
@@ -848,6 +840,27 @@ free_and_return:
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop",
|
||||
"%s\n", errMesg ? errMesg : "success");
|
||||
|
||||
+ if (dn) {
|
||||
+ /* Log the target ndn (if we have a target ndn) */
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ /* JSON logging */
|
||||
+ slapd_log_pblock logpb = {0};
|
||||
+ slapd_log_pblock_init(&logpb, log_format, pb);
|
||||
+ logpb.name = "passwd_modify_plugin";
|
||||
+ logpb.target_dn = dn;
|
||||
+ logpb.bind_dn = bindDN;
|
||||
+ logpb.msg = errMesg ? errMesg : "success";
|
||||
+ logpb.err = rc;
|
||||
+ slapd_log_access_extop_info(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " op=%d EXT_INFO name=\"passwd_modify_plugin\" bind_dn=\"%s\" target_dn=\"%s\" msg=\"%s\" rc=%d\n",
|
||||
+ conn ? conn->c_connid : -1, pb_op ? pb_op->o_opid : -1,
|
||||
+ bindDN ? bindDN : "", dn,
|
||||
+ errMesg ? errMesg : "success", rc);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
if ((rc == LDAP_REFERRAL) && (referrals)) {
|
||||
send_referrals_from_entry(pb, referrals);
|
||||
} else {
|
||||
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
|
||||
index da232ae2f..e9abf8b75 100644
|
||||
--- a/ldap/servers/slapd/slapi-private.h
|
||||
+++ b/ldap/servers/slapd/slapi-private.h
|
||||
@@ -1652,6 +1652,7 @@ int32_t slapd_log_access_vlv(slapd_log_pblock *logpb);
|
||||
int32_t slapd_log_access_entry(slapd_log_pblock *logpb);
|
||||
int32_t slapd_log_access_referral(slapd_log_pblock *logpb);
|
||||
int32_t slapd_log_access_extop(slapd_log_pblock *logpb);
|
||||
+int32_t slapd_log_access_extop_info(slapd_log_pblock *logpb);
|
||||
int32_t slapd_log_access_sort(slapd_log_pblock *logpb);
|
||||
int32_t slapd_log_access_tls(slapd_log_pblock *logpb);
|
||||
int32_t slapd_log_access_tls_client_auth(slapd_log_pblock *logpb);
|
||||
--
|
||||
2.49.0
|
||||
|
||||
37
0021-Issue-6352-Fix-DeprecationWarning.patch
Normal file
37
0021-Issue-6352-Fix-DeprecationWarning.patch
Normal file
@ -0,0 +1,37 @@
|
||||
From ab188bdfed7a144734c715f57ba4772c8d453b6f Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Fri, 11 Jul 2025 13:12:44 +0200
Subject: [PATCH] Issue 6352 - Fix DeprecationWarning

Bug Description:
When pytest is used on an ASAN build, the pytest-html plugin collects `*asan*`
files, which results in the following deprecation warning:

```
The 'report.extra' attribute is deprecated and will be removed in a
future release, use 'report.extras' instead.
```

Fixes: https://github.com/389ds/389-ds-base/issues/6352

Reviewed by: @droideck (Thanks!)
---
 dirsrvtests/conftest.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dirsrvtests/conftest.py b/dirsrvtests/conftest.py
|
||||
index c989729c1..0db6045f4 100644
|
||||
--- a/dirsrvtests/conftest.py
|
||||
+++ b/dirsrvtests/conftest.py
|
||||
@@ -138,7 +138,7 @@ def pytest_runtest_makereport(item, call):
|
||||
log_name = os.path.basename(f)
|
||||
instance_name = os.path.basename(os.path.dirname(f)).split("slapd-",1)[1]
|
||||
extra.append(pytest_html.extras.text(text, name=f"{instance_name}-{log_name}"))
|
||||
- report.extra = extra
|
||||
+ report.extras = extra
|
||||
|
||||
# Make a screenshot if WebUI test fails
|
||||
if call.when == "call" and "WEBUI" in os.environ:
|
||||
--
|
||||
2.49.0
|
||||
|
||||
38
0022-Issue-6880-Fix-ds_logs-test-suite-failure.patch
Normal file
38
0022-Issue-6880-Fix-ds_logs-test-suite-failure.patch
Normal file
@ -0,0 +1,38 @@
|
||||
From 5b0188baf00c395dac807657736ed51968d5c3a0 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Thu, 17 Jul 2025 13:41:04 +0200
Subject: [PATCH] Issue 6880 - Fix ds_logs test suite failure

Bug Description:
After 947ee67df6 the ds_logs test suite started to fail in
test_internal_log_server_level_4, as that commit slightly changed the order and
timing of log messages.

Fix Description:
Do another MOD after restart to trigger the internal search.

Fixes: https://github.com/389ds/389-ds-base/issues/6880

Reviewed by: @bsimonova, @droideck (Thanks!)
---
 dirsrvtests/tests/suites/ds_logs/ds_logs_test.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 6fd790c18..eff6780cd 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -356,6 +356,10 @@ def test_internal_log_server_level_4(topology_st, clean_access_logs, disable_acc
|
||||
log.info('Restart the server to flush the logs')
|
||||
topo.restart()
|
||||
|
||||
+ # After 947ee67 log dynamic has changed slightly
|
||||
+ # Do another MOD to trigger the internal search
|
||||
+ topo.config.set(LOG_ACCESS_LEVEL, access_log_level)
|
||||
+
|
||||
try:
|
||||
# These comments contain lines we are trying to find without regex (the op numbers are just examples)
|
||||
log.info("Check if access log contains internal MOD operation in correct format")
|
||||
--
|
||||
2.49.0
|
||||
|
||||
53
0023-Issue-6901-Update-changelog-trimming-logging.patch
Normal file
53
0023-Issue-6901-Update-changelog-trimming-logging.patch
Normal file
@ -0,0 +1,53 @@
|
||||
From 13b0a1637b2fb8eb8b6f5fa391721f61bfe41874 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Thu, 24 Jul 2025 19:09:40 +0200
Subject: [PATCH] Issue 6901 - Update changelog trimming logging

Description:
* Set SLAPI_LOG_ERR for message in `_cl5DispatchTrimThread`
* Set correct function name for logs in `_cl5TrimEntry`.
* Add number of scanned entries to the log.

Fixes: https://github.com/389ds/389-ds-base/issues/6901

Reviewed by: @mreynolds389, @progier389 (Thanks!)
---
 ldap/servers/plugins/replication/cl5_api.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
|
||||
index 3c356abc0..1d62aa020 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_api.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_api.c
|
||||
@@ -2007,7 +2007,7 @@ _cl5DispatchTrimThread(Replica *replica)
|
||||
(void *)replica, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
|
||||
PR_UNJOINABLE_THREAD, DEFAULT_THREAD_STACKSIZE);
|
||||
if (NULL == pth) {
|
||||
- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
"_cl5DispatchTrimThread - Failed to create trimming thread for %s"
|
||||
"; NSPR error - %d\n", replica_get_name(replica),
|
||||
PR_GetError());
|
||||
@@ -2788,7 +2788,7 @@ _cl5TrimEntry(dbi_val_t *key, dbi_val_t *data, void *ctx)
|
||||
return DBI_RC_NOTFOUND;
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
|
||||
- "_cl5TrimReplica - Changelog purge skipped anchor csn %s\n",
|
||||
+ "_cl5TrimEntry - Changelog purge skipped anchor csn %s\n",
|
||||
(char*)key->data);
|
||||
return DBI_RC_SUCCESS;
|
||||
}
|
||||
@@ -2867,8 +2867,8 @@ _cl5TrimReplica(Replica *r)
|
||||
slapi_ch_free((void**)&dblcictx.rids);
|
||||
|
||||
if (dblcictx.changed.tot) {
|
||||
- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl, "_cl5TrimReplica - Trimmed %ld changes from the changelog\n",
|
||||
- dblcictx.changed.tot);
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl, "_cl5TrimReplica - Scanned %ld records, and trimmed %ld changes from the changelog\n",
|
||||
+ dblcictx.seen.tot, dblcictx.changed.tot);
|
||||
}
|
||||
}
|
||||
|
||||
--
|
||||
2.49.0
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
From a7231528b5ad7e887eeed4317de48d054cd046cd Mon Sep 17 00:00:00 2001
|
||||
From 9cb193418d4dd07182d2e1a38a5cc5a2a41e2877 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Wed, 23 Jul 2025 19:35:32 -0400
|
||||
Subject: [PATCH] Issue 6895 - Crash if repl keep alive entry can not be
|
||||
@ -13,9 +13,6 @@ we try and get the dn from the entry and we get a use-after-free crash.
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6895
|
||||
|
||||
Reviewed by: spichugi(Thanks!)
|
||||
|
||||
(cherry picked from commit 43ab6b1d1de138d6be03b657f27cbb6ba19ddd14)
|
||||
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
---
|
||||
ldap/servers/plugins/chainingdb/cb_config.c | 3 +--
|
||||
ldap/servers/plugins/posix-winsync/posix-winsync.c | 1 -
|
||||
@ -38,10 +35,10 @@ index 40a7088d7..24fa1bcb3 100644
|
||||
rc = res;
|
||||
slapi_pblock_destroy(util_pb);
|
||||
diff --git a/ldap/servers/plugins/posix-winsync/posix-winsync.c b/ldap/servers/plugins/posix-winsync/posix-winsync.c
|
||||
index 56efb2330..ab37497cd 100644
|
||||
index 51a55b643..3a002bb70 100644
|
||||
--- a/ldap/servers/plugins/posix-winsync/posix-winsync.c
|
||||
+++ b/ldap/servers/plugins/posix-winsync/posix-winsync.c
|
||||
@@ -1625,7 +1625,6 @@ posix_winsync_end_update_cb(void *cbdata __attribute__((unused)),
|
||||
@@ -1626,7 +1626,6 @@ posix_winsync_end_update_cb(void *cbdata __attribute__((unused)),
|
||||
"posix_winsync_end_update_cb: "
|
||||
"add task entry\n");
|
||||
}
|
||||
@ -50,7 +47,7 @@ index 56efb2330..ab37497cd 100644
|
||||
pb = NULL;
|
||||
posix_winsync_config_reset_MOFTaskCreated();
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_init.c b/ldap/servers/plugins/replication/repl5_init.c
|
||||
index 5a748e35a..9b6523a2e 100644
|
||||
index 8bc0b5372..5047fb8dc 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_init.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_init.c
|
||||
@@ -682,7 +682,6 @@ create_repl_schema_policy(void)
|
||||
@ -78,10 +75,10 @@ index 5a748e35a..9b6523a2e 100644
|
||||
}
|
||||
slapi_pblock_destroy(pb);
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
|
||||
index d67f1bc71..cec140140 100644
|
||||
index 59062b46b..a97c807e9 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_replica.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_replica.c
|
||||
@@ -440,10 +440,10 @@ replica_subentry_create(const char *repl_root, ReplicaId rid)
|
||||
@@ -465,10 +465,10 @@ replica_subentry_create(const char *repl_root, ReplicaId rid)
|
||||
if (return_value != LDAP_SUCCESS &&
|
||||
return_value != LDAP_ALREADY_EXISTS &&
|
||||
return_value != LDAP_REFERRAL /* CONSUMER */) {
|
||||
@ -97,5 +94,5 @@ index d67f1bc71..cec140140 100644
|
||||
goto done;
|
||||
}
|
||||
--
|
||||
2.51.1
|
||||
2.49.0
|
||||
|
||||
352
0025-Issue-6250-Add-test-for-entryUSN-overflow-on-failed-.patch
Normal file
352
0025-Issue-6250-Add-test-for-entryUSN-overflow-on-failed-.patch
Normal file
@ -0,0 +1,352 @@
|
||||
From a0cdf2970edb46acec06a5ac204ec04135806b35 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Mon, 28 Jul 2025 10:50:26 -0700
Subject: [PATCH] Issue 6250 - Add test for entryUSN overflow on failed add
 operations (#6821)

Description: Add a comprehensive test to reproduce the entryUSN
overflow issue where failed attempts to add existing entries followed by
modify operations cause entryUSN values to underflow/overflow instead of
incrementing properly.

Related: https://github.com/389ds/389-ds-base/issues/6250

Reviewed by: @tbordaz (Thanks!)
---
 .../suites/plugins/entryusn_overflow_test.py | 323 ++++++++++++++++++
 1 file changed, 323 insertions(+)
 create mode 100644 dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py

diff --git a/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
|
||||
new file mode 100644
|
||||
index 000000000..a23d734ca
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
|
||||
@@ -0,0 +1,323 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import os
|
||||
+import ldap
|
||||
+import logging
|
||||
+import pytest
|
||||
+import time
|
||||
+import random
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
+from lib389.config import Config
|
||||
+from lib389.plugins import USNPlugin
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.topologies import topology_st
|
||||
+from lib389.rootdse import RootDSE
|
||||
+
|
||||
+pytestmark = pytest.mark.tier2
|
||||
+
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+# Test constants
|
||||
+DEMO_USER_BASE_DN = "uid=demo_user,ou=people," + DEFAULT_SUFFIX
|
||||
+TEST_USER_PREFIX = "Demo User"
|
||||
+MAX_USN_64BIT = 18446744073709551615 # 2^64 - 1
|
||||
+ITERATIONS = 10
|
||||
+ADD_EXISTING_ENTRY_MAX_ATTEMPTS = 5
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="module")
|
||||
+def setup_usn_test(topology_st, request):
|
||||
+ """Setup USN plugin and test data for entryUSN overflow testing"""
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+
|
||||
+ log.info("Enable the USN plugin...")
|
||||
+ plugin = USNPlugin(inst)
|
||||
+ plugin.enable()
|
||||
+ plugin.enable_global_mode()
|
||||
+
|
||||
+ inst.restart()
|
||||
+
|
||||
+ # Create initial test users
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ created_users = []
|
||||
+
|
||||
+ log.info("Creating initial test users...")
|
||||
+ for i in range(3):
|
||||
+ user_props = {
|
||||
+ 'uid': f'{TEST_USER_PREFIX}-{i}',
|
||||
+ 'cn': f'{TEST_USER_PREFIX}-{i}',
|
||||
+ 'sn': f'User{i}',
|
||||
+ 'uidNumber': str(1000 + i),
|
||||
+ 'gidNumber': str(1000 + i),
|
||||
+ 'homeDirectory': f'/home/{TEST_USER_PREFIX}-{i}',
|
||||
+ 'userPassword': 'password123'
|
||||
+ }
|
||||
+ try:
|
||||
+ user = users.create(properties=user_props)
|
||||
+ created_users.append(user)
|
||||
+ log.info(f"Created user: {user.dn}")
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ log.info(f"User {user_props['uid']} already exists, skipping creation")
|
||||
+ user = users.get(user_props['uid'])
|
||||
+ created_users.append(user)
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info("Cleaning up test users...")
|
||||
+ for user in created_users:
|
||||
+ try:
|
||||
+ user.delete()
|
||||
+ except ldap.NO_SUCH_OBJECT:
|
||||
+ pass
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ return created_users
|
||||
+
|
||||
+
|
||||
+def test_entryusn_overflow_on_add_existing_entries(topology_st, setup_usn_test):
|
||||
+ """Test that reproduces entryUSN overflow when adding existing entries
|
||||
+
|
||||
+ :id: a5a8c33d-82f3-4113-be2b-027de51791c8
|
||||
+ :setup: Standalone instance with USN plugin enabled and test users
|
||||
+ :steps:
|
||||
+ 1. Record initial entryUSN values for existing users
|
||||
+ 2. Attempt to add existing entries multiple times (should fail)
|
||||
+ 3. Perform modify operations on the entries
|
||||
+ 4. Check that entryUSN values increment correctly without overflow
|
||||
+ 5. Verify lastusn values are consistent
|
||||
+ :expectedresults:
|
||||
+ 1. Initial entryUSN values are recorded successfully
|
||||
+ 2. Add operations fail with ALREADY_EXISTS error
|
||||
+ 3. Modify operations succeed
|
||||
+ 4. EntryUSN values increment properly without underflow/overflow
|
||||
+ 5. LastUSN values are consistent and increasing
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+ users = setup_usn_test
|
||||
+
|
||||
+ # Enable detailed logging for debugging
|
||||
+ config = Config(inst)
|
||||
+ config.replace('nsslapd-accesslog-level', '260') # Internal op logging
|
||||
+ config.replace('nsslapd-errorlog-level', '65536')
|
||||
+ config.replace('nsslapd-plugin-logging', 'on')
|
||||
+
|
||||
+ root_dse = RootDSE(inst)
|
||||
+
|
||||
+ log.info("Starting entryUSN overflow reproduction test")
|
||||
+
|
||||
+ # Record initial state
|
||||
+ initial_usn_values = {}
|
||||
+ for user in users:
|
||||
+ initial_usn = user.get_attr_val_int('entryusn')
|
||||
+ initial_usn_values[user.dn] = initial_usn
|
||||
+ log.info(f"Initial entryUSN for {user.get_attr_val_utf8('cn')}: {initial_usn}")
|
||||
+
|
||||
+ initial_lastusn = root_dse.get_attr_val_int("lastusn")
|
||||
+ log.info(f"Initial lastUSN: {initial_lastusn}")
|
||||
+
|
||||
+ # Perform test iterations
|
||||
+ for iteration in range(1, ITERATIONS + 1):
|
||||
+ log.info(f"\n--- Iteration {iteration} ---")
|
||||
+
|
||||
+ # Step 1: Try to add existing entries multiple times
|
||||
+ selected_user = random.choice(users)
|
||||
+ cn_value = selected_user.get_attr_val_utf8('cn')
|
||||
+ attempts = random.randint(1, ADD_EXISTING_ENTRY_MAX_ATTEMPTS)
|
||||
+
|
||||
+ log.info(f"Attempting to add existing entry '{cn_value}' {attempts} times")
|
||||
+
|
||||
+ # Get user attributes for recreation attempt
|
||||
+ user_attrs = {
|
||||
+ 'uid': selected_user.get_attr_val_utf8('uid'),
|
||||
+ 'cn': selected_user.get_attr_val_utf8('cn'),
|
||||
+ 'sn': selected_user.get_attr_val_utf8('sn'),
|
||||
+ 'uidNumber': selected_user.get_attr_val_utf8('uidNumber'),
|
||||
+ 'gidNumber': selected_user.get_attr_val_utf8('gidNumber'),
|
||||
+ 'homeDirectory': selected_user.get_attr_val_utf8('homeDirectory'),
|
||||
+ 'userPassword': 'password123'
|
||||
+ }
|
||||
+
|
||||
+ users_collection = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+
|
||||
+ # Try to add the existing user multiple times
|
||||
+ for attempt in range(attempts):
|
||||
+ try:
|
||||
+ users_collection.create(properties=user_attrs)
|
||||
+ log.error(f"ERROR: Add operation should have failed but succeeded on attempt {attempt + 1}")
|
||||
+ assert False, "Add operation should have failed with ALREADY_EXISTS"
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ log.info(f"Attempt {attempt + 1}: Got expected ALREADY_EXISTS error")
|
||||
+ except Exception as e:
|
||||
+ log.error(f"Unexpected error on attempt {attempt + 1}: {e}")
|
||||
+ raise
|
||||
+
|
||||
+ # Step 2: Perform modify operation
|
||||
+ target_user = random.choice(users)
|
||||
+ cn_value = target_user.get_attr_val_utf8('cn')
|
||||
+ old_usn = target_user.get_attr_val_int('entryusn')
|
||||
+
|
||||
+ # Modify the user entry
|
||||
+ new_description = f"Modified in iteration {iteration} - {time.time()}"
|
||||
+ target_user.replace('description', new_description)
|
||||
+
|
||||
+ # Get new USN value
|
||||
+ new_usn = target_user.get_attr_val_int('entryusn')
|
||||
+
|
||||
+ log.info(f"Modified entry '{cn_value}': old USN = {old_usn}, new USN = {new_usn}")
|
||||
+
|
||||
+ # Step 3: Validate USN values
|
||||
+ # Check for overflow/underflow conditions
|
||||
+ assert new_usn > 0, f"EntryUSN should be positive, got {new_usn}"
|
||||
+ assert new_usn < MAX_USN_64BIT, f"EntryUSN overflow detected: {new_usn} >= {MAX_USN_64BIT}"
|
||||
+
|
||||
+ # Check that USN didn't wrap around (underflow detection)
|
||||
+ usn_diff = new_usn - old_usn
|
||||
+ assert usn_diff < 1000, f"USN increment too large, possible overflow: {usn_diff}"
|
||||
+
|
||||
+ # Verify lastUSN is also reasonable
|
||||
+ current_lastusn = root_dse.get_attr_val_int("lastusn")
|
||||
+ assert current_lastusn >= new_usn, f"LastUSN ({current_lastusn}) should be >= entryUSN ({new_usn})"
|
||||
+ assert current_lastusn < MAX_USN_64BIT, f"LastUSN overflow detected: {current_lastusn}"
|
||||
+
|
||||
+ log.info(f"USN validation passed for iteration {iteration}")
|
||||
+
|
||||
+ # Add a new entry occasionally to increase USN diversity
|
||||
+ if iteration % 3 == 0:
|
||||
+ new_user_props = {
|
||||
+ 'uid': f'{TEST_USER_PREFIX}-new-{iteration}',
|
||||
+ 'cn': f'{TEST_USER_PREFIX}-new-{iteration}',
|
||||
+ 'sn': f'NewUser{iteration}',
|
||||
+ 'uidNumber': str(2000 + iteration),
|
||||
+ 'gidNumber': str(2000 + iteration),
|
||||
+ 'homeDirectory': f'/home/{TEST_USER_PREFIX}-new-{iteration}',
|
||||
+ 'userPassword': 'newpassword123'
|
||||
+ }
|
||||
+ try:
|
||||
+ new_user = users_collection.create(properties=new_user_props)
|
||||
+ new_user_usn = new_user.get_attr_val_int('entryusn')
|
||||
+ log.info(f"Created new entry '{new_user.get_attr_val_utf8('cn')}' with USN: {new_user_usn}")
|
||||
+ users.append(new_user) # Add to cleanup list
|
||||
+ except Exception as e:
|
||||
+ log.warning(f"Failed to create new user in iteration {iteration}: {e}")
|
||||
+
|
||||
+ # Final validation: Check all USN values are reasonable
|
||||
+ log.info("\nFinal USN validation")
|
||||
+ final_lastusn = root_dse.get_attr_val_int("lastusn")
|
||||
+
|
||||
+ for user in users:
|
||||
+ try:
|
||||
+ final_usn = user.get_attr_val_int('entryusn')
|
||||
+ cn_value = user.get_attr_val_utf8('cn')
|
||||
+ log.info(f"Final entryUSN for '{cn_value}': {final_usn}")
|
||||
+
|
||||
+ # Ensure no overflow occurred
|
||||
+ assert final_usn > 0, f"Final entryUSN should be positive for {cn_value}: {final_usn}"
|
||||
+ assert final_usn < MAX_USN_64BIT, f"EntryUSN overflow for {cn_value}: {final_usn}"
|
||||
+
|
||||
+ except ldap.NO_SUCH_OBJECT:
|
||||
+ log.info(f"User {user.dn} was deleted during test")
|
||||
+
|
||||
+ log.info(f"Final lastUSN: {final_lastusn}")
|
||||
+ assert final_lastusn > initial_lastusn, "LastUSN should have increased during test"
|
||||
+ assert final_lastusn < MAX_USN_64BIT, f"LastUSN overflow detected: {final_lastusn}"
|
||||
+
|
||||
+ log.info("EntryUSN overflow test completed successfully")
|
||||
+
|
||||
+
|
||||
+def test_entryusn_consistency_after_failed_adds(topology_st, setup_usn_test):
|
||||
+ """Test that entryUSN remains consistent after failed add operations
|
||||
+
|
||||
+ :id: e380ccad-527b-427e-a331-df5c41badbed
|
||||
+ :setup: Standalone instance with USN plugin enabled and test users
|
||||
+ :steps:
|
||||
+ 1. Record entryUSN values before failed add attempts
|
||||
+ 2. Attempt to add existing entries (should fail)
|
||||
+ 3. Verify entryUSN values haven't changed due to failed operations
|
||||
+ 4. Perform successful modify operations
|
||||
+ 5. Verify entryUSN increments correctly
|
||||
+ :expectedresults:
|
||||
+ 1. Initial entryUSN values recorded
|
||||
+ 2. Add operations fail as expected
|
||||
+ 3. EntryUSN values unchanged after failed adds
|
||||
+ 4. Modify operations succeed
|
||||
+ 5. EntryUSN values increment correctly without overflow
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+ users = setup_usn_test
|
||||
+
|
||||
+ log.info("Testing entryUSN consistency after failed adds")
|
||||
+
|
||||
+ # Record USN values before any operations
|
||||
+ pre_operation_usns = {}
|
||||
+ for user in users:
|
||||
+ usn = user.get_attr_val_int('entryusn')
|
||||
+ pre_operation_usns[user.dn] = usn
|
||||
+ log.info(f"Pre-operation entryUSN for {user.get_attr_val_utf8('cn')}: {usn}")
|
||||
+
|
||||
+ # Attempt to add existing entries - these should fail
|
||||
+ users_collection = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+
|
||||
+ for user in users:
|
||||
+ cn_value = user.get_attr_val_utf8('cn')
|
||||
+ log.info(f"Attempting to add existing user: {cn_value}")
|
||||
+
|
||||
+ user_attrs = {
|
||||
+ 'uid': user.get_attr_val_utf8('uid'),
|
||||
+ 'cn': cn_value,
|
||||
+ 'sn': user.get_attr_val_utf8('sn'),
|
||||
+ 'uidNumber': user.get_attr_val_utf8('uidNumber'),
|
||||
+ 'gidNumber': user.get_attr_val_utf8('gidNumber'),
|
||||
+ 'homeDirectory': user.get_attr_val_utf8('homeDirectory'),
|
||||
+ 'userPassword': 'password123'
|
||||
+ }
|
||||
+
|
||||
+ try:
|
||||
+ users_collection.create(properties=user_attrs)
|
||||
+ assert False, f"Add operation should have failed for existing user {cn_value}"
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ log.info(f"Got expected ALREADY_EXISTS for {cn_value}")
|
||||
+
|
||||
+ # Verify USN values haven't changed after failed adds
|
||||
+ log.info("Verifying entryUSN values after failed add operations...")
|
||||
+ for user in users:
|
||||
+ current_usn = user.get_attr_val_int('entryusn')
|
||||
+ expected_usn = pre_operation_usns[user.dn]
|
||||
+ cn_value = user.get_attr_val_utf8('cn')
|
||||
+
|
||||
+ assert current_usn == expected_usn, \
|
||||
+ f"EntryUSN changed after failed add for {cn_value}: was {expected_usn}, now {current_usn}"
|
||||
+ log.info(f"EntryUSN unchanged for {cn_value}: {current_usn}")
|
||||
+
|
||||
+ # Now perform successful modify operations
|
||||
+ log.info("Performing successful modify operations...")
|
||||
+ for i, user in enumerate(users):
|
||||
+ cn_value = user.get_attr_val_utf8('cn')
|
||||
+ old_usn = user.get_attr_val_int('entryusn')
|
||||
+
|
||||
+ # Modify the user
|
||||
+ user.replace('description', f'Consistency test modification {i + 1}')
|
||||
+
|
||||
+ new_usn = user.get_attr_val_int('entryusn')
|
||||
+ log.info(f"Modified {cn_value}: USN {old_usn} -> {new_usn}")
|
||||
+
|
||||
+ # Verify proper increment
|
||||
+ assert (new_usn - old_usn) == 1, f"EntryUSN should increment by 1 for {cn_value}: {old_usn} -> {new_usn}"
|
||||
+ assert new_usn < MAX_USN_64BIT, f"EntryUSN overflow for {cn_value}: {new_usn}"
|
||||
+
|
||||
+ log.info("EntryUSN consistency test completed successfully")
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main("-s %s" % CURRENT_FILE)
|
||||
\ No newline at end of file
|
||||
--
|
||||
2.49.0
|
||||
|
||||
172
0026-Issue-6594-Add-test-for-numSubordinates-replication-.patch
Normal file
172
0026-Issue-6594-Add-test-for-numSubordinates-replication-.patch
Normal file
@ -0,0 +1,172 @@
|
||||
From 36ca93e8ad2915bcfcae0367051e0f606386f861 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Mon, 28 Jul 2025 15:35:50 -0700
Subject: [PATCH] Issue 6594 - Add test for numSubordinates replication
 consistency with tombstones (#6862)

Description: Add a comprehensive test to verify that numSubordinates and
tombstoneNumSubordinates attributes are correctly replicated between
instances when tombstone entries are present.

Fixes: https://github.com/389ds/389-ds-base/issues/6594

Reviewed by: @progier389 (Thanks!)
---
 .../numsubordinates_replication_test.py | 144 ++++++++++++++++++
 1 file changed, 144 insertions(+)
 create mode 100644 dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py

diff --git a/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py b/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py
|
||||
new file mode 100644
|
||||
index 000000000..9ba10657d
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py
|
||||
@@ -0,0 +1,144 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+
|
||||
+import os
|
||||
+import logging
|
||||
+import pytest
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
+from lib389.replica import ReplicationManager
|
||||
+from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.topologies import topology_i2 as topo_i2
|
||||
+
|
||||
+
|
||||
+pytestmark = pytest.mark.tier1
|
||||
+
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
+if DEBUGGING:
|
||||
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+else:
|
||||
+ logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+
|
||||
+def test_numsubordinates_tombstone_replication_mismatch(topo_i2):
|
||||
+ """Test that numSubordinates values match between replicas after tombstone creation
|
||||
+
|
||||
+ :id: c43ecc7a-d706-42e8-9179-1ff7d0e7163a
|
||||
+ :setup: Two standalone instances
|
||||
+ :steps:
|
||||
+ 1. Create a container (organizational unit) on the first instance
|
||||
+ 2. Create a user object in that container
|
||||
+ 3. Delete the user object (this creates a tombstone)
|
||||
+ 4. Set up replication between the two instances
|
||||
+ 5. Wait for replication to complete
|
||||
+ 6. Check numSubordinates on both instances
|
||||
+ 7. Check tombstoneNumSubordinates on both instances
|
||||
+ 8. Verify that numSubordinates values match on both instances
|
||||
+ :expectedresults:
|
||||
+ 1. Container should be created successfully
|
||||
+ 2. User object should be created successfully
|
||||
+ 3. User object should be deleted successfully
|
||||
+ 4. Replication should be set up successfully
|
||||
+ 5. Replication should complete successfully
|
||||
+ 6. numSubordinates should be accessible on both instances
|
||||
+ 7. tombstoneNumSubordinates should be accessible on both instances
|
||||
+ 8. numSubordinates values should match on both instances
|
||||
+ """
|
||||
+
|
||||
+ instance1 = topo_i2.ins["standalone1"]
|
||||
+ instance2 = topo_i2.ins["standalone2"]
|
||||
+
|
||||
+ log.info("Create a container (organizational unit) on the first instance")
|
||||
+ ous1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX)
|
||||
+ container = ous1.create(properties={
|
||||
+ 'ou': 'test_container',
|
||||
+ 'description': 'Test container for numSubordinates replication test'
|
||||
+ })
|
||||
+ container_rdn = container.rdn
|
||||
+ log.info(f"Created container: {container_rdn}")
|
||||
+
|
||||
+ log.info("Create a user object in that container")
|
||||
+ users1 = UserAccounts(instance1, DEFAULT_SUFFIX, rdn=f"ou={container_rdn}")
|
||||
+ test_user = users1.create_test_user(uid=1001)
|
||||
+ log.info(f"Created user: {test_user.dn}")
|
||||
+
|
||||
+ log.info("Checking initial numSubordinates on container")
|
||||
+ container_obj1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX).get(container_rdn)
|
||||
+ initial_numsubordinates = container_obj1.get_attr_val_int('numSubordinates')
|
||||
+ log.info(f"Initial numSubordinates: {initial_numsubordinates}")
|
||||
+ assert initial_numsubordinates == 1
|
||||
+
|
||||
+ log.info("Delete the user object (this creates a tombstone)")
|
||||
+ test_user.delete()
|
||||
+
|
||||
+ log.info("Checking numSubordinates after deletion")
|
||||
+ after_delete_numsubordinates = container_obj1.get_attr_val_int('numSubordinates')
|
||||
+ log.info(f"numSubordinates after deletion: {after_delete_numsubordinates}")
|
||||
+
|
||||
+ log.info("Checking tombstoneNumSubordinates after deletion")
|
||||
+ try:
|
||||
+ tombstone_numsubordinates = container_obj1.get_attr_val_int('tombstoneNumSubordinates')
|
||||
+ log.info(f"tombstoneNumSubordinates: {tombstone_numsubordinates}")
|
||||
+ except Exception as e:
|
||||
+ log.info(f"tombstoneNumSubordinates not found or error: {e}")
|
||||
+ tombstone_numsubordinates = 0
|
||||
+
|
||||
+ log.info("Set up replication between the two instances")
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+ repl.create_first_supplier(instance1)
|
||||
+ repl.join_supplier(instance1, instance2)
|
||||
+
|
||||
+ log.info("Wait for replication to complete")
|
||||
+ repl.wait_for_replication(instance1, instance2)
|
||||
+
|
||||
+ log.info("Check numSubordinates on both instances")
|
||||
+ container_obj1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX).get(container_rdn)
|
||||
+ numsubordinates_instance1 = container_obj1.get_attr_val_int('numSubordinates')
|
||||
+ log.info(f"numSubordinates on instance1: {numsubordinates_instance1}")
|
||||
+
|
||||
+ container_obj2 = OrganizationalUnits(instance2, DEFAULT_SUFFIX).get(container_rdn)
|
||||
+ numsubordinates_instance2 = container_obj2.get_attr_val_int('numSubordinates')
|
||||
+ log.info(f"numSubordinates on instance2: {numsubordinates_instance2}")
|
||||
+
|
||||
+ log.info("Check tombstoneNumSubordinates on both instances")
|
||||
+ try:
|
||||
+ tombstone_numsubordinates_instance1 = container_obj1.get_attr_val_int('tombstoneNumSubordinates')
|
||||
+ log.info(f"tombstoneNumSubordinates on instance1: {tombstone_numsubordinates_instance1}")
|
||||
+ except Exception as e:
|
||||
+ log.info(f"tombstoneNumSubordinates not found on instance1: {e}")
|
||||
+ tombstone_numsubordinates_instance1 = 0
|
||||
+
|
||||
+ try:
|
||||
+ tombstone_numsubordinates_instance2 = container_obj2.get_attr_val_int('tombstoneNumSubordinates')
|
||||
+ log.info(f"tombstoneNumSubordinates on instance2: {tombstone_numsubordinates_instance2}")
|
||||
+ except Exception as e:
|
||||
+ log.info(f"tombstoneNumSubordinates not found on instance2: {e}")
|
||||
+ tombstone_numsubordinates_instance2 = 0
|
||||
+
|
||||
+ log.info("Verify that numSubordinates values match on both instances")
|
||||
+ log.info(f"Comparison: instance1 numSubordinates={numsubordinates_instance1}, "
|
||||
+ f"instance2 numSubordinates={numsubordinates_instance2}")
|
||||
+ log.info(f"Comparison: instance1 tombstoneNumSubordinates={tombstone_numsubordinates_instance1}, "
|
||||
+ f"instance2 tombstoneNumSubordinates={tombstone_numsubordinates_instance2}")
|
||||
+
|
||||
+ assert numsubordinates_instance1 == numsubordinates_instance2, (
|
||||
+ f"numSubordinates mismatch: instance1 has {numsubordinates_instance1}, "
|
||||
+ f"instance2 has {numsubordinates_instance2}. "
|
||||
+ )
|
||||
+ assert tombstone_numsubordinates_instance1 == tombstone_numsubordinates_instance2, (
|
||||
+ f"tombstoneNumSubordinates mismatch: instance1 has {tombstone_numsubordinates_instance1}, "
|
||||
+ f"instance2 has {tombstone_numsubordinates_instance2}. "
|
||||
+ )
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main("-s %s" % CURRENT_FILE)
|
||||
\ No newline at end of file
|
||||
--
|
||||
2.49.0
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
From 18a807e0e23b1160ea61e05e721da9fbd0c560b1 Mon Sep 17 00:00:00 2001
|
||||
From 14b1407abc196df947fa50d48946ed072e4ea772 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Mon, 28 Jul 2025 15:41:29 -0700
|
||||
Subject: [PATCH] Issue 6884 - Mask password hashes in audit logs (#6885)
|
||||
@ -14,23 +14,20 @@ password masking works correctly across all log formats and operation types.
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6884
|
||||
|
||||
Reviewed by: @mreynolds389, @vashirov (Thanks!!)
|
||||
|
||||
(cherry picked from commit 24f9aea1ae7e29bd885212825dc52d2a5db08a03)
|
||||
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
---
|
||||
.../logging/audit_password_masking_test.py | 457 ++++++++++++++++++
|
||||
ldap/servers/slapd/auditlog.c | 144 +++++-
|
||||
.../logging/audit_password_masking_test.py | 501 ++++++++++++++++++
|
||||
ldap/servers/slapd/auditlog.c | 170 +++++-
|
||||
ldap/servers/slapd/slapi-private.h | 1 +
|
||||
src/lib389/lib389/chaining.py | 3 +-
|
||||
4 files changed, 586 insertions(+), 19 deletions(-)
|
||||
4 files changed, 652 insertions(+), 23 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
new file mode 100644
|
||||
index 000000000..ae379cbba
|
||||
index 000000000..3b6a54849
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
@@ -0,0 +1,457 @@
|
||||
@@ -0,0 +1,501 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
@ -48,6 +45,7 @@ index 000000000..ae379cbba
|
||||
+from lib389._constants import DEFAULT_SUFFIX, DN_DM, PW_DM
|
||||
+from lib389.topologies import topology_m2 as topo
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.dirsrv_log import DirsrvAuditJSONLog
|
||||
+from lib389.plugins import ChainingBackendPlugin
|
||||
+from lib389.chaining import ChainingLinks
|
||||
+from lib389.agreement import Agreements
|
||||
@ -64,7 +62,9 @@ index 000000000..ae379cbba
|
||||
+
|
||||
+def setup_audit_logging(inst, log_format='default', display_attrs=None):
|
||||
+ """Configure audit logging settings"""
|
||||
+ inst.config.replace('nsslapd-auditlog-logbuffering', 'off')
|
||||
+ inst.config.replace('nsslapd-auditlog-logging-enabled', 'on')
|
||||
+ inst.config.replace('nsslapd-auditlog-log-format', log_format)
|
||||
+
|
||||
+ if display_attrs is not None:
|
||||
+ inst.config.replace('nsslapd-auditlog-display-attrs', display_attrs)
|
||||
@ -75,7 +75,7 @@ index 000000000..ae379cbba
|
||||
+def check_password_masked(inst, log_format, expected_password, actual_password):
|
||||
+ """Helper function to check password masking in audit logs"""
|
||||
+
|
||||
+ inst.restart() # Flush the logs
|
||||
+ time.sleep(1) # Allow log to flush
|
||||
+
|
||||
+ # List of all password/credential attributes that should be masked
|
||||
+ password_attributes = [
|
||||
@ -90,25 +90,50 @@ index 000000000..ae379cbba
|
||||
+ user_password_scheme = inst.config.get_attr_val_utf8('passwordStorageScheme')
|
||||
+ root_password_scheme = inst.config.get_attr_val_utf8('nsslapd-rootpwstoragescheme')
|
||||
+
|
||||
+ # Check LDIF format logs
|
||||
+ found_masked = False
|
||||
+ found_actual = False
|
||||
+ found_hashed = False
|
||||
+ if log_format == 'json':
|
||||
+ # Check JSON format logs
|
||||
+ audit_log = DirsrvAuditJSONLog(inst)
|
||||
+ log_lines = audit_log.readlines()
|
||||
+
|
||||
+ # Check each password attribute for masked password
|
||||
+ for attr in password_attributes:
|
||||
+ if inst.ds_audit_log.match(f"{attr}: {re.escape(expected_password)}"):
|
||||
+ found_masked = True
|
||||
+ if inst.ds_audit_log.match(f"{attr}: {actual_password}"):
|
||||
+ found_actual = True
|
||||
+ found_masked = False
|
||||
+ found_actual = False
|
||||
+ found_hashed = False
|
||||
+
|
||||
+ # Check for hashed passwords in LDIF format
|
||||
+ if user_password_scheme:
|
||||
+ if inst.ds_audit_log.match(f"userPassword: {{{user_password_scheme}}}"):
|
||||
+ found_hashed = True
|
||||
+ if root_password_scheme:
|
||||
+ if inst.ds_audit_log.match(f"nsslapd-rootpw: {{{root_password_scheme}}}"):
|
||||
+ found_hashed = True
|
||||
+ for line in log_lines:
|
||||
+ # Check if any password attribute is present in the line
|
||||
+ for attr in password_attributes:
|
||||
+ if attr in line:
|
||||
+ if expected_password in line:
|
||||
+ found_masked = True
|
||||
+ if actual_password in line:
|
||||
+ found_actual = True
|
||||
+ # Check for password scheme indicators (hashed passwords)
|
||||
+ if user_password_scheme and f'{{{user_password_scheme}}}' in line:
|
||||
+ found_hashed = True
|
||||
+ if root_password_scheme and f'{{{root_password_scheme}}}' in line:
|
||||
+ found_hashed = True
|
||||
+ break # Found a password attribute, no need to check others for this line
|
||||
+
|
||||
+ else:
|
||||
+ # Check LDIF format logs
|
||||
+ found_masked = False
|
||||
+ found_actual = False
|
||||
+ found_hashed = False
|
||||
+
|
||||
+ # Check each password attribute for masked password
|
||||
+ for attr in password_attributes:
|
||||
+ if inst.ds_audit_log.match(f"{attr}: {re.escape(expected_password)}"):
|
||||
+ found_masked = True
|
||||
+ if inst.ds_audit_log.match(f"{attr}: {actual_password}"):
|
||||
+ found_actual = True
|
||||
+
|
||||
+ # Check for hashed passwords in LDIF format
|
||||
+ if user_password_scheme:
|
||||
+ if inst.ds_audit_log.match(f"userPassword: {{{user_password_scheme}}}"):
|
||||
+ found_hashed = True
|
||||
+ if root_password_scheme:
|
||||
+ if inst.ds_audit_log.match(f"nsslapd-rootpw: {{{root_password_scheme}}}"):
|
||||
+ found_hashed = True
|
||||
+
|
||||
+ # Delete audit logs to avoid interference with other tests
|
||||
+ # We need to reset the root password to default as deleteAuditLogs()
|
||||
@ -124,6 +149,9 @@ index 000000000..ae379cbba
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "userPassword"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "userPassword")
|
||||
+])
|
||||
+def test_password_masking_add_operation(topo, log_format, display_attrs):
|
||||
+ """Test password masking in ADD operations
|
||||
@ -177,6 +205,9 @@ index 000000000..ae379cbba
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "userPassword"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "userPassword")
|
||||
+])
|
||||
+def test_password_masking_modify_operation(topo, log_format, display_attrs):
|
||||
+ """Test password masking in MODIFY operations
|
||||
@ -243,6 +274,9 @@ index 000000000..ae379cbba
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "nsslapd-rootpw"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "nsslapd-rootpw")
|
||||
+])
|
||||
+def test_password_masking_rootpw_modify_operation(topo, log_format, display_attrs):
|
||||
+ """Test password masking for nsslapd-rootpw MODIFY operations
|
||||
@ -272,7 +306,6 @@ index 000000000..ae379cbba
|
||||
+ try:
|
||||
+ dm.change_password(TEST_PASSWORD)
|
||||
+ dm.rebind(TEST_PASSWORD)
|
||||
+ dm.change_password(PW_DM)
|
||||
+
|
||||
+ found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
|
||||
+ assert found_masked, f"Masked root password not found in {log_format} MODIFY operation (first root password)"
|
||||
@ -281,7 +314,6 @@ index 000000000..ae379cbba
|
||||
+
|
||||
+ dm.change_password(TEST_PASSWORD_2)
|
||||
+ dm.rebind(TEST_PASSWORD_2)
|
||||
+ dm.change_password(PW_DM)
|
||||
+
|
||||
+ found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
|
||||
+ assert found_masked_2, f"Masked root password not found in {log_format} MODIFY operation (second root password)"
|
||||
@ -297,6 +329,9 @@ index 000000000..ae379cbba
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "nsmultiplexorcredentials"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "nsmultiplexorcredentials")
|
||||
+])
|
||||
+def test_password_masking_multiplexor_credentials(topo, log_format, display_attrs):
|
||||
+ """Test password masking for nsmultiplexorcredentials in chaining/multiplexor configurations
|
||||
@ -365,6 +400,9 @@ index 000000000..ae379cbba
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "nsDS5ReplicaCredentials"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "nsDS5ReplicaCredentials")
|
||||
+])
|
||||
+def test_password_masking_replica_credentials(topo, log_format, display_attrs):
|
||||
+ """Test password masking for nsDS5ReplicaCredentials in replication agreements
|
||||
@ -426,6 +464,9 @@ index 000000000..ae379cbba
|
||||
+ ("default", None),
|
||||
+ ("default", "*"),
|
||||
+ ("default", "nsDS5ReplicaBootstrapCredentials"),
|
||||
+ ("json", None),
|
||||
+ ("json", "*"),
|
||||
+ ("json", "nsDS5ReplicaBootstrapCredentials")
|
||||
+])
|
||||
+def test_password_masking_bootstrap_credentials(topo, log_format, display_attrs):
|
||||
+ """Test password masking for nsDS5ReplicaCredentials and nsDS5ReplicaBootstrapCredentials in replication agreements
|
||||
@ -490,10 +531,10 @@ index 000000000..ae379cbba
|
||||
+ pytest.main(["-s", CURRENT_FILE])
|
||||
\ No newline at end of file
|
||||
diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
|
||||
index 0597ecc6f..c41415725 100644
|
||||
index 1121aef35..7b591e072 100644
|
||||
--- a/ldap/servers/slapd/auditlog.c
|
||||
+++ b/ldap/servers/slapd/auditlog.c
|
||||
@@ -37,6 +37,89 @@ static void write_audit_file(Slapi_Entry *entry, int logtype, int optype, const
|
||||
@@ -39,6 +39,89 @@ static void write_audit_file(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
|
||||
static const char *modrdn_changes[4];
|
||||
|
||||
@ -583,11 +624,12 @@ index 0597ecc6f..c41415725 100644
|
||||
void
|
||||
write_audit_log_entry(Slapi_PBlock *pb)
|
||||
{
|
||||
@@ -248,7 +331,21 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
|
||||
@@ -282,10 +365,31 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object
|
||||
{
|
||||
slapi_entry_attr_find(entry, req_attr, &entry_attr);
|
||||
if (entry_attr) {
|
||||
- log_entry_attr(entry_attr, req_attr, l);
|
||||
- if (use_json) {
|
||||
- log_entry_attr_json(entry_attr, req_attr, id_list);
|
||||
+ if (strcmp(req_attr, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) {
|
||||
+ /* Do not write the unhashed clear-text password */
|
||||
+ continue;
|
||||
@ -596,17 +638,28 @@ index 0597ecc6f..c41415725 100644
|
||||
+ /* Check if this is a password attribute that needs masking */
|
||||
+ if (is_password_attribute(req_attr)) {
|
||||
+ /* userpassword/rootdn password - mask the value */
|
||||
+ addlenstr(l, "#");
|
||||
+ addlenstr(l, req_attr);
|
||||
+ addlenstr(l, ": **********************\n");
|
||||
+ } else {
|
||||
+ if (use_json) {
|
||||
+ json_object *secret_obj = json_object_new_object();
|
||||
+ json_object_object_add(secret_obj, req_attr,
|
||||
+ json_object_new_string("**********************"));
|
||||
+ json_object_array_add(id_list, secret_obj);
|
||||
+ } else {
|
||||
+ addlenstr(l, "#");
|
||||
+ addlenstr(l, req_attr);
|
||||
+ addlenstr(l, ": **********************\n");
|
||||
+ }
|
||||
} else {
|
||||
- log_entry_attr(entry_attr, req_attr, l);
|
||||
+ /* Regular attribute - log normally */
|
||||
+ log_entry_attr(entry_attr, req_attr, l);
|
||||
+ }
|
||||
+ if (use_json) {
|
||||
+ log_entry_attr_json(entry_attr, req_attr, id_list);
|
||||
+ } else {
|
||||
+ log_entry_attr(entry_attr, req_attr, l);
|
||||
+ }
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -262,13 +359,11 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
|
||||
@@ -300,9 +404,7 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -615,14 +668,55 @@ index 0597ecc6f..c41415725 100644
|
||||
- {
|
||||
+ if (is_password_attribute(attr)) {
|
||||
/* userpassword/rootdn password - mask the value */
|
||||
addlenstr(l, "#");
|
||||
addlenstr(l, attr);
|
||||
- addlenstr(l, ": ****************************\n");
|
||||
+ addlenstr(l, ": **********************\n");
|
||||
if (use_json) {
|
||||
json_object *secret_obj = json_object_new_object();
|
||||
@@ -312,7 +414,7 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object
|
||||
} else {
|
||||
addlenstr(l, "#");
|
||||
addlenstr(l, attr);
|
||||
- addlenstr(l, ": ****************************\n");
|
||||
+ addlenstr(l, ": **********************\n");
|
||||
}
|
||||
continue;
|
||||
}
|
||||
log_entry_attr(entry_attr, attr, l);
|
||||
@@ -354,6 +449,10 @@ write_audit_file(
|
||||
@@ -481,6 +583,9 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
}
|
||||
}
|
||||
|
||||
+ /* Check if this is a password attribute that needs masking */
|
||||
+ int is_password_attr = is_password_attribute(mods[j]->mod_type);
|
||||
+
|
||||
mod = json_object_new_object();
|
||||
switch (operationtype) {
|
||||
case LDAP_MOD_ADD:
|
||||
@@ -505,7 +610,12 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
json_object *val_list = NULL;
|
||||
val_list = json_object_new_array();
|
||||
for (size_t i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) {
|
||||
- json_object_array_add(val_list, json_object_new_string(mods[j]->mod_bvalues[i]->bv_val));
|
||||
+ if (is_password_attr) {
|
||||
+ /* Mask password values */
|
||||
+ json_object_array_add(val_list, json_object_new_string("**********************"));
|
||||
+ } else {
|
||||
+ json_object_array_add(val_list, json_object_new_string(mods[j]->mod_bvalues[i]->bv_val));
|
||||
+ }
|
||||
}
|
||||
json_object_object_add(mod, "values", val_list);
|
||||
}
|
||||
@@ -517,8 +627,11 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
|
||||
}
|
||||
case SLAPI_OPERATION_ADD: {
|
||||
int len;
|
||||
+
|
||||
e = change;
|
||||
- tmp = slapi_entry2str(e, &len);
|
||||
+
|
||||
+ /* Create a masked string representation for password attributes */
|
||||
+ tmp = create_masked_entry_string(e, &len);
|
||||
tmpsave = tmp;
|
||||
while ((tmp = strchr(tmp, '\n')) != NULL) {
|
||||
tmp++;
|
||||
@@ -665,6 +778,10 @@ write_audit_file(
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -633,7 +727,7 @@ index 0597ecc6f..c41415725 100644
|
||||
switch (operationtype) {
|
||||
case LDAP_MOD_ADD:
|
||||
addlenstr(l, "add: ");
|
||||
@@ -378,18 +477,27 @@ write_audit_file(
|
||||
@@ -689,18 +806,27 @@ write_audit_file(
|
||||
break;
|
||||
}
|
||||
if (operationtype != LDAP_MOD_IGNORE) {
|
||||
@ -673,7 +767,7 @@ index 0597ecc6f..c41415725 100644
|
||||
}
|
||||
}
|
||||
addlenstr(l, "-\n");
|
||||
@@ -400,7 +508,7 @@ write_audit_file(
|
||||
@@ -711,7 +837,7 @@ write_audit_file(
|
||||
e = change;
|
||||
addlenstr(l, attr_changetype);
|
||||
addlenstr(l, ": add\n");
|
||||
@ -683,10 +777,10 @@ index 0597ecc6f..c41415725 100644
|
||||
while ((tmp = strchr(tmp, '\n')) != NULL) {
|
||||
tmp++;
|
||||
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
|
||||
index dfb0e272a..af2180e55 100644
|
||||
index e9abf8b75..02f22fd2d 100644
|
||||
--- a/ldap/servers/slapd/slapi-private.h
|
||||
+++ b/ldap/servers/slapd/slapi-private.h
|
||||
@@ -843,6 +843,7 @@ void task_cleanup(void);
|
||||
@@ -848,6 +848,7 @@ void task_cleanup(void);
|
||||
/* for reversible encyrption */
|
||||
#define SLAPI_MB_CREDENTIALS "nsmultiplexorcredentials"
|
||||
#define SLAPI_REP_CREDENTIALS "nsds5ReplicaCredentials"
|
||||
@ -716,5 +810,5 @@ index 533b83ebf..33ae78c8b 100644
|
||||
|
||||
class ChainingLinks(DSLdapObjects):
|
||||
--
|
||||
2.51.1
|
||||
2.49.0
|
||||
|
||||
1719
0028-Issue-6897-Fix-disk-monitoring-test-failures-and-imp.patch
Normal file
File diff suppressed because it is too large
262
0029-Issue-6778-Memory-leak-in-roles_cache_create_object_.patch
Normal file
@ -0,0 +1,262 @@
|
||||
From c80554be0cea0eb5f2ab6d1e6e1fcef098304f69 Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Wed, 16 Jul 2025 11:22:30 +0200
|
||||
Subject: [PATCH] Issue 6778 - Memory leak in
|
||||
roles_cache_create_object_from_entry part 2
|
||||
|
||||
Bug Description:
|
||||
Everytime a role with scope DN is processed, we leak rolescopeDN.
|
||||
|
||||
Fix Description:
|
||||
* Initialize all pointer variables to NULL
|
||||
* Add additional NULL checks
|
||||
* Free rolescopeDN
|
||||
* Move test_rewriter_with_invalid_filter before the DB contains 90k entries
|
||||
* Use task.wait() for import task completion instead of parsing logs,
|
||||
increase the timeout
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6778
|
||||
|
||||
Reviewed by: @progier389 (Thanks!)
|
||||
---
|
||||
dirsrvtests/tests/suites/roles/basic_test.py | 164 +++++++++----------
|
||||
ldap/servers/plugins/roles/roles_cache.c | 10 +-
|
||||
2 files changed, 82 insertions(+), 92 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
index d92d6f0c3..ec208bae9 100644
|
||||
--- a/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
@@ -510,6 +510,76 @@ def test_vattr_on_managed_role(topo, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+def test_rewriter_with_invalid_filter(topo, request):
|
||||
+ """Test that server does not crash when having
|
||||
+ invalid filter in filtered role
|
||||
+
|
||||
+ :id: 5013b0b2-0af6-11f0-8684-482ae39447e5
|
||||
+ :setup: standalone server
|
||||
+ :steps:
|
||||
+ 1. Setup filtered role with good filter
|
||||
+ 2. Setup nsrole rewriter
|
||||
+ 3. Restart the server
|
||||
+ 4. Search for entries
|
||||
+ 5. Setup filtered role with bad filter
|
||||
+ 6. Search for entries
|
||||
+ :expectedresults:
|
||||
+ 1. Operation should succeed
|
||||
+ 2. Operation should succeed
|
||||
+ 3. Operation should succeed
|
||||
+ 4. Operation should succeed
|
||||
+ 5. Operation should succeed
|
||||
+ 6. Operation should succeed
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ entries = []
|
||||
+
|
||||
+ def fin():
|
||||
+ inst.start()
|
||||
+ for entry in entries:
|
||||
+ entry.delete()
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Setup filtered role
|
||||
+ roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}')
|
||||
+ filter_ko = '(&((objectClass=top)(objectClass=nsPerson))'
|
||||
+ filter_ok = '(&(objectClass=top)(objectClass=nsPerson))'
|
||||
+ role_properties = {
|
||||
+ 'cn': 'TestFilteredRole',
|
||||
+ 'nsRoleFilter': filter_ok,
|
||||
+ 'description': 'Test good filter',
|
||||
+ }
|
||||
+ role = roles.create(properties=role_properties)
|
||||
+ entries.append(role)
|
||||
+
|
||||
+ # Setup nsrole rewriter
|
||||
+ rewriters = Rewriters(inst)
|
||||
+ rewriter_properties = {
|
||||
+ "cn": "nsrole",
|
||||
+ "nsslapd-libpath": 'libroles-plugin',
|
||||
+ "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter',
|
||||
+ }
|
||||
+ rewriter = rewriters.ensure_state(properties=rewriter_properties)
|
||||
+ entries.append(rewriter)
|
||||
+
|
||||
+ # Restart thge instance
|
||||
+ inst.restart()
|
||||
+
|
||||
+ # Search for entries
|
||||
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
+
|
||||
+ # Set bad filter
|
||||
+ role_properties = {
|
||||
+ 'cn': 'TestFilteredRole',
|
||||
+ 'nsRoleFilter': filter_ko,
|
||||
+ 'description': 'Test bad filter',
|
||||
+ }
|
||||
+ role.ensure_state(properties=role_properties)
|
||||
+
|
||||
+ # Search for entries
|
||||
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
+
|
||||
+
|
||||
def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
"""Test that filter components containing 'nsrole=xxx'
|
||||
are reworked if xxx is either a filtered role or a managed
|
||||
@@ -581,17 +651,11 @@ def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
PARENT="ou=people,%s" % DEFAULT_SUFFIX
|
||||
dbgen_users(topo.standalone, 90000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT)
|
||||
|
||||
- # online import
|
||||
+ # Online import
|
||||
import_task = ImportTask(topo.standalone)
|
||||
import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
|
||||
- # Check for up to 200sec that the completion
|
||||
- for i in range(1, 20):
|
||||
- if len(topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9000.*')) > 0:
|
||||
- break
|
||||
- time.sleep(10)
|
||||
- import_complete = topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9000.*')
|
||||
- assert (len(import_complete) == 1)
|
||||
-
|
||||
+ import_task.wait(timeout=400)
|
||||
+ assert import_task.get_exit_code() == 0
|
||||
# Restart server
|
||||
topo.standalone.restart()
|
||||
|
||||
@@ -715,17 +779,11 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
PARENT="ou=people,%s" % DEFAULT_SUFFIX
|
||||
dbgen_users(topo.standalone, 91000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT)
|
||||
|
||||
- # online import
|
||||
+ # Online import
|
||||
import_task = ImportTask(topo.standalone)
|
||||
import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
|
||||
- # Check for up to 200sec that the completion
|
||||
- for i in range(1, 20):
|
||||
- if len(topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9100.*')) > 0:
|
||||
- break
|
||||
- time.sleep(10)
|
||||
- import_complete = topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9100.*')
|
||||
- assert (len(import_complete) == 1)
|
||||
-
|
||||
+ import_task.wait(timeout=400)
|
||||
+ assert import_task.get_exit_code() == 0
|
||||
# Restart server
|
||||
topo.standalone.restart()
|
||||
|
||||
@@ -769,76 +827,6 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
request.addfinalizer(fin)
|
||||
|
||||
|
||||
-def test_rewriter_with_invalid_filter(topo, request):
|
||||
- """Test that server does not crash when having
|
||||
- invalid filter in filtered role
|
||||
-
|
||||
- :id: 5013b0b2-0af6-11f0-8684-482ae39447e5
|
||||
- :setup: standalone server
|
||||
- :steps:
|
||||
- 1. Setup filtered role with good filter
|
||||
- 2. Setup nsrole rewriter
|
||||
- 3. Restart the server
|
||||
- 4. Search for entries
|
||||
- 5. Setup filtered role with bad filter
|
||||
- 6. Search for entries
|
||||
- :expectedresults:
|
||||
- 1. Operation should succeed
|
||||
- 2. Operation should succeed
|
||||
- 3. Operation should succeed
|
||||
- 4. Operation should succeed
|
||||
- 5. Operation should succeed
|
||||
- 6. Operation should succeed
|
||||
- """
|
||||
- inst = topo.standalone
|
||||
- entries = []
|
||||
-
|
||||
- def fin():
|
||||
- inst.start()
|
||||
- for entry in entries:
|
||||
- entry.delete()
|
||||
- request.addfinalizer(fin)
|
||||
-
|
||||
- # Setup filtered role
|
||||
- roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}')
|
||||
- filter_ko = '(&((objectClass=top)(objectClass=nsPerson))'
|
||||
- filter_ok = '(&(objectClass=top)(objectClass=nsPerson))'
|
||||
- role_properties = {
|
||||
- 'cn': 'TestFilteredRole',
|
||||
- 'nsRoleFilter': filter_ok,
|
||||
- 'description': 'Test good filter',
|
||||
- }
|
||||
- role = roles.create(properties=role_properties)
|
||||
- entries.append(role)
|
||||
-
|
||||
- # Setup nsrole rewriter
|
||||
- rewriters = Rewriters(inst)
|
||||
- rewriter_properties = {
|
||||
- "cn": "nsrole",
|
||||
- "nsslapd-libpath": 'libroles-plugin',
|
||||
- "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter',
|
||||
- }
|
||||
- rewriter = rewriters.ensure_state(properties=rewriter_properties)
|
||||
- entries.append(rewriter)
|
||||
-
|
||||
- # Restart thge instance
|
||||
- inst.restart()
|
||||
-
|
||||
- # Search for entries
|
||||
- entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
-
|
||||
- # Set bad filter
|
||||
- role_properties = {
|
||||
- 'cn': 'TestFilteredRole',
|
||||
- 'nsRoleFilter': filter_ko,
|
||||
- 'description': 'Test bad filter',
|
||||
- }
|
||||
- role.ensure_state(properties=role_properties)
|
||||
-
|
||||
- # Search for entries
|
||||
- entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
-
|
||||
-
|
||||
if __name__ == "__main__":
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main("-s -v %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c
|
||||
index 3e1c5b429..05cabc3a3 100644
|
||||
--- a/ldap/servers/plugins/roles/roles_cache.c
|
||||
+++ b/ldap/servers/plugins/roles/roles_cache.c
|
||||
@@ -1117,16 +1117,17 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
|
||||
rolescopeDN = slapi_entry_attr_get_charptr(role_entry, ROLE_SCOPE_DN);
|
||||
if (rolescopeDN) {
|
||||
- Slapi_DN *rolescopeSDN;
|
||||
- Slapi_DN *top_rolescopeSDN, *top_this_roleSDN;
|
||||
+ Slapi_DN *rolescopeSDN = NULL;
|
||||
+ Slapi_DN *top_rolescopeSDN = NULL;
|
||||
+ Slapi_DN *top_this_roleSDN = NULL;
|
||||
|
||||
/* Before accepting to use this scope, first check if it belongs to the same suffix */
|
||||
rolescopeSDN = slapi_sdn_new_dn_byref(rolescopeDN);
|
||||
- if ((strlen((char *)slapi_sdn_get_ndn(rolescopeSDN)) > 0) &&
|
||||
+ if (rolescopeSDN && (strlen((char *)slapi_sdn_get_ndn(rolescopeSDN)) > 0) &&
|
||||
(slapi_dn_syntax_check(NULL, (char *)slapi_sdn_get_ndn(rolescopeSDN), 1) == 0)) {
|
||||
top_rolescopeSDN = roles_cache_get_top_suffix(rolescopeSDN);
|
||||
top_this_roleSDN = roles_cache_get_top_suffix(this_role->dn);
|
||||
- if (slapi_sdn_compare(top_rolescopeSDN, top_this_roleSDN) == 0) {
|
||||
+ if (top_rolescopeSDN && top_this_roleSDN && slapi_sdn_compare(top_rolescopeSDN, top_this_roleSDN) == 0) {
|
||||
/* rolescopeDN belongs to the same suffix as the role, we can use this scope */
|
||||
this_role->rolescopedn = rolescopeSDN;
|
||||
} else {
|
||||
@@ -1148,6 +1149,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
|
||||
rolescopeDN);
|
||||
slapi_sdn_free(&rolescopeSDN);
|
||||
}
|
||||
+ slapi_ch_free_string(&rolescopeDN);
|
||||
}
|
||||
|
||||
/* Depending upon role type, pull out the remaining information we need */
|
||||
--
|
||||
2.49.0
|
||||
|
||||
@ -0,0 +1,64 @@
From 2988a4ad320b7a4870cfa055bf7afd009424a15f Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Mon, 28 Jul 2025 13:16:10 +0200
Subject: [PATCH] Issue 6901 - Update changelog trimming logging - fix tests

Description:
Update changelog_trimming_test for the new error message.

Fixes: https://github.com/389ds/389-ds-base/issues/6901

Reviewed by: @progier389, @aadhikar (Thanks!)
---
.../suites/replication/changelog_trimming_test.py | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/dirsrvtests/tests/suites/replication/changelog_trimming_test.py b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py
index 2d70d328e..27d19e8fd 100644
--- a/dirsrvtests/tests/suites/replication/changelog_trimming_test.py
+++ b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py
@@ -110,7 +110,7 @@ def test_max_age(topo, setup_max_age):
do_mods(supplier, 10)

time.sleep(1) # Trimming should not have occurred
- if supplier.searchErrorsLog("Trimmed") is True:
+ if supplier.searchErrorsLog("trimmed") is True:
log.fatal('Trimming event unexpectedly occurred')
assert False

@@ -120,12 +120,12 @@ def test_max_age(topo, setup_max_age):
cl.set_trim_interval('5')

time.sleep(3) # Trimming should not have occurred
- if supplier.searchErrorsLog("Trimmed") is True:
+ if supplier.searchErrorsLog("trimmed") is True:
log.fatal('Trimming event unexpectedly occurred')
assert False

time.sleep(3) # Trimming should have occurred
- if supplier.searchErrorsLog("Trimmed") is False:
+ if supplier.searchErrorsLog("trimmed") is False:
log.fatal('Trimming event did not occur')
assert False

@@ -159,7 +159,7 @@ def test_max_entries(topo, setup_max_entries):
do_mods(supplier, 10)

time.sleep(1) # Trimming should have occurred
- if supplier.searchErrorsLog("Trimmed") is True:
+ if supplier.searchErrorsLog("trimmed") is True:
log.fatal('Trimming event unexpectedly occurred')
assert False

@@ -169,7 +169,7 @@ def test_max_entries(topo, setup_max_entries):
cl.set_trim_interval('5')

time.sleep(6) # Trimming should have occurred
- if supplier.searchErrorsLog("Trimmed") is False:
+ if supplier.searchErrorsLog("trimmed") is False:
log.fatal('Trimming event did not occur')
assert False

--
2.49.0

@ -0,0 +1,32 @@
From 36c97c19dadda7f09a1e2b3d838e12fbdc39af23 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Mon, 28 Jul 2025 13:18:26 +0200
Subject: [PATCH] Issue 6181 - RFE - Allow system to manage uid/gid at startup

Description:
Expand CapabilityBoundingSet to include CAP_FOWNER

Relates: https://github.com/389ds/389-ds-base/issues/6181
Relates: https://github.com/389ds/389-ds-base/issues/6906

Reviewed by: @progier389 (Thanks!)
---
wrappers/systemd.template.service.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in
index ada608c86..8d2b96c7e 100644
--- a/wrappers/systemd.template.service.in
+++ b/wrappers/systemd.template.service.in
@@ -29,7 +29,7 @@ MemoryAccounting=yes

# Allow non-root instances to bind to low ports.
AmbientCapabilities=CAP_NET_BIND_SERVICE
-CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_SETUID CAP_SETGID CAP_DAC_OVERRIDE CAP_CHOWN
+CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_SETUID CAP_SETGID CAP_DAC_OVERRIDE CAP_CHOWN CAP_FOWNER

PrivateTmp=on
# https://en.opensuse.org/openSUSE:Security_Features#Systemd_hardening_effort
--
2.49.0

31
0032-Issue-6468-CLI-Fix-default-error-log-level.patch
Normal file
@ -0,0 +1,31 @@
From 70256cd0e90b91733516d4434428d04aa55b39bd Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Tue, 29 Jul 2025 08:00:00 +0200
Subject: [PATCH] Issue 6468 - CLI - Fix default error log level

Description:
Default error log level is 16384

Relates: https://github.com/389ds/389-ds-base/issues/6468

Reviewed by: @droideck (Thanks!)
---
src/lib389/lib389/cli_conf/logging.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lib389/lib389/cli_conf/logging.py b/src/lib389/lib389/cli_conf/logging.py
index 124556f1f..d9ae1ab16 100644
--- a/src/lib389/lib389/cli_conf/logging.py
+++ b/src/lib389/lib389/cli_conf/logging.py
@@ -44,7 +44,7 @@ ERROR_LEVELS = {
+ "methods used for a SASL bind"
},
"default": {
- "level": 6384,
+ "level": 16384,
"desc": "Default logging level"
},
"filter": {
--
2.49.0

@ -0,0 +1,97 @@
From d668c477158e962ebb6fb25ccabe6d9d09f30259 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Fri, 1 Aug 2025 13:27:02 +0100
Subject: [PATCH] Issue 6768 - ns-slapd crashes when a referral is added
(#6780)

Bug description: When a paged result search is successfully run on a referred
suffix, we retrieve the search result set from the pblock and try to release
it. In this case the search result set is NULL, which triggers a SEGV during
the release.

Fix description: If the search result code is LDAP_REFERRAL, skip deletion of
the search result set. Added test case.

Fixes: https://github.com/389ds/389-ds-base/issues/6768

Reviewed by: @tbordaz, @progier389 (Thank you)
---
.../paged_results/paged_results_test.py | 46 +++++++++++++++++++
ldap/servers/slapd/opshared.c | 4 +-
2 files changed, 49 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
index fca48db0f..1bb94b53a 100644
--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py
+++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
@@ -1271,6 +1271,52 @@ def test_search_stress_abandon(create_40k_users, create_user):
paged_search(conn, create_40k_users.suffix, [req_ctrl], search_flt, searchreq_attrlist, abandon_rate=abandon_rate)


+def test_search_referral(topology_st):
+ """Test a paged search on a referred suffix doesnt crash the server.
+
+ :id: c788bdbf-965b-4f12-ac24-d4d695e2cce2
+
+ :setup: Standalone instance
+
+ :steps:
+ 1. Configure a default referral.
+ 2. Create a paged result search control.
+ 3. Paged result search on referral suffix (doesnt exist on the instance, triggering a referral).
+ 4. Check the server is still running.
+ 5. Remove referral.
+
+ :expectedresults:
+ 1. Referral sucessfully set.
+ 2. Control created.
+ 3. Search returns ldap.REFERRAL (10).
+ 4. Server still running.
+ 5. Referral removed.
+ """
+
+ page_size = 5
+ SEARCH_SUFFIX = "dc=referme,dc=com"
+ REFERRAL = "ldap://localhost.localdomain:389/o%3dnetscaperoot"
+
+ log.info('Configuring referral')
+ topology_st.standalone.config.set('nsslapd-referral', REFERRAL)
+ referral = topology_st.standalone.config.get_attr_val_utf8('nsslapd-referral')
+ assert (referral == REFERRAL)
+
+ log.info('Create paged result search control')
+ req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
+
+ log.info('Perform a paged result search on referred suffix, no chase')
+ with pytest.raises(ldap.REFERRAL):
+ topology_st.standalone.search_ext_s(SEARCH_SUFFIX, ldap.SCOPE_SUBTREE, serverctrls=[req_ctrl])
+
+ log.info('Confirm instance is still running')
+ assert (topology_st.standalone.status())
+
+ log.info('Remove referral')
+ topology_st.standalone.config.remove_all('nsslapd-referral')
+ referral = topology_st.standalone.config.get_attr_val_utf8('nsslapd-referral')
+ assert (referral == None)
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 545518748..a5cddfd23 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -910,7 +910,9 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
/* Free the results if not "no_such_object" */
void *sr = NULL;
slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
- be->be_search_results_release(&sr);
+ if (be->be_search_results_release != NULL) {
+ be->be_search_results_release(&sr);
+ }
}
pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx);
rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1);
--
2.49.0

222
0034-Issues-6913-6886-6250-Adjust-xfail-marks-6914.patch
Normal file
@ -0,0 +1,222 @@
|
||||
From e0d9deaab662468e11b08105e1b155660076b5eb Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Fri, 1 Aug 2025 09:28:39 -0700
|
||||
Subject: [PATCH] Issues 6913, 6886, 6250 - Adjust xfail marks (#6914)
|
||||
|
||||
Description: Some of the ACI invalid syntax issues were fixed,
|
||||
so we need to remove xfail marks.
|
||||
Disk space issue should have a 'skipif' mark.
|
||||
Display all attrs (nsslapd-auditlog-display-attrs: *) fails because of a bug.
|
||||
EntryUSN inconsistency and overflow bugs were exposed with the tests.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/6913
|
||||
Related: https://github.com/389ds/389-ds-base/issues/6886
|
||||
Related: https://github.com/389ds/389-ds-base/issues/6250
|
||||
|
||||
Reviewed by: @vashirov (Thanks!)
|
||||
---
|
||||
dirsrvtests/tests/suites/acl/syntax_test.py | 13 ++++++++--
|
||||
.../tests/suites/import/regression_test.py | 18 +++++++-------
|
||||
.../logging/audit_password_masking_test.py | 24 +++++++++----------
|
||||
.../suites/plugins/entryusn_overflow_test.py | 2 ++
|
||||
4 files changed, 34 insertions(+), 23 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/acl/syntax_test.py b/dirsrvtests/tests/suites/acl/syntax_test.py
|
||||
index 4edc7fa4b..ed9919ba3 100644
|
||||
--- a/dirsrvtests/tests/suites/acl/syntax_test.py
|
||||
+++ b/dirsrvtests/tests/suites/acl/syntax_test.py
|
||||
@@ -190,10 +190,9 @@ FAILED = [('test_targattrfilters_18',
|
||||
f'(all)userdn="ldap:///anyone";)'), ]
|
||||
|
||||
|
||||
-@pytest.mark.xfail(reason='https://bugzilla.redhat.com/show_bug.cgi?id=1691473')
|
||||
@pytest.mark.parametrize("real_value", [a[1] for a in FAILED],
|
||||
ids=[a[0] for a in FAILED])
|
||||
-def test_aci_invalid_syntax_fail(topo, real_value):
|
||||
+def test_aci_invalid_syntax_fail(topo, real_value, request):
|
||||
"""Try to set wrong ACI syntax.
|
||||
|
||||
:id: 83c40784-fff5-49c8-9535-7064c9c19e7e
|
||||
@@ -206,6 +205,16 @@ def test_aci_invalid_syntax_fail(topo, real_value):
|
||||
1. It should pass
|
||||
2. It should not pass
|
||||
"""
|
||||
+ # Mark specific test cases as xfail
|
||||
+ xfail_cases = [
|
||||
+ 'test_targattrfilters_18',
|
||||
+ 'test_targattrfilters_20',
|
||||
+ 'test_bind_rule_set_with_more_than_three'
|
||||
+ ]
|
||||
+
|
||||
+ if request.node.callspec.id in xfail_cases:
|
||||
+ pytest.xfail("DS6913 - This test case is expected to fail")
|
||||
+
|
||||
domain = Domain(topo.standalone, DEFAULT_SUFFIX)
|
||||
with pytest.raises(ldap.INVALID_SYNTAX):
|
||||
domain.add("aci", real_value)
|
||||
diff --git a/dirsrvtests/tests/suites/import/regression_test.py b/dirsrvtests/tests/suites/import/regression_test.py
|
||||
index e6fef89cc..61fdf8559 100644
|
||||
--- a/dirsrvtests/tests/suites/import/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/import/regression_test.py
|
||||
@@ -320,7 +320,7 @@ ou: myDups00001
|
||||
assert standalone.ds_error_log.match('.*Duplicated DN detected.*')
|
||||
|
||||
@pytest.mark.tier2
|
||||
-@pytest.mark.xfail(not _check_disk_space(), reason="not enough disk space for lmdb map")
|
||||
+@pytest.mark.skipif(not _check_disk_space(), reason="not enough disk space for lmdb map")
|
||||
@pytest.mark.xfail(ds_is_older("1.3.10.1"), reason="bz1749595 not fixed on versions older than 1.3.10.1")
|
||||
def test_large_ldif2db_ancestorid_index_creation(topo, _set_mdb_map_size):
|
||||
"""Import with ldif2db a large file - check that the ancestorid index creation phase has a correct performance
|
||||
@@ -396,39 +396,39 @@ def test_large_ldif2db_ancestorid_index_creation(topo, _set_mdb_map_size):
|
||||
log.info('Starting the server')
|
||||
topo.standalone.start()
|
||||
|
||||
- # With lmdb there is no more any special phase for ancestorid
|
||||
+ # With lmdb there is no more any special phase for ancestorid
|
||||
# because ancestorsid get updated on the fly while processing the
|
||||
# entryrdn (by up the parents chain to compute the parentid
|
||||
- #
|
||||
+ #
|
||||
# But there is still a numSubordinates generation phase
|
||||
if get_default_db_lib() == "mdb":
|
||||
log.info('parse the errors logs to check lines with "Generating numSubordinates complete." are present')
|
||||
end_numsubordinates = str(topo.standalone.ds_error_log.match(r'.*Generating numSubordinates complete.*'))[1:-1]
|
||||
assert len(end_numsubordinates) > 0
|
||||
-
|
||||
+
|
||||
else:
|
||||
log.info('parse the errors logs to check lines with "Starting sort of ancestorid" are present')
|
||||
start_sort_str = str(topo.standalone.ds_error_log.match(r'.*Starting sort of ancestorid non-leaf IDs*'))[1:-1]
|
||||
assert len(start_sort_str) > 0
|
||||
-
|
||||
+
|
||||
log.info('parse the errors logs to check lines with "Finished sort of ancestorid" are present')
|
||||
end_sort_str = str(topo.standalone.ds_error_log.match(r'.*Finished sort of ancestorid non-leaf IDs*'))[1:-1]
|
||||
assert len(end_sort_str) > 0
|
||||
-
|
||||
+
|
||||
log.info('parse the error logs for the line with "Gathering ancestorid non-leaf IDs"')
|
||||
start_ancestorid_indexing_op_str = str(topo.standalone.ds_error_log.match(r'.*Gathering ancestorid non-leaf IDs*'))[1:-1]
|
||||
assert len(start_ancestorid_indexing_op_str) > 0
|
||||
-
|
||||
+
|
||||
log.info('parse the error logs for the line with "Created ancestorid index"')
|
||||
end_ancestorid_indexing_op_str = str(topo.standalone.ds_error_log.match(r'.*Created ancestorid index*'))[1:-1]
|
||||
assert len(end_ancestorid_indexing_op_str) > 0
|
||||
-
|
||||
+
|
||||
log.info('get the ancestorid non-leaf IDs indexing start and end time from the collected strings')
|
||||
# Collected lines look like : '[15/May/2020:05:30:27.245967313 -0400] - INFO - bdb_get_nonleaf_ids - import userRoot: Gathering ancestorid non-leaf IDs...'
|
||||
# We are getting the sec.nanosec part of the date, '27.245967313' in the above example
|
||||
start_time = (start_ancestorid_indexing_op_str.split()[0]).split(':')[3]
|
||||
end_time = (end_ancestorid_indexing_op_str.split()[0]).split(':')[3]
|
||||
-
|
||||
+
|
||||
log.info('Calculate the elapsed time for the ancestorid non-leaf IDs index creation')
|
||||
etime = (Decimal(end_time) - Decimal(start_time))
|
||||
# The time for the ancestorid index creation should be less than 10s for an offline import of an ldif file with 100000 entries / 5 entries per node
|
||||
diff --git a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
index 3b6a54849..69a36cb5d 100644
|
||||
--- a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
+++ b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
|
||||
@@ -117,10 +117,10 @@ def check_password_masked(inst, log_format, expected_password, actual_password):
|
||||
|
||||
@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
("default", None),
|
||||
- ("default", "*"),
|
||||
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("default", "userPassword"),
|
||||
("json", None),
|
||||
- ("json", "*"),
|
||||
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("json", "userPassword")
|
||||
])
|
||||
def test_password_masking_add_operation(topo, log_format, display_attrs):
|
||||
@@ -173,10 +173,10 @@ def test_password_masking_add_operation(topo, log_format, display_attrs):
|
||||
|
||||
@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
("default", None),
|
||||
- ("default", "*"),
|
||||
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("default", "userPassword"),
|
||||
("json", None),
|
||||
- ("json", "*"),
|
||||
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("json", "userPassword")
|
||||
])
|
||||
def test_password_masking_modify_operation(topo, log_format, display_attrs):
|
||||
@@ -242,10 +242,10 @@ def test_password_masking_modify_operation(topo, log_format, display_attrs):
|
||||
|
||||
@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
("default", None),
|
||||
- ("default", "*"),
|
||||
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("default", "nsslapd-rootpw"),
|
||||
("json", None),
|
||||
- ("json", "*"),
|
||||
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("json", "nsslapd-rootpw")
|
||||
])
|
||||
def test_password_masking_rootpw_modify_operation(topo, log_format, display_attrs):
|
||||
@@ -297,10 +297,10 @@ def test_password_masking_rootpw_modify_operation(topo, log_format, display_attr
|
||||
|
||||
@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
("default", None),
|
||||
- ("default", "*"),
|
||||
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("default", "nsmultiplexorcredentials"),
|
||||
("json", None),
|
||||
- ("json", "*"),
|
||||
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("json", "nsmultiplexorcredentials")
|
||||
])
|
||||
def test_password_masking_multiplexor_credentials(topo, log_format, display_attrs):
|
||||
@@ -368,10 +368,10 @@ def test_password_masking_multiplexor_credentials(topo, log_format, display_attr
|
||||
|
||||
@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
("default", None),
|
||||
- ("default", "*"),
|
||||
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("default", "nsDS5ReplicaCredentials"),
|
||||
("json", None),
|
||||
- ("json", "*"),
|
||||
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("json", "nsDS5ReplicaCredentials")
|
||||
])
|
||||
def test_password_masking_replica_credentials(topo, log_format, display_attrs):
|
||||
@@ -432,10 +432,10 @@ def test_password_masking_replica_credentials(topo, log_format, display_attrs):
|
||||
|
||||
@pytest.mark.parametrize("log_format,display_attrs", [
|
||||
("default", None),
|
||||
- ("default", "*"),
|
||||
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("default", "nsDS5ReplicaBootstrapCredentials"),
|
||||
("json", None),
|
||||
- ("json", "*"),
|
||||
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
|
||||
("json", "nsDS5ReplicaBootstrapCredentials")
|
||||
])
|
||||
def test_password_masking_bootstrap_credentials(topo, log_format, display_attrs):
|
||||
diff --git a/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
|
||||
index a23d734ca..8c3a537ab 100644
|
||||
--- a/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
|
||||
+++ b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
|
||||
@@ -81,6 +81,7 @@ def setup_usn_test(topology_st, request):
|
||||
return created_users
|
||||
|
||||
|
||||
+@pytest.mark.xfail(reason="DS6250")
|
||||
def test_entryusn_overflow_on_add_existing_entries(topology_st, setup_usn_test):
|
||||
"""Test that reproduces entryUSN overflow when adding existing entries
|
||||
|
||||
@@ -232,6 +233,7 @@ def test_entryusn_overflow_on_add_existing_entries(topology_st, setup_usn_test):
|
||||
log.info("EntryUSN overflow test completed successfully")
|
||||
|
||||
|
||||
+@pytest.mark.xfail(reason="DS6250")
|
||||
def test_entryusn_consistency_after_failed_adds(topology_st, setup_usn_test):
|
||||
"""Test that entryUSN remains consistent after failed add operations
|
||||
|
||||
--
|
||||
2.49.0
|
||||
|
||||
378
0035-Issue-6875-Fix-dsidm-tests.patch
Normal file
@ -0,0 +1,378 @@
|
||||
From e48f31b756509938d69a626744b8862fc26aa3ef Mon Sep 17 00:00:00 2001
|
||||
From: Lenka Doudova <lryznaro@redhat.com>
|
||||
Date: Tue, 15 Jul 2025 17:17:04 +0200
|
||||
Subject: [PATCH] Issue 6875 - Fix dsidm tests
|
||||
|
||||
Description:
|
||||
Adding testing of the "full_dn" option with 'dsidm list' command for all
|
||||
relevant types of entries
|
||||
Removing xfail markers in dsidm role tests since the issues were
|
||||
resolved
|
||||
|
||||
Relates: #6875
|
||||
|
||||
Author: Lenka Doudova
|
||||
|
||||
Reviewed by: ???
|
||||
---
|
||||
.../tests/suites/clu/dsidm_group_test.py | 12 +++++++++-
|
||||
.../clu/dsidm_organizational_unit_test.py | 13 ++++++++++-
|
||||
.../tests/suites/clu/dsidm_posixgroup_test.py | 13 ++++++++++-
|
||||
.../tests/suites/clu/dsidm_role_test.py | 23 +++++--------------
|
||||
.../tests/suites/clu/dsidm_services_test.py | 13 ++++++++++-
|
||||
.../suites/clu/dsidm_uniquegroup_test.py | 12 +++++++++-
|
||||
.../tests/suites/clu/dsidm_user_test.py | 22 +++++++++++++++++-
|
||||
7 files changed, 85 insertions(+), 23 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dsidm_group_test.py b/dirsrvtests/tests/suites/clu/dsidm_group_test.py
|
||||
index 36723a2d0..eba823d2d 100644
|
||||
--- a/dirsrvtests/tests/suites/clu/dsidm_group_test.py
|
||||
+++ b/dirsrvtests/tests/suites/clu/dsidm_group_test.py
|
||||
@@ -17,7 +17,7 @@ from lib389.cli_idm.group import (list, get, get_dn, create, delete, modify, ren
|
||||
members, add_member, remove_member)
|
||||
from lib389.topologies import topology_st
|
||||
from lib389.cli_base import FakeArgs
|
||||
-from lib389.utils import ds_is_older, ensure_str
|
||||
+from lib389.utils import ds_is_older, ensure_str, is_a_dn
|
||||
from lib389.idm.group import Groups
|
||||
from . import check_value_in_log_and_reset
|
||||
|
||||
@@ -198,6 +198,7 @@ def test_dsidm_group_list(topology_st, create_test_group):
|
||||
standalone = topology_st.standalone
|
||||
args = FakeArgs()
|
||||
args.json = False
|
||||
+ args.full_dn = False
|
||||
json_list = ['type',
|
||||
'list',
|
||||
'items']
|
||||
@@ -214,12 +215,21 @@ def test_dsidm_group_list(topology_st, create_test_group):
|
||||
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=group_name)
|
||||
|
||||
+ log.info('Test full_dn option with list')
|
||||
+ args.full_dn = True
|
||||
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ result = topology_st.logcap.get_raw_outputs()
|
||||
+ json_result = json.loads(result[0])
|
||||
+ assert is_a_dn(json_result['items'][0])
|
||||
+ args.full_dn = False
|
||||
+
|
||||
log.info('Delete the group')
|
||||
groups = Groups(standalone, DEFAULT_SUFFIX)
|
||||
testgroup = groups.get(group_name)
|
||||
testgroup.delete()
|
||||
|
||||
log.info('Test empty dsidm group list with json')
|
||||
+ topology_st.logcap.flush()
|
||||
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=group_name)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dsidm_organizational_unit_test.py b/dirsrvtests/tests/suites/clu/dsidm_organizational_unit_test.py
|
||||
index ee908fb22..06556b227 100644
|
||||
--- a/dirsrvtests/tests/suites/clu/dsidm_organizational_unit_test.py
|
||||
+++ b/dirsrvtests/tests/suites/clu/dsidm_organizational_unit_test.py
|
||||
@@ -11,12 +11,13 @@ import subprocess
|
||||
import pytest
|
||||
import logging
|
||||
import os
|
||||
+import json
|
||||
|
||||
from lib389 import DEFAULT_SUFFIX
|
||||
from lib389.cli_idm.organizationalunit import get, get_dn, create, modify, delete, list, rename
|
||||
from lib389.topologies import topology_st
|
||||
from lib389.cli_base import FakeArgs
|
||||
-from lib389.utils import ds_is_older
|
||||
+from lib389.utils import ds_is_older, is_a_dn
|
||||
from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
from . import check_value_in_log_and_reset
|
||||
|
||||
@@ -110,6 +111,7 @@ def test_dsidm_organizational_unit_list(topology_st, create_test_ou):
|
||||
standalone = topology_st.standalone
|
||||
args = FakeArgs()
|
||||
args.json = False
|
||||
+ args.full_dn = False
|
||||
json_list = ['type',
|
||||
'list',
|
||||
'items']
|
||||
@@ -126,7 +128,16 @@ def test_dsidm_organizational_unit_list(topology_st, create_test_ou):
|
||||
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
check_value_in_log_and_reset(topology_st, check_value=ou_name)
|
||||
|
||||
+ log.info('Test full_dn option with list')
|
||||
+ args.full_dn = True
|
||||
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ result = topology_st.logcap.get_raw_outputs()
|
||||
+ json_result = json.loads(result[0])
|
||||
+ assert is_a_dn(json_result['items'][0])
|
||||
+ args.full_dn = False
|
||||
+
|
||||
log.info('Delete the organizational unit')
|
||||
+ topology_st.logcap.flush()
|
||||
ous = OrganizationalUnits(standalone, DEFAULT_SUFFIX)
|
||||
test_ou = ous.get(ou_name)
|
||||
test_ou.delete()
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dsidm_posixgroup_test.py b/dirsrvtests/tests/suites/clu/dsidm_posixgroup_test.py
|
||||
index ccafd3905..10799ee28 100644
|
||||
--- a/dirsrvtests/tests/suites/clu/dsidm_posixgroup_test.py
|
||||
+++ b/dirsrvtests/tests/suites/clu/dsidm_posixgroup_test.py
|
||||
@@ -9,12 +9,13 @@
|
||||
import pytest
|
||||
import logging
|
||||
import os
|
||||
+import json
|
||||
|
||||
from lib389 import DEFAULT_SUFFIX
|
||||
from lib389.cli_idm.posixgroup import list, get, get_dn, create, delete, modify, rename
|
||||
from lib389.topologies import topology_st
|
||||
from lib389.cli_base import FakeArgs
|
||||
-from lib389.utils import ds_is_older, ensure_str
|
||||
+from lib389.utils import ds_is_older, ensure_str, is_a_dn
|
||||
from lib389.idm.posixgroup import PosixGroups
|
||||
from . import check_value_in_log_and_reset
|
||||
|
||||
@@ -195,6 +196,7 @@ def test_dsidm_posixgroup_list(topology_st, create_test_posixgroup):
|
||||
standalone = topology_st.standalone
|
||||
args = FakeArgs()
|
||||
args.json = False
|
||||
+ args.full_dn = False
|
||||
json_list = ['type',
|
||||
'list',
|
||||
'items']
|
||||
@@ -211,12 +213,21 @@ def test_dsidm_posixgroup_list(topology_st, create_test_posixgroup):
|
||||
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=posixgroup_name)
|
||||
|
||||
+ log.info('Test full_dn option with list')
|
||||
+ args.full_dn = True
|
||||
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ result = topology_st.logcap.get_raw_outputs()
|
||||
+ json_result = json.loads(result[0])
|
||||
+ assert is_a_dn(json_result['items'][0])
|
||||
+ args.full_dn = False
|
||||
+
|
||||
log.info('Delete the posixgroup')
|
||||
posixgroups = PosixGroups(standalone, DEFAULT_SUFFIX)
|
||||
test_posixgroup = posixgroups.get(posixgroup_name)
|
||||
test_posixgroup.delete()
|
||||
|
||||
log.info('Test empty dsidm posixgroup list with json')
|
||||
+ topology_st.logcap.flush()
|
||||
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=posixgroup_name)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dsidm_role_test.py b/dirsrvtests/tests/suites/clu/dsidm_role_test.py
|
||||
index eb5f692d7..094db2f78 100644
|
||||
--- a/dirsrvtests/tests/suites/clu/dsidm_role_test.py
|
||||
+++ b/dirsrvtests/tests/suites/clu/dsidm_role_test.py
|
||||
@@ -67,6 +67,7 @@ def create_test_filtered_role(topology_st, request):
|
||||
|
||||
properties = FakeArgs()
|
||||
properties.cn = filtered_role_name
|
||||
+ properties.nsrolefilter = "(cn=*)"
|
||||
create_filtered(topology_st.standalone, DEFAULT_SUFFIX, topology_st.logcap.log, properties)
|
||||
test_filtered_role = filtered_roles.get(filtered_role_name)
|
||||
|
||||
@@ -92,7 +93,7 @@ def create_test_nested_role(topology_st, create_test_managed_role, request):
|
||||
|
||||
properties = FakeArgs()
|
||||
properties.cn = nested_role_name
|
||||
- properties.nsRoleDN = managed_role.dn
|
||||
+ properties.nsroledn = managed_role.dn
|
||||
create_nested(topology_st.standalone, DEFAULT_SUFFIX, topology_st.logcap.log, properties)
|
||||
test_nested_role = nested_roles.get(nested_role_name)
|
||||
|
||||
@@ -341,14 +342,8 @@ def test_dsidm_role_list(topology_st, create_test_managed_role):
|
||||
@pytest.mark.parametrize(
|
||||
"role_name, fixture, objectclasses",
|
||||
[(managed_role_name, 'create_test_managed_role', ['nsSimpleRoleDefinition', 'nsManagedRoleDefinition']),
|
||||
- (pytest.param(filtered_role_name,
|
||||
- create_test_filtered_role,
|
||||
- ['nsComplexRoleDefinition', 'nsFilteredRoleDefinition'],
|
||||
- marks=pytest.mark.xfail(reason="DS6492"))),
|
||||
- (pytest.param(nested_role_name,
|
||||
- create_test_nested_role,
|
||||
- ['nsComplexRoleDefinition', 'nsNestedRoleDefinition'],
|
||||
- marks=pytest.mark.xfail(reason="DS6493")))])
|
||||
+ (filtered_role_name, 'create_test_filtered_role', ['nsComplexRoleDefinition', 'nsFilteredRoleDefinition']),
|
||||
+ (nested_role_name, 'create_test_nested_role', ['nsComplexRoleDefinition', 'nsNestedRoleDefinition'])])
|
||||
def test_dsidm_role_get(topology_st, role_name, fixture, objectclasses, request):
|
||||
""" Test dsidm role get option for managed, filtered and nested role
|
||||
|
||||
@@ -422,14 +417,8 @@ def test_dsidm_role_get(topology_st, role_name, fixture, objectclasses, request)
|
||||
@pytest.mark.parametrize(
|
||||
"role_name, fixture, objectclasses",
|
||||
[(managed_role_name, 'create_test_managed_role', ['nsSimpleRoleDefinition', 'nsManagedRoleDefinition']),
|
||||
- (pytest.param(filtered_role_name,
|
||||
- create_test_filtered_role,
|
||||
- ['nsComplexRoleDefinition', 'nsFilteredRoleDefinition'],
|
||||
- marks=pytest.mark.xfail(reason="DS6492"))),
|
||||
- (pytest.param(nested_role_name,
|
||||
- create_test_nested_role,
|
||||
- ['nsComplexRoleDefinition', 'nsNestedRoleDefinition'],
|
||||
- marks=pytest.mark.xfail(reason="DS6493")))])
|
||||
+ (filtered_role_name, 'create_test_filtered_role', ['nsComplexRoleDefinition', 'nsFilteredRoleDefinition']),
|
||||
+ (nested_role_name, 'create_test_nested_role', ['nsComplexRoleDefinition', 'nsNestedRoleDefinition'])])
|
||||
def test_dsidm_role_get_by_dn(topology_st, role_name, fixture, objectclasses, request):
|
||||
""" Test dsidm role get-by-dn option for managed, filtered and nested role
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dsidm_services_test.py b/dirsrvtests/tests/suites/clu/dsidm_services_test.py
|
||||
index 61dd0ac11..f167b1c6f 100644
|
||||
--- a/dirsrvtests/tests/suites/clu/dsidm_services_test.py
|
||||
+++ b/dirsrvtests/tests/suites/clu/dsidm_services_test.py
|
||||
@@ -11,12 +11,13 @@ import subprocess
|
||||
import pytest
|
||||
import logging
|
||||
import os
|
||||
+import json
|
||||
|
||||
from lib389 import DEFAULT_SUFFIX
|
||||
from lib389.cli_idm.service import list, get, get_dn, create, delete, modify, rename
|
||||
from lib389.topologies import topology_st
|
||||
from lib389.cli_base import FakeArgs
|
||||
-from lib389.utils import ds_is_older, ensure_str
|
||||
+from lib389.utils import ds_is_older, ensure_str, is_a_dn
|
||||
from lib389.idm.services import ServiceAccounts
|
||||
from . import check_value_in_log_and_reset
|
||||
|
||||
@@ -73,6 +74,7 @@ def test_dsidm_service_list(topology_st, create_test_service):
|
||||
standalone = topology_st.standalone
|
||||
args = FakeArgs()
|
||||
args.json = False
|
||||
+ args.full_dn = False
|
||||
service_value = 'test_service'
|
||||
json_list = ['type',
|
||||
'list',
|
||||
@@ -90,12 +92,21 @@ def test_dsidm_service_list(topology_st, create_test_service):
|
||||
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=service_value)
|
||||
|
||||
+ log.info('Test full_dn option with list')
|
||||
+ args.full_dn = True
|
||||
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ result = topology_st.logcap.get_raw_outputs()
|
||||
+ json_result = json.loads(result[0])
|
||||
+ assert is_a_dn(json_result['items'][0])
|
||||
+ args.full_dn = False
|
||||
+
|
||||
log.info('Delete the service')
|
||||
services = ServiceAccounts(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
testservice = services.get(service_value)
|
||||
testservice.delete()
|
||||
|
||||
log.info('Test empty dsidm service list with json')
|
||||
+ topology_st.logcap.flush()
|
||||
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=service_value)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dsidm_uniquegroup_test.py b/dirsrvtests/tests/suites/clu/dsidm_uniquegroup_test.py
|
||||
index 0532791c1..4689ae34b 100644
|
||||
--- a/dirsrvtests/tests/suites/clu/dsidm_uniquegroup_test.py
|
||||
+++ b/dirsrvtests/tests/suites/clu/dsidm_uniquegroup_test.py
|
||||
@@ -17,7 +17,7 @@ from lib389.cli_idm.uniquegroup import (list, get, get_dn, create, delete, modif
|
||||
members, add_member, remove_member)
|
||||
from lib389.topologies import topology_st
|
||||
from lib389.cli_base import FakeArgs
|
||||
-from lib389.utils import ds_is_older, ensure_str
|
||||
+from lib389.utils import ds_is_older, ensure_str, is_a_dn
|
||||
from lib389.idm.group import UniqueGroups
|
||||
from . import check_value_in_log_and_reset
|
||||
|
||||
@@ -153,6 +153,7 @@ def test_dsidm_uniquegroup_list(topology_st, create_test_uniquegroup):
|
||||
standalone = topology_st.standalone
|
||||
args = FakeArgs()
|
||||
args.json = False
|
||||
+ args.full_dn = False
|
||||
json_list = ['type',
|
||||
'list',
|
||||
'items']
|
||||
@@ -169,12 +170,21 @@ def test_dsidm_uniquegroup_list(topology_st, create_test_uniquegroup):
|
||||
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=uniquegroup_name)
|
||||
|
||||
+ log.info('Test full_dn option with list')
|
||||
+ args.full_dn = True
|
||||
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ result = topology_st.logcap.get_raw_outputs()
|
||||
+ json_result = json.loads(result[0])
|
||||
+ assert is_a_dn(json_result['items'][0])
|
||||
+ args.full_dn = False
|
||||
+
|
||||
log.info('Delete the uniquegroup')
|
||||
uniquegroups = UniqueGroups(standalone, DEFAULT_SUFFIX)
|
||||
test_uniquegroup = uniquegroups.get(uniquegroup_name)
|
||||
test_uniquegroup.delete()
|
||||
|
||||
log.info('Test empty dsidm uniquegroup list with json')
|
||||
+ topology_st.logcap.flush()
|
||||
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=uniquegroup_name)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/clu/dsidm_user_test.py b/dirsrvtests/tests/suites/clu/dsidm_user_test.py
|
||||
index 4b5491735..620e183ac 100644
|
||||
--- a/dirsrvtests/tests/suites/clu/dsidm_user_test.py
|
||||
+++ b/dirsrvtests/tests/suites/clu/dsidm_user_test.py
|
||||
@@ -12,12 +12,13 @@ import pytest
|
||||
import logging
|
||||
import os
|
||||
import ldap
|
||||
+import json
|
||||
|
||||
from lib389 import DEFAULT_SUFFIX
|
||||
from lib389.cli_idm.user import list, get, get_dn, create, delete, modify, rename
|
||||
from lib389.topologies import topology_st
|
||||
from lib389.cli_base import FakeArgs
|
||||
-from lib389.utils import ds_is_older, ensure_str
|
||||
+from lib389.utils import ds_is_older, ensure_str, is_a_dn
|
||||
from lib389.idm.user import nsUserAccounts
|
||||
from . import check_value_in_log_and_reset
|
||||
|
||||
@@ -74,6 +75,7 @@ def test_dsidm_user_list(topology_st, create_test_user):
|
||||
standalone = topology_st.standalone
|
||||
args = FakeArgs()
|
||||
args.json = False
|
||||
+ args.full_dn = False
|
||||
user_value = 'test_user_1000'
|
||||
json_list = ['type',
|
||||
'list',
|
||||
@@ -92,6 +94,15 @@ def test_dsidm_user_list(topology_st, create_test_user):
|
||||
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=user_value)
|
||||
|
||||
+ log.info('Test full_dn option with list')
|
||||
+ args.full_dn = True
|
||||
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ result = topology_st.logcap.get_raw_outputs()
|
||||
+ json_result = json.loads(result[0])
|
||||
+ assert is_a_dn(json_result['items'][0])
|
||||
+ args.full_dn = False
|
||||
+ topology_st.logcap.flush()
|
||||
+
|
||||
log.info('Delete the user')
|
||||
users = nsUserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
testuser = users.get(user_value)
|
||||
@@ -777,6 +788,7 @@ def test_dsidm_user_list_rdn_after_rename(topology_st):
|
||||
log.info('Test dsidm user list without json')
|
||||
args = FakeArgs()
|
||||
args.json = False
|
||||
+ args.full_dn = False
|
||||
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
# Should show the new name, not the original name
|
||||
check_value_in_log_and_reset(topology_st, check_value=new_name, check_value_not=original_name)
|
||||
@@ -787,6 +799,14 @@ def test_dsidm_user_list_rdn_after_rename(topology_st):
|
||||
# Should show the new name in JSON output as well
|
||||
check_value_in_log_and_reset(topology_st, check_value=new_name, check_value_not=original_name)
|
||||
|
||||
+ log.info('Test full_dn option with list')
|
||||
+ args.full_dn = True
|
||||
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ result = topology_st.logcap.get_raw_outputs()
|
||||
+ json_result = json.loads(result[0])
|
||||
+ assert is_a_dn(json_result['items'][0])
|
||||
+ args.full_dn = False
|
||||
+
|
||||
log.info('Directly verify RDN extraction works correctly')
|
||||
renamed_user = users.get(new_name)
|
||||
rdn_value = renamed_user.get_rdn_from_dn(renamed_user.dn)
|
||||
--
2.49.0

503
0036-Issue-6519-Add-basic-dsidm-account-tests.patch
Normal file
@ -0,0 +1,503 @@
From 10937417415577569bd777aacf7941803e96da21 Mon Sep 17 00:00:00 2001
From: Lenka Doudova <lryznaro@redhat.com>
Date: Mon, 20 Jan 2025 14:19:51 +0100
Subject: [PATCH] Issue 6519 - Add basic dsidm account tests

Automating basic dsidm account tests

Relates to: https://github.com/389ds/389-ds-base/issues/6519

Author: Lenka Doudova

Reviewed by: Simon Pichugin
---
.../tests/suites/clu/dsidm_account_test.py | 417 +++++++++++++++++-
src/lib389/lib389/cli_idm/account.py | 6 +-
2 files changed, 409 insertions(+), 14 deletions(-)

diff --git a/dirsrvtests/tests/suites/clu/dsidm_account_test.py b/dirsrvtests/tests/suites/clu/dsidm_account_test.py
|
||||
index 4b48a11a5..c600e31fd 100644
|
||||
--- a/dirsrvtests/tests/suites/clu/dsidm_account_test.py
|
||||
+++ b/dirsrvtests/tests/suites/clu/dsidm_account_test.py
|
||||
@@ -6,22 +6,19 @@
|
||||
# See LICENSE for details.
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
#
|
||||
+
|
||||
import logging
|
||||
import os
|
||||
import json
|
||||
import pytest
|
||||
import ldap
|
||||
from lib389 import DEFAULT_SUFFIX
|
||||
-from lib389.cli_idm.account import (
|
||||
- get_dn,
|
||||
- lock,
|
||||
- unlock,
|
||||
- entry_status,
|
||||
- subtree_status,
|
||||
-)
|
||||
+from lib389.cli_idm.account import list, get_dn, lock, unlock, delete, modify, rename, entry_status, \
|
||||
+ subtree_status, reset_password, change_password
|
||||
+from lib389.cli_idm.user import create
|
||||
from lib389.topologies import topology_st
|
||||
from lib389.cli_base import FakeArgs
|
||||
-from lib389.utils import ds_is_older
|
||||
+from lib389.utils import ds_is_older, is_a_dn
|
||||
from lib389.idm.user import nsUserAccounts
|
||||
from . import check_value_in_log_and_reset
|
||||
|
||||
@@ -30,13 +27,28 @@ pytestmark = pytest.mark.tier0
|
||||
logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
+test_user_name = 'test_user_1000'
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def create_test_user(topology_st, request):
|
||||
log.info('Create test user')
|
||||
users = nsUserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
- test_user = users.create_test_user()
|
||||
- log.info('Created test user: %s', test_user.dn)
|
||||
+
|
||||
+ if users.exists(test_user_name):
|
||||
+ test_user = users.get(test_user_name)
|
||||
+ test_user.delete()
|
||||
+
|
||||
+ properties = FakeArgs()
|
||||
+ properties.uid = test_user_name
|
||||
+ properties.cn = test_user_name
|
||||
+ properties.sn = test_user_name
|
||||
+ properties.uidNumber = '1000'
|
||||
+ properties.gidNumber = '2000'
|
||||
+ properties.homeDirectory = '/home/test_user_1000'
|
||||
+ properties.displayName = test_user_name
|
||||
+
|
||||
+ create(topology_st.standalone, DEFAULT_SUFFIX, topology_st.logcap.log, properties)
|
||||
+ test_user = users.get(test_user_name)
|
||||
|
||||
def fin():
|
||||
log.info('Delete test user')
|
||||
@@ -74,7 +86,7 @@ def test_dsidm_account_entry_status_with_lock(topology_st, create_test_user):
|
||||
|
||||
standalone = topology_st.standalone
|
||||
users = nsUserAccounts(standalone, DEFAULT_SUFFIX)
|
||||
- test_user = users.get('test_user_1000')
|
||||
+ test_user = users.get(test_user_name)
|
||||
|
||||
entry_list = ['Entry DN: {}'.format(test_user.dn),
|
||||
'Entry Creation Date',
|
||||
@@ -169,8 +181,389 @@ def test_dsidm_account_entry_get_by_dn(topology_st, create_test_user):
|
||||
assert json_result['dn'] == user_dn
|
||||
|
||||
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
|
||||
+def test_dsidm_account_delete(topology_st, create_test_user):
|
||||
+ """ Test dsidm account delete option
|
||||
+
|
||||
+ :id: a7960bc2-0282-4a82-8dfb-3af2088ec661
|
||||
+ :setup: Standalone
|
||||
+ :steps:
|
||||
+ 1. Run dsidm account delete on a created account
|
||||
+ 2. Check that a message is provided on deletion
|
||||
+ 3. Check that the account no longer exists
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ """
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
|
||||
+ test_account = accounts.get(test_user_name)
|
||||
+ output = 'Successfully deleted {}'.format(test_account.dn)
|
||||
+
|
||||
+ args = FakeArgs()
|
||||
+ args.dn = test_account.dn
|
||||
+
|
||||
+ log.info('Test dsidm account delete')
|
||||
+ delete(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value=output)
|
||||
+
|
||||
+ log.info('Check that the account no longer exists')
|
||||
+ assert not test_account.exists()
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
|
||||
+def test_dsidm_account_list(topology_st, create_test_user):
|
||||
+ """ Test dsidm account list option
|
||||
+
|
||||
+ :id: 4d173a3e-ee36-4a8b-8d0d-4955c792faca
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Run dsidm account list without json
|
||||
+ 2. Check the output content is correct
|
||||
+ 3. Run dsidm account list with json
|
||||
+ 4. Check the output content is correct
|
||||
+ 5. Test full_dn option with list
|
||||
+ 6. Delete the account
|
||||
+ 7. Check the account is not in the list with json
|
||||
+ 8. Check the account is not in the list without json
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ 6. Success
|
||||
+ 7. Success
|
||||
+ 8. Success
|
||||
+ """
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+ args = FakeArgs()
|
||||
+ args.json = False
|
||||
+ args.full_dn = False
|
||||
+ json_list = ['type',
|
||||
+ 'list',
|
||||
+ 'items']
|
||||
+
|
||||
+ log.info('Empty the log file to prevent false data to check about group')
|
||||
+ topology_st.logcap.flush()
|
||||
+
|
||||
+ log.info('Test dsidm account list without json')
|
||||
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value=test_user_name)
|
||||
+
|
||||
+ log.info('Test dsidm account list with json')
|
||||
+ args.json = True
|
||||
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=test_user_name)
|
||||
+
|
||||
+ log.info('Test full_dn option with list')
|
||||
+ args.full_dn = True
|
||||
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ result = topology_st.logcap.get_raw_outputs()
|
||||
+ json_result = json.loads(result[0])
|
||||
+ assert is_a_dn(json_result['items'][0])
|
||||
+ args.full_dn = False
|
||||
+ topology_st.logcap.flush()
|
||||
+
|
||||
+ log.info('Delete the account')
|
||||
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
|
||||
+ test_account = accounts.get(test_user_name)
|
||||
+ test_account.delete()
|
||||
+
|
||||
+ log.info('Test empty dsidm account list with json')
|
||||
+ list(standalone,DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=test_user_name)
|
||||
+
|
||||
+ log.info('Test empty dsidm account list without json')
|
||||
+ args.json = False
|
||||
+ list(standalone,DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value_not=test_user_name)
|
||||
+
|
||||
+
|
||||
+@pytest.mark.xfail(reason='DS6515')
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
|
||||
+def test_dsidm_account_get_by_dn(topology_st, create_test_user):
|
||||
+ """ Test dsidm account get-by-dn option
|
||||
+
|
||||
+ :id: 07945577-2da0-4fd9-9237-43dd2823f7b8
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Run dsidm account get-by-dn for an account without json
|
||||
+ 2. Check the output content is correct
|
||||
+ 3. Run dsidm account get-by-dn for an account with json
|
||||
+ 4. Check the output content is correct
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ """
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
|
||||
+ test_account = accounts.get(test_user_name)
|
||||
+
|
||||
+ args = FakeArgs()
|
||||
+ args.dn = test_account.dn
|
||||
+ args.json = False
|
||||
+
|
||||
+ account_content = ['dn: {}'.format(test_account.dn),
|
||||
+ 'cn: {}'.format(test_account.rdn),
|
||||
+ 'displayName: {}'.format(test_user_name),
|
||||
+ 'gidNumber: 2000',
|
||||
+ 'homeDirectory: /home/{}'.format(test_user_name),
|
||||
+ 'objectClass: top',
|
||||
+ 'objectClass: nsPerson',
|
||||
+ 'objectClass: nsAccount',
|
||||
+ 'objectClass: nsOrgPerson',
|
||||
+ 'objectClass: posixAccount',
|
||||
+ 'uid: {}'.format(test_user_name),
|
||||
+ 'uidNumber: 1000']
|
||||
+
|
||||
+ json_content = ['attrs',
|
||||
+ 'objectclass',
|
||||
+ 'top',
|
||||
+ 'nsPerson',
|
||||
+ 'nsAccount',
|
||||
+ 'nsOrgPerson',
|
||||
+ 'posixAccount',
|
||||
+ 'cn',
|
||||
+ test_account.rdn,
|
||||
+ 'gidnumber',
|
||||
+ '2000',
|
||||
+ 'homedirectory',
|
||||
+ '/home/{}'.format(test_user_name),
|
||||
+ 'displayname',
|
||||
+ test_user_name,
|
||||
+ 'uidnumber',
|
||||
+ '1000',
|
||||
+ 'creatorsname',
|
||||
+ 'cn=directory manager',
|
||||
+ 'modifiersname',
|
||||
+ 'createtimestamp',
|
||||
+ 'modifytimestamp',
|
||||
+ 'nsuniqueid',
|
||||
+ 'parentid',
|
||||
+ 'entryid',
|
||||
+ 'entryuuid',
|
||||
+ 'dsentrydn',
|
||||
+ 'entrydn',
|
||||
+ test_account.dn]
|
||||
+
|
||||
+ log.info('Empty the log file to prevent false data to check about the account')
|
||||
+ topology_st.logcap.flush()
|
||||
+
|
||||
+ log.info('Test dsidm account get-by-dn without json')
|
||||
+ get_dn(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, content_list=account_content)
|
||||
+
|
||||
+ log.info('Test dsidm account get-by-dn with json')
|
||||
+ args.json = True
|
||||
+ get_dn(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, content_list=json_content)
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
|
||||
+def test_dsidm_account_modify_by_dn(topology_st, create_test_user):
|
||||
+ """ Test dsidm account modify-by-dn
|
||||
+
|
||||
+ :id: e7288f8c-f0a8-4d8d-a00f-1b243eb117bc
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Run dsidm account modify-by-dn add description value
|
||||
+ 2. Run dsidm account modify-by-dn replace description value
|
||||
+ 3. Run dsidm account modify-by-dn delete description value
|
||||
+ :expectedresults:
|
||||
+ 1. A description value is added
|
||||
+ 2. The original description value is replaced and the previous is not present
|
||||
+ 3. The replaced description value is deleted
|
||||
+ """
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
|
||||
+ test_account = accounts.get(test_user_name)
|
||||
+ output = 'Successfully modified {}'.format(test_account.dn)
|
||||
+
|
||||
+ args = FakeArgs()
|
||||
+ args.dn = test_account.dn
|
||||
+ args.changes = ['add:description:new_description']
|
||||
+
|
||||
+ log.info('Test dsidm account modify add')
|
||||
+ modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value=output)
|
||||
+ assert test_account.present('description', 'new_description')
|
||||
+
|
||||
+ log.info('Test dsidm account modify replace')
|
||||
+ args.changes = ['replace:description:replaced_description']
|
||||
+ modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value=output)
|
||||
+ assert test_account.present('description', 'replaced_description')
|
||||
+ assert not test_account.present('description', 'new_description')
|
||||
+
|
||||
+ log.info('Test dsidm account modify delete')
|
||||
+ args.changes = ['delete:description:replaced_description']
|
||||
+ modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value=output)
|
||||
+ assert not test_account.present('description', 'replaced_description')
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
|
||||
+def test_dsidm_account_rename_by_dn(topology_st, create_test_user):
|
||||
+ """ Test dsidm account rename-by-dn option
|
||||
+
|
||||
+ :id: f4b8e491-35b1-4113-b9c4-e0a80f8985f3
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Run dsidm account rename option on existing account
|
||||
+ 2. Check the account does not have another uid attribute with the old rdn
|
||||
+ 3. Check the old account is deleted
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ """
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
|
||||
+ test_account = accounts.get(test_user_name)
|
||||
+
|
||||
+ args = FakeArgs()
|
||||
+ args.dn = test_account.dn
|
||||
+ args.new_name = 'renamed_account'
|
||||
+ args.new_dn = 'uid=renamed_account,ou=people,{}'.format(DEFAULT_SUFFIX)
|
||||
+ args.keep_old_rdn = False
|
||||
+
|
||||
+ log.info('Test dsidm account rename-by-dn')
|
||||
+ rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ new_account = accounts.get(args.new_name)
|
||||
+
|
||||
+ try:
|
||||
+ output = 'Successfully renamed to {}'.format(new_account.dn)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value=output)
|
||||
+
|
||||
+ log.info('Verify the new account does not have a uid attribute with the old rdn')
|
||||
+ assert not new_account.present('uid', test_user_name)
|
||||
+ assert new_account.present('displayName', test_user_name)
|
||||
+
|
||||
+ log.info('Verify the old account does not exist')
|
||||
+ assert not test_account.exists()
|
||||
+ finally:
|
||||
+ log.info('Clean up')
|
||||
+ new_account.delete()
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
|
||||
+def test_dsidm_account_rename_by_dn_keep_old_rdn(topology_st, create_test_user):
|
||||
+ """ Test dsidm account rename-by-dn option with keep-old-rdn
|
||||
+
|
||||
+ :id: a128bdbb-c0a4-4d9d-9a95-9be2d3780094
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Run dsidm account rename option on existing account
|
||||
+ 2. Check the account has another uid attribute with the old rdn
|
||||
+ 3. Check the old account is deleted
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ """
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
|
||||
+ test_account = accounts.get(test_user_name)
|
||||
+
|
||||
+ args = FakeArgs()
|
||||
+ args.dn = test_account.dn
|
||||
+ args.new_name = 'renamed_account'
|
||||
+ args.new_dn = 'uid=renamed_account,ou=people,{}'.format(DEFAULT_SUFFIX)
|
||||
+ args.keep_old_rdn = True
|
||||
+
|
||||
+ log.info('Test dsidm account rename-by-dn')
|
||||
+ rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ new_account = accounts.get(args.new_name)
|
||||
+
|
||||
+ try:
|
||||
+ output = 'Successfully renamed to {}'.format(new_account.dn)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value=output)
|
||||
+
|
||||
+ log.info('Verify the new account does not have a uid attribute with the old rdn')
|
||||
+ assert new_account.present('uid', test_user_name)
|
||||
+ assert new_account.present('displayName', test_user_name)
|
||||
+
|
||||
+ log.info('Verify the old account does not exist')
|
||||
+ assert not test_account.exists()
|
||||
+ finally:
|
||||
+ log.info('Clean up')
|
||||
+ new_account.delete()
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
|
||||
+def test_dsidm_account_reset_password(topology_st, create_test_user):
|
||||
+ """ Test dsidm account reset_password option
|
||||
+
|
||||
+ :id: 02ffa044-08ae-40c5-9108-b02d0c3b0521
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Run dsidm account reset_password on an existing user
|
||||
+ 2. Verify that the user has now userPassword attribute set
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ """
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
|
||||
+ test_account = accounts.get(test_user_name)
|
||||
+
|
||||
+ args = FakeArgs()
|
||||
+ args.dn = test_account.dn
|
||||
+ args.new_password = 'newpasswd'
|
||||
+ output = 'reset password for {}'.format(test_account.dn)
|
||||
+
|
||||
+ log.info('Test dsidm account reset_password')
|
||||
+ reset_password(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value=output)
|
||||
+
|
||||
+ log.info('Verify the userPassword attribute is set')
|
||||
+ assert test_account.present('userPassword')
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
|
||||
+def test_dsidm_account_change_password(topology_st, create_test_user):
|
||||
+ """ Test dsidm account change_password option
|
||||
+
|
||||
+ :id: 24c25b8f-df2b-4d43-a88e-47e24bc4ff36
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Run dsidm account change_password on an existing user
|
||||
+ 2. Verify that the user has userPassword attribute set
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ """
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
|
||||
+ test_account = accounts.get(test_user_name)
|
||||
+
|
||||
+ args = FakeArgs()
|
||||
+ args.dn = test_account.dn
|
||||
+ args.new_password = 'newpasswd'
|
||||
+ output = 'changed password for {}'.format(test_account.dn)
|
||||
+
|
||||
+ log.info('Test dsidm account change_password')
|
||||
+ change_password(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value=output)
|
||||
+
|
||||
+ log.info('Verify the userPassword attribute is set')
|
||||
+ assert test_account.present('userPassword')
|
||||
+
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
- pytest.main("-s %s" % CURRENT_FILE)
|
||||
+ pytest.main("-s {}".format(CURRENT_FILE))
|
||||
\ No newline at end of file
|
||||
diff --git a/src/lib389/lib389/cli_idm/account.py b/src/lib389/lib389/cli_idm/account.py
|
||||
index 8b6f99549..9877c533a 100644
|
||||
--- a/src/lib389/lib389/cli_idm/account.py
|
||||
+++ b/src/lib389/lib389/cli_idm/account.py
|
||||
@@ -12,10 +12,12 @@ import ldap
|
||||
import math
|
||||
from datetime import datetime
|
||||
from lib389.idm.account import Account, Accounts, AccountState
|
||||
-from lib389.cli_base import (
|
||||
- _generic_get_dn,
|
||||
+from lib389.cli_idm import (
|
||||
_generic_list,
|
||||
_generic_delete,
|
||||
+ _generic_get_dn
|
||||
+)
|
||||
+from lib389.cli_base import (
|
||||
_generic_modify_dn,
|
||||
_get_arg,
|
||||
_get_dn_arg,
--
2.49.0

268
0037-Issue-6940-dsconf-monitor-server-fails-with-ldapi-du.patch
Normal file
@ -0,0 +1,268 @@
From 7423f0a0b90bac39a23b5ce54a1c61439d0ebcb6 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 19 Aug 2025 16:10:09 -0700
Subject: [PATCH] Issue 6940 - dsconf monitor server fails with ldapi:// due to
 absent server ID (#6941)

Description: The dsconf monitor server command fails when using ldapi://
protocol because the server ID is not set, preventing PID retrieval from
defaults.inf. This causes the Web console to fail displaying the "Server
Version" field and potentially other CLI/WebUI issues.

The fix attempts to derive the server ID from the LDAPI socket path when
not explicitly provided. This covers the common case where the socket name
contains the instance name (e.g., slapd-instance.socket).
If that's not possible, it also attempts to derive the server ID from the
nsslapd-instancedir configuration attribute. The derived server ID
is validated against actual system instances to ensure it exists.
Note that socket names can vary and nsslapd-instancedir can be changed.
This is a best-effort approach for the common naming pattern.

Also fixes the LDAPI socket path extraction which was incorrectly using
offset 9 instead of 8 for ldapi:// URIs.

The monitor command now handles missing PIDs gracefully, returning zero
values for process-specific stats instead of failing completely.

Fixes: https://github.com/389ds/389-ds-base/issues/6940

Reviewed by: @vashirov, @mreynolds389 (Thanks!!)
---
src/lib389/lib389/__init__.py | 93 +++++++++++++++++++++++++++---
src/lib389/lib389/cli_base/dsrc.py | 4 +-
src/lib389/lib389/monitor.py | 50 ++++++++++++----
3 files changed, 124 insertions(+), 23 deletions(-)

diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
|
||||
index 0ddfca8ae..23a20739f 100644
|
||||
--- a/src/lib389/lib389/__init__.py
|
||||
+++ b/src/lib389/lib389/__init__.py
|
||||
@@ -17,7 +17,7 @@
|
||||
|
||||
import sys
|
||||
import os
|
||||
-from urllib.parse import urlparse
|
||||
+from urllib.parse import urlparse, unquote
|
||||
import stat
|
||||
import pwd
|
||||
import grp
|
||||
@@ -67,7 +67,8 @@ from lib389.utils import (
|
||||
get_default_db_lib,
|
||||
selinux_present,
|
||||
selinux_label_port,
|
||||
- get_user_is_root)
|
||||
+ get_user_is_root,
|
||||
+ get_instance_list)
|
||||
from lib389.paths import Paths
|
||||
from lib389.nss_ssl import NssSsl
|
||||
from lib389.tasks import BackupTask, RestoreTask, Task
|
||||
@@ -249,6 +250,57 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
self.dbdir = self.ds_paths.db_dir
|
||||
self.changelogdir = os.path.join(os.path.dirname(self.dbdir), DEFAULT_CHANGELOG_DB)
|
||||
|
||||
+ def _extract_serverid_from_string(self, text):
|
||||
+ """Extract serverid from a string containing 'slapd-<serverid>' pattern.
|
||||
+ Returns the serverid or None if not found or validation fails.
|
||||
+ Only attempts derivation if serverid is currently None.
|
||||
+ """
|
||||
+ if getattr(self, 'serverid', None) is not None:
|
||||
+ return None
|
||||
+ if not text:
|
||||
+ return None
|
||||
+
|
||||
+ # Use regex to extract serverid from "slapd-<serverid>" or "slapd-<serverid>.socket"
|
||||
+ match = re.search(r'slapd-([A-Za-z0-9._-]+?)(?:\.socket)?(?:$|/)', text)
|
||||
+ if not match:
|
||||
+ return None
|
||||
+ candidate = match.group(1)
|
||||
+
|
||||
+ self.serverid = candidate
|
||||
+ try:
|
||||
+ insts = get_instance_list()
|
||||
+ except Exception:
|
||||
+ self.serverid = None
|
||||
+ return None
|
||||
+ if f'slapd-{candidate}' in insts or candidate in insts:
|
||||
+ return candidate
|
||||
+ # restore original and report failure
|
||||
+ self.serverid = None
|
||||
+ return None
|
||||
+
|
||||
+ def _derive_serverid_from_ldapi(self):
|
||||
+ """Attempt to derive serverid from an LDAPI socket path or URI and
|
||||
+ verify it exists on the system. Returns the serverid or None.
|
||||
+ """
|
||||
+ socket_path = None
|
||||
+ if hasattr(self, 'ldapi_socket') and self.ldapi_socket:
|
||||
+ socket_path = unquote(self.ldapi_socket)
|
||||
+ elif hasattr(self, 'ldapuri') and isinstance(self.ldapuri, str) and self.ldapuri.startswith('ldapi://'):
|
||||
+ socket_path = unquote(self.ldapuri[len('ldapi://'):])
|
||||
+
|
||||
+ return self._extract_serverid_from_string(socket_path)
|
||||
+
|
||||
+ def _derive_serverid_from_instancedir(self):
|
||||
+ """Extract serverid from nsslapd-instancedir path like '/usr/lib64/dirsrv/slapd-<serverid>'"""
|
||||
+ try:
|
||||
+ from lib389.config import Config
|
||||
+ config = Config(self)
|
||||
+ instancedir = config.get_attr_val_utf8_l("nsslapd-instancedir")
|
||||
+ except Exception:
|
||||
+ return None
|
||||
+
|
||||
+ return self._extract_serverid_from_string(instancedir)
|
||||
+
|
||||
def rebind(self):
|
||||
"""Reconnect to the DS
|
||||
|
||||
@@ -528,6 +580,15 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
self.ldapi_autobind = args.get(SER_LDAPI_AUTOBIND, 'off')
|
||||
self.isLocal = True
|
||||
self.log.debug("Allocate %s with %s", self.__class__, self.ldapi_socket)
|
||||
+ elif self.ldapuri is not None and isinstance(self.ldapuri, str) and self.ldapuri.startswith('ldapi://'):
|
||||
+ # Try to learn serverid from ldapi uri
|
||||
+ try:
|
||||
+ self.ldapi_enabled = 'on'
|
||||
+ self.ldapi_socket = unquote(self.ldapuri[len('ldapi://'):])
|
||||
+ self.ldapi_autobind = args.get(SER_LDAPI_AUTOBIND, 'off')
|
||||
+ self.isLocal = True
|
||||
+ except Exception:
|
||||
+ pass
|
||||
# Settings from args of server attributes
|
||||
self.strict_hostname = args.get(SER_STRICT_HOSTNAME_CHECKING, False)
|
||||
if self.strict_hostname is True:
|
||||
@@ -548,9 +609,16 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
|
||||
self.log.debug("Allocate %s with %s:%s", self.__class__, self.host, (self.sslport or self.port))
|
||||
|
||||
- if SER_SERVERID_PROP in args:
|
||||
- self.ds_paths = Paths(serverid=args[SER_SERVERID_PROP], instance=self, local=self.isLocal)
|
||||
+ # Try to determine serverid if not provided
|
||||
+ if SER_SERVERID_PROP in args and args.get(SER_SERVERID_PROP) is not None:
|
||||
self.serverid = args.get(SER_SERVERID_PROP, None)
|
||||
+ elif getattr(self, 'serverid', None) is None and self.isLocal:
|
||||
+ sid = self._derive_serverid_from_ldapi()
|
||||
+ if sid:
|
||||
+ self.serverid = sid
|
||||
+
|
||||
+ if getattr(self, 'serverid', None):
|
||||
+ self.ds_paths = Paths(serverid=self.serverid, instance=self, local=self.isLocal)
|
||||
else:
|
||||
self.ds_paths = Paths(instance=self, local=self.isLocal)
|
||||
|
||||
@@ -989,6 +1057,17 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
self.__initPart2()
|
||||
self.state = DIRSRV_STATE_ONLINE
|
||||
# Now that we're online, some of our methods may try to query the version online.
|
||||
+
|
||||
+ # After transitioning online, attempt to derive serverid if still unknown.
|
||||
+ # If we find it, refresh ds_paths and rerun __initPart2
|
||||
+ if getattr(self, 'serverid', None) is None and self.isLocal:
|
||||
+ sid = self._derive_serverid_from_instancedir()
|
||||
+ if sid:
|
||||
+ self.serverid = sid
|
||||
+ # Reinitialize paths with the new serverid
|
||||
+ self.ds_paths = Paths(serverid=self.serverid, instance=self, local=self.isLocal)
|
||||
+ if not connOnly:
|
||||
+ self.__initPart2()
|
||||
self.__add_brookers__()
|
||||
|
||||
def close(self):
|
||||
@@ -3537,8 +3616,4 @@ class DirSrv(SimpleLDAPObject, object):
|
||||
"""
|
||||
Get the pid of the running server
|
||||
"""
|
||||
- pid = pid_from_file(self.pid_file())
|
||||
- if pid == 0 or pid is None:
|
||||
- return 0
|
||||
- else:
|
||||
- return pid
|
||||
+ return pid_from_file(self.pid_file())
|
||||
diff --git a/src/lib389/lib389/cli_base/dsrc.py b/src/lib389/lib389/cli_base/dsrc.py
|
||||
index 84567b990..498228ce0 100644
|
||||
--- a/src/lib389/lib389/cli_base/dsrc.py
|
||||
+++ b/src/lib389/lib389/cli_base/dsrc.py
|
||||
@@ -56,7 +56,7 @@ def dsrc_arg_concat(args, dsrc_inst):
|
||||
new_dsrc_inst['args'][SER_ROOT_DN] = new_dsrc_inst['binddn']
|
||||
if new_dsrc_inst['uri'][0:8] == 'ldapi://':
|
||||
new_dsrc_inst['args'][SER_LDAPI_ENABLED] = "on"
|
||||
- new_dsrc_inst['args'][SER_LDAPI_SOCKET] = new_dsrc_inst['uri'][9:]
|
||||
+ new_dsrc_inst['args'][SER_LDAPI_SOCKET] = new_dsrc_inst['uri'][8:]
|
||||
new_dsrc_inst['args'][SER_LDAPI_AUTOBIND] = "on"
|
||||
|
||||
# Make new
|
||||
@@ -170,7 +170,7 @@ def dsrc_to_ldap(path, instance_name, log):
|
||||
dsrc_inst['args'][SER_ROOT_DN] = dsrc_inst['binddn']
|
||||
if dsrc_inst['uri'][0:8] == 'ldapi://':
|
||||
dsrc_inst['args'][SER_LDAPI_ENABLED] = "on"
|
||||
- dsrc_inst['args'][SER_LDAPI_SOCKET] = dsrc_inst['uri'][9:]
|
||||
+ dsrc_inst['args'][SER_LDAPI_SOCKET] = dsrc_inst['uri'][8:]
|
||||
dsrc_inst['args'][SER_LDAPI_AUTOBIND] = "on"
|
||||
|
||||
# Return the dict.
|
||||
diff --git a/src/lib389/lib389/monitor.py b/src/lib389/lib389/monitor.py
|
||||
index 27b99a7e3..bf3e1df76 100644
|
||||
--- a/src/lib389/lib389/monitor.py
|
||||
+++ b/src/lib389/lib389/monitor.py
|
||||
@@ -92,21 +92,47 @@ class Monitor(DSLdapObject):
|
||||
Get CPU and memory stats
|
||||
"""
|
||||
stats = {}
|
||||
- pid = self._instance.get_pid()
|
||||
+ try:
|
||||
+ pid = self._instance.get_pid()
|
||||
+ except Exception:
|
||||
+ pid = None
|
||||
total_mem = psutil.virtual_memory()[0]
|
||||
- p = psutil.Process(pid)
|
||||
- memory_stats = p.memory_full_info()
|
||||
|
||||
- # Get memory & CPU stats
|
||||
+ # Always include total system memory
|
||||
stats['total_mem'] = [str(total_mem)]
|
||||
- stats['rss'] = [str(memory_stats[0])]
|
||||
- stats['vms'] = [str(memory_stats[1])]
|
||||
- stats['swap'] = [str(memory_stats[9])]
|
||||
- stats['mem_rss_percent'] = [str(round(p.memory_percent("rss")))]
|
||||
- stats['mem_vms_percent'] = [str(round(p.memory_percent("vms")))]
|
||||
- stats['mem_swap_percent'] = [str(round(p.memory_percent("swap")))]
|
||||
- stats['total_threads'] = [str(p.num_threads())]
|
||||
- stats['cpu_usage'] = [str(round(p.cpu_percent(interval=0.1)))]
|
||||
+
|
||||
+ # Process-specific stats - only if process is running (pid is not None)
|
||||
+ if pid is not None:
|
||||
+ try:
|
||||
+ p = psutil.Process(pid)
|
||||
+ memory_stats = p.memory_full_info()
|
||||
+
|
||||
+ # Get memory & CPU stats
|
||||
+ stats['rss'] = [str(memory_stats[0])]
|
||||
+ stats['vms'] = [str(memory_stats[1])]
|
||||
+ stats['swap'] = [str(memory_stats[9])]
|
||||
+ stats['mem_rss_percent'] = [str(round(p.memory_percent("rss")))]
|
||||
+ stats['mem_vms_percent'] = [str(round(p.memory_percent("vms")))]
|
||||
+ stats['mem_swap_percent'] = [str(round(p.memory_percent("swap")))]
|
||||
+ stats['total_threads'] = [str(p.num_threads())]
|
||||
+ stats['cpu_usage'] = [str(round(p.cpu_percent(interval=0.1)))]
|
||||
+ except (psutil.NoSuchProcess, psutil.AccessDenied):
|
||||
+ # Process exists in PID file but is not accessible or doesn't exist
|
||||
+ pid = None
|
||||
+
|
||||
+ # If no valid PID, provide zero values for process stats
|
||||
+ if pid is None:
|
||||
+ stats['rss'] = ['0']
|
||||
+ stats['vms'] = ['0']
|
||||
+ stats['swap'] = ['0']
|
||||
+ stats['mem_rss_percent'] = ['0']
|
||||
+ stats['mem_vms_percent'] = ['0']
|
||||
+ stats['mem_swap_percent'] = ['0']
|
||||
+ stats['total_threads'] = ['0']
|
||||
+ stats['cpu_usage'] = ['0']
|
||||
+ stats['server_status'] = ['PID unavailable']
|
||||
+ else:
|
||||
+ stats['server_status'] = ['Server running']
|
||||
|
||||
# Connections to DS
|
||||
if self._instance.port == "0":
|
||||
--
2.49.0

@ -1,4 +1,4 @@
From 23e56fd01eaa24a2fa945430f91600dd9c726d34 Mon Sep 17 00:00:00 2001
From 594333d1a6a8bba4d485b8227c4474e4ca2aa6a4 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 19 Aug 2025 14:30:15 -0700
Subject: [PATCH] Issue 6936 - Make user/subtree policy creation idempotent
@ -17,9 +17,6 @@ and preventing regressions.
Fixes: https://github.com/389ds/389-ds-base/issues/6936

Reviewed by: @mreynolds389 (Thanks!)

(cherry picked from commit da4eea126cc9019f540b57c1db9dec7988cade10)
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
---
.../pwp_history_local_override_test.py | 351 ++++++++++++++++++
src/lib389/lib389/cli_conf/pwpolicy.py | 4 +-
@ -568,5 +565,5 @@ index 6a47a44fe..539c230a9 100644
# Starting deleting the policy, ignore the parts that might already have been removed
pwp_container = nsContainer(self._instance, 'cn=nsPwPolicyContainer,%s' % parentdn)
--
2.51.1
2.49.0

1460
0039-Issue-6919-numSubordinates-tombstoneNumSubordinates-.patch
Normal file
File diff suppressed because it is too large
574
0040-Issue-6910-Fix-latest-coverity-issues.patch
Normal file
@ -0,0 +1,574 @@
|
||||
From 5d2dc7f78f0a834e46d5665f0c12024da5ddda9e Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 28 Jul 2025 17:12:33 -0400
Subject: [PATCH] Issue 6910 - Fix latest coverity issues

Description:

Fix various coverity/ASAN warnings:

- CID 1618837: Out-of-bounds read (OVERRUN) - bdb_bdbreader_glue.c
- CID 1618831: Resource leak (RESOURCE_LEAK) - bdb_layer.c
- CID 1612606: Resource leak (RESOURCE_LEAK) - log.c
- CID 1611461: Uninitialized pointer read (UNINIT) - repl5_agmt.c
- CID 1568589: Dereference before null check (REVERSE_INULL) - repl5_agmt.c
- CID 1590353: Logically dead code (DEADCODE) - repl5_agmt.c
- CID 1611460: Logically dead code (DEADCODE) - control.c
- CID 1610568: Dereference after null check (FORWARD_NULL) - modify.c
- CID 1591259: Out-of-bounds read (OVERRUN) - memberof.c
- CID 1550231: Unsigned compared against 0 (NO_EFFECT) - memberof_config.c
- CID 1548904: Overflowed constant (INTEGER_OVERFLOW) - ch_malloc.c
- CID 1548902: Overflowed constant (INTEGER_OVERFLOW) - dse.lc
- CID 1548900: Overflowed return value (INTEGER_OVERFLOW) - acct_util.c
- CID 1548898: Overflowed constant (INTEGER_OVERFLOW) - parents.c
- CID 1546849: Resource leak (RESOURCE_LEAK) - referint.c
- ASAN - Use after free - automember.c

Relates: http://github.com/389ds/389-ds-base/issues/6910

Reviewed by: progier & spichugi(Thanks!)
---
ldap/servers/plugins/acctpolicy/acct_util.c | 6 ++-
ldap/servers/plugins/automember/automember.c | 9 ++--
ldap/servers/plugins/memberof/memberof.c | 15 +++++--
.../plugins/memberof/memberof_config.c | 11 +++--
ldap/servers/plugins/referint/referint.c | 4 +-
ldap/servers/plugins/replication/repl5_agmt.c | 41 ++++++++-----------
.../slapd/back-ldbm/db-bdb/bdb_import.c | 5 ++-
.../back-ldbm/db-bdb/bdb_instance_config.c | 3 +-
.../slapd/back-ldbm/db-bdb/bdb_layer.c | 13 ++++--
ldap/servers/slapd/back-ldbm/parents.c | 4 +-
ldap/servers/slapd/ch_malloc.c | 4 +-
ldap/servers/slapd/control.c | 5 +--
ldap/servers/slapd/dse.c | 4 +-
ldap/servers/slapd/log.c | 5 ++-
ldap/servers/slapd/modify.c | 6 +--
ldap/servers/slapd/passwd_extop.c | 2 +-
ldap/servers/slapd/unbind.c | 12 ++++--
17 files changed, 88 insertions(+), 61 deletions(-)

diff --git a/ldap/servers/plugins/acctpolicy/acct_util.c b/ldap/servers/plugins/acctpolicy/acct_util.c
|
||||
index b27eeaff1..7735d10e6 100644
|
||||
--- a/ldap/servers/plugins/acctpolicy/acct_util.c
|
||||
+++ b/ldap/servers/plugins/acctpolicy/acct_util.c
|
||||
@@ -17,7 +17,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
Contributors:
|
||||
Hewlett-Packard Development Company, L.P.
|
||||
|
||||
-Copyright (C) 2021 Red Hat, Inc.
|
||||
+Copyright (C) 2025 Red Hat, Inc.
|
||||
******************************************************************************/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -248,6 +248,10 @@ gentimeToEpochtime(char *gentimestr)
|
||||
|
||||
/* Turn tm object into local epoch time */
|
||||
epochtime = mktime(&t);
|
||||
+ if (epochtime == (time_t) -1) {
|
||||
+ /* mktime failed */
|
||||
+ return 0;
|
||||
+ }
|
||||
|
||||
/* Turn local epoch time into GMT epoch time */
|
||||
epochtime -= zone_offset;
|
||||
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
|
||||
index f900db7f2..9eade495e 100644
|
||||
--- a/ldap/servers/plugins/automember/automember.c
|
||||
+++ b/ldap/servers/plugins/automember/automember.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2022 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -1756,9 +1756,10 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
|
||||
mod_pb = slapi_pblock_new();
|
||||
/* Do a single mod with error overrides for DEL/ADD */
|
||||
- result = slapi_single_modify_internal_override(mod_pb, slapi_sdn_new_dn_byval(group_dn), mods,
|
||||
- automember_get_plugin_id(), 0);
|
||||
-
|
||||
+ Slapi_DN *sdn = slapi_sdn_new_normdn_byref(group_dn);
|
||||
+ result = slapi_single_modify_internal_override(mod_pb, sdn, mods,
|
||||
+ automember_get_plugin_id(), 0);
|
||||
+ slapi_sdn_free(&sdn);
|
||||
if(add){
|
||||
if (result != LDAP_SUCCESS) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index 073d8d938..cfda977f0 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2021 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -1655,6 +1655,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn
|
||||
/* We already did the search for this backend, don't
|
||||
* do it again when we fall through */
|
||||
do_suffix_search = PR_FALSE;
|
||||
+ slapi_pblock_init(search_pb);
|
||||
}
|
||||
}
|
||||
} else if (!all_backends) {
|
||||
@@ -3763,6 +3764,10 @@ memberof_replace_list(Slapi_PBlock *pb, MemberOfConfig *config, Slapi_DN *group_
|
||||
|
||||
pre_index++;
|
||||
} else {
|
||||
+ if (pre_index >= pre_total || post_index >= post_total) {
|
||||
+ /* Don't overrun pre_array/post_array */
|
||||
+ break;
|
||||
+ }
|
||||
/* decide what to do */
|
||||
int cmp = memberof_compare(
|
||||
config,
|
||||
@@ -4453,10 +4458,12 @@ memberof_add_memberof_attr(LDAPMod **mods, const char *dn, char *add_oc)
|
||||
|
||||
while (1) {
|
||||
slapi_pblock_init(mod_pb);
|
||||
-
|
||||
+ Slapi_DN *sdn = slapi_sdn_new_normdn_byref(dn);
|
||||
/* Internal mod with error overrides for DEL/ADD */
|
||||
- rc = slapi_single_modify_internal_override(mod_pb, slapi_sdn_new_normdn_byref(dn), single_mod,
|
||||
- memberof_get_plugin_id(), SLAPI_OP_FLAG_BYPASS_REFERRALS);
|
||||
+ rc = slapi_single_modify_internal_override(mod_pb, sdn, single_mod,
|
||||
+ memberof_get_plugin_id(),
|
||||
+ SLAPI_OP_FLAG_BYPASS_REFERRALS);
|
||||
+ slapi_sdn_free(&sdn);
|
||||
if (rc == LDAP_OBJECT_CLASS_VIOLATION) {
|
||||
if (!add_oc || added_oc) {
|
||||
/*
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof_config.c b/ldap/servers/plugins/memberof/memberof_config.c
|
||||
index 1e83ba6e0..e4da351d9 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof_config.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof_config.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2021 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -570,21 +570,24 @@ memberof_apply_config(Slapi_PBlock *pb __attribute__((unused)),
|
||||
if (num_groupattrs > 1) {
|
||||
size_t bytes_out = 0;
|
||||
size_t filter_str_len = groupattr_name_len + (num_groupattrs * 4) + 4;
|
||||
+ int32_t rc = 0;
|
||||
|
||||
/* Allocate enough space for the filter */
|
||||
filter_str = slapi_ch_malloc(filter_str_len);
|
||||
|
||||
/* Add beginning of filter. */
|
||||
- bytes_out = snprintf(filter_str, filter_str_len - bytes_out, "(|");
|
||||
- if (bytes_out<0) {
|
||||
+ rc = snprintf(filter_str, filter_str_len - bytes_out, "(|");
|
||||
+ if (rc < 0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM, "snprintf unexpectly failed in memberof_apply_config.\n");
|
||||
*returncode = LDAP_UNWILLING_TO_PERFORM;
|
||||
goto done;
|
||||
+ } else {
|
||||
+ bytes_out = rc;
|
||||
}
|
||||
|
||||
/* Add filter section for each groupattr. */
|
||||
for (size_t i=0; theConfig.groupattrs && theConfig.groupattrs[i]; i++) {
|
||||
- size_t bytes_read = snprintf(filter_str + bytes_out, filter_str_len - bytes_out, "(%s=*)", theConfig.groupattrs[i]);
|
||||
+ int32_t bytes_read = snprintf(filter_str + bytes_out, filter_str_len - bytes_out, "(%s=*)", theConfig.groupattrs[i]);
|
||||
if (bytes_read<0) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM, "snprintf unexpectly failed in memberof_apply_config.\n");
|
||||
*returncode = LDAP_UNWILLING_TO_PERFORM;
|
||||
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
|
||||
index 5d7f9e5dd..5746b913f 100644
|
||||
--- a/ldap/servers/plugins/referint/referint.c
|
||||
+++ b/ldap/servers/plugins/referint/referint.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2021 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -1499,6 +1499,8 @@ referint_thread_func(void *arg __attribute__((unused)))
|
||||
slapi_sdn_free(&sdn);
|
||||
continue;
|
||||
}
|
||||
+
|
||||
+ slapi_sdn_free(&tmpsuperior);
|
||||
if (!strcasecmp(ptoken, "NULL")) {
|
||||
tmpsuperior = NULL;
|
||||
} else {
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
index 0a81167b7..eed97578e 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2021 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -202,7 +202,7 @@ agmt_init_session_id(Repl_Agmt *ra)
|
||||
char *host = NULL; /* e.g. localhost.domain */
|
||||
char port[10]; /* e.g. 389 */
|
||||
char sport[10]; /* e.g. 636 */
|
||||
- char *hash_in;
|
||||
+ char *hash_in = NULL;
|
||||
int32_t max_str_sid = SESSION_ID_STR_SZ - 4;
|
||||
|
||||
if (ra == NULL) {
|
||||
@@ -2718,31 +2718,26 @@ agmt_update_init_status(Repl_Agmt *ra)
|
||||
mod_idx++;
|
||||
}
|
||||
|
||||
- if (nb_mods) {
|
||||
- /* it is ok to release the lock here because we are done with the agreement data.
|
||||
- we have to do it before issuing the modify operation because it causes
|
||||
- agmtlist_notify_all to be called which uses the same lock - hence the deadlock */
|
||||
- PR_Unlock(ra->lock);
|
||||
-
|
||||
- pb = slapi_pblock_new();
|
||||
- mods[nb_mods] = NULL;
|
||||
+ /* it is ok to release the lock here because we are done with the agreement data.
|
||||
+ we have to do it before issuing the modify operation because it causes
|
||||
+ agmtlist_notify_all to be called which uses the same lock - hence the deadlock */
|
||||
+ PR_Unlock(ra->lock);
|
||||
|
||||
- slapi_modify_internal_set_pb_ext(pb, ra->dn, mods, NULL, NULL,
|
||||
- repl_get_plugin_identity(PLUGIN_MULTISUPPLIER_REPLICATION), 0);
|
||||
- slapi_modify_internal_pb(pb);
|
||||
+ pb = slapi_pblock_new();
|
||||
+ mods[nb_mods] = NULL;
|
||||
|
||||
- slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
|
||||
- if (rc != LDAP_SUCCESS && rc != LDAP_NO_SUCH_ATTRIBUTE) {
|
||||
- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "agmt_update_consumer_ruv - "
|
||||
- "%s: agmt_update_consumer_ruv: "
|
||||
- "failed to update consumer's RUV; LDAP error - %d\n",
|
||||
- ra->long_name, rc);
|
||||
- }
|
||||
+ slapi_modify_internal_set_pb_ext(pb, ra->dn, mods, NULL, NULL,
|
||||
+ repl_get_plugin_identity(PLUGIN_MULTISUPPLIER_REPLICATION), 0);
|
||||
+ slapi_modify_internal_pb(pb);
|
||||
|
||||
- slapi_pblock_destroy(pb);
|
||||
- } else {
|
||||
- PR_Unlock(ra->lock);
|
||||
+ slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
|
||||
+ if (rc != LDAP_SUCCESS && rc != LDAP_NO_SUCH_ATTRIBUTE) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "agmt_update_consumer_ruv - "
|
||||
+ "%s: agmt_update_consumer_ruv: failed to update consumer's RUV; LDAP error - %d\n",
|
||||
+ ra->long_name, rc);
|
||||
}
|
||||
+
|
||||
+ slapi_pblock_destroy(pb);
|
||||
slapi_ch_free((void **)&mods);
|
||||
slapi_mod_done(&smod_start_time);
|
||||
slapi_mod_done(&smod_end_time);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
|
||||
index 46c80ec3d..0127bf2f9 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2020 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -947,6 +947,7 @@ bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job)
|
||||
EQ_PREFIX, (u_long)id);
|
||||
key.size++; /* include the null terminator */
|
||||
ret = NEW_IDL_NO_ALLID;
|
||||
+ idl_free(&children);
|
||||
children = idl_fetch(be, db_pid, &key, txn, ai_pid, &ret);
|
||||
if (ret != 0) {
|
||||
ldbm_nasty("bdb_ancestorid_new_idl_create_index", sourcefile, 13070, ret);
|
||||
@@ -957,6 +958,7 @@ bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job)
|
||||
if (job->flags & FLAG_ABORT) {
|
||||
import_log_notice(job, SLAPI_LOG_ERR, "bdb_ancestorid_new_idl_create_index",
|
||||
"ancestorid creation aborted.");
|
||||
+ idl_free(&children);
|
||||
ret = -1;
|
||||
break;
|
||||
}
|
||||
@@ -1290,6 +1292,7 @@ bdb_update_subordinatecounts(backend *be, ImportJob *job, DB_TXN *txn)
|
||||
}
|
||||
bdb_close_subcount_cursor(&c_entryrdn);
|
||||
bdb_close_subcount_cursor(&c_objectclass);
|
||||
+
|
||||
return ret;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c
|
||||
index bb515a23f..44a624fde 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2020 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -261,6 +261,7 @@ bdb_instance_cleanup(struct ldbm_instance *inst)
|
||||
if (inst_dirp && *inst_dir) {
|
||||
return_value = env->remove(env, inst_dirp, 0);
|
||||
} else {
|
||||
+ slapi_ch_free((void **)&env);
|
||||
return_value = -1;
|
||||
}
|
||||
if (return_value == EBUSY) {
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
index 53f1cde69..b1e44a919 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2023 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -2027,9 +2027,13 @@ bdb_pre_close(struct ldbminfo *li)
|
||||
conf = (bdb_config *)li->li_dblayer_config;
|
||||
bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
|
||||
|
||||
+ if (pEnv == NULL) {
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
pthread_mutex_lock(&pEnv->bdb_thread_count_lock);
|
||||
|
||||
- if (conf->bdb_stop_threads || !pEnv) {
|
||||
+ if (conf->bdb_stop_threads) {
|
||||
/* already stopped. do nothing... */
|
||||
goto timeout_escape;
|
||||
}
|
||||
@@ -2203,6 +2207,7 @@ bdb_remove_env(struct ldbminfo *li)
|
||||
}
|
||||
if (NULL == li) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "bdb_remove_env", "No ldbm info is given\n");
|
||||
+ slapi_ch_free((void **)&env);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -2212,10 +2217,11 @@ bdb_remove_env(struct ldbminfo *li)
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_ERR,
|
||||
"bdb_remove_env", "Failed to remove DB environment files. "
|
||||
- "Please remove %s/__db.00# (# is 1 through 6)\n",
|
||||
+ "Please remove %s/__db.00# (# is 1 through 6)\n",
|
||||
home_dir);
|
||||
}
|
||||
}
|
||||
+ slapi_ch_free((void **)&env);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@@ -6341,6 +6347,7 @@ bdb_back_ctrl(Slapi_Backend *be, int cmd, void *info)
|
||||
db->close(db, 0);
|
||||
rc = bdb_db_remove_ex((bdb_db_env *)priv->dblayer_env, path, NULL, PR_TRUE);
|
||||
inst->inst_changelog = NULL;
|
||||
+ slapi_ch_free_string(&path);
|
||||
slapi_ch_free_string(&instancedir);
|
||||
}
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/parents.c b/ldap/servers/slapd/back-ldbm/parents.c
|
||||
index 31107591e..52c665ca4 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/parents.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/parents.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -123,7 +123,7 @@ parent_update_on_childchange(modify_context *mc, int op, size_t *new_sub_count)
|
||||
/* Now compute the new value */
|
||||
if ((PARENTUPDATE_ADD == op) || (PARENTUPDATE_RESURECT == op)) {
|
||||
current_sub_count++;
|
||||
- } else {
|
||||
+ } else if (current_sub_count > 0) {
|
||||
current_sub_count--;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/ch_malloc.c b/ldap/servers/slapd/ch_malloc.c
|
||||
index cbab1d170..27ed546a5 100644
|
||||
--- a/ldap/servers/slapd/ch_malloc.c
|
||||
+++ b/ldap/servers/slapd/ch_malloc.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -254,7 +254,7 @@ slapi_ch_bvecdup(struct berval **v)
|
||||
++i;
|
||||
newberval = (struct berval **)slapi_ch_malloc((i + 1) * sizeof(struct berval *));
|
||||
newberval[i] = NULL;
|
||||
- while (i-- > 0) {
|
||||
+ while (i > 0 && i-- > 0) {
|
||||
newberval[i] = slapi_ch_bvdup(v[i]);
|
||||
}
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/control.c b/ldap/servers/slapd/control.c
|
||||
index 7aeeba885..d661dc6e1 100644
|
||||
--- a/ldap/servers/slapd/control.c
|
||||
+++ b/ldap/servers/slapd/control.c
|
||||
@@ -174,7 +174,6 @@ create_sessiontracking_ctrl(const char *session_tracking_id, LDAPControl **sessi
|
||||
char *undefined_sid = "undefined sid";
|
||||
const char *sid;
|
||||
int rc = 0;
|
||||
- int tag;
|
||||
LDAPControl *ctrl = NULL;
|
||||
|
||||
if (session_tracking_id) {
|
||||
@@ -183,9 +182,7 @@ create_sessiontracking_ctrl(const char *session_tracking_id, LDAPControl **sessi
|
||||
sid = undefined_sid;
|
||||
}
|
||||
ctrlber = ber_alloc();
|
||||
- tag = ber_printf( ctrlber, "{nnno}", sid, strlen(sid));
|
||||
- if (rc == LBER_ERROR) {
|
||||
- tag = -1;
|
||||
+ if ((rc = ber_printf( ctrlber, "{nnno}", sid, strlen(sid)) == LBER_ERROR)) {
|
||||
goto done;
|
||||
}
|
||||
slapi_build_control(LDAP_CONTROL_X_SESSION_TRACKING, ctrlber, 0, &ctrl);
|
||||
diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
|
||||
index b788054db..bec3e32f4 100644
|
||||
--- a/ldap/servers/slapd/dse.c
|
||||
+++ b/ldap/servers/slapd/dse.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -637,7 +637,7 @@ dse_updateNumSubordinates(Slapi_Entry *entry, int op)
|
||||
/* Now compute the new value */
|
||||
if (SLAPI_OPERATION_ADD == op) {
|
||||
current_sub_count++;
|
||||
- } else {
|
||||
+ } else if (current_sub_count > 0) {
|
||||
current_sub_count--;
|
||||
}
|
||||
{
|
||||
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
|
||||
index 91ba23047..a9a5f3b3f 100644
|
||||
--- a/ldap/servers/slapd/log.c
|
||||
+++ b/ldap/servers/slapd/log.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005-2024 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* Copyright (C) 2010 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -201,6 +201,7 @@ compress_log_file(char *log_name, int32_t mode)
|
||||
|
||||
if ((source = fopen(log_name, "r")) == NULL) {
|
||||
/* Failed to open log file */
|
||||
+ /* coverity[leaked_storage] gzclose does close FD */
|
||||
gzclose(outfile);
|
||||
return -1;
|
||||
}
|
||||
@@ -211,11 +212,13 @@ compress_log_file(char *log_name, int32_t mode)
|
||||
if (bytes_written == 0)
|
||||
{
|
||||
fclose(source);
|
||||
+ /* coverity[leaked_storage] gzclose does close FD */
|
||||
gzclose(outfile);
|
||||
return -1;
|
||||
}
|
||||
bytes_read = fread(buf, 1, LOG_CHUNK, source);
|
||||
}
|
||||
+ /* coverity[leaked_storage] gzclose does close FD */
|
||||
gzclose(outfile);
|
||||
fclose(source);
|
||||
PR_Delete(log_name); /* remove the old uncompressed log */
|
||||
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
|
||||
index 0a351d46a..9e5bce80b 100644
|
||||
--- a/ldap/servers/slapd/modify.c
|
||||
+++ b/ldap/servers/slapd/modify.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2009 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* Copyright (C) 2009, 2010 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -498,7 +498,7 @@ slapi_modify_internal_set_pb_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod
|
||||
*
|
||||
* Any other errors encountered during the operation will be returned as-is.
|
||||
*/
|
||||
-int
|
||||
+int
|
||||
slapi_single_modify_internal_override(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod **mod, Slapi_ComponentId *plugin_id, int op_flags)
|
||||
{
|
||||
int rc = 0;
|
||||
@@ -512,7 +512,7 @@ slapi_single_modify_internal_override(Slapi_PBlock *pb, const Slapi_DN *sdn, LDA
|
||||
!pb ? "pb " : "",
|
||||
!sdn ? "sdn " : "",
|
||||
!mod ? "mod " : "",
|
||||
- !mod[0] ? "mod[0] " : "");
|
||||
+ !mod || !mod[0] ? "mod[0] " : "");
|
||||
|
||||
return LDAP_PARAM_ERROR;
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c
|
||||
index 69bb3494c..5f05cf74e 100644
|
||||
--- a/ldap/servers/slapd/passwd_extop.c
|
||||
+++ b/ldap/servers/slapd/passwd_extop.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
diff --git a/ldap/servers/slapd/unbind.c b/ldap/servers/slapd/unbind.c
|
||||
index fa8cd649f..c4e7a5efd 100644
|
||||
--- a/ldap/servers/slapd/unbind.c
|
||||
+++ b/ldap/servers/slapd/unbind.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -112,8 +112,12 @@ do_unbind(Slapi_PBlock *pb)
|
||||
/* pass the unbind to all backends */
|
||||
be_unbindall(pb_conn, operation);
|
||||
|
||||
-free_and_return:;
|
||||
+free_and_return:
|
||||
|
||||
- /* close the connection to the client */
|
||||
- disconnect_server(pb_conn, operation->o_connid, operation->o_opid, SLAPD_DISCONNECT_UNBIND, 0);
|
||||
+ /* close the connection to the client after refreshing the operation */
|
||||
+ slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
|
||||
+ disconnect_server(pb_conn,
|
||||
+ operation ? operation->o_connid : -1,
|
||||
+ operation ? operation->o_opid : -1,
|
||||
+ SLAPD_DISCONNECT_UNBIND, 0);
|
||||
}
|
||||
--
|
||||
2.49.0
|
||||
|
||||
@ -0,0 +1,35 @@
From ea62e862c8ca7e036f7d1e23ec3a27bffbc39bdf Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Mon, 11 Aug 2025 13:19:13 +0200
Subject: [PATCH] Issue 6929 - Compilation failure with rust-1.89 on Fedora ELN

Bug Description:
The `ValueArrayRefIter` struct has a lifetime parameter `'a`.
But in the `iter` method the return type doesn't specify the lifetime parameter.

Fix Description:
Make the lifetime explicit.

Fixes: https://github.com/389ds/389-ds-base/issues/6929

Reviewed by: @droideck (Thanks!)
---
src/slapi_r_plugin/src/value.rs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/slapi_r_plugin/src/value.rs b/src/slapi_r_plugin/src/value.rs
index 2fd35c808..fec74ac25 100644
--- a/src/slapi_r_plugin/src/value.rs
+++ b/src/slapi_r_plugin/src/value.rs
@ -61,7 +61,7 @@ impl ValueArrayRef {
ValueArrayRef { raw_slapi_val }
}

- pub fn iter(&self) -> ValueArrayRefIter {
+ pub fn iter(&self) -> ValueArrayRefIter<'_> {
ValueArrayRefIter {
idx: 0,
va_ref: &self,
--
2.49.0

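The sketch below is a minimal, hypothetical reproduction of the pattern the patch above fixes; the Items/ItemsIter names are made up and are not the actual slapi_r_plugin types. An iterator struct borrows from its owner, and returning it as a bare ItemsIter elides that borrow, which newer toolchains flag (presumably the source of the rust-1.89 build failure the commit describes); writing ItemsIter<'_> states the borrow explicitly without changing behavior.

// Hypothetical stand-ins for ValueArrayRef / ValueArrayRefIter.
struct Items {
    data: Vec<i32>,
}

struct ItemsIter<'a> {
    idx: usize,
    items: &'a Items,
}

impl Items {
    // Before the fix this signature would read `-> ItemsIter`, hiding the
    // borrow of `self`; `ItemsIter<'_>` makes the elided lifetime explicit.
    fn iter(&self) -> ItemsIter<'_> {
        ItemsIter { idx: 0, items: self }
    }
}

impl<'a> Iterator for ItemsIter<'a> {
    type Item = i32;

    fn next(&mut self) -> Option<i32> {
        let v = self.items.data.get(self.idx).copied();
        self.idx += 1;
        v
    }
}

fn main() {
    let items = Items { data: vec![1, 2, 3] };
    // The explicit lifetime is purely syntactic; callers are unaffected.
    assert_eq!(items.iter().sum::<i32>(), 6);
}
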
4 389-ds-base-devel.README Normal file
@ -0,0 +1,4 @@
For detailed information on developing plugins for 389 Directory Server visit

https://www.port389.org/docs/389ds/design/plugins.html
https://github.com/389ds/389-ds-base/blob/main/src/slapi_r_plugin/README.md

1100 389-ds-base.spec Normal file
File diff suppressed because it is too large

3 389-ds-base.sysusers Normal file
@ -0,0 +1,3 @@
#Type Name ID GECOS Home directory Shell
g dirsrv 389
u dirsrv 389:389 "user for 389-ds-base" /usr/share/dirsrv/ /sbin/nologin
@ -13,29 +13,15 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "adler2"
|
||||
version = "2.0.0"
|
||||
version = "2.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
|
||||
checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
|
||||
|
||||
[[package]]
|
||||
name = "ahash"
|
||||
version = "0.7.8"
|
||||
name = "allocator-api2"
|
||||
version = "0.2.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9"
|
||||
dependencies = [
|
||||
"getrandom 0.2.16",
|
||||
"once_cell",
|
||||
"version_check",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ansi_term"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
|
||||
|
||||
[[package]]
|
||||
name = "atty"
|
||||
@ -50,9 +36,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.4.0"
|
||||
version = "1.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
|
||||
checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
|
||||
|
||||
[[package]]
|
||||
name = "backtrace"
|
||||
@ -66,7 +52,7 @@ dependencies = [
|
||||
"miniz_oxide",
|
||||
"object",
|
||||
"rustc-demangle",
|
||||
"windows-targets",
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -95,11 +81,13 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
|
||||
|
||||
[[package]]
|
||||
name = "cbindgen"
|
||||
version = "0.9.1"
|
||||
version = "0.26.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
|
||||
checksum = "da6bc11b07529f16944307272d5bd9b22530bc7d05751717c9d416586cedab49"
|
||||
dependencies = [
|
||||
"clap",
|
||||
"heck",
|
||||
"indexmap",
|
||||
"log",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@ -112,9 +100,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.2.25"
|
||||
version = "1.2.31"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d0fc897dc1e865cc67c0e05a836d9d3f1df3cbe442aa4a9473b18e12624a4951"
|
||||
checksum = "c3a42d84bb6b69d3a8b3eaacf0d88f179e1929695e1ad012b6cf64d9caaa5fd2"
|
||||
dependencies = [
|
||||
"jobserver",
|
||||
"libc",
|
||||
@ -123,72 +111,49 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "1.0.0"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
|
||||
checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268"
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "2.34.0"
|
||||
version = "3.2.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
|
||||
checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123"
|
||||
dependencies = [
|
||||
"ansi_term",
|
||||
"atty",
|
||||
"bitflags 1.3.2",
|
||||
"clap_lex",
|
||||
"indexmap",
|
||||
"strsim",
|
||||
"termcolor",
|
||||
"textwrap",
|
||||
"unicode-width",
|
||||
"vec_map",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap_lex"
|
||||
version = "0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
|
||||
dependencies = [
|
||||
"os_str_bytes",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "concread"
|
||||
version = "0.2.21"
|
||||
version = "0.5.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dcc9816f5ac93ebd51c37f7f9a6bf2b40dfcd42978ad2aea5d542016e9244cf6"
|
||||
checksum = "07fd8c4b53f0aafeec114fa1cd863f323880f790656f2d7508af83a9b5110e8d"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
"crossbeam",
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils",
|
||||
"lru",
|
||||
"parking_lot",
|
||||
"rand",
|
||||
"smallvec",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam"
|
||||
version = "0.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8"
|
||||
dependencies = [
|
||||
"crossbeam-channel",
|
||||
"crossbeam-deque",
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-queue",
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-channel"
|
||||
version = "0.5.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
|
||||
dependencies = [
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-deque"
|
||||
version = "0.8.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
|
||||
dependencies = [
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils",
|
||||
"foldhash",
|
||||
"lru",
|
||||
"smallvec",
|
||||
"sptr",
|
||||
"tokio",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -238,13 +203,19 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "errno"
|
||||
version = "0.3.12"
|
||||
name = "equivalent"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18"
|
||||
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
|
||||
|
||||
[[package]]
|
||||
name = "errno"
|
||||
version = "0.3.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys",
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -266,6 +237,12 @@ dependencies = [
|
||||
"zeroize",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "foldhash"
|
||||
version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
|
||||
|
||||
[[package]]
|
||||
name = "foreign-types"
|
||||
version = "0.3.2"
|
||||
@ -289,7 +266,7 @@ checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"wasi 0.11.0+wasi-snapshot-preview1",
|
||||
"wasi 0.11.1+wasi-snapshot-preview1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -315,10 +292,24 @@ name = "hashbrown"
|
||||
version = "0.12.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
|
||||
|
||||
[[package]]
|
||||
name = "hashbrown"
|
||||
version = "0.15.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
"allocator-api2",
|
||||
"equivalent",
|
||||
"foldhash",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "heck"
|
||||
version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
|
||||
|
||||
[[package]]
|
||||
name = "hermit-abi"
|
||||
version = "0.1.19"
|
||||
@ -329,12 +320,24 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "instant"
|
||||
version = "0.1.13"
|
||||
name = "indexmap"
|
||||
version = "1.9.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
|
||||
checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"hashbrown 0.12.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "io-uring"
|
||||
version = "0.7.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -355,9 +358,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.172"
|
||||
version = "0.2.174"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"
|
||||
checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776"
|
||||
|
||||
[[package]]
|
||||
name = "librnsslapd"
|
||||
@ -384,16 +387,6 @@ version = "0.9.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
|
||||
|
||||
[[package]]
|
||||
name = "lock_api"
|
||||
version = "0.4.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"scopeguard",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.27"
|
||||
@ -402,28 +395,39 @@ checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
|
||||
|
||||
[[package]]
|
||||
name = "lru"
|
||||
version = "0.7.8"
|
||||
version = "0.13.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a"
|
||||
checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465"
|
||||
dependencies = [
|
||||
"hashbrown",
|
||||
"hashbrown 0.15.4",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "2.7.4"
|
||||
version = "2.7.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
|
||||
checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0"
|
||||
|
||||
[[package]]
|
||||
name = "miniz_oxide"
|
||||
version = "0.8.8"
|
||||
version = "0.8.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a"
|
||||
checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316"
|
||||
dependencies = [
|
||||
"adler2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mio"
|
||||
version = "1.0.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"wasi 0.11.1+wasi-snapshot-preview1",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "object"
|
||||
version = "0.36.7"
|
||||
@ -462,7 +466,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
"syn 2.0.104",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -478,29 +482,10 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot"
|
||||
version = "0.11.2"
|
||||
name = "os_str_bytes"
|
||||
version = "6.6.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
|
||||
dependencies = [
|
||||
"instant",
|
||||
"lock_api",
|
||||
"parking_lot_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot_core"
|
||||
version = "0.8.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"instant",
|
||||
"libc",
|
||||
"redox_syscall",
|
||||
"smallvec",
|
||||
"winapi",
|
||||
]
|
||||
checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1"
|
||||
|
||||
[[package]]
|
||||
name = "paste"
|
||||
@ -533,15 +518,6 @@ version = "0.3.32"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"
|
||||
|
||||
[[package]]
|
||||
name = "ppv-lite86"
|
||||
version = "0.2.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
|
||||
dependencies = [
|
||||
"zerocopy",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-hack"
|
||||
version = "0.5.20+deprecated"
|
||||
@ -581,70 +557,27 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "r-efi"
|
||||
version = "5.2.0"
|
||||
version = "5.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
|
||||
|
||||
[[package]]
|
||||
name = "rand"
|
||||
version = "0.8.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"rand_chacha",
|
||||
"rand_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_chacha"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
|
||||
dependencies = [
|
||||
"ppv-lite86",
|
||||
"rand_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_core"
|
||||
version = "0.6.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
|
||||
dependencies = [
|
||||
"getrandom 0.2.16",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "redox_syscall"
|
||||
version = "0.2.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
|
||||
dependencies = [
|
||||
"bitflags 1.3.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rsds"
|
||||
version = "0.1.0"
|
||||
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
|
||||
|
||||
[[package]]
|
||||
name = "rustc-demangle"
|
||||
version = "0.1.24"
|
||||
version = "0.1.26"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
|
||||
checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace"
|
||||
|
||||
[[package]]
|
||||
name = "rustix"
|
||||
version = "1.0.7"
|
||||
version = "1.0.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266"
|
||||
checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
"errno",
|
||||
"libc",
|
||||
"linux-raw-sys",
|
||||
"windows-sys",
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -653,12 +586,6 @@ version = "1.0.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
|
||||
|
||||
[[package]]
|
||||
name = "scopeguard"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.219"
|
||||
@ -676,14 +603,14 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
"syn 2.0.104",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_json"
|
||||
version = "1.0.140"
|
||||
version = "1.0.142"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
|
||||
checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7"
|
||||
dependencies = [
|
||||
"itoa",
|
||||
"memchr",
|
||||
@ -697,6 +624,12 @@ version = "1.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
|
||||
|
||||
[[package]]
|
||||
name = "slab"
|
||||
version = "0.4.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d"
|
||||
|
||||
[[package]]
|
||||
name = "slapd"
|
||||
version = "0.1.0"
|
||||
@ -715,15 +648,21 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "smallvec"
|
||||
version = "1.15.0"
|
||||
version = "1.15.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9"
|
||||
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
|
||||
|
||||
[[package]]
|
||||
name = "sptr"
|
||||
version = "0.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a"
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.8.0"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
|
||||
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
@ -738,9 +677,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "2.0.101"
|
||||
version = "2.0.104"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf"
|
||||
checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@ -757,38 +696,36 @@ dependencies = [
|
||||
"getrandom 0.3.3",
|
||||
"once_cell",
|
||||
"rustix",
|
||||
"windows-sys",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "termcolor"
|
||||
version = "1.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
|
||||
dependencies = [
|
||||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "textwrap"
|
||||
version = "0.11.0"
|
||||
version = "0.16.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
|
||||
dependencies = [
|
||||
"unicode-width",
|
||||
]
|
||||
checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057"
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.45.1"
|
||||
version = "1.47.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779"
|
||||
checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"io-uring",
|
||||
"libc",
|
||||
"mio",
|
||||
"pin-project-lite",
|
||||
"tokio-macros",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-macros"
|
||||
version = "2.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
"slab",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -800,18 +737,43 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing"
|
||||
version = "0.1.41"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
|
||||
dependencies = [
|
||||
"pin-project-lite",
|
||||
"tracing-attributes",
|
||||
"tracing-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-attributes"
|
||||
version = "0.1.30"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.104",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-core"
|
||||
version = "0.1.34"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678"
|
||||
dependencies = [
|
||||
"once_cell",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-ident"
|
||||
version = "1.0.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-width"
|
||||
version = "0.1.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af"
|
||||
|
||||
[[package]]
|
||||
name = "uuid"
|
||||
version = "0.8.2"
|
||||
@ -827,23 +789,11 @@ version = "0.2.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
|
||||
|
||||
[[package]]
|
||||
name = "vec_map"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
|
||||
|
||||
[[package]]
|
||||
name = "version_check"
|
||||
version = "0.9.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
version = "0.11.0+wasi-snapshot-preview1"
|
||||
version = "0.11.1+wasi-snapshot-preview1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
|
||||
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
@ -870,19 +820,43 @@ version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-util"
|
||||
version = "0.1.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
|
||||
dependencies = [
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-x86_64-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
||||
[[package]]
|
||||
name = "windows-link"
|
||||
version = "0.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
|
||||
|
||||
[[package]]
|
||||
name = "windows-sys"
|
||||
version = "0.59.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
|
||||
dependencies = [
|
||||
"windows-targets",
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-sys"
|
||||
version = "0.60.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
|
||||
dependencies = [
|
||||
"windows-targets 0.53.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -891,14 +865,31 @@ version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
|
||||
dependencies = [
|
||||
"windows_aarch64_gnullvm",
|
||||
"windows_aarch64_msvc",
|
||||
"windows_i686_gnu",
|
||||
"windows_i686_gnullvm",
|
||||
"windows_i686_msvc",
|
||||
"windows_x86_64_gnu",
|
||||
"windows_x86_64_gnullvm",
|
||||
"windows_x86_64_msvc",
|
||||
"windows_aarch64_gnullvm 0.52.6",
|
||||
"windows_aarch64_msvc 0.52.6",
|
||||
"windows_i686_gnu 0.52.6",
|
||||
"windows_i686_gnullvm 0.52.6",
|
||||
"windows_i686_msvc 0.52.6",
|
||||
"windows_x86_64_gnu 0.52.6",
|
||||
"windows_x86_64_gnullvm 0.52.6",
|
||||
"windows_x86_64_msvc 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-targets"
|
||||
version = "0.53.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91"
|
||||
dependencies = [
|
||||
"windows-link",
|
||||
"windows_aarch64_gnullvm 0.53.0",
|
||||
"windows_aarch64_msvc 0.53.0",
|
||||
"windows_i686_gnu 0.53.0",
|
||||
"windows_i686_gnullvm 0.53.0",
|
||||
"windows_i686_msvc 0.53.0",
|
||||
"windows_x86_64_gnu 0.53.0",
|
||||
"windows_x86_64_gnullvm 0.53.0",
|
||||
"windows_x86_64_msvc 0.53.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -907,48 +898,96 @@ version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnullvm"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnullvm"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.52.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.53.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
|
||||
|
||||
[[package]]
|
||||
name = "wit-bindgen-rt"
|
||||
version = "0.39.0"
|
||||
@ -958,26 +997,6 @@ dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerocopy"
|
||||
version = "0.8.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb"
|
||||
dependencies = [
|
||||
"zerocopy-derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerocopy-derive"
|
||||
version = "0.8.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zeroize"
|
||||
version = "1.8.1"
|
||||
@ -995,5 +1014,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
"syn 2.0.104",
|
||||
]
|
||||
@ -1,119 +0,0 @@
From dddb14210b402f317e566b6387c76a8e659bf7fa Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 14 Feb 2023 13:34:10 +0100
Subject: [PATCH 1/2] issue 5647 - covscan: memory leak in audit log when
adding entries (#5650)

covscan reported an issue about "vals" variable in auditlog.c:231 and indeed a charray_free is missing.
Issue: 5647
Reviewed by: @mreynolds389, @droideck
---
ldap/servers/slapd/auditlog.c | 71 +++++++++++++++++++----------------
1 file changed, 38 insertions(+), 33 deletions(-)

diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 68cbc674d..3128e0497 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@ -177,6 +177,40 @@ write_auditfail_log_entry(Slapi_PBlock *pb)
slapi_ch_free_string(&audit_config);
}

+/*
+ * Write the attribute values to the audit log as "comments"
+ *
+ * Slapi_Attr *entry - the attribute begin logged.
+ * char *attrname - the attribute name.
+ * lenstr *l - the audit log buffer
+ *
+ * Resulting output in the log:
+ *
+ * #ATTR: VALUE
+ * #ATTR: VALUE
+ */
+static void
+log_entry_attr(Slapi_Attr *entry_attr, char *attrname, lenstr *l)
+{
+ Slapi_Value **vals = attr_get_present_values(entry_attr);
+ for(size_t i = 0; vals && vals[i]; i++) {
+ char log_val[256] = "";
+ const struct berval *bv = slapi_value_get_berval(vals[i]);
+ if (bv->bv_len >= 256) {
+ strncpy(log_val, bv->bv_val, 252);
+ strcpy(log_val+252, "...");
+ } else {
+ strncpy(log_val, bv->bv_val, bv->bv_len);
+ log_val[bv->bv_len] = 0;
+ }
+ addlenstr(l, "#");
+ addlenstr(l, attrname);
+ addlenstr(l, ": ");
+ addlenstr(l, log_val);
+ addlenstr(l, "\n");
+ }
+}
+
/*
* Write "requested" attributes from the entry to the audit log as "comments"
*
@ -212,21 +246,9 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
for (req_attr = ldap_utf8strtok_r(display_attrs, ", ", &last); req_attr;
req_attr = ldap_utf8strtok_r(NULL, ", ", &last))
{
- char **vals = slapi_entry_attr_get_charray(entry, req_attr);
- for(size_t i = 0; vals && vals[i]; i++) {
- char log_val[256] = {0};
-
- if (strlen(vals[i]) > 256) {
- strncpy(log_val, vals[i], 252);
- strcat(log_val, "...");
- } else {
- strcpy(log_val, vals[i]);
- }
- addlenstr(l, "#");
- addlenstr(l, req_attr);
- addlenstr(l, ": ");
- addlenstr(l, log_val);
- addlenstr(l, "\n");
+ slapi_entry_attr_find(entry, req_attr, &entry_attr);
+ if (entry_attr) {
+ log_entry_attr(entry_attr, req_attr, l);
}
}
} else {
@ -234,7 +256,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
for (; entry_attr; entry_attr = entry_attr->a_next) {
Slapi_Value **vals = attr_get_present_values(entry_attr);
char *attr = NULL;
- const char *val = NULL;

slapi_attr_get_type(entry_attr, &attr);
if (strcmp(attr, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) {
@ -251,23 +272,7 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
addlenstr(l, ": ****************************\n");
continue;
}
-
- for(size_t i = 0; vals && vals[i]; i++) {
- char log_val[256] = {0};
-
- val = slapi_value_get_string(vals[i]);
- if (strlen(val) > 256) {
- strncpy(log_val, val, 252);
- strcat(log_val, "...");
- } else {
- strcpy(log_val, val);
- }
- addlenstr(l, "#");
- addlenstr(l, attr);
- addlenstr(l, ": ");
- addlenstr(l, log_val);
- addlenstr(l, "\n");
- }
+ log_entry_attr(entry_attr, attr, l);
}
}
slapi_ch_free_string(&display_attrs);
--
2.43.0

@ -1,27 +0,0 @@
From be7c2b82958e91ce08775bf6b5da3c311d3b00e5 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 20 Feb 2023 16:14:05 +0100
Subject: [PATCH 2/2] Issue 5647 - Fix unused variable warning from previous
commit (#5670)

* issue 5647 - memory leak in audit log when adding entries
* Issue 5647 - Fix unused variable warning from previous commit
---
ldap/servers/slapd/auditlog.c | 1 -
1 file changed, 1 deletion(-)

diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 3128e0497..0597ecc6f 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@ -254,7 +254,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
} else {
/* Return all attributes */
for (; entry_attr; entry_attr = entry_attr->a_next) {
- Slapi_Value **vals = attr_get_present_values(entry_attr);
char *attr = NULL;

slapi_attr_get_type(entry_attr, &attr);
--
2.43.0

@ -1,147 +0,0 @@
|
||||
From 692c4cec6cc5c0086cf58f83bcfa690c766c9887 Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Fri, 2 Feb 2024 14:14:28 +0100
|
||||
Subject: [PATCH] Issue 5407 - sync_repl crashes if enabled while dynamic
|
||||
plugin is enabled (#5411)
|
||||
|
||||
Bug description:
|
||||
When dynamic plugin is enabled, if a MOD enables sync_repl plugin
|
||||
then sync_repl init function registers the postop callback
|
||||
that will be called for the MOD itself while the preop
|
||||
has not been called.
|
||||
postop expects preop to be called and so primary operation
|
||||
to be set. When it is not set it crashes
|
||||
|
||||
Fix description:
|
||||
If the primary operation is not set, just return
|
||||
|
||||
relates: #5407
|
||||
---
|
||||
.../suites/syncrepl_plugin/basic_test.py | 68 +++++++++++++++++++
|
||||
ldap/servers/plugins/sync/sync_persist.c | 23 ++++++-
|
||||
2 files changed, 90 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
|
||||
index eb3770b78..cdf35eeaa 100644
|
||||
--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
|
||||
@@ -592,6 +592,74 @@ def test_sync_repl_cenotaph(topo_m2, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+def test_sync_repl_dynamic_plugin(topology, request):
|
||||
+ """Test sync_repl with dynamic plugin
|
||||
+
|
||||
+ :id: d4f84913-c18a-459f-8525-110f610ca9e6
|
||||
+ :setup: install a standalone instance
|
||||
+ :steps:
|
||||
+ 1. reset instance to standard (no retroCL, no sync_repl, no dynamic plugin)
|
||||
+ 2. Enable dynamic plugin
|
||||
+ 3. Enable retroCL/content_sync
|
||||
+ 4. Establish a sync_repl req
|
||||
+ :expectedresults:
|
||||
+ 1. Should succeeds
|
||||
+ 2. Should succeeds
|
||||
+ 3. Should succeeds
|
||||
+ 4. Should succeeds
|
||||
+ """
|
||||
+
|
||||
+ # Reset the instance in a default config
|
||||
+ # Disable content sync plugin
|
||||
+ topology.standalone.plugins.disable(name=PLUGIN_REPL_SYNC)
|
||||
+
|
||||
+ # Disable retro changelog
|
||||
+ topology.standalone.plugins.disable(name=PLUGIN_RETRO_CHANGELOG)
|
||||
+
|
||||
+ # Disable dynamic plugins
|
||||
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'off')])
|
||||
+ topology.standalone.restart()
|
||||
+
|
||||
+ # Now start the test
|
||||
+ # Enable dynamic plugins
|
||||
+ try:
|
||||
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')])
|
||||
+ except ldap.LDAPError as e:
|
||||
+ log.error('Failed to enable dynamic plugin! {}'.format(e.args[0]['desc']))
|
||||
+ assert False
|
||||
+
|
||||
+ # Enable retro changelog
|
||||
+ topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
|
||||
+
|
||||
+ # Enbale content sync plugin
|
||||
+ topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)
|
||||
+
|
||||
+ # create a sync repl client and wait 5 seconds to be sure it is running
|
||||
+ sync_repl = Sync_persist(topology.standalone)
|
||||
+ sync_repl.start()
|
||||
+ time.sleep(5)
|
||||
+
|
||||
+ # create users
|
||||
+ users = UserAccounts(topology.standalone, DEFAULT_SUFFIX)
|
||||
+ users_set = []
|
||||
+ for i in range(10001, 10004):
|
||||
+ users_set.append(users.create_test_user(uid=i))
|
||||
+
|
||||
+ time.sleep(10)
|
||||
+ # delete users, that automember/memberof will generate nested updates
|
||||
+ for user in users_set:
|
||||
+ user.delete()
|
||||
+ # stop the server to get the sync_repl result set (exit from while loop).
|
||||
+ # Only way I found to acheive that.
|
||||
+ # and wait a bit to let sync_repl thread time to set its result before fetching it.
|
||||
+ topology.standalone.stop()
|
||||
+ sync_repl.get_result()
|
||||
+ sync_repl.join()
|
||||
+ log.info('test_sync_repl_dynamic_plugin: PASS\n')
|
||||
+
|
||||
+ # Success
|
||||
+ log.info('Test complete')
|
||||
+
|
||||
def test_sync_repl_invalid_cookie(topology, request):
|
||||
"""Test sync_repl with invalid cookie
|
||||
|
||||
diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c
|
||||
index d2210b64c..283607361 100644
|
||||
--- a/ldap/servers/plugins/sync/sync_persist.c
|
||||
+++ b/ldap/servers/plugins/sync/sync_persist.c
|
||||
@@ -156,6 +156,17 @@ ignore_op_pl(Slapi_PBlock *pb)
|
||||
* This is the same for ident
|
||||
*/
|
||||
prim_op = get_thread_primary_op();
|
||||
+ if (prim_op == NULL) {
|
||||
+ /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
|
||||
+ * The only known case it happens is with dynamic plugin enabled and an
|
||||
+ * update that enable the sync_repl plugin. In such case sync_repl registers
|
||||
+ * the postop (sync_update_persist_op) that is called while the preop was not called
|
||||
+ */
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
|
||||
+ "ignore_op_pl - Operation without primary op set (0x%lx)\n",
|
||||
+ (ulong) op);
|
||||
+ return;
|
||||
+ }
|
||||
ident = sync_persist_get_operation_extension(pb);
|
||||
|
||||
if (ident) {
|
||||
@@ -232,8 +243,18 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber
|
||||
|
||||
|
||||
prim_op = get_thread_primary_op();
|
||||
+ if (prim_op == NULL) {
|
||||
+ /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
|
||||
+ * The only known case it happens is with dynamic plugin enabled and an
|
||||
+ * update that enable the sync_repl plugin. In such case sync_repl registers
|
||||
+ * the postop (sync_update_persist_op) that is called while the preop was not called
|
||||
+ */
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
|
||||
+ "sync_update_persist_op - Operation without primary op set (0x%lx)\n",
|
||||
+ (ulong) pb_op);
|
||||
+ return;
|
||||
+ }
|
||||
ident = sync_persist_get_operation_extension(pb);
|
||||
- PR_ASSERT(prim_op);
|
||||
|
||||
if ((ident == NULL) && operation_is_flag_set(pb_op, OP_FLAG_NOOP)) {
|
||||
/* This happens for URP (add cenotaph, fixup rename, tombstone resurrect)
|
||||
--
|
||||
2.43.0
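A condensed Python sketch of the reproduction sequence used by the new test above, assuming "inst" is an already-connected lib389 DirSrv instance and that the PLUGIN_* constants come from lib389._constants as in the test; it only restates the steps, it adds no server-side logic.

import ldap
from lib389._constants import DN_CONFIG, PLUGIN_RETRO_CHANGELOG, PLUGIN_REPL_SYNC

def enable_sync_repl_dynamically(inst):
    # Turn on dynamic plugins so plugin enablement takes effect without a restart.
    inst.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')])
    # Enabling retroCL and then content sync in this state is the MOD that used to
    # crash before the NULL primary-operation guard added by this patch.
    inst.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
    inst.plugins.enable(name=PLUGIN_REPL_SYNC)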
|
||||
|
||||
@ -1,840 +0,0 @@
|
||||
From 8dc61a176323f0d41df730abd715ccff3034c2be Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Sun, 27 Nov 2022 09:37:19 -0500
|
||||
Subject: [PATCH] Issue 5547 - automember plugin improvements
|
||||
|
||||
Description:
|
||||
|
||||
Rebuild task has the following improvements:
|
||||
|
||||
- Only one task allowed at a time
|
||||
- Do not cleanup previous members by default. Add new CLI option to intentionally
|
||||
cleanup memberships before rebuilding from scratch.
|
||||
- Add better task logging to show fixup progress
|
||||
|
||||
To prevent automember from being called in a nested be_txn loop thread storage is
|
||||
used to check and skip these loops.
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/5547
|
||||
|
||||
Reviewed by: spichugi(Thanks!)
|
||||
---
|
||||
.../automember_plugin/automember_mod_test.py | 43 +++-
|
||||
ldap/servers/plugins/automember/automember.c | 232 ++++++++++++++----
|
||||
ldap/servers/slapd/back-ldbm/ldbm_add.c | 11 +-
|
||||
ldap/servers/slapd/back-ldbm/ldbm_delete.c | 10 +-
|
||||
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 11 +-
|
||||
.../lib389/cli_conf/plugins/automember.py | 10 +-
|
||||
src/lib389/lib389/plugins.py | 7 +-
|
||||
src/lib389/lib389/tasks.py | 9 +-
|
||||
8 files changed, 250 insertions(+), 83 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
|
||||
index 8d25384bf..7a0ed3275 100644
|
||||
--- a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
|
||||
+++ b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
|
||||
@@ -5,12 +5,13 @@
|
||||
# License: GPL (version 3 or any later version).
|
||||
# See LICENSE for details.
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
-#
|
||||
+import ldap
|
||||
import logging
|
||||
import pytest
|
||||
import os
|
||||
+import time
|
||||
from lib389.utils import ds_is_older
|
||||
-from lib389._constants import *
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions
|
||||
from lib389.idm.user import UserAccounts
|
||||
from lib389.idm.group import Groups
|
||||
@@ -41,6 +42,11 @@ def automember_fixture(topo, request):
|
||||
user_accts = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
|
||||
user = user_accts.create_test_user()
|
||||
|
||||
+ # Create extra users
|
||||
+ users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
|
||||
+ for i in range(0, 100):
|
||||
+ users.create_test_user(uid=i)
|
||||
+
|
||||
# Create automember definitions and regex rules
|
||||
automember_prop = {
|
||||
'cn': 'testgroup_definition',
|
||||
@@ -59,7 +65,7 @@ def automember_fixture(topo, request):
|
||||
automemberplugin.enable()
|
||||
topo.standalone.restart()
|
||||
|
||||
- return (user, groups)
|
||||
+ return user, groups
|
||||
|
||||
|
||||
def test_mods(automember_fixture, topo):
|
||||
@@ -72,19 +78,21 @@ def test_mods(automember_fixture, topo):
|
||||
2. Update user that should add it to group[1]
|
||||
3. Update user that should add it to group[2]
|
||||
4. Update user that should add it to group[0]
|
||||
- 5. Test rebuild task correctly moves user to group[1]
|
||||
+ 5. Test rebuild task adds user to group[1]
|
||||
+ 6. Test rebuild task cleanups groups and only adds it to group[1]
|
||||
:expectedresults:
|
||||
1. Success
|
||||
2. Success
|
||||
3. Success
|
||||
4. Success
|
||||
5. Success
|
||||
+ 6. Success
|
||||
"""
|
||||
(user, groups) = automember_fixture
|
||||
|
||||
# Update user which should go into group[0]
|
||||
user.replace('cn', 'whatever')
|
||||
- groups[0].is_member(user.dn)
|
||||
+ assert groups[0].is_member(user.dn)
|
||||
if groups[1].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -92,7 +100,7 @@ def test_mods(automember_fixture, topo):
|
||||
|
||||
# Update user0 which should go into group[1]
|
||||
user.replace('cn', 'mark')
|
||||
- groups[1].is_member(user.dn)
|
||||
+ assert groups[1].is_member(user.dn)
|
||||
if groups[0].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -100,7 +108,7 @@ def test_mods(automember_fixture, topo):
|
||||
|
||||
# Update user which should go into group[2]
|
||||
user.replace('cn', 'simon')
|
||||
- groups[2].is_member(user.dn)
|
||||
+ assert groups[2].is_member(user.dn)
|
||||
if groups[0].is_member(user.dn):
|
||||
assert False
|
||||
if groups[1].is_member(user.dn):
|
||||
@@ -108,7 +116,7 @@ def test_mods(automember_fixture, topo):
|
||||
|
||||
# Update user which should go back into group[0] (full circle)
|
||||
user.replace('cn', 'whatever')
|
||||
- groups[0].is_member(user.dn)
|
||||
+ assert groups[0].is_member(user.dn)
|
||||
if groups[1].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -128,12 +136,24 @@ def test_mods(automember_fixture, topo):
|
||||
automemberplugin.enable()
|
||||
topo.standalone.restart()
|
||||
|
||||
- # Run rebuild task
|
||||
+ # Run rebuild task (no cleanup)
|
||||
task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount")
|
||||
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
|
||||
+ # test only one fixup task is allowed at a time
|
||||
+ automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=top")
|
||||
task.wait()
|
||||
|
||||
- # Test membership
|
||||
- groups[1].is_member(user.dn)
|
||||
+ # Test membership (user should still be in groups[0])
|
||||
+ assert groups[1].is_member(user.dn)
|
||||
+ if not groups[0].is_member(user.dn):
|
||||
+ assert False
|
||||
+
|
||||
+ # Run rebuild task with cleanup
|
||||
+ task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount", cleanup=True)
|
||||
+ task.wait()
|
||||
+
|
||||
+ # Test membership (user should only be in groups[1])
|
||||
+ assert groups[1].is_member(user.dn)
|
||||
if groups[0].is_member(user.dn):
|
||||
assert False
|
||||
if groups[2].is_member(user.dn):
|
||||
@@ -148,4 +168,3 @@ if __name__ == '__main__':
|
||||
# -s for DEBUG mode
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main(["-s", CURRENT_FILE])
|
||||
-
|
||||
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
|
||||
index 3494d0343..419adb052 100644
|
||||
--- a/ldap/servers/plugins/automember/automember.c
|
||||
+++ b/ldap/servers/plugins/automember/automember.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2011 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -14,7 +14,7 @@
|
||||
* Auto Membership Plug-in
|
||||
*/
|
||||
#include "automember.h"
|
||||
-
|
||||
+#include <pthread.h>
|
||||
|
||||
/*
|
||||
* Plug-in globals
|
||||
@@ -22,7 +22,9 @@
|
||||
static PRCList *g_automember_config = NULL;
|
||||
static Slapi_RWLock *g_automember_config_lock = NULL;
|
||||
static uint64_t abort_rebuild_task = 0;
|
||||
-
|
||||
+static pthread_key_t td_automem_block_nested;
|
||||
+static PRBool fixup_running = PR_FALSE;
|
||||
+static PRLock *fixup_lock = NULL;
|
||||
static void *_PluginID = NULL;
|
||||
static Slapi_DN *_PluginDN = NULL;
|
||||
static Slapi_DN *_ConfigAreaDN = NULL;
|
||||
@@ -93,9 +95,43 @@ static void automember_task_export_destructor(Slapi_Task *task);
|
||||
static void automember_task_map_destructor(Slapi_Task *task);
|
||||
|
||||
#define DEFAULT_FILE_MODE PR_IRUSR | PR_IWUSR
|
||||
+#define FIXUP_PROGRESS_LIMIT 1000
|
||||
static uint64_t plugin_do_modify = 0;
|
||||
static uint64_t plugin_is_betxn = 0;
|
||||
|
||||
+/* automember_plugin fixup task and add operations should block other be_txn
|
||||
+ * plugins from calling automember_post_op_mod() */
|
||||
+static int32_t
|
||||
+slapi_td_block_nested_post_op(void)
|
||||
+{
|
||||
+ int32_t val = 12345;
|
||||
+
|
||||
+ if (pthread_setspecific(td_automem_block_nested, (void *)&val) != 0) {
|
||||
+ return PR_FAILURE;
|
||||
+ }
|
||||
+ return PR_SUCCESS;
|
||||
+}
|
||||
+
|
||||
+static int32_t
|
||||
+slapi_td_unblock_nested_post_op(void)
|
||||
+{
|
||||
+ if (pthread_setspecific(td_automem_block_nested, NULL) != 0) {
|
||||
+ return PR_FAILURE;
|
||||
+ }
|
||||
+ return PR_SUCCESS;
|
||||
+}
|
||||
+
|
||||
+static int32_t
|
||||
+slapi_td_is_post_op_nested(void)
|
||||
+{
|
||||
+ int32_t *value = pthread_getspecific(td_automem_block_nested);
|
||||
+
|
||||
+ if (value == NULL) {
|
||||
+ return 0;
|
||||
+ }
|
||||
+ return 1;
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Config cache locking functions
|
||||
*/
|
||||
@@ -317,6 +353,14 @@ automember_start(Slapi_PBlock *pb)
|
||||
return -1;
|
||||
}
|
||||
|
||||
+ if (fixup_lock == NULL) {
|
||||
+ if ((fixup_lock = PR_NewLock()) == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_start - Failed to create fixup lock.\n");
|
||||
+ return -1;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
/*
|
||||
* Get the plug-in target dn from the system
|
||||
* and store it for future use. */
|
||||
@@ -360,6 +404,11 @@ automember_start(Slapi_PBlock *pb)
|
||||
}
|
||||
}
|
||||
|
||||
+ if (pthread_key_create(&td_automem_block_nested, NULL) != 0) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_start - pthread_key_create failed\n");
|
||||
+ }
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_start - ready for service\n");
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
@@ -394,6 +443,8 @@ automember_close(Slapi_PBlock *pb __attribute__((unused)))
|
||||
slapi_sdn_free(&_ConfigAreaDN);
|
||||
slapi_destroy_rwlock(g_automember_config_lock);
|
||||
g_automember_config_lock = NULL;
|
||||
+ PR_DestroyLock(fixup_lock);
|
||||
+ fixup_lock = NULL;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"<-- automember_close\n");
|
||||
@@ -1619,7 +1670,6 @@ out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
-
|
||||
/*
|
||||
* automember_update_member_value()
|
||||
*
|
||||
@@ -1634,7 +1684,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
LDAPMod *mods[2];
|
||||
char *vals[2];
|
||||
char *member_value = NULL;
|
||||
- int rc = 0;
|
||||
+ int rc = LDAP_SUCCESS;
|
||||
Slapi_DN *group_sdn;
|
||||
|
||||
/* First thing check that the group still exists */
|
||||
@@ -1653,7 +1703,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
"automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n",
|
||||
group_dn, rc);
|
||||
}
|
||||
- return rc;
|
||||
+ goto out;
|
||||
}
|
||||
|
||||
/* If grouping_value is dn, we need to fetch the dn instead. */
|
||||
@@ -1879,6 +1929,13 @@ automember_mod_post_op(Slapi_PBlock *pb)
|
||||
PRCList *list = NULL;
|
||||
int rc = SLAPI_PLUGIN_SUCCESS;
|
||||
|
||||
+ if (slapi_td_is_post_op_nested()) {
|
||||
+ /* don't process op twice in the same thread */
|
||||
+ return rc;
|
||||
+ } else {
|
||||
+ slapi_td_block_nested_post_op();
|
||||
+ }
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"--> automember_mod_post_op\n");
|
||||
|
||||
@@ -2005,6 +2062,7 @@ automember_mod_post_op(Slapi_PBlock *pb)
|
||||
}
|
||||
}
|
||||
}
|
||||
+ slapi_td_unblock_nested_post_op();
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"<-- automember_mod_post_op (%d)\n", rc);
|
||||
@@ -2024,6 +2082,13 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"--> automember_add_post_op\n");
|
||||
|
||||
+ if (slapi_td_is_post_op_nested()) {
|
||||
+ /* don't process op twice in the same thread */
|
||||
+ return rc;
|
||||
+ } else {
|
||||
+ slapi_td_block_nested_post_op();
|
||||
+ }
|
||||
+
|
||||
/* Reload config if a config entry was added. */
|
||||
if ((sdn = automember_get_sdn(pb))) {
|
||||
if (automember_dn_is_config(sdn)) {
|
||||
@@ -2039,7 +2104,7 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
|
||||
/* If replication, just bail. */
|
||||
if (automember_isrepl(pb)) {
|
||||
- return SLAPI_PLUGIN_SUCCESS;
|
||||
+ goto bail;
|
||||
}
|
||||
|
||||
/* Get the newly added entry. */
|
||||
@@ -2052,7 +2117,7 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
tombstone);
|
||||
slapi_value_free(&tombstone);
|
||||
if (is_tombstone) {
|
||||
- return SLAPI_PLUGIN_SUCCESS;
|
||||
+ goto bail;
|
||||
}
|
||||
|
||||
/* Check if a config entry applies
|
||||
@@ -2063,21 +2128,19 @@ automember_add_post_op(Slapi_PBlock *pb)
|
||||
list = PR_LIST_HEAD(g_automember_config);
|
||||
while (list != g_automember_config) {
|
||||
config = (struct configEntry *)list;
|
||||
-
|
||||
/* Does the entry meet scope and filter requirements? */
|
||||
if (slapi_dn_issuffix(slapi_sdn_get_dn(sdn), config->scope) &&
|
||||
- (slapi_filter_test_simple(e, config->filter) == 0)) {
|
||||
+ (slapi_filter_test_simple(e, config->filter) == 0))
|
||||
+ {
|
||||
/* Find out what membership changes are needed and make them. */
|
||||
if (automember_update_membership(config, e, NULL) == SLAPI_PLUGIN_FAILURE) {
|
||||
rc = SLAPI_PLUGIN_FAILURE;
|
||||
break;
|
||||
}
|
||||
}
|
||||
-
|
||||
list = PR_NEXT_LINK(list);
|
||||
}
|
||||
}
|
||||
-
|
||||
automember_config_unlock();
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
@@ -2098,6 +2161,7 @@ bail:
|
||||
slapi_pblock_set(pb, SLAPI_RESULT_CODE, &result);
|
||||
slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, &errtxt);
|
||||
}
|
||||
+ slapi_td_unblock_nested_post_op();
|
||||
|
||||
return rc;
|
||||
}
|
||||
@@ -2138,6 +2202,7 @@ typedef struct _task_data
|
||||
Slapi_DN *base_dn;
|
||||
char *bind_dn;
|
||||
int scope;
|
||||
+ PRBool cleanup;
|
||||
} task_data;
|
||||
|
||||
static void
|
||||
@@ -2270,6 +2335,7 @@ automember_task_abort_thread(void *arg)
|
||||
* basedn: dc=example,dc=com
|
||||
* filter: (uid=*)
|
||||
* scope: sub
|
||||
+ * cleanup: yes/on (default is off)
|
||||
*
|
||||
* basedn and filter are required. If scope is omitted, the default is sub
|
||||
*/
|
||||
@@ -2284,9 +2350,22 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
const char *base_dn;
|
||||
const char *filter;
|
||||
const char *scope;
|
||||
+ const char *cleanup_str;
|
||||
+ PRBool cleanup = PR_FALSE;
|
||||
|
||||
*returncode = LDAP_SUCCESS;
|
||||
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ if (fixup_running) {
|
||||
+ PR_Unlock(fixup_lock);
|
||||
+ *returncode = LDAP_UNWILLING_TO_PERFORM;
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_task_add - there is already a fixup task running\n");
|
||||
+ rv = SLAPI_DSE_CALLBACK_ERROR;
|
||||
+ goto out;
|
||||
+ }
|
||||
+ PR_Unlock(fixup_lock);
|
||||
+
|
||||
/*
|
||||
* Grab the task params
|
||||
*/
|
||||
@@ -2300,6 +2379,12 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
rv = SLAPI_DSE_CALLBACK_ERROR;
|
||||
goto out;
|
||||
}
|
||||
+ if ((cleanup_str = slapi_entry_attr_get_ref(e, "cleanup"))) {
|
||||
+ if (strcasecmp(cleanup_str, "yes") == 0 || strcasecmp(cleanup_str, "on")) {
|
||||
+ cleanup = PR_TRUE;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
scope = slapi_fetch_attr(e, "scope", "sub");
|
||||
/*
|
||||
* setup our task data
|
||||
@@ -2315,6 +2400,7 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
mytaskdata->bind_dn = slapi_ch_strdup(bind_dn);
|
||||
mytaskdata->base_dn = slapi_sdn_new_dn_byval(base_dn);
|
||||
mytaskdata->filter_str = slapi_ch_strdup(filter);
|
||||
+ mytaskdata->cleanup = cleanup;
|
||||
|
||||
if (scope) {
|
||||
if (strcasecmp(scope, "sub") == 0) {
|
||||
@@ -2334,6 +2420,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
task = slapi_plugin_new_task(slapi_entry_get_ndn(e), arg);
|
||||
slapi_task_set_destructor_fn(task, automember_task_destructor);
|
||||
slapi_task_set_data(task, mytaskdata);
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ fixup_running = PR_TRUE;
|
||||
+ PR_Unlock(fixup_lock);
|
||||
/*
|
||||
* Start the task as a separate thread
|
||||
*/
|
||||
@@ -2345,6 +2434,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
|
||||
"automember_task_add - Unable to create task thread!\n");
|
||||
*returncode = LDAP_OPERATIONS_ERROR;
|
||||
slapi_task_finish(task, *returncode);
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ fixup_running = PR_FALSE;
|
||||
+ PR_Unlock(fixup_lock);
|
||||
rv = SLAPI_DSE_CALLBACK_ERROR;
|
||||
} else {
|
||||
rv = SLAPI_DSE_CALLBACK_OK;
|
||||
@@ -2372,6 +2464,9 @@ automember_rebuild_task_thread(void *arg)
|
||||
PRCList *list = NULL;
|
||||
PRCList *include_list = NULL;
|
||||
int result = 0;
|
||||
+ int64_t fixup_progress_count = 0;
|
||||
+ int64_t fixup_progress_elapsed = 0;
|
||||
+ int64_t fixup_start_time = 0;
|
||||
size_t i = 0;
|
||||
|
||||
/* Reset abort flag */
|
||||
@@ -2380,6 +2475,7 @@ automember_rebuild_task_thread(void *arg)
|
||||
if (!task) {
|
||||
return; /* no task */
|
||||
}
|
||||
+
|
||||
slapi_task_inc_refcount(task);
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_rebuild_task_thread - Refcount incremented.\n");
|
||||
@@ -2393,9 +2489,11 @@ automember_rebuild_task_thread(void *arg)
|
||||
slapi_task_log_status(task, "Automember rebuild task starting (base dn: (%s) filter (%s)...",
|
||||
slapi_sdn_get_dn(td->base_dn), td->filter_str);
|
||||
/*
|
||||
- * Set the bind dn in the local thread data
|
||||
+ * Set the bind dn in the local thread data, and block post op mods
|
||||
*/
|
||||
slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));
|
||||
+ slapi_td_block_nested_post_op();
|
||||
+ fixup_start_time = slapi_current_rel_time_t();
|
||||
/*
|
||||
* Take the config lock now and search the database
|
||||
*/
|
||||
@@ -2426,6 +2524,21 @@ automember_rebuild_task_thread(void *arg)
|
||||
* Loop over the entries
|
||||
*/
|
||||
for (i = 0; entries && (entries[i] != NULL); i++) {
|
||||
+ fixup_progress_count++;
|
||||
+ if (fixup_progress_count % FIXUP_PROGRESS_LIMIT == 0 ) {
|
||||
+ slapi_task_log_notice(task,
|
||||
+ "Processed %ld entries in %ld seconds (+%ld seconds)",
|
||||
+ fixup_progress_count,
|
||||
+ slapi_current_rel_time_t() - fixup_start_time,
|
||||
+ slapi_current_rel_time_t() - fixup_progress_elapsed);
|
||||
+ slapi_task_log_status(task,
|
||||
+ "Processed %ld entries in %ld seconds (+%ld seconds)",
|
||||
+ fixup_progress_count,
|
||||
+ slapi_current_rel_time_t() - fixup_start_time,
|
||||
+ slapi_current_rel_time_t() - fixup_progress_elapsed);
|
||||
+ slapi_task_inc_progress(task);
|
||||
+ fixup_progress_elapsed = slapi_current_rel_time_t();
|
||||
+ }
|
||||
if (slapi_atomic_load_64(&abort_rebuild_task, __ATOMIC_ACQUIRE) == 1) {
|
||||
/* The task was aborted */
|
||||
slapi_task_log_notice(task, "Automember rebuild task was intentionally aborted");
|
||||
@@ -2443,48 +2556,66 @@ automember_rebuild_task_thread(void *arg)
|
||||
if (slapi_dn_issuffix(slapi_entry_get_dn(entries[i]), config->scope) &&
|
||||
(slapi_filter_test_simple(entries[i], config->filter) == 0))
|
||||
{
|
||||
- /* First clear out all the defaults groups */
|
||||
- for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
|
||||
- if ((result = automember_update_member_value(entries[i], config->default_groups[ii],
|
||||
- config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
|
||||
- {
|
||||
- slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from default group (%s) error (%d)",
|
||||
- config->default_groups[ii], result);
|
||||
- slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from default group (%s) error (%d)",
|
||||
- config->default_groups[ii], result);
|
||||
- slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
- "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
|
||||
- config->default_groups[ii], result);
|
||||
- goto out;
|
||||
- }
|
||||
- }
|
||||
-
|
||||
- /* Then clear out the non-default group */
|
||||
- if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
|
||||
- include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
|
||||
- while (include_list != (PRCList *)config->inclusive_rules) {
|
||||
- struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
|
||||
- if ((result = automember_update_member_value(entries[i], slapi_sdn_get_dn(curr_rule->target_group_dn),
|
||||
- config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
|
||||
+ if (td->cleanup) {
|
||||
+
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Cleaning up groups (config %s)\n",
|
||||
+ config->dn);
|
||||
+ /* First clear out all the defaults groups */
|
||||
+ for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
|
||||
+ if ((result = automember_update_member_value(entries[i],
|
||||
+ config->default_groups[ii],
|
||||
+ config->grouping_attr,
|
||||
+ config->grouping_value,
|
||||
+ NULL, DEL_MEMBER)))
|
||||
{
|
||||
slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from group (%s) error (%d)",
|
||||
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ "member from default group (%s) error (%d)",
|
||||
+ config->default_groups[ii], result);
|
||||
slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
|
||||
- "member from group (%s) error (%d)",
|
||||
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ "member from default group (%s) error (%d)",
|
||||
+ config->default_groups[ii], result);
|
||||
slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
|
||||
- slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ config->default_groups[ii], result);
|
||||
goto out;
|
||||
}
|
||||
- include_list = PR_NEXT_LINK(include_list);
|
||||
}
|
||||
+
|
||||
+ /* Then clear out the non-default group */
|
||||
+ if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
|
||||
+ include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
|
||||
+ while (include_list != (PRCList *)config->inclusive_rules) {
|
||||
+ struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
|
||||
+ if ((result = automember_update_member_value(entries[i],
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn),
|
||||
+ config->grouping_attr,
|
||||
+ config->grouping_value,
|
||||
+ NULL, DEL_MEMBER)))
|
||||
+ {
|
||||
+ slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
|
||||
+ "member from group (%s) error (%d)",
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
|
||||
+ "member from group (%s) error (%d)",
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
|
||||
+ slapi_sdn_get_dn(curr_rule->target_group_dn), result);
|
||||
+ goto out;
|
||||
+ }
|
||||
+ include_list = PR_NEXT_LINK(include_list);
|
||||
+ }
|
||||
+ }
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Finished cleaning up groups (config %s)\n",
|
||||
+ config->dn);
|
||||
}
|
||||
|
||||
/* Update the memberships for this entries */
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
+ "automember_rebuild_task_thread - Updating membership (config %s)\n",
|
||||
+ config->dn);
|
||||
if (slapi_is_shutting_down() ||
|
||||
automember_update_membership(config, entries[i], NULL) == SLAPI_PLUGIN_FAILURE)
|
||||
{
|
||||
@@ -2508,15 +2639,22 @@ out:
|
||||
slapi_task_log_notice(task, "Automember rebuild task aborted. Error (%d)", result);
|
||||
slapi_task_log_status(task, "Automember rebuild task aborted. Error (%d)", result);
|
||||
} else {
|
||||
- slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
|
||||
- slapi_task_log_status(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
|
||||
+ slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
|
||||
+ (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
|
||||
+ slapi_task_log_status(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
|
||||
+ (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
|
||||
}
|
||||
slapi_task_inc_progress(task);
|
||||
slapi_task_finish(task, result);
|
||||
slapi_task_dec_refcount(task);
|
||||
slapi_atomic_store_64(&abort_rebuild_task, 0, __ATOMIC_RELEASE);
|
||||
+ slapi_td_unblock_nested_post_op();
|
||||
+ PR_Lock(fixup_lock);
|
||||
+ fixup_running = PR_FALSE;
|
||||
+ PR_Unlock(fixup_lock);
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
- "automember_rebuild_task_thread - Refcount decremented.\n");
|
||||
+ "automember_rebuild_task_thread - task finished, refcount decremented.\n");
|
||||
}
|
||||
|
||||
/*
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
index ba2d73a84..ce4c314a1 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -1264,10 +1264,6 @@ ldbm_back_add(Slapi_PBlock *pb)
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
if (addingentry_id_assigned) {
|
||||
next_id_return(be, addingentry->ep_id);
|
||||
}
|
||||
@@ -1376,6 +1372,11 @@ diskfull_return:
|
||||
if (!not_an_error) {
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
}
|
||||
+
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
}
|
||||
|
||||
common_return:
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
|
||||
index de23190c3..27f0ac58a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
|
||||
@@ -1407,11 +1407,6 @@ commit_return:
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
-
|
||||
if (tombstone) {
|
||||
if (cache_is_in_cache(&inst->inst_cache, tombstone)) {
|
||||
tomb_ep_id = tombstone->ep_id; /* Otherwise, tombstone might have been freed. */
|
||||
@@ -1496,6 +1491,11 @@ error_return:
|
||||
conn_id, op_id, parent_modify_c.old_entry, parent_modify_c.new_entry, myrc);
|
||||
}
|
||||
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
+
|
||||
common_return:
|
||||
if (orig_entry) {
|
||||
/* NOTE: #define SLAPI_DELETE_BEPREOP_ENTRY SLAPI_ENTRY_PRE_OP */
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
||||
index 537369055..64b293001 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2022 Red Hat, Inc.
|
||||
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
|
||||
* All rights reserved.
|
||||
*
|
||||
@@ -1043,11 +1043,6 @@ ldbm_back_modify(Slapi_PBlock *pb)
|
||||
goto common_return;
|
||||
|
||||
error_return:
|
||||
- /* Revert the caches if this is the parent operation */
|
||||
- if (parent_op && betxn_callback_fails) {
|
||||
- revert_cache(inst, &parent_time);
|
||||
- }
|
||||
-
|
||||
if (postentry != NULL) {
|
||||
slapi_entry_free(postentry);
|
||||
postentry = NULL;
|
||||
@@ -1103,6 +1098,10 @@ error_return:
|
||||
if (!not_an_error) {
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
}
|
||||
+ /* Revert the caches if this is the parent operation */
|
||||
+ if (parent_op && betxn_callback_fails) {
|
||||
+ revert_cache(inst, &parent_time);
|
||||
+ }
|
||||
}
|
||||
|
||||
/* if ec is in cache, remove it, then add back e if we still have it */
|
||||
diff --git a/src/lib389/lib389/cli_conf/plugins/automember.py b/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
index 15b00c633..568586ad8 100644
|
||||
--- a/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
+++ b/src/lib389/lib389/cli_conf/plugins/automember.py
|
||||
@@ -155,7 +155,7 @@ def fixup(inst, basedn, log, args):
|
||||
log.info('Attempting to add task entry... This will fail if Automembership plug-in is not enabled.')
|
||||
if not plugin.status():
|
||||
log.error("'%s' is disabled. Rebuild membership task can't be executed" % plugin.rdn)
|
||||
- fixup_task = plugin.fixup(args.DN, args.filter)
|
||||
+ fixup_task = plugin.fixup(args.DN, args.filter, args.cleanup)
|
||||
if args.wait:
|
||||
log.info(f'Waiting for fixup task "{fixup_task.dn}" to complete. You can safely exit by pressing Control C ...')
|
||||
fixup_task.wait(timeout=args.timeout)
|
||||
@@ -225,8 +225,8 @@ def create_parser(subparsers):
|
||||
subcommands = automember.add_subparsers(help='action')
|
||||
add_generic_plugin_parsers(subcommands, AutoMembershipPlugin)
|
||||
|
||||
- list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
|
||||
- subcommands_list = list.add_subparsers(help='action')
|
||||
+ automember_list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
|
||||
+ subcommands_list = automember_list.add_subparsers(help='action')
|
||||
list_definitions = subcommands_list.add_parser('definitions', help='Lists Automembership definitions.')
|
||||
list_definitions.set_defaults(func=definition_list)
|
||||
list_regexes = subcommands_list.add_parser('regexes', help='List Automembership regex rules.')
|
||||
@@ -269,6 +269,8 @@ def create_parser(subparsers):
|
||||
fixup_task.add_argument('-f', '--filter', required=True, help='Sets the LDAP filter for entries to fix up')
|
||||
fixup_task.add_argument('-s', '--scope', required=True, choices=['sub', 'base', 'one'], type=str.lower,
|
||||
help='Sets the LDAP search scope for entries to fix up')
|
||||
+ fixup_task.add_argument('--cleanup', action='store_true',
|
||||
+ help="Clean up previous group memberships before rebuilding")
|
||||
fixup_task.add_argument('--wait', action='store_true',
|
||||
help="Wait for the task to finish, this could take a long time")
|
||||
fixup_task.add_argument('--timeout', default=0, type=int,
|
||||
@@ -279,7 +281,7 @@ def create_parser(subparsers):
|
||||
fixup_status.add_argument('--dn', help="The task entry's DN")
|
||||
fixup_status.add_argument('--show-log', action='store_true', help="Display the task log")
|
||||
fixup_status.add_argument('--watch', action='store_true',
|
||||
- help="Watch the task's status and wait for it to finish")
|
||||
+ help="Watch the task's status and wait for it to finish")
|
||||
|
||||
abort_fixup = subcommands.add_parser('abort-fixup', help='Abort the rebuild membership task.')
|
||||
abort_fixup.set_defaults(func=abort)
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 52691a44c..a1ad0a45b 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -1141,13 +1141,15 @@ class AutoMembershipPlugin(Plugin):
|
||||
def __init__(self, instance, dn="cn=Auto Membership Plugin,cn=plugins,cn=config"):
|
||||
super(AutoMembershipPlugin, self).__init__(instance, dn)
|
||||
|
||||
- def fixup(self, basedn, _filter=None):
|
||||
+ def fixup(self, basedn, _filter=None, cleanup=False):
|
||||
"""Create an automember rebuild membership task
|
||||
|
||||
:param basedn: Basedn to fix up
|
||||
:type basedn: str
|
||||
:param _filter: a filter for entries to fix up
|
||||
:type _filter: str
|
||||
+ :param cleanup: cleanup old group memberships
|
||||
+ :type cleanup: boolean
|
||||
|
||||
:returns: an instance of Task(DSLdapObject)
|
||||
"""
|
||||
@@ -1156,6 +1158,9 @@ class AutoMembershipPlugin(Plugin):
|
||||
task_properties = {'basedn': basedn}
|
||||
if _filter is not None:
|
||||
task_properties['filter'] = _filter
|
||||
+ if cleanup:
|
||||
+ task_properties['cleanup'] = "yes"
|
||||
+
|
||||
task.create(properties=task_properties)
|
||||
|
||||
return task
|
||||
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
|
||||
index 1a16bbb83..193805780 100644
|
||||
--- a/src/lib389/lib389/tasks.py
|
||||
+++ b/src/lib389/lib389/tasks.py
|
||||
@@ -1006,12 +1006,13 @@ class Tasks(object):
|
||||
return exitCode
|
||||
|
||||
def automemberRebuild(self, suffix=DEFAULT_SUFFIX, scope='sub',
|
||||
- filterstr='objectclass=top', args=None):
|
||||
+ filterstr='objectclass=top', cleanup=False, args=None):
|
||||
'''
|
||||
- @param suffix - The suffix the task should examine - defualt is
|
||||
+ @param suffix - The suffix the task should examine - default is
|
||||
"dc=example,dc=com"
|
||||
@param scope - The scope of the search to find entries
|
||||
- @param fitlerstr - THe search filter to find entries
|
||||
+ @param fitlerstr - The search filter to find entries
|
||||
+ @param cleanup - reset/clear the old group mmeberships prior to rebuilding
|
||||
@param args - is a dictionary that contains modifier of the task
|
||||
wait: True/[False] - If True, waits for the completion of
|
||||
the task before to return
|
||||
@@ -1027,6 +1028,8 @@ class Tasks(object):
|
||||
entry.setValues('basedn', suffix)
|
||||
entry.setValues('filter', filterstr)
|
||||
entry.setValues('scope', scope)
|
||||
+ if cleanup:
|
||||
+ entry.setValues('cleanup', 'yes')
|
||||
|
||||
# start the task and possibly wait for task completion
|
||||
try:
|
||||
--
|
||||
2.43.0
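A minimal usage sketch of the rebuild-with-cleanup API added above, assuming "inst" is an already-connected lib389 DirSrv instance; the suffix and filter are illustrative. The matching CLI flag added in this patch is --cleanup on the automember fixup subcommand.

from lib389.plugins import AutoMembershipPlugin

def rebuild_automember(inst, suffix="dc=example,dc=com"):
    plugin = AutoMembershipPlugin(inst)
    # cleanup=True adds "cleanup: yes" to the task entry, so stale group
    # memberships are removed before membership is rebuilt from scratch.
    # Only one fixup task may run at a time; a second request is rejected
    # with unwillingToPerform.
    task = plugin.fixup(suffix, "objectclass=posixaccount", cleanup=True)
    task.wait()
    return task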
|
||||
|
||||
@ -1,83 +0,0 @@
|
||||
From 9319d5b022918f14cacb00e3faef85a6ab730a26 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Tue, 27 Feb 2024 16:30:47 -0800
|
||||
Subject: [PATCH] Issue 3527 - Support HAProxy and Instance on the same machine
|
||||
configuration (#6107)
|
||||
|
||||
Description: Improve how we handle HAProxy connections to work better when
|
||||
the DS and HAProxy are on the same machine.
|
||||
Ensure the client and header destination IPs are checked against the trusted IP list.
|
||||
|
||||
Additionally, this change will also allow configuration having
|
||||
HAProxy is listening on a different subnet than the one used to forward the request.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/3527
|
||||
|
||||
Reviewed by: @progier389, @jchapma (Thanks!)
|
||||
---
|
||||
ldap/servers/slapd/connection.c | 35 +++++++++++++++++++++++++--------
|
||||
1 file changed, 27 insertions(+), 8 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
|
||||
index d28a39bf7..10a8cc577 100644
|
||||
--- a/ldap/servers/slapd/connection.c
|
||||
+++ b/ldap/servers/slapd/connection.c
|
||||
@@ -1187,6 +1187,8 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
|
||||
char str_ip[INET6_ADDRSTRLEN + 1] = {0};
|
||||
char str_haproxy_ip[INET6_ADDRSTRLEN + 1] = {0};
|
||||
char str_haproxy_destip[INET6_ADDRSTRLEN + 1] = {0};
|
||||
+ int trusted_matches_ip_found = 0;
|
||||
+ int trusted_matches_destip_found = 0;
|
||||
struct berval **bvals = NULL;
|
||||
int proxy_connection = 0;
|
||||
|
||||
@@ -1245,21 +1247,38 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
|
||||
normalize_IPv4(conn->cin_addr, buf_ip, sizeof(buf_ip), str_ip, sizeof(str_ip));
|
||||
normalize_IPv4(&pr_netaddr_dest, buf_haproxy_destip, sizeof(buf_haproxy_destip),
|
||||
str_haproxy_destip, sizeof(str_haproxy_destip));
|
||||
+ size_t ip_len = strlen(buf_ip);
|
||||
+ size_t destip_len = strlen(buf_haproxy_destip);
|
||||
|
||||
/* Now, reset RC and set it to 0 only if a match is found */
|
||||
haproxy_rc = -1;
|
||||
|
||||
- /* Allow only:
|
||||
- * Trusted IP == Original Client IP == HAProxy Header Destination IP */
|
||||
+ /*
|
||||
+ * We need to allow a configuration where DS instance and HAProxy are on the same machine.
|
||||
+ * In this case, we need to check if
|
||||
+ * the HAProxy client IP (which will be a loopback address) matches one of the the trusted IP addresses,
|
||||
+ * while still checking that
|
||||
+ * the HAProxy header destination IP address matches one of the trusted IP addresses.
|
||||
+ * Additionally, this change will also allow configuration having
|
||||
+ * HAProxy listening on a different subnet than one used to forward the request.
|
||||
+ */
|
||||
for (size_t i = 0; bvals[i] != NULL; ++i) {
|
||||
- if ((strlen(bvals[i]->bv_val) == strlen(buf_ip)) &&
|
||||
- (strlen(bvals[i]->bv_val) == strlen(buf_haproxy_destip)) &&
|
||||
- (strncasecmp(bvals[i]->bv_val, buf_ip, strlen(buf_ip)) == 0) &&
|
||||
- (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, strlen(buf_haproxy_destip)) == 0)) {
|
||||
- haproxy_rc = 0;
|
||||
- break;
|
||||
+ size_t bval_len = strlen(bvals[i]->bv_val);
|
||||
+
|
||||
+ /* Check if the Client IP (HAProxy's machine IP) address matches the trusted IP address */
|
||||
+ if (!trusted_matches_ip_found) {
|
||||
+ trusted_matches_ip_found = (bval_len == ip_len) && (strncasecmp(bvals[i]->bv_val, buf_ip, ip_len) == 0);
|
||||
+ }
|
||||
+ /* Check if the HAProxy header destination IP address matches the trusted IP address */
|
||||
+ if (!trusted_matches_destip_found) {
|
||||
+ trusted_matches_destip_found = (bval_len == destip_len) && (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, destip_len) == 0);
|
||||
}
|
||||
}
|
||||
+
|
||||
+ if (trusted_matches_ip_found && trusted_matches_destip_found) {
|
||||
+ haproxy_rc = 0;
|
||||
+ }
|
||||
+
|
||||
if (haproxy_rc == -1) {
|
||||
slapi_log_err(SLAPI_LOG_CONNS, "connection_read_operation", "HAProxy header received from unknown source.\n");
|
||||
disconnect_server_nomutex(conn, conn->c_connid, -1, SLAPD_DISCONNECT_PROXY_UNKNOWN, EPROTO);
|
||||
--
|
||||
2.45.0
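A Python sketch (not server code) of the relaxed check introduced above: the connecting HAProxy client IP and the PROXY-header destination IP must each match some entry in the trusted list, but no longer the same entry, so HAProxy may run on the same host as the instance or forward across subnets.

def haproxy_header_trusted(client_ip: str, header_dest_ip: str, trusted_ips) -> bool:
    # Each address only has to match *some* trusted entry, independently.
    client_ok = any(ip.casefold() == client_ip.casefold() for ip in trusted_ips)
    dest_ok = any(ip.casefold() == header_dest_ip.casefold() for ip in trusted_ips)
    return client_ok and dest_ok

# A loopback HAProxy client plus a routable header destination is now accepted
# when both appear in the trusted list.
assert haproxy_header_trusted("127.0.0.1", "192.0.2.10", ["127.0.0.1", "192.0.2.10"])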
|
||||
|
||||
@ -1,108 +0,0 @@
|
||||
From 016a2b6bd3e27cbff36609824a75b020dfd24823 Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Wed, 1 May 2024 15:01:33 +0100
|
||||
Subject: [PATCH] CVE-2024-2199
|
||||
|
||||
---
|
||||
.../tests/suites/password/password_test.py | 56 +++++++++++++++++++
|
||||
ldap/servers/slapd/modify.c | 8 ++-
|
||||
2 files changed, 62 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py
|
||||
index 38079476a..b3ff08904 100644
|
||||
--- a/dirsrvtests/tests/suites/password/password_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/password_test.py
|
||||
@@ -65,6 +65,62 @@ def test_password_delete_specific_password(topology_st):
|
||||
log.info('test_password_delete_specific_password: PASSED')
|
||||
|
||||
|
||||
+def test_password_modify_non_utf8(topology_st):
|
||||
+ """Attempt a modify of the userPassword attribute with
|
||||
+ an invalid non utf8 value
|
||||
+
|
||||
+ :id: a31af9d5-d665-42b9-8d6e-fea3d0837d36
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Add a user if it doesnt exist and set its password
|
||||
+ 2. Verify password with a bind
|
||||
+ 3. Modify userPassword attr with invalid value
|
||||
+ 4. Attempt a bind with invalid password value
|
||||
+ 5. Verify original password with a bind
|
||||
+ :expectedresults:
|
||||
+ 1. The user with userPassword should be added successfully
|
||||
+ 2. Operation should be successful
|
||||
+ 3. Server returns ldap.UNWILLING_TO_PERFORM
|
||||
+ 4. Server returns ldap.INVALID_CREDENTIALS
|
||||
+ 5. Operation should be successful
|
||||
+ """
|
||||
+
|
||||
+ log.info('Running test_password_modify_non_utf8...')
|
||||
+
|
||||
+ # Create user and set password
|
||||
+ standalone = topology_st.standalone
|
||||
+ users = UserAccounts(standalone, DEFAULT_SUFFIX)
|
||||
+ if not users.exists(TEST_USER_PROPERTIES['uid'][0]):
|
||||
+ user = users.create(properties=TEST_USER_PROPERTIES)
|
||||
+ else:
|
||||
+ user = users.get(TEST_USER_PROPERTIES['uid'][0])
|
||||
+ user.set('userpassword', PASSWORD)
|
||||
+
|
||||
+ # Verify password
|
||||
+ try:
|
||||
+ user.bind(PASSWORD)
|
||||
+ except ldap.LDAPError as e:
|
||||
+ log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
|
||||
+ assert False
|
||||
+
|
||||
+ # Modify userPassword with an invalid value
|
||||
+ password = b'tes\x82t-password' # A non UTF-8 encoded password
|
||||
+ with pytest.raises(ldap.UNWILLING_TO_PERFORM):
|
||||
+ user.replace('userpassword', password)
|
||||
+
|
||||
+ # Verify a bind fails with invalid pasword
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ user.bind(password)
|
||||
+
|
||||
+ # Verify we can still bind with original password
|
||||
+ try:
|
||||
+ user.bind(PASSWORD)
|
||||
+ except ldap.LDAPError as e:
|
||||
+ log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
|
||||
+ assert False
|
||||
+
|
||||
+ log.info('test_password_modify_non_utf8: PASSED')
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
|
||||
index 5ca78539c..669bb104c 100644
|
||||
--- a/ldap/servers/slapd/modify.c
|
||||
+++ b/ldap/servers/slapd/modify.c
|
||||
@@ -765,8 +765,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
|
||||
* flagged - leave mod attributes alone */
|
||||
if (!repl_op && !skip_modified_attrs && lastmod) {
|
||||
modify_update_last_modified_attr(pb, &smods);
|
||||
+ slapi_pblock_set(pb, SLAPI_MODIFY_MODS, slapi_mods_get_ldapmods_byref(&smods));
|
||||
}
|
||||
|
||||
+
|
||||
if (0 == slapi_mods_get_num_mods(&smods)) {
|
||||
/* nothing to do - no mods - this is not an error - just
|
||||
send back LDAP_SUCCESS */
|
||||
@@ -933,8 +935,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
|
||||
|
||||
/* encode password */
|
||||
if (pw_encodevals_ext(pb, sdn, va)) {
|
||||
- slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s.\n", slapi_entry_get_dn_const(e));
|
||||
- send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to store attribute \"userPassword\" correctly\n", 0, NULL);
|
||||
+ slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s, "
|
||||
+ "check value is utf8 string.\n", slapi_entry_get_dn_const(e));
|
||||
+ send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to hash \"userPassword\" attribute, "
|
||||
+ "check value is utf8 string.\n", 0, NULL);
|
||||
valuearray_free(&va);
|
||||
goto free_and_return;
|
||||
}
|
||||
--
|
||||
2.45.0
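A minimal sketch of the behaviour enforced by the fix above, assuming "user" is a lib389 UserAccount object and "good_password" is its current password; it mirrors the new test rather than adding any server-side logic.

import ldap
import pytest

def verify_non_utf8_password_rejected(user, good_password):
    # The server now refuses to hash a userPassword value that is not valid
    # UTF-8 and returns unwillingToPerform instead of storing a broken hash.
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        user.replace('userpassword', b'tes\x82t-password')
    # The original credential keeps working after the rejected modify.
    user.bind(good_password)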
|
||||
|
||||
@ -1,213 +0,0 @@
|
||||
From d5bbe52fbe84a7d3b5938bf82d5c4af15061a8e2 Mon Sep 17 00:00:00 2001
|
||||
From: Pierre Rogier <progier@redhat.com>
|
||||
Date: Wed, 17 Apr 2024 18:18:04 +0200
|
||||
Subject: [PATCH] CVE-2024-3657
|
||||
|
||||
---
|
||||
.../tests/suites/filter/large_filter_test.py | 34 +++++-
|
||||
ldap/servers/slapd/back-ldbm/index.c | 111 ++++++++++--------
|
||||
2 files changed, 92 insertions(+), 53 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/filter/large_filter_test.py b/dirsrvtests/tests/suites/filter/large_filter_test.py
|
||||
index ecc7bf979..40526bb16 100644
|
||||
--- a/dirsrvtests/tests/suites/filter/large_filter_test.py
|
||||
+++ b/dirsrvtests/tests/suites/filter/large_filter_test.py
|
||||
@@ -13,19 +13,29 @@ verify and testing Filter from a search
|
||||
|
||||
import os
|
||||
import pytest
|
||||
+import ldap
|
||||
|
||||
-from lib389._constants import PW_DM
|
||||
+from lib389._constants import PW_DM, DEFAULT_SUFFIX, ErrorLog
|
||||
from lib389.topologies import topology_st as topo
|
||||
from lib389.idm.user import UserAccounts, UserAccount
|
||||
from lib389.idm.account import Accounts
|
||||
from lib389.backend import Backends
|
||||
from lib389.idm.domain import Domain
|
||||
+from lib389.utils import get_ldapurl_from_serverid
|
||||
|
||||
SUFFIX = 'dc=anuj,dc=com'
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
|
||||
+def open_new_ldapi_conn(dsinstance):
|
||||
+ ldapurl, certdir = get_ldapurl_from_serverid(dsinstance)
|
||||
+ assert 'ldapi://' in ldapurl
|
||||
+ conn = ldap.initialize(ldapurl)
|
||||
+ conn.sasl_interactive_bind_s("", ldap.sasl.external())
|
||||
+ return conn
|
||||
+
|
||||
+
|
||||
@pytest.fixture(scope="module")
|
||||
def _create_entries(request, topo):
|
||||
"""
|
||||
@@ -160,6 +170,28 @@ def test_large_filter(topo, _create_entries, real_value):
|
||||
assert len(Accounts(conn, SUFFIX).filter(real_value)) == 3
|
||||
|
||||
|
||||
+def test_long_filter_value(topo):
|
||||
+ """Exercise large eq filter with dn syntax attributes
|
||||
+
|
||||
+ :id: b069ef72-fcc3-11ee-981c-482ae39447e5
|
||||
+ :setup: Standalone
|
||||
+ :steps:
|
||||
+ 1. Try to pass filter rules as per the condition.
|
||||
+ :expectedresults:
|
||||
+ 1. Pass
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ conn = open_new_ldapi_conn(inst.serverid)
|
||||
+ inst.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE,ErrorLog.SEARCH_FILTER))
|
||||
+ filter_value = "a\x1Edmin" * 1025
|
||||
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
|
||||
+ filter_value = "aAdmin" * 1025
|
||||
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
|
||||
+ filter_value = "*"
|
||||
+ conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
|
||||
+ inst.config.loglevel(vals=(ErrorLog.DEFAULT,))
|
||||
+
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main("-s -v %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c
|
||||
index 410db23d1..30fa09ebb 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/index.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/index.c
|
||||
@@ -71,6 +71,32 @@ typedef struct _index_buffer_handle index_buffer_handle;
|
||||
#define INDEX_BUFFER_FLAG_SERIALIZE 1
|
||||
#define INDEX_BUFFER_FLAG_STATS 2
|
||||
|
||||
+/*
|
||||
+ * space needed to encode a byte:
|
||||
+ * 0x00-0x31 and 0x7f-0xff requires 3 bytes: \xx
|
||||
+ * 0x22 and 0x5C requires 2 bytes: \" and \\
|
||||
+ * other requires 1 byte: c
|
||||
+ */
|
||||
+static char encode_size[] = {
|
||||
+ /* 0x00 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0x10 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0x20 */ 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1,
|
||||
+ /* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
+ /* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3,
|
||||
+ /* 0x80 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0x90 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xA0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xB0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xC0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xD0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xE0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+ /* 0xF0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
|
||||
+};
|
||||
+
|
||||
+
|
||||
/* Index buffering functions */
|
||||
|
||||
static int
|
||||
@@ -799,65 +825,46 @@ index_add_mods(
|
||||
|
||||
/*
|
||||
* Convert a 'struct berval' into a displayable ASCII string
|
||||
+ * returns the printable string
|
||||
*/
|
||||
-
|
||||
-#define SPECIAL(c) (c < 32 || c > 126 || c == '\\' || c == '"')
|
||||
-
|
||||
const char *
|
||||
encode(const struct berval *data, char buf[BUFSIZ])
|
||||
{
|
||||
- char *s;
|
||||
- char *last;
|
||||
- if (data == NULL || data->bv_len == 0)
|
||||
- return "";
|
||||
- last = data->bv_val + data->bv_len - 1;
|
||||
- for (s = data->bv_val; s < last; ++s) {
|
||||
- if (SPECIAL(*s)) {
|
||||
- char *first = data->bv_val;
|
||||
- char *bufNext = buf;
|
||||
- size_t bufSpace = BUFSIZ - 4;
|
||||
- while (1) {
|
||||
- /* printf ("%lu bytes ASCII\n", (unsigned long)(s - first)); */
|
||||
- if (bufSpace < (size_t)(s - first))
|
||||
- s = first + bufSpace - 1;
|
||||
- if (s != first) {
|
||||
- memcpy(bufNext, first, s - first);
|
||||
- bufNext += (s - first);
|
||||
- bufSpace -= (s - first);
|
||||
- }
|
||||
- do {
|
||||
- if (bufSpace) {
|
||||
- *bufNext++ = '\\';
|
||||
- --bufSpace;
|
||||
- }
|
||||
- if (bufSpace < 2) {
|
||||
- memcpy(bufNext, "..", 2);
|
||||
- bufNext += 2;
|
||||
- goto bail;
|
||||
- }
|
||||
- if (*s == '\\' || *s == '"') {
|
||||
- *bufNext++ = *s;
|
||||
- --bufSpace;
|
||||
- } else {
|
||||
- sprintf(bufNext, "%02x", (unsigned)*(unsigned char *)s);
|
||||
- bufNext += 2;
|
||||
- bufSpace -= 2;
|
||||
- }
|
||||
- } while (++s <= last && SPECIAL(*s));
|
||||
- if (s > last)
|
||||
- break;
|
||||
- first = s;
|
||||
- while (!SPECIAL(*s) && s <= last)
|
||||
- ++s;
|
||||
- }
|
||||
- bail:
|
||||
- *bufNext = '\0';
|
||||
- /* printf ("%lu chars in buffer\n", (unsigned long)(bufNext - buf)); */
|
||||
+ if (!data || !data->bv_val) {
|
||||
+ strcpy(buf, "<NULL>");
|
||||
+ return buf;
|
||||
+ }
|
||||
+ char *endbuff = &buf[BUFSIZ-4]; /* Reserve space to append "...\0" */
|
||||
+ char *ptout = buf;
|
||||
+ unsigned char *ptin = (unsigned char*) data->bv_val;
|
||||
+ unsigned char *endptin = ptin+data->bv_len;
|
||||
+
|
||||
+ while (ptin < endptin) {
|
||||
+ if (ptout >= endbuff) {
|
||||
+ /*
|
||||
+ * BUFSIZ(8K) > SLAPI_LOG_BUFSIZ(2K) so the error log message will be
|
||||
+ * truncated anyway. So there is no real interrest to test if the original
|
||||
+ * data contains no special characters and return it as is.
|
||||
+ */
|
||||
+ strcpy(endbuff, "...");
|
||||
return buf;
|
||||
}
|
||||
+ switch (encode_size[*ptin]) {
|
||||
+ case 1:
|
||||
+ *ptout++ = *ptin++;
|
||||
+ break;
|
||||
+ case 2:
|
||||
+ *ptout++ = '\\';
|
||||
+ *ptout++ = *ptin++;
|
||||
+ break;
|
||||
+ case 3:
|
||||
+ sprintf(ptout, "\\%02x", *ptin++);
|
||||
+ ptout += 3;
|
||||
+ break;
|
||||
+ }
|
||||
}
|
||||
- /* printf ("%lu bytes, all ASCII\n", (unsigned long)(s - data->bv_val)); */
|
||||
- return data->bv_val;
|
||||
+ *ptout = 0;
|
||||
+ return buf;
|
||||
}
|
||||
|
||||
static const char *
|
||||
--
|
||||
2.45.0
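
The rewrite above trades the old scan-and-escape loop for a per-byte lookup table: `"` and `\` become two characters, printable ASCII passes through unchanged, and every other byte becomes a three-character `\xx` hex escape, with the result truncated to the buffer and terminated with `...`. As a rough illustration only (not part of the patch), the same rules can be sketched in Python; the function name and the 8K limit are assumptions for the example.

```python
# Illustrative sketch of the escaping rules above -- not the server code.
def encode_for_log(data: bytes, bufsiz: int = 8192) -> str:
    if not data:
        return "<NULL>"
    limit = bufsiz - 4                      # reserve room for "..." + NUL
    out, used = [], 0
    for byte in data:
        if byte in (0x22, 0x5C):            # '"' and '\' -> \" and \\
            piece = "\\" + chr(byte)
        elif 0x20 <= byte <= 0x7E:          # printable ASCII kept as-is
            piece = chr(byte)
        else:                               # control / high bytes -> \xx
            piece = "\\%02x" % byte
        if used + len(piece) > limit:
            out.append("...")               # truncate like the C version
            break
        out.append(piece)
        used += len(piece)
    return "".join(out)
```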
|
||||
|
||||
@ -1,143 +0,0 @@
|
||||
From 6e5f03d5872129963106024f53765234a282406c Mon Sep 17 00:00:00 2001
|
||||
From: James Chapman <jachapma@redhat.com>
|
||||
Date: Fri, 16 Feb 2024 11:13:16 +0000
|
||||
Subject: [PATCH] Issue 6096 - Improve connection timeout error logging (#6097)
|
||||
|
||||
Bug description: When a paged result search is run with a time limit,
|
||||
if the time limit is exceed the server closes the connection with
|
||||
closed IO timeout (nsslapd-ioblocktimeout) - T2. This error message
|
||||
is incorrect as the reason the connection has been closed was because
|
||||
the specified time limit on a paged result search has been exceeded.
|
||||
|
||||
Fix description: Correct error message
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6096
|
||||
|
||||
Reviewed by: @tbordaz (Thank you)
|
||||
---
|
||||
ldap/admin/src/logconv.pl | 24 ++++++++++++++++++-
|
||||
ldap/servers/slapd/daemon.c | 4 ++--
|
||||
ldap/servers/slapd/disconnect_error_strings.h | 1 +
|
||||
ldap/servers/slapd/disconnect_errors.h | 2 +-
|
||||
4 files changed, 27 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl
|
||||
index 7698c383a..2a933c4a3 100755
|
||||
--- a/ldap/admin/src/logconv.pl
|
||||
+++ b/ldap/admin/src/logconv.pl
|
||||
@@ -267,7 +267,7 @@ my $optimeAvg = 0;
|
||||
my %cipher = ();
|
||||
my @removefiles = ();
|
||||
|
||||
-my @conncodes = qw(A1 B1 B4 T1 T2 B2 B3 R1 P1 P2 U1);
|
||||
+my @conncodes = qw(A1 B1 B4 T1 T2 T3 B2 B3 R1 P1 P2 U1);
|
||||
my %conn = ();
|
||||
map {$conn{$_} = $_} @conncodes;
|
||||
|
||||
@@ -355,6 +355,7 @@ $connmsg{"B1"} = "Bad Ber Tag Encountered";
|
||||
$connmsg{"B4"} = "Server failed to flush data (response) back to Client";
|
||||
$connmsg{"T1"} = "Idle Timeout Exceeded";
|
||||
$connmsg{"T2"} = "IO Block Timeout Exceeded or NTSSL Timeout";
|
||||
+$connmsg{"T3"} = "Paged Search Time Limit Exceeded";
|
||||
$connmsg{"B2"} = "Ber Too Big";
|
||||
$connmsg{"B3"} = "Ber Peek";
|
||||
$connmsg{"R1"} = "Revents";
|
||||
@@ -1723,6 +1724,10 @@ if ($usage =~ /j/i || $verb eq "yes"){
|
||||
print "\n $recCount. You have some coonections that are being closed by the ioblocktimeout setting. You may want to increase the ioblocktimeout.\n";
|
||||
$recCount++;
|
||||
}
|
||||
+ if (defined($conncount->{"T3"}) and $conncount->{"T3"} > 0){
|
||||
+ print "\n $recCount. You have some connections that are being closed because a paged result search limit has been exceeded. You may want to increase the search time limit.\n";
|
||||
+ $recCount++;
|
||||
+ }
|
||||
# compare binds to unbinds, if the difference is more than 30% of the binds, then report a issue
|
||||
if (($bindCount - $unbindCount) > ($bindCount*.3)){
|
||||
print "\n $recCount. You have a significant difference between binds and unbinds. You may want to investigate this difference.\n";
|
||||
@@ -2366,6 +2371,7 @@ sub parseLineNormal
|
||||
$brokenPipeCount++;
|
||||
if (m/- T1/){ $hashes->{rc}->{"T1"}++; }
|
||||
elsif (m/- T2/){ $hashes->{rc}->{"T2"}++; }
|
||||
+ elsif (m/- T3/){ $hashes->{rc}->{"T3"}++; }
|
||||
elsif (m/- A1/){ $hashes->{rc}->{"A1"}++; }
|
||||
elsif (m/- B1/){ $hashes->{rc}->{"B1"}++; }
|
||||
elsif (m/- B4/){ $hashes->{rc}->{"B4"}++; }
|
||||
@@ -2381,6 +2387,7 @@ sub parseLineNormal
|
||||
$connResetByPeerCount++;
|
||||
if (m/- T1/){ $hashes->{src}->{"T1"}++; }
|
||||
elsif (m/- T2/){ $hashes->{src}->{"T2"}++; }
|
||||
+ elsif (m/- T3/){ $hashes->{src}->{"T3"}++; }
|
||||
elsif (m/- A1/){ $hashes->{src}->{"A1"}++; }
|
||||
elsif (m/- B1/){ $hashes->{src}->{"B1"}++; }
|
||||
elsif (m/- B4/){ $hashes->{src}->{"B4"}++; }
|
||||
@@ -2396,6 +2403,7 @@ sub parseLineNormal
|
||||
$resourceUnavailCount++;
|
||||
if (m/- T1/){ $hashes->{rsrc}->{"T1"}++; }
|
||||
elsif (m/- T2/){ $hashes->{rsrc}->{"T2"}++; }
|
||||
+ elsif (m/- T3/){ $hashes->{rsrc}->{"T3"}++; }
|
||||
elsif (m/- A1/){ $hashes->{rsrc}->{"A1"}++; }
|
||||
elsif (m/- B1/){ $hashes->{rsrc}->{"B1"}++; }
|
||||
elsif (m/- B4/){ $hashes->{rsrc}->{"B4"}++; }
|
||||
@@ -2494,6 +2502,20 @@ sub parseLineNormal
|
||||
}
|
||||
}
|
||||
}
|
||||
+ if (m/- T3/){
|
||||
+ if ($_ =~ /conn= *([0-9A-Z]+)/i) {
|
||||
+ $exc = "no";
|
||||
+ $ip = getIPfromConn($1, $serverRestartCount);
|
||||
+ for (my $xxx = 0; $xxx < $#excludeIP; $xxx++){
|
||||
+ if ($ip eq $excludeIP[$xxx]){$exc = "yes";}
|
||||
+ }
|
||||
+ if ($exc ne "yes"){
|
||||
+ $hashes->{T3}->{$ip}++;
|
||||
+ $hashes->{conncount}->{"T3"}++;
|
||||
+ $connCodeCount++;
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
if (m/- B2/){
|
||||
if ($_ =~ /conn= *([0-9A-Z]+)/i) {
|
||||
$exc = "no";
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 5a48aa66f..bb80dae36 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1599,9 +1599,9 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
int add_fd = 1;
|
||||
/* check timeout for PAGED RESULTS */
|
||||
if (pagedresults_is_timedout_nolock(c)) {
|
||||
- /* Exceeded the timelimit; disconnect the client */
|
||||
+ /* Exceeded the paged search timelimit; disconnect the client */
|
||||
disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
- SLAPD_DISCONNECT_IO_TIMEOUT,
|
||||
+ SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
|
||||
0);
|
||||
connection_table_move_connection_out_of_active_list(ct,
|
||||
c);
|
||||
diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
index f7a31d728..c2d9e283b 100644
|
||||
--- a/ldap/servers/slapd/disconnect_error_strings.h
|
||||
+++ b/ldap/servers/slapd/disconnect_error_strings.h
|
||||
@@ -27,6 +27,7 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
|
||||
ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
|
||||
ER2(SLAPD_DISCONNECT_REVENTS, "R1")
|
||||
ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
|
||||
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
|
||||
ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
|
||||
ER2(SLAPD_DISCONNECT_UNBIND, "U1")
|
||||
ER2(SLAPD_DISCONNECT_POLL, "P2")
|
||||
diff --git a/ldap/servers/slapd/disconnect_errors.h b/ldap/servers/slapd/disconnect_errors.h
|
||||
index a0484f1c2..e118f674c 100644
|
||||
--- a/ldap/servers/slapd/disconnect_errors.h
|
||||
+++ b/ldap/servers/slapd/disconnect_errors.h
|
||||
@@ -35,6 +35,6 @@
|
||||
#define SLAPD_DISCONNECT_SASL_FAIL SLAPD_DISCONNECT_ERROR_BASE + 12
|
||||
#define SLAPD_DISCONNECT_PROXY_INVALID_HEADER SLAPD_DISCONNECT_ERROR_BASE + 13
|
||||
#define SLAPD_DISCONNECT_PROXY_UNKNOWN SLAPD_DISCONNECT_ERROR_BASE + 14
|
||||
-
|
||||
+#define SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT SLAPD_DISCONNECT_ERROR_BASE + 15
|
||||
|
||||
#endif /* __DISCONNECT_ERRORS_H_ */
|
||||
--
|
||||
2.45.0
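
The patch above introduces a dedicated T3 disconnect code for paged-search time limits and teaches logconv.pl to count it alongside T1/T2. As a loose illustration only (not logconv.pl itself, and with an assumed log-line format), counting closure codes amounts to a simple tally:

```python
# Hypothetical tally of connection-closure codes from log lines; the
# regular expression is an assumption for the example.
import re
from collections import Counter

CLOSE_CODE = re.compile(r"closed.* - ([A-Z]\d)\b")

def tally_close_codes(lines):
    counts = Counter()
    for line in lines:
        match = CLOSE_CODE.search(line)
        if match:
            counts[match.group(1)] += 1
    return counts          # e.g. counts["T3"] = paged-search timeouts
```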
|
||||
|
||||
@ -1,44 +0,0 @@
From a112394af3a20787755029804684d57a9c3ffa9a Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 21 Feb 2024 12:43:03 +0000
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap
(#6104)

Bug description: A recent addition to the connection disconnect error
messaging, conflicts with how errormap.c maps error codes/strings.

Fix description: errormap expects error codes/strings to be in ascending
order. Moved the new error code to the bottom of the list.

Relates: https://github.com/389ds/389-ds-base/issues/6103

Reviewed by: @droideck. @progier389 (Thank you)
---
ldap/servers/slapd/disconnect_error_strings.h | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
index c2d9e283b..f603a08ce 100644
--- a/ldap/servers/slapd/disconnect_error_strings.h
+++ b/ldap/servers/slapd/disconnect_error_strings.h
@@ -14,7 +14,8 @@
/* disconnect_error_strings.h
*
* Strings describing the errors used in logging the reason a connection
- * was closed.
+ * was closed. Ensure definitions are in the same order as the error codes
+ * defined in disconnect_errors.h
*/
#ifndef __DISCONNECT_ERROR_STRINGS_H_
#define __DISCONNECT_ERROR_STRINGS_H_
@@ -35,6 +36,6 @@ ER2(SLAPD_DISCONNECT_NTSSL_TIMEOUT, "T2")
ER2(SLAPD_DISCONNECT_SASL_FAIL, "S1")
ER2(SLAPD_DISCONNECT_PROXY_INVALID_HEADER, "P3")
ER2(SLAPD_DISCONNECT_PROXY_UNKNOWN, "P4")
-
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")

#endif /* __DISCONNECT_ERROR_STRINGS_H_ */
--
2.45.0
|
||||
|
||||
@ -1,30 +0,0 @@
From edd9abc8901604dde1d739d87ca2906734d53dd3 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Thu, 13 Jun 2024 13:35:09 +0200
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap

Description:
Remove duplicate SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT error code.

Fixes: https://github.com/389ds/389-ds-base/issues/6103

Reviewed by: @tbordaz (Thanks!)
---
ldap/servers/slapd/disconnect_error_strings.h | 1 -
1 file changed, 1 deletion(-)

diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
index f603a08ce..d49cc79a2 100644
--- a/ldap/servers/slapd/disconnect_error_strings.h
+++ b/ldap/servers/slapd/disconnect_error_strings.h
@@ -28,7 +28,6 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
ER2(SLAPD_DISCONNECT_REVENTS, "R1")
ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
-ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
ER2(SLAPD_DISCONNECT_UNBIND, "U1")
ER2(SLAPD_DISCONNECT_POLL, "P2")
--
2.45.0
|
||||
|
||||
@ -1,220 +0,0 @@
|
||||
From 8cf981c00ae18d3efaeb10819282cd991621e9a2 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Wed, 22 May 2024 11:29:05 +0200
|
||||
Subject: [PATCH] Issue 6172 - RFE: improve the performance of evaluation of
|
||||
filter component when tested against a large valueset (like group members)
|
||||
(#6173)
|
||||
|
||||
Bug description:
|
||||
Before returning an entry (to a SRCH) the server checks that the entry matches the SRCH filter.
|
||||
If a filter component (equality) is testing the value (ava) against a
|
||||
large valueset (like uniquemember values), it takes a long time because
|
||||
of the large number of values and required normalization of the values.
|
||||
This can be improved taking benefit of sorted valueset. Those sorted
|
||||
valueset were created to improve updates of large valueset (groups) but
|
||||
at that time not implemented in SRCH path.
|
||||
|
||||
Fix description:
|
||||
In case of LDAP_FILTER_EQUALITY component, the server can get
|
||||
benefit of the sorted valuearray.
|
||||
To limit the risk of regression, we use the sorted valuearray
|
||||
only for the DN syntax attribute. Indeed the sorted valuearray was
|
||||
designed for those type of attribute.
|
||||
With those two limitations, there is no need of a toggle and
|
||||
the call to plugin_call_syntax_filter_ava can be replaced by
|
||||
a call to slapi_valueset_find.
|
||||
In both cases, sorted valueset and plugin_call_syntax_filter_ava, ava and
|
||||
values are normalized.
|
||||
In sorted valueset, the values have been normalized to insert the index
|
||||
in the sorted array and then comparison is done on normalized values.
|
||||
In plugin_call_syntax_filter_ava, all values in valuearray (of valueset) are normalized
|
||||
before comparison.
|
||||
|
||||
relates: #6172
|
||||
|
||||
Reviewed by: Pierre Rogier, Simon Pichugin (Big Thanks !!!)
|
||||
---
|
||||
.../tests/suites/filter/filter_test.py | 125 ++++++++++++++++++
|
||||
ldap/servers/slapd/filterentry.c | 22 ++-
|
||||
2 files changed, 146 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py
|
||||
index d6bfa5a3b..4baaf04a7 100644
|
||||
--- a/dirsrvtests/tests/suites/filter/filter_test.py
|
||||
+++ b/dirsrvtests/tests/suites/filter/filter_test.py
|
||||
@@ -9,7 +9,11 @@
|
||||
import logging
|
||||
|
||||
import pytest
|
||||
+import time
|
||||
+from lib389.dirsrv_log import DirsrvAccessLog
|
||||
from lib389.tasks import *
|
||||
+from lib389.backend import Backends, Backend
|
||||
+from lib389.dbgen import dbgen_users, dbgen_groups
|
||||
from lib389.topologies import topology_st
|
||||
from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX
|
||||
from lib389.utils import *
|
||||
@@ -304,6 +308,127 @@ def test_extended_search(topology_st):
|
||||
ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
|
||||
assert len(ents) == 1
|
||||
|
||||
+def test_match_large_valueset(topology_st):
|
||||
+ """Test that when returning a big number of entries
|
||||
+ and that we need to match the filter from a large valueset
|
||||
+ we get benefit to use the sorted valueset
|
||||
+
|
||||
+ :id: 7db5aa88-50e0-4c31-85dd-1d2072cb674c
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Create a users and groups backends and tune them
|
||||
+ 2. Generate a test ldif (2k users and 1K groups with all users)
|
||||
+ 3. Import test ldif file using Offline import (ldif2db).
|
||||
+ 4. Prim the 'groups' entrycache with a "fast" search
|
||||
+ 5. Search the 'groups' with a difficult matching value
|
||||
+ 6. check that etime from step 5 is less than a second
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Create a users and groups backends should PASS
|
||||
+ 2. Generate LDIF should PASS.
|
||||
+ 3. Offline import should PASS.
|
||||
+ 4. Priming should PASS.
|
||||
+ 5. Performance search should PASS.
|
||||
+ 6. Etime of performance search should PASS.
|
||||
+ """
|
||||
+
|
||||
+ log.info('Running test_match_large_valueset...')
|
||||
+ #
|
||||
+ # Test online/offline LDIF imports
|
||||
+ #
|
||||
+ inst = topology_st.standalone
|
||||
+ inst.start()
|
||||
+ backends = Backends(inst)
|
||||
+ users_suffix = "ou=users,%s" % DEFAULT_SUFFIX
|
||||
+ users_backend = 'users'
|
||||
+ users_ldif = 'users_import.ldif'
|
||||
+ groups_suffix = "ou=groups,%s" % DEFAULT_SUFFIX
|
||||
+ groups_backend = 'groups'
|
||||
+ groups_ldif = 'groups_import.ldif'
|
||||
+ groups_entrycache = '200000000'
|
||||
+ users_number = 2000
|
||||
+ groups_number = 1000
|
||||
+
|
||||
+
|
||||
+ # For priming the cache we just want to be fast
|
||||
+ # taking the first value in the valueset is good
|
||||
+ # whether the valueset is sorted or not
|
||||
+ priming_user_rdn = "user0001"
|
||||
+
|
||||
+ # For performance testing, this is important to use
|
||||
+ # user1000 rather then user0001
|
||||
+ # Because user0001 is the first value in the valueset
|
||||
+ # whether we use the sorted valuearray or non sorted
|
||||
+ # valuearray the performance will be similar.
|
||||
+ # With middle value user1000, the performance boost of
|
||||
+ # the sorted valuearray will make the difference.
|
||||
+ perf_user_rdn = "user1000"
|
||||
+
|
||||
+ # Step 1. Prepare the backends and tune the groups entrycache
|
||||
+ try:
|
||||
+ be_users = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': users_suffix, 'name': users_backend})
|
||||
+ be_groups = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': groups_suffix, 'name': groups_backend})
|
||||
+
|
||||
+ # set the entry cache to 200Mb as the 1K groups of 2K users require at least 170Mb
|
||||
+ be_groups.replace('nsslapd-cachememsize', groups_entrycache)
|
||||
+ except:
|
||||
+ raise
|
||||
+
|
||||
+ # Step 2. Generate a test ldif (10k users entries)
|
||||
+ log.info("Generating users LDIF...")
|
||||
+ ldif_dir = inst.get_ldif_dir()
|
||||
+ users_import_ldif = "%s/%s" % (ldif_dir, users_ldif)
|
||||
+ groups_import_ldif = "%s/%s" % (ldif_dir, groups_ldif)
|
||||
+ dbgen_users(inst, users_number, users_import_ldif, suffix=users_suffix, generic=True, parent=users_suffix)
|
||||
+
|
||||
+ # Generate a test ldif (800 groups with 10k members) that fit in 700Mb entry cache
|
||||
+ props = {
|
||||
+ "name": "group",
|
||||
+ "suffix": groups_suffix,
|
||||
+ "parent": groups_suffix,
|
||||
+ "number": groups_number,
|
||||
+ "numMembers": users_number,
|
||||
+ "createMembers": False,
|
||||
+ "memberParent": users_suffix,
|
||||
+ "membershipAttr": "uniquemember",
|
||||
+ }
|
||||
+ dbgen_groups(inst, groups_import_ldif, props)
|
||||
+
|
||||
+ # Step 3. Do the both offline imports
|
||||
+ inst.stop()
|
||||
+ if not inst.ldif2db(users_backend, None, None, None, users_import_ldif):
|
||||
+ log.fatal('test_basic_import_export: Offline users import failed')
|
||||
+ assert False
|
||||
+ if not inst.ldif2db(groups_backend, None, None, None, groups_import_ldif):
|
||||
+ log.fatal('test_basic_import_export: Offline groups import failed')
|
||||
+ assert False
|
||||
+ inst.start()
|
||||
+
|
||||
+ # Step 4. first prime the cache
|
||||
+ # Just request the 'DN'. We are interested by the time of matching not by the time of transfert
|
||||
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (priming_user_rdn, users_suffix), ['dn'])
|
||||
+ assert len(entries) == groups_number
|
||||
+
|
||||
+ # Step 5. Now do the real performance checking it should take less than a second
|
||||
+ # Just request the 'DN'. We are interested by the time of matching not by the time of transfert
|
||||
+ search_start = time.time()
|
||||
+ entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (perf_user_rdn, users_suffix), ['dn'])
|
||||
+ duration = time.time() - search_start
|
||||
+ log.info("Duration of the search was %f", duration)
|
||||
+
|
||||
+ # Step 6. Gather the etime from the access log
|
||||
+ inst.stop()
|
||||
+ access_log = DirsrvAccessLog(inst)
|
||||
+ search_result = access_log.match(".*RESULT err=0 tag=101 nentries=%s.*" % groups_number)
|
||||
+ log.info("Found patterns are %s", search_result[0])
|
||||
+ log.info("Found patterns are %s", search_result[1])
|
||||
+ etime = float(search_result[1].split('etime=')[1])
|
||||
+ log.info("Duration of the search from access log was %f", etime)
|
||||
+ assert len(entries) == groups_number
|
||||
+ assert (etime < 1)
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/filterentry.c b/ldap/servers/slapd/filterentry.c
|
||||
index fd8fdda9f..cae5c7edc 100644
|
||||
--- a/ldap/servers/slapd/filterentry.c
|
||||
+++ b/ldap/servers/slapd/filterentry.c
|
||||
@@ -296,7 +296,27 @@ test_ava_filter(
|
||||
rc = -1;
|
||||
for (; a != NULL; a = a->a_next) {
|
||||
if (slapi_attr_type_cmp(ava->ava_type, a->a_type, SLAPI_TYPE_CMP_SUBTYPE) == 0) {
|
||||
- rc = plugin_call_syntax_filter_ava(a, ftype, ava);
|
||||
+ if ((ftype == LDAP_FILTER_EQUALITY) &&
|
||||
+ (slapi_attr_is_dn_syntax_type(a->a_type))) {
|
||||
+ /* This path is for a performance improvement */
|
||||
+
|
||||
+ /* In case of equality filter we can get benefit of the
|
||||
+ * sorted valuearray (from valueset).
|
||||
+ * This improvement is limited to DN syntax attributes for
|
||||
+ * which the sorted valueset was designed.
|
||||
+ */
|
||||
+ Slapi_Value *sval = NULL;
|
||||
+ sval = slapi_value_new_berval(&ava->ava_value);
|
||||
+ if (slapi_valueset_find((const Slapi_Attr *)a, &a->a_present_values, sval)) {
|
||||
+ rc = 0;
|
||||
+ }
|
||||
+ slapi_value_free(&sval);
|
||||
+ } else {
|
||||
+ /* When sorted valuearray optimization cannot be used
|
||||
+ * lets filter the value according to its syntax
|
||||
+ */
|
||||
+ rc = plugin_call_syntax_filter_ava(a, ftype, ava);
|
||||
+ }
|
||||
if (rc == 0) {
|
||||
break;
|
||||
}
|
||||
--
|
||||
2.46.0
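
The gain described above comes from consulting the already-sorted valueset for equality matches on DN-syntax attributes instead of normalizing and comparing every stored value. A rough sketch of the idea in Python (not the slapd code; `normalize` here is only a stand-in for DN normalization):

```python
# Equality test against a large valueset: binary search over normalized,
# sorted values instead of a linear scan. Illustration only.
import bisect

def normalize(value: str) -> str:
    # stand-in for DN normalization (case/whitespace folding)
    return ",".join(part.strip().lower() for part in value.split(","))

class SortedValueset:
    def __init__(self, values):
        self._sorted = sorted(normalize(v) for v in values)

    def contains(self, value: str) -> bool:
        key = normalize(value)
        idx = bisect.bisect_left(self._sorted, key)
        return idx < len(self._sorted) and self._sorted[idx] == key
```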
|
||||
|
||||
@ -1,163 +0,0 @@
|
||||
From 57051154bafaf50b83fc27dadbd89a49fd1c8c36 Mon Sep 17 00:00:00 2001
|
||||
From: Pierre Rogier <progier@redhat.com>
|
||||
Date: Fri, 14 Jun 2024 13:27:10 +0200
|
||||
Subject: [PATCH] Security fix for CVE-2024-5953
|
||||
|
||||
Description:
|
||||
A denial of service vulnerability was found in the 389 Directory Server.
|
||||
This issue may allow an authenticated user to cause a server denial
|
||||
of service while attempting to log in with a user with a malformed hash
|
||||
in their password.
|
||||
|
||||
Fix Description:
|
||||
To prevent buffer overflow when a bind request is processed, the bind fails
|
||||
if the hash size is not coherent without even attempting to process further
|
||||
the hashed password.
|
||||
|
||||
References:
|
||||
- https://nvd.nist.gov/vuln/detail/CVE-2024-5953
|
||||
- https://access.redhat.com/security/cve/CVE-2024-5953
|
||||
- https://bugzilla.redhat.com/show_bug.cgi?id=2292104
|
||||
---
|
||||
.../tests/suites/password/regression_test.py | 54 ++++++++++++++++++-
|
||||
ldap/servers/plugins/pwdstorage/md5_pwd.c | 9 +++-
|
||||
ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c | 6 +++
|
||||
3 files changed, 66 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/password/regression_test.py b/dirsrvtests/tests/suites/password/regression_test.py
|
||||
index 8f1facb6d..1fa581643 100644
|
||||
--- a/dirsrvtests/tests/suites/password/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/password/regression_test.py
|
||||
@@ -7,12 +7,14 @@
|
||||
#
|
||||
import pytest
|
||||
import time
|
||||
+import glob
|
||||
+import base64
|
||||
from lib389._constants import PASSWORD, DN_DM, DEFAULT_SUFFIX
|
||||
from lib389._constants import SUFFIX, PASSWORD, DN_DM, DN_CONFIG, PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DEFAULT_CHANGELOG_DB
|
||||
from lib389 import Entry
|
||||
from lib389.topologies import topology_m1 as topo_supplier
|
||||
-from lib389.idm.user import UserAccounts
|
||||
-from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer
|
||||
+from lib389.idm.user import UserAccounts, UserAccount
|
||||
+from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer, ds_supports_new_changelog
|
||||
from lib389.topologies import topology_st as topo
|
||||
from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
|
||||
@@ -39,6 +41,13 @@ TEST_PASSWORDS += ['CNpwtest1ZZZZ', 'ZZZZZCNpwtest1',
|
||||
TEST_PASSWORDS2 = (
|
||||
'CN12pwtest31', 'SN3pwtest231', 'UID1pwtest123', 'MAIL2pwtest12@redhat.com', '2GN1pwtest123', 'People123')
|
||||
|
||||
+SUPPORTED_SCHEMES = (
|
||||
+ "{SHA}", "{SSHA}", "{SHA256}", "{SSHA256}",
|
||||
+ "{SHA384}", "{SSHA384}", "{SHA512}", "{SSHA512}",
|
||||
+ "{crypt}", "{NS-MTA-MD5}", "{clear}", "{MD5}",
|
||||
+ "{SMD5}", "{PBKDF2_SHA256}", "{PBKDF2_SHA512}",
|
||||
+ "{GOST_YESCRYPT}", "{PBKDF2-SHA256}", "{PBKDF2-SHA512}" )
|
||||
+
|
||||
def _check_unhashed_userpw(inst, user_dn, is_present=False):
|
||||
"""Check if unhashed#user#password attribute is present or not in the changelog"""
|
||||
unhashed_pwd_attribute = 'unhashed#user#password'
|
||||
@@ -319,6 +328,47 @@ def test_unhashed_pw_switch(topo_supplier):
|
||||
# Add debugging steps(if any)...
|
||||
pass
|
||||
|
||||
+@pytest.mark.parametrize("scheme", SUPPORTED_SCHEMES )
|
||||
+def test_long_hashed_password(topo, create_user, scheme):
|
||||
+ """Check that hashed password with very long value does not cause trouble
|
||||
+
|
||||
+ :id: 252a1f76-114b-11ef-8a7a-482ae39447e5
|
||||
+ :setup: standalone Instance
|
||||
+ :parametrized: yes
|
||||
+ :steps:
|
||||
+ 1. Add a test user user
|
||||
+ 2. Set a long password with requested scheme
|
||||
+ 3. Bind on that user using a wrong password
|
||||
+ 4. Check that instance is still alive
|
||||
+ 5. Remove the added user
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Should get ldap.INVALID_CREDENTIALS exception
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ inst.simple_bind_s(DN_DM, PASSWORD)
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ # Make sure that server is started as this test may crash it
|
||||
+ inst.start()
|
||||
+ # Adding Test user (It may already exists if previous test failed)
|
||||
+ user2 = UserAccount(inst, dn='uid=test_user_1002,ou=People,dc=example,dc=com')
|
||||
+ if not user2.exists():
|
||||
+ user2 = users.create_test_user(uid=1002, gid=2002)
|
||||
+ # Setting hashed password
|
||||
+ passwd = 'A'*4000
|
||||
+ hashed_passwd = scheme.encode('utf-8') + base64.b64encode(passwd.encode('utf-8'))
|
||||
+ user2.replace('userpassword', hashed_passwd)
|
||||
+ # Bind on that user using a wrong password
|
||||
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
|
||||
+ conn = user2.bind(PASSWORD)
|
||||
+ # Check that instance is still alive
|
||||
+ assert inst.status()
|
||||
+ # Remove the added user
|
||||
+ user2.delete()
|
||||
+
|
||||
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
diff --git a/ldap/servers/plugins/pwdstorage/md5_pwd.c b/ldap/servers/plugins/pwdstorage/md5_pwd.c
|
||||
index 1e2cf58e7..b9a48d5ca 100644
|
||||
--- a/ldap/servers/plugins/pwdstorage/md5_pwd.c
|
||||
+++ b/ldap/servers/plugins/pwdstorage/md5_pwd.c
|
||||
@@ -37,6 +37,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
unsigned char hash_out[MD5_HASH_LEN];
|
||||
unsigned char b2a_out[MD5_HASH_LEN * 2]; /* conservative */
|
||||
SECItem binary_item;
|
||||
+ size_t dbpwd_len = strlen(dbpwd);
|
||||
|
||||
ctx = PK11_CreateDigestContext(SEC_OID_MD5);
|
||||
if (ctx == NULL) {
|
||||
@@ -45,6 +46,12 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
goto loser;
|
||||
}
|
||||
|
||||
+ if (dbpwd_len >= sizeof b2a_out) {
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
|
||||
+ "The hashed password stored in the user entry is longer than any valid md5 hash");
|
||||
+ goto loser;
|
||||
+ }
|
||||
+
|
||||
/* create the hash */
|
||||
PK11_DigestBegin(ctx);
|
||||
PK11_DigestOp(ctx, (const unsigned char *)userpwd, strlen(userpwd));
|
||||
@@ -57,7 +64,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
bver = NSSBase64_EncodeItem(NULL, (char *)b2a_out, sizeof b2a_out, &binary_item);
|
||||
/* bver points to b2a_out upon success */
|
||||
if (bver) {
|
||||
- rc = slapi_ct_memcmp(bver, dbpwd, strlen(dbpwd));
|
||||
+ rc = slapi_ct_memcmp(bver, dbpwd, dbpwd_len);
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
|
||||
"Could not base64 encode hashed value for password compare");
|
||||
diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
index dcac4fcdd..82b8c9501 100644
|
||||
--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
+++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
|
||||
@@ -255,6 +255,12 @@ pbkdf2_sha256_pw_cmp(const char *userpwd, const char *dbpwd)
|
||||
passItem.data = (unsigned char *)userpwd;
|
||||
passItem.len = strlen(userpwd);
|
||||
|
||||
+ if (pwdstorage_base64_decode_len(dbpwd, dbpwd_len) > sizeof dbhash) {
|
||||
+ /* Hashed value is too long and cannot match any value generated by pbkdf2_sha256_hash */
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value. (hashed value is too long)\n");
|
||||
+ return result;
|
||||
+ }
|
||||
+
|
||||
/* Decode the DBpwd to bytes from b64 */
|
||||
if (PL_Base64Decode(dbpwd, dbpwd_len, dbhash) == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value\n");
|
||||
--
|
||||
2.46.0
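
The guard added above rejects a stored hash that is longer than anything the scheme could have produced before it is decoded into a fixed-size buffer. A minimal sketch of the same check, assuming an illustrative size limit (the real bounds come from the scheme's digest and salt sizes):

```python
# Sketch only: check the maximum decoded length *before* decoding.
import base64

MAX_STORED_HASH = 256          # assumed upper bound for the example

def decode_stored_hash(dbpwd: str):
    # base64 expands 3 bytes to 4 chars, so 4 chars decode to at most 3 bytes
    if (len(dbpwd) + 3) // 4 * 3 > MAX_STORED_HASH:
        return None            # malformed/oversized hash: fail the bind
    return base64.b64decode(dbpwd)
```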
|
||||
|
||||
@ -1,178 +0,0 @@
|
||||
From e8a5b1deef1b455aafecb71efc029d2407b1b06f Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Tue, 16 Jul 2024 08:32:21 -0700
|
||||
Subject: [PATCH] Issue 4778 - Add COMPACT_CL5 task to dsconf replication
|
||||
(#6260)
|
||||
|
||||
Description: In 1.4.3, the changelog is not part of a backend.
|
||||
It can be compacted with nsds5task: CAMPACT_CL5 as part of the replication entry.
|
||||
Add the task as a compact-changelog command under the dsconf replication tool.
|
||||
Add tests for the feature and fix old tests.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/4778
|
||||
|
||||
Reviewed by: @progier389 (Thanks!)
|
||||
---
|
||||
.../tests/suites/config/compact_test.py | 36 ++++++++++++++---
|
||||
src/lib389/lib389/cli_conf/replication.py | 10 +++++
|
||||
src/lib389/lib389/replica.py | 40 +++++++++++++++++++
|
||||
3 files changed, 81 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/config/compact_test.py b/dirsrvtests/tests/suites/config/compact_test.py
|
||||
index 317258d0e..31d98d10c 100644
|
||||
--- a/dirsrvtests/tests/suites/config/compact_test.py
|
||||
+++ b/dirsrvtests/tests/suites/config/compact_test.py
|
||||
@@ -13,14 +13,14 @@ import time
|
||||
import datetime
|
||||
from lib389.tasks import DBCompactTask
|
||||
from lib389.backend import DatabaseConfig
|
||||
-from lib389.replica import Changelog5
|
||||
+from lib389.replica import Changelog5, Replicas
|
||||
from lib389.topologies import topology_m1 as topo
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def test_compact_db_task(topo):
|
||||
- """Specify a test case purpose or name here
|
||||
+ """Test compaction of database
|
||||
|
||||
:id: 1b3222ef-a336-4259-be21-6a52f76e1859
|
||||
:setup: Standalone Instance
|
||||
@@ -48,7 +48,7 @@ def test_compact_db_task(topo):
|
||||
|
||||
|
||||
def test_compaction_interval_and_time(topo):
|
||||
- """Specify a test case purpose or name here
|
||||
+ """Test compaction interval and time for database and changelog
|
||||
|
||||
:id: f361bee9-d7e7-4569-9255-d7b60dd9d92e
|
||||
:setup: Supplier Instance
|
||||
@@ -95,10 +95,36 @@ def test_compaction_interval_and_time(topo):
|
||||
|
||||
# Check compaction occurred as expected
|
||||
time.sleep(45)
|
||||
- assert not inst.searchErrorsLog("Compacting databases")
|
||||
+ assert not inst.searchErrorsLog("compacting replication changelogs")
|
||||
|
||||
time.sleep(90)
|
||||
- assert inst.searchErrorsLog("Compacting databases")
|
||||
+ assert inst.searchErrorsLog("compacting replication changelogs")
|
||||
+ inst.deleteErrorLogs(restart=False)
|
||||
+
|
||||
+
|
||||
+def test_compact_cl5_task(topo):
|
||||
+ """Test compaction of changelog5 database
|
||||
+
|
||||
+ :id: aadfa9f7-73c0-463a-912c-0a29aa1f8167
|
||||
+ :setup: Standalone Instance
|
||||
+ :steps:
|
||||
+ 1. Run compaction task
|
||||
+ 2. Check errors log to show task was run
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ """
|
||||
+ inst = topo.ms["supplier1"]
|
||||
+
|
||||
+ replicas = Replicas(inst)
|
||||
+ replicas.compact_changelog(log=log)
|
||||
+
|
||||
+ # Check compaction occurred as expected. But instead of time.sleep(5) check 1 sec in loop
|
||||
+ for _ in range(5):
|
||||
+ time.sleep(1)
|
||||
+ if inst.searchErrorsLog("compacting replication changelogs"):
|
||||
+ break
|
||||
+ assert inst.searchErrorsLog("compacting replication changelogs")
|
||||
inst.deleteErrorLogs(restart=False)
|
||||
|
||||
|
||||
diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
|
||||
index 352c0ee5b..ccc394255 100644
|
||||
--- a/src/lib389/lib389/cli_conf/replication.py
|
||||
+++ b/src/lib389/lib389/cli_conf/replication.py
|
||||
@@ -1199,6 +1199,11 @@ def restore_cl_dir(inst, basedn, log, args):
|
||||
replicas.restore_changelog(replica_roots=args.REPLICA_ROOTS, log=log)
|
||||
|
||||
|
||||
+def compact_cl5(inst, basedn, log, args):
|
||||
+ replicas = Replicas(inst)
|
||||
+ replicas.compact_changelog(replica_roots=args.REPLICA_ROOTS, log=log)
|
||||
+
|
||||
+
|
||||
def create_parser(subparsers):
|
||||
|
||||
############################################
|
||||
@@ -1326,6 +1331,11 @@ def create_parser(subparsers):
|
||||
help="Specify one replica root whose changelog you want to restore. "
|
||||
"The replica root will be consumed from the LDIF file name if the option is omitted.")
|
||||
|
||||
+ compact_cl = repl_subcommands.add_parser('compact-changelog', help='Compact the changelog database')
|
||||
+ compact_cl.set_defaults(func=compact_cl5)
|
||||
+ compact_cl.add_argument('REPLICA_ROOTS', nargs="+",
|
||||
+ help="Specify replica roots whose changelog you want to compact.")
|
||||
+
|
||||
restore_changelogdir = restore_subcommands.add_parser('from-changelogdir', help='Restore LDIF files from changelogdir.')
|
||||
restore_changelogdir.set_defaults(func=restore_cl_dir)
|
||||
restore_changelogdir.add_argument('REPLICA_ROOTS', nargs="+",
|
||||
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
|
||||
index 94e1fdad5..1f321972d 100644
|
||||
--- a/src/lib389/lib389/replica.py
|
||||
+++ b/src/lib389/lib389/replica.py
|
||||
@@ -1648,6 +1648,11 @@ class Replica(DSLdapObject):
|
||||
"""
|
||||
self.replace('nsds5task', 'ldif2cl')
|
||||
|
||||
+ def begin_task_compact_cl5(self):
|
||||
+ """Begin COMPACT_CL5 task
|
||||
+ """
|
||||
+ self.replace('nsds5task', 'COMPACT_CL5')
|
||||
+
|
||||
def get_suffix(self):
|
||||
"""Return the suffix
|
||||
"""
|
||||
@@ -1829,6 +1834,41 @@ class Replicas(DSLdapObjects):
|
||||
log.error(f"Changelog LDIF for '{repl_root}' was not found")
|
||||
continue
|
||||
|
||||
+ def compact_changelog(self, replica_roots=[], log=None):
|
||||
+ """Compact Directory Server replication changelog
|
||||
+
|
||||
+ :param replica_roots: Replica suffixes that need to be processed (and optional LDIF file path)
|
||||
+ :type replica_roots: list of str
|
||||
+ :param log: The logger object
|
||||
+ :type log: logger
|
||||
+ """
|
||||
+
|
||||
+ if log is None:
|
||||
+ log = self._log
|
||||
+
|
||||
+ # Check if the changelog entry exists
|
||||
+ try:
|
||||
+ cl = Changelog5(self._instance)
|
||||
+ cl.get_attr_val_utf8_l("nsslapd-changelogdir")
|
||||
+ except ldap.NO_SUCH_OBJECT:
|
||||
+ raise ValueError("Changelog entry was not found. Probably, the replication is not enabled on this instance")
|
||||
+
|
||||
+ # Get all the replicas on the server if --replica-roots option is not specified
|
||||
+ repl_roots = []
|
||||
+ if not replica_roots:
|
||||
+ for replica in self.list():
|
||||
+ repl_roots.append(replica.get_attr_val_utf8("nsDS5ReplicaRoot"))
|
||||
+ else:
|
||||
+ for repl_root in replica_roots:
|
||||
+ repl_roots.append(repl_root)
|
||||
+
|
||||
+ # Dump the changelog for the replica
|
||||
+
|
||||
+ # Dump the changelog for the replica
|
||||
+ for repl_root in repl_roots:
|
||||
+ replica = self.get(repl_root)
|
||||
+ replica.begin_task_compact_cl5()
|
||||
+
|
||||
|
||||
class BootstrapReplicationManager(DSLdapObject):
|
||||
"""A Replication Manager credential for bootstrapping the repl process.
|
||||
--
|
||||
2.47.0
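
With the lib389 pieces above in place, compacting the changelog is a one-call task. A minimal usage sketch, assuming `inst` is an already-connected, replication-enabled DirSrv instance and the suffix is a placeholder:

```python
# Usage sketch for the API added above (illustrative suffix).
from lib389.replica import Replicas

def compact_repl_changelog(inst, suffix="dc=example,dc=com"):
    replicas = Replicas(inst)
    # Raises ValueError if replication (and thus the changelog) is not enabled
    replicas.compact_changelog(replica_roots=[suffix])
```

On the command line this corresponds to the new `dsconf ... replication compact-changelog <replica-root>` sub-command that the patch registers.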
|
||||
|
||||
@ -1,55 +0,0 @@
|
||||
From d1cd9a5675e2953b7c8034ebb87a434cdd3ce0c3 Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Mon, 2 Dec 2024 17:18:32 +0100
|
||||
Subject: [PATCH] Issue 6417 - If an entry RDN is identical to the suffix, then
|
||||
Entryrdn gets broken during a reindex (#6418)
|
||||
|
||||
Bug description:
|
||||
During a reindex, the entryrdn index is built at the end from
|
||||
each entry in the suffix.
|
||||
If one entry has a RDN that is identical to the suffix DN,
|
||||
then entryrdn_lookup_dn may erroneously return the suffix DN
|
||||
as the DN of the entry.
|
||||
|
||||
Fix description:
|
||||
When the lookup entry has no parent (because index is under
|
||||
work) the loop lookup the entry using the RDN.
|
||||
If this RDN matches the suffix DN, then it exits from the loop
|
||||
with the suffix DN.
|
||||
Before exiting it checks that the original lookup entryID
|
||||
is equal to suffix entryID. If it does not match
|
||||
the function fails and then the DN from the entry will be
|
||||
built from id2enty
|
||||
|
||||
fixes: #6417
|
||||
|
||||
Reviewed by: Pierre Rogier, Simon Pichugin (Thanks !!!)
|
||||
---
|
||||
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 11 ++++++++++-
|
||||
1 file changed, 10 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 5797dd779..83b041192 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1224,7 +1224,16 @@ entryrdn_lookup_dn(backend *be,
|
||||
}
|
||||
goto bail;
|
||||
}
|
||||
- maybesuffix = 1;
|
||||
+ if (workid == 1) {
|
||||
+ /* The loop (workid) iterates from the starting 'id'
|
||||
+ * up to the suffix ID (i.e. '1').
|
||||
+ * A corner case (#6417) is if an entry, on the path
|
||||
+ * 'id' -> suffix, has the same RDN than the suffix.
|
||||
+ * In order to erroneously believe the loop hits the suffix
|
||||
+ * we need to check that 'workid' is '1' (suffix)
|
||||
+ */
|
||||
+ maybesuffix = 1;
|
||||
+ }
|
||||
} else {
|
||||
_entryrdn_cursor_print_error("entryrdn_lookup_dn",
|
||||
key.data, data.size, data.ulen, rc);
|
||||
--
|
||||
2.48.0
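
The fix above only lets the DN-reconstruction loop stop when the current id really is the suffix id (1), so an intermediate entry whose RDN merely equals the suffix RDN no longer short-circuits the walk. A toy illustration of that loop shape (not the entryrdn code; the lookup tables are placeholders):

```python
# Toy model of walking parent ids toward the suffix (id 1). Illustration only.
SUFFIX_ID = 1

def build_dn(entry_id, rdn_of, parent_of):
    rdns = []
    workid = entry_id
    while True:
        rdns.append(rdn_of[workid])
        if workid == SUFFIX_ID:        # only id 1 ends the walk, even if an
            break                      # intermediate RDN equals the suffix RDN
        workid = parent_of[workid]
    return ",".join(rdns)
```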
|
||||
|
||||
@ -1,267 +0,0 @@
|
||||
From 9b2fc77a36156ea987dcea6e2043f8e4c4a6b259 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Tue, 18 Jun 2024 14:21:07 +0200
|
||||
Subject: [PATCH] Issue 6224 - d2entry - Could not open id2entry err 0 - at
|
||||
startup when having sub-suffixes (#6225)
|
||||
|
||||
Problem:: d2entry - Could not open id2entry err 0 is logged at startup when having sub-suffixes
|
||||
Reason: The slapi_exist_referral internal search access a backend that is not yet started.
|
||||
Solution: Limit the internal search to a single backend
|
||||
|
||||
Issue: #6224
|
||||
|
||||
Reviewed by: @droideck Thanks!
|
||||
|
||||
(cherry picked from commit 796f703021e961fdd8cbc53b4ad4e20258af0e96)
|
||||
---
|
||||
.../tests/suites/ds_logs/ds_logs_test.py | 1 +
|
||||
.../suites/mapping_tree/regression_test.py | 161 +++++++++++++++++-
|
||||
ldap/servers/slapd/backend.c | 7 +-
|
||||
3 files changed, 159 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 812936c62..84a9c6ec8 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,6 +1222,7 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+<<<<<<< HEAD
|
||||
def test_referral_subsuffix(topology_st, request):
|
||||
"""Test the results of an inverted parent suffix definition in the configuration.
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/mapping_tree/regression_test.py b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
index 99d4a1d5f..689ff9f59 100644
|
||||
--- a/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
|
||||
@@ -11,10 +11,14 @@ import ldap
|
||||
import logging
|
||||
import os
|
||||
import pytest
|
||||
+import time
|
||||
from lib389.backend import Backends, Backend
|
||||
+from lib389._constants import HOST_STANDALONE, PORT_STANDALONE, DN_DM, PW_DM
|
||||
from lib389.dbgen import dbgen_users
|
||||
from lib389.mappingTree import MappingTrees
|
||||
from lib389.topologies import topology_st
|
||||
+from lib389.referral import Referrals, Referral
|
||||
+
|
||||
|
||||
try:
|
||||
from lib389.backend import BackendSuffixView
|
||||
@@ -31,14 +35,26 @@ else:
|
||||
logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
+PARENT_SUFFIX = "dc=parent"
|
||||
+CHILD1_SUFFIX = f"dc=child1,{PARENT_SUFFIX}"
|
||||
+CHILD2_SUFFIX = f"dc=child2,{PARENT_SUFFIX}"
|
||||
+
|
||||
+PARENT_REFERRAL_DN = f"cn=ref,ou=People,{PARENT_SUFFIX}"
|
||||
+CHILD1_REFERRAL_DN = f"cn=ref,ou=people,{CHILD1_SUFFIX}"
|
||||
+CHILD2_REFERRAL_DN = f"cn=ref,ou=people,{CHILD2_SUFFIX}"
|
||||
+
|
||||
+REFERRAL_CHECK_PEDIOD = 7
|
||||
+
|
||||
+
|
||||
+
|
||||
BESTRUCT = [
|
||||
- { "bename" : "parent", "suffix": "dc=parent" },
|
||||
- { "bename" : "child1", "suffix": "dc=child1,dc=parent" },
|
||||
- { "bename" : "child2", "suffix": "dc=child2,dc=parent" },
|
||||
+ { "bename" : "parent", "suffix": PARENT_SUFFIX },
|
||||
+ { "bename" : "child1", "suffix": CHILD1_SUFFIX },
|
||||
+ { "bename" : "child2", "suffix": CHILD2_SUFFIX },
|
||||
]
|
||||
|
||||
|
||||
-@pytest.fixture(scope="function")
|
||||
+@pytest.fixture(scope="module")
|
||||
def topo(topology_st, request):
|
||||
bes = []
|
||||
|
||||
@@ -50,6 +66,9 @@ def topo(topology_st, request):
|
||||
request.addfinalizer(fin)
|
||||
|
||||
inst = topology_st.standalone
|
||||
+ # Reduce nsslapd-referral-check-period to accelerate test
|
||||
+ topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK_PEDIOD))
|
||||
+
|
||||
ldif_files = {}
|
||||
for d in BESTRUCT:
|
||||
bename = d['bename']
|
||||
@@ -76,14 +95,13 @@ def topo(topology_st, request):
|
||||
inst.start()
|
||||
return topology_st
|
||||
|
||||
-# Parameters for test_change_repl_passwd
|
||||
-EXPECTED_ENTRIES = (("dc=parent", 39), ("dc=child1,dc=parent", 13), ("dc=child2,dc=parent", 13))
|
||||
+# Parameters for test_sub_suffixes
|
||||
@pytest.mark.parametrize(
|
||||
"orphan_param",
|
||||
[
|
||||
- pytest.param( ( True, { "dc=parent": 2, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-true" ),
|
||||
- pytest.param( ( False, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-false" ),
|
||||
- pytest.param( ( None, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="no-orphan" ),
|
||||
+ pytest.param( ( True, { PARENT_SUFFIX: 2, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="orphan-is-true" ),
|
||||
+ pytest.param( ( False, { PARENT_SUFFIX: 3, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="orphan-is-false" ),
|
||||
+ pytest.param( ( None, { PARENT_SUFFIX: 3, CHILD1_SUFFIX:1, CHILD2_SUFFIX:1}), id="no-orphan" ),
|
||||
],
|
||||
)
|
||||
|
||||
@@ -128,3 +146,128 @@ def test_sub_suffixes(topo, orphan_param):
|
||||
log.info('Test PASSED')
|
||||
|
||||
|
||||
+def test_one_level_search_on_sub_suffixes(topo):
|
||||
+ """ Perform one level scoped search accross suffix and sub-suffix
|
||||
+
|
||||
+ :id: 92f3139e-280e-11ef-a989-482ae39447e5
|
||||
+ :feature: mapping-tree
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
+ :steps:
|
||||
+ 1. Perform a ONE LEVEL search on dc=parent
|
||||
+ 2. Check that all expected entries have been returned
|
||||
+ 3. Check that only the expected entries have been returned
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. each expected dn should be in the result set
|
||||
+ 3. Number of returned entries should be the same as the number of expected entries
|
||||
+ """
|
||||
+ expected_dns = ( 'dc=child1,dc=parent',
|
||||
+ 'dc=child2,dc=parent',
|
||||
+ 'ou=accounting,dc=parent',
|
||||
+ 'ou=product development,dc=parent',
|
||||
+ 'ou=product testing,dc=parent',
|
||||
+ 'ou=human resources,dc=parent',
|
||||
+ 'ou=payroll,dc=parent',
|
||||
+ 'ou=people,dc=parent',
|
||||
+ 'ou=groups,dc=parent', )
|
||||
+ entries = topo.standalone.search_s("dc=parent", ldap.SCOPE_ONELEVEL, "(objectClass=*)",
|
||||
+ attrlist=("dc","ou"), escapehatch='i am sure')
|
||||
+ log.info(f'one level search on dc=parent returned the following entries: {entries}')
|
||||
+ dns = [ entry.dn for entry in entries ]
|
||||
+ for dn in expected_dns:
|
||||
+ assert dn in dns
|
||||
+ assert len(entries) == len(expected_dns)
|
||||
+
|
||||
+
|
||||
+def test_sub_suffixes_errlog(topo):
|
||||
+ """ check the entries found on suffix/sub-suffix
|
||||
+ used int
|
||||
+
|
||||
+ :id: 1db9d52e-28de-11ef-b286-482ae39447e5
|
||||
+ :feature: mapping-tree
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
+ :steps:
|
||||
+ 1. Check that id2entry error message is not in the error log.
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ assert not inst.searchErrorsLog('id2entry - Could not open id2entry err 0')
|
||||
+
|
||||
+
|
||||
+# Parameters for test_referral_subsuffix:
|
||||
+# a tuple pair containing:
|
||||
+# - list of referral dn that must be created
|
||||
+# - dict of searches basedn: expected_number_of_referrals
|
||||
+@pytest.mark.parametrize(
|
||||
+ "parameters",
|
||||
+ [
|
||||
+ pytest.param( ((PARENT_REFERRAL_DN, CHILD1_REFERRAL_DN), {PARENT_SUFFIX: 2, CHILD1_SUFFIX:1, CHILD2_SUFFIX:0}), id="Both"),
|
||||
+ pytest.param( ((PARENT_REFERRAL_DN,), {PARENT_SUFFIX: 1, CHILD1_SUFFIX:0, CHILD2_SUFFIX:0}) , id="Parent"),
|
||||
+ pytest.param( ((CHILD1_REFERRAL_DN,), {PARENT_SUFFIX: 1, CHILD1_SUFFIX:1, CHILD2_SUFFIX:0}) , id="Child"),
|
||||
+ pytest.param( ((), {PARENT_SUFFIX: 0, CHILD1_SUFFIX:0, CHILD2_SUFFIX:0}), id="None"),
|
||||
+ ])
|
||||
+
|
||||
+def test_referral_subsuffix(topo, request, parameters):
|
||||
+ """Test the results of an inverted parent suffix definition in the configuration.
|
||||
+
|
||||
+ For more details see:
|
||||
+ https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html
|
||||
+
|
||||
+ :id: 4e111a22-2a5d-11ef-a890-482ae39447e5
|
||||
+ :feature: referrals
|
||||
+ :setup: Standalone instance with 3 additional backends:
|
||||
+ dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+ :parametrized: yes
|
||||
+ :steps:
|
||||
+ refs,searches = referrals
|
||||
+
|
||||
+ 1. Create the referrals according to the current parameter
|
||||
+ 2. Wait enough time so they get detected
|
||||
+ 3. For each search base dn, in the current parameter, perform the two following steps
|
||||
+ 4. In 3. loop: Perform a search with provided base dn
|
||||
+ 5. In 3. loop: Check that the number of returned referrals is the expected one.
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ all steps succeeds
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info('Deleting all referrals')
|
||||
+ for ref in Referrals(inst, PARENT_SUFFIX).list():
|
||||
+ ref.delete()
|
||||
+
|
||||
+ # Set cleanup callback
|
||||
+ if DEBUGGING:
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Remove all referrals
|
||||
+ fin()
|
||||
+ # Add requested referrals
|
||||
+ for dn in parameters[0]:
|
||||
+ refs = Referral(inst, dn=dn)
|
||||
+ refs.create(basedn=dn, properties={ 'cn': 'ref', 'ref': f'ldap://remote/{dn}'})
|
||||
+ # Wait that the internal search detects the referrals
|
||||
+ time.sleep(REFERRAL_CHECK_PEDIOD + 1)
|
||||
+ # Open a test connection
|
||||
+ ldc = ldap.initialize(f"ldap://{HOST_STANDALONE}:{PORT_STANDALONE}")
|
||||
+ ldc.set_option(ldap.OPT_REFERRALS,0)
|
||||
+ ldc.simple_bind_s(DN_DM,PW_DM)
|
||||
+
|
||||
+ # For each search base dn:
|
||||
+ for basedn,nbref in parameters[1].items():
|
||||
+ log.info(f"Referrals are: {parameters[0]}")
|
||||
+ # Perform a search with provided base dn
|
||||
+ result = ldc.search_s(basedn, ldap.SCOPE_SUBTREE, filterstr="(ou=People)")
|
||||
+ found_dns = [ dn for dn,entry in result if dn is not None ]
|
||||
+ found_refs = [ entry for dn,entry in result if dn is None ]
|
||||
+ log.info(f"Search on {basedn} returned {found_dns} and {found_refs}")
|
||||
+ # Check that the number of returned referrals is the expected one.
|
||||
+ log.info(f"Search returned {len(found_refs)} referrals. {nbref} are expected.")
|
||||
+ assert len(found_refs) == nbref
|
||||
+ ldc.unbind()
|
||||
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
|
||||
index 498f683b1..f86b0b9b6 100644
|
||||
--- a/ldap/servers/slapd/backend.c
|
||||
+++ b/ldap/servers/slapd/backend.c
|
||||
@@ -230,12 +230,17 @@ slapi_exist_referral(Slapi_Backend *be)
|
||||
|
||||
/* search for ("smart") referral entries */
|
||||
search_pb = slapi_pblock_new();
|
||||
- server_ctrls = (LDAPControl **) slapi_ch_calloc(2, sizeof (LDAPControl *));
|
||||
+ server_ctrls = (LDAPControl **) slapi_ch_calloc(3, sizeof (LDAPControl *));
|
||||
server_ctrls[0] = (LDAPControl *) slapi_ch_malloc(sizeof (LDAPControl));
|
||||
server_ctrls[0]->ldctl_oid = slapi_ch_strdup(LDAP_CONTROL_MANAGEDSAIT);
|
||||
server_ctrls[0]->ldctl_value.bv_val = NULL;
|
||||
server_ctrls[0]->ldctl_value.bv_len = 0;
|
||||
server_ctrls[0]->ldctl_iscritical = '\0';
|
||||
+ server_ctrls[1] = (LDAPControl *) slapi_ch_malloc(sizeof (LDAPControl));
|
||||
+ server_ctrls[1]->ldctl_oid = slapi_ch_strdup(MTN_CONTROL_USE_ONE_BACKEND_EXT_OID);
|
||||
+ server_ctrls[1]->ldctl_value.bv_val = NULL;
|
||||
+ server_ctrls[1]->ldctl_value.bv_len = 0;
|
||||
+ server_ctrls[1]->ldctl_iscritical = '\0';
|
||||
slapi_search_internal_set_pb(search_pb, suffix, LDAP_SCOPE_SUBTREE,
|
||||
filter, NULL, 0, server_ctrls, NULL,
|
||||
(void *) plugin_get_default_component_id(), 0);
|
||||
--
|
||||
2.48.0
|
||||
|
||||
@ -1,32 +0,0 @@
From ab06b3cebbe0287ef557c0307ca2ee86fe8cb761 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Thu, 21 Nov 2024 16:26:02 +0100
Subject: [PATCH] Issue 6224 - Fix merge issue in 389-ds-base-2.1 for
ds_log_test.py (#6414)

Fix a merge issue during cherry-pick over 389-ds-base-2.1 and 389-ds-base-1.4.3 branches

Issue: #6224

Reviewed by: @mreynolds389

(cherry picked from commit 2b541c64b8317209e4dafa4f82918d714039907c)
---
dirsrvtests/tests/suites/ds_logs/ds_logs_test.py | 1 -
1 file changed, 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
index 84a9c6ec8..812936c62 100644
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
@@ -1222,7 +1222,6 @@ def test_referral_check(topology_st, request):

request.addfinalizer(fin)

-<<<<<<< HEAD
def test_referral_subsuffix(topology_st, request):
"""Test the results of an inverted parent suffix definition in the configuration.

--
2.48.0
|
||||
|
||||
@ -1,214 +0,0 @@
|
||||
From 3fe2cf7cdedcdf5cafb59867e52a1fbe4a643571 Mon Sep 17 00:00:00 2001
|
||||
From: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
Date: Fri, 20 Dec 2024 22:37:15 +0900
|
||||
Subject: [PATCH] Issue 6224 - Remove test_referral_subsuffix from
|
||||
ds_logs_test.py (#6456)
|
||||
|
||||
Bug Description:
|
||||
|
||||
test_referral_subsuffix test was removed from main branch and some other
|
||||
ones for higher versions. But, it was not removed from 389-ds-base-1.4.3
|
||||
and 389-ds-base-2.1. The test doesn't work anymore with the fix for
|
||||
Issue 6224, because the added new control limited one backend for internal
|
||||
search. The test should be removed.
|
||||
|
||||
Fix Description:
|
||||
|
||||
remove the test from ds_logs_test.py
|
||||
|
||||
relates: https://github.com/389ds/389-ds-base/issues/6224
|
||||
---
|
||||
.../tests/suites/ds_logs/ds_logs_test.py | 177 ------------------
|
||||
1 file changed, 177 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 812936c62..84d721756 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -1222,183 +1222,6 @@ def test_referral_check(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
-def test_referral_subsuffix(topology_st, request):
|
||||
- """Test the results of an inverted parent suffix definition in the configuration.
|
||||
-
|
||||
- For more details see:
|
||||
- https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html
|
||||
-
|
||||
- :id: 4faf210a-4fde-4e4f-8834-865bdc8f4d37
|
||||
- :setup: Standalone instance
|
||||
- :steps:
|
||||
- 1. First create two Backends, without mapping trees.
|
||||
- 2. create the mapping trees for these backends
|
||||
- 3. reduce nsslapd-referral-check-period to accelerate test
|
||||
- 4. Remove error log file
|
||||
- 5. Create a referral entry on parent suffix
|
||||
- 6. Check that the server detected the referral
|
||||
- 7. Delete the referral entry
|
||||
- 8. Check that the server detected the deletion of the referral
|
||||
- 9. Remove error log file
|
||||
- 10. Create a referral entry on child suffix
|
||||
- 11. Check that the server detected the referral on both parent and child suffixes
|
||||
- 12. Delete the referral entry
|
||||
- 13. Check that the server detected the deletion of the referral on both parent and child suffixes
|
||||
- 14. Remove error log file
|
||||
- 15. Create a referral entry on parent suffix
|
||||
- 16. Check that the server detected the referral on both parent and child suffixes
|
||||
- 17. Delete the child referral entry
|
||||
- 18. Check that the server detected the deletion of the referral on child suffix but not on parent suffix
|
||||
- 19. Delete the parent referral entry
|
||||
- 20. Check that the server detected the deletion of the referral parent suffix
|
||||
-
|
||||
- :expectedresults:
|
||||
- all steps succeeds
|
||||
- """
|
||||
- inst = topology_st.standalone
|
||||
- # Step 1 First create two Backends, without mapping trees.
|
||||
- PARENT_SUFFIX='dc=parent,dc=com'
|
||||
- CHILD_SUFFIX='dc=child,%s' % PARENT_SUFFIX
|
||||
- be1 = create_backend(inst, 'Parent', PARENT_SUFFIX)
|
||||
- be2 = create_backend(inst, 'Child', CHILD_SUFFIX)
|
||||
- # Step 2 create the mapping trees for these backends
|
||||
- mts = MappingTrees(inst)
|
||||
- mt1 = mts.create(properties={
|
||||
- 'cn': PARENT_SUFFIX,
|
||||
- 'nsslapd-state': 'backend',
|
||||
- 'nsslapd-backend': 'Parent',
|
||||
- })
|
||||
- mt2 = mts.create(properties={
|
||||
- 'cn': CHILD_SUFFIX,
|
||||
- 'nsslapd-state': 'backend',
|
||||
- 'nsslapd-backend': 'Child',
|
||||
- 'nsslapd-parent-suffix': PARENT_SUFFIX,
|
||||
- })
|
||||
-
|
||||
- dc_ex = Domain(inst, dn=PARENT_SUFFIX)
|
||||
- assert dc_ex.exists()
|
||||
-
|
||||
- dc_st = Domain(inst, dn=CHILD_SUFFIX)
|
||||
- assert dc_st.exists()
|
||||
-
|
||||
- # Step 3 reduce nsslapd-referral-check-period to accelerate test
|
||||
- # requires a restart done on step 4
|
||||
- REFERRAL_CHECK=7
|
||||
- topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK))
|
||||
-
|
||||
- # Check that if we create a referral at parent level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is not detected at child backend
|
||||
-
|
||||
- # Step 3 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 4 Create a referral entry on parent suffix
|
||||
- rs_parent = Referrals(topology_st.standalone, PARENT_SUFFIX)
|
||||
-
|
||||
- referral_entry_parent = rs_parent.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 5 Check that the server detected the referral
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Step 6 Delete the referral entry
|
||||
- referral_entry_parent.delete()
|
||||
-
|
||||
- # Step 7 Check that the server detected the deletion of the referral
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Check that if we create a referral at child level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is detected at child backend
|
||||
-
|
||||
- # Step 8 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 9 Create a referral entry on child suffix
|
||||
- rs_child = Referrals(topology_st.standalone, CHILD_SUFFIX)
|
||||
- referral_entry_child = rs_child.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 10 Check that the server detected the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Step 11 Delete the referral entry
|
||||
- referral_entry_child.delete()
|
||||
-
|
||||
- # Step 12 Check that the server detected the deletion of the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Check that if we create a referral at child level and parent level
|
||||
- # - referral is detected at parent backend
|
||||
- # - referral is detected at child backend
|
||||
-
|
||||
- # Step 13 Remove error log file
|
||||
- topology_st.standalone.stop()
|
||||
- lpath = topology_st.standalone.ds_error_log._get_log_path()
|
||||
- os.unlink(lpath)
|
||||
- topology_st.standalone.start()
|
||||
-
|
||||
- # Step 14 Create a referral entry on parent suffix
|
||||
- # Create a referral entry on child suffix
|
||||
- referral_entry_parent = rs_parent.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
- referral_entry_child = rs_child.create(properties={
|
||||
- 'cn': 'testref',
|
||||
- 'ref': 'ldap://localhost:38901/ou=People,dc=example,dc=com'
|
||||
- })
|
||||
-
|
||||
- # Step 15 Check that the server detected the referral on both parent and child suffixes
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
-
|
||||
- # Step 16 Delete the child referral entry
|
||||
- referral_entry_child.delete()
|
||||
-
|
||||
- # Step 17 Check that the server detected the deletion of the referral on child suffix but not on parent suffix
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX)
|
||||
- assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- # Step 18 Delete the parent referral entry
|
||||
- referral_entry_parent.delete()
|
||||
-
|
||||
- # Step 19 Check that the server detected the deletion of the referral parent suffix
|
||||
- time.sleep(REFERRAL_CHECK + 1)
|
||||
- assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX)
|
||||
-
|
||||
- def fin():
|
||||
- log.info('Deleting referral')
|
||||
- try:
|
||||
- referral_entry_parent.delete()
|
||||
- referral_entry_child.delete()
|
||||
- except:
|
||||
- pass
|
||||
-
|
||||
- request.addfinalizer(fin)
|
||||
|
||||
def test_missing_backend_suffix(topology_st, request):
|
||||
"""Test that the server does not crash if a backend has no suffix
|
||||
--
|
||||
2.48.0
|
||||
|
||||
@ -1,90 +0,0 @@
From 4121ffe7a44fbacf513758661e71e483eb11ee3c Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 6 Jan 2025 14:00:39 +0100
Subject: [PATCH] Issue 6417 - (2nd) If an entry RDN is identical to the
suffix, then Entryrdn gets broken during a reindex (#6460)

Bug description:
The primary fix has a flaw as it assumes that the
suffix ID is '1'.
If the RUV entry is the first entry of the database
the server loops indefinitely

Fix description:
Read the suffix ID from the entryrdn index

fixes: #6417

Reviewed by: Pierre Rogier (also reviewed the first fix)
---
 .../suites/replication/regression_m2_test.py |  9 +++++++++
 ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 19 ++++++++++++++++++-
 2 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
index abac46ada..72d4b9f89 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
@@ -1010,6 +1010,15 @@ def test_online_reinit_may_hang(topo_with_sigkill):
|
||||
"""
|
||||
M1 = topo_with_sigkill.ms["supplier1"]
|
||||
M2 = topo_with_sigkill.ms["supplier2"]
|
||||
+
|
||||
+ # The RFE 5367 (when enabled) retrieves the DN
|
||||
+ # from the dncache. This hides an issue
|
||||
+ # with primary fix for 6417.
|
||||
+ # We need to disable the RFE to verify that the primary
|
||||
+ # fix is properly fixed.
|
||||
+ if ds_is_newer('2.3.1'):
|
||||
+ M1.config.replace('nsslapd-return-original-entrydn', 'off')
|
||||
+
|
||||
M1.stop()
|
||||
ldif_file = '%s/supplier1.ldif' % M1.get_ldif_dir()
|
||||
M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 83b041192..1bbb6252a 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1115,6 +1115,7 @@ entryrdn_lookup_dn(backend *be,
|
||||
rdn_elem *elem = NULL;
|
||||
int maybesuffix = 0;
|
||||
int db_retry = 0;
|
||||
+ ID suffix_id = 1;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "entryrdn_lookup_dn",
|
||||
"--> entryrdn_lookup_dn\n");
|
||||
@@ -1175,6 +1176,22 @@ entryrdn_lookup_dn(backend *be,
|
||||
/* Setting the bulk fetch buffer */
|
||||
data.flags = DB_DBT_MALLOC;
|
||||
|
||||
+ /* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
+ keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
+ dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1);
|
||||
+ rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data);
|
||||
+ if (rc) {
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
+ "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
+ slapi_sdn_get_ndn(be->be_suffix),
|
||||
+ suffix_id);
|
||||
+ } else {
|
||||
+ elem = (rdn_elem *)data.data;
|
||||
+ suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ }
|
||||
+ dblayer_value_free(be, &data);
|
||||
+ dblayer_value_free(be, &key);
|
||||
+
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
slapi_ch_free_string(&keybuf);
|
||||
@@ -1224,7 +1241,7 @@ entryrdn_lookup_dn(backend *be,
|
||||
}
|
||||
goto bail;
|
||||
}
|
||||
- if (workid == 1) {
|
||||
+ if (workid == suffix_id) {
|
||||
/* The loop (workid) iterates from the starting 'id'
|
||||
* up to the suffix ID (i.e. '1').
|
||||
* A corner case (#6417) is if an entry, on the path
|
||||
--
|
||||
2.48.0
|
||||
|
||||
@ -1,40 +0,0 @@
From 1ffcc9aa9a397180fe35283ee61b164471d073fb Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Tue, 7 Jan 2025 10:01:51 +0100
Subject: [PATCH] Issue 6417 - (2nd) fix typo

---
 ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index 1bbb6252a..e2b8273a2 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1178,8 +1178,10 @@ entryrdn_lookup_dn(backend *be,
|
||||
|
||||
/* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
- dblayer_value_set(be, &key, keybuf, strlen(keybuf) + 1);
|
||||
- rc = dblayer_cursor_op(&ctx.cursor, DBI_OP_MOVE_TO_KEY, &key, &data);
|
||||
+ key.data = keybuf;
|
||||
+ key.size = key.ulen = strlen(keybuf) + 1;
|
||||
+ key.flags = DB_DBT_USERMEM;
|
||||
+ rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
if (rc) {
|
||||
slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
"Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
@@ -1189,8 +1191,8 @@ entryrdn_lookup_dn(backend *be,
|
||||
elem = (rdn_elem *)data.data;
|
||||
suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
}
|
||||
- dblayer_value_free(be, &data);
|
||||
- dblayer_value_free(be, &key);
|
||||
+ slapi_ch_free(&data.data);
|
||||
+ slapi_ch_free_string(&keybuf);
|
||||
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
--
|
||||
2.48.0
|
||||
|
||||
@ -1,75 +0,0 @@
From 9e1284122a929fe14633a2aa6e2de4d72891f98f Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Mon, 13 Jan 2025 17:41:18 +0100
Subject: [PATCH] Issue 6417 - (3rd) If an entry RDN is identical to the
suffix, then Entryrdn gets broken during a reindex (#6480)

Bug description:
The previous fix had a flaw.
In case entryrdn_lookup_dn is called with an undefined suffix,
the lookup of the suffix triggers a crash.
For example it can occur during an internal search of a
nonexistent map (view plugin).
The issue exists in all releases but is hidden since 2.3.

Fix description:
Test that the suffix is defined

fixes: #6417

Reviewed by: Pierre Rogier (Thanks!)
---
 ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 36 +++++++++++---------
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
index e2b8273a2..01c77156f 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
|
||||
@@ -1176,23 +1176,27 @@ entryrdn_lookup_dn(backend *be,
|
||||
/* Setting the bulk fetch buffer */
|
||||
data.flags = DB_DBT_MALLOC;
|
||||
|
||||
- /* Just in case the suffix ID is not '1' retrieve it from the database */
|
||||
- keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
- key.data = keybuf;
|
||||
- key.size = key.ulen = strlen(keybuf) + 1;
|
||||
- key.flags = DB_DBT_USERMEM;
|
||||
- rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
- if (rc) {
|
||||
- slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
- "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
- slapi_sdn_get_ndn(be->be_suffix),
|
||||
- suffix_id);
|
||||
- } else {
|
||||
- elem = (rdn_elem *)data.data;
|
||||
- suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ /* Just in case the suffix ID is not '1' retrieve it from the database
|
||||
+ * if the suffix is not defined suffix_id remains '1'
|
||||
+ */
|
||||
+ if (be->be_suffix) {
|
||||
+ keybuf = slapi_ch_strdup(slapi_sdn_get_ndn(be->be_suffix));
|
||||
+ key.data = keybuf;
|
||||
+ key.size = key.ulen = strlen(keybuf) + 1;
|
||||
+ key.flags = DB_DBT_USERMEM;
|
||||
+ rc = cursor->c_get(cursor, &key, &data, DB_SET);
|
||||
+ if (rc) {
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, "entryrdn_lookup_dn",
|
||||
+ "Fails to retrieve the ID of suffix %s - keep the default value '%d'\n",
|
||||
+ slapi_sdn_get_ndn(be->be_suffix),
|
||||
+ suffix_id);
|
||||
+ } else {
|
||||
+ elem = (rdn_elem *) data.data;
|
||||
+ suffix_id = id_stored_to_internal(elem->rdn_elem_id);
|
||||
+ }
|
||||
+ slapi_ch_free(&data.data);
|
||||
+ slapi_ch_free_string(&keybuf);
|
||||
}
|
||||
- slapi_ch_free(&data.data);
|
||||
- slapi_ch_free_string(&keybuf);
|
||||
|
||||
do {
|
||||
/* Setting up a key for the node to get its parent */
|
||||
--
|
||||
2.48.0
|
||||
|
||||
@ -1,297 +0,0 @@
From d2f9dd82e3610ee9b73feea981c680c03bb21394 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 16 Jan 2025 08:42:53 -0500
Subject: [PATCH] Issue 6509 - Race condition with Paged Result searches

Description:

There is a race condition with Paged Result searches when a new operation comes
in while a paged search is finishing. This triggers an invalid time out error
and closes the connection with a T3 code.

The problem is that we do not use the "PagedResult lock" when checking the
connection's paged result data for a timeout event. This causes the paged
result timeout value to change unexpectedly and trigger a false timeout when a
new operation arrives.

Now we check the timeout without the conn lock; if it is expired it could
be a race condition and a false positive. Try the lock again and test the
timeout. This also prevents blocking non-paged result searches from
getting held up by the lock when it's not necessary.

This also fixes some memory leaks that occur when an error happens.

Relates: https://github.com/389ds/389-ds-base/issues/6509

Reviewed by: tbordaz & proger (Thanks!!)
---
 ldap/servers/slapd/daemon.c       | 61 ++++++++++++++++++-------------
 ldap/servers/slapd/opshared.c     | 58 ++++++++++++++---------------
 ldap/servers/slapd/pagedresults.c |  9 +++++
 ldap/servers/slapd/slap.h         |  2 +-
 4 files changed, 75 insertions(+), 55 deletions(-)

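The check-then-recheck scheme described above is easier to follow outside the C source. Below is a minimal sketch of the same pattern in Python; the names (PagedConn, is_timed_out, poll_connection) are illustrative only and are not part of the server code. The idea is: do a cheap timeout test without any lock, and only if it looks expired take the paged-results lock with a non-blocking acquire and test again before disconnecting.

import threading
import time

class PagedConn:
    """Illustrative stand-in for a connection carrying paged-result state."""
    def __init__(self, timelimit):
        self.pr_lock = threading.Lock()   # plays the role of the per-connection paged-results lock
        self.expiry = time.monotonic() + timelimit

    def is_timed_out(self):
        # Cheap check; may race with a worker thread that refreshes self.expiry.
        return time.monotonic() > self.expiry

def poll_connection(conn, disconnect):
    if not conn.is_timed_out():
        return                            # fast path: healthy connections are never locked here
    # Looks expired, but that may be a stale read: confirm under the lock.
    if not conn.pr_lock.acquire(blocking=False):
        return                            # a worker owns the slot right now; retry on the next poll pass
    try:
        if conn.is_timed_out():
            disconnect(conn)              # timeout confirmed while holding the lock
    finally:
        conn.pr_lock.release()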
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index bb80dae36..13dfe250d 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1578,7 +1578,29 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
if (c->c_state == CONN_STATE_FREE) {
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else {
|
||||
- /* we try to acquire the connection mutex, if it is already
|
||||
+ /* Check for a timeout for PAGED RESULTS */
|
||||
+ if (pagedresults_is_timedout_nolock(c)) {
|
||||
+ /*
|
||||
+ * There could be a race condition so lets try again with the
|
||||
+ * right lock
|
||||
+ */
|
||||
+ pthread_mutex_t *pr_mutex = pageresult_lock_get_addr(c);
|
||||
+ if (pthread_mutex_trylock(pr_mutex) == EBUSY) {
|
||||
+ c = next;
|
||||
+ continue;
|
||||
+ }
|
||||
+ if (pagedresults_is_timedout_nolock(c)) {
|
||||
+ pthread_mutex_unlock(pr_mutex);
|
||||
+ disconnect_server(c, c->c_connid, -1,
|
||||
+ SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
|
||||
+ 0);
|
||||
+ } else {
|
||||
+ pthread_mutex_unlock(pr_mutex);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ /*
|
||||
+ * we try to acquire the connection mutex, if it is already
|
||||
* acquired by another thread, don't wait
|
||||
*/
|
||||
if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
|
||||
@@ -1586,35 +1608,24 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
continue;
|
||||
}
|
||||
if (c->c_flags & CONN_FLAG_CLOSING) {
|
||||
- /* A worker thread has marked that this connection
|
||||
- * should be closed by calling disconnect_server.
|
||||
- * move this connection out of the active list
|
||||
- * the last thread to use the connection will close it
|
||||
+ /*
|
||||
+ * A worker thread, or paged result timeout, has marked that
|
||||
+ * this connection should be closed by calling
|
||||
+ * disconnect_server(). Move this connection out of the active
|
||||
+ * list then the last thread to use the connection will close
|
||||
+ * it.
|
||||
*/
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else if (c->c_sd == SLAPD_INVALID_SOCKET) {
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else if (c->c_prfd != NULL) {
|
||||
if ((!c->c_gettingber) && (c->c_threadnumber < c->c_max_threads_per_conn)) {
|
||||
- int add_fd = 1;
|
||||
- /* check timeout for PAGED RESULTS */
|
||||
- if (pagedresults_is_timedout_nolock(c)) {
|
||||
- /* Exceeded the paged search timelimit; disconnect the client */
|
||||
- disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
- SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
|
||||
- 0);
|
||||
- connection_table_move_connection_out_of_active_list(ct,
|
||||
- c);
|
||||
- add_fd = 0; /* do not poll on this fd */
|
||||
- }
|
||||
- if (add_fd) {
|
||||
- ct->fd[count].fd = c->c_prfd;
|
||||
- ct->fd[count].in_flags = SLAPD_POLL_FLAGS;
|
||||
- /* slot i of the connection table is mapped to slot
|
||||
- * count of the fds array */
|
||||
- c->c_fdi = count;
|
||||
- count++;
|
||||
- }
|
||||
+ ct->fd[listnum][count].fd = c->c_prfd;
|
||||
+ ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
|
||||
+ /* slot i of the connection table is mapped to slot
|
||||
+ * count of the fds array */
|
||||
+ c->c_fdi = count;
|
||||
+ count++;
|
||||
} else {
|
||||
if (c->c_threadnumber >= c->c_max_threads_per_conn) {
|
||||
c->c_maxthreadsblocked++;
|
||||
@@ -1675,7 +1686,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused
|
||||
continue;
|
||||
}
|
||||
|
||||
- /* Try to get connection mutex, if not available just skip the connection and
|
||||
+ /* Try to get connection mutex, if not available just skip the connection and
|
||||
* process other connections events. May generates cpu load for listening thread
|
||||
* if connection mutex is held for a long time
|
||||
*/
|
||||
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
|
||||
index 7ab4117cd..a29eed052 100644
|
||||
--- a/ldap/servers/slapd/opshared.c
|
||||
+++ b/ldap/servers/slapd/opshared.c
|
||||
@@ -250,7 +250,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
char *errtext = NULL;
|
||||
int nentries, pnentries;
|
||||
int flag_search_base_found = 0;
|
||||
- int flag_no_such_object = 0;
|
||||
+ bool flag_no_such_object = false;
|
||||
int flag_referral = 0;
|
||||
int flag_psearch = 0;
|
||||
int err_code = LDAP_SUCCESS;
|
||||
@@ -315,7 +315,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
rc = -1;
|
||||
goto free_and_return_nolock;
|
||||
}
|
||||
-
|
||||
+
|
||||
/* Set the time we actually started the operation */
|
||||
slapi_operation_set_time_started(operation);
|
||||
|
||||
@@ -798,11 +798,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
}
|
||||
|
||||
/* subtree searches :
|
||||
- * if the search was started above the backend suffix
|
||||
- * - temporarily set the SLAPI_SEARCH_TARGET_SDN to the
|
||||
- * base of the node so that we don't get a NO SUCH OBJECT error
|
||||
- * - do not change the scope
|
||||
- */
|
||||
+ * if the search was started above the backend suffix
|
||||
+ * - temporarily set the SLAPI_SEARCH_TARGET_SDN to the
|
||||
+ * base of the node so that we don't get a NO SUCH OBJECT error
|
||||
+ * - do not change the scope
|
||||
+ */
|
||||
if (scope == LDAP_SCOPE_SUBTREE) {
|
||||
if (slapi_sdn_issuffix(be_suffix, basesdn)) {
|
||||
if (free_sdn) {
|
||||
@@ -825,53 +825,53 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
|
||||
switch (rc) {
|
||||
case 1:
|
||||
/* if the backend returned LDAP_NO_SUCH_OBJECT for a SEARCH request,
|
||||
- * it will not have sent back a result - otherwise, it will have
|
||||
- * sent a result */
|
||||
+ * it will not have sent back a result - otherwise, it will have
|
||||
+ * sent a result */
|
||||
rc = SLAPI_FAIL_GENERAL;
|
||||
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
|
||||
if (err == LDAP_NO_SUCH_OBJECT) {
|
||||
/* may be the object exist somewhere else
|
||||
- * wait the end of the loop to send back this error
|
||||
- */
|
||||
- flag_no_such_object = 1;
|
||||
+ * wait the end of the loop to send back this error
|
||||
+ */
|
||||
+ flag_no_such_object = true;
|
||||
} else {
|
||||
/* err something other than LDAP_NO_SUCH_OBJECT, so the backend will
|
||||
- * have sent the result -
|
||||
- * Set a flag here so we don't return another result. */
|
||||
+ * have sent the result -
|
||||
+ * Set a flag here so we don't return another result. */
|
||||
sent_result = 1;
|
||||
}
|
||||
- /* fall through */
|
||||
+ /* fall through */
|
||||
|
||||
case -1: /* an error occurred */
|
||||
+ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
|
||||
/* PAGED RESULTS */
|
||||
if (op_is_pagedresults(operation)) {
|
||||
/* cleanup the slot */
|
||||
pthread_mutex_lock(pagedresults_mutex);
|
||||
+ if (err != LDAP_NO_SUCH_OBJECT && !flag_no_such_object) {
|
||||
+ /* Free the results if not "no_such_object" */
|
||||
+ void *sr = NULL;
|
||||
+ slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
|
||||
+ be->be_search_results_release(&sr);
|
||||
+ }
|
||||
pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx);
|
||||
rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1);
|
||||
pthread_mutex_unlock(pagedresults_mutex);
|
||||
}
|
||||
- if (1 == flag_no_such_object) {
|
||||
- break;
|
||||
- }
|
||||
- slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
|
||||
- if (err == LDAP_NO_SUCH_OBJECT) {
|
||||
- /* may be the object exist somewhere else
|
||||
- * wait the end of the loop to send back this error
|
||||
- */
|
||||
- flag_no_such_object = 1;
|
||||
+
|
||||
+ if (err == LDAP_NO_SUCH_OBJECT || flag_no_such_object) {
|
||||
+ /* Maybe the object exists somewhere else, wait to the end
|
||||
+ * of the loop to send back this error */
|
||||
+ flag_no_such_object = true;
|
||||
break;
|
||||
} else {
|
||||
- /* for error other than LDAP_NO_SUCH_OBJECT
|
||||
- * the error has already been sent
|
||||
- * stop the search here
|
||||
- */
|
||||
+ /* For error other than LDAP_NO_SUCH_OBJECT the error has
|
||||
+ * already been sent stop the search here */
|
||||
cache_return_target_entry(pb, be, operation);
|
||||
goto free_and_return;
|
||||
}
|
||||
|
||||
/* when rc == SLAPI_FAIL_DISKFULL this case is executed */
|
||||
-
|
||||
case SLAPI_FAIL_DISKFULL:
|
||||
operation_out_of_disk_space();
|
||||
cache_return_target_entry(pb, be, operation);
|
||||
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
|
||||
index db87e486e..4aa1fa3e5 100644
|
||||
--- a/ldap/servers/slapd/pagedresults.c
|
||||
+++ b/ldap/servers/slapd/pagedresults.c
|
||||
@@ -121,12 +121,15 @@ pagedresults_parse_control_value(Slapi_PBlock *pb,
|
||||
if (ber_scanf(ber, "{io}", pagesize, &cookie) == LBER_ERROR) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
|
||||
"<= corrupted control value\n");
|
||||
+ ber_free(ber, 1);
|
||||
return LDAP_PROTOCOL_ERROR;
|
||||
}
|
||||
if (!maxreqs) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "pagedresults_parse_control_value",
|
||||
"Simple paged results requests per conn exceeded the limit: %d\n",
|
||||
maxreqs);
|
||||
+ ber_free(ber, 1);
|
||||
+ slapi_ch_free_string(&cookie.bv_val);
|
||||
return LDAP_UNWILLING_TO_PERFORM;
|
||||
}
|
||||
|
||||
@@ -376,6 +379,10 @@ pagedresults_free_one_msgid(Connection *conn, ber_int_t msgid, pthread_mutex_t *
|
||||
}
|
||||
prp->pr_flags |= CONN_FLAG_PAGEDRESULTS_ABANDONED;
|
||||
prp->pr_flags &= ~CONN_FLAG_PAGEDRESULTS_PROCESSING;
|
||||
+ if (conn->c_pagedresults.prl_count > 0) {
|
||||
+ _pr_cleanup_one_slot(prp);
|
||||
+ conn->c_pagedresults.prl_count--;
|
||||
+ }
|
||||
rc = 0;
|
||||
break;
|
||||
}
|
||||
@@ -940,7 +947,9 @@ pagedresults_is_timedout_nolock(Connection *conn)
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
+
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "<-- pagedresults_is_timedout", "<= false 2\n");
|
||||
+
|
||||
return 0;
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
|
||||
index 072f6f962..469874fd1 100644
|
||||
--- a/ldap/servers/slapd/slap.h
|
||||
+++ b/ldap/servers/slapd/slap.h
|
||||
@@ -74,7 +74,7 @@ static char ptokPBE[34] = "Internal (Software) Token ";
|
||||
#include <sys/stat.h>
|
||||
#include <sys/socket.h>
|
||||
#include <netinet/in.h>
|
||||
-
|
||||
+#include <stdbool.h>
|
||||
#include <time.h> /* For timespec definitions */
|
||||
|
||||
/* Provides our int types and platform specific requirements. */
|
||||
--
|
||||
2.48.0
|
||||
|
||||
@ -1,29 +0,0 @@
|
||||
From 27cd055197bc3cae458a1f86621aa5410c66dd2c Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 20 Jan 2025 15:51:24 -0500
Subject: [PATCH] Issue 6509 - Fix cherry pick issue (race condition in Paged
 results)

Relates: https://github.com/389ds/389-ds-base/issues/6509
---
 ldap/servers/slapd/daemon.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 13dfe250d..57e07e5f5 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -1620,8 +1620,8 @@ setup_pr_read_pds(Connection_Table *ct)
|
||||
connection_table_move_connection_out_of_active_list(ct, c);
|
||||
} else if (c->c_prfd != NULL) {
|
||||
if ((!c->c_gettingber) && (c->c_threadnumber < c->c_max_threads_per_conn)) {
|
||||
- ct->fd[listnum][count].fd = c->c_prfd;
|
||||
- ct->fd[listnum][count].in_flags = SLAPD_POLL_FLAGS;
|
||||
+ ct->fd[count].fd = c->c_prfd;
|
||||
+ ct->fd[count].in_flags = SLAPD_POLL_FLAGS;
|
||||
/* slot i of the connection table is mapped to slot
|
||||
* count of the fds array */
|
||||
c->c_fdi = count;
|
||||
--
|
||||
2.48.0
|
||||
|
||||
File diff suppressed because it is too large
@ -1,236 +0,0 @@
From 1845aed98becaba6b975342229cb5e0de79d208d Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 29 Jan 2025 17:41:55 +0000
Subject: [PATCH] Issue 6436 - MOD on a large group slow if substring index is
 present (#6437)

Bug Description: If the substring index is configured for the group
membership attribute (member or uniqueMember), the removal of a
member from a large static group is pretty slow.

Fix Description: A proper solution would be to introduce a new index
type to track membership attributes. In the interim, we add a check
to healthcheck to inform the user of the implications of this
configuration.

Fixes: https://github.com/389ds/389-ds-base/issues/6436

Reviewed by: @Firstyear, @tbordaz, @droideck (Thanks)
---
 .../suites/healthcheck/health_config_test.py | 89 ++++++++++++++++++-
 src/lib389/lib389/lint.py                    | 15 ++++
 src/lib389/lib389/plugins.py                 | 37 +++++++-
 3 files changed, 137 insertions(+), 4 deletions(-)

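The lint check added in this patch reduces to: for every backend in the plugin scope, fetch the member and uniquemember indexes and flag any that carry the sub index type. A rough lib389 sketch of that walk is shown below, assuming an already-connected DirSrv handle named inst; it mirrors the plugin code in this patch rather than replacing it, and the helper name is made up for illustration.

from lib389.backend import Backends

def backends_with_sub_membership_index(inst, attrs=('member', 'uniquemember')):
    """Return (suffix, attr) pairs whose index carries the 'sub' index type."""
    flagged = []
    for backend in Backends(inst).list():
        suffix = backend.get_attr_val_utf8_l('nsslapd-suffix')
        if suffix == 'cn=changelog':
            continue                      # the retro changelog is always skipped
        indexes = backend.get_indexes()
        for attr in attrs:
            try:
                index = indexes.get(attr)
            except KeyError:
                continue                  # no index for this attribute on this backend
            if 'sub' in index.get_attr_vals_utf8_l('nsIndexType'):
                flagged.append((suffix, attr))
    return flagged

If an unwanted substring index turns up, the remediation suggested by the new lint report is the dsconf command quoted in lint.py below (backend index set ... --del-type=sub), followed by a reindex of the backend.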
diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
|
||||
index 6d3d08bfa..747699486 100644
|
||||
--- a/dirsrvtests/tests/suites/healthcheck/health_config_test.py
|
||||
+++ b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
|
||||
@@ -212,6 +212,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
|
||||
MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
|
||||
|
||||
standalone = topology_st.standalone
|
||||
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
|
||||
|
||||
log.info('Enable RI plugin')
|
||||
plugin = ReferentialIntegrityPlugin(standalone)
|
||||
@@ -233,7 +234,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st):
|
||||
|
||||
|
||||
def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
- """Check if HealthCheck returns DSMOLE0002 code
|
||||
+ """Check if HealthCheck returns DSMOLE0001 code
|
||||
|
||||
:id: 236b0ec2-13da-48fb-b65a-db7406d56d5d
|
||||
:setup: Standalone instance
|
||||
@@ -248,8 +249,8 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
:expectedresults:
|
||||
1. Success
|
||||
2. Success
|
||||
- 3. Healthcheck reports DSMOLE0002 code and related details
|
||||
- 4. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 3. Healthcheck reports DSMOLE0001 code and related details
|
||||
+ 4. Healthcheck reports DSMOLE0001 code and related details
|
||||
5. Success
|
||||
6. Healthcheck reports no issue found
|
||||
7. Healthcheck reports no issue found
|
||||
@@ -259,6 +260,7 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
MO_GROUP_ATTR = 'creatorsname'
|
||||
|
||||
standalone = topology_st.standalone
|
||||
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
|
||||
|
||||
log.info('Enable MO plugin')
|
||||
plugin = MemberOfPlugin(standalone)
|
||||
@@ -279,6 +281,87 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st):
|
||||
run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
|
||||
|
||||
|
||||
+def test_healthcheck_MO_plugin_substring_index(topology_st):
|
||||
+ """Check if HealthCheck returns DSMOLE0002 code when the
|
||||
+ member, uniquemember attribute contains a substring index type
|
||||
+
|
||||
+ :id: 10954811-24ac-4886-8183-e30892f8e02d
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Create DS instance
|
||||
+ 2. Configure the instance with MO Plugin
|
||||
+ 3. Change index type to substring for member attribute
|
||||
+ 4. Use HealthCheck without --json option
|
||||
+ 5. Use HealthCheck with --json option
|
||||
+ 6. Change index type back to equality for member attribute
|
||||
+ 7. Use HealthCheck without --json option
|
||||
+ 8. Use HealthCheck with --json option
|
||||
+ 9. Change index type to substring for uniquemember attribute
|
||||
+ 10. Use HealthCheck without --json option
|
||||
+ 11. Use HealthCheck with --json option
|
||||
+ 12. Change index type back to equality for uniquemember attribute
|
||||
+ 13. Use HealthCheck without --json option
|
||||
+ 14. Use HealthCheck with --json option
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 5. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 6. Success
|
||||
+ 7. Healthcheck reports no issue found
|
||||
+ 8. Healthcheck reports no issue found
|
||||
+ 9. Success
|
||||
+ 10. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 11. Healthcheck reports DSMOLE0002 code and related details
|
||||
+ 12. Success
|
||||
+ 13. Healthcheck reports no issue found
|
||||
+ 14. Healthcheck reports no issue found
|
||||
+ """
|
||||
+
|
||||
+ RET_CODE = 'DSMOLE0002'
|
||||
+ MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
|
||||
+ UNIQUE_MEMBER_DN = 'cn=uniquemember,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
|
||||
+
|
||||
+ standalone = topology_st.standalone
|
||||
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
|
||||
+
|
||||
+ log.info('Enable MO plugin')
|
||||
+ plugin = MemberOfPlugin(standalone)
|
||||
+ plugin.disable()
|
||||
+ plugin.enable()
|
||||
+
|
||||
+ log.info('Change the index type of the member attribute index to substring')
|
||||
+ index = Index(topology_st.standalone, MEMBER_DN)
|
||||
+ index.replace('nsIndexType', 'sub')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE)
|
||||
+
|
||||
+ log.info('Set the index type of the member attribute index back to eq')
|
||||
+ index.replace('nsIndexType', 'eq')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
|
||||
+
|
||||
+ log.info('Change the index type of the uniquemember attribute index to substring')
|
||||
+ index = Index(topology_st.standalone, UNIQUE_MEMBER_DN)
|
||||
+ index.replace('nsIndexType', 'sub')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE)
|
||||
+
|
||||
+ log.info('Set the index type of the uniquemember attribute index back to eq')
|
||||
+ index.replace('nsIndexType', 'eq')
|
||||
+
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT)
|
||||
+ run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
|
||||
+
|
||||
+ # Restart the instance after changing the plugin to avoid breaking the other tests
|
||||
+ standalone.restart()
|
||||
+
|
||||
+
|
||||
@pytest.mark.ds50873
|
||||
@pytest.mark.bz1685160
|
||||
@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented")
|
||||
diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
|
||||
index 4d9cbb666..3d3c79ea3 100644
|
||||
--- a/src/lib389/lib389/lint.py
|
||||
+++ b/src/lib389/lib389/lint.py
|
||||
@@ -231,6 +231,21 @@ database after adding the missing index type. Here is an example using dsconf:
|
||||
"""
|
||||
}
|
||||
|
||||
+DSMOLE0002 = {
|
||||
+ 'dsle': 'DSMOLE0002',
|
||||
+ 'severity': 'LOW',
|
||||
+ 'description': 'Removal of a member can be slow ',
|
||||
+ 'items': ['cn=memberof plugin,cn=plugins,cn=config', ],
|
||||
+ 'detail': """If the substring index is configured for a membership attribute. The removal of a member
|
||||
+from the large group can be slow.
|
||||
+
|
||||
+""",
|
||||
+ 'fix': """If not required, you can remove the substring index type using dsconf:
|
||||
+
|
||||
+ # dsconf slapd-YOUR_INSTANCE backend index set --attr=ATTR BACKEND --del-type=sub
|
||||
+"""
|
||||
+}
|
||||
+
|
||||
# Disk Space check. Note - PARTITION is replaced by the calling function
|
||||
DSDSLE0001 = {
|
||||
'dsle': 'DSDSLE0001',
|
||||
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
|
||||
index 6bf1843ad..185398e5b 100644
|
||||
--- a/src/lib389/lib389/plugins.py
|
||||
+++ b/src/lib389/lib389/plugins.py
|
||||
@@ -12,7 +12,7 @@ import copy
|
||||
import os.path
|
||||
from lib389 import tasks
|
||||
from lib389._mapped_object import DSLdapObjects, DSLdapObject
|
||||
-from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001
|
||||
+from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001, DSMOLE0002
|
||||
from lib389.utils import ensure_str, ensure_list_bytes
|
||||
from lib389.schema import Schema
|
||||
from lib389._constants import (
|
||||
@@ -827,6 +827,41 @@ class MemberOfPlugin(Plugin):
|
||||
report['check'] = f'memberof:attr_indexes'
|
||||
yield report
|
||||
|
||||
+ def _lint_member_substring_index(self):
|
||||
+ if self.status():
|
||||
+ from lib389.backend import Backends
|
||||
+ backends = Backends(self._instance).list()
|
||||
+ membership_attrs = ['member', 'uniquemember']
|
||||
+ container = self.get_attr_val_utf8_l("nsslapd-plugincontainerscope")
|
||||
+ for backend in backends:
|
||||
+ suffix = backend.get_attr_val_utf8_l('nsslapd-suffix')
|
||||
+ if suffix == "cn=changelog":
|
||||
+ # Always skip retro changelog
|
||||
+ continue
|
||||
+ if container is not None:
|
||||
+ # Check if this backend is in the scope
|
||||
+ if not container.endswith(suffix):
|
||||
+ # skip this backend that is not in the scope
|
||||
+ continue
|
||||
+ indexes = backend.get_indexes()
|
||||
+ for attr in membership_attrs:
|
||||
+ report = copy.deepcopy(DSMOLE0002)
|
||||
+ try:
|
||||
+ index = indexes.get(attr)
|
||||
+ types = index.get_attr_vals_utf8_l("nsIndexType")
|
||||
+ if "sub" in types:
|
||||
+ report['detail'] = report['detail'].replace('ATTR', attr)
|
||||
+ report['detail'] = report['detail'].replace('BACKEND', suffix)
|
||||
+ report['fix'] = report['fix'].replace('ATTR', attr)
|
||||
+ report['fix'] = report['fix'].replace('BACKEND', suffix)
|
||||
+ report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid)
|
||||
+ report['items'].append(suffix)
|
||||
+ report['items'].append(attr)
|
||||
+ report['check'] = f'attr:substring_index'
|
||||
+ yield report
|
||||
+ except KeyError:
|
||||
+ continue
|
||||
+
|
||||
def get_attr(self):
|
||||
"""Get memberofattr attribute"""
|
||||
|
||||
--
|
||||
2.48.1
|
||||
|
||||
@ -1,651 +0,0 @@
From dba27e56161943fbcf54ecbc28337e2c81b07979 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 13 Jan 2025 18:03:07 +0100
Subject: [PATCH] Issue 6494 - Various errors when using extended matching rule
 on vlv sort filter (#6495)

* Issue 6494 - Various errors when using extended matching rule on vlv sort filter

Various issues when configuring and using an extended matching rule within a vlv sort filter:

Race condition around the key storage while indexing, leading to various heap and data corruption (lmdb only)
Crash while indexing if vlv is misconfigured, because a NULL key is not checked
Read past the end of a block because of a data type mismatch between SlapiValue and berval
Memory leaks

Solution:

Serialize the vlv index key generation if the vlv filter has an extended matching rule
Check for null keys
Always provide SlapiValue even if we want to get keys as bervals
Free the resources properly

Issue: #6494

Reviewed by: @mreynolds389 (Thanks!)

(cherry picked from commit 4bd27ecc4e1d21c8af5ab8cad795d70477179a98)
(cherry picked from commit 223a20250cbf29a546dcb398cfc76024d2f91347)
(cherry picked from commit 280043740a525eaf0438129fd8b99ca251c62366)
---
 .../tests/suites/indexes/regression_test.py   |  29 +++
 .../tests/suites/vlv/regression_test.py       | 183 ++++++++++++++++++
 ldap/servers/slapd/back-ldbm/cleanup.c        |   8 +
 ldap/servers/slapd/back-ldbm/dblayer.c        |  22 ++-
 ldap/servers/slapd/back-ldbm/ldbm_attr.c      |   2 +-
 ldap/servers/slapd/back-ldbm/matchrule.c      |   8 +-
 .../servers/slapd/back-ldbm/proto-back-ldbm.h |   3 +-
 ldap/servers/slapd/back-ldbm/sort.c           |  37 ++--
 ldap/servers/slapd/back-ldbm/vlv.c            |  26 +--
 ldap/servers/slapd/back-ldbm/vlv_srch.c       |   4 +-
 ldap/servers/slapd/generation.c               |   5 +
 ldap/servers/slapd/plugin_mr.c                |  12 +-
 src/lib389/lib389/backend.py                  |  10 +
 13 files changed, 292 insertions(+), 57 deletions(-)

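The new vlv regression test builds a vlvIndex whose sort key names an extended matching rule (uid:2.5.13.2, caseIgnoreMatch). A condensed lib389 sketch of that configuration, adapted from the test helper in this patch, is shown below; the basedn/bename values and the entry names are placeholders, and the import path for the vlv classes is an assumption.

import ldap
from lib389.index import VLVSearch, VLVIndex   # import path assumed

def add_vlv_with_matching_rule(inst, basedn, bename):
    # vlvSearch entry under the backend's ldbm configuration
    vlv_search = VLVSearch(inst)
    vlv_search.create(
        basedn=f"cn={bename},cn=ldbm database,cn=plugins,cn=config",
        properties={
            "objectclass": ["top", "vlvSearch"],
            "cn": "vlvMrSrch",
            "vlvbase": basedn,
            "vlvfilter": "(uid=*)",
            "vlvscope": str(ldap.SCOPE_SUBTREE),
        })
    # vlvIndex entry whose sort attribute carries the extended matching rule OID
    vlv_index = VLVIndex(inst)
    vlv_index.create(
        basedn=f"cn=vlvMrSrch,cn={bename},cn=ldbm database,cn=plugins,cn=config",
        properties={
            "objectclass": ["top", "vlvIndex"],
            "cn": "vlvMrIdx",
            "vlvsort": "uid:2.5.13.2",     # extended matching rule in the sort spec
        })
    return vlv_search, vlv_index

After creating both entries the index has to be rebuilt with a vlv reindex task, as the test below does, before the server will use it.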
diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
index fc6db727f..2196fb2ed 100644
|
||||
--- a/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
@@ -227,6 +227,35 @@ def test_reject_virtual_attr_for_indexing(topo):
|
||||
break
|
||||
|
||||
|
||||
+def test_reindex_extended_matching_rule(topo, add_backend_and_ldif_50K_users):
|
||||
+ """Check that index with extended matching rule are reindexed properly.
|
||||
+
|
||||
+ :id: 8a3198e8-cc5a-11ef-a3e7-482ae39447e5
|
||||
+ :setup: Standalone instance + a second backend with 50K users
|
||||
+ :steps:
|
||||
+ 1. Configure uid with 2.5.13.2 matching rule
|
||||
+ 1. Configure cn with 2.5.13.2 matching rule
|
||||
+ 2. Reindex
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topo.standalone
|
||||
+ tasks = Tasks(inst)
|
||||
+ be2 = Backends(topo.standalone).get_backend(SUFFIX2)
|
||||
+ index = be2.get_index('uid')
|
||||
+ index.replace('nsMatchingRule', '2.5.13.2')
|
||||
+ index = be2.get_index('cn')
|
||||
+ index.replace('nsMatchingRule', '2.5.13.2')
|
||||
+
|
||||
+ assert tasks.reindex(
|
||||
+ suffix=SUFFIX2,
|
||||
+ args={TASK_WAIT: True}
|
||||
+ ) == 0
|
||||
+
|
||||
+
|
||||
+
|
||||
if __name__ == "__main__":
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
index 3b66de8b5..6ab709bd3 100644
|
||||
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
@@ -22,6 +22,146 @@ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
+class BackendHandler:
|
||||
+ def __init__(self, inst, bedict, scope=ldap.SCOPE_ONELEVEL):
|
||||
+ self.inst = inst
|
||||
+ self.bedict = bedict
|
||||
+ self.bes = Backends(inst)
|
||||
+ self.scope = scope
|
||||
+ self.data = {}
|
||||
+
|
||||
+ def find_backend(self, bename):
|
||||
+ for be in self.bes.list():
|
||||
+ if be.get_attr_val_utf8_l('cn') == bename:
|
||||
+ return be
|
||||
+ return None
|
||||
+
|
||||
+ def cleanup(self):
|
||||
+ benames = list(self.bedict.keys())
|
||||
+ benames.reverse()
|
||||
+ for bename in benames:
|
||||
+ be = self.find_backend(bename)
|
||||
+ if be:
|
||||
+ be.delete()
|
||||
+
|
||||
+ def setup(self):
|
||||
+ # Create backends, add vlv index and populate the backends.
|
||||
+ for bename,suffix in self.bedict.items():
|
||||
+ be = self.bes.create(properties={
|
||||
+ 'cn': bename,
|
||||
+ 'nsslapd-suffix': suffix,
|
||||
+ })
|
||||
+ # Add suffix entry
|
||||
+ Organization(self.inst, dn=suffix).create(properties={ 'o': bename, })
|
||||
+ # Configure vlv
|
||||
+ vlv_search, vlv_index = create_vlv_search_and_index(
|
||||
+ self.inst, basedn=suffix,
|
||||
+ bename=bename, scope=self.scope,
|
||||
+ prefix=f'vlv_1lvl_{bename}')
|
||||
+ # Reindex
|
||||
+ reindex_task = Tasks(self.inst)
|
||||
+ assert reindex_task.reindex(
|
||||
+ suffix=suffix,
|
||||
+ attrname=vlv_index.rdn,
|
||||
+ args={TASK_WAIT: True},
|
||||
+ vlv=True
|
||||
+ ) == 0
|
||||
+ # Add ou=People entry
|
||||
+ OrganizationalUnits(self.inst, suffix).create(properties={'ou': 'People'})
|
||||
+ # Add another ou that will be deleted before the export
|
||||
+ # so that import will change the vlv search basedn entryid
|
||||
+ ou2 = OrganizationalUnits(self.inst, suffix).create(properties={'ou': 'dummy ou'})
|
||||
+ # Add a demo user so that vlv_check is happy
|
||||
+ dn = f'uid=demo_user,ou=people,{suffix}'
|
||||
+ UserAccount(self.inst, dn=dn).create( properties= {
|
||||
+ 'uid': 'demo_user',
|
||||
+ 'cn': 'Demo user',
|
||||
+ 'sn': 'Demo user',
|
||||
+ 'uidNumber': '99998',
|
||||
+ 'gidNumber': '99998',
|
||||
+ 'homeDirectory': '/var/empty',
|
||||
+ 'loginShell': '/bin/false',
|
||||
+ 'userpassword': DEMO_PW })
|
||||
+ # Add regular user
|
||||
+ add_users(self.inst, 10, suffix=suffix)
|
||||
+ # Removing ou2
|
||||
+ ou2.delete()
|
||||
+ # And export
|
||||
+ tasks = Tasks(self.inst)
|
||||
+ ldif = f'{self.inst.get_ldif_dir()}/db-{bename}.ldif'
|
||||
+ assert tasks.exportLDIF(suffix=suffix,
|
||||
+ output_file=ldif,
|
||||
+ args={TASK_WAIT: True}) == 0
|
||||
+ # Add the various parameters in topology_st.belist
|
||||
+ self.data[bename] = { 'be': be,
|
||||
+ 'suffix': suffix,
|
||||
+ 'ldif': ldif,
|
||||
+ 'vlv_search' : vlv_search,
|
||||
+ 'vlv_index' : vlv_index,
|
||||
+ 'dn' : dn}
|
||||
+
|
||||
+
|
||||
+def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
|
||||
+ scope=ldap.SCOPE_SUBTREE, prefix="vlv", vlvsort="cn"):
|
||||
+ vlv_searches = VLVSearch(inst)
|
||||
+ vlv_search_properties = {
|
||||
+ "objectclass": ["top", "vlvSearch"],
|
||||
+ "cn": f"{prefix}Srch",
|
||||
+ "vlvbase": basedn,
|
||||
+ "vlvfilter": "(uid=*)",
|
||||
+ "vlvscope": str(scope),
|
||||
+ }
|
||||
+ vlv_searches.create(
|
||||
+ basedn=f"cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
+ properties=vlv_search_properties
|
||||
+ )
|
||||
+
|
||||
+ vlv_index = VLVIndex(inst)
|
||||
+ vlv_index_properties = {
|
||||
+ "objectclass": ["top", "vlvIndex"],
|
||||
+ "cn": f"{prefix}Idx",
|
||||
+ "vlvsort": vlvsort,
|
||||
+ }
|
||||
+ vlv_index.create(
|
||||
+ basedn=f"cn={prefix}Srch,cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
+ properties=vlv_index_properties
|
||||
+ )
|
||||
+ return vlv_searches, vlv_index
|
||||
+
|
||||
+
|
||||
+@pytest.fixture
|
||||
+def vlv_setup_with_uid_mr(topology_st, request):
|
||||
+ inst = topology_st.standalone
|
||||
+ bename = 'be1'
|
||||
+ besuffix = f'o={bename}'
|
||||
+ beh = BackendHandler(inst, { bename: besuffix })
|
||||
+
|
||||
+ def fin():
|
||||
+ # Cleanup function
|
||||
+ if not DEBUGGING and inst.exists() and inst.status():
|
||||
+ beh.cleanup()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Make sure that our backend are not already present.
|
||||
+ beh.cleanup()
|
||||
+
|
||||
+ # Then add the new backend
|
||||
+ beh.setup()
|
||||
+
|
||||
+ index = Index(inst, f'cn=uid,cn=index,cn={bename},cn=ldbm database,cn=plugins,cn=config')
|
||||
+ index.add('nsMatchingRule', '2.5.13.2')
|
||||
+ reindex_task = Tasks(inst)
|
||||
+ assert reindex_task.reindex(
|
||||
+ suffix=besuffix,
|
||||
+ attrname='uid',
|
||||
+ args={TASK_WAIT: True}
|
||||
+ ) == 0
|
||||
+
|
||||
+ topology_st.beh = beh
|
||||
+ return topology_st
|
||||
+
|
||||
+
|
||||
@pytest.mark.DS47966
|
||||
def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2):
|
||||
"""
|
||||
@@ -105,6 +245,49 @@ def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2):
|
||||
entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)")
|
||||
|
||||
|
||||
+def test_vlv_with_mr(vlv_setup_with_uid_mr):
|
||||
+ """
|
||||
+ Testing vlv having specific matching rule
|
||||
+
|
||||
+ :id: 5e04afe2-beec-11ef-aa84-482ae39447e5
|
||||
+ :setup: Standalone with uid have a matching rule index
|
||||
+ :steps:
|
||||
+ 1. Append vlvIndex entries then vlvSearch entry in the dse.ldif
|
||||
+ 2. Restart the server
|
||||
+ :expectedresults:
|
||||
+ 1. Should Success.
|
||||
+ 2. Should Success.
|
||||
+ """
|
||||
+ inst = vlv_setup_with_uid_mr.standalone
|
||||
+ beh = vlv_setup_with_uid_mr.beh
|
||||
+ bename, besuffix = next(iter(beh.bedict.items()))
|
||||
+ vlv_searches, vlv_index = create_vlv_search_and_index(
|
||||
+ inst, basedn=besuffix, bename=bename,
|
||||
+ vlvsort="uid:2.5.13.2")
|
||||
+ # Reindex the vlv
|
||||
+ reindex_task = Tasks(inst)
|
||||
+ assert reindex_task.reindex(
|
||||
+ suffix=besuffix,
|
||||
+ attrname=vlv_index.rdn,
|
||||
+ args={TASK_WAIT: True},
|
||||
+ vlv=True
|
||||
+ ) == 0
|
||||
+
|
||||
+ inst.restart()
|
||||
+ users = UserAccounts(inst, besuffix)
|
||||
+ user_properties = {
|
||||
+ 'uid': f'a new testuser',
|
||||
+ 'cn': f'a new testuser',
|
||||
+ 'sn': 'user',
|
||||
+ 'uidNumber': '0',
|
||||
+ 'gidNumber': '0',
|
||||
+ 'homeDirectory': 'foo'
|
||||
+ }
|
||||
+ user = users.create(properties=user_properties)
|
||||
+ user.delete()
|
||||
+ assert inst.status()
|
||||
+
|
||||
+
|
||||
if __name__ == "__main__":
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/cleanup.c b/ldap/servers/slapd/back-ldbm/cleanup.c
|
||||
index 6b2e9faef..939d8bc4f 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/cleanup.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/cleanup.c
|
||||
@@ -15,12 +15,14 @@
|
||||
|
||||
#include "back-ldbm.h"
|
||||
#include "dblayer.h"
|
||||
+#include "vlv_srch.h"
|
||||
|
||||
int
|
||||
ldbm_back_cleanup(Slapi_PBlock *pb)
|
||||
{
|
||||
struct ldbminfo *li;
|
||||
Slapi_Backend *be;
|
||||
+ struct vlvSearch *nextp;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_cleanup", "ldbm backend cleaning up\n");
|
||||
slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
|
||||
@@ -45,6 +47,12 @@ ldbm_back_cleanup(Slapi_PBlock *pb)
|
||||
return 0;
|
||||
}
|
||||
|
||||
+ /* Release the vlv list */
|
||||
+ for (struct vlvSearch *p=be->vlvSearchList; p; p=nextp) {
|
||||
+ nextp = p->vlv_next;
|
||||
+ vlvSearch_delete(&p);
|
||||
+ }
|
||||
+
|
||||
/*
|
||||
* We check if li is NULL. Because of an issue in how we create backends
|
||||
* we share the li and plugin info between many unique backends. This causes
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
|
||||
index 05cc5b891..6b8ce0016 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
|
||||
@@ -494,8 +494,12 @@ int
|
||||
dblayer_close(struct ldbminfo *li, int dbmode)
|
||||
{
|
||||
dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
|
||||
-
|
||||
- return priv->dblayer_close_fn(li, dbmode);
|
||||
+ int rc = priv->dblayer_close_fn(li, dbmode);
|
||||
+ if (rc == 0) {
|
||||
+ /* Clean thread specific data */
|
||||
+ dblayer_destroy_txn_stack();
|
||||
+ }
|
||||
+ return rc;
|
||||
}
|
||||
|
||||
/* Routines for opening and closing random files in the DB_ENV.
|
||||
@@ -621,6 +625,9 @@ dblayer_erase_index_file(backend *be, struct attrinfo *a, PRBool use_lock, int n
|
||||
return 0;
|
||||
}
|
||||
struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
|
||||
+ if (NULL == li) {
|
||||
+ return 0;
|
||||
+ }
|
||||
dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
|
||||
|
||||
return priv->dblayer_rm_db_file_fn(be, a, use_lock, no_force_chkpt);
|
||||
@@ -1382,3 +1389,14 @@ dblayer_pop_pvt_txn(void)
|
||||
}
|
||||
return;
|
||||
}
|
||||
+
|
||||
+void
|
||||
+dblayer_destroy_txn_stack(void)
|
||||
+{
|
||||
+ /*
|
||||
+ * Cleanup for the main thread to avoid false/positive leaks from libasan
|
||||
+ * Note: data is freed because PR_SetThreadPrivate calls the
|
||||
+ * dblayer_cleanup_txn_stack callback
|
||||
+ */
|
||||
+ PR_SetThreadPrivate(thread_private_txn_stack, NULL);
|
||||
+}
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
|
||||
index 708756d3e..70700ca1d 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
|
||||
@@ -54,7 +54,7 @@ attrinfo_delete(struct attrinfo **pp)
|
||||
idl_release_private(*pp);
|
||||
(*pp)->ai_key_cmp_fn = NULL;
|
||||
slapi_ch_free((void **)&((*pp)->ai_type));
|
||||
- slapi_ch_free((void **)(*pp)->ai_index_rules);
|
||||
+ charray_free((*pp)->ai_index_rules);
|
||||
slapi_ch_free((void **)&((*pp)->ai_attrcrypt));
|
||||
attr_done(&((*pp)->ai_sattr));
|
||||
attrinfo_delete_idlistinfo(&(*pp)->ai_idlistinfo);
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/matchrule.c b/ldap/servers/slapd/back-ldbm/matchrule.c
|
||||
index 5d516b9f8..5365e8acf 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/matchrule.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/matchrule.c
|
||||
@@ -107,7 +107,7 @@ destroy_matchrule_indexer(Slapi_PBlock *pb)
* is destroyed
*/
int
-matchrule_values_to_keys(Slapi_PBlock *pb, struct berval **input_values, struct berval ***output_values)
+matchrule_values_to_keys(Slapi_PBlock *pb, Slapi_Value **input_values, struct berval ***output_values)
{
IFP mrINDEX = NULL;

@@ -135,10 +135,8 @@ matchrule_values_to_keys_sv(Slapi_PBlock *pb, Slapi_Value **input_values, Slapi_
slapi_pblock_get(pb, SLAPI_PLUGIN_MR_INDEX_SV_FN, &mrINDEX);
if (NULL == mrINDEX) { /* old school - does not have SV function */
int rc;
- struct berval **bvi = NULL, **bvo = NULL;
- valuearray_get_bervalarray(input_values, &bvi);
- rc = matchrule_values_to_keys(pb, bvi, &bvo);
- ber_bvecfree(bvi);
+ struct berval **bvo = NULL;
+ rc = matchrule_values_to_keys(pb, input_values, &bvo);
/* note - the indexer owns bvo and will free it when destroyed */
valuearray_init_bervalarray(bvo, output_values);
/* store output values in SV form - caller expects SLAPI_PLUGIN_MR_KEYS is Slapi_Value** */
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index d93ff9239..157788fa4 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -84,6 +84,7 @@ int dblayer_release_index_file(backend *be, struct attrinfo *a, DB *pDB);
int dblayer_erase_index_file(backend *be, struct attrinfo *a, PRBool use_lock, int no_force_chkpt);
int dblayer_get_id2entry(backend *be, DB **ppDB);
int dblayer_release_id2entry(backend *be, DB *pDB);
+void dblayer_destroy_txn_stack(void);
int dblayer_txn_init(struct ldbminfo *li, back_txn *txn);
int dblayer_txn_begin(backend *be, back_txnid parent_txn, back_txn *txn);
int dblayer_txn_begin_ext(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool use_lock);
@@ -560,7 +561,7 @@ int compute_allids_limit(Slapi_PBlock *pb, struct ldbminfo *li);
*/
int create_matchrule_indexer(Slapi_PBlock **pb, char *matchrule, char *type);
int destroy_matchrule_indexer(Slapi_PBlock *pb);
-int matchrule_values_to_keys(Slapi_PBlock *pb, struct berval **input_values, struct berval ***output_values);
+int matchrule_values_to_keys(Slapi_PBlock *pb, Slapi_Value **input_values, struct berval ***output_values);
int matchrule_values_to_keys_sv(Slapi_PBlock *pb, Slapi_Value **input_values, Slapi_Value ***output_values);

/*
diff --git a/ldap/servers/slapd/back-ldbm/sort.c b/ldap/servers/slapd/back-ldbm/sort.c
index 70ac60803..196af753f 100644
--- a/ldap/servers/slapd/back-ldbm/sort.c
+++ b/ldap/servers/slapd/back-ldbm/sort.c
@@ -536,30 +536,18 @@ compare_entries_sv(ID *id_a, ID *id_b, sort_spec *s, baggage_carrier *bc, int *e
valuearray_get_bervalarray(valueset_get_valuearray(&attr_b->a_present_values), &value_b);
} else {
/* Match rule case */
- struct berval **actual_value_a = NULL;
- struct berval **actual_value_b = NULL;
- struct berval **temp_value = NULL;
-
- valuearray_get_bervalarray(valueset_get_valuearray(&attr_a->a_present_values), &actual_value_a);
- valuearray_get_bervalarray(valueset_get_valuearray(&attr_b->a_present_values), &actual_value_b);
- matchrule_values_to_keys(this_one->mr_pb, actual_value_a, &temp_value);
- /* Now copy it, so the second call doesn't crap on it */
- value_a = slapi_ch_bvecdup(temp_value); /* Really, we'd prefer to not call the chXXX variant...*/
- matchrule_values_to_keys(this_one->mr_pb, actual_value_b, &value_b);
-
- if ((actual_value_a && !value_a) ||
- (actual_value_b && !value_b)) {
- ber_bvecfree(actual_value_a);
- ber_bvecfree(actual_value_b);
- CACHE_RETURN(&inst->inst_cache, &a);
- CACHE_RETURN(&inst->inst_cache, &b);
- *error = 1;
- return 0;
+ Slapi_Value **va_a = valueset_get_valuearray(&attr_a->a_present_values);
+ Slapi_Value **va_b = valueset_get_valuearray(&attr_b->a_present_values);
+
+ matchrule_values_to_keys(this_one->mr_pb, va_a, &value_a);
+ /* Plugin owns the memory ==> duplicate the key before next call garble it */
+ value_a = slapi_ch_bvecdup(value_a);
+ matchrule_values_to_keys(this_one->mr_pb, va_b, &value_b);
+
+ if ((va_a && !value_a) || (va_b && !value_b)) {
+ result = 0;
+ goto bail;
}
- if (actual_value_a)
- ber_bvecfree(actual_value_a);
- if (actual_value_b)
- ber_bvecfree(actual_value_b);
}
/* Compare them */
if (!order) {
@@ -582,9 +570,10 @@ compare_entries_sv(ID *id_a, ID *id_b, sort_spec *s, baggage_carrier *bc, int *e
}
/* If so, proceed to the next attribute for comparison */
}
+ *error = 0;
+bail:
CACHE_RETURN(&inst->inst_cache, &a);
CACHE_RETURN(&inst->inst_cache, &b);
- *error = 0;
return result;
}

diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c
|
||||
index 121fb3667..70e0bac85 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/vlv.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/vlv.c
|
||||
@@ -605,7 +605,7 @@ vlv_getindices(IFP callback_fn, void *param, backend *be)
|
||||
* generate the same composite key, so we append the EntryID
|
||||
* to ensure the uniqueness of the key.
|
||||
*
|
||||
- * Always creates a key. Never returns NULL.
|
||||
+ * May return NULL in case of errors (typically in some configuration error cases)
|
||||
*/
|
||||
static struct vlv_key *
|
||||
vlv_create_key(struct vlvIndex *p, struct backentry *e)
|
||||
@@ -659,10 +659,8 @@ vlv_create_key(struct vlvIndex *p, struct backentry *e)
|
||||
/* Matching rule. Do the magic mangling. Plugin owns the memory. */
|
||||
if (p->vlv_mrpb[sortattr] != NULL) {
|
||||
/* xxxPINAKI */
|
||||
- struct berval **bval = NULL;
|
||||
Slapi_Value **va = valueset_get_valuearray(&attr->a_present_values);
|
||||
- valuearray_get_bervalarray(va, &bval);
|
||||
- matchrule_values_to_keys(p->vlv_mrpb[sortattr], bval, &value);
|
||||
+ matchrule_values_to_keys(p->vlv_mrpb[sortattr], va, &value);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -779,6 +777,13 @@ do_vlv_update_index(back_txn *txn, struct ldbminfo *li __attribute__((unused)),
|
||||
}
|
||||
|
||||
key = vlv_create_key(pIndex, entry);
|
||||
+ if (key == NULL) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "vlv_create_key", "Unable to generate vlv %s index key."
|
||||
+ " There may be a configuration issue.\n", pIndex->vlv_name);
|
||||
+ dblayer_release_index_file(be, pIndex->vlv_attrinfo, db);
|
||||
+ return rc;
|
||||
+ }
|
||||
+
|
||||
if (NULL != txn) {
|
||||
db_txn = txn->back_txn_txn;
|
||||
} else {
|
||||
@@ -949,11 +954,11 @@ vlv_create_matching_rule_value(Slapi_PBlock *pb, struct berval *original_value)
|
||||
struct berval **value = NULL;
|
||||
if (pb != NULL) {
|
||||
struct berval **outvalue = NULL;
|
||||
- struct berval *invalue[2];
|
||||
- invalue[0] = original_value; /* jcm: cast away const */
|
||||
- invalue[1] = NULL;
|
||||
+ Slapi_Value v_in = {0};
|
||||
+ Slapi_Value *va_in[2] = { &v_in, NULL };
|
||||
+ slapi_value_init_berval(&v_in, original_value);
|
||||
/* The plugin owns the memory it returns in outvalue */
|
||||
- matchrule_values_to_keys(pb, invalue, &outvalue);
|
||||
+ matchrule_values_to_keys(pb, va_in, &outvalue);
|
||||
if (outvalue != NULL) {
|
||||
value = slapi_ch_bvecdup(outvalue);
|
||||
}
|
||||
@@ -1610,11 +1615,8 @@ retry:
|
||||
PRBool needFree = PR_FALSE;
|
||||
|
||||
if (sort_control->mr_pb != NULL) {
|
||||
- struct berval **tmp_entry_value = NULL;
|
||||
-
|
||||
- valuearray_get_bervalarray(csn_value, &tmp_entry_value);
|
||||
/* Matching rule. Do the magic mangling. Plugin owns the memory. */
|
||||
- matchrule_values_to_keys(sort_control->mr_pb, /* xxxPINAKI needs modification attr->a_vals */ tmp_entry_value, &entry_value);
|
||||
+ matchrule_values_to_keys(sort_control->mr_pb, csn_value, &entry_value);
|
||||
} else {
|
||||
valuearray_get_bervalarray(csn_value, &entry_value);
|
||||
needFree = PR_TRUE; /* entry_value is a copy */
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/vlv_srch.c b/ldap/servers/slapd/back-ldbm/vlv_srch.c
|
||||
index fe1208d59..11d1c715b 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/vlv_srch.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/vlv_srch.c
|
||||
@@ -203,6 +203,9 @@ vlvSearch_delete(struct vlvSearch **ppvs)
|
||||
{
|
||||
if (ppvs != NULL && *ppvs != NULL) {
|
||||
struct vlvIndex *pi, *ni;
|
||||
+ if ((*ppvs)->vlv_e) {
|
||||
+ slapi_entry_free((struct slapi_entry *)((*ppvs)->vlv_e));
|
||||
+ }
|
||||
slapi_sdn_free(&((*ppvs)->vlv_dn));
|
||||
slapi_ch_free((void **)&((*ppvs)->vlv_name));
|
||||
slapi_sdn_free(&((*ppvs)->vlv_base));
|
||||
@@ -217,7 +220,6 @@ vlvSearch_delete(struct vlvSearch **ppvs)
|
||||
pi = ni;
|
||||
}
|
||||
slapi_ch_free((void **)ppvs);
|
||||
- *ppvs = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
diff --git a/ldap/servers/slapd/generation.c b/ldap/servers/slapd/generation.c
|
||||
index c4f20f793..89f097322 100644
|
||||
--- a/ldap/servers/slapd/generation.c
|
||||
+++ b/ldap/servers/slapd/generation.c
|
||||
@@ -93,9 +93,13 @@ get_server_dataversion()
|
||||
lenstr *l = NULL;
|
||||
Slapi_Backend *be;
|
||||
char *cookie;
|
||||
+ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
+ /* Serialize to avoid race condition */
|
||||
+ pthread_mutex_lock(&mutex);
|
||||
/* we already cached the copy - just return it */
|
||||
if (server_dataversion_id != NULL) {
|
||||
+ pthread_mutex_unlock(&mutex);
|
||||
return server_dataversion_id;
|
||||
}
|
||||
|
||||
@@ -130,5 +134,6 @@ get_server_dataversion()
|
||||
server_dataversion_id = slapi_ch_strdup(l->ls_buf);
|
||||
}
|
||||
lenstr_free(&l);
|
||||
+ pthread_mutex_unlock(&mutex);
|
||||
return server_dataversion_id;
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c
|
||||
index 13f76fe52..6cf88b7de 100644
|
||||
--- a/ldap/servers/slapd/plugin_mr.c
|
||||
+++ b/ldap/servers/slapd/plugin_mr.c
|
||||
@@ -391,28 +391,18 @@ mr_wrap_mr_index_sv_fn(Slapi_PBlock *pb)
|
||||
return rc;
|
||||
}
|
||||
|
||||
-/* this function takes SLAPI_PLUGIN_MR_VALUES as struct berval ** and
|
||||
+/* this function takes SLAPI_PLUGIN_MR_VALUES as Slapi_Value ** and
|
||||
returns SLAPI_PLUGIN_MR_KEYS as struct berval **
|
||||
*/
|
||||
static int
|
||||
mr_wrap_mr_index_fn(Slapi_PBlock *pb)
|
||||
{
|
||||
int rc = -1;
|
||||
- struct berval **in_vals = NULL;
|
||||
struct berval **out_vals = NULL;
|
||||
struct mr_private *mrpriv = NULL;
|
||||
- Slapi_Value **in_vals_sv = NULL;
|
||||
Slapi_Value **out_vals_sv = NULL;
|
||||
|
||||
- slapi_pblock_get(pb, SLAPI_PLUGIN_MR_VALUES, &in_vals); /* get bervals */
|
||||
- /* convert bervals to sv ary */
|
||||
- valuearray_init_bervalarray(in_vals, &in_vals_sv);
|
||||
- slapi_pblock_set(pb, SLAPI_PLUGIN_MR_VALUES, in_vals_sv); /* use sv */
|
||||
rc = mr_wrap_mr_index_sv_fn(pb);
|
||||
- /* clean up in_vals_sv */
|
||||
- valuearray_free(&in_vals_sv);
|
||||
- /* restore old in_vals */
|
||||
- slapi_pblock_set(pb, SLAPI_PLUGIN_MR_VALUES, in_vals);
|
||||
/* get result sv keys */
|
||||
slapi_pblock_get(pb, SLAPI_PLUGIN_MR_KEYS, &out_vals_sv);
|
||||
/* convert to bvec */
|
||||
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
index 9acced205..cee073ea7 100644
--- a/src/lib389/lib389/backend.py
+++ b/src/lib389/lib389/backend.py
@@ -1029,6 +1029,16 @@ class Backends(DSLdapObjects):
for be in sorted(self.list(), key=lambda be: len(be.get_suffix()), reverse=True):
be.delete()

+ def get_backend(self, suffix):
+ """
+ Return the backend associated with the provided suffix.
+ """
+ suffix_l = suffix.lower()
+ for be in self.list():
+ if be.get_attr_val_utf8_l('nsslapd-suffix') == suffix_l:
+ return be
+ return None
+

class DatabaseConfig(DSLdapObject):
"""Backend Database configuration
--
2.48.1

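For quick reference, a minimal lib389 sketch of how the get_backend() helper added in the hunk above could be used. This is an illustrative assumption, not part of the patch: `inst` stands for an already-connected DirSrv instance and the suffix is only an example value.

    from lib389.backend import Backends

    # Look up the backend that serves a suffix; the helper returns None when
    # no backend matches, so callers can branch instead of catching an error.
    be = Backends(inst).get_backend('dc=example,dc=com')
    if be is not None:
        print(be.get_attr_val_utf8('nsslapd-suffix'))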
@ -1,230 +0,0 @@
|
||||
From bd2829d04491556c35a0b36b591c09a69baf6546 Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Mon, 11 Dec 2023 11:58:40 +0100
|
||||
Subject: [PATCH] Issue 6004 - idletimeout may be ignored (#6005)
|
||||
|
||||
* Issue 6004 - idletimeout may be ignored
|
||||
|
||||
Problem: idletimeout is still not handled when binding as non root (unless there are some activity
|
||||
on another connection)
|
||||
Fix:
|
||||
Add a slapi_eq_repeat_rel handler that walks all active connection every seconds and check if the timeout is expired.
|
||||
Note about CI test:
|
||||
Notice that idletimeout is never enforced for connections bound as root (i.e cn=directory manager).
|
||||
|
||||
Issue #6004
|
||||
|
||||
Reviewed by: @droideck, @tbordaz (Thanks!)
|
||||
|
||||
(cherry picked from commit 86b5969acbe124eec8c89bcf1ab2156b2b140c17)
|
||||
(cherry picked from commit bdb0a72b4953678e5418406b3c202dfa2c7469a2)
|
||||
(cherry picked from commit 61cebc191cd4090072dda691b9956dbde4cf7c48)
|
||||
---
|
||||
.../tests/suites/config/regression_test.py | 82 ++++++++++++++++++-
|
||||
ldap/servers/slapd/daemon.c | 52 +++++++++++-
|
||||
2 files changed, 128 insertions(+), 6 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/config/regression_test.py b/dirsrvtests/tests/suites/config/regression_test.py
|
||||
index 0000dd82d..8dbba8cd2 100644
|
||||
--- a/dirsrvtests/tests/suites/config/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/config/regression_test.py
|
||||
@@ -6,20 +6,49 @@
|
||||
# See LICENSE for details.
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
#
|
||||
+import os
|
||||
import logging
|
||||
import pytest
|
||||
+import time
|
||||
from lib389.utils import *
|
||||
from lib389.dseldif import DSEldif
|
||||
-from lib389.config import LDBMConfig
|
||||
+from lib389.config import BDB_LDBMConfig, LDBMConfig, Config
|
||||
from lib389.backend import Backends
|
||||
from lib389.topologies import topology_st as topo
|
||||
+from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
|
||||
+from lib389._constants import DEFAULT_SUFFIX, PASSWORD, DN_DM
|
||||
|
||||
pytestmark = pytest.mark.tier0
|
||||
|
||||
logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
CUSTOM_MEM = '9100100100'
|
||||
+IDLETIMEOUT = 5
|
||||
+DN_TEST_USER = f'uid={TEST_USER_PROPERTIES["uid"]},ou=People,{DEFAULT_SUFFIX}'
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="module")
|
||||
+def idletimeout_topo(topo, request):
|
||||
+ """Create an instance with a test user and set idletimeout"""
|
||||
+ inst = topo.standalone
|
||||
+ config = Config(inst)
|
||||
+
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ user = users.create(properties={
|
||||
+ **TEST_USER_PROPERTIES,
|
||||
+ 'userpassword' : PASSWORD,
|
||||
+ })
|
||||
+ config.replace('nsslapd-idletimeout', str(IDLETIMEOUT))
|
||||
+
|
||||
+ def fin():
|
||||
+ if not DEBUGGING:
|
||||
+ config.reset('nsslapd-idletimeout')
|
||||
+ user.delete()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+ return topo
|
||||
|
||||
|
||||
# Function to return value of available memory in kb
|
||||
@@ -79,7 +108,7 @@ def test_maxbersize_repl(topo):
|
||||
nsslapd-errorlog-logmaxdiskspace are set in certain order
|
||||
|
||||
:id: 743e912c-2be4-4f5f-9c2a-93dcb18f51a0
|
||||
- :setup: MMR with two suppliers
|
||||
+ :setup: Standalone Instance
|
||||
:steps:
|
||||
1. Stop the instance
|
||||
2. Set nsslapd-errorlog-maxlogsize before/after
|
||||
@@ -112,3 +141,52 @@ def test_maxbersize_repl(topo):
|
||||
log.info("Assert no init_dse_file errors in the error log")
|
||||
assert not inst.ds_error_log.match('.*ERR - init_dse_file.*')
|
||||
|
||||
+
|
||||
+def test_bdb_config(topo):
|
||||
+ """Check that bdb config entry exists
|
||||
+
|
||||
+ :id: edbc6f54-7c98-11ee-b1c0-482ae39447e5
|
||||
+ :setup: standalone
|
||||
+ :steps:
|
||||
+ 1. Check that bdb config instance exists.
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topo.standalone
|
||||
+ assert BDB_LDBMConfig(inst).exists()
|
||||
+
|
||||
+
|
||||
+@pytest.mark.parametrize("dn,expected_result", [(DN_TEST_USER, True), (DN_DM, False)])
|
||||
+def test_idletimeout(idletimeout_topo, dn, expected_result):
|
||||
+ """Check that bdb config entry exists
|
||||
+
|
||||
+ :id: b20f2826-942a-11ee-827b-482ae39447e5
|
||||
+ :parametrized: yes
|
||||
+ :setup: Standalone Instance with test user and idletimeout
|
||||
+ :steps:
|
||||
+ 1. Open new ldap connection
|
||||
+ 2. Bind with the provided dn
|
||||
+ 3. Wait longer than idletimeout
|
||||
+ 4. Try to bind again the provided dn and check if
|
||||
+ connection is closed or not.
|
||||
+ 5. Check if result is the expected one.
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = idletimeout_topo.standalone
|
||||
+
|
||||
+ l = ldap.initialize(f'ldap://localhost:{inst.port}')
|
||||
+ l.bind_s(dn, PASSWORD)
|
||||
+ time.sleep(IDLETIMEOUT+1)
|
||||
+ try:
|
||||
+ l.bind_s(dn, PASSWORD)
|
||||
+ result = False
|
||||
+ except ldap.SERVER_DOWN:
|
||||
+ result = True
|
||||
+ assert expected_result == result
|
||||
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
|
||||
index 57e07e5f5..6df109760 100644
|
||||
--- a/ldap/servers/slapd/daemon.c
|
||||
+++ b/ldap/servers/slapd/daemon.c
|
||||
@@ -68,6 +68,8 @@
|
||||
#define SLAPD_ACCEPT_WAKEUP_TIMER 250
|
||||
#endif
|
||||
|
||||
+#define MILLISECONDS_PER_SECOND 1000
|
||||
+
|
||||
int slapd_wakeup_timer = SLAPD_WAKEUP_TIMER; /* time in ms to wakeup */
|
||||
int slapd_accept_wakeup_timer = SLAPD_ACCEPT_WAKEUP_TIMER; /* time in ms to wakeup */
|
||||
#ifdef notdef /* GGOODREPL */
|
||||
@@ -1045,6 +1047,48 @@ slapd_sockets_ports_free(daemon_ports_t *ports_info)
|
||||
#endif
|
||||
}
|
||||
|
||||
+/*
|
||||
+ * Tells if idle timeout has expired
|
||||
+ */
|
||||
+static inline int __attribute__((always_inline))
|
||||
+has_idletimeout_expired(Connection *c, time_t curtime)
|
||||
+{
|
||||
+ return (c->c_state != CONN_STATE_FREE && !c->c_gettingber &&
|
||||
+ c->c_idletimeout > 0 && NULL == c->c_ops &&
|
||||
+ curtime - c->c_idlesince >= c->c_idletimeout);
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * slapi_eq_repeat_rel callback that checks that idletimeout has not expired.
|
||||
+ */
|
||||
+void
|
||||
+check_idletimeout(time_t when __attribute__((unused)), void *arg __attribute__((unused)) )
|
||||
+{
|
||||
+ Connection_Table *ct = the_connection_table;
|
||||
+ time_t curtime = slapi_current_rel_time_t();
|
||||
+ /* Walk all active connections of all connection listeners */
|
||||
+ for (int list_num = 0; list_num < ct->list_num; list_num++) {
|
||||
+ for (Connection *c = connection_table_get_first_active_connection(ct, list_num);
|
||||
+ c != NULL; c = connection_table_get_next_active_connection(ct, c)) {
|
||||
+ if (!has_idletimeout_expired(c, curtime)) {
|
||||
+ continue;
|
||||
+ }
|
||||
+ /* Looks like idletimeout has expired, lets acquire the lock
|
||||
+ * and double check.
|
||||
+ */
|
||||
+ if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
|
||||
+ continue;
|
||||
+ }
|
||||
+ if (has_idletimeout_expired(c, curtime)) {
|
||||
+ /* idle timeout has expired */
|
||||
+ disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
+ SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
|
||||
+ }
|
||||
+ pthread_mutex_unlock(&(c->c_mutex));
|
||||
+ }
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
void
|
||||
slapd_daemon(daemon_ports_t *ports)
|
||||
{
|
||||
@@ -1258,7 +1302,9 @@ slapd_daemon(daemon_ports_t *ports)
|
||||
"MAINPID=%lu",
|
||||
(unsigned long)getpid());
|
||||
#endif
|
||||
-
|
||||
+ slapi_eq_repeat_rel(check_idletimeout, NULL,
|
||||
+ slapi_current_rel_time_t(),
|
||||
+ MILLISECONDS_PER_SECOND);
|
||||
/* The meat of the operation is in a loop on a call to select */
|
||||
while (!g_get_shutdown()) {
|
||||
int select_return = 0;
|
||||
@@ -1734,9 +1780,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused
|
||||
disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
SLAPD_DISCONNECT_POLL, EPIPE);
|
||||
}
|
||||
- } else if (c->c_idletimeout > 0 &&
|
||||
- (curtime - c->c_idlesince) >= c->c_idletimeout &&
|
||||
- NULL == c->c_ops) {
|
||||
+ } else if (has_idletimeout_expired(c, curtime)) {
|
||||
/* idle timeout */
|
||||
disconnect_server_nomutex(c, c->c_connid, -1,
|
||||
SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
|
||||
--
|
||||
2.48.1
|
||||
|
||||
@ -1,69 +0,0 @@
From e9fe6e074130406328b8e932a5c2efa814d190a0 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 5 Feb 2025 09:41:30 +0100
Subject: [PATCH] Issue 6004 - (2nd) idletimeout may be ignored (#6569)

Problem:
multiple listener threads was implemented in 2.x and after
This is missing in 1.4.3 so the cherry pick should be adapted
Fix:
skip the loop with listeners

Issue #6004

Reviewed by: Jamie Chapman (Thanks !)
---
ldap/servers/slapd/daemon.c | 36 +++++++++++++++++-------------------
1 file changed, 17 insertions(+), 19 deletions(-)

diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 6df109760..bef75e4a3 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1066,26 +1066,24 @@ check_idletimeout(time_t when __attribute__((unused)), void *arg __attribute__((
{
Connection_Table *ct = the_connection_table;
time_t curtime = slapi_current_rel_time_t();
- /* Walk all active connections of all connection listeners */
- for (int list_num = 0; list_num < ct->list_num; list_num++) {
- for (Connection *c = connection_table_get_first_active_connection(ct, list_num);
- c != NULL; c = connection_table_get_next_active_connection(ct, c)) {
- if (!has_idletimeout_expired(c, curtime)) {
- continue;
- }
- /* Looks like idletimeout has expired, lets acquire the lock
- * and double check.
- */
- if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
- continue;
- }
- if (has_idletimeout_expired(c, curtime)) {
- /* idle timeout has expired */
- disconnect_server_nomutex(c, c->c_connid, -1,
- SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
- }
- pthread_mutex_unlock(&(c->c_mutex));
+ /* Walk all active connections */
+ for (Connection *c = connection_table_get_first_active_connection(ct);
+ c != NULL; c = connection_table_get_next_active_connection(ct, c)) {
+ if (!has_idletimeout_expired(c, curtime)) {
+ continue;
+ }
+ /* Looks like idletimeout has expired, lets acquire the lock
+ * and double check.
+ */
+ if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) {
+ continue;
+ }
+ if (has_idletimeout_expired(c, curtime)) {
+ /* idle timeout has expired */
+ disconnect_server_nomutex(c, c->c_connid, -1,
+ SLAPD_DISCONNECT_IDLE_TIMEOUT, ETIMEDOUT);
}
+ pthread_mutex_unlock(&(c->c_mutex));
}
}

--
2.48.1

@ -1,52 +0,0 @@
From b2edc371c5ca4fd24ef469c64829c48824098e7f Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 8 Jan 2025 12:57:52 -0500
Subject: [PATCH] Issue 6485 - Fix double free in USN cleanup task

Description:

ASAN report shows double free of bind dn in the USN cleanup task data. The bind
dn was passed as a reference so it should never have to be freed by the cleanup
task.

Relates: https://github.com/389ds/389-ds-base/issues/6485

Reviewed by: tbordaz(Thanks!)
---
ldap/servers/plugins/usn/usn_cleanup.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/ldap/servers/plugins/usn/usn_cleanup.c b/ldap/servers/plugins/usn/usn_cleanup.c
index bdb55e6b1..7eaf0f88f 100644
--- a/ldap/servers/plugins/usn/usn_cleanup.c
+++ b/ldap/servers/plugins/usn/usn_cleanup.c
@@ -240,7 +240,7 @@ usn_cleanup_add(Slapi_PBlock *pb,
char *suffix = NULL;
char *backend = NULL;
char *maxusn = NULL;
- char *bind_dn;
+ char *bind_dn = NULL;
struct usn_cleanup_data *cleanup_data = NULL;
int rv = SLAPI_DSE_CALLBACK_OK;
Slapi_Task *task = NULL;
@@ -323,8 +323,7 @@ usn_cleanup_add(Slapi_PBlock *pb,
suffix = NULL; /* don't free in this function */
cleanup_data->maxusn_to_delete = maxusn;
maxusn = NULL; /* don't free in this function */
- cleanup_data->bind_dn = bind_dn;
- bind_dn = NULL; /* don't free in this function */
+ cleanup_data->bind_dn = slapi_ch_strdup(bind_dn);
slapi_task_set_data(task, cleanup_data);

/* start the USN tombstone cleanup task as a separate thread */
@@ -363,7 +362,6 @@ usn_cleanup_task_destructor(Slapi_Task *task)
slapi_ch_free_string(&mydata->suffix);
slapi_ch_free_string(&mydata->maxusn_to_delete);
slapi_ch_free_string(&mydata->bind_dn);
- /* Need to cast to avoid a compiler warning */
slapi_ch_free((void **)&mydata);
}
}
--
2.48.1

@ -1,38 +0,0 @@
From 679262c0c292413851d2d004b588ecfd7d91c85a Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Tue, 11 Feb 2025 18:06:34 +0000
Subject: [PATCH] Issue 5841 - dsconf incorrectly setting up Pass-Through
Authentication (#6601)

Bug description:
During init, PAMPassThroughAuthConfigs defines an "objectclass=nsslapdplugin"
plugin object. During filter creation, dsconf fails as objectclass=nsslapdplugin
is not present in the PAM PT config entry. This objectclass has been removed in
all other branches, branch 1.4.3 was skipped as there are cherry pick conflicts.

Fix description:
Remove nsslapdplugin from the plugin objecti, objectclass list.

Fixes: https://github.com/389ds/389-ds-base/issues/5841

Reviewed by: @progier389 (Thank you)
---
src/lib389/lib389/plugins.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
index 185398e5b..25b49dae4 100644
--- a/src/lib389/lib389/plugins.py
+++ b/src/lib389/lib389/plugins.py
@@ -1579,7 +1579,7 @@ class PAMPassThroughAuthConfigs(DSLdapObjects):

def __init__(self, instance, basedn="cn=PAM Pass Through Auth,cn=plugins,cn=config"):
super(PAMPassThroughAuthConfigs, self).__init__(instance)
- self._objectclasses = ['top', 'extensibleObject', 'nsslapdplugin', 'pamConfig']
+ self._objectclasses = ['top', 'extensibleObject', 'pamConfig']
self._filterattrs = ['cn']
self._scope = ldap.SCOPE_ONELEVEL
self._childobject = PAMPassThroughAuthConfig
--
2.48.1

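As context for the fix above, a minimal lib389 sketch of listing the PAM pass-through configuration entries that the corrected objectclass list makes filterable again. This is an illustrative assumption rather than part of the patch: `inst` stands for an already-connected DirSrv instance.

    from lib389.plugins import PAMPassThroughAuthConfigs

    # With 'nsslapdplugin' dropped from the objectclass list, the search
    # filter built by DSLdapObjects matches the existing PAM PT entries.
    pam_configs = PAMPassThroughAuthConfigs(inst)
    for cfg in pam_configs.list():
        print(cfg.dn)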
File diff suppressed because it is too large
@ -1,319 +0,0 @@
|
||||
From 7d534efdcd96b13524dae587c3c5994ed01924ab Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Fri, 16 Feb 2024 13:52:36 -0800
|
||||
Subject: [PATCH] Issue 6067 - Improve dsidm CLI No Such Entry handling (#6079)
|
||||
|
||||
Description: Add additional error processing to dsidm CLI tool for when basedn
|
||||
or OU subentries are absent.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/6067
|
||||
|
||||
Reviewed by: @vashirov (Thanks!)
|
||||
---
|
||||
src/lib389/cli/dsidm | 21 ++++++++-------
|
||||
src/lib389/lib389/cli_idm/__init__.py | 38 ++++++++++++++++++++++++++-
|
||||
src/lib389/lib389/cli_idm/account.py | 4 +--
|
||||
src/lib389/lib389/cli_idm/service.py | 4 ++-
|
||||
src/lib389/lib389/idm/group.py | 10 ++++---
|
||||
src/lib389/lib389/idm/posixgroup.py | 5 ++--
|
||||
src/lib389/lib389/idm/services.py | 5 ++--
|
||||
src/lib389/lib389/idm/user.py | 5 ++--
|
||||
8 files changed, 67 insertions(+), 25 deletions(-)
|
||||
|
||||
diff --git a/src/lib389/cli/dsidm b/src/lib389/cli/dsidm
|
||||
index 1b739b103..970973f4f 100755
|
||||
--- a/src/lib389/cli/dsidm
|
||||
+++ b/src/lib389/cli/dsidm
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2023 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -19,6 +19,7 @@ import argparse
|
||||
import argcomplete
|
||||
from lib389.utils import get_instance_list, instance_choices
|
||||
from lib389._constants import DSRC_HOME
|
||||
+from lib389.cli_idm import _get_basedn_arg
|
||||
from lib389.cli_idm import account as cli_account
|
||||
from lib389.cli_idm import initialise as cli_init
|
||||
from lib389.cli_idm import organizationalunit as cli_ou
|
||||
@@ -117,14 +118,6 @@ if __name__ == '__main__':
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
||||
|
||||
- if dsrc_inst['basedn'] is None:
|
||||
- errmsg = "Must provide a basedn!"
|
||||
- if args.json:
|
||||
- sys.stderr.write('{"desc": "%s"}\n' % errmsg)
|
||||
- else:
|
||||
- log.error(errmsg)
|
||||
- sys.exit(1)
|
||||
-
|
||||
if not args.verbose:
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
|
||||
@@ -135,7 +128,15 @@ if __name__ == '__main__':
|
||||
result = False
|
||||
try:
|
||||
inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose, args=args)
|
||||
- result = args.func(inst, dsrc_inst['basedn'], log, args)
|
||||
+ basedn = _get_basedn_arg(inst, args, log, msg="Enter basedn")
|
||||
+ if basedn is None:
|
||||
+ errmsg = "Must provide a basedn!"
|
||||
+ if args.json:
|
||||
+ sys.stderr.write('{"desc": "%s"}\n' % errmsg)
|
||||
+ else:
|
||||
+ log.error(errmsg)
|
||||
+ sys.exit(1)
|
||||
+ result = args.func(inst, basedn, log, args)
|
||||
if args.verbose:
|
||||
log.info("Command successful.")
|
||||
except Exception as e:
|
||||
diff --git a/src/lib389/lib389/cli_idm/__init__.py b/src/lib389/lib389/cli_idm/__init__.py
|
||||
index 0dab54847..e3622246d 100644
|
||||
--- a/src/lib389/lib389/cli_idm/__init__.py
|
||||
+++ b/src/lib389/lib389/cli_idm/__init__.py
|
||||
@@ -1,15 +1,30 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2023 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
# See LICENSE for details.
|
||||
# --- END COPYRIGHT BLOCK ---
|
||||
|
||||
+import sys
|
||||
import ldap
|
||||
from getpass import getpass
|
||||
import json
|
||||
+from lib389._mapped_object import DSLdapObject
|
||||
+from lib389.cli_base import _get_dn_arg
|
||||
+from lib389.idm.user import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_USER
|
||||
+from lib389.idm.group import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_GROUP
|
||||
+from lib389.idm.posixgroup import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_POSIXGROUP
|
||||
+from lib389.idm.services import DEFAULT_BASEDN_RDN as DEFAULT_BASEDN_RDN_SERVICES
|
||||
+
|
||||
+# The key is module name, the value is default RDN
|
||||
+BASEDN_RDNS = {
|
||||
+ 'user': DEFAULT_BASEDN_RDN_USER,
|
||||
+ 'group': DEFAULT_BASEDN_RDN_GROUP,
|
||||
+ 'posixgroup': DEFAULT_BASEDN_RDN_POSIXGROUP,
|
||||
+ 'service': DEFAULT_BASEDN_RDN_SERVICES,
|
||||
+}
|
||||
|
||||
|
||||
def _get_arg(args, msg=None):
|
||||
@@ -37,6 +52,27 @@ def _get_args(args, kws):
|
||||
return kwargs
|
||||
|
||||
|
||||
+def _get_basedn_arg(inst, args, log, msg=None):
|
||||
+ basedn_arg = _get_dn_arg(args.basedn, msg="Enter basedn")
|
||||
+ if not DSLdapObject(inst, basedn_arg).exists():
|
||||
+ raise ValueError(f'The base DN "{basedn_arg}" does not exist.')
|
||||
+
|
||||
+ # Get the RDN based on the last part of the module name if applicable
|
||||
+ # (lib389.cli_idm.user -> user)
|
||||
+ try:
|
||||
+ command_name = args.func.__module__.split('.')[-1]
|
||||
+ object_rdn = BASEDN_RDNS[command_name]
|
||||
+ # Check if the DN for our command exists
|
||||
+ command_basedn = f'{object_rdn},{basedn_arg}'
|
||||
+ if not DSLdapObject(inst, command_basedn).exists():
|
||||
+ errmsg = f'The DN "{command_basedn}" does not exist.'
|
||||
+ errmsg += f' It is required for "{command_name}" subcommand. Please create it first.'
|
||||
+ raise ValueError(errmsg)
|
||||
+ except KeyError:
|
||||
+ pass
|
||||
+ return basedn_arg
|
||||
+
|
||||
+
|
||||
# This is really similar to get_args, but generates from an array
|
||||
def _get_attributes(args, attrs):
|
||||
kwargs = {}
|
||||
diff --git a/src/lib389/lib389/cli_idm/account.py b/src/lib389/lib389/cli_idm/account.py
|
||||
index 5d7b9cc77..15f766588 100644
|
||||
--- a/src/lib389/lib389/cli_idm/account.py
|
||||
+++ b/src/lib389/lib389/cli_idm/account.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2023, Red Hat inc,
|
||||
+# Copyright (C) 2024, Red Hat inc,
|
||||
# Copyright (C) 2018, William Brown <william@blackhats.net.au>
|
||||
# All rights reserved.
|
||||
#
|
||||
@@ -91,7 +91,6 @@ def entry_status(inst, basedn, log, args):
|
||||
|
||||
|
||||
def subtree_status(inst, basedn, log, args):
|
||||
- basedn = _get_dn_arg(args.basedn, msg="Enter basedn to check")
|
||||
filter = ""
|
||||
scope = ldap.SCOPE_SUBTREE
|
||||
epoch_inactive_time = None
|
||||
@@ -121,7 +120,6 @@ def subtree_status(inst, basedn, log, args):
|
||||
|
||||
|
||||
def bulk_update(inst, basedn, log, args):
|
||||
- basedn = _get_dn_arg(args.basedn, msg="Enter basedn to search")
|
||||
search_filter = "(objectclass=*)"
|
||||
scope = ldap.SCOPE_SUBTREE
|
||||
scope_str = "sub"
|
||||
diff --git a/src/lib389/lib389/cli_idm/service.py b/src/lib389/lib389/cli_idm/service.py
|
||||
index c62fc12d1..c2b2c8c84 100644
|
||||
--- a/src/lib389/lib389/cli_idm/service.py
|
||||
+++ b/src/lib389/lib389/cli_idm/service.py
|
||||
@@ -57,7 +57,9 @@ def rename(inst, basedn, log, args, warn=True):
|
||||
_generic_rename(inst, basedn, log.getChild('_generic_rename'), MANY, rdn, args)
|
||||
|
||||
def create_parser(subparsers):
|
||||
- service_parser = subparsers.add_parser('service', help='Manage service accounts', formatter_class=CustomHelpFormatter)
|
||||
+ service_parser = subparsers.add_parser('service',
|
||||
+ help='Manage service accounts. The organizationalUnit (by default "ou=Services") '
|
||||
+ 'needs to exist prior to managing service accounts.', formatter_class=CustomHelpFormatter)
|
||||
|
||||
subcommands = service_parser.add_subparsers(help='action')
|
||||
|
||||
diff --git a/src/lib389/lib389/idm/group.py b/src/lib389/lib389/idm/group.py
|
||||
index 1b60a1f51..2cf2c7b23 100644
|
||||
--- a/src/lib389/lib389/idm/group.py
|
||||
+++ b/src/lib389/lib389/idm/group.py
|
||||
@@ -1,6 +1,6 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2023 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -16,6 +16,8 @@ MUST_ATTRIBUTES = [
|
||||
'cn',
|
||||
]
|
||||
RDN = 'cn'
|
||||
+DEFAULT_BASEDN_RDN = 'ou=Groups'
|
||||
+DEFAULT_BASEDN_RDN_ADMIN_GROUPS = 'ou=People'
|
||||
|
||||
|
||||
class Group(DSLdapObject):
|
||||
@@ -93,7 +95,7 @@ class Groups(DSLdapObjects):
|
||||
:type basedn: str
|
||||
"""
|
||||
|
||||
- def __init__(self, instance, basedn, rdn='ou=Groups'):
|
||||
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
|
||||
super(Groups, self).__init__(instance)
|
||||
self._objectclasses = [
|
||||
'groupOfNames',
|
||||
@@ -140,7 +142,7 @@ class UniqueGroup(DSLdapObject):
|
||||
class UniqueGroups(DSLdapObjects):
|
||||
# WARNING!!!
|
||||
# Use group, not unique group!!!
|
||||
- def __init__(self, instance, basedn, rdn='ou=Groups'):
|
||||
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
|
||||
super(UniqueGroups, self).__init__(instance)
|
||||
self._objectclasses = [
|
||||
'groupOfUniqueNames',
|
||||
@@ -203,7 +205,7 @@ class nsAdminGroups(DSLdapObjects):
|
||||
:type rdn: str
|
||||
"""
|
||||
|
||||
- def __init__(self, instance, basedn, rdn='ou=People'):
|
||||
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN_ADMIN_GROUPS):
|
||||
super(nsAdminGroups, self).__init__(instance)
|
||||
self._objectclasses = [
|
||||
'nsAdminGroup'
|
||||
diff --git a/src/lib389/lib389/idm/posixgroup.py b/src/lib389/lib389/idm/posixgroup.py
|
||||
index d1debcf12..45735c579 100644
|
||||
--- a/src/lib389/lib389/idm/posixgroup.py
|
||||
+++ b/src/lib389/lib389/idm/posixgroup.py
|
||||
@@ -1,6 +1,6 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2023 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -17,6 +17,7 @@ MUST_ATTRIBUTES = [
|
||||
'gidNumber',
|
||||
]
|
||||
RDN = 'cn'
|
||||
+DEFAULT_BASEDN_RDN = 'ou=Groups'
|
||||
|
||||
|
||||
class PosixGroup(DSLdapObject):
|
||||
@@ -72,7 +73,7 @@ class PosixGroups(DSLdapObjects):
|
||||
:type basedn: str
|
||||
"""
|
||||
|
||||
- def __init__(self, instance, basedn, rdn='ou=Groups'):
|
||||
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
|
||||
super(PosixGroups, self).__init__(instance)
|
||||
self._objectclasses = [
|
||||
'groupOfNames',
|
||||
diff --git a/src/lib389/lib389/idm/services.py b/src/lib389/lib389/idm/services.py
|
||||
index d1e5b4693..e750a32c4 100644
|
||||
--- a/src/lib389/lib389/idm/services.py
|
||||
+++ b/src/lib389/lib389/idm/services.py
|
||||
@@ -1,6 +1,6 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2021 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -16,6 +16,7 @@ RDN = 'cn'
|
||||
MUST_ATTRIBUTES = [
|
||||
'cn',
|
||||
]
|
||||
+DEFAULT_BASEDN_RDN = 'ou=Services'
|
||||
|
||||
class ServiceAccount(Account):
|
||||
"""A single instance of Service entry
|
||||
@@ -59,7 +60,7 @@ class ServiceAccounts(DSLdapObjects):
|
||||
:type basedn: str
|
||||
"""
|
||||
|
||||
- def __init__(self, instance, basedn, rdn='ou=Services'):
|
||||
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
|
||||
super(ServiceAccounts, self).__init__(instance)
|
||||
self._objectclasses = [
|
||||
'applicationProcess',
|
||||
diff --git a/src/lib389/lib389/idm/user.py b/src/lib389/lib389/idm/user.py
|
||||
index 1206a6e08..3b21ccf1c 100644
|
||||
--- a/src/lib389/lib389/idm/user.py
|
||||
+++ b/src/lib389/lib389/idm/user.py
|
||||
@@ -1,6 +1,6 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
# Copyright (C) 2016, William Brown <william at blackhats.net.au>
|
||||
-# Copyright (C) 2023 Red Hat, Inc.
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -23,6 +23,7 @@ MUST_ATTRIBUTES = [
|
||||
'homeDirectory',
|
||||
]
|
||||
RDN = 'uid'
|
||||
+DEFAULT_BASEDN_RDN = 'ou=People'
|
||||
|
||||
TEST_USER_PROPERTIES = {
|
||||
'uid': 'testuser',
|
||||
@@ -201,7 +202,7 @@ class UserAccounts(DSLdapObjects):
|
||||
:type rdn: str
|
||||
"""
|
||||
|
||||
- def __init__(self, instance, basedn, rdn='ou=People'):
|
||||
+ def __init__(self, instance, basedn, rdn=DEFAULT_BASEDN_RDN):
|
||||
super(UserAccounts, self).__init__(instance)
|
||||
self._objectclasses = [
|
||||
'account',
|
||||
--
|
||||
2.48.1
|
||||
|
||||
@ -1,52 +0,0 @@
|
||||
From ee03e8443a108cff0cc4c7a03962fdc3a1fbf94d Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Wed, 16 Oct 2024 19:24:55 -0700
|
||||
Subject: [PATCH] Issue 6067 - Update dsidm to prioritize basedn from .dsrc
|
||||
over interactive input (#6362)
|
||||
|
||||
Description: Modifies dsidm CLI tool to check for the basedn in the .dsrc configuration file
|
||||
when the -b option is not provided.
|
||||
Previously, users were required to always specify the basedn interactively if -b was omitted,
|
||||
even if it was available in .dsrc.
|
||||
Now, the basedn is determined by first checking the -b option, then the .dsrc file, and finally
|
||||
prompting the user if neither is set.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/6067
|
||||
|
||||
Reviewed by: @Firstyear (Thanks!)
|
||||
---
|
||||
src/lib389/cli/dsidm | 2 +-
|
||||
src/lib389/lib389/cli_idm/__init__.py | 4 ++--
|
||||
2 files changed, 3 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/src/lib389/cli/dsidm b/src/lib389/cli/dsidm
|
||||
index 970973f4f..d318664bc 100755
|
||||
--- a/src/lib389/cli/dsidm
|
||||
+++ b/src/lib389/cli/dsidm
|
||||
@@ -128,7 +128,7 @@ if __name__ == '__main__':
|
||||
result = False
|
||||
try:
|
||||
inst = connect_instance(dsrc_inst=dsrc_inst, verbose=args.verbose, args=args)
|
||||
- basedn = _get_basedn_arg(inst, args, log, msg="Enter basedn")
|
||||
+ basedn = _get_basedn_arg(inst, args, dsrc_inst['basedn'], log, msg="Enter basedn")
|
||||
if basedn is None:
|
||||
errmsg = "Must provide a basedn!"
|
||||
if args.json:
|
||||
diff --git a/src/lib389/lib389/cli_idm/__init__.py b/src/lib389/lib389/cli_idm/__init__.py
|
||||
index e3622246d..1f3e2dc86 100644
|
||||
--- a/src/lib389/lib389/cli_idm/__init__.py
|
||||
+++ b/src/lib389/lib389/cli_idm/__init__.py
|
||||
@@ -52,8 +52,8 @@ def _get_args(args, kws):
|
||||
return kwargs
|
||||
|
||||
|
||||
-def _get_basedn_arg(inst, args, log, msg=None):
|
||||
- basedn_arg = _get_dn_arg(args.basedn, msg="Enter basedn")
|
||||
+def _get_basedn_arg(inst, args, basedn, log, msg=None):
|
||||
+ basedn_arg = _get_dn_arg(basedn, msg="Enter basedn")
|
||||
if not DSLdapObject(inst, basedn_arg).exists():
|
||||
raise ValueError(f'The base DN "{basedn_arg}" does not exist.')
|
||||
|
||||
--
|
||||
2.48.1
|
||||
|
||||
@ -1,520 +0,0 @@
|
||||
From b8c079c770d3eaa4de49e997d42e1501c28a153b Mon Sep 17 00:00:00 2001
|
||||
From: progier389 <progier@redhat.com>
|
||||
Date: Mon, 8 Jul 2024 11:19:09 +0200
|
||||
Subject: [PATCH] Issue 6155 - ldap-agent fails to start because of permission
|
||||
error (#6179)
|
||||
|
||||
Issue: dirsrv-snmp service fails to starts when SELinux is enforced because of AVC preventing to open some files
|
||||
One workaround is to use the dac_override capability but it is a bad practice.
|
||||
Fix: Setting proper permissions:
|
||||
|
||||
Running ldap-agent with uid=root and gid=dirsrv to be able to access both snmp and dirsrv resources.
|
||||
Setting read permission on the group for the dse.ldif file
|
||||
Setting r/w permissions on the group for the snmp semaphore and mmap file
|
||||
For that one special care is needed because ns-slapd umask overrides the file creation permission
|
||||
as is better to avoid changing the umask (changing umask within the code is not thread safe,
|
||||
and the current 0022 umask value is correct for most of the files) so the safest way is to chmod the snmp file
|
||||
if the needed permission are not set.
|
||||
Issue: #6155
|
||||
|
||||
Reviewed by: @droideck , @vashirov (Thanks ! )
|
||||
|
||||
(cherry picked from commit eb7e57d77b557b63c65fdf38f9069893b021f049)
|
||||
---
|
||||
.github/scripts/generate_matrix.py | 4 +-
|
||||
dirsrvtests/tests/suites/snmp/snmp.py | 214 ++++++++++++++++++++++++++
|
||||
ldap/servers/slapd/agtmmap.c | 72 ++++++++-
|
||||
ldap/servers/slapd/agtmmap.h | 13 ++
|
||||
ldap/servers/slapd/dse.c | 6 +-
|
||||
ldap/servers/slapd/slap.h | 6 +
|
||||
ldap/servers/slapd/snmp_collator.c | 4 +-
|
||||
src/lib389/lib389/instance/setup.py | 5 +
|
||||
wrappers/systemd-snmp.service.in | 1 +
|
||||
9 files changed, 313 insertions(+), 12 deletions(-)
|
||||
create mode 100644 dirsrvtests/tests/suites/snmp/snmp.py
|
||||
|
||||
diff --git a/.github/scripts/generate_matrix.py b/.github/scripts/generate_matrix.py
|
||||
index 584374597..8d67a1dc7 100644
|
||||
--- a/.github/scripts/generate_matrix.py
|
||||
+++ b/.github/scripts/generate_matrix.py
|
||||
@@ -21,8 +21,8 @@ else:
|
||||
# Use tests from the source
|
||||
suites = next(os.walk('dirsrvtests/tests/suites/'))[1]
|
||||
|
||||
- # Filter out snmp as it is an empty directory:
|
||||
- suites.remove('snmp')
|
||||
+ # Filter out webui because of broken tests
|
||||
+ suites.remove('webui')
|
||||
|
||||
# Run each replication test module separately to speed things up
|
||||
suites.remove('replication')
|
||||
diff --git a/dirsrvtests/tests/suites/snmp/snmp.py b/dirsrvtests/tests/suites/snmp/snmp.py
|
||||
new file mode 100644
|
||||
index 000000000..0952deb40
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/snmp/snmp.py
|
||||
@@ -0,0 +1,214 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2024 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import os
|
||||
+import pytest
|
||||
+import logging
|
||||
+import subprocess
|
||||
+import ldap
|
||||
+from datetime import datetime
|
||||
+from shutil import copyfile
|
||||
+from lib389.topologies import topology_m2 as topo_m2
|
||||
+from lib389.utils import selinux_present
|
||||
+
|
||||
+
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
+if DEBUGGING:
|
||||
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+else:
|
||||
+ logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+
|
||||
+SNMP_USER = 'user_name'
|
||||
+SNMP_PASSWORD = 'authentication_password'
|
||||
+SNMP_PRIVATE = 'private_password'
|
||||
+
|
||||
+# LDAP OID in MIB
|
||||
+LDAP_OID = '.1.3.6.1.4.1.2312.6.1.1'
|
||||
+LDAPCONNECTIONS_OID = f'{LDAP_OID}.21'
|
||||
+
|
||||
+
|
||||
+def run_cmd(cmd, check_returncode=True):
|
||||
+ """Run a command"""
|
||||
+
|
||||
+ log.info(f'Run: {cmd}')
|
||||
+ result = subprocess.run(cmd, capture_output=True, universal_newlines=True)
|
||||
+ log.info(f'STDOUT of {cmd} is:\n{result.stdout}')
|
||||
+ log.info(f'STDERR of {cmd} is:\n{result.stderr}')
|
||||
+ if check_returncode:
|
||||
+ result.check_returncode()
|
||||
+ return result
|
||||
+
|
||||
+
|
||||
+def add_lines(lines, filename):
|
||||
+ """Add lines that are not already present at the end of a file"""
|
||||
+
|
||||
+ log.info(f'add_lines({lines}, {filename})')
|
||||
+ try:
|
||||
+ with open(filename, 'r') as fd:
|
||||
+ for line in fd:
|
||||
+ try:
|
||||
+ lines.remove(line.strip())
|
||||
+ except ValueError:
|
||||
+ pass
|
||||
+ except FileNotFoundError:
|
||||
+ pass
|
||||
+ if lines:
|
||||
+ with open(filename, 'a') as fd:
|
||||
+ for line in lines:
|
||||
+ fd.write(f'{line}\n')
|
||||
+
|
||||
+
|
||||
+def remove_lines(lines, filename):
|
||||
+ """Remove lines in a file"""
|
||||
+
|
||||
+ log.info(f'remove_lines({lines}, {filename})')
|
||||
+ file_lines = []
|
||||
+ with open(filename, 'r') as fd:
|
||||
+ for line in fd:
|
||||
+ if not line.strip() in lines:
|
||||
+ file_lines.append(line)
|
||||
+ with open(filename, 'w') as fd:
|
||||
+ for line in file_lines:
|
||||
+ fd.write(line)
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="module")
|
||||
+def setup_snmp(topo_m2, request):
|
||||
+ """Install snmp and configure it
|
||||
+
|
||||
+ Returns the time just before dirsrv-snmp get restarted
|
||||
+ """
|
||||
+
|
||||
+ inst1 = topo_m2.ms["supplier1"]
|
||||
+ inst2 = topo_m2.ms["supplier2"]
|
||||
+
|
||||
+ # Check for the test prerequisites
|
||||
+ if os.getuid() != 0:
|
||||
+ pytest.skip('This test should be run by root superuser')
|
||||
+ return None
|
||||
+ if not inst1.with_systemd_running():
|
||||
+ pytest.skip('This test requires systemd')
|
||||
+ return None
|
||||
+ required_packages = {
|
||||
+ '389-ds-base-snmp': os.path.join(inst1.get_sbin_dir(), 'ldap-agent'),
|
||||
+ 'net-snmp': '/etc/snmp/snmpd.conf', }
|
||||
+ skip_msg = ""
|
||||
+ for package,file in required_packages.items():
|
||||
+ if not os.path.exists(file):
|
||||
+ skip_msg += f"Package {package} is not installed ({file} is missing).\n"
|
||||
+ if skip_msg != "":
|
||||
+ pytest.skip(f'This test requires the following package(s): {skip_msg}')
|
||||
+ return None
|
||||
+
|
||||
+ # Install snmp
|
||||
+ # run_cmd(['/usr/bin/dnf', 'install', '-y', 'net-snmp', 'net-snmp-utils', '389-ds-base-snmp'])
|
||||
+
|
||||
+ # Prepare the lines to add/remove in files:
|
||||
+ # master agentx
|
||||
+ # snmp user (user_name - authentication_password - private_password)
|
||||
+ # ldap_agent ds instances
|
||||
+ #
|
||||
+ # Adding rwuser and createUser lines is the same as running:
|
||||
+ # net-snmp-create-v3-user -A authentication_password -a SHA -X private_password -x AES user_name
|
||||
+ # but has the advantage of removing the user at cleanup phase
|
||||
+ #
|
||||
+ agent_cfg = '/etc/dirsrv/config/ldap-agent.conf'
|
||||
+ lines_dict = { '/etc/snmp/snmpd.conf' : ['master agentx', f'rwuser {SNMP_USER}'],
|
||||
+ '/var/lib/net-snmp/snmpd.conf' : [
|
||||
+ f'createUser {SNMP_USER} SHA "{SNMP_PASSWORD}" AES "{SNMP_PRIVATE}"',],
|
||||
+ agent_cfg : [] }
|
||||
+ for inst in topo_m2:
|
||||
+ lines_dict[agent_cfg].append(f'server slapd-{inst.serverid}')
|
||||
+
|
||||
+ # Prepare the cleanup
|
||||
+ def fin():
|
||||
+ run_cmd(['systemctl', 'stop', 'dirsrv-snmp'])
|
||||
+ if not DEBUGGING:
|
||||
+ run_cmd(['systemctl', 'stop', 'snmpd'])
|
||||
+ try:
|
||||
+ os.remove('/usr/share/snmp/mibs/redhat-directory.mib')
|
||||
+ except FileNotFoundError:
|
||||
+ pass
|
||||
+ for filename,lines in lines_dict.items():
|
||||
+ remove_lines(lines, filename)
|
||||
+ run_cmd(['systemctl', 'start', 'snmpd'])
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Copy RHDS MIB in default MIB search path (Ugly because I have not found how to change the search path)
|
||||
+ copyfile('/usr/share/dirsrv/mibs/redhat-directory.mib', '/usr/share/snmp/mibs/redhat-directory.mib')
|
||||
+
|
||||
+ run_cmd(['systemctl', 'stop', 'snmpd'])
|
||||
+ for filename,lines in lines_dict.items():
|
||||
+ add_lines(lines, filename)
|
||||
+
|
||||
+ run_cmd(['systemctl', 'start', 'snmpd'])
|
||||
+
|
||||
+ curtime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
||||
+
|
||||
+ run_cmd(['systemctl', 'start', 'dirsrv-snmp'])
|
||||
+ return curtime
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(not os.path.exists('/usr/bin/snmpwalk'), reason="net-snmp-utils package is not installed")
|
||||
+def test_snmpwalk(topo_m2, setup_snmp):
|
||||
+ """snmp smoke tests.
|
||||
+
|
||||
+ :id: e5d29998-1c21-11ef-a654-482ae39447e5
|
||||
+ :setup: Two suppliers replication setup, snmp
|
||||
+ :steps:
|
||||
+ 1. use snmpwalk to display LDAP statistics
|
||||
+ 2. use snmpwalk to get the number of open connections
|
||||
+ :expectedresults:
|
||||
+ 1. Success and no messages in stderr
|
||||
+ 2. The number of open connections should be positive
|
||||
+ """
|
||||
+
|
||||
+ inst1 = topo_m2.ms["supplier1"]
|
||||
+ inst2 = topo_m2.ms["supplier2"]
|
||||
+
|
||||
+
|
||||
+ cmd = [ '/usr/bin/snmpwalk', '-v3', '-u', SNMP_USER, '-l', 'AuthPriv',
|
||||
+ '-m', '+RHDS-MIB', '-A', SNMP_PASSWORD, '-a', 'SHA',
|
||||
+ '-X', SNMP_PRIVATE, '-x', 'AES', 'localhost',
|
||||
+ LDAP_OID ]
|
||||
+ result = run_cmd(cmd)
|
||||
+ assert not result.stderr
|
||||
+
|
||||
+ cmd = [ '/usr/bin/snmpwalk', '-v3', '-u', SNMP_USER, '-l', 'AuthPriv',
|
||||
+ '-m', '+RHDS-MIB', '-A', SNMP_PASSWORD, '-a', 'SHA',
|
||||
+ '-X', SNMP_PRIVATE, '-x', 'AES', 'localhost',
|
||||
+ f'{LDAPCONNECTIONS_OID}.{inst1.port}', '-Ov' ]
|
||||
+ result = run_cmd(cmd)
|
||||
+ nbconns = int(result.stdout.split()[1])
|
||||
+ log.info(f'There are {nbconns} open connections on {inst1.serverid}')
|
||||
+ assert nbconns > 0
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(not selinux_present(), reason="SELinux is not enabled")
|
||||
+def test_snmp_avc(topo_m2, setup_snmp):
|
||||
+ """snmp smoke tests.
|
||||
+
|
||||
+ :id: fb79728e-1d0d-11ef-9213-482ae39447e5
|
||||
+ :setup: Two suppliers replication setup, snmp
|
||||
+ :steps:
|
||||
+ 1. Get the system journal about ldap-agent
|
||||
+ :expectedresults:
|
||||
+ 1. No AVC should be present
|
||||
+ """
|
||||
+ result = run_cmd(['journalctl', '-S', setup_snmp, '-g', 'ldap-agent'])
|
||||
+ assert not 'AVC' in result.stdout
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main("-s %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/slapd/agtmmap.c b/ldap/servers/slapd/agtmmap.c
|
||||
index bc5fe1ee1..4dc67dcfb 100644
|
||||
--- a/ldap/servers/slapd/agtmmap.c
|
||||
+++ b/ldap/servers/slapd/agtmmap.c
|
||||
@@ -34,6 +34,70 @@
|
||||
agt_mmap_context_t mmap_tbl[2] = {{AGT_MAP_UNINIT, -1, (caddr_t)-1},
|
||||
{AGT_MAP_UNINIT, -1, (caddr_t)-1}};
|
||||
|
||||
+#define CHECK_MAP_FAILURE(addr) ((addr)==NULL || (addr) == (caddr_t) -1)
|
||||
+
|
||||
+
|
||||
+/****************************************************************************
|
||||
+ *
|
||||
+ * agt_set_fmode () - try to increase file mode if some flags are missing.
|
||||
+ *
|
||||
+ *
|
||||
+ * Inputs:
|
||||
+ * fd -> The file descriptor.
|
||||
+ *
|
||||
+ * mode -> the wanted mode
|
||||
+ *
|
||||
+ * Outputs: None
|
||||
+ * Return Values: None
|
||||
+ *
|
||||
+ ****************************************************************************/
|
||||
+static void
|
||||
+agt_set_fmode(int fd, mode_t mode)
|
||||
+{
|
||||
+ /* ns-slapd umask is 0022 which is usually fine.
|
||||
+ * but ldap-agen needs S_IWGRP permission on snmp semaphore and mmap file
|
||||
+ * ( when SELinux is enforced process with uid=0 does not bypass the file permission
|
||||
+ * (unless the unfamous dac_override capability is set)
|
||||
+ * Changing umask could lead to race conditions so it is better to check the
|
||||
+ * file permission and change them if needed and if the process own the file.
|
||||
+ */
|
||||
+ struct stat fileinfo = {0};
|
||||
+ if (fstat(fd, &fileinfo) == 0 && fileinfo.st_uid == getuid() &&
|
||||
+ (fileinfo.st_mode & mode) != mode) {
|
||||
+ (void) fchmod(fd, fileinfo.st_mode | mode);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+/****************************************************************************
|
||||
+ *
|
||||
+ * agt_sem_open () - Like sem_open but ignores umask
|
||||
+ *
|
||||
+ *
|
||||
+ * Inputs: see sem_open man page.
|
||||
+ * Outputs: see sem_open man page.
|
||||
+ * Return Values: see sem_open man page.
|
||||
+ *
|
||||
+ ****************************************************************************/
|
||||
+sem_t *
|
||||
+agt_sem_open(const char *name, int oflag, mode_t mode, unsigned int value)
|
||||
+{
|
||||
+ sem_t *sem = sem_open(name, oflag, mode, value);
|
||||
+ char *semname = NULL;
|
||||
+
|
||||
+ if (sem != NULL) {
|
||||
+ if (asprintf(&semname, "/dev/shm/sem.%s", name+1) > 0) {
|
||||
+ int fd = open(semname, O_RDONLY);
|
||||
+ if (fd >= 0) {
|
||||
+ agt_set_fmode(fd, mode);
|
||||
+ (void) close(fd);
|
||||
+ }
|
||||
+ free(semname);
|
||||
+ semname = NULL;
|
||||
+ }
|
||||
+ }
|
||||
+ return sem;
|
||||
+}
|
||||
+
|
||||
/****************************************************************************
|
||||
*
|
||||
* agt_mopen_stats () - open and Memory Map the stats file. agt_mclose_stats()
|
||||
@@ -52,7 +116,6 @@ agt_mmap_context_t mmap_tbl[2] = {{AGT_MAP_UNINIT, -1, (caddr_t)-1},
|
||||
* as defined in <errno.h>, otherwise.
|
||||
*
|
||||
****************************************************************************/
|
||||
-
|
||||
int
|
||||
agt_mopen_stats(char *statsfile, int mode, int *hdl)
|
||||
{
|
||||
@@ -64,6 +127,7 @@ agt_mopen_stats(char *statsfile, int mode, int *hdl)
|
||||
int err;
|
||||
size_t sz;
|
||||
struct stat fileinfo;
|
||||
+ mode_t rw_mode = S_IWUSR | S_IRUSR | S_IRGRP | S_IWGRP | S_IROTH;
|
||||
|
||||
switch (mode) {
|
||||
case O_RDONLY:
|
||||
@@ -128,10 +192,7 @@ agt_mopen_stats(char *statsfile, int mode, int *hdl)
|
||||
break;
|
||||
|
||||
case O_RDWR:
|
||||
- fd = open(path,
|
||||
- O_RDWR | O_CREAT,
|
||||
- S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH);
|
||||
-
|
||||
+ fd = open(path, O_RDWR | O_CREAT, rw_mode);
|
||||
if (fd < 0) {
|
||||
err = errno;
|
||||
#if (0)
|
||||
@@ -140,6 +201,7 @@ agt_mopen_stats(char *statsfile, int mode, int *hdl)
|
||||
rc = err;
|
||||
goto bail;
|
||||
}
|
||||
+ agt_set_fmode(fd, rw_mode);
|
||||
|
||||
if (fstat(fd, &fileinfo) != 0) {
|
||||
close(fd);
|
||||
diff --git a/ldap/servers/slapd/agtmmap.h b/ldap/servers/slapd/agtmmap.h
|
||||
index fb27ab2c4..99a8584a3 100644
|
||||
--- a/ldap/servers/slapd/agtmmap.h
|
||||
+++ b/ldap/servers/slapd/agtmmap.h
|
||||
@@ -28,6 +28,7 @@
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <fcntl.h>
|
||||
+#include <semaphore.h>
|
||||
#include <errno.h>
|
||||
#include "nspr.h"
|
||||
|
||||
@@ -188,6 +189,18 @@ int agt_mclose_stats(int hdl);
|
||||
|
||||
int agt_mread_stats(int hdl, struct hdr_stats_t *, struct ops_stats_t *, struct entries_stats_t *);
|
||||
|
||||
+/****************************************************************************
|
||||
+ *
|
||||
+ * agt_sem_open () - Like sem_open but ignores umask
|
||||
+ *
|
||||
+ *
|
||||
+ * Inputs: see sem_open man page.
|
||||
+ * Outputs: see sem_open man page.
|
||||
+ * Return Values: see sem_open man page.
|
||||
+ *
|
||||
+ ****************************************************************************/
|
||||
+sem_t *agt_sem_open(const char *name, int oflag, mode_t mode, unsigned int value);
|
||||
+
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
|
||||
index b04fafde6..f1e48c6b1 100644
|
||||
--- a/ldap/servers/slapd/dse.c
|
||||
+++ b/ldap/servers/slapd/dse.c
|
||||
@@ -683,7 +683,7 @@ dse_read_one_file(struct dse *pdse, const char *filename, Slapi_PBlock *pb, int
|
||||
"The configuration file %s could not be accessed, error %d\n",
|
||||
filename, rc);
|
||||
rc = 0; /* Fail */
|
||||
- } else if ((prfd = PR_Open(filename, PR_RDONLY, SLAPD_DEFAULT_FILE_MODE)) == NULL) {
|
||||
+ } else if ((prfd = PR_Open(filename, PR_RDONLY, SLAPD_DEFAULT_DSE_FILE_MODE)) == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, "dse_read_one_file",
|
||||
"The configuration file %s could not be read. " SLAPI_COMPONENT_NAME_NSPR " %d (%s)\n",
|
||||
filename,
|
||||
@@ -871,7 +871,7 @@ dse_rw_permission_to_one_file(const char *name, int loglevel)
|
||||
PRFileDesc *prfd;
|
||||
|
||||
prfd = PR_Open(name, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE,
|
||||
- SLAPD_DEFAULT_FILE_MODE);
|
||||
+ SLAPD_DEFAULT_DSE_FILE_MODE);
|
||||
if (NULL == prfd) {
|
||||
prerr = PR_GetError();
|
||||
accesstype = "create";
|
||||
@@ -970,7 +970,7 @@ dse_write_file_nolock(struct dse *pdse)
|
||||
fpw.fpw_prfd = NULL;
|
||||
|
||||
if (NULL != pdse->dse_filename) {
|
||||
- if ((fpw.fpw_prfd = PR_Open(pdse->dse_tmpfile, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, SLAPD_DEFAULT_FILE_MODE)) == NULL) {
|
||||
+ if ((fpw.fpw_prfd = PR_Open(pdse->dse_tmpfile, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, SLAPD_DEFAULT_DSE_FILE_MODE)) == NULL) {
|
||||
rc = PR_GetOSError();
|
||||
slapi_log_err(SLAPI_LOG_ERR, "dse_write_file_nolock", "Cannot open "
|
||||
"temporary DSE file \"%s\" for update: OS error %d (%s)\n",
|
||||
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
|
||||
index 469874fd1..927576b70 100644
|
||||
--- a/ldap/servers/slapd/slap.h
|
||||
+++ b/ldap/servers/slapd/slap.h
|
||||
@@ -238,6 +238,12 @@ typedef void (*VFPV)(); /* takes undefined arguments */
|
||||
*/
|
||||
|
||||
#define SLAPD_DEFAULT_FILE_MODE S_IRUSR | S_IWUSR
|
||||
+/* ldap_agent run as uid=root gid=dirsrv and requires S_IRGRP | S_IWGRP
|
||||
+ * on semaphore and mmap file if SELinux is enforced.
|
||||
+ */
|
||||
+#define SLAPD_DEFAULT_SNMP_FILE_MODE S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP
|
||||
+/* ldap_agent run as uid=root gid=dirsrv and requires S_IRGRP on dse.ldif if SELinux is enforced. */
|
||||
+#define SLAPD_DEFAULT_DSE_FILE_MODE S_IRUSR | S_IWUSR | S_IRGRP
|
||||
#define SLAPD_DEFAULT_DIR_MODE S_IRWXU
|
||||
#define SLAPD_DEFAULT_IDLE_TIMEOUT 3600 /* seconds - 0 == never */
|
||||
#define SLAPD_DEFAULT_IDLE_TIMEOUT_STR "3600"
|
||||
diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c
|
||||
index c998d4262..bd7020585 100644
|
||||
--- a/ldap/servers/slapd/snmp_collator.c
|
||||
+++ b/ldap/servers/slapd/snmp_collator.c
|
||||
@@ -474,7 +474,7 @@ static void
|
||||
snmp_collator_create_semaphore(void)
|
||||
{
|
||||
/* First just try to create the semaphore. This should usually just work. */
|
||||
- if ((stats_sem = sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_FILE_MODE, 1)) == SEM_FAILED) {
|
||||
+ if ((stats_sem = agt_sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_SNMP_FILE_MODE, 1)) == SEM_FAILED) {
|
||||
if (errno == EEXIST) {
|
||||
/* It appears that we didn't exit cleanly last time and left the semaphore
|
||||
* around. Recreate it since we don't know what state it is in. */
|
||||
@@ -486,7 +486,7 @@ snmp_collator_create_semaphore(void)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
- if ((stats_sem = sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_FILE_MODE, 1)) == SEM_FAILED) {
|
||||
+ if ((stats_sem = agt_sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_SNMP_FILE_MODE, 1)) == SEM_FAILED) {
|
||||
/* No dice */
|
||||
slapi_log_err(SLAPI_LOG_EMERG, "snmp_collator_create_semaphore",
|
||||
"Failed to create semaphore for stats file (/dev/shm/sem.%s). Error %d (%s).\n",
|
||||
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
|
||||
index 036664447..fca03383e 100644
|
||||
--- a/src/lib389/lib389/instance/setup.py
|
||||
+++ b/src/lib389/lib389/instance/setup.py
|
||||
@@ -10,6 +10,7 @@
|
||||
import os
|
||||
import sys
|
||||
import shutil
|
||||
+import stat
|
||||
import pwd
|
||||
import grp
|
||||
import re
|
||||
@@ -773,6 +774,10 @@ class SetupDs(object):
|
||||
ldapi_autobind="on",
|
||||
)
|
||||
file_dse.write(dse_fmt)
|
||||
+ # Set minimum permission required by snmp ldap-agent
|
||||
+ status = os.fstat(file_dse.fileno())
|
||||
+ os.fchmod(file_dse.fileno(), status.st_mode | stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP)
|
||||
+ os.chown(os.path.join(slapd['config_dir'], 'dse.ldif'), slapd['user_uid'], slapd['group_gid'])
|
||||
|
||||
self.log.info("Create file system structures ...")
|
||||
# Create all the needed paths
|
||||
diff --git a/wrappers/systemd-snmp.service.in b/wrappers/systemd-snmp.service.in
|
||||
index f18766cb4..d344367a0 100644
|
||||
--- a/wrappers/systemd-snmp.service.in
|
||||
+++ b/wrappers/systemd-snmp.service.in
|
||||
@@ -9,6 +9,7 @@ After=network.target
|
||||
|
||||
[Service]
|
||||
Type=forking
|
||||
+Group=@defaultgroup@
|
||||
PIDFile=/run/dirsrv/ldap-agent.pid
|
||||
ExecStart=@sbindir@/ldap-agent @configdir@/ldap-agent.conf
|
||||
|
||||
--
|
||||
2.49.0
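
The setup.py hunk in this patch is the piece most likely to be reused elsewhere, so here is a minimal standalone sketch of the same permission adjustment. It assumes a hypothetical config directory and dirsrv uid/gid; in the real code these come from the slapd settings dictionary during instance creation.

```
import os
import stat

# Hypothetical values for illustration; the real ones come from the
# slapd settings dictionary during instance setup.
config_dir = "/etc/dirsrv/slapd-localhost"
dirsrv_uid, dirsrv_gid = 389, 389

dse_path = os.path.join(config_dir, "dse.ldif")
with open(dse_path, "a") as file_dse:
    # Keep whatever mode the file already has, but guarantee at least
    # owner read/write plus group read (S_IRGRP) so the SNMP ldap-agent
    # (running as root:dirsrv) can read dse.ldif under enforcing SELinux.
    status = os.fstat(file_dse.fileno())
    os.fchmod(file_dse.fileno(),
              status.st_mode | stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP)
    os.chown(dse_path, dirsrv_uid, dirsrv_gid)
```
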
@ -1,60 +0,0 @@
|
||||
From 12870f410545fb055f664b588df2a2b7ab1c228e Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Mon, 4 Mar 2024 07:22:00 +0100
|
||||
Subject: [PATCH] Issue 5305 - OpenLDAP version autodetection doesn't work
|
||||
|
||||
Bug Description:
|
||||
An error is logged during a build in `mock` with Bash 4.4:
|
||||
|
||||
```
|
||||
checking for --with-libldap-r... ./configure: command substitution: line 22848: syntax error near unexpected token `>'
|
||||
./configure: command substitution: line 22848: `ldapsearch -VV 2> >(sed -n '/ldapsearch/ s/.*ldapsearch \([0-9]\+\.[0-9]\+\.[0-9]\+\) .*/\1/p')'
|
||||
no
|
||||
```
|
||||

`mock` runs Bash as `sh` (POSIX mode). Support for process substitution
in POSIX mode was added in version 5.1:
https://lists.gnu.org/archive/html/bug-bash/2020-12/msg00002.html

> Process substitution is now available in posix mode.

Fix Description:
* Add missing `BuildRequires` for openldap-clients
* Replace process substitution with a pipe
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/5305
|
||||
|
||||
Reviewed by: @progier389, @tbordaz (Thanks!)
|
||||
---
|
||||
configure.ac | 2 +-
|
||||
rpm/389-ds-base.spec.in | 1 +
|
||||
2 files changed, 2 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/configure.ac b/configure.ac
|
||||
index ffc2aac14..a690765a3 100644
|
||||
--- a/configure.ac
|
||||
+++ b/configure.ac
|
||||
@@ -912,7 +912,7 @@ AC_ARG_WITH(libldap-r, AS_HELP_STRING([--with-libldap-r],[Use lldap_r shared lib
|
||||
AC_SUBST(with_libldap_r)
|
||||
fi
|
||||
],
|
||||
-OPENLDAP_VERSION=`ldapsearch -VV 2> >(sed -n '/ldapsearch/ s/.*ldapsearch \([[[0-9]]]\+\.[[[0-9]]]\+\.[[[0-9]]]\+\) .*/\1/p')`
|
||||
+OPENLDAP_VERSION=`ldapsearch -VV 2>&1 | sed -n '/ldapsearch/ s/.*ldapsearch \([[[0-9]]]\+\.[[[0-9]]]\+\.[[[0-9]]]\+\) .*/\1/p'`
|
||||
AX_COMPARE_VERSION([$OPENLDAP_VERSION], [lt], [2.5], [ with_libldap_r=yes ], [ with_libldap_r=no ])
|
||||
AC_MSG_RESULT($with_libldap_r))
|
||||
|
||||
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
|
||||
index cd86138ea..b8c14cd14 100644
|
||||
--- a/rpm/389-ds-base.spec.in
|
||||
+++ b/rpm/389-ds-base.spec.in
|
||||
@@ -65,6 +65,7 @@ Provides: ldif2ldbm
|
||||
# Attach the buildrequires to the top level package:
|
||||
BuildRequires: nspr-devel
|
||||
BuildRequires: nss-devel >= 3.34
|
||||
+BuildRequires: openldap-clients
|
||||
BuildRequires: openldap-devel
|
||||
BuildRequires: libdb-devel
|
||||
BuildRequires: cyrus-sasl-devel
|
||||
--
|
||||
2.49.0
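
A hedged sketch of the same version detection outside of configure, assuming only that an `ldapsearch` binary is on PATH. Like the pipe-based replacement above, it reads the version banner that `ldapsearch -VV` prints on stderr.

```
import re
import subprocess

def openldap_client_version():
    # ldapsearch -VV writes its banner to stderr and usually exits
    # non-zero, so capture stderr and do not require a zero return code.
    proc = subprocess.run(["ldapsearch", "-VV"], capture_output=True, text=True)
    match = re.search(r"ldapsearch (\d+\.\d+\.\d+)", proc.stderr)
    return match.group(1) if match else None

if __name__ == "__main__":
    print(openldap_client_version())
```
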
@ -1,245 +0,0 @@
|
||||
From eca6f5fe18f768fd407d38c85624a5212bcf16ab Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Wed, 27 Sep 2023 15:40:33 -0700
|
||||
Subject: [PATCH] Issue 1925 - Add a CI test (#5936)
|
||||
|
||||
Description: Verify that the issue is not present. Cover the scenario where
we remove existing VLVs, create new VLVs (with the same names), and then
do online re-indexing.

Related: https://github.com/389ds/389-ds-base/issues/1925

Reviewed by: @progier389 (Thanks!)

(cherry picked from commit 9633e8d32d28345409680f8e462fb4a53d3b4f83)
|
||||
---
|
||||
.../tests/suites/vlv/regression_test.py | 175 +++++++++++++++---
|
||||
1 file changed, 145 insertions(+), 30 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
index 6ab709bd3..536fe950f 100644
|
||||
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2018 Red Hat, Inc.
|
||||
+# Copyright (C) 2023 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -9,12 +9,16 @@
|
||||
import pytest, time
|
||||
from lib389.tasks import *
|
||||
from lib389.utils import *
|
||||
-from lib389.topologies import topology_m2
|
||||
+from lib389.topologies import topology_m2, topology_st
|
||||
from lib389.replica import *
|
||||
from lib389._constants import *
|
||||
+from lib389.properties import TASK_WAIT
|
||||
from lib389.index import *
|
||||
from lib389.mappingTree import *
|
||||
from lib389.backend import *
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from ldap.controls.vlv import VLVRequestControl
|
||||
+from ldap.controls.sss import SSSRequestControl
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
@@ -22,6 +26,88 @@ logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
+def open_new_ldapi_conn(dsinstance):
|
||||
+ ldapurl, certdir = get_ldapurl_from_serverid(dsinstance)
|
||||
+ assert 'ldapi://' in ldapurl
|
||||
+ conn = ldap.initialize(ldapurl)
|
||||
+ conn.sasl_interactive_bind_s("", ldap.sasl.external())
|
||||
+ return conn
|
||||
+
|
||||
+
|
||||
+def check_vlv_search(conn):
|
||||
+ before_count=1
|
||||
+ after_count=3
|
||||
+ offset=3501
|
||||
+
|
||||
+ vlv_control = VLVRequestControl(criticality=True,
|
||||
+ before_count=before_count,
|
||||
+ after_count=after_count,
|
||||
+ offset=offset,
|
||||
+ content_count=0,
|
||||
+ greater_than_or_equal=None,
|
||||
+ context_id=None)
|
||||
+
|
||||
+ sss_control = SSSRequestControl(criticality=True, ordering_rules=['cn'])
|
||||
+ result = conn.search_ext_s(
|
||||
+ base='dc=example,dc=com',
|
||||
+ scope=ldap.SCOPE_SUBTREE,
|
||||
+ filterstr='(uid=*)',
|
||||
+ serverctrls=[vlv_control, sss_control]
|
||||
+ )
|
||||
+ imin = offset + 998 - before_count
|
||||
+ imax = offset + 998 + after_count
|
||||
+
|
||||
+ for i, (dn, entry) in enumerate(result, start=imin):
|
||||
+ assert i <= imax
|
||||
+ expected_dn = f'uid=testuser{i},ou=People,dc=example,dc=com'
|
||||
+ log.debug(f'found {dn} expected {expected_dn}')
|
||||
+ assert dn.lower() == expected_dn.lower()
|
||||
+
|
||||
+
|
||||
+def add_users(inst, users_num):
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ log.info(f'Adding {users_num} users')
|
||||
+ for i in range(0, users_num):
|
||||
+ uid = 1000 + i
|
||||
+ user_properties = {
|
||||
+ 'uid': f'testuser{uid}',
|
||||
+ 'cn': f'testuser{uid}',
|
||||
+ 'sn': 'user',
|
||||
+ 'uidNumber': str(uid),
|
||||
+ 'gidNumber': str(uid),
|
||||
+ 'homeDirectory': f'/home/testuser{uid}'
|
||||
+ }
|
||||
+ users.create(properties=user_properties)
|
||||
+
|
||||
+
|
||||
+
|
||||
+def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
|
||||
+ scope=ldap.SCOPE_SUBTREE, prefix="vlv", vlvsort="cn"):
|
||||
+ vlv_searches = VLVSearch(inst)
|
||||
+ vlv_search_properties = {
|
||||
+ "objectclass": ["top", "vlvSearch"],
|
||||
+ "cn": f"{prefix}Srch",
|
||||
+ "vlvbase": basedn,
|
||||
+ "vlvfilter": "(uid=*)",
|
||||
+ "vlvscope": str(scope),
|
||||
+ }
|
||||
+ vlv_searches.create(
|
||||
+ basedn=f"cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
+ properties=vlv_search_properties
|
||||
+ )
|
||||
+
|
||||
+ vlv_index = VLVIndex(inst)
|
||||
+ vlv_index_properties = {
|
||||
+ "objectclass": ["top", "vlvIndex"],
|
||||
+ "cn": f"{prefix}Idx",
|
||||
+ "vlvsort": vlvsort,
|
||||
+ }
|
||||
+ vlv_index.create(
|
||||
+ basedn=f"cn={prefix}Srch,cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
+ properties=vlv_index_properties
|
||||
+ )
|
||||
+ return vlv_searches, vlv_index
|
||||
+
|
||||
class BackendHandler:
|
||||
def __init__(self, inst, bedict, scope=ldap.SCOPE_ONELEVEL):
|
||||
self.inst = inst
|
||||
@@ -101,34 +187,6 @@ class BackendHandler:
|
||||
'dn' : dn}
|
||||
|
||||
|
||||
-def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
|
||||
- scope=ldap.SCOPE_SUBTREE, prefix="vlv", vlvsort="cn"):
|
||||
- vlv_searches = VLVSearch(inst)
|
||||
- vlv_search_properties = {
|
||||
- "objectclass": ["top", "vlvSearch"],
|
||||
- "cn": f"{prefix}Srch",
|
||||
- "vlvbase": basedn,
|
||||
- "vlvfilter": "(uid=*)",
|
||||
- "vlvscope": str(scope),
|
||||
- }
|
||||
- vlv_searches.create(
|
||||
- basedn=f"cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
- properties=vlv_search_properties
|
||||
- )
|
||||
-
|
||||
- vlv_index = VLVIndex(inst)
|
||||
- vlv_index_properties = {
|
||||
- "objectclass": ["top", "vlvIndex"],
|
||||
- "cn": f"{prefix}Idx",
|
||||
- "vlvsort": vlvsort,
|
||||
- }
|
||||
- vlv_index.create(
|
||||
- basedn=f"cn={prefix}Srch,cn={bename},cn=ldbm database,cn=plugins,cn=config",
|
||||
- properties=vlv_index_properties
|
||||
- )
|
||||
- return vlv_searches, vlv_index
|
||||
-
|
||||
-
|
||||
@pytest.fixture
|
||||
def vlv_setup_with_uid_mr(topology_st, request):
|
||||
inst = topology_st.standalone
|
||||
@@ -245,6 +303,62 @@ def test_bulk_import_when_the_backend_with_vlv_was_recreated(topology_m2):
|
||||
entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)")
|
||||
|
||||
|
||||
+def test_vlv_recreation_reindex(topology_st):
|
||||
+ """Test VLV recreation and reindexing.
|
||||
+
|
||||
+ :id: 29f4567f-4ac0-410f-bc99-a32e217a939f
|
||||
+ :setup: Standalone instance.
|
||||
+ :steps:
|
||||
+ 1. Create new VLVs and do the reindex.
|
||||
+ 2. Test the new VLVs.
|
||||
+ 3. Remove the existing VLVs.
|
||||
+ 4. Create new VLVs (with the same name).
|
||||
+ 5. Perform online re-indexing of the new VLVs.
|
||||
+ 6. Test the new VLVs.
|
||||
+ :expectedresults:
|
||||
+ 1. Should Success.
|
||||
+ 2. Should Success.
|
||||
+ 3. Should Success.
|
||||
+ 4. Should Success.
|
||||
+ 5. Should Success.
|
||||
+ 6. Should Success.
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+ reindex_task = Tasks(inst)
|
||||
+
|
||||
+ # Create and test VLVs
|
||||
+ vlv_search, vlv_index = create_vlv_search_and_index(inst)
|
||||
+ assert reindex_task.reindex(
|
||||
+ suffix=DEFAULT_SUFFIX,
|
||||
+ attrname=vlv_index.rdn,
|
||||
+ args={TASK_WAIT: True},
|
||||
+ vlv=True
|
||||
+ ) == 0
|
||||
+
|
||||
+ add_users(inst, 5000)
|
||||
+
|
||||
+ conn = open_new_ldapi_conn(inst.serverid)
|
||||
+ assert len(conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(cn=*)")) > 0
|
||||
+ check_vlv_search(conn)
|
||||
+
|
||||
+ # Remove and recreate VLVs
|
||||
+ vlv_index.delete()
|
||||
+ vlv_search.delete()
|
||||
+
|
||||
+ vlv_search, vlv_index = create_vlv_search_and_index(inst)
|
||||
+ assert reindex_task.reindex(
|
||||
+ suffix=DEFAULT_SUFFIX,
|
||||
+ attrname=vlv_index.rdn,
|
||||
+ args={TASK_WAIT: True},
|
||||
+ vlv=True
|
||||
+ ) == 0
|
||||
+
|
||||
+ conn = open_new_ldapi_conn(inst.serverid)
|
||||
+ assert len(conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(cn=*)")) > 0
|
||||
+ check_vlv_search(conn)
|
||||
+
|
||||
+
|
||||
def test_vlv_with_mr(vlv_setup_with_uid_mr):
|
||||
"""
|
||||
Testing vlv having specific matching rule
|
||||
@@ -288,6 +402,7 @@ def test_vlv_with_mr(vlv_setup_with_uid_mr):
|
||||
assert inst.status()
|
||||
|
||||
|
||||
+
|
||||
if __name__ == "__main__":
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
--
|
||||
2.49.0
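
For readers who have not used these controls before, here is a minimal sketch of how the VLV and server-side-sorting controls from check_vlv_search combine in one search. The LDAPI URL and base DN are placeholders.

```
import ldap
import ldap.sasl
from ldap.controls.vlv import VLVRequestControl
from ldap.controls.sss import SSSRequestControl

# Placeholder connection details for illustration only.
conn = ldap.initialize("ldapi:///")
conn.sasl_interactive_bind_s("", ldap.sasl.external())

# Sort by cn, then request a window of one entry before and three entries
# after offset 3501, exactly as the test helper does.
sss = SSSRequestControl(criticality=True, ordering_rules=["cn"])
vlv = VLVRequestControl(criticality=True, before_count=1, after_count=3,
                        offset=3501, content_count=0)

entries = conn.search_ext_s("dc=example,dc=com", ldap.SCOPE_SUBTREE,
                            "(uid=*)", serverctrls=[vlv, sss])
for dn, attrs in entries:
    print(dn)
```
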
@ -1,75 +0,0 @@
|
||||
From af3fa90f91efda86f4337e8823bca6581ab61792 Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Fri, 7 Feb 2025 09:43:08 +0100
|
||||
Subject: [PATCH] Issue 6494 - (2nd) Various errors when using extended
|
||||
matching rule on vlv sort filter
|
||||
|
||||
---
|
||||
.../tests/suites/indexes/regression_test.py | 40 +++++++++++++++++++
|
||||
1 file changed, 40 insertions(+)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
index 2196fb2ed..b5bcccc8f 100644
|
||||
--- a/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/indexes/regression_test.py
|
||||
@@ -11,17 +11,57 @@ import os
|
||||
import pytest
|
||||
import ldap
|
||||
from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX
|
||||
+from lib389.backend import Backend, Backends, DatabaseConfig
|
||||
from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate
|
||||
+from lib389.dbgen import dbgen_users
|
||||
from lib389.index import Indexes
|
||||
from lib389.backend import Backends
|
||||
from lib389.idm.user import UserAccounts
|
||||
from lib389.topologies import topology_st as topo
|
||||
from lib389.utils import ds_is_older
|
||||
from lib389.idm.nscontainer import nsContainer
|
||||
+from lib389.properties import TASK_WAIT
|
||||
+from lib389.tasks import Tasks, Task
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
|
||||
+SUFFIX2 = 'dc=example2,dc=com'
|
||||
+BENAME2 = 'be2'
|
||||
+
|
||||
+DEBUGGING = os.getenv("DEBUGGING", default=False)
|
||||
+
|
||||
+@pytest.fixture(scope="function")
|
||||
+def add_backend_and_ldif_50K_users(request, topo):
|
||||
+ """
|
||||
+ Add an empty backend and associated 50K users ldif file
|
||||
+ """
|
||||
+
|
||||
+ tasks = Tasks(topo.standalone)
|
||||
+ import_ldif = f'{topo.standalone.ldifdir}/be2_50K_users.ldif'
|
||||
+ be2 = Backend(topo.standalone)
|
||||
+ be2.create(properties={
|
||||
+ 'cn': BENAME2,
|
||||
+ 'nsslapd-suffix': SUFFIX2,
|
||||
+ },
|
||||
+ )
|
||||
+
|
||||
+ def fin():
|
||||
+ nonlocal be2
|
||||
+ if not DEBUGGING:
|
||||
+ be2.delete()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+ parent = f'ou=people,{SUFFIX2}'
|
||||
+ dbgen_users(topo.standalone, 50000, import_ldif, SUFFIX2, generic=True, parent=parent)
|
||||
+ assert tasks.importLDIF(
|
||||
+ suffix=SUFFIX2,
|
||||
+ input_file=import_ldif,
|
||||
+ args={TASK_WAIT: True}
|
||||
+ ) == 0
|
||||
+
|
||||
+ return import_ldif
|
||||
+
|
||||
@pytest.fixture(scope="function")
|
||||
def add_a_group_with_users(request, topo):
|
||||
"""
|
||||
--
|
||||
2.49.0
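
A condensed sketch of what this fixture does, assuming a lib389 instance object: create a second backend, generate a 50K-user LDIF with dbgen_users, and import it with a task.

```
from lib389.backend import Backend
from lib389.dbgen import dbgen_users
from lib389.properties import TASK_WAIT
from lib389.tasks import Tasks

def populate_second_backend(inst):
    """Sketch: create an extra backend and bulk-load generated users."""
    suffix2 = "dc=example2,dc=com"          # placeholder suffix
    ldif = f"{inst.ldifdir}/be2_50K_users.ldif"
    be2 = Backend(inst)
    be2.create(properties={"cn": "be2", "nsslapd-suffix": suffix2})
    dbgen_users(inst, 50000, ldif, suffix2, generic=True,
                parent=f"ou=people,{suffix2}")
    # Online import, waiting for the task to finish.
    assert Tasks(inst).importLDIF(suffix=suffix2, input_file=ldif,
                                  args={TASK_WAIT: True}) == 0
```
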
@ -1,45 +0,0 @@
|
||||
From 0ad0eb34972c99f30334d7d420f3056e0e794d74 Mon Sep 17 00:00:00 2001
|
||||
From: Thierry Bordaz <tbordaz@redhat.com>
|
||||
Date: Fri, 7 Feb 2025 14:33:46 +0100
|
||||
Subject: [PATCH] Issue 6494 - (3rd) Various errors when using extended
|
||||
matching rule on vlv sort filter
|
||||
|
||||
(cherry picked from the commit f2f917ca55c34c81b578bce1dd5275abff6abb72)
|
||||
---
|
||||
dirsrvtests/tests/suites/vlv/regression_test.py | 8 ++++++--
|
||||
1 file changed, 6 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
index 536fe950f..d069fdbaf 100644
|
||||
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
@@ -16,12 +16,16 @@ from lib389.properties import TASK_WAIT
|
||||
from lib389.index import *
|
||||
from lib389.mappingTree import *
|
||||
from lib389.backend import *
|
||||
-from lib389.idm.user import UserAccounts
|
||||
+from lib389.idm.user import UserAccounts, UserAccount
|
||||
+from lib389.idm.organization import Organization
|
||||
+from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
from ldap.controls.vlv import VLVRequestControl
|
||||
from ldap.controls.sss import SSSRequestControl
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
+DEMO_PW = 'secret12'
|
||||
+
|
||||
logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@@ -169,7 +173,7 @@ class BackendHandler:
|
||||
'loginShell': '/bin/false',
|
||||
'userpassword': DEMO_PW })
|
||||
# Add regular user
|
||||
- add_users(self.inst, 10, suffix=suffix)
|
||||
+ add_users(self.inst, 10)
|
||||
# Removing ou2
|
||||
ou2.delete()
|
||||
# And export
|
||||
--
|
||||
2.49.0
@ -1,72 +0,0 @@
|
||||
From 52041811b200292af6670490c9ebc1f599439a22 Mon Sep 17 00:00:00 2001
|
||||
From: Masahiro Matsuya <mmatsuya@redhat.com>
|
||||
Date: Sat, 22 Mar 2025 01:25:25 +0900
|
||||
Subject: [PATCH] Issue 6494 - (4th) Various errors when using extended
|
||||
matching rule on vlv sort filter
|
||||

test_vlv_with_mr uses the vlv_setup_with_uid_mr fixture to set up a backend
and test users. The add_users function is called in beh.setup without a
suffix for the created backend. As a result, add_users always creates the
test users in DEFAULT_SUFFIX. Another test, such as test_vlv_recreation_reindex,
can create the same test user in DEFAULT_SUFFIX, which caused the
ALREADY_EXISTS failure in the test_vlv_with_mr test.

In the main branch, add_users has a suffix argument: test users are created
under the specified suffix, and the backend is cleaned up after the test.
This PR follows the same implementation.

Also, suppressing ldap.ALREADY_EXISTS makes the add_users function easier
to reuse.

Related: https://github.com/389ds/389-ds-base/issues/6494
|
||||
---
|
||||
dirsrvtests/tests/suites/vlv/regression_test.py | 11 ++++++-----
|
||||
1 file changed, 6 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
index d069fdbaf..e9408117b 100644
|
||||
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
|
||||
@@ -21,6 +21,7 @@ from lib389.idm.organization import Organization
|
||||
from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
from ldap.controls.vlv import VLVRequestControl
|
||||
from ldap.controls.sss import SSSRequestControl
|
||||
+from contextlib import suppress
|
||||
|
||||
pytestmark = pytest.mark.tier1
|
||||
|
||||
@@ -68,8 +69,8 @@ def check_vlv_search(conn):
|
||||
assert dn.lower() == expected_dn.lower()
|
||||
|
||||
|
||||
-def add_users(inst, users_num):
|
||||
- users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+def add_users(inst, users_num, suffix=DEFAULT_SUFFIX):
|
||||
+ users = UserAccounts(inst, suffix)
|
||||
log.info(f'Adding {users_num} users')
|
||||
for i in range(0, users_num):
|
||||
uid = 1000 + i
|
||||
@@ -81,8 +82,8 @@ def add_users(inst, users_num):
|
||||
'gidNumber': str(uid),
|
||||
'homeDirectory': f'/home/testuser{uid}'
|
||||
}
|
||||
- users.create(properties=user_properties)
|
||||
-
|
||||
+ with suppress(ldap.ALREADY_EXISTS):
|
||||
+ users.create(properties=user_properties)
|
||||
|
||||
|
||||
def create_vlv_search_and_index(inst, basedn=DEFAULT_SUFFIX, bename='userRoot',
|
||||
@@ -173,7 +174,7 @@ class BackendHandler:
|
||||
'loginShell': '/bin/false',
|
||||
'userpassword': DEMO_PW })
|
||||
# Add regular user
|
||||
- add_users(self.inst, 10)
|
||||
+ add_users(self.inst, 10, suffix=suffix)
|
||||
# Removing ou2
|
||||
ou2.delete()
|
||||
# And export
|
||||
--
|
||||
2.49.0
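
The key pattern here is idempotent entry creation. A small sketch under the same assumptions as the test (lib389 UserAccounts, an example suffix):

```
from contextlib import suppress

import ldap
from lib389.idm.user import UserAccounts

def add_users(inst, users_num, suffix="dc=example,dc=com"):
    users = UserAccounts(inst, suffix)
    for i in range(users_num):
        uid = 1000 + i
        # If a previous test already created this entry under the same
        # suffix, move on instead of failing the whole fixture.
        with suppress(ldap.ALREADY_EXISTS):
            users.create(properties={
                "uid": f"testuser{uid}",
                "cn": f"testuser{uid}",
                "sn": "user",
                "uidNumber": str(uid),
                "gidNumber": str(uid),
                "homeDirectory": f"/home/testuser{uid}",
            })
```
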
@ -1,357 +0,0 @@
|
||||
From b812afe4da6db134c1221eb48a6155480e4c2cb3 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Tue, 14 Jan 2025 13:55:03 -0500
|
||||
Subject: [PATCH] Issue 6497 - lib389 - Configure replication for multiple
|
||||
suffixes (#6498)
|
||||
|
||||
Bug Description: When trying to set up replication across multiple suffixes -
particularly if one of those suffixes is a subsuffix - lib389 fails to properly
configure the replication agreements, service accounts, and required groups.
The references to the replication_managers group and the service account
naming do not correctly account for non-default additional suffixes.

Fix Description: Ensure replication DNs and credentials are correctly tied to each suffix.
Enable the DSLdapObject.present method to compare values as
normalized DNs when they are DNs.
Add a test (test_multi_subsuffix_replication) to verify multi-suffix
replication across four suppliers.
Fix the tests related to replication service accounts.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6497
|
||||
|
||||
Reviewed: @progier389 (Thanks!)
|
||||
---
|
||||
.../tests/suites/ds_tools/replcheck_test.py | 4 +-
|
||||
.../suites/replication/acceptance_test.py | 153 ++++++++++++++++++
|
||||
.../cleanallruv_shutdown_crash_test.py | 4 +-
|
||||
.../suites/replication/regression_m2_test.py | 2 +-
|
||||
.../replication/tls_client_auth_repl_test.py | 4 +-
|
||||
src/lib389/lib389/_mapped_object.py | 21 ++-
|
||||
src/lib389/lib389/replica.py | 10 +-
|
||||
7 files changed, 182 insertions(+), 16 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/ds_tools/replcheck_test.py b/dirsrvtests/tests/suites/ds_tools/replcheck_test.py
|
||||
index f61fc432d..dfa1d9423 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_tools/replcheck_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_tools/replcheck_test.py
|
||||
@@ -67,10 +67,10 @@ def topo_tls_ldapi(topo):
|
||||
|
||||
# Create the replication dns
|
||||
services = ServiceAccounts(m1, DEFAULT_SUFFIX)
|
||||
- repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
|
||||
+ repl_m1 = services.get(f'{DEFAULT_SUFFIX}:{m1.host}:{m1.sslport}')
|
||||
repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())
|
||||
|
||||
- repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
|
||||
+ repl_m2 = services.get(f'{DEFAULT_SUFFIX}:{m2.host}:{m2.sslport}')
|
||||
repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())
|
||||
|
||||
# Check the replication is "done".
|
||||
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
index d1cfa8bdb..fc8622051 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
@@ -9,6 +9,7 @@
|
||||
import pytest
|
||||
import logging
|
||||
import time
|
||||
+from lib389.backend import Backend
|
||||
from lib389.replica import Replicas
|
||||
from lib389.tasks import *
|
||||
from lib389.utils import *
|
||||
@@ -325,6 +326,158 @@ def test_modify_stripattrs(topo_m4):
|
||||
assert attr_value in entries[0].data['nsds5replicastripattrs']
|
||||
|
||||
|
||||
+def test_multi_subsuffix_replication(topo_m4):
|
||||
+ """Check that replication works with multiple subsuffixes
|
||||
+
|
||||
+ :id: ac1aaeae-173e-48e7-847f-03b9867443c4
|
||||
+ :setup: Four suppliers replication setup
|
||||
+ :steps:
|
||||
+ 1. Create additional suffixes
|
||||
+ 2. Setup replication for all suppliers
|
||||
+ 3. Generate test data for each suffix (add, modify, remove)
|
||||
+ 4. Wait for replication to complete across all suppliers for each suffix
|
||||
+ 5. Check that all expected data is present on all suppliers
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Success (the data is replicated everywhere)
|
||||
+ """
|
||||
+
|
||||
+ SUFFIX_2 = "dc=test2"
|
||||
+ SUFFIX_3 = f"dc=test3,{DEFAULT_SUFFIX}"
|
||||
+ all_suffixes = [DEFAULT_SUFFIX, SUFFIX_2, SUFFIX_3]
|
||||
+
|
||||
+ test_users_by_suffix = {suffix: [] for suffix in all_suffixes}
|
||||
+ created_backends = []
|
||||
+
|
||||
+ suppliers = [
|
||||
+ topo_m4.ms["supplier1"],
|
||||
+ topo_m4.ms["supplier2"],
|
||||
+ topo_m4.ms["supplier3"],
|
||||
+ topo_m4.ms["supplier4"]
|
||||
+ ]
|
||||
+
|
||||
+ try:
|
||||
+ # Setup additional backends and replication for the new suffixes
|
||||
+ for suffix in [SUFFIX_2, SUFFIX_3]:
|
||||
+ repl = ReplicationManager(suffix)
|
||||
+ for supplier in suppliers:
|
||||
+ # Create a new backend for this suffix
|
||||
+ props = {
|
||||
+ 'cn': f'userRoot_{suffix.split(",")[0][3:]}',
|
||||
+ 'nsslapd-suffix': suffix
|
||||
+ }
|
||||
+ be = Backend(supplier)
|
||||
+ be.create(properties=props)
|
||||
+ be.create_sample_entries('001004002')
|
||||
+
|
||||
+ # Track the backend so we can remove it later
|
||||
+ created_backends.append((supplier, props['cn']))
|
||||
+
|
||||
+ # Enable replication
|
||||
+ if supplier == suppliers[0]:
|
||||
+ repl.create_first_supplier(supplier)
|
||||
+ else:
|
||||
+ repl.join_supplier(suppliers[0], supplier)
|
||||
+
|
||||
+ # Create a full mesh topology for this suffix
|
||||
+ for i, supplier_i in enumerate(suppliers):
|
||||
+ for j, supplier_j in enumerate(suppliers):
|
||||
+ if i != j:
|
||||
+ repl.ensure_agreement(supplier_i, supplier_j)
|
||||
+
|
||||
+ # Generate test data for each suffix (add, modify, remove)
|
||||
+ for suffix in all_suffixes:
|
||||
+ # Create some user entries in supplier1
|
||||
+ for i in range(20):
|
||||
+ user_dn = f'uid=test_user_{i},{suffix}'
|
||||
+ test_user = UserAccount(suppliers[0], user_dn)
|
||||
+ test_user.create(properties={
|
||||
+ 'uid': f'test_user_{i}',
|
||||
+ 'cn': f'Test User {i}',
|
||||
+ 'sn': f'User{i}',
|
||||
+ 'userPassword': 'password',
|
||||
+ 'uidNumber': str(1000 + i),
|
||||
+ 'gidNumber': '2000',
|
||||
+ 'homeDirectory': f'/home/test_user_{i}'
|
||||
+ })
|
||||
+ test_users_by_suffix[suffix].append(test_user)
|
||||
+
|
||||
+ # Perform modifications on these entries
|
||||
+ for user in test_users_by_suffix[suffix]:
|
||||
+ # Add some attributes
|
||||
+ for j in range(3):
|
||||
+ user.add('description', f'Description {j}')
|
||||
+ # Replace an attribute
|
||||
+ user.replace('cn', f'Modified User {user.get_attr_val_utf8("uid")}')
|
||||
+ # Delete the attributes we added
|
||||
+ for j in range(3):
|
||||
+ try:
|
||||
+ user.remove('description', f'Description {j}')
|
||||
+ except Exception:
|
||||
+ pass
|
||||
+
|
||||
+ # Wait for replication to complete across all suppliers, for each suffix
|
||||
+ for suffix in all_suffixes:
|
||||
+ repl = ReplicationManager(suffix)
|
||||
+ for i, supplier_i in enumerate(suppliers):
|
||||
+ for j, supplier_j in enumerate(suppliers):
|
||||
+ if i != j:
|
||||
+ repl.wait_for_replication(supplier_i, supplier_j)
|
||||
+
|
||||
+ # Verify that each user and modification replicated to all suppliers
|
||||
+ for suffix in all_suffixes:
|
||||
+ for i in range(20):
|
||||
+ user_dn = f'uid=test_user_{i},{suffix}'
|
||||
+ # Retrieve this user from all suppliers
|
||||
+ all_user_objs = topo_m4.all_get_dsldapobject(user_dn, UserAccount)
|
||||
+ # Ensure it exists in all 4 suppliers
|
||||
+ assert len(all_user_objs) == 4, (
|
||||
+ f"User {user_dn} not found on all suppliers. "
|
||||
+ f"Found only on {len(all_user_objs)} suppliers."
|
||||
+ )
|
||||
+ # Check modifications: 'cn' should now be 'Modified User test_user_{i}'
|
||||
+ for user_obj in all_user_objs:
|
||||
+ expected_cn = f"Modified User test_user_{i}"
|
||||
+ actual_cn = user_obj.get_attr_val_utf8("cn")
|
||||
+ assert actual_cn == expected_cn, (
|
||||
+ f"User {user_dn} has unexpected 'cn': {actual_cn} "
|
||||
+ f"(expected '{expected_cn}') on supplier {user_obj._instance.serverid}"
|
||||
+ )
|
||||
+ # And check that 'description' attributes were removed
|
||||
+ desc_vals = user_obj.get_attr_vals_utf8('description')
|
||||
+ for j in range(3):
|
||||
+ assert f"Description {j}" not in desc_vals, (
|
||||
+ f"User {user_dn} on supplier {user_obj._instance.serverid} "
|
||||
+ f"still has 'Description {j}'"
|
||||
+ )
|
||||
+ finally:
|
||||
+ for suffix, test_users in test_users_by_suffix.items():
|
||||
+ for user in test_users:
|
||||
+ try:
|
||||
+ if user.exists():
|
||||
+ user.delete()
|
||||
+ except Exception:
|
||||
+ pass
|
||||
+
|
||||
+ for suffix in [SUFFIX_2, SUFFIX_3]:
|
||||
+ repl = ReplicationManager(suffix)
|
||||
+ for supplier in suppliers:
|
||||
+ try:
|
||||
+ repl.remove_supplier(supplier)
|
||||
+ except Exception:
|
||||
+ pass
|
||||
+
|
||||
+ for (supplier, backend_name) in created_backends:
|
||||
+ be = Backend(supplier, backend_name)
|
||||
+ try:
|
||||
+ be.delete()
|
||||
+ except Exception:
|
||||
+ pass
|
||||
+
|
||||
+
|
||||
def test_new_suffix(topo_m4, new_suffix):
|
||||
"""Check that we can enable replication on a new suffix
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py
|
||||
index b4b74e339..fe9955e7e 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py
|
||||
@@ -66,10 +66,10 @@ def test_clean_shutdown_crash(topology_m2):
|
||||
|
||||
log.info('Creating replication dns')
|
||||
services = ServiceAccounts(m1, DEFAULT_SUFFIX)
|
||||
- repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
|
||||
+ repl_m1 = services.get(f'{DEFAULT_SUFFIX}:{m1.host}:{m1.sslport}')
|
||||
repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())
|
||||
|
||||
- repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
|
||||
+ repl_m2 = services.get(f'{DEFAULT_SUFFIX}:{m2.host}:{m2.sslport}')
|
||||
repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())
|
||||
|
||||
log.info('Changing auth type')
|
||||
diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
index 72d4b9f89..9c707615f 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
|
||||
@@ -64,7 +64,7 @@ class _AgmtHelper:
|
||||
self.binddn = f'cn={cn},cn=config'
|
||||
else:
|
||||
self.usedn = False
|
||||
- self.cn = f'{self.from_inst.host}:{self.from_inst.sslport}'
|
||||
+ self.cn = ldap.dn.escape_dn_chars(f'{DEFAULT_SUFFIX}:{self.from_inst.host}:{self.from_inst.sslport}')
|
||||
self.binddn = f'cn={self.cn}, ou=Services, {DEFAULT_SUFFIX}'
|
||||
self.original_state = []
|
||||
self._pass = False
|
||||
diff --git a/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py b/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py
|
||||
index a00dc5b78..ca17554c7 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py
|
||||
@@ -56,10 +56,10 @@ def tls_client_auth(topo_m2):
|
||||
|
||||
# Create the replication dns
|
||||
services = ServiceAccounts(m1, DEFAULT_SUFFIX)
|
||||
- repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
|
||||
+ repl_m1 = services.get(f'{DEFAULT_SUFFIX}:{m1.host}:{m1.sslport}')
|
||||
repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())
|
||||
|
||||
- repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
|
||||
+ repl_m2 = services.get(f'{DEFAULT_SUFFIX}:{m2.host}:{m2.sslport}')
|
||||
repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())
|
||||
|
||||
# Check the replication is "done".
|
||||
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
|
||||
index b7391d8cc..ae00c95d0 100644
|
||||
--- a/src/lib389/lib389/_mapped_object.py
|
||||
+++ b/src/lib389/lib389/_mapped_object.py
|
||||
@@ -19,7 +19,7 @@ from lib389._constants import DIRSRV_STATE_ONLINE
|
||||
from lib389._mapped_object_lint import DSLint, DSLints
|
||||
from lib389.utils import (
|
||||
ensure_bytes, ensure_str, ensure_int, ensure_list_bytes, ensure_list_str,
|
||||
- ensure_list_int, display_log_value, display_log_data
|
||||
+ ensure_list_int, display_log_value, display_log_data, is_a_dn, normalizeDN
|
||||
)
|
||||
|
||||
# This function filter and term generation provided thanks to
|
||||
@@ -292,15 +292,28 @@ class DSLdapObject(DSLogging, DSLint):
|
||||
_search_ext_s(self._instance,self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[attr, ],
|
||||
serverctrls=self._server_controls, clientctrls=self._client_controls,
|
||||
escapehatch='i am sure')[0]
|
||||
- values = self.get_attr_vals_bytes(attr)
|
||||
+ values = self.get_attr_vals_utf8(attr)
|
||||
self._log.debug("%s contains %s" % (self._dn, values))
|
||||
|
||||
if value is None:
|
||||
# We are just checking if SOMETHING is present ....
|
||||
return len(values) > 0
|
||||
+
|
||||
+ # Otherwise, we are checking a specific value
|
||||
+ if is_a_dn(value):
|
||||
+ normalized_value = normalizeDN(value)
|
||||
else:
|
||||
- # Check if a value really does exist.
|
||||
- return ensure_bytes(value).lower() in [x.lower() for x in values]
|
||||
+ normalized_value = ensure_bytes(value).lower()
|
||||
+
|
||||
+ # Normalize each returned value depending on whether it is a DN
|
||||
+ normalized_values = []
|
||||
+ for v in values:
|
||||
+ if is_a_dn(v):
|
||||
+ normalized_values.append(normalizeDN(v))
|
||||
+ else:
|
||||
+ normalized_values.append(ensure_bytes(v.lower()))
|
||||
+
|
||||
+ return normalized_value in normalized_values
|
||||
|
||||
def add(self, key, value):
|
||||
"""Add an attribute with a value
|
||||
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
|
||||
index 1f321972d..cd46e86d5 100644
|
||||
--- a/src/lib389/lib389/replica.py
|
||||
+++ b/src/lib389/lib389/replica.py
|
||||
@@ -2011,7 +2011,7 @@ class ReplicationManager(object):
|
||||
return repl_group
|
||||
else:
|
||||
try:
|
||||
- repl_group = groups.get('replication_managers')
|
||||
+ repl_group = groups.get(dn=f'cn=replication_managers,{self._suffix}')
|
||||
return repl_group
|
||||
except ldap.NO_SUCH_OBJECT:
|
||||
self._log.warning("{} doesn't have cn=replication_managers,{} entry \
|
||||
@@ -2035,7 +2035,7 @@ class ReplicationManager(object):
|
||||
services = ServiceAccounts(from_instance, self._suffix)
|
||||
# Generate the password and save the credentials
|
||||
# for putting them into agreements in the future
|
||||
- service_name = '{}:{}'.format(to_instance.host, port)
|
||||
+ service_name = f'{self._suffix}:{to_instance.host}:{port}'
|
||||
creds = password_generate()
|
||||
repl_service = services.ensure_state(properties={
|
||||
'cn': service_name,
|
||||
@@ -2299,7 +2299,7 @@ class ReplicationManager(object):
|
||||
Internal Only.
|
||||
"""
|
||||
|
||||
- rdn = '{}:{}'.format(from_instance.host, from_instance.sslport)
|
||||
+ rdn = f'{self._suffix}:{from_instance.host}:{from_instance.sslport}'
|
||||
try:
|
||||
creds = self._repl_creds[rdn]
|
||||
except KeyError:
|
||||
@@ -2499,8 +2499,8 @@ class ReplicationManager(object):
|
||||
# Touch something then wait_for_replication.
|
||||
from_groups = Groups(from_instance, basedn=self._suffix, rdn=None)
|
||||
to_groups = Groups(to_instance, basedn=self._suffix, rdn=None)
|
||||
- from_group = from_groups.get('replication_managers')
|
||||
- to_group = to_groups.get('replication_managers')
|
||||
+ from_group = from_groups.get(dn=f'cn=replication_managers,{self._suffix}')
|
||||
+ to_group = to_groups.get(dn=f'cn=replication_managers,{self._suffix}')
|
||||
|
||||
change = str(uuid.uuid4())
|
||||
|
||||
--
|
||||
2.49.0
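
Two ideas from this patch are easy to miss in the diff: the replication service account RDN now embeds the suffix, and present() compares DN-valued attributes only after normalization. A hedged sketch of both, with placeholder host and port values:

```
from lib389.idm.services import ServiceAccounts
from lib389.utils import is_a_dn, normalizeDN

def get_repl_service_account(instance, suffix, host, sslport):
    """Fetch the per-suffix replication service account entry."""
    # With the suffix embedded in the RDN, two suffixes replicated between
    # the same pair of hosts no longer collide on the account name.
    services = ServiceAccounts(instance, suffix)
    return services.get(f'{suffix}:{host}:{sslport}')

def values_match(stored, candidate):
    """DN-aware comparison in the spirit of the updated present() method."""
    if is_a_dn(stored) and is_a_dn(candidate):
        # Case and spacing differences in stored DNs should not matter.
        return normalizeDN(stored) == normalizeDN(candidate)
    return stored.lower() == candidate.lower()
```
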
@ -1,126 +0,0 @@
|
||||
From ebe986c78c6cd4e1f10172d8a8a11faf814fbc22 Mon Sep 17 00:00:00 2001
|
||||
From: Mark Reynolds <mreynolds@redhat.com>
|
||||
Date: Thu, 6 Mar 2025 16:49:53 -0500
|
||||
Subject: [PATCH] Issue 6655 - fix replication release replica decoding error
|
||||
|
||||
Description:

When a "start replication session" extended op is received, acquire and
release exclusive access before returning the result to the client.
Otherwise there is a race condition where an "end" replication extended
op can arrive before the replica is released, which leads to a
decoding error on the other replica.
|
||||
|
||||
Relates: https://github.com/389ds/389-ds-base/issues/6655
|
||||
|
||||
Reviewed by: spichugi, tbordaz, and vashirov (Thanks!!!)
|
||||
---
|
||||
.../suites/replication/acceptance_test.py | 12 ++++++++++
|
||||
ldap/servers/plugins/replication/repl_extop.c | 24 ++++++++++++-------
|
||||
2 files changed, 27 insertions(+), 9 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
index fc8622051..0f18edb44 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
@@ -1,5 +1,9 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+<<<<<<< HEAD
|
||||
# Copyright (C) 2021 Red Hat, Inc.
|
||||
+=======
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+>>>>>>> a623c3f90 (Issue 6655 - fix replication release replica decoding error)
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -453,6 +457,13 @@ def test_multi_subsuffix_replication(topo_m4):
|
||||
f"User {user_dn} on supplier {user_obj._instance.serverid} "
|
||||
f"still has 'Description {j}'"
|
||||
)
|
||||
+
|
||||
+ # Check there are no decoding errors
|
||||
+ assert not topo_m4.ms["supplier1"].ds_error_log.match('.*decoding failed.*')
|
||||
+ assert not topo_m4.ms["supplier2"].ds_error_log.match('.*decoding failed.*')
|
||||
+ assert not topo_m4.ms["supplier3"].ds_error_log.match('.*decoding failed.*')
|
||||
+ assert not topo_m4.ms["supplier4"].ds_error_log.match('.*decoding failed.*')
|
||||
+
|
||||
finally:
|
||||
for suffix, test_users in test_users_by_suffix.items():
|
||||
for user in test_users:
|
||||
@@ -507,6 +518,7 @@ def test_new_suffix(topo_m4, new_suffix):
|
||||
repl.remove_supplier(m1)
|
||||
repl.remove_supplier(m2)
|
||||
|
||||
+
|
||||
def test_many_attrs(topo_m4, create_entry):
|
||||
"""Check a replication with many attributes (add and delete)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
|
||||
index 14b756df1..dacc611c0 100644
|
||||
--- a/ldap/servers/plugins/replication/repl_extop.c
|
||||
+++ b/ldap/servers/plugins/replication/repl_extop.c
|
||||
@@ -1134,6 +1134,12 @@ send_response:
|
||||
slapi_pblock_set(pb, SLAPI_EXT_OP_RET_OID, REPL_NSDS50_REPLICATION_RESPONSE_OID);
|
||||
}
|
||||
|
||||
+ /* connext (release our hold on it at least) */
|
||||
+ if (NULL != connext) {
|
||||
+ /* don't free it, just let go of it */
|
||||
+ consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
|
||||
+ }
|
||||
+
|
||||
slapi_pblock_set(pb, SLAPI_EXT_OP_RET_VALUE, resp_bval);
|
||||
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
|
||||
"multimaster_extop_StartNSDS50ReplicationRequest - "
|
||||
@@ -1251,12 +1257,6 @@ send_response:
|
||||
if (NULL != ruv_bervals) {
|
||||
ber_bvecfree(ruv_bervals);
|
||||
}
|
||||
- /* connext (our hold on it at least) */
|
||||
- if (NULL != connext) {
|
||||
- /* don't free it, just let go of it */
|
||||
- consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
|
||||
- connext = NULL;
|
||||
- }
|
||||
|
||||
return return_value;
|
||||
}
|
||||
@@ -1389,6 +1389,13 @@ multimaster_extop_EndNSDS50ReplicationRequest(Slapi_PBlock *pb)
|
||||
}
|
||||
}
|
||||
send_response:
|
||||
+ /* connext (release our hold on it at least) */
|
||||
+ if (NULL != connext) {
|
||||
+ /* don't free it, just let go of it */
|
||||
+ consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
|
||||
+ connext = NULL;
|
||||
+ }
|
||||
+
|
||||
/* Send the response code */
|
||||
if ((resp_bere = der_alloc()) == NULL) {
|
||||
goto free_and_return;
|
||||
@@ -1419,11 +1426,10 @@ free_and_return:
|
||||
if (NULL != resp_bval) {
|
||||
ber_bvfree(resp_bval);
|
||||
}
|
||||
- /* connext (our hold on it at least) */
|
||||
+ /* connext (release our hold on it if not already released) */
|
||||
if (NULL != connext) {
|
||||
/* don't free it, just let go of it */
|
||||
consumer_connection_extension_relinquish_exclusive_access(conn, connid, opid, PR_FALSE);
|
||||
- connext = NULL;
|
||||
}
|
||||
|
||||
return return_value;
|
||||
@@ -1516,7 +1522,7 @@ multimaster_extop_abort_cleanruv(Slapi_PBlock *pb)
|
||||
rid);
|
||||
}
|
||||
/*
|
||||
- * Get the replica
|
||||
+ * Get the replica
|
||||
*/
|
||||
if ((r = replica_get_replica_from_root(repl_root)) == NULL) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "multimaster_extop_abort_cleanruv - "
|
||||
--
|
||||
2.49.0
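
The new assertions boil down to one check repeated per supplier. A small helper sketch, assuming the same topo_m4-style topology object used by the test:

```
def assert_no_decoding_errors(topology):
    """Sketch of the log check added above: after replication traffic,
    none of the suppliers should have logged a decoding failure."""
    for name in ("supplier1", "supplier2", "supplier3", "supplier4"):
        inst = topology.ms[name]
        assert not inst.ds_error_log.match('.*decoding failed.*')
```
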
@ -1,26 +0,0 @@
|
||||
From 5b12463bfeb518f016acb14bc118b5f8ad3eef5e Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Thu, 15 May 2025 09:22:22 +0200
|
||||
Subject: [PATCH] Issue 6655 - fix merge conflict
|
||||
|
||||
---
|
||||
dirsrvtests/tests/suites/replication/acceptance_test.py | 4 ----
|
||||
1 file changed, 4 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
index 0f18edb44..6b5186127 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
|
||||
@@ -1,9 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-<<<<<<< HEAD
|
||||
-# Copyright (C) 2021 Red Hat, Inc.
|
||||
-=======
|
||||
# Copyright (C) 2025 Red Hat, Inc.
|
||||
->>>>>>> a623c3f90 (Issue 6655 - fix replication release replica decoding error)
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
--
|
||||
2.49.0
@ -1,291 +0,0 @@
|
||||
From 8d62124fb4d0700378b6f0669cc9d47338a8151c Mon Sep 17 00:00:00 2001
|
||||
From: tbordaz <tbordaz@redhat.com>
|
||||
Date: Tue, 25 Mar 2025 09:20:50 +0100
|
||||
Subject: [PATCH] Issue 6571 - Nested group does not receive memberOf attribute
|
||||
(#6679)
|
||||
|
||||
Bug description:
There is a risk of creating a loop in group membership.
For example, G2 is a member of G1 and G1 is a member of G2.
The memberOf plugin iterates from a node to its ancestors
to update the 'memberof' values of the node.
The plugin uses a valueset ('already_seen_ndn_vals')
to keep track of the nodes it has already visited.
It uses this valueset to detect a possible loop and,
in that case, it does not add the ancestor as a
memberof value of the node.
This is an error when there are multiple paths
up to an ancestor.

Fix description:
The ancestor should be added to the node systematically;
only if the ancestor is already in 'already_seen_ndn_vals'
does the plugin skip the final recursion.
|
||||
|
||||
fixes: #6571
|
||||
|
||||
Reviewed by: Pierre Rogier, Mark Reynolds (Thanks !!!)
|
||||
---
|
||||
.../suites/memberof_plugin/regression_test.py | 109 ++++++++++++++++++
|
||||
.../tests/suites/plugins/memberof_test.py | 5 +
|
||||
ldap/servers/plugins/memberof/memberof.c | 52 ++++-----
|
||||
3 files changed, 137 insertions(+), 29 deletions(-)
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
index 4c681a909..dba908975 100644
|
||||
--- a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
@@ -467,6 +467,21 @@ def _find_memberof_ext(server, user_dn=None, group_dn=None, find_result=True):
|
||||
else:
|
||||
assert (not found)
|
||||
|
||||
+def _check_membership(server, entry, expected_members, expected_memberof):
|
||||
+ assert server
|
||||
+ assert entry
|
||||
+
|
||||
+ memberof = entry.get_attr_vals('memberof')
|
||||
+ member = entry.get_attr_vals('member')
|
||||
+ assert len(member) == len(expected_members)
|
||||
+ assert len(memberof) == len(expected_memberof)
|
||||
+ for e in expected_members:
|
||||
+ server.log.info("Checking %s has member %s" % (entry.dn, e.dn))
|
||||
+ assert e.dn.encode() in member
|
||||
+ for e in expected_memberof:
|
||||
+ server.log.info("Checking %s is member of %s" % (entry.dn, e.dn))
|
||||
+ assert e.dn.encode() in memberof
|
||||
+
|
||||
|
||||
@pytest.mark.ds49161
|
||||
def test_memberof_group(topology_st):
|
||||
@@ -535,6 +550,100 @@ def test_memberof_group(topology_st):
|
||||
_find_memberof_ext(inst, dn1, g2n, True)
|
||||
_find_memberof_ext(inst, dn2, g2n, True)
|
||||
|
||||
+def test_multipaths(topology_st, request):
|
||||
+ """Test memberof succeeds to update memberof when
|
||||
+ there are multiple paths from a leaf to an intermediate node
|
||||
+
|
||||
+ :id: 35aa704a-b895-4153-9dcb-1e8a13612ebf
|
||||
+
|
||||
+ :setup: Single instance
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Create a graph G1->U1, G2->G21->U1
|
||||
+ 2. Add G2 as member of G1: G1->U1, G1->G2->G21->U1
|
||||
+ 3. Check members and memberof in entries G1,G2,G21,User1
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Graph should be created
|
||||
+ 2. succeed
|
||||
+ 3. Membership is okay
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+ memberof = MemberOfPlugin(inst)
|
||||
+ memberof.enable()
|
||||
+ memberof.replace('memberOfEntryScope', SUFFIX)
|
||||
+ if (memberof.get_memberofdeferredupdate() and memberof.get_memberofdeferredupdate().lower() == "on"):
|
||||
+ delay = 3
|
||||
+ else:
|
||||
+ delay = 0
|
||||
+ inst.restart()
|
||||
+
|
||||
+ #
|
||||
+ # Create the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ---------------> User1
|
||||
+ # ^
|
||||
+ # /
|
||||
+ # Grp2 ----> Grp21 ------/
|
||||
+ #
|
||||
+ users = UserAccounts(inst, SUFFIX, rdn=None)
|
||||
+ user1 = users.create(properties={'uid': "user1",
|
||||
+ 'cn': "user1",
|
||||
+ 'sn': 'SN',
|
||||
+ 'description': 'leaf',
|
||||
+ 'uidNumber': '1000',
|
||||
+ 'gidNumber': '2000',
|
||||
+ 'homeDirectory': '/home/user1'
|
||||
+ })
|
||||
+ group = Groups(inst, SUFFIX, rdn=None)
|
||||
+ g1 = group.create(properties={'cn': 'group1',
|
||||
+ 'member': user1.dn,
|
||||
+ 'description': 'group1'})
|
||||
+ g21 = group.create(properties={'cn': 'group21',
|
||||
+ 'member': user1.dn,
|
||||
+ 'description': 'group21'})
|
||||
+ g2 = group.create(properties={'cn': 'group2',
|
||||
+ 'member': [g21.dn],
|
||||
+ 'description': 'group2'})
|
||||
+
|
||||
+ # Enable debug logs if necessary
|
||||
+ #inst.config.replace('nsslapd-errorlog-level', '65536')
|
||||
+ #inst.config.set('nsslapd-accesslog-level','260')
|
||||
+ #inst.config.set('nsslapd-plugin-logging', 'on')
|
||||
+ #inst.config.set('nsslapd-auditlog-logging-enabled','on')
|
||||
+ #inst.config.set('nsslapd-auditfaillog-logging-enabled','on')
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # \ ^
|
||||
+ # \ /
|
||||
+ # --> Grp2 --> Grp21 --
|
||||
+ #
|
||||
+ g1.add_member(g2.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g1, expected_members=[g2, user1], expected_memberof=[])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g1])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2, g1])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
+
|
||||
+ def fin():
|
||||
+ try:
|
||||
+ user1.delete()
|
||||
+ g1.delete()
|
||||
+ g2.delete()
|
||||
+ g21.delete()
|
||||
+ except:
|
||||
+ pass
|
||||
+ request.addfinalizer(fin)
|
||||
|
||||
def _config_memberof_entrycache_on_modrdn_failure(server):
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/plugins/memberof_test.py b/dirsrvtests/tests/suites/plugins/memberof_test.py
|
||||
index 2de1389fd..621c45daf 100644
|
||||
--- a/dirsrvtests/tests/suites/plugins/memberof_test.py
|
||||
+++ b/dirsrvtests/tests/suites/plugins/memberof_test.py
|
||||
@@ -2168,9 +2168,14 @@ def test_complex_group_scenario_6(topology_st):
|
||||
|
||||
# add Grp[1-4] (uniqueMember) to grp5
|
||||
# it creates a membership loop !!!
|
||||
+ topology_st.standalone.config.replace('nsslapd-errorlog-level', '65536')
|
||||
mods = [(ldap.MOD_ADD, 'uniqueMember', memofegrp020_5)]
|
||||
for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
|
||||
topology_st.standalone.modify_s(ensure_str(grp), mods)
|
||||
+ topology_st.standalone.config.replace('nsslapd-errorlog-level', '0')
|
||||
+
|
||||
+ results = topology_st.standalone.ds_error_log.match('.*detecting a loop in group.*')
|
||||
+ assert results
|
||||
|
||||
time.sleep(5)
|
||||
# assert user[1-4] are member of grp20_[1-4]
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index e75b99b14..32bdcf3f1 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -1592,7 +1592,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn
|
||||
ht_grp = ancestors_cache_lookup(config, (const void *)ndn);
|
||||
if (ht_grp) {
|
||||
#if MEMBEROF_CACHE_DEBUG
|
||||
- slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s already cached (%x)\n", ndn, ht_grp);
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s already cached (%lx)\n", ndn, (ulong) ht_grp);
|
||||
#endif
|
||||
add_ancestors_cbdata(ht_grp, callback_data);
|
||||
*cached = 1;
@@ -1600,7 +1600,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn
}
}
#if MEMBEROF_CACHE_DEBUG
- slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s not cached\n", ndn);
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s not cached\n", slapi_sdn_get_ndn(sdn));
#endif

/* Escape the dn, and build the search filter. */
@@ -3233,7 +3233,8 @@ cache_ancestors(MemberOfConfig *config, Slapi_Value **member_ndn_val, memberof_g
return;
}
#if MEMBEROF_CACHE_DEBUG
- if (double_check = ancestors_cache_lookup(config, (const void*) key)) {
+ double_check = ancestors_cache_lookup(config, (const void*) key);
+ if (double_check) {
dump_cache_entry(double_check, "read back");
}
#endif
@@ -3263,13 +3264,13 @@ merge_ancestors(Slapi_Value **member_ndn_val, memberof_get_groups_data *v1, memb
sval_dn = slapi_value_new_string(slapi_value_get_string(sval));
if (sval_dn) {
/* Use the normalized dn from v1 to search it
- * in v2
- */
+ * in v2
+ */
val_sdn = slapi_sdn_new_dn_byval(slapi_value_get_string(sval_dn));
sval_ndn = slapi_value_new_string(slapi_sdn_get_ndn(val_sdn));
if (!slapi_valueset_find(
((memberof_get_groups_data *)v2)->config->group_slapiattrs[0], v2_group_norm_vals, sval_ndn)) {
-/* This ancestor was not already present in v2 => Add it
+ /* This ancestor was not already present in v2 => Add it
* Using slapi_valueset_add_value it consumes val
* so do not free sval
*/
@@ -3318,7 +3319,7 @@ memberof_get_groups_r(MemberOfConfig *config, Slapi_DN *member_sdn, memberof_get

merge_ancestors(&member_ndn_val, &member_data, data);
if (!cached && member_data.use_cache)
- cache_ancestors(config, &member_ndn_val, &member_data);
+ cache_ancestors(config, &member_ndn_val, data);

slapi_value_free(&member_ndn_val);
slapi_valueset_free(groupvals);
@@ -3379,25 +3380,6 @@ memberof_get_groups_callback(Slapi_Entry *e, void *callback_data)
goto bail;
}

- /* Have we been here before? Note that we don't loop through all of the group_slapiattrs
- * in config. We only need this attribute for it's syntax so the comparison can be
- * performed. Since all of the grouping attributes are validated to use the Dinstinguished
- * Name syntax, we can safely just use the first group_slapiattr. */
- if (slapi_valueset_find(
- ((memberof_get_groups_data *)callback_data)->config->group_slapiattrs[0], already_seen_ndn_vals, group_ndn_val)) {
- /* we either hit a recursive grouping, or an entry is
- * a member of a group through multiple paths. Either
- * way, we can just skip processing this entry since we've
- * already gone through this part of the grouping hierarchy. */
- slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_get_groups_callback - Possible group recursion"
- " detected in %s\n",
- group_ndn);
- slapi_value_free(&group_ndn_val);
- ((memberof_get_groups_data *)callback_data)->use_cache = PR_FALSE;
- goto bail;
- }
-
/* if the group does not belong to an excluded subtree, adds it to the valueset */
if (memberof_entry_in_scope(config, group_sdn)) {
/* Push group_dn_val into the valueset. This memory is now owned
@@ -3407,9 +3389,21 @@ memberof_get_groups_callback(Slapi_Entry *e, void *callback_data)
group_dn_val = slapi_value_new_string(group_dn);
slapi_valueset_add_value_ext(groupvals, group_dn_val, SLAPI_VALUE_FLAG_PASSIN);

- /* push this ndn to detect group recursion */
- already_seen_ndn_val = slapi_value_new_string(group_ndn);
- slapi_valueset_add_value_ext(already_seen_ndn_vals, already_seen_ndn_val, SLAPI_VALUE_FLAG_PASSIN);
+ if (slapi_valueset_find(
+ ((memberof_get_groups_data *)callback_data)->config->group_slapiattrs[0], already_seen_ndn_vals, group_ndn_val)) {
+ /* The group group_ndn_val has already been processed
+ * skip the final recursion to prevent infinite loop
+ */
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_get_groups_callback - detecting a loop in group %s (stop building memberof)\n",
+ group_ndn);
+ ((memberof_get_groups_data *)callback_data)->use_cache = PR_FALSE;
+ goto bail;
+ } else {
+ /* keep this ndn to detect a possible group recursion */
+ already_seen_ndn_val = slapi_value_new_string(group_ndn);
+ slapi_valueset_add_value_ext(already_seen_ndn_vals, already_seen_ndn_val, SLAPI_VALUE_FLAG_PASSIN);
+ }
}
if (!config->skip_nested || config->fixup_task) {
/* now recurse to find ancestors groups of e */
--
2.49.0

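The hunk above replaces the early "have we been here before" bail-out with a check that only stops the recursion once a group has already been processed, which is what keeps memberships reached through multiple paths intact while still breaking loops. A minimal Python sketch of the same visited-set idea (illustrative only, not part of the patch; the group graph and names are hypothetical):

def collect_memberof(groups_of, entry):
    # groups_of is assumed to be a dict mapping a DN to the list of group
    # DNs it is a direct member of (a stand-in for the LDAP searches the
    # plugin performs).
    result = []
    already_seen = set()

    def walk(dn):
        for group in groups_of.get(dn, []):
            if group in already_seen:
                # Reached through another path or a loop: nothing new to
                # record, and do not recurse down this branch again.
                continue
            already_seen.add(group)
            result.append(group)
            walk(group)

    walk(entry)
    return result

# Example: collect_memberof({'user1': ['g21', 'g1'], 'g21': ['g2', 'g1'], 'g2': ['g1']}, 'user1')
# returns ['g21', 'g2', 'g1'] -- each group once, despite the multiple paths.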
@ -1,272 +0,0 @@
From 17da0257b24749765777a4e64c3626cb39cca639 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 31 Mar 2025 11:05:01 +0200
Subject: [PATCH] Issue 6571 - (2nd) Nested group does not receive memberOf
attribute (#6697)

Bug description:
erroneous debug change made in previous fix
where cache_ancestors is called with the wrong parameter

Fix description:
Restore the original param 'member_data'
Increase the set of tests around multipaths

fixes: #6571

review by: Simon Pichugin (Thanks !!)
---
.../suites/memberof_plugin/regression_test.py | 154 ++++++++++++++++++
ldap/servers/plugins/memberof/memberof.c | 50 +++++-
2 files changed, 203 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
index dba908975..9ba40a0c3 100644
|
||||
--- a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
+++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
|
||||
@@ -598,6 +598,8 @@ def test_multipaths(topology_st, request):
|
||||
'homeDirectory': '/home/user1'
|
||||
})
|
||||
group = Groups(inst, SUFFIX, rdn=None)
|
||||
+ g0 = group.create(properties={'cn': 'group0',
|
||||
+ 'description': 'group0'})
|
||||
g1 = group.create(properties={'cn': 'group1',
|
||||
'member': user1.dn,
|
||||
'description': 'group1'})
|
||||
@@ -635,6 +637,158 @@ def test_multipaths(topology_st, request):
|
||||
_check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2, g1])
|
||||
_check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
|
||||
+ #inst.config.replace('nsslapd-errorlog-level', '65536')
|
||||
+ #inst.config.set('nsslapd-accesslog-level','260')
|
||||
+ #inst.config.set('nsslapd-plugin-logging', 'on')
|
||||
+ #inst.config.set('nsslapd-auditlog-logging-enabled','on')
|
||||
+ #inst.config.set('nsslapd-auditfaillog-logging-enabled','on')
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # ^
|
||||
+ # /
|
||||
+ # Grp2 --> Grp21 --
|
||||
+ #
|
||||
+ g1.remove_member(g2.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g1, expected_members=[user1], expected_memberof=[])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # \__________ ^
|
||||
+ # | /
|
||||
+ # v /
|
||||
+ # Grp2 --> Grp21 ----
|
||||
+ #
|
||||
+ g1.add_member(g21.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g1, expected_members=[user1, g21], expected_memberof=[])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2, g1])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # ^
|
||||
+ # /
|
||||
+ # Grp2 --> Grp21 --
|
||||
+ #
|
||||
+ g1.remove_member(g21.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g1, expected_members=[user1], expected_memberof=[])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1])
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # ^
|
||||
+ # /
|
||||
+ # Grp0 ---> Grp2 ---> Grp21 ---
|
||||
+ #
|
||||
+ g0.add_member(g2.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G0,G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g0, expected_members=[g2], expected_memberof=[])
|
||||
+ _check_membership(inst, g1, expected_members=[user1], expected_memberof=[])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g0])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g2])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1, g0])
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # ^ ^
|
||||
+ # / /
|
||||
+ # Grp0 ---> Grp2 ---> Grp21 ---
|
||||
+ #
|
||||
+ g0.add_member(g1.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G0,G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g0, expected_members=[g1,g2], expected_memberof=[])
|
||||
+ _check_membership(inst, g1, expected_members=[user1], expected_memberof=[g0])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g0])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g2])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1, g0])
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # ^ \_____________ ^
|
||||
+ # / | /
|
||||
+ # / V /
|
||||
+ # Grp0 ---> Grp2 ---> Grp21 ---
|
||||
+ #
|
||||
+ g1.add_member(g21.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G0,G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g0, expected_members=[g1, g2], expected_memberof=[])
|
||||
+ _check_membership(inst, g1, expected_members=[user1, g21], expected_memberof=[g0])
|
||||
+ _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g0])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g1, g2])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1, g0])
|
||||
+
|
||||
+ #
|
||||
+ # Update the hierarchy
|
||||
+ #
|
||||
+ #
|
||||
+ # Grp1 ----------------> User1
|
||||
+ # ^ \_____________ ^
|
||||
+ # / | /
|
||||
+ # / V /
|
||||
+ # Grp0 ---> Grp2 Grp21 ---
|
||||
+ #
|
||||
+ g2.remove_member(g21.dn)
|
||||
+ time.sleep(delay)
|
||||
+
|
||||
+ #
|
||||
+ # Check G0,G1, G2, G21 and User1 members and memberof
|
||||
+ #
|
||||
+ _check_membership(inst, g0, expected_members=[g1, g2], expected_memberof=[])
|
||||
+ _check_membership(inst, g1, expected_members=[user1, g21], expected_memberof=[g0])
|
||||
+ _check_membership(inst, g2, expected_members=[], expected_memberof=[g0])
|
||||
+ _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g0, g1])
|
||||
+ _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g1, g0])
|
||||
+
|
||||
def fin():
|
||||
try:
|
||||
user1.delete()
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index 32bdcf3f1..f79b083a9 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -3258,6 +3258,35 @@ merge_ancestors(Slapi_Value **member_ndn_val, memberof_get_groups_data *v1, memb
|
||||
Slapi_ValueSet *v2_group_norm_vals = *((memberof_get_groups_data *)v2)->group_norm_vals;
|
||||
int merged_cnt = 0;
|
||||
|
||||
+#if MEMBEROF_CACHE_DEBUG
|
||||
+ {
|
||||
+ Slapi_Value *val = 0;
|
||||
+ int hint = 0;
|
||||
+ struct berval *bv;
|
||||
+ hint = slapi_valueset_first_value(v2_groupvals, &val);
|
||||
+ while (val) {
|
||||
+ /* this makes a copy of the berval */
|
||||
+ bv = slapi_value_get_berval(val);
|
||||
+ if (bv && bv->bv_len) {
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "merge_ancestors: V2 contains %s\n",
|
||||
+ bv->bv_val);
|
||||
+ }
|
||||
+ hint = slapi_valueset_next_value(v2_groupvals, hint, &val);
|
||||
+ }
|
||||
+ hint = slapi_valueset_first_value(v1_groupvals, &val);
|
||||
+ while (val) {
|
||||
+ /* this makes a copy of the berval */
|
||||
+ bv = slapi_value_get_berval(val);
|
||||
+ if (bv && bv->bv_len) {
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "merge_ancestors: add %s (from V1)\n",
|
||||
+ bv->bv_val);
|
||||
+ }
|
||||
+ hint = slapi_valueset_next_value(v1_groupvals, hint, &val);
|
||||
+ }
|
||||
+ }
|
||||
+#endif
|
||||
hint = slapi_valueset_first_value(v1_groupvals, &sval);
|
||||
while (sval) {
|
||||
if (memberof_compare(config, member_ndn_val, &sval)) {
|
||||
@@ -3319,7 +3348,7 @@ memberof_get_groups_r(MemberOfConfig *config, Slapi_DN *member_sdn, memberof_get
|
||||
|
||||
merge_ancestors(&member_ndn_val, &member_data, data);
|
||||
if (!cached && member_data.use_cache)
|
||||
- cache_ancestors(config, &member_ndn_val, data);
|
||||
+ cache_ancestors(config, &member_ndn_val, &member_data);
|
||||
|
||||
slapi_value_free(&member_ndn_val);
|
||||
slapi_valueset_free(groupvals);
|
||||
@@ -4285,6 +4314,25 @@ memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data)
|
||||
|
||||
/* get a list of all of the groups this user belongs to */
|
||||
groups = memberof_get_groups(config, sdn);
|
||||
+#if MEMBEROF_CACHE_DEBUG
|
||||
+ {
|
||||
+ Slapi_Value *val = 0;
|
||||
+ int hint = 0;
|
||||
+ struct berval *bv;
|
||||
+ hint = slapi_valueset_first_value(groups, &val);
|
||||
+ while (val) {
|
||||
+ /* this makes a copy of the berval */
|
||||
+ bv = slapi_value_get_berval(val);
|
||||
+ if (bv && bv->bv_len) {
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "memberof_fix_memberof_callback: %s belongs to %s\n",
|
||||
+ ndn,
|
||||
+ bv->bv_val);
|
||||
+ }
|
||||
+ hint = slapi_valueset_next_value(groups, hint, &val);
|
||||
+ }
|
||||
+ }
|
||||
+#endif
|
||||
|
||||
if (config->group_filter) {
|
||||
if (slapi_filter_test_simple(e, config->group_filter)) {
|
||||
--
|
||||
2.49.0
|
||||
|
||||
@ -1,192 +0,0 @@
From ff364a4b1c88e1a8f678e056af88cce50cd8717c Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Fri, 28 Mar 2025 17:32:14 +0100
Subject: [PATCH] Issue 6698 - NPE after configuring invalid filtered role
(#6699)

Server crash when doing search after configuring filtered role with invalid filter.
Reason: The parts of the filter that should be overwritten are freed before knowing that the filter is invalid.
Solution: Check first that the filter is valid before freeing the filter bits

Issue: #6698

Reviewed by: @tbordaz , @mreynolds389 (Thanks!)

(cherry picked from commit 31e120d2349eda7a41380cf78fc04cf41e394359)
---
dirsrvtests/tests/suites/roles/basic_test.py | 80 ++++++++++++++++++--
ldap/servers/slapd/filter.c | 17 ++++-
2 files changed, 88 insertions(+), 9 deletions(-)

diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
index 875ac47c1..b79816c58 100644
|
||||
--- a/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
+++ b/dirsrvtests/tests/suites/roles/basic_test.py
|
||||
@@ -28,6 +28,7 @@ from lib389.dbgen import dbgen_users
|
||||
from lib389.tasks import ImportTask
|
||||
from lib389.utils import get_default_db_lib
|
||||
from lib389.rewriters import *
|
||||
+from lib389._mapped_object import DSLdapObject
|
||||
from lib389.backend import Backends
|
||||
|
||||
logging.getLogger(__name__).setLevel(logging.INFO)
|
||||
@@ -427,7 +428,6 @@ def test_vattr_on_filtered_role_restart(topo, request):
|
||||
log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
|
||||
assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off')
|
||||
|
||||
-
|
||||
log.info("Check the virtual attribute definition is found (after a required delay)")
|
||||
topo.standalone.restart()
|
||||
time.sleep(5)
|
||||
@@ -541,7 +541,7 @@ def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
indexes = backend.get_indexes()
|
||||
try:
|
||||
index = indexes.create(properties={
|
||||
- 'cn': attrname,
|
||||
+ 'cn': attrname,
|
||||
'nsSystemIndex': 'false',
|
||||
'nsIndexType': ['eq', 'pres']
|
||||
})
|
||||
@@ -593,7 +593,6 @@ def test_managed_and_filtered_role_rewrite(topo, request):
|
||||
dn = "uid=%s0000%d,%s" % (RDN, i, PARENT)
|
||||
topo.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsRoleDN', [role.dn.encode()])])
|
||||
|
||||
-
|
||||
# Now check that search is fast, evaluating only 4 entries
|
||||
search_start = time.time()
|
||||
entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
@@ -676,7 +675,7 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
indexes = backend.get_indexes()
|
||||
try:
|
||||
index = indexes.create(properties={
|
||||
- 'cn': attrname,
|
||||
+ 'cn': attrname,
|
||||
'nsSystemIndex': 'false',
|
||||
'nsIndexType': ['eq', 'pres']
|
||||
})
|
||||
@@ -730,7 +729,7 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
|
||||
# Enable plugin level to check message
|
||||
topo.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.PLUGIN))
|
||||
-
|
||||
+
|
||||
# Now check that search is fast, evaluating only 4 entries
|
||||
search_start = time.time()
|
||||
entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(|(nsrole=%s)(nsrole=cn=not_such_entry_role,%s))" % (role.dn, DEFAULT_SUFFIX))
|
||||
@@ -758,6 +757,77 @@ def test_not_such_entry_role_rewrite(topo, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+
|
||||
+def test_rewriter_with_invalid_filter(topo, request):
|
||||
+ """Test that server does not crash when having
|
||||
+ invalid filter in filtered role
|
||||
+
|
||||
+ :id: 5013b0b2-0af6-11f0-8684-482ae39447e5
|
||||
+ :setup: standalone server
|
||||
+ :steps:
|
||||
+ 1. Setup filtered role with good filter
|
||||
+ 2. Setup nsrole rewriter
|
||||
+ 3. Restart the server
|
||||
+ 4. Search for entries
|
||||
+ 5. Setup filtered role with bad filter
|
||||
+ 6. Search for entries
|
||||
+ :expectedresults:
|
||||
+ 1. Operation should succeed
|
||||
+ 2. Operation should succeed
|
||||
+ 3. Operation should succeed
|
||||
+ 4. Operation should succeed
|
||||
+ 5. Operation should succeed
|
||||
+ 6. Operation should succeed
|
||||
+ """
|
||||
+ inst = topo.standalone
|
||||
+ entries = []
|
||||
+
|
||||
+ def fin():
|
||||
+ inst.start()
|
||||
+ for entry in entries:
|
||||
+ entry.delete()
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ # Setup filtered role
|
||||
+ roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}')
|
||||
+ filter_ko = '(&((objectClass=top)(objectClass=nsPerson))'
|
||||
+ filter_ok = '(&(objectClass=top)(objectClass=nsPerson))'
|
||||
+ role_properties = {
|
||||
+ 'cn': 'TestFilteredRole',
|
||||
+ 'nsRoleFilter': filter_ok,
|
||||
+ 'description': 'Test good filter',
|
||||
+ }
|
||||
+ role = roles.create(properties=role_properties)
|
||||
+ entries.append(role)
|
||||
+
|
||||
+ # Setup nsrole rewriter
|
||||
+ rewriters = Rewriters(inst)
|
||||
+ rewriter_properties = {
|
||||
+ "cn": "nsrole",
|
||||
+ "nsslapd-libpath": 'libroles-plugin',
|
||||
+ "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter',
|
||||
+ }
|
||||
+ rewriter = rewriters.ensure_state(properties=rewriter_properties)
|
||||
+ entries.append(rewriter)
|
||||
+
|
||||
+ # Restart thge instance
|
||||
+ inst.restart()
|
||||
+
|
||||
+ # Search for entries
|
||||
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
+
|
||||
+ # Set bad filter
|
||||
+ role_properties = {
|
||||
+ 'cn': 'TestFilteredRole',
|
||||
+ 'nsRoleFilter': filter_ko,
|
||||
+ 'description': 'Test bad filter',
|
||||
+ }
|
||||
+ role.ensure_state(properties=role_properties)
|
||||
+
|
||||
+ # Search for entries
|
||||
+ entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
|
||||
+
|
||||
+
|
||||
if __name__ == "__main__":
|
||||
CURRENT_FILE = os.path.realpath(__file__)
|
||||
pytest.main("-s -v %s" % CURRENT_FILE)
|
||||
diff --git a/ldap/servers/slapd/filter.c b/ldap/servers/slapd/filter.c
index ce09891b8..f541b8fc1 100644
--- a/ldap/servers/slapd/filter.c
+++ b/ldap/servers/slapd/filter.c
@@ -1038,9 +1038,11 @@ slapi_filter_get_subfilt(
}

/*
- * Before calling this function, you must free all the parts
+ * The function does not know how to free all the parts
* which will be overwritten (i.e. slapi_free_the_filter_bits),
- * this function dosn't know how to do that
+ * so the caller must take care of that.
+ * But it must do so AFTER calling slapi_filter_replace_ex to
+ * avoid getting invalid filter if slapi_filter_replace_ex fails.
*/
int
slapi_filter_replace_ex(Slapi_Filter *f, char *s)
@@ -1099,8 +1101,15 @@ slapi_filter_free_bits(Slapi_Filter *f)
int
slapi_filter_replace_strfilter(Slapi_Filter *f, char *strfilter)
{
- slapi_filter_free_bits(f);
- return (slapi_filter_replace_ex(f, strfilter));
+ /* slapi_filter_replace_ex may fail and we cannot
+ * free filter bits before calling it.
+ */
+ Slapi_Filter save_f = *f;
+ int ret = slapi_filter_replace_ex(f, strfilter);
+ if (ret == 0) {
+ slapi_filter_free_bits(&save_f);
+ }
+ return ret;
}

static void
--
2.49.0

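The slapi_filter_replace_strfilter() change above boils down to one rule: do not destroy the old filter contents until the replacement string has parsed successfully. A rough Python analogue of that ordering (illustrative only; Filter and parse_filter are hypothetical stand-ins, not slapd or lib389 APIs):

class Filter:
    def __init__(self, text):
        self.text = text

def parse_filter(text):
    # Hypothetical parser: reject obviously unbalanced parentheses.
    if text.count('(') != text.count(')'):
        raise ValueError('invalid filter: ' + text)
    return Filter(text)

def replace_filter(current, new_text):
    # Swap in a new filter only after it parses successfully.
    try:
        parsed = parse_filter(new_text)   # may raise; nothing destroyed yet
    except ValueError:
        return False                      # the original filter is still intact
    current.text = parsed.text            # only now replace the old contents
    return True

# Usage: replace_filter(f, '(&((objectClass=top)(objectClass=nsPerson))') returns False
# and leaves f unchanged, instead of leaving the caller with a half-freed filter.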
@ -1,455 +0,0 @@
From 446a23d0ed2d3ffa76c5fb5e9576d6876bdbf04f Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Fri, 28 Mar 2025 11:28:54 -0700
Subject: [PATCH] Issue 6686 - CLI - Re-enabling user accounts that reached
inactivity limit fails with error (#6687)

Description: When attempting to unlock a user account that has been locked due
to exceeding the Account Policy Plugin's inactivity limit, the dsidm account
unlock command fails with a Python type error: "float() argument must be a
string or a number, not 'NoneType'".

Enhance the unlock method to properly handle different account locking states,
including inactivity limit exceeded states.
Add test cases to verify account inactivity locking/unlocking functionality
with CoS and role-based indirect locking.

Fix CoS template class to include the required 'ldapsubentry' objectClass.
Improve error messages to provide better guidance on unlocking indirectly
locked accounts.

Fixes: https://github.com/389ds/389-ds-base/issues/6686

Reviewed by: @mreynolds389 (Thanks!)
---
.../clu/dsidm_account_inactivity_test.py | 329 ++++++++++++++++++
src/lib389/lib389/cli_idm/account.py | 25 +-
src/lib389/lib389/idm/account.py | 28 +-
3 files changed, 377 insertions(+), 5 deletions(-)
create mode 100644 dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py

diff --git a/dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py b/dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py
|
||||
new file mode 100644
|
||||
index 000000000..88a34abf6
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/clu/dsidm_account_inactivity_test.py
|
||||
@@ -0,0 +1,329 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import ldap
|
||||
+import time
|
||||
+import pytest
|
||||
+import logging
|
||||
+import os
|
||||
+from datetime import datetime, timedelta
|
||||
+
|
||||
+from lib389 import DEFAULT_SUFFIX, DN_PLUGIN, DN_CONFIG
|
||||
+from lib389.cli_idm.account import entry_status, unlock
|
||||
+from lib389.topologies import topology_st
|
||||
+from lib389.cli_base import FakeArgs
|
||||
+from lib389.utils import ds_is_older
|
||||
+from lib389.plugins import AccountPolicyPlugin, AccountPolicyConfigs
|
||||
+from lib389.idm.role import FilteredRoles
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.cos import CosTemplate, CosPointerDefinition
|
||||
+from lib389.idm.domain import Domain
|
||||
+from . import check_value_in_log_and_reset
|
||||
+
|
||||
+pytestmark = pytest.mark.tier0
|
||||
+
|
||||
+logging.getLogger(__name__).setLevel(logging.DEBUG)
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+# Constants
|
||||
+PLUGIN_ACCT_POLICY = "Account Policy Plugin"
|
||||
+ACCP_DN = f"cn={PLUGIN_ACCT_POLICY},{DN_PLUGIN}"
|
||||
+ACCP_CONF = f"{DN_CONFIG},{ACCP_DN}"
|
||||
+POLICY_NAME = "Account Inactivity Policy"
|
||||
+POLICY_DN = f"cn={POLICY_NAME},{DEFAULT_SUFFIX}"
|
||||
+COS_TEMPLATE_NAME = "TemplateCoS"
|
||||
+COS_TEMPLATE_DN = f"cn={COS_TEMPLATE_NAME},{DEFAULT_SUFFIX}"
|
||||
+COS_DEFINITION_NAME = "DefinitionCoS"
|
||||
+COS_DEFINITION_DN = f"cn={COS_DEFINITION_NAME},{DEFAULT_SUFFIX}"
|
||||
+TEST_USER_NAME = "test_inactive_user"
|
||||
+TEST_USER_DN = f"uid={TEST_USER_NAME},{DEFAULT_SUFFIX}"
|
||||
+TEST_USER_PW = "password"
|
||||
+INACTIVITY_LIMIT = 30
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="function")
|
||||
+def account_policy_setup(topology_st, request):
|
||||
+ """Set up account policy plugin, configuration, and CoS objects"""
|
||||
+ log.info("Setting up Account Policy Plugin and CoS")
|
||||
+
|
||||
+ # Enable Account Policy Plugin
|
||||
+ plugin = AccountPolicyPlugin(topology_st.standalone)
|
||||
+ if not plugin.status():
|
||||
+ plugin.enable()
|
||||
+ plugin.set('nsslapd-pluginarg0', ACCP_CONF)
|
||||
+
|
||||
+ # Configure Account Policy
|
||||
+ accp_configs = AccountPolicyConfigs(topology_st.standalone)
|
||||
+ accp_config = accp_configs.ensure_state(
|
||||
+ properties={
|
||||
+ 'cn': 'config',
|
||||
+ 'alwaysrecordlogin': 'yes',
|
||||
+ 'stateattrname': 'lastLoginTime',
|
||||
+ 'altstateattrname': '1.1',
|
||||
+ 'specattrname': 'acctPolicySubentry',
|
||||
+ 'limitattrname': 'accountInactivityLimit'
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Add ACI for anonymous access if it doesn't exist
|
||||
+ domain = Domain(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
+ anon_aci = '(targetattr="*")(version 3.0; acl "Anonymous read access"; allow (read,search,compare) userdn="ldap:///anyone";)'
|
||||
+ domain.ensure_present('aci', anon_aci)
|
||||
+
|
||||
+ # Restart the server to apply plugin configuration
|
||||
+ topology_st.standalone.restart()
|
||||
+
|
||||
+ # Create or update account policy entry
|
||||
+ accp_configs = AccountPolicyConfigs(topology_st.standalone, basedn=DEFAULT_SUFFIX)
|
||||
+ policy = accp_configs.ensure_state(
|
||||
+ properties={
|
||||
+ 'cn': POLICY_NAME,
|
||||
+ 'objectClass': ['top', 'ldapsubentry', 'extensibleObject', 'accountpolicy'],
|
||||
+ 'accountInactivityLimit': str(INACTIVITY_LIMIT)
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Create or update CoS template entry
|
||||
+ cos_template = CosTemplate(topology_st.standalone, dn=COS_TEMPLATE_DN)
|
||||
+ cos_template.ensure_state(
|
||||
+ properties={
|
||||
+ 'cn': COS_TEMPLATE_NAME,
|
||||
+ 'objectClass': ['top', 'cosTemplate', 'extensibleObject'],
|
||||
+ 'acctPolicySubentry': policy.dn
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Create or update CoS definition entry
|
||||
+ cos_def = CosPointerDefinition(topology_st.standalone, dn=COS_DEFINITION_DN)
|
||||
+ cos_def.ensure_state(
|
||||
+ properties={
|
||||
+ 'cn': COS_DEFINITION_NAME,
|
||||
+ 'objectClass': ['top', 'ldapsubentry', 'cosSuperDefinition', 'cosPointerDefinition'],
|
||||
+ 'cosTemplateDn': COS_TEMPLATE_DN,
|
||||
+ 'cosAttribute': 'acctPolicySubentry default operational-default'
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Restart server to ensure CoS is applied
|
||||
+ topology_st.standalone.restart()
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info('Cleaning up Account Policy settings')
|
||||
+ try:
|
||||
+ # Delete CoS and policy entries
|
||||
+ if cos_def.exists():
|
||||
+ cos_def.delete()
|
||||
+ if cos_template.exists():
|
||||
+ cos_template.delete()
|
||||
+ if policy.exists():
|
||||
+ policy.delete()
|
||||
+
|
||||
+ # Disable the plugin
|
||||
+ if plugin.status():
|
||||
+ plugin.disable()
|
||||
+ topology_st.standalone.restart()
|
||||
+ except Exception as e:
|
||||
+ log.error(f'Failed to clean up: {e}')
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ return topology_st.standalone
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="function")
|
||||
+def create_test_user(topology_st, account_policy_setup, request):
|
||||
+ """Create a test user for the inactivity test"""
|
||||
+ log.info('Creating test user')
|
||||
+
|
||||
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
+ user = users.ensure_state(
|
||||
+ properties={
|
||||
+ 'uid': TEST_USER_NAME,
|
||||
+ 'cn': TEST_USER_NAME,
|
||||
+ 'sn': TEST_USER_NAME,
|
||||
+ 'userPassword': TEST_USER_PW,
|
||||
+ 'uidNumber': '1000',
|
||||
+ 'gidNumber': '2000',
|
||||
+ 'homeDirectory': f'/home/{TEST_USER_NAME}'
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info('Deleting test user')
|
||||
+ if user.exists():
|
||||
+ user.delete()
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+ return user
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Indirect account locking not implemented")
|
||||
+def test_dsidm_account_inactivity_lock_unlock(topology_st, create_test_user):
|
||||
+ """Test dsidm account unlock functionality with indirectly locked accounts
|
||||
+
|
||||
+ :id: d7b57083-6111-4dbf-af84-6fca7fc7fb31
|
||||
+ :setup: Standalone instance with Account Policy Plugin and CoS configured
|
||||
+ :steps:
|
||||
+ 1. Create a test user
|
||||
+ 2. Bind as the test user to set lastLoginTime
|
||||
+ 3. Check account status - should be active
|
||||
+ 4. Set user's lastLoginTime to a time in the past that exceeds inactivity limit
|
||||
+ 5. Check account status - should be locked due to inactivity
|
||||
+ 6. Attempt to bind as the user - should fail with constraint violation
|
||||
+ 7. Unlock the account using dsidm account unlock
|
||||
+ 8. Verify account status is active again
|
||||
+ 9. Verify the user can bind again
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Account status shows as activated
|
||||
+ 4. Success
|
||||
+ 5. Account status shows as inactivity limit exceeded
|
||||
+ 6. Bind attempt fails with constraint violation
|
||||
+ 7. Account unlocked successfully
|
||||
+ 8. Account status shows as activated
|
||||
+ 9. User can bind successfully
|
||||
+ """
|
||||
+ standalone = topology_st.standalone
|
||||
+ user = create_test_user
|
||||
+
|
||||
+ # Set up FakeArgs for dsidm commands
|
||||
+ args = FakeArgs()
|
||||
+ args.dn = user.dn
|
||||
+ args.json = False
|
||||
+ args.details = False
|
||||
+
|
||||
+ # 1. Check initial account status - should be active
|
||||
+ log.info('Step 1: Checking initial account status')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
|
||||
+
|
||||
+ # 2. Bind as test user to set initial lastLoginTime
|
||||
+ log.info('Step 2: Binding as test user to set lastLoginTime')
|
||||
+ try:
|
||||
+ conn = user.bind(TEST_USER_PW)
|
||||
+ conn.unbind()
|
||||
+ log.info("Successfully bound as test user")
|
||||
+ except ldap.LDAPError as e:
|
||||
+ pytest.fail(f"Failed to bind as test user: {e}")
|
||||
+
|
||||
+ # 3. Set lastLoginTime to a time in the past that exceeds inactivity limit
|
||||
+ log.info('Step 3: Setting lastLoginTime to the past')
|
||||
+ past_time = datetime.utcnow() - timedelta(seconds=INACTIVITY_LIMIT * 2)
|
||||
+ past_time_str = past_time.strftime('%Y%m%d%H%M%SZ')
|
||||
+ user.replace('lastLoginTime', past_time_str)
|
||||
+
|
||||
+ # 4. Check account status - should now be locked due to inactivity
|
||||
+ log.info('Step 4: Checking account status after setting old lastLoginTime')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: inactivity limit exceeded')
|
||||
+
|
||||
+ # 5. Attempt to bind as the user - should fail
|
||||
+ log.info('Step 5: Attempting to bind as user (should fail)')
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION) as excinfo:
|
||||
+ conn = user.bind(TEST_USER_PW)
|
||||
+ assert "Account inactivity limit exceeded" in str(excinfo.value)
|
||||
+
|
||||
+ # 6. Unlock the account using dsidm account unlock
|
||||
+ log.info('Step 6: Unlocking the account with dsidm')
|
||||
+ unlock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st,
|
||||
+ check_value='now unlocked by resetting lastLoginTime')
|
||||
+
|
||||
+ # 7. Verify account status is active again
|
||||
+ log.info('Step 7: Checking account status after unlock')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
|
||||
+
|
||||
+ # 8. Verify the user can bind again
|
||||
+ log.info('Step 8: Verifying user can bind again')
|
||||
+ try:
|
||||
+ conn = user.bind(TEST_USER_PW)
|
||||
+ conn.unbind()
|
||||
+ log.info("Successfully bound as test user after unlock")
|
||||
+ except ldap.LDAPError as e:
|
||||
+ pytest.fail(f"Failed to bind as test user after unlock: {e}")
|
||||
+
|
||||
+
|
||||
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Indirect account locking not implemented")
|
||||
+def test_dsidm_indirectly_locked_via_role(topology_st, create_test_user):
|
||||
+ """Test dsidm account unlock functionality with accounts indirectly locked via role
|
||||
+
|
||||
+ :id: 7bfe69bb-cf99-4214-a763-051ab2b9cf89
|
||||
+ :setup: Standalone instance with Role and user configured
|
||||
+ :steps:
|
||||
+ 1. Create a test user
|
||||
+ 2. Create a Filtered Role that includes the test user
|
||||
+ 3. Lock the role
|
||||
+ 4. Check account status - should be indirectly locked through the role
|
||||
+ 5. Attempt to unlock the account - should fail with appropriate message
|
||||
+ 6. Unlock the role
|
||||
+ 7. Verify account status is active again
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Account status shows as indirectly locked
|
||||
+ 5. Unlock attempt fails with appropriate error message
|
||||
+ 6. Success
|
||||
+ 7. Account status shows as activated
|
||||
+ """
|
||||
+ standalone = topology_st.standalone
|
||||
+ user = create_test_user
|
||||
+
|
||||
+ # Use FilteredRoles and ensure_state for role creation
|
||||
+ log.info('Step 1: Creating Filtered Role')
|
||||
+ roles = FilteredRoles(standalone, DEFAULT_SUFFIX)
|
||||
+ role = roles.ensure_state(
|
||||
+ properties={
|
||||
+ 'cn': 'TestFilterRole',
|
||||
+ 'nsRoleFilter': f'(uid={TEST_USER_NAME})'
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Set up FakeArgs for dsidm commands
|
||||
+ args = FakeArgs()
|
||||
+ args.dn = user.dn
|
||||
+ args.json = False
|
||||
+ args.details = False
|
||||
+
|
||||
+ # 2. Check account status before locking role
|
||||
+ log.info('Step 2: Checking account status before locking role')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
|
||||
+
|
||||
+ # 3. Lock the role
|
||||
+ log.info('Step 3: Locking the role')
|
||||
+ role.lock()
|
||||
+
|
||||
+ # 4. Check account status - should be indirectly locked
|
||||
+ log.info('Step 4: Checking account status after locking role')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: indirectly locked through a Role')
|
||||
+
|
||||
+ # 5. Attempt to unlock the account - should fail
|
||||
+ log.info('Step 5: Attempting to unlock indirectly locked account')
|
||||
+ unlock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st,
|
||||
+ check_value='Account is locked through role')
|
||||
+
|
||||
+ # 6. Unlock the role
|
||||
+ log.info('Step 6: Unlocking the role')
|
||||
+ role.unlock()
|
||||
+
|
||||
+ # 7. Verify account status is active again
|
||||
+ log.info('Step 7: Checking account status after unlocking role')
|
||||
+ entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
|
||||
+ check_value_in_log_and_reset(topology_st, check_value='Entry State: activated')
|
||||
+
|
||||
+
|
||||
+if __name__ == '__main__':
|
||||
+ # Run isolated
|
||||
+ # -s for DEBUG mode
|
||||
+ CURRENT_FILE = os.path.realpath(__file__)
|
||||
+ pytest.main(["-s", CURRENT_FILE])
|
||||
\ No newline at end of file
|
||||
diff --git a/src/lib389/lib389/cli_idm/account.py b/src/lib389/lib389/cli_idm/account.py
|
||||
index 15f766588..a0dfd8f65 100644
|
||||
--- a/src/lib389/lib389/cli_idm/account.py
|
||||
+++ b/src/lib389/lib389/cli_idm/account.py
|
||||
@@ -176,8 +176,29 @@ def unlock(inst, basedn, log, args):
|
||||
dn = _get_dn_arg(args.dn, msg="Enter dn to unlock")
|
||||
accounts = Accounts(inst, basedn)
|
||||
acct = accounts.get(dn=dn)
|
||||
- acct.unlock()
|
||||
- log.info(f'Entry {dn} is unlocked')
|
||||
+
|
||||
+ try:
|
||||
+ # Get the account status before attempting to unlock
|
||||
+ status = acct.status()
|
||||
+ state = status["state"]
|
||||
+
|
||||
+ # Attempt to unlock the account
|
||||
+ acct.unlock()
|
||||
+
|
||||
+ # Success message
|
||||
+ log.info(f'Entry {dn} is unlocked')
|
||||
+ if state == AccountState.DIRECTLY_LOCKED:
|
||||
+ log.info(f'The entry was directly locked')
|
||||
+ elif state == AccountState.INACTIVITY_LIMIT_EXCEEDED:
|
||||
+ log.info(f'The entry was locked due to inactivity and is now unlocked by resetting lastLoginTime')
|
||||
+
|
||||
+ except ValueError as e:
|
||||
+ # Provide a more detailed error message based on failure reason
|
||||
+ if "through role" in str(e):
|
||||
+ log.error(f"Cannot unlock {dn}: {str(e)}")
|
||||
+ log.info("To unlock this account, you must modify the role that's locking it.")
|
||||
+ else:
|
||||
+ log.error(f"Failed to unlock {dn}: {str(e)}")
|
||||
|
||||
|
||||
def reset_password(inst, basedn, log, args):
|
||||
diff --git a/src/lib389/lib389/idm/account.py b/src/lib389/lib389/idm/account.py
|
||||
index 4b823b662..faf6f6f16 100644
|
||||
--- a/src/lib389/lib389/idm/account.py
|
||||
+++ b/src/lib389/lib389/idm/account.py
|
||||
@@ -140,7 +140,8 @@ class Account(DSLdapObject):
|
||||
"nsAccountLock", state_attr])
|
||||
|
||||
last_login_time = self._dict_get_with_ignore_indexerror(account_data, state_attr)
|
||||
- if not last_login_time:
|
||||
+ # if last_login_time not exist then check alt_state_attr only if its not disabled and exist
|
||||
+ if not last_login_time and alt_state_attr in account_data:
|
||||
last_login_time = self._dict_get_with_ignore_indexerror(account_data, alt_state_attr)
|
||||
|
||||
create_time = self._dict_get_with_ignore_indexerror(account_data, "createTimestamp")
|
||||
@@ -203,12 +204,33 @@ class Account(DSLdapObject):
|
||||
self.replace('nsAccountLock', 'true')
|
||||
|
||||
def unlock(self):
|
||||
- """Unset nsAccountLock"""
|
||||
+ """Unset nsAccountLock if it's set and reset lastLoginTime if account is locked due to inactivity"""
|
||||
|
||||
current_status = self.status()
|
||||
+
|
||||
if current_status["state"] == AccountState.ACTIVATED:
|
||||
raise ValueError("Account is already active")
|
||||
- self.remove('nsAccountLock', None)
|
||||
+
|
||||
+ if current_status["state"] == AccountState.DIRECTLY_LOCKED:
|
||||
+ # Account is directly locked with nsAccountLock attribute
|
||||
+ self.remove('nsAccountLock', None)
|
||||
+ elif current_status["state"] == AccountState.INACTIVITY_LIMIT_EXCEEDED:
|
||||
+ # Account is locked due to inactivity - reset lastLoginTime to current time
|
||||
+ # The lastLoginTime attribute stores its value in GMT/UTC time (Zulu time zone)
|
||||
+ current_time = time.strftime('%Y%m%d%H%M%SZ', time.gmtime())
|
||||
+ self.replace('lastLoginTime', current_time)
|
||||
+ elif current_status["state"] == AccountState.INDIRECTLY_LOCKED:
|
||||
+ # Account is locked through a role
|
||||
+ role_dn = current_status.get("role_dn")
|
||||
+ if role_dn:
|
||||
+ raise ValueError(f"Account is locked through role {role_dn}. "
|
||||
+ f"Please modify the role to unlock this account.")
|
||||
+ else:
|
||||
+ raise ValueError("Account is locked through an unknown role. "
|
||||
+ "Please check the roles configuration to unlock this account.")
|
||||
+ else:
|
||||
+ # Should not happen, but just in case
|
||||
+ raise ValueError(f"Unknown lock state: {current_status['state'].value}")
|
||||
|
||||
# If the account can be bound to, this will attempt to do so. We don't check
|
||||
# for exceptions, just pass them back!
|
||||
--
|
||||
2.49.0
|
||||
|
||||
@ -1,70 +0,0 @@
From 09a284ee43c2b4346da892f8756f97accd15ca68 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Wed, 4 Dec 2024 21:59:40 -0500
Subject: [PATCH] Issue 6302 - Allow to run replication status without a prompt
(#6410)

Description: We should allow running replication status and
other similar commands without requesting a password and bind DN.

This way, the current instance's root DN and root PW will be used on other
instances when requesting CSN info. If they are incorrect,
then the info won't be printed, but otherwise, the agreement status
will be displayed correctly.

Fixes: https://github.com/389ds/389-ds-base/issues/6302

Reviewed by: @progier389 (Thanks!)
---
src/lib389/lib389/cli_conf/replication.py | 15 +++------------
1 file changed, 3 insertions(+), 12 deletions(-)

diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
index 399d0d2f8..cd4a331a8 100644
--- a/src/lib389/lib389/cli_conf/replication.py
+++ b/src/lib389/lib389/cli_conf/replication.py
@@ -319,12 +319,9 @@ def list_suffixes(inst, basedn, log, args):
def get_repl_status(inst, basedn, log, args):
replicas = Replicas(inst)
replica = replicas.get(args.suffix)
- pw_and_dn_prompt = False
if args.bind_passwd_file is not None:
args.bind_passwd = get_passwd_from_file(args.bind_passwd_file)
- if args.bind_passwd_prompt or args.bind_dn is None or args.bind_passwd is None:
- pw_and_dn_prompt = True
- status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=pw_and_dn_prompt)
+ status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=args.bind_passwd_prompt)
if args.json:
log.info(json.dumps({"type": "list", "items": status}, indent=4))
else:
@@ -335,12 +332,9 @@ def get_repl_status(inst, basedn, log, args):
def get_repl_winsync_status(inst, basedn, log, args):
replicas = Replicas(inst)
replica = replicas.get(args.suffix)
- pw_and_dn_prompt = False
if args.bind_passwd_file is not None:
args.bind_passwd = get_passwd_from_file(args.bind_passwd_file)
- if args.bind_passwd_prompt or args.bind_dn is None or args.bind_passwd is None:
- pw_and_dn_prompt = True
- status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, winsync=True, pwprompt=pw_and_dn_prompt)
+ status = replica.status(binddn=args.bind_dn, bindpw=args.bind_passwd, winsync=True, pwprompt=args.bind_passwd_prompt)
if args.json:
log.info(json.dumps({"type": "list", "items": status}, indent=4))
else:
@@ -874,12 +868,9 @@ def poke_agmt(inst, basedn, log, args):

def get_agmt_status(inst, basedn, log, args):
agmt = get_agmt(inst, args)
- pw_and_dn_prompt = False
if args.bind_passwd_file is not None:
args.bind_passwd = get_passwd_from_file(args.bind_passwd_file)
- if args.bind_passwd_prompt or args.bind_dn is None or args.bind_passwd is None:
- pw_and_dn_prompt = True
- status = agmt.status(use_json=args.json, binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=pw_and_dn_prompt)
+ status = agmt.status(use_json=args.json, binddn=args.bind_dn, bindpw=args.bind_passwd, pwprompt=args.bind_passwd_prompt)
log.info(status)


--
2.49.0

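The change above stops forcing an interactive bind-DN/password prompt: when no credentials are supplied, replica.status() falls back to the instance's own root DN and password, and pwprompt is only honoured when explicitly requested. A rough sketch of calling it the same way from lib389 (illustrative only; it assumes an already-connected instance object inst, an existing replicated suffix, and the usual lib389 module layout):

from lib389.replica import Replicas

def print_repl_status(inst, suffix, binddn=None, bindpw=None):
    # Mirrors how get_repl_status() above calls replica.status().
    # With binddn/bindpw left as None, the instance's own credentials are
    # reused on the remote replicas and no prompt is raised.
    replicas = Replicas(inst)
    replica = replicas.get(suffix)
    status = replica.status(binddn=binddn, bindpw=bindpw, pwprompt=False)
    for agmt_report in status:
        print(agmt_report)

# Usage sketch: print_repl_status(inst, "dc=example,dc=com")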
@ -1,399 +0,0 @@
From 3ba73d2aa55f18ff73d4b3901ce101133745effc Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 9 Jul 2025 14:18:50 -0400
Subject: [PATCH] Issue 6859 - str2filter is not fully applying matching rules

Description:

When we have an extended filter, one with a MR applied, it is ignored during
internal searches:

"(cn:CaseExactMatch:=Value)"

For internal searches we use str2filter() and it doesn't fully apply extended
search filter matching rules

Also needed to update attr uniqueness plugin to apply this change for mod
operations (previously only Adds were correctly handling these attribute
filters)

Relates: https://github.com/389ds/389-ds-base/issues/6857
Relates: https://github.com/389ds/389-ds-base/issues/6859

Reviewed by: spichugi & tbordaz(Thanks!!)
---
.../tests/suites/plugins/attruniq_test.py | 295 +++++++++++++++++-
ldap/servers/plugins/uiduniq/uid.c | 7 +
ldap/servers/slapd/plugin_mr.c | 2 +-
ldap/servers/slapd/str2filter.c | 8 +
4 files changed, 309 insertions(+), 3 deletions(-)

diff --git a/dirsrvtests/tests/suites/plugins/attruniq_test.py b/dirsrvtests/tests/suites/plugins/attruniq_test.py
|
||||
index b190e0ec1..b338f405f 100644
|
||||
--- a/dirsrvtests/tests/suites/plugins/attruniq_test.py
|
||||
+++ b/dirsrvtests/tests/suites/plugins/attruniq_test.py
|
||||
@@ -1,5 +1,5 @@
|
||||
# --- BEGIN COPYRIGHT BLOCK ---
|
||||
-# Copyright (C) 2021 Red Hat, Inc.
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# License: GPL (version 3 or any later version).
|
||||
@@ -80,4 +80,295 @@ def test_modrdn_attr_uniqueness(topology_st):
|
||||
log.debug(excinfo.value)
|
||||
|
||||
log.debug('Move user2 to group1')
|
||||
- user2.rename(f'uid={user2.rdn}', group1.dn)
|
||||
\ No newline at end of file
|
||||
+
|
||||
+ user2.rename(f'uid={user2.rdn}', group1.dn)
|
||||
+
|
||||
+ # Cleanup for next test
|
||||
+ user1.delete()
|
||||
+ user2.delete()
|
||||
+ attruniq.disable()
|
||||
+ attruniq.delete()
|
||||
+
|
||||
+
|
||||
+def test_multiple_attr_uniqueness(topology_st):
|
||||
+ """ Test that attribute uniqueness works properly with multiple attributes
|
||||
+
|
||||
+ :id: c49aa5c1-7e65-45fd-b064-55e0b815e9bc
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Setup attribute uniqueness plugin to ensure uniqueness of attributes 'mail' and 'mailAlternateAddress'
|
||||
+ 2. Add user with unique 'mail=non-uniq@value.net' and 'mailAlternateAddress=alt-mail@value.net'
|
||||
+ 3. Try adding another user with 'mail=non-uniq@value.net'
|
||||
+ 4. Try adding another user with 'mailAlternateAddress=alt-mail@value.net'
|
||||
+ 5. Try adding another user with 'mail=alt-mail@value.net'
|
||||
+ 6. Try adding another user with 'mailAlternateAddress=non-uniq@value.net'
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Should raise CONSTRAINT_VIOLATION
|
||||
+ 4. Should raise CONSTRAINT_VIOLATION
|
||||
+ 5. Should raise CONSTRAINT_VIOLATION
|
||||
+ 6. Should raise CONSTRAINT_VIOLATION
|
||||
+ """
|
||||
+ attruniq = AttributeUniquenessPlugin(topology_st.standalone, dn="cn=attruniq,cn=plugins,cn=config")
|
||||
+
|
||||
+ try:
|
||||
+ log.debug(f'Setup PLUGIN_ATTR_UNIQUENESS plugin for {MAIL_ATTR_VALUE} attribute for the group2')
|
||||
+ attruniq.create(properties={'cn': 'attruniq'})
|
||||
+ attruniq.add_unique_attribute('mail')
|
||||
+ attruniq.add_unique_attribute('mailAlternateAddress')
|
||||
+ attruniq.add_unique_subtree(DEFAULT_SUFFIX)
|
||||
+ attruniq.enable_all_subtrees()
|
||||
+ log.debug(f'Enable PLUGIN_ATTR_UNIQUENESS plugin as "ON"')
|
||||
+ attruniq.enable()
|
||||
+ except ldap.LDAPError as e:
|
||||
+ log.fatal('test_multiple_attribute_uniqueness: Failed to configure plugin for "mail": error {}'.format(e.args[0]['desc']))
|
||||
+ assert False
|
||||
+
|
||||
+ topology_st.standalone.restart()
|
||||
+
|
||||
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
+
|
||||
+ testuser1 = users.create_test_user(100,100)
|
||||
+ testuser1.add('objectclass', 'extensibleObject')
|
||||
+ testuser1.add('mail', MAIL_ATTR_VALUE)
|
||||
+ testuser1.add('mailAlternateAddress', MAIL_ATTR_VALUE_ALT)
|
||||
+
|
||||
+ testuser2 = users.create_test_user(200, 200)
|
||||
+ testuser2.add('objectclass', 'extensibleObject')
|
||||
+
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ testuser2.add('mail', MAIL_ATTR_VALUE)
|
||||
+
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ testuser2.add('mailAlternateAddress', MAIL_ATTR_VALUE_ALT)
|
||||
+
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ testuser2.add('mail', MAIL_ATTR_VALUE_ALT)
|
||||
+
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ testuser2.add('mailAlternateAddress', MAIL_ATTR_VALUE)
|
||||
+
|
||||
+ # Cleanup
|
||||
+ testuser1.delete()
|
||||
+ testuser2.delete()
|
||||
+ attruniq.disable()
|
||||
+ attruniq.delete()
|
||||
+
|
||||
+
|
||||
+def test_exclude_subtrees(topology_st):
|
||||
+ """ Test attribute uniqueness with exclude scope
|
||||
+
|
||||
+ :id: 43d29a60-40e1-4ebd-b897-6ef9f20e9f27
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Setup and enable attribute uniqueness plugin for telephonenumber unique attribute
|
||||
+ 2. Create subtrees and test users
|
||||
+ 3. Add a unique attribute to a user within uniqueness scope
|
||||
+ 4. Add exclude subtree
|
||||
+ 5. Try to add existing value attribute to an entry within uniqueness scope
|
||||
+ 6. Try to add existing value attribute to an entry within exclude scope
|
||||
+ 7. Remove the attribute from affected entries
|
||||
+ 8. Add a unique attribute to a user within exclude scope
|
||||
+ 9. Try to add existing value attribute to an entry within uniqueness scope
|
||||
+ 10. Try to add existing value attribute to another entry within uniqueness scope
|
||||
+ 11. Remove the attribute from affected entries
|
||||
+ 12. Add another exclude subtree
|
||||
+ 13. Add a unique attribute to a user within uniqueness scope
|
||||
+ 14. Try to add existing value attribute to an entry within uniqueness scope
|
||||
+ 15. Try to add existing value attribute to an entry within exclude scope
|
||||
+ 16. Try to add existing value attribute to an entry within another exclude scope
|
||||
+ 17. Clean up entries
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Should raise CONSTRAINT_VIOLATION
|
||||
+ 6. Success
|
||||
+ 7. Success
|
||||
+ 8. Success
|
||||
+ 9. Success
|
||||
+ 10. Should raise CONSTRAINT_VIOLATION
|
||||
+ 11. Success
|
||||
+ 12. Success
|
||||
+ 13. Success
|
||||
+ 14. Should raise CONSTRAINT_VIOLATION
|
||||
+ 15. Success
|
||||
+ 16. Success
|
||||
+ 17. Success
|
||||
+ """
|
||||
+ log.info('Setup attribute uniqueness plugin')
|
||||
+ attruniq = AttributeUniquenessPlugin(topology_st.standalone, dn="cn=attruniq,cn=plugins,cn=config")
|
||||
+ attruniq.create(properties={'cn': 'attruniq'})
|
||||
+ attruniq.add_unique_attribute('telephonenumber')
|
||||
+ attruniq.add_unique_subtree(DEFAULT_SUFFIX)
|
||||
+ attruniq.enable_all_subtrees()
|
||||
+ attruniq.enable()
|
||||
+ topology_st.standalone.restart()
|
||||
+
|
||||
+ log.info('Create subtrees container')
|
||||
+ containers = nsContainers(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
+ cont1 = containers.create(properties={'cn': EXCLUDED_CONTAINER_CN})
|
||||
+ cont2 = containers.create(properties={'cn': EXCLUDED_BIS_CONTAINER_CN})
|
||||
+ cont3 = containers.create(properties={'cn': ENFORCED_CONTAINER_CN})
|
||||
+
|
||||
+ log.info('Create test users')
|
||||
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
|
||||
+ rdn='cn={}'.format(ENFORCED_CONTAINER_CN))
|
||||
+ users_excluded = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
|
||||
+ rdn='cn={}'.format(EXCLUDED_CONTAINER_CN))
|
||||
+ users_excluded2 = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
|
||||
+ rdn='cn={}'.format(EXCLUDED_BIS_CONTAINER_CN))
|
||||
+
|
||||
+ user1 = users.create(properties={'cn': USER_1_CN,
|
||||
+ 'uid': USER_1_CN,
|
||||
+ 'sn': USER_1_CN,
|
||||
+ 'uidNumber': '1',
|
||||
+ 'gidNumber': '11',
|
||||
+ 'homeDirectory': '/home/{}'.format(USER_1_CN)})
|
||||
+ user2 = users.create(properties={'cn': USER_2_CN,
|
||||
+ 'uid': USER_2_CN,
|
||||
+ 'sn': USER_2_CN,
|
||||
+ 'uidNumber': '2',
|
||||
+ 'gidNumber': '22',
|
||||
+ 'homeDirectory': '/home/{}'.format(USER_2_CN)})
|
||||
+ user3 = users_excluded.create(properties={'cn': USER_3_CN,
|
||||
+ 'uid': USER_3_CN,
|
||||
+ 'sn': USER_3_CN,
|
||||
+ 'uidNumber': '3',
|
||||
+ 'gidNumber': '33',
|
||||
+ 'homeDirectory': '/home/{}'.format(USER_3_CN)})
|
||||
+ user4 = users_excluded2.create(properties={'cn': USER_4_CN,
|
||||
+ 'uid': USER_4_CN,
|
||||
+ 'sn': USER_4_CN,
|
||||
+ 'uidNumber': '4',
|
||||
+ 'gidNumber': '44',
|
||||
+ 'homeDirectory': '/home/{}'.format(USER_4_CN)})
|
||||
+
|
||||
+ UNIQUE_VALUE = '1234'
|
||||
+
|
||||
+ try:
|
||||
+ log.info('Create user with unique attribute')
|
||||
+ user1.add('telephonenumber', UNIQUE_VALUE)
|
||||
+ assert user1.present('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ log.info('Add exclude subtree')
|
||||
+ attruniq.add_exclude_subtree(EXCLUDED_CONTAINER_DN)
|
||||
+ topology_st.standalone.restart()
|
||||
+
|
||||
+ log.info('Verify an already used attribute value cannot be added within the same subtree')
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ user2.add('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ log.info('Verify an entry with same attribute value can be added within exclude subtree')
|
||||
+ user3.add('telephonenumber', UNIQUE_VALUE)
|
||||
+ assert user3.present('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ log.info('Cleanup unique attribute values')
|
||||
+ user1.remove_all('telephonenumber')
|
||||
+ user3.remove_all('telephonenumber')
|
||||
+
|
||||
+ log.info('Add a unique value to an entry in excluded scope')
|
||||
+ user3.add('telephonenumber', UNIQUE_VALUE)
|
||||
+ assert user3.present('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ log.info('Verify the same value can be added to an entry within uniqueness scope')
|
||||
+ user1.add('telephonenumber', UNIQUE_VALUE)
|
||||
+ assert user1.present('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ log.info('Verify that yet another same value cannot be added to another entry within uniqueness scope')
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ user2.add('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ log.info('Cleanup unique attribute values')
|
||||
+ user1.remove_all('telephonenumber')
|
||||
+ user3.remove_all('telephonenumber')
|
||||
+
|
||||
+ log.info('Add another exclude subtree')
|
||||
+ attruniq.add_exclude_subtree(EXCLUDED_BIS_CONTAINER_DN)
|
||||
+ topology_st.standalone.restart()
|
||||
+
|
||||
+ user1.add('telephonenumber', UNIQUE_VALUE)
|
||||
+ log.info('Verify an already used attribute value cannot be added within the same subtree')
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ user2.add('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ log.info('Verify an already used attribute can be added to an entry in exclude scope')
|
||||
+ user3.add('telephonenumber', UNIQUE_VALUE)
|
||||
+ assert user3.present('telephonenumber', UNIQUE_VALUE)
|
||||
+ user4.add('telephonenumber', UNIQUE_VALUE)
|
||||
+ assert user4.present('telephonenumber', UNIQUE_VALUE)
|
||||
+
|
||||
+ finally:
|
||||
+ log.info('Clean up users, containers and attribute uniqueness plugin')
|
||||
+ user1.delete()
|
||||
+ user2.delete()
|
||||
+ user3.delete()
|
||||
+ user4.delete()
|
||||
+ cont1.delete()
|
||||
+ cont2.delete()
|
||||
+ cont3.delete()
|
||||
+ attruniq.disable()
|
||||
+ attruniq.delete()
|
||||
+
|
||||
+
|
||||
+def test_matchingrule_attr(topology_st):
|
||||
+ """ Test list extension MR attribute. Check for "cn" using CES (versus it
|
||||
+ being defined as CIS)
|
||||
+
|
||||
+ :id: 5cde4342-6fa3-4225-b23d-0af918981075
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Setup and enable attribute uniqueness plugin to use CN attribute
|
||||
+ with a matching rule of CaseExactMatch.
|
||||
+ 2. Add user with CN value is lowercase
|
||||
+ 3. Add second user with same lowercase CN which should be rejected
|
||||
+ 4. Add second user with same CN value but with mixed case
|
||||
+ 5. Modify second user replacing CN value to lc which should be rejected
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+
|
||||
+ attruniq = AttributeUniquenessPlugin(inst,
|
||||
+ dn="cn=attribute uniqueness,cn=plugins,cn=config")
|
||||
+ attruniq.add_unique_attribute('cn:CaseExactMatch:')
|
||||
+ attruniq.enable_all_subtrees()
|
||||
+ attruniq.enable()
|
||||
+ inst.restart()
|
||||
+
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ users.create(properties={'cn': "common_name",
|
||||
+ 'uid': "uid_name",
|
||||
+ 'sn': "uid_name",
|
||||
+ 'uidNumber': '1',
|
||||
+ 'gidNumber': '11',
|
||||
+ 'homeDirectory': '/home/uid_name'})
|
||||
+
|
||||
+ log.info('Add entry with the exact CN value which should be rejected')
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ users.create(properties={'cn': "common_name",
|
||||
+ 'uid': "uid_name2",
|
||||
+ 'sn': "uid_name2",
|
||||
+ 'uidNumber': '11',
|
||||
+ 'gidNumber': '111',
|
||||
+ 'homeDirectory': '/home/uid_name2'})
|
||||
+
|
||||
+ log.info('Add entry with the mixed case CN value which should be allowed')
|
||||
+ user = users.create(properties={'cn': "Common_Name",
|
||||
+ 'uid': "uid_name2",
|
||||
+ 'sn': "uid_name2",
|
||||
+ 'uidNumber': '11',
|
||||
+ 'gidNumber': '111',
|
||||
+ 'homeDirectory': '/home/uid_name2'})
|
||||
+
|
||||
+ log.info('Mod entry with exact case CN value which should be rejected')
|
||||
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
|
||||
+ user.replace('cn', 'common_name')
|
||||
diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
|
||||
index 15cf88477..5a0d61b86 100644
|
||||
--- a/ldap/servers/plugins/uiduniq/uid.c
|
||||
+++ b/ldap/servers/plugins/uiduniq/uid.c
|
||||
@@ -1179,6 +1179,10 @@ preop_modify(Slapi_PBlock *pb)
|
||||
for (; mods && *mods; mods++) {
|
||||
mod = *mods;
|
||||
for (i = 0; attrNames && attrNames[i]; i++) {
|
||||
+ char *attr_match = strchr(attrNames[i], ':');
|
||||
+ if (attr_match != NULL) {
|
||||
+ attr_match[0] = '\0';
|
||||
+ }
|
||||
if ((slapi_attr_type_cmp(mod->mod_type, attrNames[i], 1) == 0) && /* mod contains target attr */
|
||||
(mod->mod_op & LDAP_MOD_BVALUES) && /* mod is bval encoded (not string val) */
|
||||
(mod->mod_bvalues && mod->mod_bvalues[0]) && /* mod actually contains some values */
|
||||
@@ -1187,6 +1191,9 @@ preop_modify(Slapi_PBlock *pb)
|
||||
{
|
||||
addMod(&checkmods, &checkmodsCapacity, &modcount, mod);
|
||||
}
|
||||
+ if (attr_match != NULL) {
|
||||
+ attr_match[0] = ':';
|
||||
+ }
|
||||
}
|
||||
}
|
||||
if (modcount == 0) {
|
||||
diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c
|
||||
index 6cf88b7de..f40c9a39b 100644
|
||||
--- a/ldap/servers/slapd/plugin_mr.c
|
||||
+++ b/ldap/servers/slapd/plugin_mr.c
|
||||
@@ -624,7 +624,7 @@ attempt_mr_filter_create(mr_filter_t *f, struct slapdplugin *mrp, Slapi_PBlock *
|
||||
int rc;
|
||||
IFP mrf_create = NULL;
|
||||
f->mrf_match = NULL;
|
||||
- pblock_init(pb);
|
||||
+ slapi_pblock_init(pb);
|
||||
if (!(rc = slapi_pblock_set(pb, SLAPI_PLUGIN, mrp)) &&
|
||||
!(rc = slapi_pblock_get(pb, SLAPI_PLUGIN_MR_FILTER_CREATE_FN, &mrf_create)) &&
|
||||
mrf_create != NULL &&
|
||||
diff --git a/ldap/servers/slapd/str2filter.c b/ldap/servers/slapd/str2filter.c
|
||||
index 9fdc500f7..5620b7439 100644
|
||||
--- a/ldap/servers/slapd/str2filter.c
|
||||
+++ b/ldap/servers/slapd/str2filter.c
|
||||
@@ -344,6 +344,14 @@ str2simple(char *str, int unescape_filter)
|
||||
return NULL; /* error */
|
||||
} else {
|
||||
f->f_choice = LDAP_FILTER_EXTENDED;
|
||||
+ if (f->f_mr_oid) {
|
||||
+ /* apply the MR indexers */
|
||||
+ rc = plugin_mr_filter_create(&f->f_mr);
|
||||
+ if (rc) {
|
||||
+ slapi_filter_free(f, 1);
|
||||
+ return NULL; /* error */
|
||||
+ }
|
||||
+ }
|
||||
}
|
||||
} else if (str_find_star(value) == NULL) {
|
||||
f->f_choice = LDAP_FILTER_EQUALITY;
|
||||
--
|
||||
2.49.0
|
||||

@ -1,55 +0,0 @@
From 77cc17e5dfb7ed71a320844d14a90c99c1474cc3 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 20 May 2025 08:13:24 -0400
Subject: [PATCH] Issue 6787 - Improve error message when bulk import
 connection is closed

Description:

If an online replication initialization connection is closed, a vague error
message is reported when the init is aborted:

factory_destructor - ERROR bulk import abandoned

It should be clear that the import is being abandoned because the connection
was closed, and the message should identify the conn id.

relates: https://github.com/389ds/389-ds-base/issues/6787

Reviewed by: progier (Thanks!)

(cherry picked from commit d472dd83d49f8dce6d71e202cbb4d897218ceffb)
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
---
 ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
|
||||
index 67d6e3abc..e433f3db2 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
|
||||
@@ -3432,9 +3432,10 @@ factory_constructor(void *object __attribute__((unused)), void *parent __attribu
|
||||
}
|
||||
|
||||
void
|
||||
-factory_destructor(void *extension, void *object __attribute__((unused)), void *parent __attribute__((unused)))
|
||||
+factory_destructor(void *extension, void *object, void *parent __attribute__((unused)))
|
||||
{
|
||||
ImportJob *job = (ImportJob *)extension;
|
||||
+ Connection *conn = (Connection *)object;
|
||||
PRThread *thread;
|
||||
|
||||
if (extension == NULL)
|
||||
@@ -3446,7 +3447,8 @@ factory_destructor(void *extension, void *object __attribute__((unused)), void *
|
||||
*/
|
||||
thread = job->main_thread;
|
||||
slapi_log_err(SLAPI_LOG_ERR, "factory_destructor",
|
||||
- "ERROR bulk import abandoned\n");
|
||||
+ "ERROR bulk import abandoned: conn=%ld was closed\n",
|
||||
+ conn->c_connid);
|
||||
import_abort_all(job, 1);
|
||||
/* wait for import_main to finish... */
|
||||
PR_JoinThread(thread);
|
||||
--
|
||||
2.51.1
|
||||

@ -1,536 +0,0 @@
From 8cba0dd699541d562d74502f35176df33f188512 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Fri, 30 May 2025 11:12:43 +0000
Subject: [PATCH] Issue 6641 - modrdn fails when a user is member of multiple
 groups (#6643)

Bug description:
Rename of a user that is a member of multiple AM groups fails when the MO and
RI plugins are enabled.

Fix description:
MO plugin - After updating the entry member attribute, check the return
value. Retry the delete if the attr value exists and retry the add if the
attr value is missing.

RI plugin - A previous commit checked if the attr value was not present
before adding a mod. This commit was reverted in favour of overriding
the internal op return value, consistent with other plugins.

CI test from Viktor Ashirov <vashirov@redhat.com>

Fixes: https://github.com/389ds/389-ds-base/issues/6641
Relates: https://github.com/389ds/389-ds-base/issues/6566

Reviewed by: @progier389, @tbordaz, @vashirov (Thank you)

(cherry picked from commit 132ce4ab158679475cb83dbe28cc4fd7ced5cd19)
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
---
 .../tests/suites/plugins/modrdn_test.py | 174 ++++++++++++++++++
 ldap/servers/plugins/automember/automember.c | 11 +-
 ldap/servers/plugins/memberof/memberof.c | 123 +++++--------
 ldap/servers/plugins/referint/referint.c | 30 +--
 ldap/servers/slapd/modify.c | 51 +++++
 ldap/servers/slapd/slapi-plugin.h | 1 +
 6 files changed, 301 insertions(+), 89 deletions(-)
 create mode 100644 dirsrvtests/tests/suites/plugins/modrdn_test.py

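The override rule described above (and documented on the new slapi_single_modify_internal_override() helper later in this patch) amounts to a small decision table. Below is a minimal Python sketch of that rule, for illustration only; the constants and the function name are local to the example, not part of the patch.

# Sketch of the DEL/ADD error-override rule from the fix description.
# Numeric values are the standard LDAP result and mod-op codes.
LDAP_SUCCESS = 0
LDAP_NO_SUCH_ATTRIBUTE = 16
LDAP_TYPE_OR_VALUE_EXISTS = 20
LDAP_MOD_ADD = 0
LDAP_MOD_DELETE = 1

def override_result(mod_op, result):
    """Treat 'value already there' on ADD and 'value already gone' on DELETE
    as success; every other result is returned unchanged."""
    if mod_op == LDAP_MOD_ADD and result == LDAP_TYPE_OR_VALUE_EXISTS:
        return LDAP_SUCCESS
    if mod_op == LDAP_MOD_DELETE and result == LDAP_NO_SUCH_ATTRIBUTE:
        return LDAP_SUCCESS
    return result

# A modrdn that re-adds an already present memberOf value no longer fails:
assert override_result(LDAP_MOD_ADD, LDAP_TYPE_OR_VALUE_EXISTS) == LDAP_SUCCESS
assert override_result(LDAP_MOD_DELETE, LDAP_NO_SUCH_ATTRIBUTE) == LDAP_SUCCESS
assert override_result(LDAP_MOD_ADD, LDAP_NO_SUCH_ATTRIBUTE) == LDAP_NO_SUCH_ATTRIBUTE
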
diff --git a/dirsrvtests/tests/suites/plugins/modrdn_test.py b/dirsrvtests/tests/suites/plugins/modrdn_test.py
|
||||
new file mode 100644
|
||||
index 000000000..be79b0c3c
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/plugins/modrdn_test.py
|
||||
@@ -0,0 +1,174 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import pytest
|
||||
+from lib389.topologies import topology_st
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
+from lib389.idm.group import Groups
|
||||
+from lib389.idm.user import nsUserAccounts
|
||||
+from lib389.plugins import (
|
||||
+ AutoMembershipDefinitions,
|
||||
+ AutoMembershipPlugin,
|
||||
+ AutoMembershipRegexRules,
|
||||
+ MemberOfPlugin,
|
||||
+ ReferentialIntegrityPlugin,
|
||||
+)
|
||||
+
|
||||
+pytestmark = pytest.mark.tier1
|
||||
+
|
||||
+USER_PROPERTIES = {
|
||||
+ "uid": "userwith",
|
||||
+ "cn": "userwith",
|
||||
+ "uidNumber": "1000",
|
||||
+ "gidNumber": "2000",
|
||||
+ "homeDirectory": "/home/testuser",
|
||||
+ "displayName": "test user",
|
||||
+}
|
||||
+
|
||||
+
|
||||
+def test_modrdn_of_a_member_of_2_automember_groups(topology_st):
|
||||
+ """Test that a member of 2 automember groups can be renamed
|
||||
+
|
||||
+ :id: 0e40bdc4-a2d2-4bb8-8368-e02c8920bad2
|
||||
+
|
||||
+ :setup: Standalone instance
|
||||
+
|
||||
+ :steps:
|
||||
+ 1. Enable automember plugin
|
||||
+ 2. Create definition for users with A in the name
|
||||
+ 3. Create regex rule for users with A in the name
|
||||
+ 4. Create definition for users with Z in the name
|
||||
+ 5. Create regex rule for users with Z in the name
|
||||
+ 6. Enable memberof plugin
|
||||
+ 7. Enable referential integrity plugin
|
||||
+ 8. Restart the instance
|
||||
+ 9. Create groups
|
||||
+ 10. Create users userwitha, userwithz, userwithaz
|
||||
+ 11. Rename userwithaz
|
||||
+
|
||||
+ :expectedresults:
|
||||
+ 1. Success
|
||||
+ 2. Success
|
||||
+ 3. Success
|
||||
+ 4. Success
|
||||
+ 5. Success
|
||||
+ 6. Success
|
||||
+ 7. Success
|
||||
+ 8. Success
|
||||
+ 9. Success
|
||||
+ 10. Success
|
||||
+ 11. Success
|
||||
+ """
|
||||
+ inst = topology_st.standalone
|
||||
+
|
||||
+ # Enable automember plugin
|
||||
+ automember_plugin = AutoMembershipPlugin(inst)
|
||||
+ automember_plugin.enable()
|
||||
+
|
||||
+ # Create definition for users with A in the name
|
||||
+ automembers = AutoMembershipDefinitions(inst)
|
||||
+ automember = automembers.create(
|
||||
+ properties={
|
||||
+ "cn": "userswithA",
|
||||
+ "autoMemberScope": DEFAULT_SUFFIX,
|
||||
+ "autoMemberFilter": "objectclass=posixAccount",
|
||||
+ "autoMemberGroupingAttr": "member:dn",
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Create regex rule for users with A in the name
|
||||
+ automembers_regex_rule = AutoMembershipRegexRules(inst, f"{automember.dn}")
|
||||
+ automembers_regex_rule.create(
|
||||
+ properties={
|
||||
+ "cn": "userswithA",
|
||||
+ "autoMemberInclusiveRegex": ["cn=.*a.*"],
|
||||
+ "autoMemberTargetGroup": [f"cn=userswithA,ou=Groups,{DEFAULT_SUFFIX}"],
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Create definition for users with Z in the name
|
||||
+ automember = automembers.create(
|
||||
+ properties={
|
||||
+ "cn": "userswithZ",
|
||||
+ "autoMemberScope": DEFAULT_SUFFIX,
|
||||
+ "autoMemberFilter": "objectclass=posixAccount",
|
||||
+ "autoMemberGroupingAttr": "member:dn",
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Create regex rule for users with Z in the name
|
||||
+ automembers_regex_rule = AutoMembershipRegexRules(inst, f"{automember.dn}")
|
||||
+ automembers_regex_rule.create(
|
||||
+ properties={
|
||||
+ "cn": "userswithZ",
|
||||
+ "autoMemberInclusiveRegex": ["cn=.*z.*"],
|
||||
+ "autoMemberTargetGroup": [f"cn=userswithZ,ou=Groups,{DEFAULT_SUFFIX}"],
|
||||
+ }
|
||||
+ )
|
||||
+
|
||||
+ # Enable memberof plugin
|
||||
+ memberof_plugin = MemberOfPlugin(inst)
|
||||
+ memberof_plugin.enable()
|
||||
+
|
||||
+ # Enable referential integrity plugin
|
||||
+ referint_plugin = ReferentialIntegrityPlugin(inst)
|
||||
+ referint_plugin.enable()
|
||||
+
|
||||
+ # Restart the instance
|
||||
+ inst.restart()
|
||||
+
|
||||
+ # Create groups
|
||||
+ groups = Groups(inst, DEFAULT_SUFFIX)
|
||||
+ groupA = groups.create(properties={"cn": "userswithA"})
|
||||
+ groupZ = groups.create(properties={"cn": "userswithZ"})
|
||||
+
|
||||
+ # Create users
|
||||
+ users = nsUserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+
|
||||
+ # userwitha
|
||||
+ user_props = USER_PROPERTIES.copy()
|
||||
+ user_props.update(
|
||||
+ {
|
||||
+ "uid": USER_PROPERTIES["uid"] + "a",
|
||||
+ "cn": USER_PROPERTIES["cn"] + "a",
|
||||
+ }
|
||||
+ )
|
||||
+ user = users.create(properties=user_props)
|
||||
+
|
||||
+ # userwithz
|
||||
+ user_props.update(
|
||||
+ {
|
||||
+ "uid": USER_PROPERTIES["uid"] + "z",
|
||||
+ "cn": USER_PROPERTIES["cn"] + "z",
|
||||
+ }
|
||||
+ )
|
||||
+ user = users.create(properties=user_props)
|
||||
+
|
||||
+ # userwithaz
|
||||
+ user_props.update(
|
||||
+ {
|
||||
+ "uid": USER_PROPERTIES["uid"] + "az",
|
||||
+ "cn": USER_PROPERTIES["cn"] + "az",
|
||||
+ }
|
||||
+ )
|
||||
+ user = users.create(properties=user_props)
|
||||
+ user_orig_dn = user.dn
|
||||
+
|
||||
+ # Rename userwithaz
|
||||
+ user.rename(new_rdn="uid=userwith")
|
||||
+ user_new_dn = user.dn
|
||||
+
|
||||
+ assert user.get_attr_val_utf8("uid") != "userwithaz"
|
||||
+
|
||||
+ # Check groups contain renamed username
|
||||
+ assert groupA.is_member(user_new_dn)
|
||||
+ assert groupZ.is_member(user_new_dn)
|
||||
+
|
||||
+ # Check groups dont contain original username
|
||||
+ assert not groupA.is_member(user_orig_dn)
|
||||
+ assert not groupZ.is_member(user_orig_dn)
|
||||
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
|
||||
index 419adb052..fde92ee12 100644
|
||||
--- a/ldap/servers/plugins/automember/automember.c
|
||||
+++ b/ldap/servers/plugins/automember/automember.c
|
||||
@@ -1754,13 +1754,12 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
}
|
||||
|
||||
mod_pb = slapi_pblock_new();
|
||||
- slapi_modify_internal_set_pb(mod_pb, group_dn,
|
||||
- mods, 0, 0, automember_get_plugin_id(), 0);
|
||||
- slapi_modify_internal_pb(mod_pb);
|
||||
- slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &result);
|
||||
+ /* Do a single mod with error overrides for DEL/ADD */
|
||||
+ result = slapi_single_modify_internal_override(mod_pb, slapi_sdn_new_dn_byval(group_dn), mods,
|
||||
+ automember_get_plugin_id(), 0);
|
||||
|
||||
if(add){
|
||||
- if ((result != LDAP_SUCCESS) && (result != LDAP_TYPE_OR_VALUE_EXISTS)) {
|
||||
+ if (result != LDAP_SUCCESS) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_update_member_value - Unable to add \"%s\" as "
|
||||
"a \"%s\" value to group \"%s\" (%s).\n",
|
||||
@@ -1770,7 +1769,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
|
||||
}
|
||||
} else {
|
||||
/* delete value */
|
||||
- if ((result != LDAP_SUCCESS) && (result != LDAP_NO_SUCH_ATTRIBUTE)) {
|
||||
+ if (result != LDAP_SUCCESS) {
|
||||
slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
|
||||
"automember_update_member_value - Unable to delete \"%s\" as "
|
||||
"a \"%s\" value from group \"%s\" (%s).\n",
|
||||
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
|
||||
index f79b083a9..f3dc7cf00 100644
|
||||
--- a/ldap/servers/plugins/memberof/memberof.c
|
||||
+++ b/ldap/servers/plugins/memberof/memberof.c
|
||||
@@ -1482,18 +1482,9 @@ memberof_del_dn_type_callback(Slapi_Entry *e, void *callback_data)
|
||||
mod.mod_op = LDAP_MOD_DELETE;
|
||||
mod.mod_type = ((memberof_del_dn_data *)callback_data)->type;
|
||||
mod.mod_values = val;
|
||||
-
|
||||
- slapi_modify_internal_set_pb_ext(
|
||||
- mod_pb, slapi_entry_get_sdn(e),
|
||||
- mods, 0, 0,
|
||||
- memberof_get_plugin_id(), SLAPI_OP_FLAG_BYPASS_REFERRALS);
|
||||
-
|
||||
- slapi_modify_internal_pb(mod_pb);
|
||||
-
|
||||
- slapi_pblock_get(mod_pb,
|
||||
- SLAPI_PLUGIN_INTOP_RESULT,
|
||||
- &rc);
|
||||
-
|
||||
+ /* Internal mod with error overrides for DEL/ADD */
|
||||
+ rc = slapi_single_modify_internal_override(mod_pb, slapi_entry_get_sdn(e), mods,
|
||||
+ memberof_get_plugin_id(), SLAPI_OP_FLAG_BYPASS_REFERRALS);
|
||||
slapi_pblock_destroy(mod_pb);
|
||||
|
||||
if (rc == LDAP_NO_SUCH_ATTRIBUTE && val[0] == NULL) {
|
||||
@@ -1966,6 +1957,7 @@ memberof_replace_dn_type_callback(Slapi_Entry *e, void *callback_data)
|
||||
|
||||
return rc;
|
||||
}
|
||||
+
|
||||
LDAPMod **
|
||||
my_copy_mods(LDAPMod **orig_mods)
|
||||
{
|
||||
@@ -2774,33 +2766,6 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o
|
||||
replace_mod.mod_values = replace_val;
|
||||
}
|
||||
rc = memberof_add_memberof_attr(mods, op_to, config->auto_add_oc);
|
||||
- if (rc == LDAP_NO_SUCH_ATTRIBUTE || rc == LDAP_TYPE_OR_VALUE_EXISTS) {
|
||||
- if (rc == LDAP_TYPE_OR_VALUE_EXISTS) {
|
||||
- /*
|
||||
- * For some reason the new modrdn value is present, so retry
|
||||
- * the delete by itself and ignore the add op by tweaking
|
||||
- * the mod array.
|
||||
- */
|
||||
- mods[1] = NULL;
|
||||
- rc = memberof_add_memberof_attr(mods, op_to, config->auto_add_oc);
|
||||
- } else {
|
||||
- /*
|
||||
- * The memberof value to be replaced does not exist so just
|
||||
- * add the new value. Shuffle the mod array to apply only
|
||||
- * the add operation.
|
||||
- */
|
||||
- mods[0] = mods[1];
|
||||
- mods[1] = NULL;
|
||||
- rc = memberof_add_memberof_attr(mods, op_to, config->auto_add_oc);
|
||||
- if (rc == LDAP_TYPE_OR_VALUE_EXISTS) {
|
||||
- /*
|
||||
- * The entry already has the expected memberOf value, no
|
||||
- * problem just return success.
|
||||
- */
|
||||
- rc = LDAP_SUCCESS;
|
||||
- }
|
||||
- }
|
||||
- }
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4454,43 +4419,57 @@ memberof_add_memberof_attr(LDAPMod **mods, const char *dn, char *add_oc)
|
||||
Slapi_PBlock *mod_pb = NULL;
|
||||
int added_oc = 0;
|
||||
int rc = 0;
|
||||
+ LDAPMod *single_mod[2];
|
||||
|
||||
- while (1) {
|
||||
- mod_pb = slapi_pblock_new();
|
||||
- slapi_modify_internal_set_pb(
|
||||
- mod_pb, dn, mods, 0, 0,
|
||||
- memberof_get_plugin_id(), SLAPI_OP_FLAG_BYPASS_REFERRALS);
|
||||
- slapi_modify_internal_pb(mod_pb);
|
||||
-
|
||||
- slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
|
||||
- if (rc == LDAP_OBJECT_CLASS_VIOLATION) {
|
||||
- if (!add_oc || added_oc) {
|
||||
- /*
|
||||
- * We aren't auto adding an objectclass, or we already
|
||||
- * added the objectclass, and we are still failing.
|
||||
- */
|
||||
+ if (!dn || !mods) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "Invalid argument: %s%s is NULL\n",
|
||||
+ !dn ? "dn " : "",
|
||||
+ !mods ? "mods " : "");
|
||||
+ return LDAP_PARAM_ERROR;
|
||||
+ }
|
||||
+
|
||||
+
|
||||
+ mod_pb = slapi_pblock_new();
|
||||
+ /* Split multiple mods into individual mod operations */
|
||||
+ for (size_t i = 0; (mods != NULL) && (mods[i] != NULL); i++) {
|
||||
+ single_mod[0] = mods[i];
|
||||
+ single_mod[1] = NULL;
|
||||
+
|
||||
+ while (1) {
|
||||
+ slapi_pblock_init(mod_pb);
|
||||
+
|
||||
+ /* Internal mod with error overrides for DEL/ADD */
|
||||
+ rc = slapi_single_modify_internal_override(mod_pb, slapi_sdn_new_normdn_byref(dn), single_mod,
|
||||
+ memberof_get_plugin_id(), SLAPI_OP_FLAG_BYPASS_REFERRALS);
|
||||
+ if (rc == LDAP_OBJECT_CLASS_VIOLATION) {
|
||||
+ if (!add_oc || added_oc) {
|
||||
+ /*
|
||||
+ * We aren't auto adding an objectclass, or we already
|
||||
+ * added the objectclass, and we are still failing.
|
||||
+ */
|
||||
+ break;
|
||||
+ }
|
||||
+ rc = memberof_add_objectclass(add_oc, dn);
|
||||
+ slapi_log_err(SLAPI_LOG_WARNING, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "Entry %s - schema violation caught - repair operation %s\n",
|
||||
+ dn ? dn : "unknown",
|
||||
+ rc ? "failed" : "succeeded");
|
||||
+ if (rc) {
|
||||
+ /* Failed to add objectclass */
|
||||
+ rc = LDAP_OBJECT_CLASS_VIOLATION;
|
||||
+ break;
|
||||
+ }
|
||||
+ added_oc = 1;
|
||||
+ } else if (rc) {
|
||||
+ /* Some other fatal error */
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
+ "memberof_add_memberof_attr - Internal modify failed. rc=%d\n", rc);
|
||||
break;
|
||||
- }
|
||||
- rc = memberof_add_objectclass(add_oc, dn);
|
||||
- slapi_log_err(SLAPI_LOG_WARNING, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
- "Entry %s - schema violation caught - repair operation %s\n",
|
||||
- dn ? dn : "unknown",
|
||||
- rc ? "failed" : "succeeded");
|
||||
- if (rc) {
|
||||
- /* Failed to add objectclass */
|
||||
- rc = LDAP_OBJECT_CLASS_VIOLATION;
|
||||
+ } else {
|
||||
+ /* success */
|
||||
break;
|
||||
}
|
||||
- added_oc = 1;
|
||||
- slapi_pblock_destroy(mod_pb);
|
||||
- } else if (rc) {
|
||||
- /* Some other fatal error */
|
||||
- slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
|
||||
- "memberof_add_memberof_attr - Internal modify failed. rc=%d\n", rc);
|
||||
- break;
|
||||
- } else {
|
||||
- /* success */
|
||||
- break;
|
||||
}
|
||||
}
|
||||
slapi_pblock_destroy(mod_pb);
|
||||
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
|
||||
index 28240c1f6..c5e259d8d 100644
|
||||
--- a/ldap/servers/plugins/referint/referint.c
|
||||
+++ b/ldap/servers/plugins/referint/referint.c
|
||||
@@ -711,19 +711,28 @@ static int
|
||||
_do_modify(Slapi_PBlock *mod_pb, Slapi_DN *entrySDN, LDAPMod **mods)
|
||||
{
|
||||
int rc = 0;
|
||||
+ LDAPMod *mod[2];
|
||||
|
||||
- slapi_pblock_init(mod_pb);
|
||||
+ /* Split multiple modifications into individual modify operations */
|
||||
+ for (size_t i = 0; (mods != NULL) && (mods[i] != NULL); i++) {
|
||||
+ mod[0] = mods[i];
|
||||
+ mod[1] = NULL;
|
||||
|
||||
- if (allow_repl) {
|
||||
- /* Must set as a replicated operation */
|
||||
- slapi_modify_internal_set_pb_ext(mod_pb, entrySDN, mods, NULL, NULL,
|
||||
- referint_plugin_identity, OP_FLAG_REPLICATED);
|
||||
- } else {
|
||||
- slapi_modify_internal_set_pb_ext(mod_pb, entrySDN, mods, NULL, NULL,
|
||||
- referint_plugin_identity, 0);
|
||||
+ slapi_pblock_init(mod_pb);
|
||||
+
|
||||
+ /* Do a single mod with error overrides for DEL/ADD */
|
||||
+ if (allow_repl) {
|
||||
+ rc = slapi_single_modify_internal_override(mod_pb, entrySDN, mod,
|
||||
+ referint_plugin_identity, OP_FLAG_REPLICATED);
|
||||
+ } else {
|
||||
+ rc = slapi_single_modify_internal_override(mod_pb, entrySDN, mod,
|
||||
+ referint_plugin_identity, 0);
|
||||
+ }
|
||||
+
|
||||
+ if (rc != LDAP_SUCCESS) {
|
||||
+ return rc;
|
||||
+ }
|
||||
}
|
||||
- slapi_modify_internal_pb(mod_pb);
|
||||
- slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@@ -1033,7 +1042,6 @@ _update_all_per_mod(Slapi_DN *entrySDN, /* DN of the searched entry */
|
||||
/* (case 1) */
|
||||
slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
|
||||
slapi_mods_add_string(smods, LDAP_MOD_ADD, attrName, newDN);
|
||||
-
|
||||
} else if (p) {
|
||||
/* (case 2) */
|
||||
slapi_mods_add_string(smods, LDAP_MOD_DELETE, attrName, sval);
|
||||
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
|
||||
index 669bb104c..455eb63ec 100644
|
||||
--- a/ldap/servers/slapd/modify.c
|
||||
+++ b/ldap/servers/slapd/modify.c
|
||||
@@ -492,6 +492,57 @@ slapi_modify_internal_set_pb_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod
|
||||
slapi_pblock_set(pb, SLAPI_PLUGIN_IDENTITY, plugin_identity);
|
||||
}
|
||||
|
||||
+/* Performs a single LDAP modify operation with error overrides.
|
||||
+ *
|
||||
+ * If specific errors occur, such as attempting to add an existing attribute or
|
||||
+ * delete a non-existent one, the function overrides the error and returns success:
|
||||
+ * - LDAP_MOD_ADD -> LDAP_TYPE_OR_VALUE_EXISTS (ignored)
|
||||
+ * - LDAP_MOD_DELETE -> LDAP_NO_SUCH_ATTRIBUTE (ignored)
|
||||
+ *
|
||||
+ * Any other errors encountered during the operation will be returned as-is.
|
||||
+ */
|
||||
+int
|
||||
+slapi_single_modify_internal_override(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod **mod, Slapi_ComponentId *plugin_id, int op_flags)
|
||||
+{
|
||||
+ int rc = 0;
|
||||
+ int result = 0;
|
||||
+ int result_reset = 0;
|
||||
+ int mod_op = 0;
|
||||
+
|
||||
+ if (!pb || !sdn || !mod || !mod[0]) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, "slapi_single_modify_internal_override",
|
||||
+ "Invalid argument: %s%s%s%s is NULL\n",
|
||||
+ !pb ? "pb " : "",
|
||||
+ !sdn ? "sdn " : "",
|
||||
+ !mod ? "mod " : "",
|
||||
+ !mod[0] ? "mod[0] " : "");
|
||||
+
|
||||
+ return LDAP_PARAM_ERROR;
|
||||
+ }
|
||||
+
|
||||
+ slapi_modify_internal_set_pb_ext(pb, sdn, mod, NULL, NULL, plugin_id, op_flags);
|
||||
+ slapi_modify_internal_pb(pb);
|
||||
+ slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &result);
|
||||
+
|
||||
+ if (result != LDAP_SUCCESS) {
|
||||
+ mod_op = mod[0]->mod_op & LDAP_MOD_OP;
|
||||
+ if ((mod_op == LDAP_MOD_ADD && result == LDAP_TYPE_OR_VALUE_EXISTS) ||
|
||||
+ (mod_op == LDAP_MOD_DELETE && result == LDAP_NO_SUCH_ATTRIBUTE)) {
|
||||
+ slapi_log_err(SLAPI_LOG_PLUGIN, "slapi_single_modify_internal_override",
|
||||
+ "Overriding return code - plugin:%s dn:%s mod_op:%d result:%d\n",
|
||||
+ plugin_id ? plugin_id->sci_component_name : "unknown",
|
||||
+ sdn ? sdn->udn : "unknown", mod_op, result);
|
||||
+
|
||||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_INTOP_RESULT, &result_reset);
|
||||
+ rc = LDAP_SUCCESS;
|
||||
+ } else {
|
||||
+ rc = result;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ return rc;
|
||||
+}
|
||||
+
|
||||
/* Helper functions */
|
||||
|
||||
static int
|
||||
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
|
||||
index 9fdcaccc8..a84a60c92 100644
|
||||
--- a/ldap/servers/slapd/slapi-plugin.h
|
||||
+++ b/ldap/servers/slapd/slapi-plugin.h
|
||||
@@ -5965,6 +5965,7 @@ void slapi_add_entry_internal_set_pb(Slapi_PBlock *pb, Slapi_Entry *e, LDAPContr
|
||||
int slapi_add_internal_set_pb(Slapi_PBlock *pb, const char *dn, LDAPMod **attrs, LDAPControl **controls, Slapi_ComponentId *plugin_identity, int operation_flags);
|
||||
void slapi_modify_internal_set_pb(Slapi_PBlock *pb, const char *dn, LDAPMod **mods, LDAPControl **controls, const char *uniqueid, Slapi_ComponentId *plugin_identity, int operation_flags);
|
||||
void slapi_modify_internal_set_pb_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod **mods, LDAPControl **controls, const char *uniqueid, Slapi_ComponentId *plugin_identity, int operation_flags);
|
||||
+int slapi_single_modify_internal_override(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod **mod, Slapi_ComponentId *plugin_identity, int operation_flags);
|
||||
/**
|
||||
* Set \c Slapi_PBlock to perform modrdn/rename internally
|
||||
*
|
||||
--
|
||||
2.51.1
|
||||

@ -1,415 +0,0 @@
From ccaaaa31a86eb059315580249838d72e4a51bf8b Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Tue, 14 Jan 2025 18:12:56 +0100
Subject: [PATCH] Issue 6470 - Some replication status data are reset upon a
 restart (#6471)

Bug description:
The replication agreement contains operational attributes
related to the total init: nsds5replicaLastInitStart,
nsds5replicaLastInitEnd, nsds5replicaLastInitStatus.
Those attributes are reset at restart.

Fix description:
When reading the replication agreement from config
(agmt_new_from_entry) restore the attributes into
the in-memory RA.
Updates the RA config entry from the in-memory RA
during shutdown/cleanallruv/enable_ra.

fixes: #6470

Reviewed by: Simon Pichugin (Thanks !!)

(cherry picked from commit 90071a334517be523e498bded5b663c50c40ee3f)
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
---
 .../suites/replication/single_master_test.py | 128 ++++++++++++++++
 ldap/servers/plugins/replication/repl5.h | 4 +
 ldap/servers/plugins/replication/repl5_agmt.c | 140 +++++++++++++++++-
 .../plugins/replication/repl5_agmtlist.c | 1 +
 .../replication/repl5_replica_config.c | 1 +
 .../plugins/replication/repl_globals.c | 3 +
 6 files changed, 273 insertions(+), 4 deletions(-)

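Because the three attributes live on the replication agreement entry itself, the behaviour described in the fix text can be spot-checked from the client side by reading them before and after a restart. A minimal sketch using the same lib389 objects the new test below relies on (illustration only; it assumes an already configured supplier instance object named supplier):

from lib389._constants import DEFAULT_SUFFIX
from lib389.replica import Replicas
from lib389.agreement import Agreements

INIT_ATTRS = ("nsds5replicaLastInitStart",
              "nsds5replicaLastInitEnd",
              "nsds5replicaLastInitStatus")

def read_init_attrs(supplier):
    """Return the total-init attributes of the first agreement on the supplier."""
    replica = Replicas(supplier).get(DEFAULT_SUFFIX)
    agmt = Agreements(supplier, replica.dn).list()[0]
    return {attr: agmt.get_attr_val_utf8(attr) for attr in INIT_ATTRS}

# before = read_init_attrs(supplier)
# supplier.restart()
# assert read_init_attrs(supplier) == before   # preserved with this fix
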
diff --git a/dirsrvtests/tests/suites/replication/single_master_test.py b/dirsrvtests/tests/suites/replication/single_master_test.py
|
||||
index e927e6cfd..f448d2342 100644
|
||||
--- a/dirsrvtests/tests/suites/replication/single_master_test.py
|
||||
+++ b/dirsrvtests/tests/suites/replication/single_master_test.py
|
||||
@@ -13,6 +13,7 @@ from lib389.utils import *
|
||||
from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
|
||||
|
||||
from lib389.replica import ReplicationManager, Replicas
|
||||
+from lib389.agreement import Agreements
|
||||
from lib389.backend import Backends
|
||||
|
||||
from lib389.topologies import topology_m1c1 as topo_r # Replication
|
||||
@@ -154,6 +155,133 @@ def test_lastupdate_attr_before_init(topo_nr):
|
||||
json_obj = json.loads(json_status)
|
||||
log.debug("JSON status message: {}".format(json_obj))
|
||||
|
||||
+def test_total_init_operational_attr(topo_r):
|
||||
+ """Check that operation attributes nsds5replicaLastInitStatus
|
||||
+ nsds5replicaLastInitStart and nsds5replicaLastInitEnd
|
||||
+ are preserved between restart
|
||||
+
|
||||
+ :id: 6ba00bb1-87c0-47dd-86e0-ccf892b3985b
|
||||
+ :customerscenario: True
|
||||
+ :setup: Replication setup with supplier and consumer instances,
|
||||
+ test user on supplier
|
||||
+ :steps:
|
||||
+ 1. Check that user was replicated to consumer
|
||||
+ 2. Trigger a first total init
|
||||
+ 3. Check status/start/end values are set on the supplier
|
||||
+ 4. Restart supplier
|
||||
+ 5. Check previous status/start/end values are preserved
|
||||
+ 6. Trigger a second total init
|
||||
+ 7. Check status/start/end values are set on the supplier
|
||||
+ 8. Restart supplier
|
||||
+ 9. Check previous status/start/end values are preserved
|
||||
+ 10. Check status/start/end values are different between
|
||||
+ first and second total init
|
||||
+ :expectedresults:
|
||||
+ 1. The user should be replicated to consumer
|
||||
+ 2. Total init should be successful
|
||||
+ 3. It must exist a values
|
||||
+ 4. Operation should be successful
|
||||
+ 5. Check values are identical before/after restart
|
||||
+ 6. Total init should be successful
|
||||
+ 7. It must exist a values
|
||||
+ 8. Operation should be successful
|
||||
+ 9. Check values are identical before/after restart
|
||||
+ 10. values must differ between first/second total init
|
||||
+ """
|
||||
+
|
||||
+ supplier = topo_r.ms["supplier1"]
|
||||
+ consumer = topo_r.cs["consumer1"]
|
||||
+ repl = ReplicationManager(DEFAULT_SUFFIX)
|
||||
+
|
||||
+ # Create a test user
|
||||
+ m_users = UserAccounts(topo_r.ms["supplier1"], DEFAULT_SUFFIX)
|
||||
+ m_user = m_users.ensure_state(properties=TEST_USER_PROPERTIES)
|
||||
+ m_user.ensure_present('mail', 'testuser@redhat.com')
|
||||
+
|
||||
+ # Then check it is replicated
|
||||
+ log.info("Check that replication is working")
|
||||
+ repl.wait_for_replication(supplier, consumer)
|
||||
+ c_users = UserAccounts(topo_r.cs["consumer1"], DEFAULT_SUFFIX)
|
||||
+ c_user = c_users.get('testuser')
|
||||
+ assert c_user
|
||||
+
|
||||
+ # Retrieve the replication agreement S1->C1
|
||||
+ replica_supplier = Replicas(supplier).get(DEFAULT_SUFFIX)
|
||||
+ agmts_supplier = Agreements(supplier, replica_supplier.dn)
|
||||
+ supplier_consumer = None
|
||||
+ for agmt in agmts_supplier.list():
|
||||
+ if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and
|
||||
+ agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host):
|
||||
+ supplier_consumer = agmt
|
||||
+ break
|
||||
+ assert supplier_consumer
|
||||
+
|
||||
+ # Trigger a first total init and check that
|
||||
+ # start/end/status is updated AND preserved during a restart
|
||||
+ log.info("First total init")
|
||||
+ supplier_consumer.begin_reinit()
|
||||
+ (done, error) = supplier_consumer.wait_reinit()
|
||||
+ assert done is True
|
||||
+
|
||||
+ status_1 = supplier_consumer.get_attr_val_utf8("nsds5replicaLastInitStatus")
|
||||
+ assert status_1
|
||||
+
|
||||
+ initStart_1 = supplier_consumer.get_attr_val_utf8("nsds5replicaLastInitStart")
|
||||
+ assert initStart_1
|
||||
+
|
||||
+ initEnd_1 = supplier_consumer.get_attr_val_utf8("nsds5replicaLastInitEnd")
|
||||
+ assert initEnd_1
|
||||
+
|
||||
+ log.info("Check values from first total init are preserved")
|
||||
+ supplier.restart()
|
||||
+ post_restart_status_1 = supplier_consumer.get_attr_val_utf8("nsds5replicaLastInitStatus")
|
||||
+ assert post_restart_status_1
|
||||
+ assert post_restart_status_1 == status_1
|
||||
+
|
||||
+ post_restart_initStart_1 = supplier_consumer.get_attr_val_utf8("nsds5replicaLastInitStart")
|
||||
+ assert post_restart_initStart_1
|
||||
+ assert post_restart_initStart_1 == initStart_1
|
||||
+
|
||||
+ post_restart_initEnd_1 = supplier_consumer.get_attr_val_utf8("nsds5replicaLastInitEnd")
|
||||
+ assert post_restart_initEnd_1 == initEnd_1
|
||||
+
|
||||
+ # Trigger a second total init and check that
|
||||
+ # start/end/status is updated (differ from previous values)
|
||||
+ # AND new values are preserved during a restart
|
||||
+ time.sleep(1)
|
||||
+ log.info("Second total init")
|
||||
+ supplier_consumer.begin_reinit()
|
||||
+ (done, error) = supplier_consumer.wait_reinit()
|
||||
+ assert done is True
|
||||
+
|
||||
+ status_2 = supplier_consumer.get_attr_val_utf8("nsds5replicaLastInitStatus")
|
||||
+ assert status_2
|
||||
+
|
||||
+ initStart_2 = supplier_consumer.get_attr_val_utf8("nsds5replicaLastInitStart")
|
||||
+ assert initStart_2
|
||||
+
|
||||
+ initEnd_2 = supplier_consumer.get_attr_val_utf8("nsds5replicaLastInitEnd")
|
||||
+ assert initEnd_2
|
||||
+
|
||||
+ log.info("Check values from second total init are preserved")
|
||||
+ supplier.restart()
|
||||
+ post_restart_status_2 = supplier_consumer.get_attr_val_utf8("nsds5replicaLastInitStatus")
|
||||
+ assert post_restart_status_2
|
||||
+ assert post_restart_status_2 == status_2
|
||||
+
|
||||
+ post_restart_initStart_2 = supplier_consumer.get_attr_val_utf8("nsds5replicaLastInitStart")
|
||||
+ assert post_restart_initStart_2
|
||||
+ assert post_restart_initStart_2 == initStart_2
|
||||
+
|
||||
+ post_restart_initEnd_2 = supplier_consumer.get_attr_val_utf8("nsds5replicaLastInitEnd")
|
||||
+ assert post_restart_initEnd_2 == initEnd_2
|
||||
+
|
||||
+ # Check that values are updated by total init
|
||||
+ log.info("Check values from first/second total init are different")
|
||||
+ assert status_2 == status_1
|
||||
+ assert initStart_2 != initStart_1
|
||||
+ assert initEnd_2 != initEnd_1
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
|
||||
index c2fbff8c0..65e2059e7 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5.h
|
||||
+++ b/ldap/servers/plugins/replication/repl5.h
|
||||
@@ -165,6 +165,9 @@ extern const char *type_nsds5ReplicaBootstrapCredentials;
|
||||
extern const char *type_nsds5ReplicaBootstrapBindMethod;
|
||||
extern const char *type_nsds5ReplicaBootstrapTransportInfo;
|
||||
extern const char *type_replicaKeepAliveUpdateInterval;
|
||||
+extern const char *type_nsds5ReplicaLastInitStart;
|
||||
+extern const char *type_nsds5ReplicaLastInitEnd;
|
||||
+extern const char *type_nsds5ReplicaLastInitStatus;
|
||||
|
||||
/* Attribute names for windows replication agreements */
|
||||
extern const char *type_nsds7WindowsReplicaArea;
|
||||
@@ -430,6 +433,7 @@ void agmt_notify_change(Repl_Agmt *ra, Slapi_PBlock *pb);
|
||||
Object *agmt_get_consumer_ruv(Repl_Agmt *ra);
|
||||
ReplicaId agmt_get_consumer_rid(Repl_Agmt *ra, void *conn);
|
||||
int agmt_set_consumer_ruv(Repl_Agmt *ra, RUV *ruv);
|
||||
+void agmt_update_init_status(Repl_Agmt *ra);
|
||||
void agmt_update_consumer_ruv(Repl_Agmt *ra);
|
||||
CSN *agmt_get_consumer_schema_csn(Repl_Agmt *ra);
|
||||
void agmt_set_consumer_schema_csn(Repl_Agmt *ra, CSN *csn);
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
index a71343dec..c3b8d298c 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_agmt.c
|
||||
@@ -56,6 +56,7 @@
|
||||
#include "repl5_prot_private.h"
|
||||
#include "cl5_api.h"
|
||||
#include "slapi-plugin.h"
|
||||
+#include "slap.h"
|
||||
|
||||
#define DEFAULT_TIMEOUT 120 /* (seconds) default outbound LDAP connection */
|
||||
#define DEFAULT_FLOWCONTROL_WINDOW 1000 /* #entries sent without acknowledgment */
|
||||
@@ -510,10 +511,33 @@ agmt_new_from_entry(Slapi_Entry *e)
|
||||
ra->last_update_status[0] = '\0';
|
||||
ra->update_in_progress = PR_FALSE;
|
||||
ra->stop_in_progress = PR_FALSE;
|
||||
- ra->last_init_end_time = 0UL;
|
||||
- ra->last_init_start_time = 0UL;
|
||||
- ra->last_init_status[0] = '\0';
|
||||
- ra->changecounters = (struct changecounter **)slapi_ch_calloc(MAX_NUM_OF_MASTERS + 1,
|
||||
+ val = (char *)slapi_entry_attr_get_ref(e, type_nsds5ReplicaLastInitEnd);
|
||||
+ if (val) {
|
||||
+ time_t init_end_time;
|
||||
+
|
||||
+ init_end_time = parse_genTime((char *) val);
|
||||
+ if (init_end_time == NO_TIME || init_end_time == SLAPD_END_TIME) {
|
||||
+ ra->last_init_end_time = 0UL;
|
||||
+ } else {
|
||||
+ ra->last_init_end_time = init_end_time;
|
||||
+ }
|
||||
+ }
|
||||
+ val = (char *)slapi_entry_attr_get_ref(e, type_nsds5ReplicaLastInitStart);
|
||||
+ if (val) {
|
||||
+ time_t init_start_time;
|
||||
+
|
||||
+ init_start_time = parse_genTime((char *) val);
|
||||
+ if (init_start_time == NO_TIME || init_start_time == SLAPD_END_TIME) {
|
||||
+ ra->last_init_start_time = 0UL;
|
||||
+ } else {
|
||||
+ ra->last_init_start_time = init_start_time;
|
||||
+ }
|
||||
+ }
|
||||
+ val = (char *)slapi_entry_attr_get_ref(e, type_nsds5ReplicaLastInitStatus);
|
||||
+ if (val) {
|
||||
+ strcpy(ra->last_init_status, val);
|
||||
+ }
|
||||
+ ra->changecounters = (struct changecounter **)slapi_ch_calloc(MAX_NUM_OF_SUPPLIERS + 1,
|
||||
sizeof(struct changecounter *));
|
||||
ra->num_changecounters = 0;
|
||||
ra->max_changecounters = MAX_NUM_OF_MASTERS;
|
||||
@@ -2504,6 +2528,113 @@ agmt_set_consumer_ruv(Repl_Agmt *ra, RUV *ruv)
|
||||
return 0;
|
||||
}
|
||||
|
||||
+void
|
||||
+agmt_update_init_status(Repl_Agmt *ra)
|
||||
+{
|
||||
+ int rc;
|
||||
+ Slapi_PBlock *pb;
|
||||
+ LDAPMod **mods;
|
||||
+ int nb_mods = 0;
|
||||
+ int mod_idx;
|
||||
+ Slapi_Mod smod_start_time = {0};
|
||||
+ Slapi_Mod smod_end_time = {0};
|
||||
+ Slapi_Mod smod_status = {0};
|
||||
+
|
||||
+ PR_ASSERT(ra);
|
||||
+ PR_Lock(ra->lock);
|
||||
+
|
||||
+ if (ra->last_init_start_time) {
|
||||
+ nb_mods++;
|
||||
+ }
|
||||
+ if (ra->last_init_end_time) {
|
||||
+ nb_mods++;
|
||||
+ }
|
||||
+ if (ra->last_init_status[0] != '\0') {
|
||||
+ nb_mods++;
|
||||
+ }
|
||||
+ if (nb_mods == 0) {
|
||||
+ /* shortcut. no need to go further */
|
||||
+ PR_Unlock(ra->lock);
|
||||
+ return;
|
||||
+ }
|
||||
+ mods = (LDAPMod **) slapi_ch_malloc((nb_mods + 1) * sizeof(LDAPMod *));
|
||||
+ mod_idx = 0;
|
||||
+ if (ra->last_init_start_time) {
|
||||
+ struct berval val;
|
||||
+ char *time_tmp = NULL;
|
||||
+ slapi_mod_init(&smod_start_time, 1);
|
||||
+ slapi_mod_set_type(&smod_start_time, type_nsds5ReplicaLastInitStart);
|
||||
+ slapi_mod_set_operation(&smod_start_time, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES);
|
||||
+
|
||||
+ time_tmp = format_genTime(ra->last_init_start_time);
|
||||
+ val.bv_val = time_tmp;
|
||||
+ val.bv_len = strlen(time_tmp);
|
||||
+ slapi_mod_add_value(&smod_start_time, &val);
|
||||
+ slapi_ch_free((void **)&time_tmp);
|
||||
+ mods[mod_idx] = (LDAPMod *)slapi_mod_get_ldapmod_byref(&smod_start_time);
|
||||
+ mod_idx++;
|
||||
+ }
|
||||
+ if (ra->last_init_end_time) {
|
||||
+ struct berval val;
|
||||
+ char *time_tmp = NULL;
|
||||
+ slapi_mod_init(&smod_end_time, 1);
|
||||
+ slapi_mod_set_type(&smod_end_time, type_nsds5ReplicaLastInitEnd);
|
||||
+ slapi_mod_set_operation(&smod_end_time, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES);
|
||||
+
|
||||
+ time_tmp = format_genTime(ra->last_init_end_time);
|
||||
+ val.bv_val = time_tmp;
|
||||
+ val.bv_len = strlen(time_tmp);
|
||||
+ slapi_mod_add_value(&smod_end_time, &val);
|
||||
+ slapi_ch_free((void **)&time_tmp);
|
||||
+ mods[mod_idx] = (LDAPMod *)slapi_mod_get_ldapmod_byref(&smod_end_time);
|
||||
+ mod_idx++;
|
||||
+ }
|
||||
+ if (ra->last_init_status[0] != '\0') {
|
||||
+ struct berval val;
|
||||
+ char *init_status = NULL;
|
||||
+ slapi_mod_init(&smod_status, 1);
|
||||
+ slapi_mod_set_type(&smod_status, type_nsds5ReplicaLastInitStatus);
|
||||
+ slapi_mod_set_operation(&smod_status, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES);
|
||||
+
|
||||
+ init_status = slapi_ch_strdup(ra->last_init_status);
|
||||
+ val.bv_val = init_status;
|
||||
+ val.bv_len = strlen(init_status);
|
||||
+ slapi_mod_add_value(&smod_status, &val);
|
||||
+ slapi_ch_free((void **)&init_status);
|
||||
+ mods[mod_idx] = (LDAPMod *)slapi_mod_get_ldapmod_byref(&smod_status);
|
||||
+ mod_idx++;
|
||||
+ }
|
||||
+
|
||||
+ if (nb_mods) {
|
||||
+ /* it is ok to release the lock here because we are done with the agreement data.
|
||||
+ we have to do it before issuing the modify operation because it causes
|
||||
+ agmtlist_notify_all to be called which uses the same lock - hence the deadlock */
|
||||
+ PR_Unlock(ra->lock);
|
||||
+
|
||||
+ pb = slapi_pblock_new();
|
||||
+ mods[nb_mods] = NULL;
|
||||
+
|
||||
+ slapi_modify_internal_set_pb_ext(pb, ra->dn, mods, NULL, NULL,
|
||||
+ repl_get_plugin_identity(PLUGIN_MULTISUPPLIER_REPLICATION), 0);
|
||||
+ slapi_modify_internal_pb(pb);
|
||||
+
|
||||
+ slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
|
||||
+ if (rc != LDAP_SUCCESS && rc != LDAP_NO_SUCH_ATTRIBUTE) {
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "agmt_update_consumer_ruv - "
|
||||
+ "%s: agmt_update_consumer_ruv: "
|
||||
+ "failed to update consumer's RUV; LDAP error - %d\n",
|
||||
+ ra->long_name, rc);
|
||||
+ }
|
||||
+
|
||||
+ slapi_pblock_destroy(pb);
|
||||
+ } else {
|
||||
+ PR_Unlock(ra->lock);
|
||||
+ }
|
||||
+ slapi_mod_done(&smod_start_time);
|
||||
+ slapi_mod_done(&smod_end_time);
|
||||
+ slapi_mod_done(&smod_status);
|
||||
+}
|
||||
+
|
||||
void
|
||||
agmt_update_consumer_ruv(Repl_Agmt *ra)
|
||||
{
|
||||
@@ -3123,6 +3254,7 @@ agmt_set_enabled_from_entry(Repl_Agmt *ra, Slapi_Entry *e, char *returntext)
|
||||
PR_Unlock(ra->lock);
|
||||
agmt_stop(ra);
|
||||
agmt_update_consumer_ruv(ra);
|
||||
+ agmt_update_init_status(ra);
|
||||
agmt_set_last_update_status(ra, 0, 0, "agreement disabled");
|
||||
return rc;
|
||||
}
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_agmtlist.c b/ldap/servers/plugins/replication/repl5_agmtlist.c
|
||||
index 18b641f8c..e3b1e814c 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_agmtlist.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_agmtlist.c
|
||||
@@ -782,6 +782,7 @@ agmtlist_shutdown()
|
||||
ra = (Repl_Agmt *)object_get_data(ro);
|
||||
agmt_stop(ra);
|
||||
agmt_update_consumer_ruv(ra);
|
||||
+ agmt_update_init_status(ra);
|
||||
next_ro = objset_next_obj(agmt_set, ro);
|
||||
/* Object ro was released in objset_next_obj,
|
||||
* but the address ro can be still used to remove ro from objset. */
|
||||
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
|
||||
index aea2cf506..8cc7423bf 100644
|
||||
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
|
||||
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
|
||||
@@ -2006,6 +2006,7 @@ clean_agmts(cleanruv_data *data)
|
||||
cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Cleaning agmt...");
|
||||
agmt_stop(agmt);
|
||||
agmt_update_consumer_ruv(agmt);
|
||||
+ agmt_update_init_status(agmt);
|
||||
agmt_start(agmt);
|
||||
agmt_obj = agmtlist_get_next_agreement_for_replica(data->replica, agmt_obj);
|
||||
}
|
||||
diff --git a/ldap/servers/plugins/replication/repl_globals.c b/ldap/servers/plugins/replication/repl_globals.c
|
||||
index 797ca957f..e6b89c33b 100644
|
||||
--- a/ldap/servers/plugins/replication/repl_globals.c
|
||||
+++ b/ldap/servers/plugins/replication/repl_globals.c
|
||||
@@ -118,6 +118,9 @@ const char *type_nsds5ReplicaBootstrapBindDN = "nsds5ReplicaBootstrapBindDN";
|
||||
const char *type_nsds5ReplicaBootstrapCredentials = "nsds5ReplicaBootstrapCredentials";
|
||||
const char *type_nsds5ReplicaBootstrapBindMethod = "nsds5ReplicaBootstrapBindMethod";
|
||||
const char *type_nsds5ReplicaBootstrapTransportInfo = "nsds5ReplicaBootstrapTransportInfo";
|
||||
+const char *type_nsds5ReplicaLastInitStart = "nsds5replicaLastInitStart";
|
||||
+const char *type_nsds5ReplicaLastInitEnd = "nsds5replicaLastInitEnd";
|
||||
+const char *type_nsds5ReplicaLastInitStatus = "nsds5replicaLastInitStatus";
|
||||
|
||||
/* windows sync specific attributes */
|
||||
const char *type_nsds7WindowsReplicaArea = "nsds7WindowsReplicaSubtree";
|
||||
--
|
||||
2.51.1
|
||||

@ -1,671 +0,0 @@
From dfb7e19fdcefe4af683a235ea7113956248571e3 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Thu, 17 Nov 2022 14:21:17 +0100
Subject: [PATCH] Issue 3729 - RFE Extend log of operations statistics in
 access log (#5508)

Bug description:
Create a per operation framework to collect/display
statistics about internal resource consumption.

Fix description:

The fix contains 2 parts:
The framework, that registers a per operation object extension
(op_stat_init). The extension is used to store/retrieve
collected statistics.
To reduce the impact of collecting/logging it uses a toggle
with config attribute 'nsslapd-statlog-level' that is a bit mask,
so that data are collected and logged only if the appropriate
statistic level is set.

An example of statistic level regarding indexes fetching
during the evaluation of a search filter:
it is implemented in filterindex.c (store) and result.c (retrieve/log).
This path uses LDAP_STAT_READ_INDEX=0x1.
For LDAP_STAT_READ_INDEX, the collected data are:
 - for each key (attribute, type, value) the number of
   IDs
 - the duration to fetch all the values

design https://www.port389.org/docs/389ds/design/log-operation-stats.html
relates: #3729

Reviewed by: Pierre Rogier, Mark Reynolds (thanks !)

(cherry picked from commit a480d2cbfa2b1325f44ab3e1c393c5ee348b388e)
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
---
 .../tests/suites/ds_logs/ds_logs_test.py | 73 ++++++++++++++++
 ldap/servers/slapd/back-ldbm/filterindex.c | 49 +++++++++++
 ldap/servers/slapd/libglobs.c | 48 +++++++++++
 ldap/servers/slapd/log.c | 26 ++++++
 ldap/servers/slapd/log.h | 1 +
 ldap/servers/slapd/main.c | 1 +
 ldap/servers/slapd/operation.c | 86 +++++++++++++++++++
 ldap/servers/slapd/proto-slap.h | 8 ++
 ldap/servers/slapd/result.c | 64 ++++++++++++++
 ldap/servers/slapd/slap.h | 4 +
 ldap/servers/slapd/slapi-private.h | 27 ++++++
 11 files changed, 387 insertions(+)

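Since nsslapd-statlog-level is a bit mask, the gating described above is a plain bitwise AND between the configured level and the statistic's bit. A minimal Python illustration follows; the LDAP_STAT_READ_INDEX value comes from the description above, everything else is local to the example.

# Bit-mask gating as described in the fix text: a statistic is collected and
# logged only when its bit is set in nsslapd-statlog-level.
LDAP_STAT_READ_INDEX = 0x1   # index read statistics (per-key ID counts, duration)

def should_collect(statlog_level, stat_bit):
    """Return True when the given statistic bit is enabled in the config level."""
    return bool(statlog_level & stat_bit)

assert should_collect(0, LDAP_STAT_READ_INDEX) is False    # default: no stats
assert should_collect(0x1, LDAP_STAT_READ_INDEX) is True   # nsslapd-statlog-level: 1
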
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
index 84d721756..43288f67f 100644
|
||||
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
|
||||
@@ -27,6 +27,7 @@ from lib389.idm.group import Groups
|
||||
from lib389.idm.organizationalunit import OrganizationalUnits
|
||||
from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, PASSWORD
|
||||
from lib389.utils import ds_is_older, ds_is_newer
|
||||
+from lib389.dseldif import DSEldif
|
||||
import ldap
|
||||
import glob
|
||||
import re
|
||||
@@ -1250,6 +1251,78 @@ def test_missing_backend_suffix(topology_st, request):
|
||||
|
||||
request.addfinalizer(fin)
|
||||
|
||||
+def test_stat_index(topology_st, request):
|
||||
+ """Testing nsslapd-statlog-level with indexing statistics
|
||||
+
|
||||
+ :id: fcabab05-f000-468c-8eb4-02ce3c39c902
|
||||
+ :setup: Standalone instance
|
||||
+ :steps:
|
||||
+ 1. Check that nsslapd-statlog-level is 0 (default)
|
||||
+ 2. Create 20 users with 'cn' starting with 'user\_'
|
||||
+ 3. Check there is no statistic record in the access log with ADD
|
||||
+ 4. Check there is no statistic record in the access log with SRCH
|
||||
+ 5. Set nsslapd-statlog-level=LDAP_STAT_READ_INDEX (0x1) to get
|
||||
+ statistics when reading indexes
|
||||
+ 6. Check there is statistic records in access log with SRCH
|
||||
+ :expectedresults:
|
||||
+ 1. This should pass
|
||||
+ 2. This should pass
|
||||
+ 3. This should pass
|
||||
+ 4. This should pass
|
||||
+ 5. This should pass
|
||||
+ 6. This should pass
|
||||
+ """
|
||||
+ topology_st.standalone.start()
|
||||
+
|
||||
+ # Step 1
|
||||
+ log.info("Assert nsslapd-statlog-level is by default 0")
|
||||
+ assert topology_st.standalone.config.get_attr_val_int("nsslapd-statlog-level") == 0
|
||||
+
|
||||
+ # Step 2
|
||||
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
|
||||
+ users_set = []
|
||||
+ log.info('Adding 20 users')
|
||||
+ for i in range(20):
|
||||
+ name = 'user_%d' % i
|
||||
+ last_user = users.create(properties={
|
||||
+ 'uid': name,
|
||||
+ 'sn': name,
|
||||
+ 'cn': name,
|
||||
+ 'uidNumber': '1000',
|
||||
+ 'gidNumber': '1000',
|
||||
+ 'homeDirectory': '/home/%s' % name,
|
||||
+ 'mail': '%s@example.com' % name,
|
||||
+ 'userpassword': 'pass%s' % name,
|
||||
+ })
|
||||
+ users_set.append(last_user)
|
||||
+
|
||||
+ # Step 3
|
||||
+ assert not topology_st.standalone.ds_access_log.match('.*STAT read index.*')
|
||||
+
|
||||
+ # Step 4
|
||||
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "cn=user_*")
|
||||
+ assert not topology_st.standalone.ds_access_log.match('.*STAT read index.*')
|
||||
+
|
||||
+ # Step 5
|
||||
+ log.info("Set nsslapd-statlog-level: 1 to enable indexing statistics")
|
||||
+ topology_st.standalone.config.set("nsslapd-statlog-level", "1")
|
||||
+
|
||||
+ # Step 6
|
||||
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "cn=user_*")
|
||||
+ topology_st.standalone.stop()
|
||||
+ assert topology_st.standalone.ds_access_log.match('.*STAT read index.*')
|
||||
+ assert topology_st.standalone.ds_access_log.match('.*STAT read index: attribute.*')
|
||||
+ assert topology_st.standalone.ds_access_log.match('.*STAT read index: duration.*')
|
||||
+ topology_st.standalone.start()
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info('Deleting users')
|
||||
+ for user in users_set:
|
||||
+ user.delete()
|
||||
+ topology_st.standalone.config.set("nsslapd-statlog-level", "0")
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
if __name__ == '__main__':
|
||||
# Run isolated
|
||||
# -s for DEBUG mode
|
||||
diff --git a/ldap/servers/slapd/back-ldbm/filterindex.c b/ldap/servers/slapd/back-ldbm/filterindex.c
|
||||
index 8a79848c3..30550dde7 100644
|
||||
--- a/ldap/servers/slapd/back-ldbm/filterindex.c
|
||||
+++ b/ldap/servers/slapd/back-ldbm/filterindex.c
|
||||
@@ -1040,13 +1040,57 @@ keys2idl(
|
||||
int allidslimit)
|
||||
{
|
||||
IDList *idl = NULL;
|
||||
+ Op_stat *op_stat;
|
||||
+ PRBool collect_stat = PR_FALSE;
|
||||
|
||||
slapi_log_err(SLAPI_LOG_TRACE, "keys2idl", "=> type %s indextype %s\n", type, indextype);
|
||||
+
|
||||
+ /* Before reading the index take the start time */
|
||||
+ if (LDAP_STAT_READ_INDEX & config_get_statlog_level()) {
|
||||
+ op_stat = op_stat_get_operation_extension(pb);
|
||||
+ if (op_stat->search_stat) {
|
||||
+ collect_stat = PR_TRUE;
|
||||
+ clock_gettime(CLOCK_MONOTONIC, &(op_stat->search_stat->keys_lookup_start));
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
for (uint32_t i = 0; ivals[i] != NULL; i++) {
|
||||
IDList *idl2 = NULL;
|
||||
+ struct component_keys_lookup *key_stat;
|
||||
+ int key_len;
|
||||
|
||||
idl2 = index_read_ext_allids(pb, be, type, indextype, slapi_value_get_berval(ivals[i]), txn, err, unindexed, allidslimit);
|
||||
+ if (collect_stat) {
|
||||
+ /* gather the index lookup statistics */
|
||||
+ key_stat = (struct component_keys_lookup *) slapi_ch_calloc(1, sizeof (struct component_keys_lookup));
|
||||
+
|
||||
+ /* indextype e.g. "eq" or "sub" (see index.c) */
|
||||
+ if (indextype) {
|
||||
+ key_stat->index_type = slapi_ch_strdup(indextype);
|
||||
+ }
|
||||
+ /* key value e.g. '^st' or 'smith'*/
|
||||
+ key_len = slapi_value_get_length(ivals[i]);
|
||||
+ if (key_len) {
|
||||
+ key_stat->key = (char *) slapi_ch_calloc(1, key_len + 1);
|
||||
+ memcpy(key_stat->key, slapi_value_get_string(ivals[i]), key_len);
|
||||
+ }
|
||||
|
||||
+ /* attribute name e.g. 'uid' */
|
||||
+ if (type) {
|
||||
+ key_stat->attribute_type = slapi_ch_strdup(type);
|
||||
+ }
|
||||
+
|
||||
+ /* Number of lookup IDs with the key */
|
||||
+ key_stat->id_lookup_cnt = idl2 ? idl2->b_nids : 0;
|
||||
+ if (op_stat->search_stat->keys_lookup) {
|
||||
+ /* it already exist key stat. add key_stat at the head */
|
||||
+ key_stat->next = op_stat->search_stat->keys_lookup;
|
||||
+ } else {
|
||||
+ /* this is the first key stat record */
|
||||
+ key_stat->next = NULL;
|
||||
+ }
|
||||
+ op_stat->search_stat->keys_lookup = key_stat;
|
||||
+ }
|
||||
#ifdef LDAP_ERROR_LOGGING
|
||||
/* XXX if ( slapd_ldap_debug & LDAP_DEBUG_TRACE ) { XXX */
|
||||
{
|
||||
@@ -1080,5 +1124,10 @@ keys2idl(
|
||||
}
|
||||
}
|
||||
|
||||
+ /* All the keys have been fetch, time to take the completion time */
|
||||
+ if (collect_stat) {
|
||||
+ clock_gettime(CLOCK_MONOTONIC, &(op_stat->search_stat->keys_lookup_end));
|
||||
+ }
|
||||
+
|
||||
return (idl);
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 2097ab93c..99b2c5d8e 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -712,6 +712,10 @@ static struct config_get_and_set
NULL, 0,
(void **)&global_slapdFrontendConfig.accessloglevel,
CONFIG_INT, NULL, SLAPD_DEFAULT_ACCESSLOG_LEVEL_STR, NULL},
+ {CONFIG_STATLOGLEVEL_ATTRIBUTE, config_set_statlog_level,
+ NULL, 0,
+ (void **)&global_slapdFrontendConfig.statloglevel,
+ CONFIG_INT, NULL, SLAPD_DEFAULT_STATLOG_LEVEL, NULL},
{CONFIG_ERRORLOG_LOGROTATIONTIMEUNIT_ATTRIBUTE, NULL,
log_set_rotationtimeunit, SLAPD_ERROR_LOG,
(void **)&global_slapdFrontendConfig.errorlog_rotationunit,
@@ -1748,6 +1752,7 @@ FrontendConfig_init(void)
cfg->accessloglevel = SLAPD_DEFAULT_ACCESSLOG_LEVEL;
init_accesslogbuffering = cfg->accesslogbuffering = LDAP_ON;
init_csnlogging = cfg->csnlogging = LDAP_ON;
+ cfg->statloglevel = SLAPD_DEFAULT_STATLOG_LEVEL;

init_errorlog_logging_enabled = cfg->errorlog_logging_enabled = LDAP_ON;
init_external_libs_debug_enabled = cfg->external_libs_debug_enabled = LDAP_OFF;
@@ -5382,6 +5387,38 @@ config_set_accesslog_level(const char *attrname, char *value, char *errorbuf, in
return retVal;
}

+int
+config_set_statlog_level(const char *attrname, char *value, char *errorbuf, int apply)
+{
+ int retVal = LDAP_SUCCESS;
+ long level = 0;
+ char *endp = NULL;
+
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+
+ if (config_value_is_null(attrname, value, errorbuf, 1)) {
+ return LDAP_OPERATIONS_ERROR;
+ }
+
+ errno = 0;
+ level = strtol(value, &endp, 10);
+
+ if (*endp != '\0' || errno == ERANGE || level < 0) {
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: stat log level \"%s\" is invalid,"
+ " access log level must range from 0 to %lld",
+ attrname, value, (long long int)LONG_MAX);
+ retVal = LDAP_OPERATIONS_ERROR;
+ return retVal;
+ }
+
+ if (apply) {
+ CFG_LOCK_WRITE(slapdFrontendConfig);
+ g_set_statlog_level(level);
+ slapdFrontendConfig->statloglevel = level;
+ CFG_UNLOCK_WRITE(slapdFrontendConfig);
+ }
+ return retVal;
+}
/* set the referral-mode url (which puts us into referral mode) */
int
config_set_referral_mode(const char *attrname __attribute__((unused)), char *url, char *errorbuf, int apply)
@@ -6612,6 +6649,17 @@ config_get_accesslog_level()
return retVal;
}

+int
+config_get_statlog_level()
+{
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ int retVal;
+
+ retVal = slapdFrontendConfig->statloglevel;
+
+ return retVal;
+}
+
/* return integer -- don't worry about locking similar to config_check_referral_mode
below */

diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index 8074735e2..837a9c6fd 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -233,6 +233,17 @@ g_set_accesslog_level(int val)
LOG_ACCESS_UNLOCK_WRITE();
}

+/******************************************************************************
+* Set the stat level
+******************************************************************************/
+void
+g_set_statlog_level(int val)
+{
+ LOG_ACCESS_LOCK_WRITE();
+ loginfo.log_access_stat_level = val;
+ LOG_ACCESS_UNLOCK_WRITE();
+}
+
/******************************************************************************
* Set whether the process is alive or dead
* If it is detached, then we write the error in 'stderr'
@@ -283,6 +294,7 @@ g_log_init()
if ((loginfo.log_access_buffer->lock = PR_NewLock()) == NULL) {
exit(-1);
}
+ loginfo.log_access_stat_level = cfg->statloglevel;

/* ERROR LOG */
loginfo.log_error_state = cfg->errorlog_logging_enabled;
@@ -2640,7 +2652,21 @@ vslapd_log_access(char *fmt, va_list ap)

return (rc);
}
+int
+slapi_log_stat(int loglevel, const char *fmt, ...)
+{
+ char buf[2048];
+ va_list args;
+ int rc = LDAP_SUCCESS;

+ if (loglevel & loginfo.log_access_stat_level) {
+ va_start(args, fmt);
+ PR_vsnprintf(buf, sizeof(buf), fmt, args);
+ rc = slapi_log_access(LDAP_DEBUG_STATS, "%s", buf);
+ va_end(args);
+ }
+ return rc;
+}
int
slapi_log_access(int level,
char *fmt,
diff --git a/ldap/servers/slapd/log.h b/ldap/servers/slapd/log.h
index 9fb4e7425..6ac37bd29 100644
--- a/ldap/servers/slapd/log.h
+++ b/ldap/servers/slapd/log.h
@@ -120,6 +120,7 @@ struct logging_opts
int log_access_exptime; /* time */
int log_access_exptimeunit; /* unit time */
int log_access_exptime_secs; /* time in secs */
+ int log_access_stat_level; /* statistics level in access log file */

int log_access_level; /* access log level */
char *log_access_file; /* access log file path */
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index ac45c85d1..9b5b845cb 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -1040,6 +1040,7 @@ main(int argc, char **argv)
* changes are replicated as soon as the replication plugin is started.
*/
pw_exp_init();
+ op_stat_init();

plugin_print_lists();
plugin_startall(argc, argv, NULL /* specific plugin list */);
diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c
index 4dd3481c7..dacd1838f 100644
--- a/ldap/servers/slapd/operation.c
+++ b/ldap/servers/slapd/operation.c
@@ -652,6 +652,92 @@ slapi_operation_time_expiry(Slapi_Operation *o, time_t timeout, struct timespec
slapi_timespec_expire_rel(timeout, &(o->o_hr_time_rel), expiry);
}

+
+/*
+ * Operation extension for operation statistics
+ */
+static int op_stat_objtype = -1;
+static int op_stat_handle = -1;
+
+Op_stat *
+op_stat_get_operation_extension(Slapi_PBlock *pb)
+{
+ Slapi_Operation *op;
+
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+ return (Op_stat *)slapi_get_object_extension(op_stat_objtype,
+ op, op_stat_handle);
+}
+
+void
+op_stat_set_operation_extension(Slapi_PBlock *pb, Op_stat *op_stat)
+{
+ Slapi_Operation *op;
+
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+ slapi_set_object_extension(op_stat_objtype, op,
+ op_stat_handle, (void *)op_stat);
+}
+
+/*
+ * constructor for the operation object extension.
+ */
+static void *
+op_stat_constructor(void *object __attribute__((unused)), void *parent __attribute__((unused)))
+{
+ Op_stat *op_statp = NULL;
+ op_statp = (Op_stat *)slapi_ch_calloc(1, sizeof(Op_stat));
+ op_statp->search_stat = (Op_search_stat *)slapi_ch_calloc(1, sizeof(Op_search_stat));
+
+ return op_statp;
+}
+/*
+ * destructor for the operation object extension.
+ */
+static void
+op_stat_destructor(void *extension, void *object __attribute__((unused)), void *parent __attribute__((unused)))
+{
+ Op_stat *op_statp = (Op_stat *)extension;
+
+ if (NULL == op_statp) {
+ return;
+ }
+
+ if (op_statp->search_stat) {
+ struct component_keys_lookup *keys, *next;
+
+ /* free all the individual key counter */
+ keys = op_statp->search_stat->keys_lookup;
+ while (keys) {
+ next = keys->next;
+ slapi_ch_free_string(&keys->attribute_type);
+ slapi_ch_free_string(&keys->key);
+ slapi_ch_free_string(&keys->index_type);
+ slapi_ch_free((void **) &keys);
+ keys = next;
+ }
+ slapi_ch_free((void **) &op_statp->search_stat);
+ }
+ slapi_ch_free((void **) &op_statp);
+}
+
+#define SLAPI_OP_STAT_MODULE "Module to collect operation stat"
+/* Called once from main */
+void
+op_stat_init(void)
+{
+ if (slapi_register_object_extension(SLAPI_OP_STAT_MODULE,
+ SLAPI_EXT_OPERATION,
+ op_stat_constructor,
+ op_stat_destructor,
+ &op_stat_objtype,
+ &op_stat_handle) != 0) {
+ slapi_log_err(SLAPI_LOG_ERR, "op_stat_init",
+ "slapi_register_object_extension failed; "
+ "operation statistics is not enabled\n");
+ }
+}
+
/* Set the time the operation actually started */
void
slapi_operation_set_time_started(Slapi_Operation *o)
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 410a3c5fe..3a049ee76 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -291,6 +291,7 @@ int config_set_defaultreferral(const char *attrname, struct berval **value, char
int config_set_timelimit(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_errorlog_level(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_accesslog_level(const char *attrname, char *value, char *errorbuf, int apply);
+int config_set_statlog_level(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_auditlog(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_auditfaillog(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_userat(const char *attrname, char *value, char *errorbuf, int apply);
@@ -510,6 +511,7 @@ long long config_get_pw_minage(void);
long long config_get_pw_warning(void);
int config_get_errorlog_level(void);
int config_get_accesslog_level(void);
+int config_get_statlog_level();
int config_get_auditlog_logging_enabled(void);
int config_get_auditfaillog_logging_enabled(void);
char *config_get_auditlog_display_attrs(void);
@@ -815,10 +817,15 @@ int lock_fclose(FILE *fp, FILE *lfp);
#define LDAP_DEBUG_INFO 0x08000000 /* 134217728 */
#define LDAP_DEBUG_DEBUG 0x10000000 /* 268435456 */
#define LDAP_DEBUG_ALL_LEVELS 0xFFFFFF
+
+#define LDAP_STAT_READ_INDEX 0x00000001 /* 1 */
+#define LDAP_STAT_FREE_1 0x00000002 /* 2 */
+
extern int slapd_ldap_debug;

int loglevel_is_set(int level);
int slapd_log_error_proc(int sev_level, char *subsystem, char *fmt, ...);
+int slapi_log_stat(int loglevel, const char *fmt, ...);

int slapi_log_access(int level, char *fmt, ...)
#ifdef __GNUC__
@@ -874,6 +881,7 @@ int check_log_max_size(


void g_set_accesslog_level(int val);
+void g_set_statlog_level(int val);
void log__delete_rotated_logs(void);

/*
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index adcef9539..e94533d72 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -38,6 +38,7 @@ static PRLock *current_conn_count_mutex;

static int flush_ber(Slapi_PBlock *pb, Connection *conn, Operation *op, BerElement *ber, int type);
static char *notes2str(unsigned int notes, char *buf, size_t buflen);
+static void log_op_stat(Slapi_PBlock *pb);
static void log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries);
static void log_entry(Operation *op, Slapi_Entry *e);
static void log_referral(Operation *op);
@@ -2050,6 +2051,68 @@ notes2str(unsigned int notes, char *buf, size_t buflen)
return (buf);
}

+static void
+log_op_stat(Slapi_PBlock *pb)
+{
+
+ Connection *conn = NULL;
+ Operation *op = NULL;
+ Op_stat *op_stat;
+ struct timespec duration;
+ char stat_etime[ETIME_BUFSIZ] = {0};
+
+ if (config_get_statlog_level() == 0) {
+ return;
+ }
+
+ slapi_pblock_get(pb, SLAPI_CONNECTION, &conn);
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+ op_stat = op_stat_get_operation_extension(pb);
+
+ if (conn == NULL || op == NULL || op_stat == NULL) {
+ return;
+ }
+ /* process the operation */
+ switch (op->o_tag) {
+ case LDAP_REQ_BIND:
+ case LDAP_REQ_UNBIND:
+ case LDAP_REQ_ADD:
+ case LDAP_REQ_DELETE:
+ case LDAP_REQ_MODRDN:
+ case LDAP_REQ_MODIFY:
+ case LDAP_REQ_COMPARE:
+ break;
+ case LDAP_REQ_SEARCH:
+ if ((LDAP_STAT_READ_INDEX & config_get_statlog_level()) &&
+ op_stat->search_stat) {
+ struct component_keys_lookup *key_info;
+ for (key_info = op_stat->search_stat->keys_lookup; key_info; key_info = key_info->next) {
+ slapi_log_stat(LDAP_STAT_READ_INDEX,
+ "conn=%" PRIu64 " op=%d STAT read index: attribute=%s key(%s)=%s --> count %d\n",
+ op->o_connid, op->o_opid,
+ key_info->attribute_type, key_info->index_type, key_info->key,
+ key_info->id_lookup_cnt);
+ }
+
+ /* total elapsed time */
+ slapi_timespec_diff(&op_stat->search_stat->keys_lookup_end, &op_stat->search_stat->keys_lookup_start, &duration);
+ snprintf(stat_etime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)duration.tv_sec, (int64_t)duration.tv_nsec);
+ slapi_log_stat(LDAP_STAT_READ_INDEX,
+ "conn=%" PRIu64 " op=%d STAT read index: duration %s\n",
+ op->o_connid, op->o_opid, stat_etime);
+ }
+ break;
+ case LDAP_REQ_ABANDON_30:
+ case LDAP_REQ_ABANDON:
+ break;
+
+ default:
+ slapi_log_err(SLAPI_LOG_ERR,
+ "log_op_stat", "Ignoring unknown LDAP request (conn=%" PRIu64 ", tag=0x%lx)\n",
+ conn->c_connid, op->o_tag);
+ break;
+ }
+}

static void
log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries)
@@ -2206,6 +2269,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
} else {
ext_str = "";
}
+ log_op_stat(pb);
slapi_log_access(LDAP_DEBUG_STATS,
"conn=%" PRIu64 " op=%d RESULT err=%d"
" tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s\n",
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 927576b70..82550527c 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -348,6 +348,8 @@ typedef void (*VFPV)(); /* takes undefined arguments */
#define SLAPD_DEFAULT_FE_ERRORLOG_LEVEL_STR "16384"
#define SLAPD_DEFAULT_ACCESSLOG_LEVEL 256
#define SLAPD_DEFAULT_ACCESSLOG_LEVEL_STR "256"
+#define SLAPD_DEFAULT_STATLOG_LEVEL 0
+#define SLAPD_DEFAULT_STATLOG_LEVEL_STR "0"

#define SLAPD_DEFAULT_DISK_THRESHOLD 2097152
#define SLAPD_DEFAULT_DISK_THRESHOLD_STR "2097152"
@@ -2082,6 +2084,7 @@ typedef struct _slapdEntryPoints
#define CONFIG_SCHEMAREPLACE_ATTRIBUTE "nsslapd-schemareplace"
#define CONFIG_LOGLEVEL_ATTRIBUTE "nsslapd-errorlog-level"
#define CONFIG_ACCESSLOGLEVEL_ATTRIBUTE "nsslapd-accesslog-level"
+#define CONFIG_STATLOGLEVEL_ATTRIBUTE "nsslapd-statlog-level"
#define CONFIG_ACCESSLOG_MODE_ATTRIBUTE "nsslapd-accesslog-mode"
#define CONFIG_ERRORLOG_MODE_ATTRIBUTE "nsslapd-errorlog-mode"
#define CONFIG_AUDITLOG_MODE_ATTRIBUTE "nsslapd-auditlog-mode"
@@ -2457,6 +2460,7 @@ typedef struct _slapdFrontendConfig
int accessloglevel;
slapi_onoff_t accesslogbuffering;
slapi_onoff_t csnlogging;
+ int statloglevel;

/* ERROR LOG */
slapi_onoff_t errorlog_logging_enabled;
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index 4b6cf29eb..bd7a4b39d 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -449,6 +449,33 @@ int operation_is_flag_set(Slapi_Operation *op, int flag);
unsigned long operation_get_type(Slapi_Operation *op);
LDAPMod **copy_mods(LDAPMod **orig_mods);

+/* Structures use to collect statistics per operation */
+/* used for LDAP_STAT_READ_INDEX */
+struct component_keys_lookup
+{
+ char *index_type;
+ char *attribute_type;
+ char *key;
+ int id_lookup_cnt;
+ struct component_keys_lookup *next;
+};
+typedef struct op_search_stat
+{
+ struct component_keys_lookup *keys_lookup;
+ struct timespec keys_lookup_start;
+ struct timespec keys_lookup_end;
+} Op_search_stat;
+
+/* structure store in the operation extension */
+typedef struct op_stat
+{
+ Op_search_stat *search_stat;
+} Op_stat;
+
+void op_stat_init(void);
+Op_stat *op_stat_get_operation_extension(Slapi_PBlock *pb);
+void op_stat_set_operation_extension(Slapi_PBlock *pb, Op_stat *op_stat);
+
/*
* From ldap.h
* #define LDAP_MOD_ADD 0x00
--
2.51.1

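The patch above wires the new nsslapd-statlog-level attribute into cn=config. As a quick illustration (not part of the patch), a minimal lib389 sketch for turning the statistics on and checking for the new records could look like the following; "inst" is assumed to be an open DirSrv handle and the suffix and filter are assumptions, while the attribute name and the value 1 (LDAP_STAT_READ_INDEX) come from the patch itself:

    # Sketch only: enable index read statistics and look for STAT records.
    import ldap

    inst.config.set("nsslapd-statlog-level", "1")
    # Any indexed search should now produce "STAT read index" access log lines.
    inst.search_s("dc=example,dc=com", ldap.SCOPE_SUBTREE, "(uid=test_1)")
    inst.restart()  # reopen the logs before matching, as the tests below do
    assert inst.ds_access_log.match('.*STAT read index.*')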
@ -1,297 +0,0 @@
From bc2629db166667cdb01fde2b9e249253d5d868b5 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Mon, 21 Nov 2022 11:41:15 +0100
Subject: [PATCH] Issue 3729 - (cont) RFE Extend log of operations statistics
in access log (#5538)

Bug description:
This is a continuation of the #3729
The previous fix did not manage internal SRCH, so
statistics of internal SRCH were not logged

Fix description:
For internal operation log_op_stat uses
connid/op_id/op_internal_id/op_nested_count that have been
computed log_result

For direct operation log_op_stat uses info from the
operation itself (o_connid and o_opid)

log_op_stat relies on operation_type rather than
o_tag that is not available for internal operation

relates: #3729

Reviewed by: Pierre Rogier

(cherry picked from commit 7915e85a55476647ac54330de4f6e89faf6f2934)
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
---
.../tests/suites/ds_logs/ds_logs_test.py | 90 ++++++++++++++++++-
ldap/servers/slapd/proto-slap.h | 2 +-
ldap/servers/slapd/result.c | 74 +++++++++------
3 files changed, 136 insertions(+), 30 deletions(-)

diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
index 43288f67f..fbb8d7bf1 100644
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
@@ -21,7 +21,7 @@ from lib389.idm.domain import Domain
from lib389.configurations.sample import create_base_domain
from lib389._mapped_object import DSLdapObject
from lib389.topologies import topology_st
-from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, AutoMembershipDefinitions
+from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, AutoMembershipDefinitions, MemberOfPlugin
from lib389.idm.user import UserAccounts, UserAccount
from lib389.idm.group import Groups
from lib389.idm.organizationalunit import OrganizationalUnits
@@ -1323,6 +1323,94 @@ def test_stat_index(topology_st, request):

request.addfinalizer(fin)

+def test_stat_internal_op(topology_st, request):
+ """Check that statistics can also be collected for internal operations
+
+ :id: 19f393bd-5866-425a-af7a-4dade06d5c77
+ :setup: Standalone Instance
+ :steps:
+ 1. Check that nsslapd-statlog-level is 0 (default)
+ 2. Enable memberof plugins
+ 3. Create a user
+ 4. Remove access log (to only detect new records)
+ 5. Enable statistic logging nsslapd-statlog-level=1
+ 6. Check that on direct SRCH there is no 'Internal' Stat records
+ 7. Remove access log (to only detect new records)
+ 8. Add group with the user, so memberof triggers internal search
+ and check it exists 'Internal' Stat records
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ """
+
+ inst = topology_st.standalone
+
+ # Step 1
+ log.info("Assert nsslapd-statlog-level is by default 0")
+ assert topology_st.standalone.config.get_attr_val_int("nsslapd-statlog-level") == 0
+
+ # Step 2
+ memberof = MemberOfPlugin(inst)
+ memberof.enable()
+ inst.restart()
+
+ # Step 3 Add setup entries
+ users = UserAccounts(inst, DEFAULT_SUFFIX, rdn=None)
+ user = users.create(properties={'uid': 'test_1',
+ 'cn': 'test_1',
+ 'sn': 'test_1',
+ 'description': 'member',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/testuser'})
+ # Step 4 reset accesslog
+ topology_st.standalone.stop()
+ lpath = topology_st.standalone.ds_access_log._get_log_path()
+ os.unlink(lpath)
+ topology_st.standalone.start()
+
+ # Step 5 enable statistics
+ log.info("Set nsslapd-statlog-level: 1 to enable indexing statistics")
+ topology_st.standalone.config.set("nsslapd-statlog-level", "1")
+
+ # Step 6 for direct SRCH only non internal STAT records
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "uid=test_1")
+ topology_st.standalone.stop()
+ assert topology_st.standalone.ds_access_log.match('.*STAT read index.*')
+ assert topology_st.standalone.ds_access_log.match('.*STAT read index: attribute.*')
+ assert topology_st.standalone.ds_access_log.match('.*STAT read index: duration.*')
+ assert not topology_st.standalone.ds_access_log.match('.*Internal.*STAT.*')
+ topology_st.standalone.start()
+
+ # Step 7 reset accesslog
+ topology_st.standalone.stop()
+ lpath = topology_st.standalone.ds_access_log._get_log_path()
+ os.unlink(lpath)
+ topology_st.standalone.start()
+
+ # Step 8 trigger internal searches and check internal stat records
+ groups = Groups(inst, DEFAULT_SUFFIX, rdn=None)
+ group = groups.create(properties={'cn': 'mygroup',
+ 'member': 'uid=test_1,%s' % DEFAULT_SUFFIX,
+ 'description': 'group'})
+ topology_st.standalone.restart()
+ assert topology_st.standalone.ds_access_log.match('.*Internal.*STAT read index.*')
+ assert topology_st.standalone.ds_access_log.match('.*Internal.*STAT read index: attribute.*')
+ assert topology_st.standalone.ds_access_log.match('.*Internal.*STAT read index: duration.*')
+
+ def fin():
+ log.info('Deleting user/group')
+ user.delete()
+ group.delete()
+
+ request.addfinalizer(fin)
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 3a049ee76..6e473a08e 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -511,7 +511,7 @@ long long config_get_pw_minage(void);
long long config_get_pw_warning(void);
int config_get_errorlog_level(void);
int config_get_accesslog_level(void);
-int config_get_statlog_level();
+int config_get_statlog_level(void);
int config_get_auditlog_logging_enabled(void);
int config_get_auditfaillog_logging_enabled(void);
char *config_get_auditlog_display_attrs(void);
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index e94533d72..87641e92f 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -38,7 +38,7 @@ static PRLock *current_conn_count_mutex;

static int flush_ber(Slapi_PBlock *pb, Connection *conn, Operation *op, BerElement *ber, int type);
static char *notes2str(unsigned int notes, char *buf, size_t buflen);
-static void log_op_stat(Slapi_PBlock *pb);
+static void log_op_stat(Slapi_PBlock *pb, uint64_t connid, int32_t op_id, int32_t op_internal_id, int32_t op_nested_count);
static void log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries);
static void log_entry(Operation *op, Slapi_Entry *e);
static void log_referral(Operation *op);
@@ -2051,65 +2051,82 @@ notes2str(unsigned int notes, char *buf, size_t buflen)
return (buf);
}

+#define STAT_LOG_CONN_OP_FMT_INT_INT "conn=Internal(%" PRIu64 ") op=%d(%d)(%d)"
+#define STAT_LOG_CONN_OP_FMT_EXT_INT "conn=%" PRIu64 " (Internal) op=%d(%d)(%d)"
static void
-log_op_stat(Slapi_PBlock *pb)
+log_op_stat(Slapi_PBlock *pb, uint64_t connid, int32_t op_id, int32_t op_internal_id, int32_t op_nested_count)
{
-
- Connection *conn = NULL;
Operation *op = NULL;
Op_stat *op_stat;
struct timespec duration;
char stat_etime[ETIME_BUFSIZ] = {0};
+ int internal_op;

if (config_get_statlog_level() == 0) {
return;
}

- slapi_pblock_get(pb, SLAPI_CONNECTION, &conn);
slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+ internal_op = operation_is_flag_set(op, OP_FLAG_INTERNAL);
op_stat = op_stat_get_operation_extension(pb);

- if (conn == NULL || op == NULL || op_stat == NULL) {
+ if (op == NULL || op_stat == NULL) {
return;
}
/* process the operation */
- switch (op->o_tag) {
- case LDAP_REQ_BIND:
- case LDAP_REQ_UNBIND:
- case LDAP_REQ_ADD:
- case LDAP_REQ_DELETE:
- case LDAP_REQ_MODRDN:
- case LDAP_REQ_MODIFY:
- case LDAP_REQ_COMPARE:
+ switch (operation_get_type(op)) {
+ case SLAPI_OPERATION_BIND:
+ case SLAPI_OPERATION_UNBIND:
+ case SLAPI_OPERATION_ADD:
+ case SLAPI_OPERATION_DELETE:
+ case SLAPI_OPERATION_MODRDN:
+ case SLAPI_OPERATION_MODIFY:
+ case SLAPI_OPERATION_COMPARE:
+ case SLAPI_OPERATION_EXTENDED:
break;
- case LDAP_REQ_SEARCH:
+ case SLAPI_OPERATION_SEARCH:
if ((LDAP_STAT_READ_INDEX & config_get_statlog_level()) &&
op_stat->search_stat) {
struct component_keys_lookup *key_info;
for (key_info = op_stat->search_stat->keys_lookup; key_info; key_info = key_info->next) {
- slapi_log_stat(LDAP_STAT_READ_INDEX,
- "conn=%" PRIu64 " op=%d STAT read index: attribute=%s key(%s)=%s --> count %d\n",
- op->o_connid, op->o_opid,
- key_info->attribute_type, key_info->index_type, key_info->key,
- key_info->id_lookup_cnt);
+ if (internal_op) {
+ slapi_log_stat(LDAP_STAT_READ_INDEX,
+ connid == 0 ? STAT_LOG_CONN_OP_FMT_INT_INT "STAT read index: attribute=%s key(%s)=%s --> count %d\n":
+ STAT_LOG_CONN_OP_FMT_EXT_INT "STAT read index: attribute=%s key(%s)=%s --> count %d\n",
+ connid, op_id, op_internal_id, op_nested_count,
+ key_info->attribute_type, key_info->index_type, key_info->key,
+ key_info->id_lookup_cnt);
+ } else {
+ slapi_log_stat(LDAP_STAT_READ_INDEX,
+ "conn=%" PRIu64 " op=%d STAT read index: attribute=%s key(%s)=%s --> count %d\n",
+ connid, op_id,
+ key_info->attribute_type, key_info->index_type, key_info->key,
+ key_info->id_lookup_cnt);
+ }
}

/* total elapsed time */
slapi_timespec_diff(&op_stat->search_stat->keys_lookup_end, &op_stat->search_stat->keys_lookup_start, &duration);
snprintf(stat_etime, ETIME_BUFSIZ, "%" PRId64 ".%.09" PRId64 "", (int64_t)duration.tv_sec, (int64_t)duration.tv_nsec);
- slapi_log_stat(LDAP_STAT_READ_INDEX,
- "conn=%" PRIu64 " op=%d STAT read index: duration %s\n",
- op->o_connid, op->o_opid, stat_etime);
+ if (internal_op) {
+ slapi_log_stat(LDAP_STAT_READ_INDEX,
+ connid == 0 ? STAT_LOG_CONN_OP_FMT_INT_INT "STAT read index: duration %s\n":
+ STAT_LOG_CONN_OP_FMT_EXT_INT "STAT read index: duration %s\n",
+ connid, op_id, op_internal_id, op_nested_count, stat_etime);
+ } else {
+ slapi_log_stat(LDAP_STAT_READ_INDEX,
+ "conn=%" PRIu64 " op=%d STAT read index: duration %s\n",
+ op->o_connid, op->o_opid, stat_etime);
+ }
}
break;
- case LDAP_REQ_ABANDON_30:
- case LDAP_REQ_ABANDON:
+ case SLAPI_OPERATION_ABANDON:
break;

default:
slapi_log_err(SLAPI_LOG_ERR,
- "log_op_stat", "Ignoring unknown LDAP request (conn=%" PRIu64 ", tag=0x%lx)\n",
- conn->c_connid, op->o_tag);
+ "log_op_stat", "Ignoring unknown LDAP request (conn=%" PRIu64 ", op_type=0x%lx)\n",
+ connid, operation_get_type(op));
break;
}
}
@@ -2269,7 +2286,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
} else {
ext_str = "";
}
- log_op_stat(pb);
+ log_op_stat(pb, op->o_connid, op->o_opid, 0, 0);
slapi_log_access(LDAP_DEBUG_STATS,
"conn=%" PRIu64 " op=%d RESULT err=%d"
" tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s%s\n",
@@ -2284,6 +2301,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries
}
} else {
int optype;
+ log_op_stat(pb, connid, op_id, op_internal_id, op_nested_count);
#define LOG_MSG_FMT " tag=%" BERTAG_T " nentries=%d wtime=%s optime=%s etime=%s%s%s\n"
slapi_log_access(LDAP_DEBUG_ARGS,
connid == 0 ? LOG_CONN_OP_FMT_INT_INT LOG_MSG_FMT :
--
2.51.1

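With this change an internal operation's STAT record is prefixed either "conn=Internal(<id>) op=<id>(<internal id>)(<nested count>)" or "conn=<id> (Internal) op=...", while direct operations keep the plain "conn=<id> op=<id>" prefix, as defined by the format macros in the diff above. A purely illustrative Python sketch for separating the two kinds of records when reading an access log (the log path is an assumption, and the regexes are derived from those format strings rather than taken from any library):

    import re

    direct = re.compile(r'conn=\d+ op=\d+ STAT read index')
    internal = re.compile(r'conn=(?:Internal\(\d+\)|\d+ \(Internal\)) op=\d+\(\d+\)\(\d+\)\s*STAT read index')

    with open('/var/log/dirsrv/slapd-standalone1/access') as f:
        for line in f:
            if internal.search(line):
                print('internal:', line.rstrip())
            elif direct.search(line):
                print('direct:', line.rstrip())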
@ -1,124 +0,0 @@
From f6eca13762139538d974c1cb285ddf1354fe7837 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Tue, 28 Mar 2023 10:27:01 +0200
Subject: [PATCH] Issue 5710 - subtree search statistics for index lookup does
not report ancestorid/entryrdn lookups (#5711)

Bug description:
The RFE #3729 allows to collect index lookups per search
operation. For subtree searches the server lookup ancestorid
and those lookup are not recorded

Fix description:
if statistics are enabled, record ancestorid lookup

relates: #5710

Reviewed by: Mark Reynolds (thanks)

(cherry picked from commit fca27c3d0487c9aea9dc7da151a79e3ce0fc7d35)
Signed-off-by: Masahiro Matsuya <mmatsuya@redhat.com>
---
ldap/servers/slapd/back-ldbm/ldbm_search.c | 59 ++++++++++++++++++++++
1 file changed, 59 insertions(+)

diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
index 8c07d1395..5d98e288e 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
@@ -35,6 +35,7 @@ static IDList *onelevel_candidates(Slapi_PBlock *pb, backend *be, const char *ba
static back_search_result_set *new_search_result_set(IDList *idl, int vlv, int lookthroughlimit);
static void delete_search_result_set(Slapi_PBlock *pb, back_search_result_set **sr);
static int can_skip_filter_test(Slapi_PBlock *pb, struct slapi_filter *f, int scope, IDList *idl);
+static void stat_add_srch_lookup(Op_stat *op_stat, char * attribute_type, const char* index_type, char *key_value, int lookup_cnt);

/* This is for performance testing, allows us to disable ACL checking altogether */
#if defined(DISABLE_ACL_CHECK)
@@ -1167,6 +1168,45 @@ create_subtree_filter(Slapi_Filter *filter, int managedsait, Slapi_Filter **focr
return ftop;
}

+static void
+stat_add_srch_lookup(Op_stat *op_stat, char * attribute_type, const char* index_type, char *key_value, int lookup_cnt)
+{
+ struct component_keys_lookup *key_stat;
+
+ if ((op_stat == NULL) || (op_stat->search_stat == NULL)) {
+ return;
+ }
+
+ /* gather the index lookup statistics */
+ key_stat = (struct component_keys_lookup *) slapi_ch_calloc(1, sizeof (struct component_keys_lookup));
+
+ /* indextype is "eq" */
+ if (index_type) {
+ key_stat->index_type = slapi_ch_strdup(index_type);
+ }
+
+ /* key value e.g. '1234' */
+ if (key_value) {
+ key_stat->key = (char *) slapi_ch_calloc(1, strlen(key_value) + 1);
+ memcpy(key_stat->key, key_value, strlen(key_value));
+ }
+
+ /* attribute name is e.g. 'uid' */
+ if (attribute_type) {
+ key_stat->attribute_type = slapi_ch_strdup(attribute_type);
+ }
+
+ /* Number of lookup IDs with the key */
+ key_stat->id_lookup_cnt = lookup_cnt;
+ if (op_stat->search_stat->keys_lookup) {
+ /* it already exist key stat. add key_stat at the head */
+ key_stat->next = op_stat->search_stat->keys_lookup;
+ } else {
+ /* this is the first key stat record */
+ key_stat->next = NULL;
+ }
+ op_stat->search_stat->keys_lookup = key_stat;
+}

/*
* Build a candidate list for a SUBTREE scope search.
@@ -1232,6 +1272,17 @@ subtree_candidates(
if (candidates != NULL && (idl_length(candidates) > FILTER_TEST_THRESHOLD) && e) {
IDList *tmp = candidates, *descendants = NULL;
back_txn txn = {NULL};
+ Op_stat *op_stat = NULL;
+ char key_value[32] = {0};
+
+ /* statistics for index lookup is enabled */
+ if (LDAP_STAT_READ_INDEX & config_get_statlog_level()) {
+ op_stat = op_stat_get_operation_extension(pb);
+ if (op_stat) {
+ /* easier to just record the entry ID */
+ PR_snprintf(key_value, sizeof(key_value), "%lu", (u_long) e->ep_id);
+ }
+ }

slapi_pblock_get(pb, SLAPI_TXN, &txn.back_txn_txn);
if (entryrdn_get_noancestorid()) {
@@ -1239,12 +1290,20 @@ subtree_candidates(
*err = entryrdn_get_subordinates(be,
slapi_entry_get_sdn_const(e->ep_entry),
e->ep_id, &descendants, &txn, 0);
+ if (op_stat) {
+ /* record entryrdn lookups */
+ stat_add_srch_lookup(op_stat, LDBM_ENTRYRDN_STR, indextype_EQUALITY, key_value, descendants ? descendants->b_nids : 0);
+ }
idl_insert(&descendants, e->ep_id);
candidates = idl_intersection(be, candidates, descendants);
idl_free(&tmp);
idl_free(&descendants);
} else if (!has_tombstone_filter && !is_bulk_import) {
*err = ldbm_ancestorid_read_ext(be, &txn, e->ep_id, &descendants, allidslimit);
+ if (op_stat) {
+ /* records ancestorid lookups */
+ stat_add_srch_lookup(op_stat, LDBM_ANCESTORID_STR, indextype_EQUALITY, key_value, descendants ? descendants->b_nids : 0);
+ }
idl_insert(&descendants, e->ep_id);
candidates = idl_intersection(be, candidates, descendants);
idl_free(&tmp);
--
2.51.1

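In the same style as the tests in the previous patch, a hedged sketch of how this fix could be observed (assuming "inst", "ldap" and DEFAULT_SUFFIX as in ds_logs_test.py; the subtree search has to return enough candidates for the backend to consult ancestorid or entryrdn while building the candidate list, so the extra STAT record may not appear on very small suffixes):

    inst.config.set("nsslapd-statlog-level", "1")
    inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)")
    inst.stop()  # the existing tests stop the server before matching the log
    assert inst.ds_access_log.match('.*STAT read index: attribute=(ancestorid|entryrdn).*')
    inst.start()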
Some files were not shown because too many files have changed in this diff.