Compare commits

2 Commits: c8-stream-... → c10-beta

| Author | SHA1 | Date |
|---|---|---|
|  | 2760ce3ca2 |  |
|  | 89b7694df5 |  |
@@ -1,3 +0,0 @@
-bd9aab32d9cbf9231058d585479813f3420dc872 SOURCES/389-ds-base-1.4.3.39.tar.bz2
-1c8f2d0dfbf39fa8cd86363bf3314351ab21f8d4 SOURCES/jemalloc-5.3.0.tar.bz2
-978b7c5e4a9e5784fddb23ba1abe4dc5a071589f SOURCES/vendor-1.4.3.39-1.tar.gz
.gitignore vendored (7 changes)

@@ -1,3 +1,4 @@
-SOURCES/389-ds-base-1.4.3.39.tar.bz2
-SOURCES/jemalloc-5.3.0.tar.bz2
-SOURCES/vendor-1.4.3.39-1.tar.gz
+389-ds-base-3.1.3.tar.bz2
+jemalloc-5.3.0.tar.bz2
+libdb-5.3.28-59.tar.bz2
+vendor-3.1.3-1.tar.gz
0001-Issue-6822-Backend-creation-cleanup-and-Database-UI-.patch (new file, 488 lines)

@@ -0,0 +1,488 @@
From 8f68c90b69bb09563ad8aa8c365bff534e133419 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Fri, 27 Jun 2025 18:43:39 -0700
Subject: [PATCH] Issue 6822 - Backend creation cleanup and Database UI tab
 error handling (#6823)

Description: Add rollback functionality when mapping tree creation fails
during backend creation to prevent orphaned backends.
Improve error handling in Database, Replication and Monitoring UI tabs
to gracefully handle backend get-tree command failures.

Fixes: https://github.com/389ds/389-ds-base/issues/6822

Reviewed by: @mreynolds389 (Thanks!)
---
 src/cockpit/389-console/src/database.jsx    | 119 ++++++++------
 src/cockpit/389-console/src/monitor.jsx     | 172 +++++++++++---------
 src/cockpit/389-console/src/replication.jsx |  55 ++++---
 src/lib389/lib389/backend.py                |  18 +-
 4 files changed, 210 insertions(+), 154 deletions(-)

diff --git a/src/cockpit/389-console/src/database.jsx b/src/cockpit/389-console/src/database.jsx
index c0c4be414..276125dfc 100644
--- a/src/cockpit/389-console/src/database.jsx
+++ b/src/cockpit/389-console/src/database.jsx
@@ -478,6 +478,59 @@ export class Database extends React.Component {
     }

     loadSuffixTree(fullReset) {
+        const treeData = [
+            {
+                name: _("Global Database Configuration"),
+                icon: <CogIcon />,
+                id: "dbconfig",
+            },
+            {
+                name: _("Chaining Configuration"),
+                icon: <ExternalLinkAltIcon />,
+                id: "chaining-config",
+            },
+            {
+                name: _("Backups & LDIFs"),
+                icon: <CopyIcon />,
+                id: "backups",
+            },
+            {
+                name: _("Password Policies"),
+                id: "pwp",
+                icon: <KeyIcon />,
+                children: [
+                    {
+                        name: _("Global Policy"),
+                        icon: <HomeIcon />,
+                        id: "pwpolicy",
+                    },
+                    {
+                        name: _("Local Policies"),
+                        icon: <UsersIcon />,
+                        id: "localpwpolicy",
+                    },
+                ],
+                defaultExpanded: true
+            },
+            {
+                name: _("Suffixes"),
+                icon: <CatalogIcon />,
+                id: "suffixes-tree",
+                children: [],
+                defaultExpanded: true,
+                action: (
+                    <Button
+                        onClick={this.handleShowSuffixModal}
+                        variant="plain"
+                        aria-label="Create new suffix"
+                        title={_("Create new suffix")}
+                    >
+                        <PlusIcon />
+                    </Button>
+                ),
+            }
+        ];
+
         const cmd = [
             "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
             "backend", "get-tree",
@@ -491,58 +544,20 @@
                     suffixData = JSON.parse(content);
                     this.processTree(suffixData);
                 }
-                const treeData = [
-                    {
-                        name: _("Global Database Configuration"),
-                        icon: <CogIcon />,
-                        id: "dbconfig",
-                    },
-                    {
-                        name: _("Chaining Configuration"),
-                        icon: <ExternalLinkAltIcon />,
-                        id: "chaining-config",
-                    },
-                    {
-                        name: _("Backups & LDIFs"),
-                        icon: <CopyIcon />,
-                        id: "backups",
-                    },
-                    {
-                        name: _("Password Policies"),
-                        id: "pwp",
-                        icon: <KeyIcon />,
-                        children: [
-                            {
-                                name: _("Global Policy"),
-                                icon: <HomeIcon />,
-                                id: "pwpolicy",
-                            },
-                            {
-                                name: _("Local Policies"),
-                                icon: <UsersIcon />,
-                                id: "localpwpolicy",
-                            },
-                        ],
-                        defaultExpanded: true
-                    },
-                    {
-                        name: _("Suffixes"),
-                        icon: <CatalogIcon />,
-                        id: "suffixes-tree",
-                        children: suffixData,
-                        defaultExpanded: true,
-                        action: (
-                            <Button
-                                onClick={this.handleShowSuffixModal}
-                                variant="plain"
-                                aria-label="Create new suffix"
-                                title={_("Create new suffix")}
-                            >
-                                <PlusIcon />
-                            </Button>
-                        ),
-                    }
-                ];
+
+                let current_node = this.state.node_name;
+                if (fullReset) {
+                    current_node = DB_CONFIG;
+                }
+
+                treeData[4].children = suffixData; // suffixes node
+                this.setState(() => ({
+                    nodes: treeData,
+                    node_name: current_node,
+                }), this.loadAttrs);
+            })
+            .fail(err => {
+                // Handle backend get-tree failure gracefully
                 let current_node = this.state.node_name;
                 if (fullReset) {
                     current_node = DB_CONFIG;
diff --git a/src/cockpit/389-console/src/monitor.jsx b/src/cockpit/389-console/src/monitor.jsx
index ad48d1f87..91a8e3e37 100644
--- a/src/cockpit/389-console/src/monitor.jsx
+++ b/src/cockpit/389-console/src/monitor.jsx
@@ -200,6 +200,84 @@ export class Monitor extends React.Component {
     }

     loadSuffixTree(fullReset) {
+        const basicData = [
+            {
+                name: _("Server Statistics"),
+                icon: <ClusterIcon />,
+                id: "server-monitor",
+                type: "server",
+            },
+            {
+                name: _("Replication"),
+                icon: <TopologyIcon />,
+                id: "replication-monitor",
+                type: "replication",
+                defaultExpanded: true,
+                children: [
+                    {
+                        name: _("Synchronization Report"),
+                        icon: <MonitoringIcon />,
+                        id: "sync-report",
+                        item: "sync-report",
+                        type: "repl-mon",
+                    },
+                    {
+                        name: _("Log Analysis"),
+                        icon: <MonitoringIcon />,
+                        id: "log-analysis",
+                        item: "log-analysis",
+                        type: "repl-mon",
+                    }
+                ],
+            },
+            {
+                name: _("Database"),
+                icon: <DatabaseIcon />,
+                id: "database-monitor",
+                type: "database",
+                children: [], // Will be populated with treeData on success
+                defaultExpanded: true,
+            },
+            {
+                name: _("Logging"),
+                icon: <CatalogIcon />,
+                id: "log-monitor",
+                defaultExpanded: true,
+                children: [
+                    {
+                        name: _("Access Log"),
+                        icon: <BookIcon size="sm" />,
+                        id: "access-log-monitor",
+                        type: "log",
+                    },
+                    {
+                        name: _("Audit Log"),
+                        icon: <BookIcon size="sm" />,
+                        id: "audit-log-monitor",
+                        type: "log",
+                    },
+                    {
+                        name: _("Audit Failure Log"),
+                        icon: <BookIcon size="sm" />,
+                        id: "auditfail-log-monitor",
+                        type: "log",
+                    },
+                    {
+                        name: _("Errors Log"),
+                        icon: <BookIcon size="sm" />,
+                        id: "error-log-monitor",
+                        type: "log",
+                    },
+                    {
+                        name: _("Security Log"),
+                        icon: <BookIcon size="sm" />,
+                        id: "security-log-monitor",
+                        type: "log",
+                    },
+                ]
+            },
+        ];
+
         const cmd = [
             "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
             "backend", "get-tree",
@@ -210,83 +288,7 @@ export class Monitor extends React.Component {
             .done(content => {
                 const treeData = JSON.parse(content);
                 this.processTree(treeData);
-                const basicData = [
-                    {
-                        name: _("Server Statistics"),
-                        icon: <ClusterIcon />,
-                        id: "server-monitor",
-                        type: "server",
-                    },
-                    {
-                        name: _("Replication"),
-                        icon: <TopologyIcon />,
-                        id: "replication-monitor",
-                        type: "replication",
-                        defaultExpanded: true,
-                        children: [
-                            {
-                                name: _("Synchronization Report"),
-                                icon: <MonitoringIcon />,
-                                id: "sync-report",
-                                item: "sync-report",
-                                type: "repl-mon",
-                            },
-                            {
-                                name: _("Log Analysis"),
-                                icon: <MonitoringIcon />,
-                                id: "log-analysis",
-                                item: "log-analysis",
-                                type: "repl-mon",
-                            }
-                        ],
-                    },
-                    {
-                        name: _("Database"),
-                        icon: <DatabaseIcon />,
-                        id: "database-monitor",
-                        type: "database",
-                        children: [],
-                        defaultExpanded: true,
-                    },
-                    {
-                        name: _("Logging"),
-                        icon: <CatalogIcon />,
-                        id: "log-monitor",
-                        defaultExpanded: true,
-                        children: [
-                            {
-                                name: _("Access Log"),
-                                icon: <BookIcon size="sm" />,
-                                id: "access-log-monitor",
-                                type: "log",
-                            },
-                            {
-                                name: _("Audit Log"),
-                                icon: <BookIcon size="sm" />,
-                                id: "audit-log-monitor",
-                                type: "log",
-                            },
-                            {
-                                name: _("Audit Failure Log"),
-                                icon: <BookIcon size="sm" />,
-                                id: "auditfail-log-monitor",
-                                type: "log",
-                            },
-                            {
-                                name: _("Errors Log"),
-                                icon: <BookIcon size="sm" />,
-                                id: "error-log-monitor",
-                                type: "log",
-                            },
-                            {
-                                name: _("Security Log"),
-                                icon: <BookIcon size="sm" />,
-                                id: "security-log-monitor",
-                                type: "log",
-                            },
-                        ]
-                    },
-                ];
+
                 let current_node = this.state.node_name;
                 let type = this.state.node_type;
                 if (fullReset) {
@@ -296,6 +298,22 @@ export class Monitor extends React.Component {
                 basicData[2].children = treeData; // database node
                 this.processReplSuffixes(basicData[1].children);

+                this.setState(() => ({
+                    nodes: basicData,
+                    node_name: current_node,
+                    node_type: type,
+                }), this.update_tree_nodes);
+            })
+            .fail(err => {
+                // Handle backend get-tree failure gracefully
+                let current_node = this.state.node_name;
+                let type = this.state.node_type;
+                if (fullReset) {
+                    current_node = "server-monitor";
+                    type = "server";
+                }
+                this.processReplSuffixes(basicData[1].children);
+
                 this.setState(() => ({
                     nodes: basicData,
                     node_name: current_node,
diff --git a/src/cockpit/389-console/src/replication.jsx b/src/cockpit/389-console/src/replication.jsx
index fa492fd2a..aa535bfc7 100644
--- a/src/cockpit/389-console/src/replication.jsx
+++ b/src/cockpit/389-console/src/replication.jsx
@@ -177,6 +177,16 @@ export class Replication extends React.Component {
             loaded: false
         });

+        const basicData = [
+            {
+                name: _("Suffixes"),
+                icon: <TopologyIcon />,
+                id: "repl-suffixes",
+                children: [],
+                defaultExpanded: true
+            }
+        ];
+
         const cmd = [
             "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
             "backend", "get-tree",
@@ -199,15 +209,7 @@ export class Replication extends React.Component {
                     }
                 }
             }
-            const basicData = [
-                {
-                    name: _("Suffixes"),
-                    icon: <TopologyIcon />,
-                    id: "repl-suffixes",
-                    children: [],
-                    defaultExpanded: true
-                }
-            ];
+
             let current_node = this.state.node_name;
             let current_type = this.state.node_type;
             let replicated = this.state.node_replicated;
@@ -258,6 +260,19 @@ export class Replication extends React.Component {
             }

             basicData[0].children = treeData;
+            this.setState({
+                nodes: basicData,
+                node_name: current_node,
+                node_type: current_type,
+                node_replicated: replicated,
+            }, () => { this.update_tree_nodes() });
+        })
+        .fail(err => {
+            // Handle backend get-tree failure gracefully
+            let current_node = this.state.node_name;
+            let current_type = this.state.node_type;
+            let replicated = this.state.node_replicated;
+
             this.setState({
                 nodes: basicData,
                 node_name: current_node,
@@ -905,18 +920,18 @@ export class Replication extends React.Component {
                             disableTree: false
                         });
                     });
-            })
-            .fail(err => {
-                const errMsg = JSON.parse(err);
-                this.props.addNotification(
-                    "error",
-                    cockpit.format(_("Error loading replication agreements configuration - $0"), errMsg.desc)
-                );
-                this.setState({
-                    suffixLoading: false,
-                    disableTree: false
+                })
+                .fail(err => {
+                    const errMsg = JSON.parse(err);
+                    this.props.addNotification(
+                        "error",
+                        cockpit.format(_("Error loading replication agreements configuration - $0"), errMsg.desc)
+                    );
+                    this.setState({
+                        suffixLoading: false,
+                        disableTree: false
+                    });
                 });
-            });
         })
         .fail(err => {
             // changelog failure
diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py
index 1d000ed66..53f15b6b0 100644
--- a/src/lib389/lib389/backend.py
+++ b/src/lib389/lib389/backend.py
@@ -694,24 +694,32 @@ class Backend(DSLdapObject):
         parent_suffix = properties.pop('parent', False)

         # Okay, now try to make the backend.
-        super(Backend, self).create(dn, properties, basedn)
+        backend_obj = super(Backend, self).create(dn, properties, basedn)

         # We check if the mapping tree exists in create, so do this *after*
         if create_mapping_tree is True:
-            properties = {
+            mapping_tree_properties = {
                 'cn': self._nprops_stash['nsslapd-suffix'],
                 'nsslapd-state': 'backend',
                 'nsslapd-backend': self._nprops_stash['cn'],
             }
             if parent_suffix:
                 # This is a subsuffix, set the parent suffix
-                properties['nsslapd-parent-suffix'] = parent_suffix
-            self._mts.create(properties=properties)
+                mapping_tree_properties['nsslapd-parent-suffix'] = parent_suffix
+
+            try:
+                self._mts.create(properties=mapping_tree_properties)
+            except Exception as e:
+                try:
+                    backend_obj.delete()
+                except Exception as cleanup_error:
+                    self._instance.log.error(f"Failed to cleanup backend after mapping tree creation failure: {cleanup_error}")
+                raise e

         # We can't create the sample entries unless a mapping tree was installed.
         if sample_entries is not False and create_mapping_tree is True:
             self.create_sample_entries(sample_entries)
-        return self
+        return backend_obj

     def delete(self):
         """Deletes the backend, it's mapping tree and all related indices.
--
2.49.0

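The lib389 hunk above boils down to a create-then-rollback pattern: if the dependent object (the mapping tree) cannot be created, the just-created backend is deleted so no orphan is left behind, and cleanup errors are logged rather than allowed to mask the original failure. A minimal standalone sketch of the same pattern (the generic `create_*` callables are hypothetical stand-ins, not the lib389 API):

```python
def create_with_rollback(create_primary, create_dependent, log):
    """Create a primary resource, then a dependent one; roll back on failure."""
    primary = create_primary()
    try:
        create_dependent(primary)
    except Exception as err:
        # Dependent creation failed: try to undo the primary, but never let
        # a cleanup error hide the original exception.
        try:
            primary.delete()
        except Exception as cleanup_error:
            log.error("Cleanup after failed dependent creation also failed: %s",
                      cleanup_error)
        raise err
    return primary
```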
0002-Issue-6852-Move-ds-CLI-tools-back-to-sbin.patch (new file, 56 lines)

@@ -0,0 +1,56 @@
From 6ed6a67f142fec393cd254df38b9750a14848528 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Tue, 8 Jul 2025 19:09:34 +0200
Subject: [PATCH] Issue 6852 - Move ds* CLI tools back to /sbin

Bug Description:
After #6767 ds* CLI tools are packaged in /bin instead of /sbin. Even
though Fedora 42 has unified /bin and /sbin, some tools (ipa-backup) and
our tests still rely on hardcoded paths.

Fix Description:
Move ds* tools back to /sbin

Fixes: https://github.com/389ds/389-ds-base/issues/6852

Reviewed by: @droideck (Thanks!)
---
 rpm/389-ds-base.spec.in | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index 2f1df63c9..101771574 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -486,6 +486,11 @@ cp -r %{_builddir}/%{name}-%{version}/man/man3 $RPM_BUILD_ROOT/%{_mandir}/man3
 # lib389
 pushd src/lib389
 %pyproject_install
+%if 0%{?fedora} <= 41 || (0%{?rhel} && 0%{?rhel} <= 10)
+for clitool in dsconf dscreate dsctl dsidm openldap_to_ds; do
+    mv %{buildroot}%{_bindir}/$clitool %{buildroot}%{_sbindir}/
+done
+%endif
 %pyproject_save_files -l lib389
 popd

@@ -743,11 +748,11 @@ fi
 %doc src/lib389/README.md
 %license LICENSE LICENSE.GPLv3+
 # Binaries
-%{_bindir}/dsconf
-%{_bindir}/dscreate
-%{_bindir}/dsctl
-%{_bindir}/dsidm
-%{_bindir}/openldap_to_ds
+%{_sbindir}/dsconf
+%{_sbindir}/dscreate
+%{_sbindir}/dsctl
+%{_sbindir}/dsidm
+%{_sbindir}/openldap_to_ds
 %{_libexecdir}/%{pkgname}/dscontainer
 # Man pages
 %{_mandir}/man8/dsconf.8.gz
--
2.49.0

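Because the tools may land in either /bin or /sbin depending on the distribution, any caller that hard-codes one path (the commit message names ipa-backup and the test suite) can break. A small hedged sketch of a path-agnostic lookup, illustrative only:

```python
import os
import shutil

def find_ds_tool(name):
    """Locate a ds* CLI tool whether it is packaged in /bin or /sbin."""
    found = shutil.which(name)  # honors PATH first
    if found:
        return found
    # Fall back to the historic locations checked in order.
    for prefix in ("/usr/sbin", "/usr/bin", "/sbin", "/bin"):
        candidate = os.path.join(prefix, name)
        if os.path.exists(candidate):
            return candidate
    raise FileNotFoundError(f"{name} not found in PATH, /bin or /sbin")

# Example: find_ds_tool("dsconf")
```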
0003-Issue-6663-Fix-NULL-subsystem-crash-in-JSON-error-lo.patch (new file, 380 lines)

@@ -0,0 +1,380 @@
From bd9ab54f64148d467e022c59ee8e5aed16f0c385 Mon Sep 17 00:00:00 2001
From: Akshay Adhikari <aadhikar@redhat.com>
Date: Mon, 28 Jul 2025 18:14:15 +0530
Subject: [PATCH] Issue 6663 - Fix NULL subsystem crash in JSON error logging
 (#6883)

Description: Fixes crash in JSON error logging when subsystem is NULL.
Parametrized test case for better debugging.

Relates: https://github.com/389ds/389-ds-base/issues/6663

Reviewed by: @mreynolds389
---
 .../tests/suites/clu/dsconf_logging.py      | 168 ------------------
 .../tests/suites/clu/dsconf_logging_test.py | 164 +++++++++++++++++
 ldap/servers/slapd/log.c                    |   2 +-
 3 files changed, 165 insertions(+), 169 deletions(-)
 delete mode 100644 dirsrvtests/tests/suites/clu/dsconf_logging.py
 create mode 100644 dirsrvtests/tests/suites/clu/dsconf_logging_test.py

diff --git a/dirsrvtests/tests/suites/clu/dsconf_logging.py b/dirsrvtests/tests/suites/clu/dsconf_logging.py
deleted file mode 100644
index 1c2f7fc2e..000000000
--- a/dirsrvtests/tests/suites/clu/dsconf_logging.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2025 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import json
-import subprocess
-import logging
-import pytest
-from lib389._constants import DN_DM
-from lib389.topologies import topology_st as topo
-
-pytestmark = pytest.mark.tier1
-
-log = logging.getLogger(__name__)
-
-SETTINGS = [
-    ('logging-enabled', None),
-    ('logging-disabled', None),
-    ('mode', '700'),
-    ('compress-enabled', None),
-    ('compress-disabled', None),
-    ('buffering-enabled', None),
-    ('buffering-disabled', None),
-    ('max-logs', '4'),
-    ('max-logsize', '7'),
-    ('rotation-interval', '2'),
-    ('rotation-interval-unit', 'week'),
-    ('rotation-tod-enabled', None),
-    ('rotation-tod-disabled', None),
-    ('rotation-tod-hour', '12'),
-    ('rotation-tod-minute', '20'),
-    ('deletion-interval', '3'),
-    ('deletion-interval-unit', 'day'),
-    ('max-disk-space', '20'),
-    ('free-disk-space', '2'),
-]
-
-DEFAULT_TIME_FORMAT = "%FT%TZ"
-
-
-def execute_dsconf_command(dsconf_cmd, subcommands):
-    """Execute dsconf command and return output and return code"""
-
-    cmdline = dsconf_cmd + subcommands
-    proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
-    out, _ = proc.communicate()
-    return out.decode('utf-8'), proc.returncode
-
-
-def get_dsconf_base_cmd(topo):
-    """Return base dsconf command list"""
-    return ['/usr/sbin/dsconf', topo.standalone.serverid,
-            '-j', '-D', DN_DM, '-w', 'password', 'logging']
-
-
-def test_log_settings(topo):
-    """Test each log setting can be set successfully
-
-    :id: b800fd03-37f5-4e74-9af8-eeb07030eb52
-    :setup: Standalone DS instance
-    :steps:
-        1. Test each log's settings
-    :expectedresults:
-        1. Success
-    """
-
-    dsconf_cmd = get_dsconf_base_cmd(topo)
-    for log_type in ['access', 'audit', 'auditfail', 'error', 'security']:
-        # Test "get" command
-        output, rc = execute_dsconf_command(dsconf_cmd, [log_type, 'get'])
-        assert rc == 0
-        json_result = json.loads(output)
-        default_location = json_result['Log name and location']
-
-        # Log location
-        output, rc = execute_dsconf_command(dsconf_cmd, [log_type, 'set',
-                                                         'location',
-                                                         f'/tmp/{log_type}'])
-        assert rc == 0
-        output, rc = execute_dsconf_command(dsconf_cmd, [log_type, 'set',
-                                                         'location',
-                                                         default_location])
-        assert rc == 0
-
-        # Log levels
-        if log_type == "access":
-            # List levels
-            output, rc = execute_dsconf_command(dsconf_cmd,
-                                                [log_type, 'list-levels'])
-            assert rc == 0
-
-            # Set levels
-            output, rc = execute_dsconf_command(dsconf_cmd,
-                                                [log_type, 'set', 'level',
-                                                 'internal'])
-            assert rc == 0
-            output, rc = execute_dsconf_command(dsconf_cmd,
-                                                [log_type, 'set', 'level',
-                                                 'internal', 'entry'])
-            assert rc == 0
-            output, rc = execute_dsconf_command(dsconf_cmd,
-                                                [log_type, 'set', 'level',
-                                                 'internal', 'default'])
-            assert rc == 0
-
-        if log_type == "error":
-            # List levels
-            output, rc = execute_dsconf_command(dsconf_cmd,
-                                                [log_type, 'list-levels'])
-            assert rc == 0
-
-            # Set levels
-            output, rc = execute_dsconf_command(dsconf_cmd,
-                                                [log_type, 'set', 'level',
-                                                 'plugin', 'replication'])
-            assert rc == 0
-            output, rc = execute_dsconf_command(dsconf_cmd,
-                                                [log_type, 'set', 'level',
-                                                 'default'])
-            assert rc == 0
-
-        # Log formats
-        if log_type in ["access", "audit", "error"]:
-            output, rc = execute_dsconf_command(dsconf_cmd,
-                                                [log_type, 'set',
-                                                 'time-format', '%D'])
-            assert rc == 0
-            output, rc = execute_dsconf_command(dsconf_cmd,
-                                                [log_type, 'set',
-                                                 'time-format',
-                                                 DEFAULT_TIME_FORMAT])
-            assert rc == 0
-
-            output, rc = execute_dsconf_command(dsconf_cmd,
-                                                [log_type, 'set',
-                                                 'log-format',
-                                                 'json'])
-            assert rc == 0
-            output, rc = execute_dsconf_command(dsconf_cmd,
-                                                [log_type, 'set',
-                                                 'log-format',
-                                                 'default'])
-            assert rc == 0
-
-        # Audit log display attrs
-        if log_type == "audit":
-            output, rc = execute_dsconf_command(dsconf_cmd,
-                                                [log_type, 'set',
-                                                 'display-attrs', 'cn'])
-            assert rc == 0
-
-        # Common settings
-        for attr, value in SETTINGS:
-            if log_type == "auditfail" and attr.startswith("buffer"):
-                # auditfail doesn't have a buffering settings
-                continue
-
-            if value is None:
-                output, rc = execute_dsconf_command(dsconf_cmd, [log_type,
-                                                                 'set', attr])
-            else:
-                output, rc = execute_dsconf_command(dsconf_cmd, [log_type,
-                                                                 'set', attr, value])
-            assert rc == 0
diff --git a/dirsrvtests/tests/suites/clu/dsconf_logging_test.py b/dirsrvtests/tests/suites/clu/dsconf_logging_test.py
new file mode 100644
index 000000000..ca3f71997
--- /dev/null
+++ b/dirsrvtests/tests/suites/clu/dsconf_logging_test.py
@@ -0,0 +1,164 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2025 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import json
+import subprocess
+import logging
+import pytest
+from lib389._constants import DN_DM
+from lib389.topologies import topology_st as topo
+
+pytestmark = pytest.mark.tier1
+
+log = logging.getLogger(__name__)
+
+SETTINGS = [
+    ('logging-enabled', None),
+    ('logging-disabled', None),
+    ('mode', '700'),
+    ('compress-enabled', None),
+    ('compress-disabled', None),
+    ('buffering-enabled', None),
+    ('buffering-disabled', None),
+    ('max-logs', '4'),
+    ('max-logsize', '7'),
+    ('rotation-interval', '2'),
+    ('rotation-interval-unit', 'week'),
+    ('rotation-tod-enabled', None),
+    ('rotation-tod-disabled', None),
+    ('rotation-tod-hour', '12'),
+    ('rotation-tod-minute', '20'),
+    ('deletion-interval', '3'),
+    ('deletion-interval-unit', 'day'),
+    ('max-disk-space', '20'),
+    ('free-disk-space', '2'),
+]
+
+DEFAULT_TIME_FORMAT = "%FT%TZ"
+
+
+def execute_dsconf_command(dsconf_cmd, subcommands):
+    """Execute dsconf command and return output and return code"""
+
+    cmdline = dsconf_cmd + subcommands
+    proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    out, err = proc.communicate()
+
+    if proc.returncode != 0 and err:
+        log.error(f"Command failed: {' '.join(cmdline)}")
+        log.error(f"Stderr: {err.decode('utf-8')}")
+
+    return out.decode('utf-8'), proc.returncode
+
+
+def get_dsconf_base_cmd(topo):
+    """Return base dsconf command list"""
+    return ['/usr/sbin/dsconf', topo.standalone.serverid,
+            '-j', '-D', DN_DM, '-w', 'password', 'logging']
+
+
+@pytest.mark.parametrize("log_type", ['access', 'audit', 'auditfail', 'error', 'security'])
+def test_log_settings(topo, log_type):
+    """Test each log setting can be set successfully
+
+    :id: b800fd03-37f5-4e74-9af8-eeb07030eb52
+    :setup: Standalone DS instance
+    :steps:
+        1. Test each log's settings
+    :expectedresults:
+        1. Success
+    """
+
+    dsconf_cmd = get_dsconf_base_cmd(topo)
+
+    output, rc = execute_dsconf_command(dsconf_cmd, [log_type, 'get'])
+    assert rc == 0
+    json_result = json.loads(output)
+    default_location = json_result['Log name and location']
+
+    output, rc = execute_dsconf_command(dsconf_cmd, [log_type, 'set',
+                                                     'location',
+                                                     f'/tmp/{log_type}'])
+    assert rc == 0
+    output, rc = execute_dsconf_command(dsconf_cmd, [log_type, 'set',
+                                                     'location',
+                                                     default_location])
+    assert rc == 0
+
+    if log_type == "access":
+        output, rc = execute_dsconf_command(dsconf_cmd,
+                                            [log_type, 'list-levels'])
+        assert rc == 0
+
+        output, rc = execute_dsconf_command(dsconf_cmd,
+                                            [log_type, 'set', 'level',
+                                             'internal'])
+        assert rc == 0
+        output, rc = execute_dsconf_command(dsconf_cmd,
+                                            [log_type, 'set', 'level',
+                                             'internal', 'entry'])
+        assert rc == 0
+        output, rc = execute_dsconf_command(dsconf_cmd,
+                                            [log_type, 'set', 'level',
+                                             'internal', 'default'])
+        assert rc == 0
+
+    if log_type == "error":
+        output, rc = execute_dsconf_command(dsconf_cmd,
+                                            [log_type, 'list-levels'])
+        assert rc == 0
+
+        output, rc = execute_dsconf_command(dsconf_cmd,
+                                            [log_type, 'set', 'level',
+                                             'plugin', 'replication'])
+        assert rc == 0
+        output, rc = execute_dsconf_command(dsconf_cmd,
+                                            [log_type, 'set', 'level',
+                                             'default'])
+        assert rc == 0
+
+    if log_type in ["access", "audit", "error"]:
+        output, rc = execute_dsconf_command(dsconf_cmd,
+                                            [log_type, 'set',
+                                             'time-format', '%D'])
+        assert rc == 0
+        output, rc = execute_dsconf_command(dsconf_cmd,
+                                            [log_type, 'set',
+                                             'time-format',
+                                             DEFAULT_TIME_FORMAT])
+        assert rc == 0
+
+        output, rc = execute_dsconf_command(dsconf_cmd,
+                                            [log_type, 'set',
+                                             'log-format',
+                                             'json'])
+        assert rc == 0, f"Failed to set {log_type} log-format to json: {output}"
+
+        output, rc = execute_dsconf_command(dsconf_cmd,
+                                            [log_type, 'set',
+                                             'log-format',
+                                             'default'])
+        assert rc == 0, f"Failed to set {log_type} log-format to default: {output}"
+
+    if log_type == "audit":
+        output, rc = execute_dsconf_command(dsconf_cmd,
+                                            [log_type, 'set',
+                                             'display-attrs', 'cn'])
+        assert rc == 0
+
+    for attr, value in SETTINGS:
+        if log_type == "auditfail" and attr.startswith("buffer"):
+            continue
+
+        if value is None:
+            output, rc = execute_dsconf_command(dsconf_cmd, [log_type,
+                                                             'set', attr])
+        else:
+            output, rc = execute_dsconf_command(dsconf_cmd, [log_type,
+                                                             'set', attr, value])
+        assert rc == 0
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index 06dae4d0b..e859682fe 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -2937,7 +2937,7 @@ vslapd_log_error(
     json_obj = json_object_new_object();
     json_object_object_add(json_obj, "local_time", json_object_new_string(local_time));
     json_object_object_add(json_obj, "severity", json_object_new_string(get_log_sev_name(sev_level, sev_name)));
-    json_object_object_add(json_obj, "subsystem", json_object_new_string(subsystem));
+    json_object_object_add(json_obj, "subsystem", json_object_new_string(subsystem ? subsystem : ""));
     json_object_object_add(json_obj, "msg", json_object_new_string(vbuf));

     PR_snprintf(buffer, sizeof(buffer), "%s\n",
--
2.49.0

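Besides the one-line NULL guard in log.c, this patch converts one long for-loop over log types into pytest parametrization, so each log type reports as its own test case and a failure pinpoints the offending type instead of aborting the whole loop. A minimal sketch of that pattern:

```python
import pytest

LOG_TYPES = ['access', 'audit', 'auditfail', 'error', 'security']

@pytest.mark.parametrize("log_type", LOG_TYPES)
def test_log_settings_sketch(log_type):
    # pytest generates five separate test items, e.g.
    # test_log_settings_sketch[auditfail], so a failure is attributable
    # to a single log type and the other types still run.
    assert log_type in LOG_TYPES
```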
0004-Issue-6829-Update-parametrized-docstring-for-tests.patch (new file, 205 lines)

@@ -0,0 +1,205 @@
From 23e0b93c3bbe96a365357a3af11bc86172102c05 Mon Sep 17 00:00:00 2001
From: Barbora Simonova <bsmejkal@redhat.com>
Date: Wed, 25 Jun 2025 10:43:37 +0200
Subject: [PATCH] Issue 6829 - Update parametrized docstring for tests

Description:
Update missing parametrized value in docstring for some tests

Fixes: https://github.com/389ds/389-ds-base/issues/6829

Reviewed by: @vashirov (Thanks!)
---
 dirsrvtests/tests/suites/basic/basic_test.py              | 2 +-
 dirsrvtests/tests/suites/clu/dsconf_config_test.py        | 8 ++++++++
 dirsrvtests/tests/suites/clu/schema_test.py               | 5 +++++
 dirsrvtests/tests/suites/mapping_tree/regression_test.py  | 1 +
 dirsrvtests/tests/suites/password/password_test.py        | 1 +
 .../tests/suites/replication/regression_m2_test.py        | 1 +
 dirsrvtests/tests/suites/vlv/regression_test.py           | 2 ++
 7 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 8f5de91aa..db6bfab56 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -961,7 +961,7 @@ def test_basic_search_lookthroughlimit(topology_st, limit, resp, import_example_
     Tests normal search with lookthroughlimit set high and low.

     :id: b5119970-6c9f-41b7-9649-de9233226fec
-
+    :parametrized: yes
     :setup: Standalone instance, add example.ldif to the database, search filter (uid=*).

     :steps:
diff --git a/dirsrvtests/tests/suites/clu/dsconf_config_test.py b/dirsrvtests/tests/suites/clu/dsconf_config_test.py
index d67679adf..232018097 100644
--- a/dirsrvtests/tests/suites/clu/dsconf_config_test.py
+++ b/dirsrvtests/tests/suites/clu/dsconf_config_test.py
@@ -58,6 +58,7 @@ def test_single_value_add(topology_st, attr_name, values_dict):
     """Test adding a single value to an attribute

     :id: ffc912a6-c188-413d-9c35-7f4b3774d946
+    :parametrized: yes
     :setup: Standalone DS instance
     :steps:
         1. Add a single value to the specified attribute
@@ -89,6 +90,7 @@ def test_single_value_replace(topology_st, attr_name, values_dict):
     """Test replacing a single value in configuration attributes

     :id: 112e3e5e-8db8-4974-9ea4-ed789c2d02f2
+    :parametrized: yes
     :setup: Standalone DS instance
     :steps:
         1. Add initial value to the specified attribute
@@ -127,6 +129,7 @@ def test_multi_value_batch_add(topology_st, attr_name, values_dict):
     """Test adding multiple values in a single batch command

     :id: 4c34c7f8-16cc-4ab6-938a-967537be5470
+    :parametrized: yes
     :setup: Standalone DS instance
     :steps:
         1. Add multiple values to the attribute in a single command
@@ -157,6 +160,7 @@ def test_multi_value_batch_replace(topology_st, attr_name, values_dict):
     """Test replacing with multiple values in a single batch command

     :id: 05cf28b8-000e-4856-a10b-7e1df012737d
+    :parametrized: yes
     :setup: Standalone DS instance
     :steps:
         1. Add initial single value
@@ -194,6 +198,7 @@ def test_multi_value_specific_delete(topology_st, attr_name, values_dict):
     """Test deleting specific values from multi-valued attribute

     :id: bb325c9a-eae8-438a-b577-bd63540b91cb
+    :parametrized: yes
     :setup: Standalone DS instance
     :steps:
         1. Add multiple values to the attribute
@@ -232,6 +237,7 @@ def test_multi_value_batch_delete(topology_st, attr_name, values_dict):
     """Test deleting multiple values in a single batch command

     :id: 4b105824-b060-4f83-97d7-001a01dba1a5
+    :parametrized: yes
     :setup: Standalone DS instance
     :steps:
         1. Add multiple values to the attribute
@@ -269,6 +275,7 @@ def test_single_value_persists_after_restart(topology_st, attr_name, values_dict
     """Test single value persists after server restart

     :id: be1a7e3d-a9ca-48a1-a3bc-062990d4f3e9
+    :parametrized: yes
     :setup: Standalone DS instance
     :steps:
         1. Add single value to the attribute
@@ -310,6 +317,7 @@ def test_multi_value_batch_persists_after_restart(topology_st, attr_name, values
     """Test multiple values added in batch persist after server restart

     :id: fd0435e2-90b1-465a-8968-d3a375c8fb22
+    :parametrized: yes
     :setup: Standalone DS instance
     :steps:
         1. Add multiple values in a single batch command
diff --git a/dirsrvtests/tests/suites/clu/schema_test.py b/dirsrvtests/tests/suites/clu/schema_test.py
index 19ec032bc..5709471cf 100644
--- a/dirsrvtests/tests/suites/clu/schema_test.py
+++ b/dirsrvtests/tests/suites/clu/schema_test.py
@@ -100,6 +100,7 @@ def test_origins(create_attribute):
     """Test the various possibilities of x-origin

     :id: 3229f6f8-67c1-4558-9be5-71434283086a
+    :parametrized: yes
     :setup: Standalone Instance
     :steps:
         1. Add an attribute with different x-origin values/types
@@ -116,6 +117,7 @@ def test_mrs(create_attribute):
     """Test an attribute can be added with a matching rule

     :id: e4eb06e0-7f80-41fe-8868-08c2bafc7590
+    :parametrized: yes
     :setup: Standalone Instance
     :steps:
         1. Add an attribute with a matching rule
@@ -132,6 +134,7 @@ def test_edit_attributetype(create_attribute):
     """Test editing an attribute type in the schema

     :id: 07c98f6a-89f8-44e5-9cc1-353d1f7bccf4
+    :parametrized: yes
     :setup: Standalone Instance
     :steps:
         1. Add a new attribute type
@@ -209,6 +212,7 @@ def test_edit_attributetype_remove_superior(create_attribute):
     """Test editing an attribute type to remove a parameter from it

     :id: bd6ae89f-9617-4620-adc2-465884ca568b
+    :parametrized: yes
     :setup: Standalone Instance
     :steps:
         1. Add a new attribute type with a superior
@@ -244,6 +248,7 @@ def test_edit_attribute_keep_custom_values(create_attribute):
     """Test editing a custom schema attribute keeps all custom values

     :id: 5b1e2e8b-28c2-4f77-9c03-07eff20f763d
+    :parametrized: yes
     :setup: Standalone Instance
     :steps:
         1. Create a custom attribute
diff --git a/dirsrvtests/tests/suites/mapping_tree/regression_test.py b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
index 51c687059..2c57c2973 100644
--- a/dirsrvtests/tests/suites/mapping_tree/regression_test.py
+++ b/dirsrvtests/tests/suites/mapping_tree/regression_test.py
@@ -111,6 +111,7 @@ def test_sub_suffixes(topo, orphan_param):
     """ check the entries found on suffix/sub-suffix

     :id: 5b4421c2-d851-11ec-a760-482ae39447e5
+    :parametrized: yes
     :feature: mapping-tree
     :setup: Standalone instance with 3 additional backends:
             dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent
diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py
index 2d4aef028..94a23e669 100644
--- a/dirsrvtests/tests/suites/password/password_test.py
+++ b/dirsrvtests/tests/suites/password/password_test.py
@@ -156,6 +156,7 @@ def test_pwd_scheme_no_upgrade_on_bind(topology_st, crypt_scheme, request, no_up
     the current hash is in nsslapd-scheme-list-no-upgrade-hash

     :id: b4d2c525-a239-4ca6-a168-5126da7abedd
+    :parametrized: yes
     :setup: Standalone instance
     :steps:
         1. Create a user with userpassword stored as CRYPT
diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
index 10a5fa419..68966ac49 100644
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
@@ -991,6 +991,7 @@ def test_change_repl_passwd(topo_m2, request, bind_cn):
     Testing when agmt bind group are used.

     :id: a305913a-cc76-11ec-b324-482ae39447e5
+    :parametrized: yes
     :setup: 2 Supplier Instances
     :steps:
         1. Insure agmt from supplier1 to supplier2 is properly set to use bind group
diff --git a/dirsrvtests/tests/suites/vlv/regression_test.py b/dirsrvtests/tests/suites/vlv/regression_test.py
index d9d1cb444..f7847ac74 100644
--- a/dirsrvtests/tests/suites/vlv/regression_test.py
+++ b/dirsrvtests/tests/suites/vlv/regression_test.py
@@ -775,6 +775,7 @@ def test_vlv_reindex(topology_st, prefix, basedn):
     """Test VLV reindexing.

     :id: d5dc0d8e-cbe6-11ee-95b1-482ae39447e5
+    :parametrized: yes
     :setup: Standalone instance.
     :steps:
         1. Cleanup leftover from previous tests
@@ -830,6 +831,7 @@ def test_vlv_offline_import(topology_st, prefix, basedn):
     """Test VLV after off line import.

     :id: 8732d7a8-e851-11ee-9d63-482ae39447e5
+    :parametrized: yes
     :setup: Standalone instance.
     :steps:
         1. Cleanup leftover from previous tests
--
2.49.0

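All of these hunks add the same one-line docstring field. In the 389-ds test suite, fields like `:id:` and `:parametrized:` are machine-read docstring metadata, so a parametrized test should declare the field. A hedged mini-example of the convention (the UUID is a placeholder):

```python
import pytest

@pytest.mark.parametrize("value", ["a", "b"])
def test_example(value):
    """One-line summary of the test

    :id: 00000000-0000-0000-0000-000000000000
    :parametrized: yes
    :setup: Standalone instance
    :steps:
        1. Do the thing
    :expectedresults:
        1. Success
    """
    assert value in ("a", "b")
```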
0005-Issue-6782-Improve-paged-result-locking.patch (new file, 127 lines)

@@ -0,0 +1,127 @@
From 4f81f696e85dc7c50555df2d410222214c8ac883 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Thu, 15 May 2025 10:35:27 -0400
Subject: [PATCH] Issue 6782 - Improve paged result locking

Description:

When cleaning a slot, instead of memsetting everything to zero and restoring
the mutex, manually reset all the values, leaving the mutex pointer
intact.

There is also a deadlock possibility when checking for an abandoned PR search
in opshared.c, where we were checking a flag value outside of the per_conn
lock.

Relates: https://github.com/389ds/389-ds-base/issues/6782

Reviewed by: progier & spichugi (Thanks!!)
---
 ldap/servers/slapd/opshared.c     | 10 +++++++++-
 ldap/servers/slapd/pagedresults.c | 27 +++++++++++++++++----------
 2 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 5ea919e2d..545518748 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -619,6 +619,14 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
             int32_t tlimit;
             slapi_pblock_get(pb, SLAPI_SEARCH_TIMELIMIT, &tlimit);
             pagedresults_set_timelimit(pb_conn, operation, (time_t)tlimit, pr_idx);
+            /* When using this mutex in conjunction with the main paged
+             * result lock, you must do so in this order:
+             *
+             * --> pagedresults_lock()
+             * --> pagedresults_mutex
+             * <-- pagedresults_mutex
+             * <-- pagedresults_unlock()
+             */
             pagedresults_mutex = pageresult_lock_get_addr(pb_conn);
         }

@@ -744,11 +752,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
             pr_search_result = pagedresults_get_search_result(pb_conn, operation, 1 /*locked*/, pr_idx);
             if (pr_search_result) {
                 if (pagedresults_is_abandoned_or_notavailable(pb_conn, 1 /*locked*/, pr_idx)) {
+                    pthread_mutex_unlock(pagedresults_mutex);
                    pagedresults_unlock(pb_conn, pr_idx);
                     /* Previous operation was abandoned and the simplepaged object is not in use. */
                     send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL);
                     rc = LDAP_SUCCESS;
-                    pthread_mutex_unlock(pagedresults_mutex);
                     goto free_and_return;
                 } else {
                     slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, pr_search_result);
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
index 642aefb3d..c3f3aae01 100644
--- a/ldap/servers/slapd/pagedresults.c
+++ b/ldap/servers/slapd/pagedresults.c
@@ -48,7 +48,6 @@ pageresult_lock_get_addr(Connection *conn)
 static void
 _pr_cleanup_one_slot(PagedResults *prp)
 {
-    PRLock *prmutex = NULL;
     if (!prp) {
         return;
     }
@@ -56,13 +55,17 @@ _pr_cleanup_one_slot(PagedResults *prp)
         /* sr is left; release it. */
         prp->pr_current_be->be_search_results_release(&(prp->pr_search_result_set));
     }
-    /* clean up the slot */
-    if (prp->pr_mutex) {
-        /* pr_mutex is reused; back it up and reset it. */
-        prmutex = prp->pr_mutex;
-    }
-    memset(prp, '\0', sizeof(PagedResults));
-    prp->pr_mutex = prmutex;
+
+    /* clean up the slot except the mutex */
+    prp->pr_current_be = NULL;
+    prp->pr_search_result_set = NULL;
+    prp->pr_search_result_count = 0;
+    prp->pr_search_result_set_size_estimate = 0;
+    prp->pr_sort_result_code = 0;
+    prp->pr_timelimit_hr.tv_sec = 0;
+    prp->pr_timelimit_hr.tv_nsec = 0;
+    prp->pr_flags = 0;
+    prp->pr_msgid = 0;
 }

 /*
@@ -1007,7 +1010,8 @@ op_set_pagedresults(Operation *op)

 /*
  * pagedresults_lock/unlock -- introduced to protect search results for the
- * asynchronous searches.
+ * asynchronous searches. Do not call these functions while the PR conn lock
+ * is held (e.g. pageresult_lock_get_addr(conn))
  */
 void
 pagedresults_lock(Connection *conn, int index)
@@ -1045,6 +1049,8 @@ int
 pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int index)
 {
     PagedResults *prp;
+    int32_t result;
+
     if (!conn || (index < 0) || (index >= conn->c_pagedresults.prl_maxlen)) {
         return 1; /* not abandoned, but do not want to proceed paged results op. */
     }
@@ -1052,10 +1058,11 @@ pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int inde
         pthread_mutex_lock(pageresult_lock_get_addr(conn));
     }
     prp = conn->c_pagedresults.prl_list + index;
+    result = prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED;
     if (!locked) {
         pthread_mutex_unlock(pageresult_lock_get_addr(conn));
     }
-    return prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED;
+    return result;
 }

 int
--
2.49.0

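Both hunks enforce the same two rules: always take the outer paged-result lock before the per-connection mutex (and release in reverse order), and never read a protected flag after the lock is dropped; instead, snapshot it while locked. A small hedged sketch of the idea in Python (names are illustrative, not the slapd API):

```python
import threading

# Fixed acquisition order: slot_lock first, then conn_mutex, mirroring
# pagedresults_lock() -> pagedresults_mutex in the comment above.
slot_lock = threading.Lock()
conn_mutex = threading.Lock()

def read_flag_safely(flags, mask):
    """Snapshot a flag while holding the locks, then use the copy outside them."""
    with slot_lock:           # --> pagedresults_lock()
        with conn_mutex:      # --> pagedresults_mutex
            result = flags & mask   # copy while protected
        # <-- conn_mutex released
    # <-- slot_lock released
    return result             # safe: no lock held while the value is used
```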
@@ -0,0 +1,37 @@
From 0ab37e0848e6f1c4e46068bee46bd91c3bb3d22d Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Tue, 1 Jul 2025 12:44:04 +0200
Subject: [PATCH] Issue 6838 - lib389/replica.py is using nonexistent
 datetime.UTC in Python 3.9

Bug Description:
389-ds-base-2.x is supposed to be used with Python 3.9.
But lib389/replica.py is using `datetime.UTC`, an alias for
`datetime.timezone.utc` that was added only in Python 3.11.

Fix Description:
Use `datetime.timezone.utc` instead.

Fixes: https://github.com/389ds/389-ds-base/issues/6838

Reviewed by: @mreynolds389 (Thanks!)
---
 src/lib389/lib389/replica.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index 18ce1b1d5..59be00a33 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -917,7 +917,7 @@ class RUV(object):
             ValueError("Wrong CSN value was supplied")

         timestamp = int(csn[:8], 16)
-        time_str = datetime.datetime.fromtimestamp(timestamp, datetime.UTC).strftime('%Y-%m-%d %H:%M:%S')
+        time_str = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
         # We are parsing shorter CSN which contains only timestamp
         if len(csn) == 8:
             return time_str
--
2.49.0

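The incompatibility is easy to reproduce: `datetime.UTC` only exists on Python 3.11+, while `datetime.timezone.utc` works on every version lib389 targets. A quick illustration (the hex value is just an example CSN timestamp field):

```python
import datetime

ts = int("68500000", 16)  # example: hex-encoded seconds from a CSN prefix

# Portable on Python 3.2+:
print(datetime.datetime.fromtimestamp(ts, datetime.timezone.utc)
      .strftime('%Y-%m-%d %H:%M:%S'))

# Raises AttributeError on Python 3.9/3.10 -- the alias was added in 3.11:
# datetime.datetime.fromtimestamp(ts, datetime.UTC)
```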
0007-Issue-6753-Add-add_exclude_subtree-and-remove_exclud.patch (new file, 515 lines)

@@ -0,0 +1,515 @@
From 8984550568737142dd22020e1b9efd87cc0e42f8 Mon Sep 17 00:00:00 2001
From: Lenka Doudova <lryznaro@redhat.com>
Date: Mon, 9 Jun 2025 15:15:04 +0200
Subject: [PATCH] Issue 6753 - Add 'add_exclude_subtree' and
 'remove_exclude_subtree' methods to Attribute uniqueness plugin

Description:
Adding 'add_exclude_subtree' and 'remove_exclude_subtree' methods to AttributeUniquenessPlugin in
order to be able to easily add or remove an exclude subtree.
Porting ticket 47927 test to
dirsrvtests/tests/suites/plugins/attruniq_test.py

Relates: #6753

Author: Lenka Doudova

Reviewers: Simon Pichugin, Mark Reynolds
---
 .../tests/suites/plugins/attruniq_test.py     | 171 +++++++++++
 dirsrvtests/tests/tickets/ticket47927_test.py | 267 ------------------
 src/lib389/lib389/plugins.py                  |  10 +
 3 files changed, 181 insertions(+), 267 deletions(-)
 delete mode 100644 dirsrvtests/tests/tickets/ticket47927_test.py

diff --git a/dirsrvtests/tests/suites/plugins/attruniq_test.py b/dirsrvtests/tests/suites/plugins/attruniq_test.py
index c1ccad9ae..aac659c29 100644
--- a/dirsrvtests/tests/suites/plugins/attruniq_test.py
+++ b/dirsrvtests/tests/suites/plugins/attruniq_test.py
@@ -10,6 +10,7 @@ import pytest
 import ldap
 import logging
 from lib389.plugins import AttributeUniquenessPlugin
+from lib389.idm.nscontainer import nsContainers
 from lib389.idm.user import UserAccounts
 from lib389.idm.group import Groups
 from lib389._constants import DEFAULT_SUFFIX
@@ -22,6 +23,19 @@ log = logging.getLogger(__name__)
 MAIL_ATTR_VALUE = 'non-uniq@value.net'
 MAIL_ATTR_VALUE_ALT = 'alt-mail@value.net'

+EXCLUDED_CONTAINER_CN = "excluded_container"
+EXCLUDED_CONTAINER_DN = "cn={},{}".format(EXCLUDED_CONTAINER_CN, DEFAULT_SUFFIX)
+
+EXCLUDED_BIS_CONTAINER_CN = "excluded_bis_container"
+EXCLUDED_BIS_CONTAINER_DN = "cn={},{}".format(EXCLUDED_BIS_CONTAINER_CN, DEFAULT_SUFFIX)
+
+ENFORCED_CONTAINER_CN = "enforced_container"
+
+USER_1_CN = "test_1"
+USER_2_CN = "test_2"
+USER_3_CN = "test_3"
+USER_4_CN = "test_4"
+

 def test_modrdn_attr_uniqueness(topology_st):
     """Test that we can not add two entries that have the same attr value that is
@@ -154,3 +168,160 @@ def test_multiple_attr_uniqueness(topology_st):
         testuser2.delete()
         attruniq.disable()
         attruniq.delete()
+
+
+def test_exclude_subtrees(topology_st):
+    """ Test attribute uniqueness with exclude scope
+
+    :id: 43d29a60-40e1-4ebd-b897-6ef9f20e9f27
+    :setup: Standalone instance
+    :steps:
+        1. Setup and enable attribute uniqueness plugin for telephonenumber unique attribute
+        2. Create subtrees and test users
+        3. Add a unique attribute to a user within uniqueness scope
+        4. Add exclude subtree
+        5. Try to add existing value attribute to an entry within uniqueness scope
+        6. Try to add existing value attribute to an entry within exclude scope
+        7. Remove the attribute from affected entries
+        8. Add a unique attribute to a user within exclude scope
+        9. Try to add existing value attribute to an entry within uniqueness scope
+        10. Try to add existing value attribute to another entry within uniqueness scope
+        11. Remove the attribute from affected entries
+        12. Add another exclude subtree
+        13. Add a unique attribute to a user within uniqueness scope
+        14. Try to add existing value attribute to an entry within uniqueness scope
+        15. Try to add existing value attribute to an entry within exclude scope
+        16. Try to add existing value attribute to an entry within another exclude scope
+        17. Clean up entries
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+        5. Should raise CONSTRAINT_VIOLATION
+        6. Success
+        7. Success
+        8. Success
+        9. Success
+        10. Should raise CONSTRAINT_VIOLATION
+        11. Success
+        12. Success
+        13. Success
+        14. Should raise CONSTRAINT_VIOLATION
+        15. Success
+        16. Success
+        17. Success
+    """
+    log.info('Setup attribute uniqueness plugin')
+    attruniq = AttributeUniquenessPlugin(topology_st.standalone, dn="cn=attruniq,cn=plugins,cn=config")
+    attruniq.create(properties={'cn': 'attruniq'})
+    attruniq.add_unique_attribute('telephonenumber')
+    attruniq.add_unique_subtree(DEFAULT_SUFFIX)
+    attruniq.enable_all_subtrees()
+    attruniq.enable()
+    topology_st.standalone.restart()
+
+    log.info('Create subtrees container')
+    containers = nsContainers(topology_st.standalone, DEFAULT_SUFFIX)
+    cont1 = containers.create(properties={'cn': EXCLUDED_CONTAINER_CN})
+    cont2 = containers.create(properties={'cn': EXCLUDED_BIS_CONTAINER_CN})
+    cont3 = containers.create(properties={'cn': ENFORCED_CONTAINER_CN})
+
+    log.info('Create test users')
+    users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
+                         rdn='cn={}'.format(ENFORCED_CONTAINER_CN))
+    users_excluded = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
+                                  rdn='cn={}'.format(EXCLUDED_CONTAINER_CN))
+    users_excluded2 = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
+                                   rdn='cn={}'.format(EXCLUDED_BIS_CONTAINER_CN))
+
+    user1 = users.create(properties={'cn': USER_1_CN,
+                                     'uid': USER_1_CN,
+                                     'sn': USER_1_CN,
+                                     'uidNumber': '1',
+                                     'gidNumber': '11',
+                                     'homeDirectory': '/home/{}'.format(USER_1_CN)})
+    user2 = users.create(properties={'cn': USER_2_CN,
+                                     'uid': USER_2_CN,
+                                     'sn': USER_2_CN,
+                                     'uidNumber': '2',
+                                     'gidNumber': '22',
+                                     'homeDirectory': '/home/{}'.format(USER_2_CN)})
+    user3 = users_excluded.create(properties={'cn': USER_3_CN,
+                                              'uid': USER_3_CN,
+                                              'sn': USER_3_CN,
+                                              'uidNumber': '3',
+                                              'gidNumber': '33',
+                                              'homeDirectory': '/home/{}'.format(USER_3_CN)})
+    user4 = users_excluded2.create(properties={'cn': USER_4_CN,
+                                               'uid': USER_4_CN,
+                                               'sn': USER_4_CN,
+                                               'uidNumber': '4',
+                                               'gidNumber': '44',
+                                               'homeDirectory': '/home/{}'.format(USER_4_CN)})
+
+    UNIQUE_VALUE = '1234'
+
+    try:
+        log.info('Create user with unique attribute')
+        user1.add('telephonenumber', UNIQUE_VALUE)
+        assert user1.present('telephonenumber', UNIQUE_VALUE)
+
+        log.info('Add exclude subtree')
+        attruniq.add_exclude_subtree(EXCLUDED_CONTAINER_DN)
+        topology_st.standalone.restart()
+
+        log.info('Verify an already used attribute value cannot be added within the same subtree')
+        with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+            user2.add('telephonenumber', UNIQUE_VALUE)
+
+        log.info('Verify an entry with same attribute value can be added within exclude subtree')
+        user3.add('telephonenumber', UNIQUE_VALUE)
+        assert user3.present('telephonenumber', UNIQUE_VALUE)
+
+        log.info('Cleanup unique attribute values')
+        user1.remove_all('telephonenumber')
+        user3.remove_all('telephonenumber')
+
+        log.info('Add a unique value to an entry in excluded scope')
+        user3.add('telephonenumber', UNIQUE_VALUE)
+        assert user3.present('telephonenumber', UNIQUE_VALUE)
+
+        log.info('Verify the same value can be added to an entry within uniqueness scope')
+        user1.add('telephonenumber', UNIQUE_VALUE)
+        assert user1.present('telephonenumber', UNIQUE_VALUE)
+
+        log.info('Verify that yet another same value cannot be added to another entry within uniqueness scope')
+        with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+            user2.add('telephonenumber', UNIQUE_VALUE)
+
+        log.info('Cleanup unique attribute values')
+        user1.remove_all('telephonenumber')
+        user3.remove_all('telephonenumber')
+
+        log.info('Add another exclude subtree')
+        attruniq.add_exclude_subtree(EXCLUDED_BIS_CONTAINER_DN)
+        topology_st.standalone.restart()
+
+        user1.add('telephonenumber', UNIQUE_VALUE)
+        log.info('Verify an already used attribute value cannot be added within the same subtree')
+        with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+            user2.add('telephonenumber', UNIQUE_VALUE)
+
+        log.info('Verify an already used attribute can be added to an entry in exclude scope')
+        user3.add('telephonenumber', UNIQUE_VALUE)
+        assert user3.present('telephonenumber', UNIQUE_VALUE)
+        user4.add('telephonenumber', UNIQUE_VALUE)
+        assert user4.present('telephonenumber', UNIQUE_VALUE)
+
+    finally:
+        log.info('Clean up users, containers and attribute uniqueness plugin')
+        user1.delete()
+        user2.delete()
+        user3.delete()
+        user4.delete()
+        cont1.delete()
+        cont2.delete()
+        cont3.delete()
+        attruniq.disable()
+        attruniq.delete()
\ No newline at end of file
diff --git a/dirsrvtests/tests/tickets/ticket47927_test.py b/dirsrvtests/tests/tickets/ticket47927_test.py
deleted file mode 100644
index 887fe1af4..000000000
--- a/dirsrvtests/tests/tickets/ticket47927_test.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2016 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import pytest
-from lib389.tasks import *
-from lib389.utils import *
-from lib389.topologies import topology_st
-
-from lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_ATTR_UNIQUENESS
-
-# Skip on older versions
-pytestmark = [pytest.mark.tier2,
- pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")]
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-EXCLUDED_CONTAINER_CN = "excluded_container"
-EXCLUDED_CONTAINER_DN = "cn=%s,%s" % (EXCLUDED_CONTAINER_CN, SUFFIX)
-
-EXCLUDED_BIS_CONTAINER_CN = "excluded_bis_container"
-EXCLUDED_BIS_CONTAINER_DN = "cn=%s,%s" % (EXCLUDED_BIS_CONTAINER_CN, SUFFIX)
-
-ENFORCED_CONTAINER_CN = "enforced_container"
-ENFORCED_CONTAINER_DN = "cn=%s,%s" % (ENFORCED_CONTAINER_CN, SUFFIX)
-
-USER_1_CN = "test_1"
-USER_1_DN = "cn=%s,%s" % (USER_1_CN, ENFORCED_CONTAINER_DN)
-USER_2_CN = "test_2"
-USER_2_DN = "cn=%s,%s" % (USER_2_CN, ENFORCED_CONTAINER_DN)
-USER_3_CN = "test_3"
-USER_3_DN = "cn=%s,%s" % (USER_3_CN, EXCLUDED_CONTAINER_DN)
-USER_4_CN = "test_4"
-USER_4_DN = "cn=%s,%s" % (USER_4_CN, EXCLUDED_BIS_CONTAINER_DN)
-
-
-def test_ticket47927_init(topology_st):
- topology_st.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
- try:
- topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
- [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', b'telephonenumber'),
- (ldap.MOD_REPLACE, 'uniqueness-subtrees', ensure_bytes(DEFAULT_SUFFIX)),
- ])
- except ldap.LDAPError as e:
- log.fatal('test_ticket47927: Failed to configure plugin for "telephonenumber": error ' + e.args[0]['desc'])
- assert False
- topology_st.standalone.restart(timeout=120)
-
- topology_st.standalone.add_s(Entry((EXCLUDED_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
- 'cn': EXCLUDED_CONTAINER_CN})))
- topology_st.standalone.add_s(Entry((EXCLUDED_BIS_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
- 'cn': EXCLUDED_BIS_CONTAINER_CN})))
- topology_st.standalone.add_s(Entry((ENFORCED_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
- 'cn': ENFORCED_CONTAINER_CN})))
-
- # adding an entry on a stage with a different 'cn'
- topology_st.standalone.add_s(Entry((USER_1_DN, {
- 'objectclass': "top person".split(),
- 'sn': USER_1_CN,
- 'cn': USER_1_CN})))
- # adding an entry on a stage with a different 'cn'
- topology_st.standalone.add_s(Entry((USER_2_DN, {
- 'objectclass': "top person".split(),
- 'sn': USER_2_CN,
- 'cn': USER_2_CN})))
- topology_st.standalone.add_s(Entry((USER_3_DN, {
- 'objectclass': "top person".split(),
- 'sn': USER_3_CN,
- 'cn': USER_3_CN})))
- topology_st.standalone.add_s(Entry((USER_4_DN, {
- 'objectclass': "top person".split(),
- 'sn': USER_4_CN,
- 'cn': USER_4_CN})))
-
-
-def test_ticket47927_one(topology_st):
- '''
- Check that uniqueness is enforce on all SUFFIX
- '''
- UNIQUE_VALUE = b'1234'
- try:
- topology_st.standalone.modify_s(USER_1_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
- except ldap.LDAPError as e:
- log.fatal('test_ticket47927_one: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.args[0]['desc']))
- assert False
-
- # we expect to fail because user1 is in the scope of the plugin
- try:
- topology_st.standalone.modify_s(USER_2_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
- log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
- assert False
- except ldap.LDAPError as e:
- log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (
- USER_2_DN, e.args[0]['desc']))
- pass
-
- # we expect to fail because user1 is in the scope of the plugin
- try:
- topology_st.standalone.modify_s(USER_3_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
- log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_3_DN))
- assert False
- except ldap.LDAPError as e:
- log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (
- USER_3_DN, e.args[0]['desc']))
- pass
-
-
-def test_ticket47927_two(topology_st):
- '''
- Exclude the EXCLUDED_CONTAINER_DN from the uniqueness plugin
- '''
- try:
- topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
- [(ldap.MOD_REPLACE, 'uniqueness-exclude-subtrees', ensure_bytes(EXCLUDED_CONTAINER_DN))])
- except ldap.LDAPError as e:
- log.fatal('test_ticket47927_two: Failed to configure plugin for to exclude %s: error %s' % (
- EXCLUDED_CONTAINER_DN, e.args[0]['desc']))
- assert False
- topology_st.standalone.restart(timeout=120)
-
-
-def test_ticket47927_three(topology_st):
- '''
- Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
- First case: it exists an entry (with the same attribute value) in the scope
- of the plugin and we set the value in an entry that is in an excluded scope
- '''
- UNIQUE_VALUE = b'9876'
- try:
- topology_st.standalone.modify_s(USER_1_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
- except ldap.LDAPError as e:
- log.fatal('test_ticket47927_three: Failed to set the telephonenumber ' + e.args[0]['desc'])
- assert False
-
- # we should not be allowed to set this value (because user1 is in the scope)
- try:
- topology_st.standalone.modify_s(USER_2_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
- log.fatal('test_ticket47927_three: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
- assert False
- except ldap.LDAPError as e:
- log.fatal('test_ticket47927_three: Failed (expected) to set the telephonenumber for %s: %s' % (
- USER_2_DN, e.args[0]['desc']))
-
- # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
- try:
- topology_st.standalone.modify_s(USER_3_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
- log.fatal('test_ticket47927_three: success to set the telephonenumber for %s' % (USER_3_DN))
- except ldap.LDAPError as e:
- log.fatal('test_ticket47927_three: Failed (unexpected) to set the telephonenumber for %s: %s' % (
- USER_3_DN, e.args[0]['desc']))
- assert False
-
-
-def test_ticket47927_four(topology_st):
- '''
- Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
- Second case: it exists an entry (with the same attribute value) in an excluded scope
- of the plugin and we set the value in an entry is in the scope
- '''
- UNIQUE_VALUE = b'1111'
- # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
- try:
- topology_st.standalone.modify_s(USER_3_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
- log.fatal('test_ticket47927_four: success to set the telephonenumber for %s' % USER_3_DN)
- except ldap.LDAPError as e:
- log.fatal('test_ticket47927_four: Failed (unexpected) to set the telephonenumber for %s: %s' % (
- USER_3_DN, e.args[0]['desc']))
- assert False
-
- # we should be allowed to set this value (because user3 is excluded from scope)
- try:
- topology_st.standalone.modify_s(USER_1_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
- except ldap.LDAPError as e:
- log.fatal(
- 'test_ticket47927_four: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.args[0]['desc']))
- assert False
-
- # we should not be allowed to set this value (because user1 is in the scope)
- try:
- topology_st.standalone.modify_s(USER_2_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
- log.fatal('test_ticket47927_four: unexpected success to set the telephonenumber %s' % USER_2_DN)
- assert False
- except ldap.LDAPError as e:
- log.fatal('test_ticket47927_four: Failed (expected) to set the telephonenumber for %s: %s' % (
- USER_2_DN, e.args[0]['desc']))
- pass
-
-
-def test_ticket47927_five(topology_st):
- '''
- Exclude the EXCLUDED_BIS_CONTAINER_DN from the uniqueness plugin
- '''
- try:
- topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
- [(ldap.MOD_ADD, 'uniqueness-exclude-subtrees', ensure_bytes(EXCLUDED_BIS_CONTAINER_DN))])
- except ldap.LDAPError as e:
- log.fatal('test_ticket47927_five: Failed to configure plugin for to exclude %s: error %s' % (
- EXCLUDED_BIS_CONTAINER_DN, e.args[0]['desc']))
- assert False
- topology_st.standalone.restart(timeout=120)
- topology_st.standalone.getEntry('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', ldap.SCOPE_BASE)
-
-
-def test_ticket47927_six(topology_st):
- '''
- Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
- and EXCLUDED_BIS_CONTAINER_DN
- First case: it exists an entry (with the same attribute value) in the scope
- of the plugin and we set the value in an entry that is in an excluded scope
- '''
- UNIQUE_VALUE = b'222'
- try:
- topology_st.standalone.modify_s(USER_1_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
- except ldap.LDAPError as e:
- log.fatal('test_ticket47927_six: Failed to set the telephonenumber ' + e.args[0]['desc'])
- assert False
-
- # we should not be allowed to set this value (because user1 is in the scope)
- try:
- topology_st.standalone.modify_s(USER_2_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
- log.fatal('test_ticket47927_six: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
- assert False
- except ldap.LDAPError as e:
- log.fatal('test_ticket47927_six: Failed (expected) to set the telephonenumber for %s: %s' % (
- USER_2_DN, e.args[0]['desc']))
-
- # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
- try:
- topology_st.standalone.modify_s(USER_3_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
- log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_3_DN))
- except ldap.LDAPError as e:
- log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (
- USER_3_DN, e.args[0]['desc']))
- assert False
- # USER_4_DN is in EXCLUDED_CONTAINER_DN so update should be successful
- try:
- topology_st.standalone.modify_s(USER_4_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
- log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_4_DN))
- except ldap.LDAPError as e:
- log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (
- USER_4_DN, e.args[0]['desc']))
- assert False
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
index 31bbfa502..977091726 100644
--- a/src/lib389/lib389/plugins.py
+++ b/src/lib389/lib389/plugins.py
@@ -175,6 +175,16 @@ class AttributeUniquenessPlugin(Plugin):

self.set('uniqueness-across-all-subtrees', 'off')

+ def add_exclude_subtree(self, basedn):
+ """Add a uniqueness-exclude-subtrees attribute"""
+
+ self.add('uniqueness-exclude-subtrees', basedn)
+
+ def remove_exclude_subtree(self, basedn):
+ """Remove a uniqueness-exclude-subtrees attribute"""
+
+ self.remove('uniqueness-exclude-subtrees', basedn)
+

class AttributeUniquenessPlugins(DSLdapObjects):
"""A DSLdapObjects entity which represents Attribute Uniqueness plugin instances
--
2.49.0

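The two helpers added above can be driven straight from lib389. A minimal sketch, assuming an already-connected DirSrv instance `inst` and a `dc=example,dc=com` suffix (both placeholders, not part of the patch):

```python
from lib389.plugins import AttributeUniquenessPlugin

# The plugin's standard config entry under cn=plugins,cn=config.
attruniq = AttributeUniquenessPlugin(
    inst, dn="cn=attribute uniqueness,cn=plugins,cn=config")

# Carve a container out of the uniqueness scope...
attruniq.add_exclude_subtree('cn=excluded_container,dc=example,dc=com')
inst.restart()  # the test above restarts so the plugin re-reads its config

# ...and put it back when done.
attruniq.remove_exclude_subtree('cn=excluded_container,dc=example,dc=com')
```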
@ -0,0 +1,45 @@
From 4be22be50dfdf0a5ddd27dc8f9d9618b941c8be8 Mon Sep 17 00:00:00 2001
From: Alexander Bokovoy <abokovoy@redhat.com>
Date: Wed, 9 Jul 2025 12:08:09 +0300
Subject: [PATCH] Issue 6857 - uiduniq: allow specifying match rules in the
 filter

Allow uniqueness plugin to work with attributes where uniqueness should
be enforced using a different matching rule than the one defined for the
attribute itself.

Since uniqueness plugin configuration can contain multiple attributes,
add the matching rule right to the attribute as it is used in the LDAP rule
(e.g. 'attribute:caseIgnoreMatch:' to force 'attribute' to be searched
with a case-insensitive matching rule instead of the original matching
rule).

Fixes: https://github.com/389ds/389-ds-base/issues/6857

Signed-off-by: Alexander Bokovoy <abokovoy@redhat.com>
---
ldap/servers/plugins/uiduniq/uid.c | 7 +++++++
1 file changed, 7 insertions(+)

diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
index 053af4f9d..887e79d78 100644
--- a/ldap/servers/plugins/uiduniq/uid.c
+++ b/ldap/servers/plugins/uiduniq/uid.c
@@ -1030,7 +1030,14 @@ preop_add(Slapi_PBlock *pb)
}

for (i = 0; attrNames && attrNames[i]; i++) {
+ char *attr_match = strchr(attrNames[i], ':');
+ if (attr_match != NULL) {
+ attr_match[0] = '\0';
+ }
err = slapi_entry_attr_find(e, attrNames[i], &attr);
+ if (attr_match != NULL) {
+ attr_match[0] = ':';
+ }
if (!err) {
/*
* Passed all the requirements - this is an operation we
--
2.49.0

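As a usage illustration (a sketch, not part of the patch): with lib389's AttributeUniquenessPlugin, the matching rule rides along inside the configured attribute name itself, so no new config attribute is needed. `inst` is again a placeholder DirSrv instance:

```python
from lib389.plugins import AttributeUniquenessPlugin

attruniq = AttributeUniquenessPlugin(
    inst, dn="cn=attribute uniqueness,cn=plugins,cn=config")
# Force case-insensitive uniqueness checks on 'attribute', as in the
# commit message's example; the plugin strips the ':caseIgnoreMatch:'
# suffix before looking the attribute up in the entry.
attruniq.add_unique_attribute('attribute:caseIgnoreMatch:')
attruniq.enable()
inst.restart()
```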
1201
0009-Issue-6756-CLI-UI-Properly-handle-disabled-NDN-cache.patch
Normal file
File diff suppressed because it is too large
2237
0010-Issue-6854-Refactor-for-improved-data-management-685.patch
Normal file
File diff suppressed because it is too large
@ -0,0 +1,65 @@
From ad2a06cb64156c55d81b7a1647f9bec7071df9f4 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Mon, 7 Jul 2025 23:11:17 +0200
Subject: [PATCH] Issue 6850 - AddressSanitizer: memory leak in mdb_init

Bug Description:
`dbmdb_componentid` can be allocated multiple times. To avoid a memory
leak, allocate it only once, and free it at cleanup.

Fixes: https://github.com/389ds/389-ds-base/issues/6850

Reviewed by: @mreynolds389, @tbordaz (Thanks!)
---
ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c | 4 +++-
ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c | 2 +-
ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c | 5 +++++
3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
index 447f3c70a..54ca03b0b 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
@@ -146,7 +146,9 @@ dbmdb_compute_limits(struct ldbminfo *li)
int mdb_init(struct ldbminfo *li, config_info *config_array)
{
dbmdb_ctx_t *conf = (dbmdb_ctx_t *)slapi_ch_calloc(1, sizeof(dbmdb_ctx_t));
- dbmdb_componentid = generate_componentid(NULL, "db-mdb");
+ if (dbmdb_componentid == NULL) {
+ dbmdb_componentid = generate_componentid(NULL, "db-mdb");
+ }

li->li_dblayer_config = conf;
strncpy(conf->home, li->li_directory, MAXPATHLEN-1);
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
index c4e87987f..ed17f979f 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
@@ -19,7 +19,7 @@
#include <prclist.h>
#include <glob.h>

-Slapi_ComponentId *dbmdb_componentid;
+Slapi_ComponentId *dbmdb_componentid = NULL;

#define BULKOP_MAX_RECORDS 100 /* Max records handled by a single bulk operations */

diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c
index 2d07db9b5..ae10ac7cf 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c
@@ -49,6 +49,11 @@ dbmdb_cleanup(struct ldbminfo *li)
}
slapi_ch_free((void **)&(li->li_dblayer_config));

+ if (dbmdb_componentid != NULL) {
+ release_componentid(dbmdb_componentid);
+ dbmdb_componentid = NULL;
+ }
+
return 0;
}

--
2.49.0

58
0012-Issue-6848-AddressSanitizer-leak-in-do_search.patch
Normal file
@ -0,0 +1,58 @@
From 98a83bb00255f77467a370d3347a8428b6659463 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Mon, 7 Jul 2025 22:01:09 +0200
Subject: [PATCH] Issue 6848 - AddressSanitizer: leak in do_search

Bug Description:
When there's a BER decoding error and the function goes to
`free_and_return`, the `attrs` variable is not being freed because it's
only freed if `!psearch || rc != 0 || err != 0`, but `err` is still 0 at
that point.

If we reach `free_and_return` from the `ber_scanf` error path, `attrs`
was never set in the pblock with `slapi_pblock_set()`, so the
`slapi_pblock_get()` call will not retrieve the potentially partially
allocated `attrs` from the BER decoding.

Fixes: https://github.com/389ds/389-ds-base/issues/6848

Reviewed by: @tbordaz, @droideck (Thanks!)
---
ldap/servers/slapd/search.c | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/ldap/servers/slapd/search.c b/ldap/servers/slapd/search.c
index e9b2c3670..f9d03c090 100644
--- a/ldap/servers/slapd/search.c
+++ b/ldap/servers/slapd/search.c
@@ -235,6 +235,7 @@ do_search(Slapi_PBlock *pb)
log_search_access(pb, base, scope, fstr, "decoding error");
send_ldap_result(pb, LDAP_PROTOCOL_ERROR, NULL, NULL, 0,
NULL);
+ err = 1; /* Make sure we free everything */
goto free_and_return;
}

@@ -420,8 +421,17 @@ free_and_return:
if (!psearch || rc != 0 || err != 0) {
slapi_ch_free_string(&fstr);
slapi_filter_free(filter, 1);
- slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &attrs);
- charray_free(attrs); /* passing NULL is fine */
+
+ /* Get attrs from pblock if it was set there, otherwise use local attrs */
+ char **pblock_attrs = NULL;
+ slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &pblock_attrs);
+ if (pblock_attrs != NULL) {
+ charray_free(pblock_attrs); /* Free attrs from pblock */
+ slapi_pblock_set(pb, SLAPI_SEARCH_ATTRS, NULL);
+ } else if (attrs != NULL) {
+ /* Free attrs that were allocated but never put in pblock */
+ charray_free(attrs);
+ }
charray_free(gerattrs); /* passing NULL is fine */
/*
* Fix for defect 526719 / 553356 : Persistent search op failed.
--
2.49.0

@ -0,0 +1,58 @@
From e89a5acbc1bcc1b460683aa498005d6f0ce7054e Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Fri, 11 Jul 2025 12:32:38 +0200
Subject: [PATCH] Issue 6865 - AddressSanitizer: leak in
 agmt_update_init_status

Bug Description:
We allocate an array of `LDAPMod *` pointers, but never free it:

```
=================================================================
==2748356==ERROR: LeakSanitizer: detected memory leaks

Direct leak of 24 byte(s) in 1 object(s) allocated from:
#0 0x7f05e8cb4a07 in __interceptor_malloc (/lib64/libasan.so.6+0xb4a07)
#1 0x7f05e85c0138 in slapi_ch_malloc (/usr/lib64/dirsrv/libslapd.so.0+0x1c0138)
#2 0x7f05e109e481 in agmt_update_init_status ldap/servers/plugins/replication/repl5_agmt.c:2583
#3 0x7f05e10a0aa5 in agmtlist_shutdown ldap/servers/plugins/replication/repl5_agmtlist.c:789
#4 0x7f05e10ab6bc in multisupplier_stop ldap/servers/plugins/replication/repl5_init.c:844
#5 0x7f05e10ab6bc in multisupplier_stop ldap/servers/plugins/replication/repl5_init.c:837
#6 0x7f05e862507d in plugin_call_func ldap/servers/slapd/plugin.c:2001
#7 0x7f05e8625be1 in plugin_call_one ldap/servers/slapd/plugin.c:1950
#8 0x7f05e8625be1 in plugin_dependency_closeall ldap/servers/slapd/plugin.c:1844
#9 0x55e1a7ff9815 in slapd_daemon ldap/servers/slapd/daemon.c:1275
#10 0x55e1a7fd36ef in main (/usr/sbin/ns-slapd+0x3e6ef)
#11 0x7f05e80295cf in __libc_start_call_main (/lib64/libc.so.6+0x295cf)
#12 0x7f05e802967f in __libc_start_main_alias_2 (/lib64/libc.so.6+0x2967f)
#13 0x55e1a7fd74a4 in _start (/usr/sbin/ns-slapd+0x424a4)

SUMMARY: AddressSanitizer: 24 byte(s) leaked in 1 allocation(s).
```

Fix Description:
Ensure `mods` is freed in the cleanup code.

Fixes: https://github.com/389ds/389-ds-base/issues/6865
Relates: https://github.com/389ds/389-ds-base/issues/6470

Reviewed by: @mreynolds389 (Thanks!)
---
ldap/servers/plugins/replication/repl5_agmt.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
index c818c5857..0a81167b7 100644
--- a/ldap/servers/plugins/replication/repl5_agmt.c
+++ b/ldap/servers/plugins/replication/repl5_agmt.c
@@ -2743,6 +2743,7 @@ agmt_update_init_status(Repl_Agmt *ra)
} else {
PR_Unlock(ra->lock);
}
+ slapi_ch_free((void **)&mods);
slapi_mod_done(&smod_start_time);
slapi_mod_done(&smod_end_time);
slapi_mod_done(&smod_status);
--
2.49.0

169
0014-Issue-6859-str2filter-is-not-fully-applying-matching.patch
Normal file
@ -0,0 +1,169 @@
From bf58cba210cd785d3abe6ffbbf174481258dcf5e Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 9 Jul 2025 14:18:50 -0400
Subject: [PATCH] Issue 6859 - str2filter is not fully applying matching rules

Description:

When we have an extended filter, one with a MR applied, it is ignored during
internal searches:

"(cn:CaseExactMatch:=Value)"

For internal searches we use str2filter() and it doesn't fully apply extended
search filter matching rules.

Also needed to update the attr uniqueness plugin to apply this change for mod
operations (previously only Adds were correctly handling these attribute
filters).

Relates: https://github.com/389ds/389-ds-base/issues/6857
Relates: https://github.com/389ds/389-ds-base/issues/6859

Reviewed by: spichugi & tbordaz (Thanks!!)
---
.../tests/suites/plugins/attruniq_test.py | 65 ++++++++++++++++++-
ldap/servers/plugins/uiduniq/uid.c | 7 ++
ldap/servers/slapd/plugin_mr.c | 2 +-
ldap/servers/slapd/str2filter.c | 8 +++
4 files changed, 79 insertions(+), 3 deletions(-)

diff --git a/dirsrvtests/tests/suites/plugins/attruniq_test.py b/dirsrvtests/tests/suites/plugins/attruniq_test.py
index aac659c29..046952df3 100644
--- a/dirsrvtests/tests/suites/plugins/attruniq_test.py
+++ b/dirsrvtests/tests/suites/plugins/attruniq_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2021 Red Hat, Inc.
+# Copyright (C) 2025 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -324,4 +324,65 @@ def test_exclude_subtrees(topology_st):
cont2.delete()
cont3.delete()
attruniq.disable()
- attruniq.delete()
\ No newline at end of file
+ attruniq.delete()
+
+
+def test_matchingrule_attr(topology_st):
+ """ Test list extension MR attribute. Check for "cn" using CES (versus it
+ being defined as CIS)
+
+ :id: 5cde4342-6fa3-4225-b23d-0af918981075
+ :setup: Standalone instance
+ :steps:
+ 1. Setup and enable attribute uniqueness plugin to use CN attribute
+ with a matching rule of CaseExactMatch.
+ 2. Add user with CN value is lowercase
+ 3. Add second user with same lowercase CN which should be rejected
+ 4. Add second user with same CN value but with mixed case
+ 5. Modify second user replacing CN value to lc which should be rejected
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ """
+
+ inst = topology_st.standalone
+
+ attruniq = AttributeUniquenessPlugin(inst,
+ dn="cn=attribute uniqueness,cn=plugins,cn=config")
+ attruniq.add_unique_attribute('cn:CaseExactMatch:')
+ attruniq.enable_all_subtrees()
+ attruniq.enable()
+ inst.restart()
+
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+ users.create(properties={'cn': "common_name",
+ 'uid': "uid_name",
+ 'sn': "uid_name",
+ 'uidNumber': '1',
+ 'gidNumber': '11',
+ 'homeDirectory': '/home/uid_name'})
+
+ log.info('Add entry with the exact CN value which should be rejected')
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+ users.create(properties={'cn': "common_name",
+ 'uid': "uid_name2",
+ 'sn': "uid_name2",
+ 'uidNumber': '11',
+ 'gidNumber': '111',
+ 'homeDirectory': '/home/uid_name2'})
+
+ log.info('Add entry with the mixed case CN value which should be allowed')
+ user = users.create(properties={'cn': "Common_Name",
+ 'uid': "uid_name2",
+ 'sn': "uid_name2",
+ 'uidNumber': '11',
+ 'gidNumber': '111',
+ 'homeDirectory': '/home/uid_name2'})
+
+ log.info('Mod entry with exact case CN value which should be rejected')
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+ user.replace('cn', 'common_name')
diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
index 887e79d78..fdb1404a0 100644
--- a/ldap/servers/plugins/uiduniq/uid.c
+++ b/ldap/servers/plugins/uiduniq/uid.c
@@ -1178,6 +1178,10 @@ preop_modify(Slapi_PBlock *pb)
for (; mods && *mods; mods++) {
mod = *mods;
for (i = 0; attrNames && attrNames[i]; i++) {
+ char *attr_match = strchr(attrNames[i], ':');
+ if (attr_match != NULL) {
+ attr_match[0] = '\0';
+ }
if ((slapi_attr_type_cmp(mod->mod_type, attrNames[i], 1) == 0) && /* mod contains target attr */
(mod->mod_op & LDAP_MOD_BVALUES) && /* mod is bval encoded (not string val) */
(mod->mod_bvalues && mod->mod_bvalues[0]) && /* mod actually contains some values */
@@ -1186,6 +1190,9 @@ preop_modify(Slapi_PBlock *pb)
{
addMod(&checkmods, &checkmodsCapacity, &modcount, mod);
}
+ if (attr_match != NULL) {
+ attr_match[0] = ':';
+ }
}
}
if (modcount == 0) {
diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c
index 9809a4374..757355dbc 100644
--- a/ldap/servers/slapd/plugin_mr.c
+++ b/ldap/servers/slapd/plugin_mr.c
@@ -625,7 +625,7 @@ attempt_mr_filter_create(mr_filter_t *f, struct slapdplugin *mrp, Slapi_PBlock *
int rc;
int32_t (*mrf_create)(Slapi_PBlock *) = NULL;
f->mrf_match = NULL;
- pblock_init(pb);
+ slapi_pblock_init(pb);
if (!(rc = slapi_pblock_set(pb, SLAPI_PLUGIN, mrp)) &&
!(rc = slapi_pblock_get(pb, SLAPI_PLUGIN_MR_FILTER_CREATE_FN, &mrf_create)) &&
mrf_create != NULL &&
diff --git a/ldap/servers/slapd/str2filter.c b/ldap/servers/slapd/str2filter.c
index 9fdc500f7..5620b7439 100644
--- a/ldap/servers/slapd/str2filter.c
+++ b/ldap/servers/slapd/str2filter.c
@@ -344,6 +344,14 @@ str2simple(char *str, int unescape_filter)
return NULL; /* error */
} else {
f->f_choice = LDAP_FILTER_EXTENDED;
+ if (f->f_mr_oid) {
+ /* apply the MR indexers */
+ rc = plugin_mr_filter_create(&f->f_mr);
+ if (rc) {
+ slapi_filter_free(f, 1);
+ return NULL; /* error */
+ }
+ }
}
} else if (str_find_star(value) == NULL) {
f->f_choice = LDAP_FILTER_EQUALITY;
--
2.49.0

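For comparison, the same extended (extensible-match) filter issued over the wire with python-ldap — a sketch only; the host, bind DN, password, and suffix below are placeholders:

```python
import ldap

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

# ':CaseExactMatch:' overrides cn's schema-defined case-insensitive
# matching, so only byte-for-byte matches of "Value" are returned.
results = conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                        '(cn:CaseExactMatch:=Value)')
```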
163
0015-Issue-6872-compressed-log-rotation-creates-files-wit.patch
Normal file
@ -0,0 +1,163 @@
From 4719e7df7ba0eb5e26830812ab9ead51f1e0c5f5 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Tue, 15 Jul 2025 17:56:18 -0400
Subject: [PATCH] Issue 6872 - compressed log rotation creates files with world
 readable permission

Description:

When compressing a log file, first create the empty file using open()
so we can set the correct permissions right from the start. gzopen()
always uses permission 644, which is not safe. So after creating the file
with open() and the correct permissions, we pass the FD to gzdopen()
and write the compressed content.

relates: https://github.com/389ds/389-ds-base/issues/6872

Reviewed by: progier (Thanks!)
---
.../logging/logging_compression_test.py | 15 ++++++++--
ldap/servers/slapd/log.c | 28 +++++++++++++------
ldap/servers/slapd/schema.c | 2 +-
3 files changed, 33 insertions(+), 12 deletions(-)

diff --git a/dirsrvtests/tests/suites/logging/logging_compression_test.py b/dirsrvtests/tests/suites/logging/logging_compression_test.py
index e30874cc0..3a987d62c 100644
--- a/dirsrvtests/tests/suites/logging/logging_compression_test.py
+++ b/dirsrvtests/tests/suites/logging/logging_compression_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2022 Red Hat, Inc.
+# Copyright (C) 2025 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -22,12 +22,21 @@ log = logging.getLogger(__name__)

pytestmark = pytest.mark.tier1

+
def log_rotated_count(log_type, log_dir, check_compressed=False):
- # Check if the log was rotated
+ """
+ Check if the log was rotated and has the correct permissions
+ """
log_file = f'{log_dir}/{log_type}.2*'
if check_compressed:
log_file += ".gz"
- return len(glob.glob(log_file))
+ log_files = glob.glob(log_file)
+ for logf in log_files:
+ # Check permissions
+ st = os.stat(logf)
+ assert oct(st.st_mode) == '0o100600' # 0600
+
+ return len(log_files)


def update_and_sleep(inst, suffix, sleep=True):
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index e859682fe..f535011ab 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -174,17 +174,28 @@ get_syslog_loglevel(int loglevel)
}

static int
-compress_log_file(char *log_name)
+compress_log_file(char *log_name, int32_t mode)
{
char gzip_log[BUFSIZ] = {0};
char buf[LOG_CHUNK] = {0};
size_t bytes_read = 0;
gzFile outfile = NULL;
FILE *source = NULL;
+ int fd = 0;

PR_snprintf(gzip_log, sizeof(gzip_log), "%s.gz", log_name);
- if ((outfile = gzopen(gzip_log,"wb")) == NULL) {
- /* Failed to open new gzip file */
+
+ /*
+ * Try to open the file as we may have an incorrect path. We also need to
+ * set the permissions using open() as gzopen() creates the file with
+ * 644 permissions (world readable - bad). So we create an empty file with
+ * the correct permissions, then we pass the FD to gzdopen() to write the
+ * compressed content.
+ */
+ if ((fd = open(gzip_log, O_WRONLY|O_CREAT|O_TRUNC, mode)) >= 0) {
+ /* File successfully created, now pass the FD to gzdopen() */
+ outfile = gzdopen(fd, "ab");
+ } else {
return -1;
}

@@ -193,6 +204,7 @@ compress_log_file(char *log_name)
gzclose(outfile);
return -1;
}
+
bytes_read = fread(buf, 1, LOG_CHUNK, source);
while (bytes_read > 0) {
int bytes_written = gzwrite(outfile, buf, bytes_read);
@@ -3402,7 +3414,7 @@ log__open_accesslogfile(int logfile_state, int locked)
return LOG_UNABLE_TO_OPENFILE;
}
} else if (loginfo.log_access_compress) {
- if (compress_log_file(newfile) != 0) {
+ if (compress_log_file(newfile, loginfo.log_access_mode) != 0) {
slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile",
"failed to compress rotated access log (%s)\n",
newfile);
@@ -3570,7 +3582,7 @@ log__open_securitylogfile(int logfile_state, int locked)
return LOG_UNABLE_TO_OPENFILE;
}
} else if (loginfo.log_security_compress) {
- if (compress_log_file(newfile) != 0) {
+ if (compress_log_file(newfile, loginfo.log_security_mode) != 0) {
slapi_log_err(SLAPI_LOG_ERR, "log__open_securitylogfile",
"failed to compress rotated security audit log (%s)\n",
newfile);
@@ -6288,7 +6300,7 @@ log__open_errorlogfile(int logfile_state, int locked)
return LOG_UNABLE_TO_OPENFILE;
}
} else if (loginfo.log_error_compress) {
- if (compress_log_file(newfile) != 0) {
+ if (compress_log_file(newfile, loginfo.log_error_mode) != 0) {
PR_snprintf(buffer, sizeof(buffer), "Failed to compress errors log file (%s)\n", newfile);
log__error_emergency(buffer, 1, 1);
} else {
@@ -6476,7 +6488,7 @@ log__open_auditlogfile(int logfile_state, int locked)
return LOG_UNABLE_TO_OPENFILE;
}
} else if (loginfo.log_audit_compress) {
- if (compress_log_file(newfile) != 0) {
+ if (compress_log_file(newfile, loginfo.log_audit_mode) != 0) {
slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile",
"failed to compress rotated audit log (%s)\n",
newfile);
@@ -6641,7 +6653,7 @@ log__open_auditfaillogfile(int logfile_state, int locked)
return LOG_UNABLE_TO_OPENFILE;
}
} else if (loginfo.log_auditfail_compress) {
- if (compress_log_file(newfile) != 0) {
+ if (compress_log_file(newfile, loginfo.log_auditfail_mode) != 0) {
slapi_log_err(SLAPI_LOG_ERR, "log__open_auditfaillogfile",
"failed to compress rotated auditfail log (%s)\n",
newfile);
diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c
index a8e6b1210..9ef4ee4bf 100644
--- a/ldap/servers/slapd/schema.c
+++ b/ldap/servers/slapd/schema.c
@@ -903,7 +903,7 @@ oc_check_allowed_sv(Slapi_PBlock *pb, Slapi_Entry *e, const char *type, struct o

if (pb) {
PR_snprintf(errtext, sizeof(errtext),
- "attribute \"%s\" not allowed\n",
+ "attribute \"%s\" not allowed",
escape_string(type, ebuf));
slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, errtext);
}
--
2.49.0

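The same create-then-compress pattern, sketched in Python rather than the server's C (the function name and paths are illustrative, not from the patch):

```python
import gzip
import os

def compress_private(src_path, dst_path, mode=0o600):
    # Create the destination with restrictive permissions up front, so
    # there is no window where a world-readable file exists on disk.
    fd = os.open(dst_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode)
    with os.fdopen(fd, 'wb') as raw, gzip.GzipFile(fileobj=raw, mode='wb') as gz:
        with open(src_path, 'rb') as src:
            for chunk in iter(lambda: src.read(64 * 1024), b''):
                gz.write(chunk)
```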
116
0016-Issue-6878-Prevent-repeated-disconnect-logs-during-s.patch
Normal file
@ -0,0 +1,116 @@
From 1cc3715130650d9e778430792c5e2c2e9690cc72 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Fri, 18 Jul 2025 18:50:33 -0700
Subject: [PATCH] Issue 6878 - Prevent repeated disconnect logs during shutdown
 (#6879)

Description: Avoid logging non-active, initialized connections at the CONN log level in disconnect_server_nomutex_ext by adding a check that skips the invalid conn=0 entry with an invalid socket, preventing excessive repeated messages.

Update ds_logs_test.py by adding test_no_repeated_disconnect_messages to verify the fix.

Fixes: https://github.com/389ds/389-ds-base/issues/6878

Reviewed by: @mreynolds389 (Thanks!)
---
.../tests/suites/ds_logs/ds_logs_test.py | 51 ++++++++++++++++++-
ldap/servers/slapd/connection.c | 15 +++---
2 files changed, 59 insertions(+), 7 deletions(-)

diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
index 209d63b5d..6fd790c18 100644
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
@@ -24,7 +24,7 @@ from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, Aut
from lib389.idm.user import UserAccounts, UserAccount
from lib389.idm.group import Groups
from lib389.idm.organizationalunit import OrganizationalUnits
-from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, PASSWORD
+from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, PASSWORD, ErrorLog
from lib389.utils import ds_is_older, ds_is_newer
from lib389.config import RSA
from lib389.dseldif import DSEldif
@@ -1410,6 +1410,55 @@ def test_errorlog_buffering(topology_st, request):
assert inst.ds_error_log.match(".*slapd_daemon - slapd started.*")


+def test_no_repeated_disconnect_messages(topology_st):
+ """Test that there are no repeated "Not setting conn 0 to be disconnected: socket is invalid" messages on restart
+
+ :id: 72b5e1ce-2db8-458f-b2cd-0a0b6525f51f
+ :setup: Standalone Instance
+ :steps:
+ 1. Set error log level to CONNECTION
+ 2. Clear existing error logs
+ 3. Restart the server with 30 second timeout
+ 4. Check error log for repeated disconnect messages
+ 5. Verify there are no more than 10 occurrences of the disconnect message
+ :expectedresults:
+ 1. Error log level should be set successfully
+ 2. Error logs should be cleared
+ 3. Server should restart successfully within 30 seconds
+ 4. Error log should be accessible
+ 5. There should be no more than 10 repeated disconnect messages
+ """
+
+ inst = topology_st.standalone
+
+ log.info('Set error log level to CONNECTION')
+ inst.config.loglevel([ErrorLog.CONNECT])
+ current_level = inst.config.get_attr_val_int('nsslapd-errorlog-level')
+ log.info(f'Error log level set to: {current_level}')
+
+ log.info('Clear existing error logs')
+ inst.deleteErrorLogs()
+
+ log.info('Restart the server with 30 second timeout')
+ inst.restart(timeout=30)
+
+ log.info('Check error log for repeated disconnect messages')
+ disconnect_message = "Not setting conn 0 to be disconnected: socket is invalid"
+
+ # Count occurrences of the disconnect message
+ error_log_lines = inst.ds_error_log.readlines()
+ disconnect_count = 0
+
+ for line in error_log_lines:
+ if disconnect_message in line:
+ disconnect_count += 1
+
+ log.info(f'Found {disconnect_count} occurrences of disconnect message')
+
+ log.info('Verify there are no more than 10 occurrences')
+ assert disconnect_count <= 10, f"Found {disconnect_count} repeated disconnect messages, expected <= 10"
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index 9f3c374cf..b3ca2e773 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -2556,12 +2556,15 @@ disconnect_server_nomutex_ext(Connection *conn, PRUint64 opconnid, int opid, PRE
}
}
} else {
- slapi_log_err(SLAPI_LOG_CONNS, "disconnect_server_nomutex_ext",
- "Not setting conn %d to be disconnected: %s\n",
- conn->c_sd,
- (conn->c_sd == SLAPD_INVALID_SOCKET) ? "socket is invalid" :
- ((conn->c_connid != opconnid) ? "conn id does not match op conn id" :
- ((conn->c_flags & CONN_FLAG_CLOSING) ? "conn is closing" : "unknown")));
+ /* We avoid logging an invalid conn=0 connection as it is not a real connection. */
+ if (!(conn->c_sd == SLAPD_INVALID_SOCKET && conn->c_connid == 0)) {
+ slapi_log_err(SLAPI_LOG_CONNS, "disconnect_server_nomutex_ext",
+ "Not setting conn %d to be disconnected: %s\n",
+ conn->c_sd,
+ (conn->c_sd == SLAPD_INVALID_SOCKET) ? "socket is invalid" :
+ ((conn->c_connid != opconnid) ? "conn id does not match op conn id" :
+ ((conn->c_flags & CONN_FLAG_CLOSING) ? "conn is closing" : "unknown")));
+ }
}
}

--
2.49.0

590
0017-Issue-6888-Missing-access-JSON-logging-for-TLS-Clien.patch
Normal file
@ -0,0 +1,590 @@
From 5ff051f936a5bc4f5e1edb17ee1c3149b66644a2 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 16 Jul 2025 20:54:48 -0400
Subject: [PATCH] Issue 6888 - Missing access JSON logging for TLS/Client auth

Description:

TLS/Client auth logging was not converted to JSON (auth.c got missed)

Relates: https://github.com/389ds/389-ds-base/issues/6888

Reviewed by: spichugi (Thanks!)
---
.../logging/access_json_logging_test.py | 96 ++++++++-
ldap/servers/slapd/accesslog.c | 114 +++++++++++
ldap/servers/slapd/auth.c | 182 +++++++++++++-----
ldap/servers/slapd/log.c | 2 +
ldap/servers/slapd/slapi-private.h | 10 +
5 files changed, 353 insertions(+), 51 deletions(-)

diff --git a/dirsrvtests/tests/suites/logging/access_json_logging_test.py b/dirsrvtests/tests/suites/logging/access_json_logging_test.py
index ae91dc487..f0dc861a7 100644
--- a/dirsrvtests/tests/suites/logging/access_json_logging_test.py
+++ b/dirsrvtests/tests/suites/logging/access_json_logging_test.py
@@ -19,6 +19,8 @@ from lib389.idm.user import UserAccounts
from lib389.dirsrv_log import DirsrvAccessJSONLog
from lib389.index import VLVSearch, VLVIndex
from lib389.tasks import Tasks
+from lib389.config import CertmapLegacy
+from lib389.nss_ssl import NssSsl
from ldap.controls.vlv import VLVRequestControl
from ldap.controls.sss import SSSRequestControl
from ldap.controls import SimplePagedResultsControl
@@ -67,11 +69,11 @@ def get_log_event(inst, op, key=None, val=None, key2=None, val2=None):
if val == str(event[key]).lower() and \
val2 == str(event[key2]).lower():
return event
-
- elif key is not None and key in event:
- val = str(val).lower()
- if val == str(event[key]).lower():
- return event
+ elif key is not None:
+ if key in event:
+ val = str(val).lower()
+ if val == str(event[key]).lower():
+ return event
else:
return event

@@ -163,6 +165,7 @@ def test_access_json_format(topo_m2, setup_test):
14. Test PAGED SEARCH is logged correctly
15. Test PERSISTENT SEARCH is logged correctly
16. Test EXTENDED OP
+ 17. Test TLS_INFO is logged correctly
:expectedresults:
1. Success
2. Success
@@ -180,6 +183,7 @@ def test_access_json_format(topo_m2, setup_test):
14. Success
15. Success
16. Success
+ 17. Success
"""

inst = topo_m2.ms["supplier1"]
@@ -560,6 +564,88 @@ def test_access_json_format(topo_m2, setup_test):
assert event['oid_name'] == "REPL_END_NSDS50_REPLICATION_REQUEST_OID"
assert event['name'] == "replication-multisupplier-extop"

+ #
+ # TLS INFO/TLS CLIENT INFO
+ #
+ RDN_TEST_USER = 'testuser'
+ RDN_TEST_USER_WRONG = 'testuser_wrong'
+ inst.enable_tls()
+ inst.restart()
+
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
+ user = users.create(properties={
+ 'uid': RDN_TEST_USER,
+ 'cn': RDN_TEST_USER,
+ 'sn': RDN_TEST_USER,
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': f'/home/{RDN_TEST_USER}'
+ })
+
+ ssca_dir = inst.get_ssca_dir()
+ ssca = NssSsl(dbpath=ssca_dir)
+ ssca.create_rsa_user(RDN_TEST_USER)
+ ssca.create_rsa_user(RDN_TEST_USER_WRONG)
+
+ # Get the details of where the key and crt are.
+ tls_locs = ssca.get_rsa_user(RDN_TEST_USER)
+ tls_locs_wrong = ssca.get_rsa_user(RDN_TEST_USER_WRONG)
+
+ user.enroll_certificate(tls_locs['crt_der_path'])
+
+ # Turn on the certmap.
+ cm = CertmapLegacy(inst)
+ certmaps = cm.list()
+ certmaps['default']['DNComps'] = ''
+ certmaps['default']['FilterComps'] = ['cn']
+ certmaps['default']['VerifyCert'] = 'off'
+ cm.set(certmaps)
+
+ # Check that EXTERNAL is listed in supported mechns.
+ assert (inst.rootdse.supports_sasl_external())
+
+ # Restart to allow certmaps to be re-read: Note, we CAN NOT use post_open
+ # here, it breaks on auth. see lib389/__init__.py
+ inst.restart(post_open=False)
+
+ # Attempt a bind with TLS external
+ inst.open(saslmethod='EXTERNAL', connOnly=True, certdir=ssca_dir,
+ userkey=tls_locs['key'], usercert=tls_locs['crt'])
+ inst.restart()
+
+ event = get_log_event(inst, "TLS_INFO")
+ assert event is not None
+ assert 'tls_version' in event
+ assert 'keysize' in event
+ assert 'cipher' in event
+
+ event = get_log_event(inst, "TLS_CLIENT_INFO",
+ "subject",
+ "CN=testuser,O=testing,L=389ds,ST=Queensland,C=AU")
+ assert event is not None
+ assert 'tls_version' in event
+ assert 'keysize' in event
+ assert 'issuer' in event
+
+ event = get_log_event(inst, "TLS_CLIENT_INFO",
+ "client_dn",
+ "uid=testuser,ou=People,dc=example,dc=com")
+ assert event is not None
+ assert 'tls_version' in event
+ assert event['msg'] == "client bound"
+
+ # Check for failed certmap error
+ with pytest.raises(ldap.INVALID_CREDENTIALS):
+ inst.open(saslmethod='EXTERNAL', connOnly=True, certdir=ssca_dir,
+ userkey=tls_locs_wrong['key'],
+ usercert=tls_locs_wrong['crt'])
+
+ event = get_log_event(inst, "TLS_CLIENT_INFO", "err", -185)
+ assert event is not None
+ assert 'tls_version' in event
+ assert event['msg'] == "failed to map client certificate to LDAP DN"
+ assert event['err_msg'] == "Certificate couldn't be mapped to an ldap entry"
+

if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
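Outside the test harness, the new events can also be pulled from the JSON access log directly. A sketch, assuming one JSON object per line (as the DirsrvAccessJSONLog helper used above expects) and that the event-type field is named 'operation' — the exact key comes from build_base_obj(), which this listing does not show:

```python
import json

def find_events(access_log_path, op_type='TLS_INFO'):
    events = []
    with open(access_log_path) as log_file:
        for line in log_file:
            try:
                event = json.loads(line)
            except ValueError:
                continue  # tolerate any non-JSON lines in the log
            if event.get('operation') == op_type:
                events.append(event)
    return events
```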
index 68022fe38..072ace203 100644
|
||||
--- a/ldap/servers/slapd/accesslog.c
|
||||
+++ b/ldap/servers/slapd/accesslog.c
|
||||
@@ -1147,3 +1147,117 @@ slapd_log_access_sort(slapd_log_pblock *logpb)
|
||||
|
||||
return rc;
|
||||
}
|
||||
+
|
||||
+/*
|
||||
+ * TLS connection
|
||||
+ *
|
||||
+ * int32_t log_format
|
||||
+ * time_t conn_time
|
||||
+ * uint64_t conn_id
|
||||
+ * const char *msg
|
||||
+ * const char *tls_version
|
||||
+ * int32_t keysize
|
||||
+ * const char *cipher
|
||||
+ * int32_t err
|
||||
+ * const char *err_str
|
||||
+ */
|
||||
+int32_t
|
||||
+slapd_log_access_tls(slapd_log_pblock *logpb)
|
||||
+{
|
||||
+ int32_t rc = 0;
|
||||
+ char *msg = NULL;
|
||||
+ json_object *json_obj = NULL;
|
||||
+
|
||||
+ if ((json_obj = build_base_obj(logpb, "TLS_INFO")) == NULL) {
|
||||
+ return rc;
|
||||
+ }
|
||||
+
|
||||
+ if (logpb->msg) {
|
||||
+ json_object_object_add(json_obj, "msg", json_obj_add_str(logpb->msg));
|
||||
+ }
|
||||
+ if (logpb->tls_version) {
|
||||
+ json_object_object_add(json_obj, "tls_version", json_obj_add_str(logpb->tls_version));
|
||||
+ }
|
||||
+ if (logpb->cipher) {
|
||||
+ json_object_object_add(json_obj, "cipher", json_obj_add_str(logpb->cipher));
|
||||
+ }
|
||||
+ if (logpb->keysize) {
|
||||
+ json_object_object_add(json_obj, "keysize", json_object_new_int(logpb->keysize));
|
||||
+ }
|
||||
+ if (logpb->err_str) {
|
||||
+ json_object_object_add(json_obj, "err", json_object_new_int(logpb->err));
|
||||
+ json_object_object_add(json_obj, "err_msg", json_obj_add_str(logpb->err_str));
|
||||
+ }
|
||||
+
|
||||
+ /* Convert json object to string and log it */
|
||||
+ msg = (char *)json_object_to_json_string_ext(json_obj, logpb->log_format);
|
||||
+ rc = slapd_log_access_json(msg);
|
||||
+
|
||||
+ /* Done with JSON object - free it */
|
||||
+ json_object_put(json_obj);
|
||||
+
|
||||
+ return rc;
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * TLS client auth
|
||||
+ *
|
||||
+ * int32_t log_format
|
||||
+ * time_t conn_time
|
||||
+ * uint64_t conn_id
|
||||
+ * const char* tls_version
|
||||
+ * const char* keysize
|
||||
+ * const char* cipher
|
||||
+ * const char* msg
|
||||
+ * const char* subject
|
||||
+ * const char* issuer
|
||||
+ * int32_t err
|
||||
+ * const char* err_str
|
||||
+ * const char *client_dn
|
||||
+ */
|
||||
+int32_t
|
||||
+slapd_log_access_tls_client_auth(slapd_log_pblock *logpb)
|
||||
+{
|
||||
+ int32_t rc = 0;
|
||||
+ char *msg = NULL;
|
||||
+ json_object *json_obj = NULL;
|
||||
+
|
||||
+ if ((json_obj = build_base_obj(logpb, "TLS_CLIENT_INFO")) == NULL) {
|
||||
+ return rc;
|
||||
+ }
|
||||
+
|
||||
+ if (logpb->tls_version) {
|
||||
+ json_object_object_add(json_obj, "tls_version", json_obj_add_str(logpb->tls_version));
|
||||
+ }
|
||||
+ if (logpb->cipher) {
|
||||
+ json_object_object_add(json_obj, "cipher", json_obj_add_str(logpb->cipher));
|
||||
+ }
|
||||
+ if (logpb->keysize) {
|
||||
+ json_object_object_add(json_obj, "keysize", json_object_new_int(logpb->keysize));
|
||||
+ }
|
||||
+ if (logpb->subject) {
|
||||
+ json_object_object_add(json_obj, "subject", json_obj_add_str(logpb->subject));
|
||||
+ }
|
||||
+ if (logpb->issuer) {
|
||||
+ json_object_object_add(json_obj, "issuer", json_obj_add_str(logpb->issuer));
|
||||
+ }
|
||||
+ if (logpb->client_dn) {
|
||||
+ json_object_object_add(json_obj, "client_dn", json_obj_add_str(logpb->client_dn));
|
||||
+ }
|
||||
+ if (logpb->msg) {
|
||||
+ json_object_object_add(json_obj, "msg", json_obj_add_str(logpb->msg));
|
||||
+ }
|
||||
+ if (logpb->err_str) {
|
||||
+ json_object_object_add(json_obj, "err", json_object_new_int(logpb->err));
|
||||
+ json_object_object_add(json_obj, "err_msg", json_obj_add_str(logpb->err_str));
|
||||
+ }
|
||||
+
|
||||
+ /* Convert json object to string and log it */
|
||||
+ msg = (char *)json_object_to_json_string_ext(json_obj, logpb->log_format);
|
||||
+ rc = slapd_log_access_json(msg);
|
||||
+
|
||||
+ /* Done with JSON object - free it */
|
||||
+ json_object_put(json_obj);
|
||||
+
|
||||
+ return rc;
|
||||
+}
|
||||
diff --git a/ldap/servers/slapd/auth.c b/ldap/servers/slapd/auth.c
|
||||
index e4231bf45..48e4b7129 100644
|
||||
--- a/ldap/servers/slapd/auth.c
|
||||
+++ b/ldap/servers/slapd/auth.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -363,19 +363,32 @@ handle_bad_certificate(void *clientData, PRFileDesc *prfd)
|
||||
char sbuf[BUFSIZ], ibuf[BUFSIZ];
|
||||
Connection *conn = (Connection *)clientData;
|
||||
CERTCertificate *clientCert = slapd_ssl_peerCertificate(prfd);
|
||||
-
|
||||
PRErrorCode errorCode = PR_GetError();
|
||||
char *subject = subject_of(clientCert);
|
||||
char *issuer = issuer_of(clientCert);
|
||||
- slapi_log_access(LDAP_DEBUG_STATS,
|
||||
- "conn=%" PRIu64 " " SLAPI_COMPONENT_NAME_NSPR " error %i (%s); unauthenticated client %s; issuer %s\n",
|
||||
- conn->c_connid, errorCode, slapd_pr_strerror(errorCode),
|
||||
- subject ? escape_string(subject, sbuf) : "NULL",
|
||||
- issuer ? escape_string(issuer, ibuf) : "NULL");
|
||||
+ int32_t log_format = config_get_accesslog_log_format();
|
||||
+ slapd_log_pblock logpb = {0};
|
||||
+
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.msg = "unauthenticated client";
|
||||
+ logpb.subject = subject ? escape_string(subject, sbuf) : "NULL";
|
||||
+ logpb.issuer = issuer ? escape_string(issuer, ibuf) : "NULL";
|
||||
+ logpb.err = errorCode;
|
||||
+ logpb.err_str = slapd_pr_strerror(errorCode);
|
||||
+ slapd_log_access_tls_client_auth(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " " SLAPI_COMPONENT_NAME_NSPR " error %i (%s); unauthenticated client %s; issuer %s\n",
|
||||
+ conn->c_connid, errorCode, slapd_pr_strerror(errorCode),
|
||||
+ subject ? escape_string(subject, sbuf) : "NULL",
|
||||
+ issuer ? escape_string(issuer, ibuf) : "NULL");
|
||||
+ }
|
||||
if (issuer)
|
||||
- free(issuer);
|
||||
+ slapi_ch_free_string(&issuer);
|
||||
if (subject)
|
||||
- free(subject);
|
||||
+ slapi_ch_free_string(&subject);
|
||||
if (clientCert)
|
||||
CERT_DestroyCertificate(clientCert);
|
||||
return -1; /* non-zero means reject this certificate */
|
||||
@@ -394,7 +407,8 @@ handle_handshake_done(PRFileDesc *prfd, void *clientData)
|
||||
{
|
||||
Connection *conn = (Connection *)clientData;
|
||||
CERTCertificate *clientCert = slapd_ssl_peerCertificate(prfd);
|
||||
-
|
||||
+ int32_t log_format = config_get_accesslog_log_format();
|
||||
+ slapd_log_pblock logpb = {0};
|
||||
char *clientDN = NULL;
|
||||
int keySize = 0;
|
||||
char *cipher = NULL;
|
||||
@@ -403,19 +417,39 @@ handle_handshake_done(PRFileDesc *prfd, void *clientData)
|
||||
SSLCipherSuiteInfo cipherInfo;
|
||||
char *subject = NULL;
|
||||
char sslversion[64];
|
||||
+ int err = 0;
|
||||
|
||||
if ((slapd_ssl_getChannelInfo(prfd, &channelInfo, sizeof(channelInfo))) != SECSuccess) {
|
||||
PRErrorCode errorCode = PR_GetError();
|
||||
- slapi_log_access(LDAP_DEBUG_STATS,
|
||||
- "conn=%" PRIu64 " SSL failed to obtain channel info; " SLAPI_COMPONENT_NAME_NSPR " error %i (%s)\n",
|
||||
- conn->c_connid, errorCode, slapd_pr_strerror(errorCode));
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.err = errorCode;
|
||||
+ logpb.err_str = slapd_pr_strerror(errorCode);
|
||||
+ logpb.msg = "SSL failed to obtain channel info; " SLAPI_COMPONENT_NAME_NSPR;
|
||||
+ slapd_log_access_tls(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " SSL failed to obtain channel info; " SLAPI_COMPONENT_NAME_NSPR " error %i (%s)\n",
|
||||
+ conn->c_connid, errorCode, slapd_pr_strerror(errorCode));
|
||||
+ }
|
||||
goto done;
|
||||
}
|
||||
+
|
||||
if ((slapd_ssl_getCipherSuiteInfo(channelInfo.cipherSuite, &cipherInfo, sizeof(cipherInfo))) != SECSuccess) {
|
||||
PRErrorCode errorCode = PR_GetError();
|
||||
- slapi_log_access(LDAP_DEBUG_STATS,
|
||||
- "conn=%" PRIu64 " SSL failed to obtain cipher info; " SLAPI_COMPONENT_NAME_NSPR " error %i (%s)\n",
|
||||
- conn->c_connid, errorCode, slapd_pr_strerror(errorCode));
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.err = errorCode;
|
||||
+ logpb.err_str = slapd_pr_strerror(errorCode);
|
||||
+ logpb.msg = "SSL failed to obtain cipher info; " SLAPI_COMPONENT_NAME_NSPR;
|
||||
+ slapd_log_access_tls(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " SSL failed to obtain cipher info; " SLAPI_COMPONENT_NAME_NSPR " error %i (%s)\n",
|
||||
+ conn->c_connid, errorCode, slapd_pr_strerror(errorCode));
|
||||
+ }
|
||||
goto done;
|
||||
}
|
||||
|
||||
@@ -434,47 +468,84 @@ handle_handshake_done(PRFileDesc *prfd, void *clientData)
|
||||
|
||||
if (config_get_SSLclientAuth() == SLAPD_SSLCLIENTAUTH_OFF) {
|
||||
(void)slapi_getSSLVersion_str(channelInfo.protocolVersion, sslversion, sizeof(sslversion));
|
||||
- slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " %s %i-bit %s\n",
|
||||
- conn->c_connid,
|
||||
- sslversion, keySize, cipher ? cipher : "NULL");
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.tls_version = sslversion;
|
||||
+ logpb.keysize = keySize;
|
||||
+ logpb.cipher = cipher ? cipher : "NULL";
|
||||
+ slapd_log_access_tls(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " %s %i-bit %s\n",
|
||||
+ conn->c_connid,
|
||||
+ sslversion, keySize, cipher ? cipher : "NULL");
|
||||
+ }
|
||||
goto done;
|
||||
}
|
||||
if (clientCert == NULL) {
|
||||
(void)slapi_getSSLVersion_str(channelInfo.protocolVersion, sslversion, sizeof(sslversion));
|
||||
- slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " %s %i-bit %s\n",
|
||||
- conn->c_connid,
|
||||
- sslversion, keySize, cipher ? cipher : "NULL");
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.tls_version = sslversion;
|
||||
+ logpb.keysize = keySize;
|
||||
+ logpb.cipher = cipher ? cipher : "NULL";
|
||||
+ slapd_log_access_tls(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS, "conn=%" PRIu64 " %s %i-bit %s\n",
|
||||
+ conn->c_connid,
|
||||
+ sslversion, keySize, cipher ? cipher : "NULL");
|
||||
+ }
|
||||
} else {
|
||||
subject = subject_of(clientCert);
|
||||
if (!subject) {
|
||||
(void)slapi_getSSLVersion_str(channelInfo.protocolVersion,
|
||||
sslversion, sizeof(sslversion));
|
||||
- slapi_log_access(LDAP_DEBUG_STATS,
|
||||
- "conn=%" PRIu64 " %s %i-bit %s; missing subject\n",
|
||||
- conn->c_connid,
|
||||
- sslversion, keySize, cipher ? cipher : "NULL");
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.msg = "missing subject";
|
||||
+ logpb.tls_version = sslversion;
|
||||
+ logpb.keysize = keySize;
|
||||
+ logpb.cipher = cipher ? cipher : "NULL";
|
||||
+ slapd_log_access_tls_client_auth(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " %s %i-bit %s; missing subject\n",
|
||||
+ conn->c_connid,
|
||||
+ sslversion, keySize, cipher ? cipher : "NULL");
|
||||
+ }
|
||||
goto done;
|
||||
- }
|
||||
- {
|
||||
+ } else {
|
||||
char *issuer = issuer_of(clientCert);
|
||||
char sbuf[BUFSIZ], ibuf[BUFSIZ];
|
||||
(void)slapi_getSSLVersion_str(channelInfo.protocolVersion,
|
||||
sslversion, sizeof(sslversion));
|
||||
- slapi_log_access(LDAP_DEBUG_STATS,
|
||||
- "conn=%" PRIu64 " %s %i-bit %s; client %s; issuer %s\n",
|
||||
- conn->c_connid,
|
||||
- sslversion, keySize,
|
||||
- cipher ? cipher : "NULL",
|
||||
- escape_string(subject, sbuf),
|
||||
- issuer ? escape_string(issuer, ibuf) : "NULL");
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.tls_version = sslversion;
|
||||
+ logpb.keysize = keySize;
|
||||
+ logpb.cipher = cipher ? cipher : "NULL";
|
||||
+ logpb.subject = escape_string(subject, sbuf);
|
||||
+ logpb.issuer = issuer ? escape_string(issuer, ibuf) : "NULL";
|
||||
+ slapd_log_access_tls_client_auth(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " %s %i-bit %s; client %s; issuer %s\n",
|
||||
+ conn->c_connid,
|
||||
+ sslversion, keySize,
|
||||
+ cipher ? cipher : "NULL",
|
||||
+ escape_string(subject, sbuf),
|
||||
+ issuer ? escape_string(issuer, ibuf) : "NULL");
|
||||
+ }
|
||||
if (issuer)
|
||||
- free(issuer);
|
||||
+ slapi_ch_free_string(&issuer);
|
||||
}
|
||||
slapi_dn_normalize(subject);
|
||||
{
|
||||
LDAPMessage *chain = NULL;
|
||||
char *basedn = config_get_basedn();
|
||||
- int err;
|
||||
|
||||
err = ldapu_cert_to_ldap_entry(clientCert, internal_ld, basedn ? basedn : "" /*baseDN*/, &chain);
|
||||
if (err == LDAPU_SUCCESS && chain) {
|
||||
@@ -505,18 +576,37 @@ handle_handshake_done(PRFileDesc *prfd, void *clientData)
|
||||
slapi_sdn_free(&sdn);
|
||||
(void)slapi_getSSLVersion_str(channelInfo.protocolVersion,
|
||||
sslversion, sizeof(sslversion));
|
||||
- slapi_log_access(LDAP_DEBUG_STATS,
|
||||
- "conn=%" PRIu64 " %s client bound as %s\n",
|
||||
- conn->c_connid,
|
||||
- sslversion, clientDN);
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.msg = "client bound";
|
||||
+ logpb.tls_version = sslversion;
|
||||
+ logpb.client_dn = clientDN;
|
||||
+ slapd_log_access_tls_client_auth(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " %s client bound as %s\n",
|
||||
+ conn->c_connid,
|
||||
+ sslversion, clientDN);
|
||||
+ }
|
||||
} else if (clientCert != NULL) {
|
||||
(void)slapi_getSSLVersion_str(channelInfo.protocolVersion,
|
||||
sslversion, sizeof(sslversion));
|
||||
- slapi_log_access(LDAP_DEBUG_STATS,
|
||||
- "conn=%" PRIu64 " %s failed to map client "
|
||||
- "certificate to LDAP DN (%s)\n",
|
||||
- conn->c_connid,
|
||||
- sslversion, extraErrorMsg);
|
||||
+ if (log_format != LOG_FORMAT_DEFAULT) {
|
||||
+ slapd_log_pblock_init(&logpb, log_format, NULL);
|
||||
+ logpb.conn_id = conn->c_connid;
|
||||
+ logpb.msg = "failed to map client certificate to LDAP DN";
|
||||
+ logpb.tls_version = sslversion;
|
||||
+ logpb.err = err;
|
||||
+ logpb.err_str = extraErrorMsg;
|
||||
+ slapd_log_access_tls_client_auth(&logpb);
|
||||
+ } else {
|
||||
+ slapi_log_access(LDAP_DEBUG_STATS,
|
||||
+ "conn=%" PRIu64 " %s failed to map client "
|
||||
+ "certificate to LDAP DN (%s)\n",
|
||||
+ conn->c_connid,
|
||||
+ sslversion, extraErrorMsg);
|
||||
+ }
|
||||
}
|
||||
|
||||
/*
|
||||
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
|
||||
index f535011ab..91ba23047 100644
|
||||
--- a/ldap/servers/slapd/log.c
|
||||
+++ b/ldap/servers/slapd/log.c
|
||||
@@ -7270,6 +7270,8 @@ slapd_log_pblock_init(slapd_log_pblock *logpb, int32_t log_format, Slapi_PBlock
|
||||
slapi_pblock_get(pb, SLAPI_CONNECTION, &conn);
|
||||
}
|
||||
|
||||
+ memset(logpb, 0, sizeof(slapd_log_pblock));
|
||||
+
|
||||
logpb->loginfo = &loginfo;
|
||||
logpb->level = 256; /* default log level */
|
||||
logpb->log_format = log_format;
|
||||
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
|
||||
index 6438a81fe..da232ae2f 100644
|
||||
--- a/ldap/servers/slapd/slapi-private.h
|
||||
+++ b/ldap/servers/slapd/slapi-private.h
|
||||
@@ -1549,6 +1549,13 @@ typedef struct slapd_log_pblock {
|
||||
PRBool using_tls;
|
||||
PRBool haproxied;
|
||||
const char *bind_dn;
|
||||
+ /* TLS */
|
||||
+ const char *tls_version;
|
||||
+ int32_t keysize;
|
||||
+ const char *cipher;
|
||||
+ const char *subject;
|
||||
+ const char *issuer;
|
||||
+ const char *client_dn;
|
||||
/* Close connection */
|
||||
const char *close_error;
|
||||
const char *close_reason;
|
||||
@@ -1619,6 +1626,7 @@ typedef struct slapd_log_pblock {
|
||||
const char *oid;
|
||||
const char *msg;
|
||||
const char *name;
|
||||
+ const char *err_str;
|
||||
LDAPControl **request_controls;
|
||||
LDAPControl **response_controls;
|
||||
} slapd_log_pblock;
|
||||
@@ -1645,6 +1653,8 @@ int32_t slapd_log_access_entry(slapd_log_pblock *logpb);
|
||||
int32_t slapd_log_access_referral(slapd_log_pblock *logpb);
|
||||
int32_t slapd_log_access_extop(slapd_log_pblock *logpb);
|
||||
int32_t slapd_log_access_sort(slapd_log_pblock *logpb);
|
||||
+int32_t slapd_log_access_tls(slapd_log_pblock *logpb);
|
||||
+int32_t slapd_log_access_tls_client_auth(slapd_log_pblock *logpb);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
--
|
||||
2.49.0
|
||||
|
||||
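With JSON access logging enabled, the two helpers above each emit one JSON object per log line. As a minimal sketch of consuming those events — the log path and the name of the key that carries the event type are assumptions here, while the field names (tls_version, cipher, keysize, subject, issuer, client_dn, err, err_msg) come straight from the patch:

```python
import json

def find_tls_events(log_path, event_key="operation"):
    """Collect TLS handshake events written by slapd_log_access_tls()
    and slapd_log_access_tls_client_auth() from a JSON-format access log."""
    events = []
    with open(log_path) as f:
        for line in f:
            try:
                entry = json.loads(line)
            except ValueError:
                continue  # skip any old-format (non-JSON) lines
            if entry.get(event_key) in ("TLS_INFO", "TLS_CLIENT_INFO"):
                events.append(entry)
    return events
```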
@ -0,0 +1,43 @@
From 091016df4680e1f9ffc3f78292583800626153c2 Mon Sep 17 00:00:00 2001
From: Barbora Simonova <bsmejkal@redhat.com>
Date: Thu, 17 Jul 2025 16:46:57 +0200
Subject: [PATCH] Issue 6829 - Update parametrized docstring for tests

Description:
Update the rest of the missing parametrized values

Relates: https://github.com/389ds/389-ds-base/issues/6829

Reviewed by: @droideck (Thanks!)
---
 dirsrvtests/tests/suites/logging/error_json_logging_test.py | 1 +
 .../tests/suites/schema/schema_replication_origin_test.py   | 1 +
 2 files changed, 2 insertions(+)

diff --git a/dirsrvtests/tests/suites/logging/error_json_logging_test.py b/dirsrvtests/tests/suites/logging/error_json_logging_test.py
index 87e1840a6..e0b3d7317 100644
--- a/dirsrvtests/tests/suites/logging/error_json_logging_test.py
+++ b/dirsrvtests/tests/suites/logging/error_json_logging_test.py
@@ -29,6 +29,7 @@ def test_error_json_format(topo, log_format):
    """Test error log is in JSON

    :id: c9afb295-43de-4581-af8b-ec8f25a06d75
+    :parametrized: yes
    :setup: Standalone
    :steps:
        1. Check error log has json and the expected data is present
diff --git a/dirsrvtests/tests/suites/schema/schema_replication_origin_test.py b/dirsrvtests/tests/suites/schema/schema_replication_origin_test.py
index 9e4ce498c..e93dddad0 100644
--- a/dirsrvtests/tests/suites/schema/schema_replication_origin_test.py
+++ b/dirsrvtests/tests/suites/schema/schema_replication_origin_test.py
@@ -157,6 +157,7 @@ def test_schema_xorigin_repl(topology, schema_replication_init, xorigin):
       schema is pushed and there is a message in the error log

    :id: 2b29823b-3e83-4b25-954a-8a081dbc15ee
+    :parametrized: yes
    :setup: Supplier and consumer topology, with one user entry;
       Supplier, hub and consumer topology, with one user entry
    :steps:
--
2.49.0

@ -0,0 +1,67 @@
From 863c244cc137376ee8da0f007fc9b9da88d8dbc0 Mon Sep 17 00:00:00 2001
From: Anuar Beisembayev <111912342+abeisemb@users.noreply.github.com>
Date: Wed, 23 Jul 2025 23:48:11 -0400
Subject: [PATCH] Issue 6772 - dsconf - Replicas with the "consumer" role allow
 for viewing and modification of their changelog. (#6773)

dsconf currently allows users to set and retrieve changelogs in consumer replicas, which do not have officially supported changelogs. This can lead to undefined behavior and confusion.
This commit prints a warning message if the user tries to interact with a changelog on a consumer replica.

Resolves: https://github.com/389ds/389-ds-base/issues/6772

Reviewed by: @droideck
---
 src/lib389/lib389/cli_conf/replication.py | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py
index 6f77f34ca..a18bf83ca 100644
--- a/src/lib389/lib389/cli_conf/replication.py
+++ b/src/lib389/lib389/cli_conf/replication.py
@@ -686,6 +686,9 @@ def set_per_backend_cl(inst, basedn, log, args):
    replace_list = []
    did_something = False

+    if (is_replica_role_consumer(inst, suffix)):
+        log.info("Warning: Changelogs are not supported for consumer replicas. You may run into undefined behavior.")
+
    if args.encrypt:
        cl.replace('nsslapd-encryptionalgorithm', 'AES')
        del args.encrypt
@@ -715,6 +718,10 @@ def set_per_backend_cl(inst, basedn, log, args):
 # that means there is a changelog config entry per backend (aka suffix)
 def get_per_backend_cl(inst, basedn, log, args):
    suffix = args.suffix
+
+    if (is_replica_role_consumer(inst, suffix)):
+        log.info("Warning: Changelogs are not supported for consumer replicas. You may run into undefined behavior.")
+
    cl = Changelog(inst, suffix)
    if args and args.json:
        log.info(cl.get_all_attrs_json())
@@ -822,6 +829,22 @@ def del_repl_manager(inst, basedn, log, args):

    log.info("Successfully deleted replication manager: " + manager_dn)

+def is_replica_role_consumer(inst, suffix):
+    """Helper function for get_per_backend_cl and set_per_backend_cl.
+    Makes sure the instance in question is not a consumer, which is a role that
+    does not support changelogs.
+    """
+    replicas = Replicas(inst)
+    try:
+        replica = replicas.get(suffix)
+        role = replica.get_role()
+    except ldap.NO_SUCH_OBJECT:
+        raise ValueError(f"Backend \"{suffix}\" is not enabled for replication")
+
+    if role == ReplicaRole.CONSUMER:
+        return True
+    else:
+        return False

 #
 # Agreements
--
2.49.0

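The helper relies on existing lib389 primitives (Replicas, get_role(), ReplicaRole). A standalone sketch of the same check outside dsconf, assuming `inst` is a connected DirSrv instance and the suffix is replicated:

```python
import ldap
from lib389._constants import ReplicaRole
from lib389.replica import Replicas

def changelog_supported(inst, suffix):
    """Mirror of the patch's consumer check: consumer replicas have no
    officially supported changelog, so callers should warn before use."""
    try:
        replica = Replicas(inst).get(suffix)
    except ldap.NO_SUCH_OBJECT:
        raise ValueError(f'Backend "{suffix}" is not enabled for replication')
    return replica.get_role() != ReplicaRole.CONSUMER
```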
360
0020-Issue-6893-Log-user-that-is-updated-during-password-.patch
Normal file
@ -0,0 +1,360 @@
From 79b68019ff4b17c4b80fd2c6c725071a050559ca Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 21 Jul 2025 18:07:21 -0400
Subject: [PATCH] Issue 6893 - Log user that is updated during password modify
 extended operation

Description:

When a user's password is updated via an extended operation (password modify
plugin) we only log the bind DN and not what user was updated. While "internal
operation" logging will display the user, it should be logged at the default
logging level.

Add access logging using "EXT_INFO" for the old logging format, and
"EXTENDED_OP_INFO" for json logging where we display the bind dn, target
dn, and message.

Relates: https://github.com/389ds/389-ds-base/issues/6893

Reviewed by: spichugi & tbordaz(Thanks!!)
---
 .../logging/access_json_logging_test.py | 98 +++++++++++++----
 ldap/servers/slapd/accesslog.c          | 47 +++++++++
 ldap/servers/slapd/passwd_extop.c       | 69 +++++------
 ldap/servers/slapd/slapi-private.h      |  1 +
 4 files changed, 169 insertions(+), 46 deletions(-)

diff --git a/dirsrvtests/tests/suites/logging/access_json_logging_test.py b/dirsrvtests/tests/suites/logging/access_json_logging_test.py
index f0dc861a7..699bd8c4d 100644
--- a/dirsrvtests/tests/suites/logging/access_json_logging_test.py
+++ b/dirsrvtests/tests/suites/logging/access_json_logging_test.py
@@ -11,7 +11,7 @@ import os
 import time
 import ldap
 import pytest
-from lib389._constants import DEFAULT_SUFFIX, PASSWORD, LOG_ACCESS_LEVEL
+from lib389._constants import DEFAULT_SUFFIX, PASSWORD, LOG_ACCESS_LEVEL, DN_DM
 from lib389.properties import TASK_WAIT
 from lib389.topologies import topology_m2 as topo_m2
 from lib389.idm.group import Groups
@@ -548,22 +548,6 @@ def test_access_json_format(topo_m2, setup_test):
                     "2.16.840.1.113730.3.4.3",
                     "LDAP_CONTROL_PERSISTENTSEARCH")

-    #
-    # Extended op
-    #
-    log.info("Test EXTENDED_OP")
-    event = get_log_event(inst, "EXTENDED_OP", "oid",
-                          "2.16.840.1.113730.3.5.12")
-    assert event is not None
-    assert event['oid_name'] == "REPL_START_NSDS90_REPLICATION_REQUEST_OID"
-    assert event['name'] == "replication-multisupplier-extop"
-
-    event = get_log_event(inst, "EXTENDED_OP", "oid",
-                          "2.16.840.1.113730.3.5.5")
-    assert event is not None
-    assert event['oid_name'] == "REPL_END_NSDS50_REPLICATION_REQUEST_OID"
-    assert event['name'] == "replication-multisupplier-extop"
-
     #
     # TLS INFO/TLS CLIENT INFO
     #
@@ -579,7 +563,8 @@ def test_access_json_format(topo_m2, setup_test):
         'sn': RDN_TEST_USER,
         'uidNumber': '1000',
         'gidNumber': '2000',
-        'homeDirectory': f'/home/{RDN_TEST_USER}'
+        'homeDirectory': f'/home/{RDN_TEST_USER}',
+        'userpassword': 'password'
     })

     ssca_dir = inst.get_ssca_dir()
@@ -646,6 +631,83 @@ def test_access_json_format(topo_m2, setup_test):
     assert event['msg'] == "failed to map client certificate to LDAP DN"
     assert event['err_msg'] == "Certificate couldn't be mapped to an ldap entry"

+    #
+    # Extended op
+    #
+    log.info("Test EXTENDED_OP")
+    event = get_log_event(inst, "EXTENDED_OP", "oid",
+                          "2.16.840.1.113730.3.5.12")
+    assert event is not None
+    assert event['oid_name'] == "REPL_START_NSDS90_REPLICATION_REQUEST_OID"
+    assert event['name'] == "replication-multisupplier-extop"
+
+    event = get_log_event(inst, "EXTENDED_OP", "oid",
+                          "2.16.840.1.113730.3.5.5")
+    assert event is not None
+    assert event['oid_name'] == "REPL_END_NSDS50_REPLICATION_REQUEST_OID"
+    assert event['name'] == "replication-multisupplier-extop"
+
+    #
+    # Extended op info
+    #
+    log.info("Test EXTENDED_OP_INFO")
+    OLD_PASSWD = 'password'
+    NEW_PASSWD = 'newpassword'
+
+    assert inst.simple_bind_s(DN_DM, PASSWORD)
+
+    assert inst.passwd_s(user.dn, OLD_PASSWD, NEW_PASSWD)
+    event = get_log_event(inst, "EXTENDED_OP_INFO", "name",
+                          "passwd_modify_plugin")
+    assert event is not None
+    assert event['bind_dn'] == "cn=directory manager"
+    assert event['target_dn'] == user.dn.lower()
+    assert event['msg'] == "success"
+
+    # Test no such object
+    BAD_DN = user.dn + ",dc=not"
+    with pytest.raises(ldap.NO_SUCH_OBJECT):
+        inst.passwd_s(BAD_DN, OLD_PASSWD, NEW_PASSWD)
+
+    event = get_log_event(inst, "EXTENDED_OP_INFO", "target_dn", BAD_DN)
+    assert event is not None
+    assert event['bind_dn'] == "cn=directory manager"
+    assert event['target_dn'] == BAD_DN.lower()
+    assert event['msg'] == "No such entry exists."
+
+    # Test invalid old password
+    with pytest.raises(ldap.INVALID_CREDENTIALS):
+        inst.passwd_s(user.dn, "not_the_old_pw", NEW_PASSWD)
+    event = get_log_event(inst, "EXTENDED_OP_INFO", "err", 49)
+    assert event is not None
+    assert event['bind_dn'] == "cn=directory manager"
+    assert event['target_dn'] == user.dn.lower()
+    assert event['msg'] == "Invalid oldPasswd value."
+
+    # Test user without permissions
+    user2 = users.create(properties={
+        'uid': RDN_TEST_USER + "2",
+        'cn': RDN_TEST_USER + "2",
+        'sn': RDN_TEST_USER + "2",
+        'uidNumber': '1001',
+        'gidNumber': '2001',
+        'homeDirectory': f'/home/{RDN_TEST_USER + "2"}',
+        'userpassword': 'password'
+    })
+    inst.simple_bind_s(user2.dn, 'password')
+    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+        inst.passwd_s(user.dn, NEW_PASSWD, OLD_PASSWD)
+    event = get_log_event(inst, "EXTENDED_OP_INFO", "err", 50)
+    assert event is not None
+    assert event['bind_dn'] == user2.dn.lower()
+    assert event['target_dn'] == user.dn.lower()
+    assert event['msg'] == "Insufficient access rights"
+
+
+    # Reset bind
+    inst.simple_bind_s(DN_DM, PASSWORD)
+
+

 if __name__ == '__main__':
     # Run isolated
diff --git a/ldap/servers/slapd/accesslog.c b/ldap/servers/slapd/accesslog.c
index 072ace203..46228d4a1 100644
--- a/ldap/servers/slapd/accesslog.c
+++ b/ldap/servers/slapd/accesslog.c
@@ -1113,6 +1113,53 @@ slapd_log_access_extop(slapd_log_pblock *logpb)
     return rc;
 }

+/*
+ * Extended operation information
+ *
+ *    int32_t log_format
+ *    time_t conn_time
+ *    uint64_t conn_id
+ *    int32_t op_id
+ *    const char *name
+ *    const char *bind_dn
+ *    const char *target_dn
+ *    const char *msg
+ */
+int32_t
+slapd_log_access_extop_info(slapd_log_pblock *logpb)
+{
+    int32_t rc = 0;
+    char *msg = NULL;
+    json_object *json_obj = NULL;
+
+    if ((json_obj = build_base_obj(logpb, "EXTENDED_OP_INFO")) == NULL) {
+        return rc;
+    }
+
+    if (logpb->name) {
+        json_object_object_add(json_obj, "name", json_obj_add_str(logpb->name));
+    }
+    if (logpb->target_dn) {
+        json_object_object_add(json_obj, "target_dn", json_obj_add_str(logpb->target_dn));
+    }
+    if (logpb->bind_dn) {
+        json_object_object_add(json_obj, "bind_dn", json_obj_add_str(logpb->bind_dn));
+    }
+    if (logpb->msg) {
+        json_object_object_add(json_obj, "msg", json_obj_add_str(logpb->msg));
+    }
+    json_object_object_add(json_obj, "err", json_object_new_int(logpb->err));
+
+    /* Convert json object to string and log it */
+    msg = (char *)json_object_to_json_string_ext(json_obj, logpb->log_format);
+    rc = slapd_log_access_json(msg);
+
+    /* Done with JSON object - free it */
+    json_object_put(json_obj);
+
+    return rc;
+}
+
 /*
  * Sort
  *
diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c
index 4bb60afd6..69bb3494c 100644
--- a/ldap/servers/slapd/passwd_extop.c
+++ b/ldap/servers/slapd/passwd_extop.c
@@ -465,12 +465,14 @@ passwd_modify_extop(Slapi_PBlock *pb)
     BerElement *response_ber = NULL;
     Slapi_Entry *targetEntry = NULL;
     Connection *conn = NULL;
+    Operation *pb_op = NULL;
     LDAPControl **req_controls = NULL;
     LDAPControl **resp_controls = NULL;
     passwdPolicy *pwpolicy = NULL;
     Slapi_DN *target_sdn = NULL;
     Slapi_Entry *referrals = NULL;
-    /* Slapi_DN sdn; */
+    Slapi_Backend *be = NULL;
+    int32_t log_format = config_get_accesslog_log_format();

     slapi_log_err(SLAPI_LOG_TRACE, "passwd_modify_extop", "=>\n");

@@ -647,7 +649,7 @@ parse_req_done:
     }
     dn = slapi_sdn_get_ndn(target_sdn);
     if (dn == NULL || *dn == '\0') {
-        /* Refuse the operation because they're bound anonymously */
+        /* Invalid DN - refuse the operation */
         errMesg = "Invalid dn.";
         rc = LDAP_INVALID_DN_SYNTAX;
         goto free_and_return;
@@ -724,14 +726,19 @@ parse_req_done:
         ber_free(response_ber, 1);
     }

-    slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET, (void *)dn);
+    slapi_pblock_get(pb, SLAPI_OPERATION, &pb_op);
+    if (pb_op == NULL) {
+        slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "pb_op is NULL\n");
+        goto free_and_return;
+    }

+    slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET, (void *)dn);
     /* Now we have the DN, look for the entry */
     ret = passwd_modify_getEntry(dn, &targetEntry);
     /* If we can't find the entry, then that's an error */
     if (ret) {
         /* Couldn't find the entry, fail */
-        errMesg = "No such Entry exists.";
+        errMesg = "No such entry exists.";
         rc = LDAP_NO_SUCH_OBJECT;
         goto free_and_return;
     }
@@ -742,30 +749,18 @@ parse_req_done:
        leak any useful information to the client such as current password
        wrong, etc.
      */
-    Operation *pb_op = NULL;
-    slapi_pblock_get(pb, SLAPI_OPERATION, &pb_op);
-    if (pb_op == NULL) {
-        slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "pb_op is NULL\n");
-        goto free_and_return;
-    }
-
     operation_set_target_spec(pb_op, slapi_entry_get_sdn(targetEntry));
     slapi_pblock_set(pb, SLAPI_REQUESTOR_ISROOT, &pb_op->o_isroot);

-    /* In order to perform the access control check , we need to select a backend (even though
-     * we don't actually need it otherwise).
-     */
-    {
-        Slapi_Backend *be = NULL;
-
-        be = slapi_mapping_tree_find_backend_for_sdn(slapi_entry_get_sdn(targetEntry));
-        if (NULL == be) {
-            errMesg = "Failed to find backend for target entry";
-            rc = LDAP_OPERATIONS_ERROR;
-            goto free_and_return;
-        }
-        slapi_pblock_set(pb, SLAPI_BACKEND, be);
+    /* In order to perform the access control check, we need to select a backend (even though
+     * we don't actually need it otherwise). */
+    be = slapi_mapping_tree_find_backend_for_sdn(slapi_entry_get_sdn(targetEntry));
+    if (NULL == be) {
+        errMesg = "Failed to find backend for target entry";
+        rc = LDAP_NO_SUCH_OBJECT;
+        goto free_and_return;
     }
+    slapi_pblock_set(pb, SLAPI_BACKEND, be);

     /* Check if the pwpolicy control is present */
     slapi_pblock_get(pb, SLAPI_PWPOLICY, &need_pwpolicy_ctrl);
@@ -797,10 +792,7 @@ parse_req_done:
     /* Check if password policy allows users to change their passwords. We need to do
      * this here since the normal modify code doesn't perform this check for
      * internal operations. */
-
-    Connection *pb_conn;
-    slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn);
-    if (!pb_op->o_isroot && !pb_conn->c_needpw && !pwpolicy->pw_change) {
+    if (!pb_op->o_isroot && !conn->c_needpw && !pwpolicy->pw_change) {
         if (NULL == bindSDN) {
             bindSDN = slapi_sdn_new_normdn_byref(bindDN);
         }
@@ -848,6 +840,27 @@ free_and_return:
     slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop",
                   "%s\n", errMesg ? errMesg : "success");

+    if (dn) {
+        /* Log the target ndn (if we have a target ndn) */
+        if (log_format != LOG_FORMAT_DEFAULT) {
+            /* JSON logging */
+            slapd_log_pblock logpb = {0};
+            slapd_log_pblock_init(&logpb, log_format, pb);
+            logpb.name = "passwd_modify_plugin";
+            logpb.target_dn = dn;
+            logpb.bind_dn = bindDN;
+            logpb.msg = errMesg ? errMesg : "success";
+            logpb.err = rc;
+            slapd_log_access_extop_info(&logpb);
+        } else {
+            slapi_log_access(LDAP_DEBUG_STATS,
+                             "conn=%" PRIu64 " op=%d EXT_INFO name=\"passwd_modify_plugin\" bind_dn=\"%s\" target_dn=\"%s\" msg=\"%s\" rc=%d\n",
+                             conn ? conn->c_connid : -1, pb_op ? pb_op->o_opid : -1,
+                             bindDN ? bindDN : "", dn,
+                             errMesg ? errMesg : "success", rc);
+        }
+    }
+
     if ((rc == LDAP_REFERRAL) && (referrals)) {
         send_referrals_from_entry(pb, referrals);
     } else {
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index da232ae2f..e9abf8b75 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1652,6 +1652,7 @@ int32_t slapd_log_access_vlv(slapd_log_pblock *logpb);
 int32_t slapd_log_access_entry(slapd_log_pblock *logpb);
 int32_t slapd_log_access_referral(slapd_log_pblock *logpb);
 int32_t slapd_log_access_extop(slapd_log_pblock *logpb);
+int32_t slapd_log_access_extop_info(slapd_log_pblock *logpb);
 int32_t slapd_log_access_sort(slapd_log_pblock *logpb);
 int32_t slapd_log_access_tls(slapd_log_pblock *logpb);
 int32_t slapd_log_access_tls_client_auth(slapd_log_pblock *logpb);
--
2.49.0

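For reference, the default-format branch above would produce an access-log line shaped like the following — the field layout is taken verbatim from the format string in the patch, while the concrete values are illustrative only:

```
conn=12 op=3 EXT_INFO name="passwd_modify_plugin" bind_dn="cn=directory manager" target_dn="uid=demo_user,ou=people,dc=example,dc=com" msg="success" rc=0
```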
37
0021-Issue-6352-Fix-DeprecationWarning.patch
Normal file
@ -0,0 +1,37 @@
From ab188bdfed7a144734c715f57ba4772c8d453b6f Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Fri, 11 Jul 2025 13:12:44 +0200
Subject: [PATCH] Issue 6352 - Fix DeprecationWarning

Bug Description:
When pytest is used on ASAN build, pytest-html plugin collects `*asan*`
files, which results in the following deprecation warning:

```
The 'report.extra' attribute is deprecated and will be removed in a
future release, use 'report.extras' instead.
```

Fixes: https://github.com/389ds/389-ds-base/issues/6352

Reviewed by: @droideck (Thanks!)
---
 dirsrvtests/conftest.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dirsrvtests/conftest.py b/dirsrvtests/conftest.py
index c989729c1..0db6045f4 100644
--- a/dirsrvtests/conftest.py
+++ b/dirsrvtests/conftest.py
@@ -138,7 +138,7 @@ def pytest_runtest_makereport(item, call):
             log_name = os.path.basename(f)
             instance_name = os.path.basename(os.path.dirname(f)).split("slapd-",1)[1]
             extra.append(pytest_html.extras.text(text, name=f"{instance_name}-{log_name}"))
-    report.extra = extra
+    report.extras = extra

     # Make a screenshot if WebUI test fails
     if call.when == "call" and "WEBUI" in os.environ:
--
2.49.0

38
0022-Issue-6880-Fix-ds_logs-test-suite-failure.patch
Normal file
@ -0,0 +1,38 @@
From 5b0188baf00c395dac807657736ed51968d5c3a0 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Thu, 17 Jul 2025 13:41:04 +0200
Subject: [PATCH] Issue 6880 - Fix ds_logs test suite failure

Bug Description:
After 947ee67df6 ds_logs test suite started to fail in
test_internal_log_server_level_4. It slightly changed the order and
timing of log messages.

Fix Description:
Do another MOD after restart to trigger the internal search.

Fixes: https://github.com/389ds/389-ds-base/issues/6880

Reviewed by: @bsimonova, @droideck (Thanks!)
---
 dirsrvtests/tests/suites/ds_logs/ds_logs_test.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
index 6fd790c18..eff6780cd 100644
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
@@ -356,6 +356,10 @@ def test_internal_log_server_level_4(topology_st, clean_access_logs, disable_acc
     log.info('Restart the server to flush the logs')
     topo.restart()

+    # After 947ee67 log dynamic has changed slightly
+    # Do another MOD to trigger the internal search
+    topo.config.set(LOG_ACCESS_LEVEL, access_log_level)
+
     try:
         # These comments contain lines we are trying to find without regex (the op numbers are just examples)
         log.info("Check if access log contains internal MOD operation in correct format")
--
2.49.0

0023-Issue-6901-Update-changelog-trimming-logging.patch
Normal file
53
0023-Issue-6901-Update-changelog-trimming-logging.patch
Normal file
@ -0,0 +1,53 @@
|
||||
From 13b0a1637b2fb8eb8b6f5fa391721f61bfe41874 Mon Sep 17 00:00:00 2001
|
||||
From: Viktor Ashirov <vashirov@redhat.com>
|
||||
Date: Thu, 24 Jul 2025 19:09:40 +0200
|
||||
Subject: [PATCH] Issue 6901 - Update changelog trimming logging
|
||||
|
||||
Description:
|
||||
* Set SLAPI_LOG_ERR for message in `_cl5DispatchTrimThread`
|
||||
* Set correct function name for logs in `_cl5TrimEntry`.
|
||||
* Add number of scanned entries to the log.
|
||||
|
||||
Fixes: https://github.com/389ds/389-ds-base/issues/6901
|
||||
|
||||
Reviewed by: @mreynolds389, @progier389 (Thanks!)
|
||||
---
|
||||
ldap/servers/plugins/replication/cl5_api.c | 8 ++++----
|
||||
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
|
||||
index 3c356abc0..1d62aa020 100644
|
||||
--- a/ldap/servers/plugins/replication/cl5_api.c
|
||||
+++ b/ldap/servers/plugins/replication/cl5_api.c
|
||||
@@ -2007,7 +2007,7 @@ _cl5DispatchTrimThread(Replica *replica)
|
||||
(void *)replica, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
|
||||
PR_UNJOINABLE_THREAD, DEFAULT_THREAD_STACKSIZE);
|
||||
if (NULL == pth) {
|
||||
- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
|
||||
+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl,
|
||||
"_cl5DispatchTrimThread - Failed to create trimming thread for %s"
|
||||
"; NSPR error - %d\n", replica_get_name(replica),
|
||||
PR_GetError());
|
||||
@@ -2788,7 +2788,7 @@ _cl5TrimEntry(dbi_val_t *key, dbi_val_t *data, void *ctx)
|
||||
return DBI_RC_NOTFOUND;
|
||||
} else {
|
||||
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl,
|
||||
- "_cl5TrimReplica - Changelog purge skipped anchor csn %s\n",
|
||||
+ "_cl5TrimEntry - Changelog purge skipped anchor csn %s\n",
|
||||
(char*)key->data);
|
||||
return DBI_RC_SUCCESS;
|
||||
}
|
||||
@@ -2867,8 +2867,8 @@ _cl5TrimReplica(Replica *r)
|
||||
slapi_ch_free((void**)&dblcictx.rids);
|
||||
|
||||
if (dblcictx.changed.tot) {
|
||||
- slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl, "_cl5TrimReplica - Trimmed %ld changes from the changelog\n",
|
||||
- dblcictx.changed.tot);
|
||||
+ slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name_cl, "_cl5TrimReplica - Scanned %ld records, and trimmed %ld changes from the changelog\n",
|
||||
+ dblcictx.seen.tot, dblcictx.changed.tot);
|
||||
}
|
||||
}
|
||||
|
||||
--
|
||||
2.49.0
|
||||
|
||||
@ -0,0 +1,98 @@
From 9cb193418d4dd07182d2e1a38a5cc5a2a41e2877 Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Wed, 23 Jul 2025 19:35:32 -0400
Subject: [PATCH] Issue 6895 - Crash if repl keep alive entry can not be
 created

Description:

Heap use after free when logging that the replication keep-alive entry can not
be created. slapi_add_internal_pb() frees the slapi entry, then
we try and get the dn from the entry and we get a use-after-free crash.

Relates: https://github.com/389ds/389-ds-base/issues/6895

Reviewed by: spichugi(Thanks!)
---
 ldap/servers/plugins/chainingdb/cb_config.c        | 3 +--
 ldap/servers/plugins/posix-winsync/posix-winsync.c | 1 -
 ldap/servers/plugins/replication/repl5_init.c      | 3 ---
 ldap/servers/plugins/replication/repl5_replica.c   | 8 ++++----
 4 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/ldap/servers/plugins/chainingdb/cb_config.c b/ldap/servers/plugins/chainingdb/cb_config.c
index 40a7088d7..24fa1bcb3 100644
--- a/ldap/servers/plugins/chainingdb/cb_config.c
+++ b/ldap/servers/plugins/chainingdb/cb_config.c
@@ -44,8 +44,7 @@ cb_config_add_dse_entries(cb_backend *cb, char **entries, char *string1, char *s
     slapi_pblock_get(util_pb, SLAPI_PLUGIN_INTOP_RESULT, &res);
     if (LDAP_SUCCESS != res && LDAP_ALREADY_EXISTS != res) {
         slapi_log_err(SLAPI_LOG_ERR, CB_PLUGIN_SUBSYSTEM,
-                      "cb_config_add_dse_entries - Unable to add config entry (%s) to the DSE: %s\n",
-                      slapi_entry_get_dn(e),
+                      "cb_config_add_dse_entries - Unable to add config entry to the DSE: %s\n",
                       ldap_err2string(res));
         rc = res;
         slapi_pblock_destroy(util_pb);
diff --git a/ldap/servers/plugins/posix-winsync/posix-winsync.c b/ldap/servers/plugins/posix-winsync/posix-winsync.c
index 51a55b643..3a002bb70 100644
--- a/ldap/servers/plugins/posix-winsync/posix-winsync.c
+++ b/ldap/servers/plugins/posix-winsync/posix-winsync.c
@@ -1626,7 +1626,6 @@ posix_winsync_end_update_cb(void *cbdata __attribute__((unused)),
                       "posix_winsync_end_update_cb: "
                       "add task entry\n");
     }
-    /* slapi_entry_free(e_task); */
     slapi_pblock_destroy(pb);
     pb = NULL;
     posix_winsync_config_reset_MOFTaskCreated();
diff --git a/ldap/servers/plugins/replication/repl5_init.c b/ldap/servers/plugins/replication/repl5_init.c
index 8bc0b5372..5047fb8dc 100644
--- a/ldap/servers/plugins/replication/repl5_init.c
+++ b/ldap/servers/plugins/replication/repl5_init.c
@@ -682,7 +682,6 @@ create_repl_schema_policy(void)
                       repl_schema_top,
                       ldap_err2string(return_value));
         rc = -1;
-        slapi_entry_free(e); /* The entry was not consumed */
         goto done;
     }
     slapi_pblock_destroy(pb);
@@ -703,7 +702,6 @@ create_repl_schema_policy(void)
                       repl_schema_supplier,
                       ldap_err2string(return_value));
         rc = -1;
-        slapi_entry_free(e); /* The entry was not consumed */
         goto done;
     }
     slapi_pblock_destroy(pb);
@@ -724,7 +722,6 @@ create_repl_schema_policy(void)
                       repl_schema_consumer,
                       ldap_err2string(return_value));
         rc = -1;
-        slapi_entry_free(e); /* The entry was not consumed */
         goto done;
     }
     slapi_pblock_destroy(pb);
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index 59062b46b..a97c807e9 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -465,10 +465,10 @@ replica_subentry_create(const char *repl_root, ReplicaId rid)
     if (return_value != LDAP_SUCCESS &&
         return_value != LDAP_ALREADY_EXISTS &&
         return_value != LDAP_REFERRAL /* CONSUMER */) {
-        slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_subentry_create - Unable to "
-                      "create replication keep alive entry %s: error %d - %s\n",
-                      slapi_entry_get_dn_const(e),
-                      return_value, ldap_err2string(return_value));
+        slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_subentry_create - "
+                      "Unable to create replication keep alive entry 'cn=%s %d,%s': error %d - %s\n",
+                      KEEP_ALIVE_ENTRY, rid, repl_root,
+                      return_value, ldap_err2string(return_value));
         rc = -1;
         goto done;
     }
--
2.49.0

352
0025-Issue-6250-Add-test-for-entryUSN-overflow-on-failed-.patch
Normal file
352
0025-Issue-6250-Add-test-for-entryUSN-overflow-on-failed-.patch
Normal file
@ -0,0 +1,352 @@
|
||||
From a0cdf2970edb46acec06a5ac204ec04135806b35 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Pichugin <spichugi@redhat.com>
|
||||
Date: Mon, 28 Jul 2025 10:50:26 -0700
|
||||
Subject: [PATCH] Issue 6250 - Add test for entryUSN overflow on failed add
|
||||
operations (#6821)
|
||||
|
||||
Description: Add comprehensive test to reproduce the entryUSN
|
||||
overflow issue where failed attempts to add existing entries followed by
|
||||
modify operations cause entryUSN values to underflow/overflow instead of
|
||||
incrementing properly.
|
||||
|
||||
Related: https://github.com/389ds/389-ds-base/issues/6250
|
||||
|
||||
Reviewed by: @tbordaz (Thanks!)
|
||||
---
|
||||
.../suites/plugins/entryusn_overflow_test.py | 323 ++++++++++++++++++
|
||||
1 file changed, 323 insertions(+)
|
||||
create mode 100644 dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
|
||||
|
||||
diff --git a/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
|
||||
new file mode 100644
|
||||
index 000000000..a23d734ca
|
||||
--- /dev/null
|
||||
+++ b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
|
||||
@@ -0,0 +1,323 @@
|
||||
+# --- BEGIN COPYRIGHT BLOCK ---
|
||||
+# Copyright (C) 2025 Red Hat, Inc.
|
||||
+# All rights reserved.
|
||||
+#
|
||||
+# License: GPL (version 3 or any later version).
|
||||
+# See LICENSE for details.
|
||||
+# --- END COPYRIGHT BLOCK ---
|
||||
+#
|
||||
+import os
|
||||
+import ldap
|
||||
+import logging
|
||||
+import pytest
|
||||
+import time
|
||||
+import random
|
||||
+from lib389._constants import DEFAULT_SUFFIX
|
||||
+from lib389.config import Config
|
||||
+from lib389.plugins import USNPlugin
|
||||
+from lib389.idm.user import UserAccounts
|
||||
+from lib389.topologies import topology_st
|
||||
+from lib389.rootdse import RootDSE
|
||||
+
|
||||
+pytestmark = pytest.mark.tier2
|
||||
+
|
||||
+log = logging.getLogger(__name__)
|
||||
+
|
||||
+# Test constants
|
||||
+DEMO_USER_BASE_DN = "uid=demo_user,ou=people," + DEFAULT_SUFFIX
|
||||
+TEST_USER_PREFIX = "Demo User"
|
||||
+MAX_USN_64BIT = 18446744073709551615 # 2^64 - 1
|
||||
+ITERATIONS = 10
|
||||
+ADD_EXISTING_ENTRY_MAX_ATTEMPTS = 5
|
||||
+
|
||||
+
|
||||
+@pytest.fixture(scope="module")
|
||||
+def setup_usn_test(topology_st, request):
|
||||
+ """Setup USN plugin and test data for entryUSN overflow testing"""
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+
|
||||
+ log.info("Enable the USN plugin...")
|
||||
+ plugin = USNPlugin(inst)
|
||||
+ plugin.enable()
|
||||
+ plugin.enable_global_mode()
|
||||
+
|
||||
+ inst.restart()
|
||||
+
|
||||
+ # Create initial test users
|
||||
+ users = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+ created_users = []
|
||||
+
|
||||
+ log.info("Creating initial test users...")
|
||||
+ for i in range(3):
|
||||
+ user_props = {
|
||||
+ 'uid': f'{TEST_USER_PREFIX}-{i}',
|
||||
+ 'cn': f'{TEST_USER_PREFIX}-{i}',
|
||||
+ 'sn': f'User{i}',
|
||||
+ 'uidNumber': str(1000 + i),
|
||||
+ 'gidNumber': str(1000 + i),
|
||||
+ 'homeDirectory': f'/home/{TEST_USER_PREFIX}-{i}',
|
||||
+ 'userPassword': 'password123'
|
||||
+ }
|
||||
+ try:
|
||||
+ user = users.create(properties=user_props)
|
||||
+ created_users.append(user)
|
||||
+ log.info(f"Created user: {user.dn}")
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ log.info(f"User {user_props['uid']} already exists, skipping creation")
|
||||
+ user = users.get(user_props['uid'])
|
||||
+ created_users.append(user)
|
||||
+
|
||||
+ def fin():
|
||||
+ log.info("Cleaning up test users...")
|
||||
+ for user in created_users:
|
||||
+ try:
|
||||
+ user.delete()
|
||||
+ except ldap.NO_SUCH_OBJECT:
|
||||
+ pass
|
||||
+
|
||||
+ request.addfinalizer(fin)
|
||||
+
|
||||
+ return created_users
|
||||
+
|
||||
+
|
||||
+def test_entryusn_overflow_on_add_existing_entries(topology_st, setup_usn_test):
|
||||
+ """Test that reproduces entryUSN overflow when adding existing entries
|
||||
+
|
||||
+ :id: a5a8c33d-82f3-4113-be2b-027de51791c8
|
||||
+ :setup: Standalone instance with USN plugin enabled and test users
|
||||
+ :steps:
|
||||
+ 1. Record initial entryUSN values for existing users
|
||||
+ 2. Attempt to add existing entries multiple times (should fail)
|
||||
+ 3. Perform modify operations on the entries
|
||||
+ 4. Check that entryUSN values increment correctly without overflow
|
||||
+ 5. Verify lastusn values are consistent
|
||||
+ :expectedresults:
|
||||
+ 1. Initial entryUSN values are recorded successfully
|
||||
+ 2. Add operations fail with ALREADY_EXISTS error
|
||||
+ 3. Modify operations succeed
|
||||
+ 4. EntryUSN values increment properly without underflow/overflow
|
||||
+ 5. LastUSN values are consistent and increasing
|
||||
+ """
|
||||
+
|
||||
+ inst = topology_st.standalone
|
||||
+ users = setup_usn_test
|
||||
+
|
||||
+ # Enable detailed logging for debugging
|
||||
+ config = Config(inst)
|
||||
+ config.replace('nsslapd-accesslog-level', '260') # Internal op logging
|
||||
+ config.replace('nsslapd-errorlog-level', '65536')
|
||||
+ config.replace('nsslapd-plugin-logging', 'on')
|
||||
+
|
||||
+ root_dse = RootDSE(inst)
|
||||
+
|
||||
+ log.info("Starting entryUSN overflow reproduction test")
|
||||
+
|
||||
+ # Record initial state
|
||||
+ initial_usn_values = {}
|
||||
+ for user in users:
|
||||
+ initial_usn = user.get_attr_val_int('entryusn')
|
||||
+ initial_usn_values[user.dn] = initial_usn
|
||||
+ log.info(f"Initial entryUSN for {user.get_attr_val_utf8('cn')}: {initial_usn}")
|
||||
+
|
||||
+ initial_lastusn = root_dse.get_attr_val_int("lastusn")
|
||||
+ log.info(f"Initial lastUSN: {initial_lastusn}")
|
||||
+
|
||||
+ # Perform test iterations
|
||||
+ for iteration in range(1, ITERATIONS + 1):
|
||||
+ log.info(f"\n--- Iteration {iteration} ---")
|
||||
+
|
||||
+ # Step 1: Try to add existing entries multiple times
|
||||
+ selected_user = random.choice(users)
|
||||
+ cn_value = selected_user.get_attr_val_utf8('cn')
|
||||
+ attempts = random.randint(1, ADD_EXISTING_ENTRY_MAX_ATTEMPTS)
|
||||
+
|
||||
+ log.info(f"Attempting to add existing entry '{cn_value}' {attempts} times")
|
||||
+
|
||||
+ # Get user attributes for recreation attempt
|
||||
+ user_attrs = {
|
||||
+ 'uid': selected_user.get_attr_val_utf8('uid'),
|
||||
+ 'cn': selected_user.get_attr_val_utf8('cn'),
|
||||
+ 'sn': selected_user.get_attr_val_utf8('sn'),
|
||||
+ 'uidNumber': selected_user.get_attr_val_utf8('uidNumber'),
|
||||
+ 'gidNumber': selected_user.get_attr_val_utf8('gidNumber'),
|
||||
+ 'homeDirectory': selected_user.get_attr_val_utf8('homeDirectory'),
|
||||
+ 'userPassword': 'password123'
|
||||
+ }
|
||||
+
|
||||
+ users_collection = UserAccounts(inst, DEFAULT_SUFFIX)
|
||||
+
|
||||
+ # Try to add the existing user multiple times
|
||||
+ for attempt in range(attempts):
|
||||
+ try:
|
||||
+ users_collection.create(properties=user_attrs)
|
||||
+ log.error(f"ERROR: Add operation should have failed but succeeded on attempt {attempt + 1}")
|
||||
+ assert False, "Add operation should have failed with ALREADY_EXISTS"
|
||||
+ except ldap.ALREADY_EXISTS:
|
||||
+ log.info(f"Attempt {attempt + 1}: Got expected ALREADY_EXISTS error")
|
||||
+ except Exception as e:
|
||||
+ log.error(f"Unexpected error on attempt {attempt + 1}: {e}")
|
||||
+ raise
|
||||
+
|
||||
+ # Step 2: Perform modify operation
|
||||
+ target_user = random.choice(users)
|
||||
+ cn_value = target_user.get_attr_val_utf8('cn')
|
||||
+ old_usn = target_user.get_attr_val_int('entryusn')
|
||||
+
|
||||
+ # Modify the user entry
|
||||
+ new_description = f"Modified in iteration {iteration} - {time.time()}"
|
||||
+ target_user.replace('description', new_description)
|
||||
+
|
||||
+ # Get new USN value
|
||||
+ new_usn = target_user.get_attr_val_int('entryusn')
|
||||
+
|
||||
+ log.info(f"Modified entry '{cn_value}': old USN = {old_usn}, new USN = {new_usn}")
|
||||
+
|
||||
+ # Step 3: Validate USN values
|
||||
+ # Check for overflow/underflow conditions
|
||||
+ assert new_usn > 0, f"EntryUSN should be positive, got {new_usn}"
|
||||
+ assert new_usn < MAX_USN_64BIT, f"EntryUSN overflow detected: {new_usn} >= {MAX_USN_64BIT}"
|
||||
+
|
||||
+ # Check that USN didn't wrap around (underflow detection)
|
||||
+ usn_diff = new_usn - old_usn
|
||||
+ assert usn_diff < 1000, f"USN increment too large, possible overflow: {usn_diff}"
|
||||
+
|
||||
+ # Verify lastUSN is also reasonable
|
||||
+ current_lastusn = root_dse.get_attr_val_int("lastusn")
|
||||
+ assert current_lastusn >= new_usn, f"LastUSN ({current_lastusn}) should be >= entryUSN ({new_usn})"
|
||||
+ assert current_lastusn < MAX_USN_64BIT, f"LastUSN overflow detected: {current_lastusn}"
|
||||
+
|
||||
+ log.info(f"USN validation passed for iteration {iteration}")
|
||||
+
|
||||
+ # Add a new entry occasionally to increase USN diversity
|
||||
+ if iteration % 3 == 0:
|
||||
+ new_user_props = {
|
||||
+ 'uid': f'{TEST_USER_PREFIX}-new-{iteration}',
|
||||
+ 'cn': f'{TEST_USER_PREFIX}-new-{iteration}',
|
||||
+ 'sn': f'NewUser{iteration}',
|
||||
+ 'uidNumber': str(2000 + iteration),
|
||||
+ 'gidNumber': str(2000 + iteration),
|
||||
+ 'homeDirectory': f'/home/{TEST_USER_PREFIX}-new-{iteration}',
|
||||
+ 'userPassword': 'newpassword123'
|
||||
+ }
|
||||
+ try:
|
||||
+ new_user = users_collection.create(properties=new_user_props)
|
||||
+ new_user_usn = new_user.get_attr_val_int('entryusn')
|
||||
+ log.info(f"Created new entry '{new_user.get_attr_val_utf8('cn')}' with USN: {new_user_usn}")
|
||||
+ users.append(new_user) # Add to cleanup list
|
||||
+ except Exception as e:
|
||||
+ log.warning(f"Failed to create new user in iteration {iteration}: {e}")
|
||||
+
|
||||
+ # Final validation: Check all USN values are reasonable
|
||||
+ log.info("\nFinal USN validation")
|
||||
+ final_lastusn = root_dse.get_attr_val_int("lastusn")
|
||||
+
|
||||
+ for user in users:
|
||||
+ try:
|
||||
+ final_usn = user.get_attr_val_int('entryusn')
|
||||
+ cn_value = user.get_attr_val_utf8('cn')
|
||||
+ log.info(f"Final entryUSN for '{cn_value}': {final_usn}")
|
||||
+
|
||||
+ # Ensure no overflow occurred
|
||||
+ assert final_usn > 0, f"Final entryUSN should be positive for {cn_value}: {final_usn}"
|
||||
+ assert final_usn < MAX_USN_64BIT, f"EntryUSN overflow for {cn_value}: {final_usn}"
|
||||
+
|
||||
+ except ldap.NO_SUCH_OBJECT:
|
||||
+ log.info(f"User {user.dn} was deleted during test")
|
||||
+
|
||||
+ log.info(f"Final lastUSN: {final_lastusn}")
|
||||
+    assert final_lastusn > initial_lastusn, "LastUSN should have increased during test"
+    assert final_lastusn < MAX_USN_64BIT, f"LastUSN overflow detected: {final_lastusn}"
+
+    log.info("EntryUSN overflow test completed successfully")
+
+
+def test_entryusn_consistency_after_failed_adds(topology_st, setup_usn_test):
+    """Test that entryUSN remains consistent after failed add operations
+
+    :id: e380ccad-527b-427e-a331-df5c41badbed
+    :setup: Standalone instance with USN plugin enabled and test users
+    :steps:
+        1. Record entryUSN values before failed add attempts
+        2. Attempt to add existing entries (should fail)
+        3. Verify entryUSN values haven't changed due to failed operations
+        4. Perform successful modify operations
+        5. Verify entryUSN increments correctly
+    :expectedresults:
+        1. Initial entryUSN values recorded
+        2. Add operations fail as expected
+        3. EntryUSN values unchanged after failed adds
+        4. Modify operations succeed
+        5. EntryUSN values increment correctly without overflow
+    """
+
+    inst = topology_st.standalone
+    users = setup_usn_test
+
+    log.info("Testing entryUSN consistency after failed adds")
+
+    # Record USN values before any operations
+    pre_operation_usns = {}
+    for user in users:
+        usn = user.get_attr_val_int('entryusn')
+        pre_operation_usns[user.dn] = usn
+        log.info(f"Pre-operation entryUSN for {user.get_attr_val_utf8('cn')}: {usn}")
+
+    # Attempt to add existing entries - these should fail
+    users_collection = UserAccounts(inst, DEFAULT_SUFFIX)
+
+    for user in users:
+        cn_value = user.get_attr_val_utf8('cn')
+        log.info(f"Attempting to add existing user: {cn_value}")
+
+        user_attrs = {
+            'uid': user.get_attr_val_utf8('uid'),
+            'cn': cn_value,
+            'sn': user.get_attr_val_utf8('sn'),
+            'uidNumber': user.get_attr_val_utf8('uidNumber'),
+            'gidNumber': user.get_attr_val_utf8('gidNumber'),
+            'homeDirectory': user.get_attr_val_utf8('homeDirectory'),
+            'userPassword': 'password123'
+        }
+
+        try:
+            users_collection.create(properties=user_attrs)
+            assert False, f"Add operation should have failed for existing user {cn_value}"
+        except ldap.ALREADY_EXISTS:
+            log.info(f"Got expected ALREADY_EXISTS for {cn_value}")
+
+    # Verify USN values haven't changed after failed adds
+    log.info("Verifying entryUSN values after failed add operations...")
+    for user in users:
+        current_usn = user.get_attr_val_int('entryusn')
+        expected_usn = pre_operation_usns[user.dn]
+        cn_value = user.get_attr_val_utf8('cn')
+
+        assert current_usn == expected_usn, \
+            f"EntryUSN changed after failed add for {cn_value}: was {expected_usn}, now {current_usn}"
+        log.info(f"EntryUSN unchanged for {cn_value}: {current_usn}")
+
+    # Now perform successful modify operations
+    log.info("Performing successful modify operations...")
+    for i, user in enumerate(users):
+        cn_value = user.get_attr_val_utf8('cn')
+        old_usn = user.get_attr_val_int('entryusn')
+
+        # Modify the user
+        user.replace('description', f'Consistency test modification {i + 1}')
+
+        new_usn = user.get_attr_val_int('entryusn')
+        log.info(f"Modified {cn_value}: USN {old_usn} -> {new_usn}")
+
+        # Verify proper increment
+        assert (new_usn - old_usn) == 1, f"EntryUSN should increment by 1 for {cn_value}: {old_usn} -> {new_usn}"
+        assert new_usn < MAX_USN_64BIT, f"EntryUSN overflow for {cn_value}: {new_usn}"
+
+    log.info("EntryUSN consistency test completed successfully")
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
\ No newline at end of file
--
2.49.0

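[Editor's note: illustrative sketch, not part of the patch series. The overflow checks above compare entryUSN/lastUSN against a signed 64-bit ceiling; outside the suite, the same spot-check can be made by reading the USN plugin's per-backend lastusn attribute from the root DSE. RootDSE is a real lib389 object, but treat the exact accessor path and attribute spelling here as assumptions.]

# Illustrative only: spot-check that a backend's lastUSN is sane.
from lib389.rootdse import RootDSE

MAX_USN_64BIT = 2**63 - 1  # same ceiling the test uses

def lastusn_ok(inst, backend='userroot'):
    # The USN plugin publishes 'lastusn;<backend>' on the root DSE
    rootdse = RootDSE(inst)
    usn = rootdse.get_attr_val_int(f'lastusn;{backend}')
    return 0 <= usn < MAX_USN_64BIT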
172
0026-Issue-6594-Add-test-for-numSubordinates-replication-.patch
Normal file
@ -0,0 +1,172 @@
From 36ca93e8ad2915bcfcae0367051e0f606386f861 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Mon, 28 Jul 2025 15:35:50 -0700
Subject: [PATCH] Issue 6594 - Add test for numSubordinates replication
 consistency with tombstones (#6862)

Description: Add a comprehensive test to verify that numSubordinates and
tombstoneNumSubordinates attributes are correctly replicated between
instances when tombstone entries are present.

Fixes: https://github.com/389ds/389-ds-base/issues/6594

Reviewed by: @progier389 (Thanks!)
---
 .../numsubordinates_replication_test.py | 144 ++++++++++++++++++
 1 file changed, 144 insertions(+)
 create mode 100644 dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py

diff --git a/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py b/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py
new file mode 100644
index 000000000..9ba10657d
--- /dev/null
+++ b/dirsrvtests/tests/suites/replication/numsubordinates_replication_test.py
@@ -0,0 +1,144 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2025 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import os
+import logging
+import pytest
+from lib389._constants import DEFAULT_SUFFIX
+from lib389.replica import ReplicationManager
+from lib389.idm.organizationalunit import OrganizationalUnits
+from lib389.idm.user import UserAccounts
+from lib389.topologies import topology_i2 as topo_i2
+
+
+pytestmark = pytest.mark.tier1
+
+DEBUGGING = os.getenv("DEBUGGING", default=False)
+if DEBUGGING:
+    logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+    logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+
+def test_numsubordinates_tombstone_replication_mismatch(topo_i2):
+    """Test that numSubordinates values match between replicas after tombstone creation
+
+    :id: c43ecc7a-d706-42e8-9179-1ff7d0e7163a
+    :setup: Two standalone instances
+    :steps:
+        1. Create a container (organizational unit) on the first instance
+        2. Create a user object in that container
+        3. Delete the user object (this creates a tombstone)
+        4. Set up replication between the two instances
+        5. Wait for replication to complete
+        6. Check numSubordinates on both instances
+        7. Check tombstoneNumSubordinates on both instances
+        8. Verify that numSubordinates values match on both instances
+    :expectedresults:
+        1. Container should be created successfully
+        2. User object should be created successfully
+        3. User object should be deleted successfully
+        4. Replication should be set up successfully
+        5. Replication should complete successfully
+        6. numSubordinates should be accessible on both instances
+        7. tombstoneNumSubordinates should be accessible on both instances
+        8. numSubordinates values should match on both instances
+    """
+
+    instance1 = topo_i2.ins["standalone1"]
+    instance2 = topo_i2.ins["standalone2"]
+
+    log.info("Create a container (organizational unit) on the first instance")
+    ous1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX)
+    container = ous1.create(properties={
+        'ou': 'test_container',
+        'description': 'Test container for numSubordinates replication test'
+    })
+    container_rdn = container.rdn
+    log.info(f"Created container: {container_rdn}")
+
+    log.info("Create a user object in that container")
+    users1 = UserAccounts(instance1, DEFAULT_SUFFIX, rdn=f"ou={container_rdn}")
+    test_user = users1.create_test_user(uid=1001)
+    log.info(f"Created user: {test_user.dn}")
+
+    log.info("Checking initial numSubordinates on container")
+    container_obj1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX).get(container_rdn)
+    initial_numsubordinates = container_obj1.get_attr_val_int('numSubordinates')
+    log.info(f"Initial numSubordinates: {initial_numsubordinates}")
+    assert initial_numsubordinates == 1
+
+    log.info("Delete the user object (this creates a tombstone)")
+    test_user.delete()
+
+    log.info("Checking numSubordinates after deletion")
+    after_delete_numsubordinates = container_obj1.get_attr_val_int('numSubordinates')
+    log.info(f"numSubordinates after deletion: {after_delete_numsubordinates}")
+
+    log.info("Checking tombstoneNumSubordinates after deletion")
+    try:
+        tombstone_numsubordinates = container_obj1.get_attr_val_int('tombstoneNumSubordinates')
+        log.info(f"tombstoneNumSubordinates: {tombstone_numsubordinates}")
+    except Exception as e:
+        log.info(f"tombstoneNumSubordinates not found or error: {e}")
+        tombstone_numsubordinates = 0
+
+    log.info("Set up replication between the two instances")
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    repl.create_first_supplier(instance1)
+    repl.join_supplier(instance1, instance2)
+
+    log.info("Wait for replication to complete")
+    repl.wait_for_replication(instance1, instance2)
+
+    log.info("Check numSubordinates on both instances")
+    container_obj1 = OrganizationalUnits(instance1, DEFAULT_SUFFIX).get(container_rdn)
+    numsubordinates_instance1 = container_obj1.get_attr_val_int('numSubordinates')
+    log.info(f"numSubordinates on instance1: {numsubordinates_instance1}")
+
+    container_obj2 = OrganizationalUnits(instance2, DEFAULT_SUFFIX).get(container_rdn)
+    numsubordinates_instance2 = container_obj2.get_attr_val_int('numSubordinates')
+    log.info(f"numSubordinates on instance2: {numsubordinates_instance2}")
+
+    log.info("Check tombstoneNumSubordinates on both instances")
+    try:
+        tombstone_numsubordinates_instance1 = container_obj1.get_attr_val_int('tombstoneNumSubordinates')
+        log.info(f"tombstoneNumSubordinates on instance1: {tombstone_numsubordinates_instance1}")
+    except Exception as e:
+        log.info(f"tombstoneNumSubordinates not found on instance1: {e}")
+        tombstone_numsubordinates_instance1 = 0
+
+    try:
+        tombstone_numsubordinates_instance2 = container_obj2.get_attr_val_int('tombstoneNumSubordinates')
+        log.info(f"tombstoneNumSubordinates on instance2: {tombstone_numsubordinates_instance2}")
+    except Exception as e:
+        log.info(f"tombstoneNumSubordinates not found on instance2: {e}")
+        tombstone_numsubordinates_instance2 = 0
+
+    log.info("Verify that numSubordinates values match on both instances")
+    log.info(f"Comparison: instance1 numSubordinates={numsubordinates_instance1}, "
+             f"instance2 numSubordinates={numsubordinates_instance2}")
+    log.info(f"Comparison: instance1 tombstoneNumSubordinates={tombstone_numsubordinates_instance1}, "
+             f"instance2 tombstoneNumSubordinates={tombstone_numsubordinates_instance2}")
+
+    assert numsubordinates_instance1 == numsubordinates_instance2, (
+        f"numSubordinates mismatch: instance1 has {numsubordinates_instance1}, "
+        f"instance2 has {numsubordinates_instance2}. "
+    )
+    assert tombstone_numsubordinates_instance1 == tombstone_numsubordinates_instance2, (
+        f"tombstoneNumSubordinates mismatch: instance1 has {tombstone_numsubordinates_instance1}, "
+        f"instance2 has {tombstone_numsubordinates_instance2}. "
+    )
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
\ No newline at end of file
--
2.49.0

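[Editor's note: illustrative sketch, not part of the patch series. numSubordinates and tombstoneNumSubordinates are operational attributes, so a manual reproduction has to request them explicitly; a minimal python-ldap sketch follows, where the URL, credentials, and DN are placeholders.]

# Illustrative only: fetch the operational attributes the test compares.
import ldap

conn = ldap.initialize('ldap://localhost:389')          # placeholder URL
conn.simple_bind_s('cn=Directory Manager', 'password')  # placeholder creds
res = conn.search_s('ou=test_container,dc=example,dc=com', ldap.SCOPE_BASE,
                    '(objectClass=*)',
                    ['numSubordinates', 'tombstoneNumSubordinates'])
print(res)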
814
0027-Issue-6884-Mask-password-hashes-in-audit-logs-6885.patch
Normal file
@ -0,0 +1,814 @@
From 14b1407abc196df947fa50d48946ed072e4ea772 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Mon, 28 Jul 2025 15:41:29 -0700
Subject: [PATCH] Issue 6884 - Mask password hashes in audit logs (#6885)

Description: Fix the audit log functionality to mask password hash values for
userPassword, nsslapd-rootpw, nsmultiplexorcredentials, nsds5ReplicaCredentials,
and nsds5ReplicaBootstrapCredentials attributes in ADD and MODIFY operations.
Update auditlog.c to detect password attributes and replace their values with
asterisks (**********************) in both LDIF and JSON audit log formats.
Add a comprehensive test suite audit_password_masking_test.py to verify
password masking works correctly across all log formats and operation types.

Fixes: https://github.com/389ds/389-ds-base/issues/6884

Reviewed by: @mreynolds389, @vashirov (Thanks!!)
---
 .../logging/audit_password_masking_test.py | 501 ++++++++++++++++++
 ldap/servers/slapd/auditlog.c | 170 +++++-
 ldap/servers/slapd/slapi-private.h | 1 +
 src/lib389/lib389/chaining.py | 3 +-
 4 files changed, 652 insertions(+), 23 deletions(-)
 create mode 100644 dirsrvtests/tests/suites/logging/audit_password_masking_test.py

diff --git a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
new file mode 100644
index 000000000..3b6a54849
--- /dev/null
+++ b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
@@ -0,0 +1,501 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2025 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import logging
+import pytest
+import os
+import re
+import time
+import ldap
+from lib389._constants import DEFAULT_SUFFIX, DN_DM, PW_DM
+from lib389.topologies import topology_m2 as topo
+from lib389.idm.user import UserAccounts
+from lib389.dirsrv_log import DirsrvAuditJSONLog
+from lib389.plugins import ChainingBackendPlugin
+from lib389.chaining import ChainingLinks
+from lib389.agreement import Agreements
+from lib389.replica import ReplicationManager, Replicas
+from lib389.idm.directorymanager import DirectoryManager
+
+log = logging.getLogger(__name__)
+
+MASKED_PASSWORD = "**********************"
+TEST_PASSWORD = "MySecret123"
+TEST_PASSWORD_2 = "NewPassword789"
+TEST_PASSWORD_3 = "NewPassword101"
+
+
+def setup_audit_logging(inst, log_format='default', display_attrs=None):
+    """Configure audit logging settings"""
+    inst.config.replace('nsslapd-auditlog-logbuffering', 'off')
+    inst.config.replace('nsslapd-auditlog-logging-enabled', 'on')
+    inst.config.replace('nsslapd-auditlog-log-format', log_format)
+
+    if display_attrs is not None:
+        inst.config.replace('nsslapd-auditlog-display-attrs', display_attrs)
+
+    inst.deleteAuditLogs()
+
+
+def check_password_masked(inst, log_format, expected_password, actual_password):
+    """Helper function to check password masking in audit logs"""
+
+    time.sleep(1)  # Allow log to flush
+
+    # List of all password/credential attributes that should be masked
+    password_attributes = [
+        'userPassword',
+        'nsslapd-rootpw',
+        'nsmultiplexorcredentials',
+        'nsDS5ReplicaCredentials',
+        'nsDS5ReplicaBootstrapCredentials'
+    ]
+
+    # Get password schemes to check for hash leakage
+    user_password_scheme = inst.config.get_attr_val_utf8('passwordStorageScheme')
+    root_password_scheme = inst.config.get_attr_val_utf8('nsslapd-rootpwstoragescheme')
+
+    if log_format == 'json':
+        # Check JSON format logs
+        audit_log = DirsrvAuditJSONLog(inst)
+        log_lines = audit_log.readlines()
+
+        found_masked = False
+        found_actual = False
+        found_hashed = False
+
+        for line in log_lines:
+            # Check if any password attribute is present in the line
+            for attr in password_attributes:
+                if attr in line:
+                    if expected_password in line:
+                        found_masked = True
+                    if actual_password in line:
+                        found_actual = True
+                    # Check for password scheme indicators (hashed passwords)
+                    if user_password_scheme and f'{{{user_password_scheme}}}' in line:
+                        found_hashed = True
+                    if root_password_scheme and f'{{{root_password_scheme}}}' in line:
+                        found_hashed = True
+                    break  # Found a password attribute, no need to check others for this line
+
+    else:
+        # Check LDIF format logs
+        found_masked = False
+        found_actual = False
+        found_hashed = False
+
+        # Check each password attribute for masked password
+        for attr in password_attributes:
+            if inst.ds_audit_log.match(f"{attr}: {re.escape(expected_password)}"):
+                found_masked = True
+            if inst.ds_audit_log.match(f"{attr}: {actual_password}"):
+                found_actual = True
+
+        # Check for hashed passwords in LDIF format
+        if user_password_scheme:
+            if inst.ds_audit_log.match(f"userPassword: {{{user_password_scheme}}}"):
+                found_hashed = True
+        if root_password_scheme:
+            if inst.ds_audit_log.match(f"nsslapd-rootpw: {{{root_password_scheme}}}"):
+                found_hashed = True
+
+    # Delete audit logs to avoid interference with other tests
+    # We need to reset the root password to default as deleteAuditLogs()
+    # opens a new connection with the default password
+    dm = DirectoryManager(inst)
+    dm.change_password(PW_DM)
+    inst.deleteAuditLogs()
+
+    return found_masked, found_actual, found_hashed
+
+
+@pytest.mark.parametrize("log_format,display_attrs", [
+    ("default", None),
+    ("default", "*"),
+    ("default", "userPassword"),
+    ("json", None),
+    ("json", "*"),
+    ("json", "userPassword")
+])
+def test_password_masking_add_operation(topo, log_format, display_attrs):
+    """Test password masking in ADD operations
+
+    :id: 4358bd75-bcc7-401c-b492-d3209b10412d
+    :parametrized: yes
+    :setup: Standalone Instance
+    :steps:
+        1. Configure audit logging format
+        2. Add user with password
+        3. Check that password is masked in audit log
+        4. Verify actual password does not appear in log
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Password should be masked with asterisks
+        4. Actual password should not be found in log
+    """
+    inst = topo.ms['supplier1']
+    setup_audit_logging(inst, log_format, display_attrs)
+
+    users = UserAccounts(inst, DEFAULT_SUFFIX)
+    user = None
+
+    try:
+        user = users.create(properties={
+            'uid': 'test_add_pwd_mask',
+            'cn': 'Test Add User',
+            'sn': 'User',
+            'uidNumber': '1000',
+            'gidNumber': '1000',
+            'homeDirectory': '/home/test_add',
+            'userPassword': TEST_PASSWORD
+        })
+
+        found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
+
+        assert found_masked, f"Masked password not found in {log_format} ADD operation"
+        assert not found_actual, f"Actual password found in {log_format} ADD log (should be masked)"
+        assert not found_hashed, f"Hashed password found in {log_format} ADD log (should be masked)"
+
+    finally:
+        if user is not None:
+            try:
+                user.delete()
+            except:
+                pass
+
+
+@pytest.mark.parametrize("log_format,display_attrs", [
+    ("default", None),
+    ("default", "*"),
+    ("default", "userPassword"),
+    ("json", None),
+    ("json", "*"),
+    ("json", "userPassword")
+])
+def test_password_masking_modify_operation(topo, log_format, display_attrs):
+    """Test password masking in MODIFY operations
+
+    :id: e6963aa9-7609-419c-aae2-1d517aa434bd
+    :parametrized: yes
+    :setup: Standalone Instance
+    :steps:
+        1. Configure audit logging format
+        2. Add user without password
+        3. Add password via MODIFY operation
+        4. Check that password is masked in audit log
+        5. Modify password to new value
+        6. Check that new password is also masked
+        7. Verify actual passwords do not appear in log
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Password should be masked with asterisks
+        5. Success
+        6. New password should be masked with asterisks
+        7. No actual password values should be found in log
+    """
+    inst = topo.ms['supplier1']
+    setup_audit_logging(inst, log_format, display_attrs)
+
+    users = UserAccounts(inst, DEFAULT_SUFFIX)
+    user = None
+
+    try:
+        user = users.create(properties={
+            'uid': 'test_modify_pwd_mask',
+            'cn': 'Test Modify User',
+            'sn': 'User',
+            'uidNumber': '2000',
+            'gidNumber': '2000',
+            'homeDirectory': '/home/test_modify'
+        })
+
+        user.replace('userPassword', TEST_PASSWORD)
+
+        found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
+        assert found_masked, f"Masked password not found in {log_format} MODIFY operation (first password)"
+        assert not found_actual, f"Actual password found in {log_format} MODIFY log (should be masked)"
+        assert not found_hashed, f"Hashed password found in {log_format} MODIFY log (should be masked)"
+
+        user.replace('userPassword', TEST_PASSWORD_2)
+
+        found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
+        assert found_masked_2, f"Masked password not found in {log_format} MODIFY operation (second password)"
+        assert not found_actual_2, f"Second actual password found in {log_format} MODIFY log (should be masked)"
+        assert not found_hashed_2, f"Second hashed password found in {log_format} MODIFY log (should be masked)"
+
+    finally:
+        if user is not None:
+            try:
+                user.delete()
+            except:
+                pass
+
+
+@pytest.mark.parametrize("log_format,display_attrs", [
+    ("default", None),
+    ("default", "*"),
+    ("default", "nsslapd-rootpw"),
+    ("json", None),
+    ("json", "*"),
+    ("json", "nsslapd-rootpw")
+])
+def test_password_masking_rootpw_modify_operation(topo, log_format, display_attrs):
+    """Test password masking for nsslapd-rootpw MODIFY operations
+
+    :id: ec8c9fd4-56ba-4663-ab65-58efb3b445e4
+    :parametrized: yes
+    :setup: Standalone Instance
+    :steps:
+        1. Configure audit logging format
+        2. Modify nsslapd-rootpw in configuration
+        3. Check that root password is masked in audit log
+        4. Modify root password to new value
+        5. Check that new root password is also masked
+        6. Verify actual root passwords do not appear in log
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Root password should be masked with asterisks
+        4. Success
+        5. New root password should be masked with asterisks
+        6. No actual root password values should be found in log
+    """
+    inst = topo.ms['supplier1']
+    setup_audit_logging(inst, log_format, display_attrs)
+    dm = DirectoryManager(inst)
+
+    try:
+        dm.change_password(TEST_PASSWORD)
+        dm.rebind(TEST_PASSWORD)
+
+        found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
+        assert found_masked, f"Masked root password not found in {log_format} MODIFY operation (first root password)"
+        assert not found_actual, f"Actual root password found in {log_format} MODIFY log (should be masked)"
+        assert not found_hashed, f"Hashed root password found in {log_format} MODIFY log (should be masked)"
+
+        dm.change_password(TEST_PASSWORD_2)
+        dm.rebind(TEST_PASSWORD_2)
+
+        found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
+        assert found_masked_2, f"Masked root password not found in {log_format} MODIFY operation (second root password)"
+        assert not found_actual_2, f"Second actual root password found in {log_format} MODIFY log (should be masked)"
+        assert not found_hashed_2, f"Second hashed root password found in {log_format} MODIFY log (should be masked)"
+
+    finally:
+        dm.change_password(PW_DM)
+        dm.rebind(PW_DM)
+
+
+@pytest.mark.parametrize("log_format,display_attrs", [
+    ("default", None),
+    ("default", "*"),
+    ("default", "nsmultiplexorcredentials"),
+    ("json", None),
+    ("json", "*"),
+    ("json", "nsmultiplexorcredentials")
+])
+def test_password_masking_multiplexor_credentials(topo, log_format, display_attrs):
+    """Test password masking for nsmultiplexorcredentials in chaining/multiplexor configurations
+
+    :id: 161a9498-b248-4926-90be-a696a36ed36e
+    :parametrized: yes
+    :setup: Standalone Instance
+    :steps:
+        1. Configure audit logging format
+        2. Create a chaining backend configuration entry with nsmultiplexorcredentials
+        3. Check that multiplexor credentials are masked in audit log
+        4. Modify the credentials
+        5. Check that updated credentials are also masked
+        6. Verify actual credentials do not appear in log
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Multiplexor credentials should be masked with asterisks
+        4. Success
+        5. Updated credentials should be masked with asterisks
+        6. No actual credential values should be found in log
+    """
+    inst = topo.ms['supplier1']
+    setup_audit_logging(inst, log_format, display_attrs)
+
+    # Enable chaining plugin and create chaining link
+    chain_plugin = ChainingBackendPlugin(inst)
+    chain_plugin.enable()
+
+    chains = ChainingLinks(inst)
+    chain = None
+
+    try:
+        # Create chaining link with multiplexor credentials
+        chain = chains.create(properties={
+            'cn': 'testchain',
+            'nsfarmserverurl': 'ldap://localhost:389/',
+            'nsslapd-suffix': 'dc=example,dc=com',
+            'nsmultiplexorbinddn': 'cn=manager',
+            'nsmultiplexorcredentials': TEST_PASSWORD,
+            'nsCheckLocalACI': 'on',
+            'nsConnectionLife': '30',
+        })
+
+        found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
+        assert found_masked, f"Masked multiplexor credentials not found in {log_format} ADD operation"
+        assert not found_actual, f"Actual multiplexor credentials found in {log_format} ADD log (should be masked)"
+        assert not found_hashed, f"Hashed multiplexor credentials found in {log_format} ADD log (should be masked)"
+
+        # Modify the credentials
+        chain.replace('nsmultiplexorcredentials', TEST_PASSWORD_2)
+
+        found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
+        assert found_masked_2, f"Masked multiplexor credentials not found in {log_format} MODIFY operation"
+        assert not found_actual_2, f"Actual multiplexor credentials found in {log_format} MODIFY log (should be masked)"
+        assert not found_hashed_2, f"Hashed multiplexor credentials found in {log_format} MODIFY log (should be masked)"
+
+    finally:
+        chain_plugin.disable()
+        if chain is not None:
+            inst.delete_branch_s(chain.dn, ldap.SCOPE_ONELEVEL)
+            chain.delete()
+
+
+@pytest.mark.parametrize("log_format,display_attrs", [
+    ("default", None),
+    ("default", "*"),
+    ("default", "nsDS5ReplicaCredentials"),
+    ("json", None),
+    ("json", "*"),
+    ("json", "nsDS5ReplicaCredentials")
+])
+def test_password_masking_replica_credentials(topo, log_format, display_attrs):
+    """Test password masking for nsDS5ReplicaCredentials in replication agreements
+
+    :id: 7bf9e612-1b7c-49af-9fc0-de4c7df84b2a
+    :parametrized: yes
+    :setup: Standalone Instance
+    :steps:
+        1. Configure audit logging format
+        2. Create a replication agreement entry with nsDS5ReplicaCredentials
+        3. Check that replica credentials are masked in audit log
+        4. Modify the credentials
+        5. Check that updated credentials are also masked
+        6. Verify actual credentials do not appear in log
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Replica credentials should be masked with asterisks
+        4. Success
+        5. Updated credentials should be masked with asterisks
+        6. No actual credential values should be found in log
+    """
+    inst = topo.ms['supplier2']
+    setup_audit_logging(inst, log_format, display_attrs)
+    agmt = None
+
+    try:
+        replicas = Replicas(inst)
+        replica = replicas.get(DEFAULT_SUFFIX)
+        agmts = replica.get_agreements()
+        agmt = agmts.create(properties={
+            'cn': 'testagmt',
+            'nsDS5ReplicaHost': 'localhost',
+            'nsDS5ReplicaPort': '389',
+            'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config',
+            'nsDS5ReplicaCredentials': TEST_PASSWORD,
+            'nsDS5ReplicaRoot': DEFAULT_SUFFIX
+        })
+
+        found_masked, found_actual, found_hashed = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD)
+        assert found_masked, f"Masked replica credentials not found in {log_format} ADD operation"
+        assert not found_actual, f"Actual replica credentials found in {log_format} ADD log (should be masked)"
+        assert not found_hashed, f"Hashed replica credentials found in {log_format} ADD log (should be masked)"
+
+        # Modify the credentials
+        agmt.replace('nsDS5ReplicaCredentials', TEST_PASSWORD_2)
+
+        found_masked_2, found_actual_2, found_hashed_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
+        assert found_masked_2, f"Masked replica credentials not found in {log_format} MODIFY operation"
+        assert not found_actual_2, f"Actual replica credentials found in {log_format} MODIFY log (should be masked)"
+        assert not found_hashed_2, f"Hashed replica credentials found in {log_format} MODIFY log (should be masked)"
+
+    finally:
+        if agmt is not None:
+            agmt.delete()
+
+
+@pytest.mark.parametrize("log_format,display_attrs", [
+    ("default", None),
+    ("default", "*"),
+    ("default", "nsDS5ReplicaBootstrapCredentials"),
+    ("json", None),
+    ("json", "*"),
+    ("json", "nsDS5ReplicaBootstrapCredentials")
+])
+def test_password_masking_bootstrap_credentials(topo, log_format, display_attrs):
+    """Test password masking for nsDS5ReplicaCredentials and nsDS5ReplicaBootstrapCredentials in replication agreements
+
+    :id: 248bd418-ffa4-4733-963d-2314c60b7c5b
+    :parametrized: yes
+    :setup: Standalone Instance
+    :steps:
+        1. Configure audit logging format
+        2. Create a replication agreement entry with both nsDS5ReplicaCredentials and nsDS5ReplicaBootstrapCredentials
+        3. Check that both credentials are masked in audit log
+        4. Modify both credentials
+        5. Check that both updated credentials are also masked
+        6. Verify actual credentials do not appear in log
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Both credentials should be masked with asterisks
+        4. Success
+        5. Both updated credentials should be masked with asterisks
+        6. No actual credential values should be found in log
+    """
+    inst = topo.ms['supplier2']
+    setup_audit_logging(inst, log_format, display_attrs)
+    agmt = None
+
+    try:
+        replicas = Replicas(inst)
+        replica = replicas.get(DEFAULT_SUFFIX)
+        agmts = replica.get_agreements()
+        agmt = agmts.create(properties={
+            'cn': 'testbootstrapagmt',
+            'nsDS5ReplicaHost': 'localhost',
+            'nsDS5ReplicaPort': '389',
+            'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config',
+            'nsDS5ReplicaCredentials': TEST_PASSWORD,
+            'nsDS5replicabootstrapbinddn': 'cn=bootstrap manager,cn=config',
+            'nsDS5ReplicaBootstrapCredentials': TEST_PASSWORD_2,
+            'nsDS5ReplicaRoot': DEFAULT_SUFFIX
+        })
+
+        found_masked_bootstrap, found_actual_bootstrap, found_hashed_bootstrap = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_2)
+        assert found_masked_bootstrap, f"Masked bootstrap credentials not found in {log_format} ADD operation"
+        assert not found_actual_bootstrap, f"Actual bootstrap credentials found in {log_format} ADD log (should be masked)"
+        assert not found_hashed_bootstrap, f"Hashed bootstrap credentials found in {log_format} ADD log (should be masked)"
+
+        agmt.replace('nsDS5ReplicaBootstrapCredentials', TEST_PASSWORD_3)
+
+        found_masked_bootstrap_2, found_actual_bootstrap_2, found_hashed_bootstrap_2 = check_password_masked(inst, log_format, MASKED_PASSWORD, TEST_PASSWORD_3)
+        assert found_masked_bootstrap_2, f"Masked bootstrap credentials not found in {log_format} MODIFY operation"
+        assert not found_actual_bootstrap_2, f"Actual bootstrap credentials found in {log_format} MODIFY log (should be masked)"
+        assert not found_hashed_bootstrap_2, f"Hashed bootstrap credentials found in {log_format} MODIFY log (should be masked)"
+
+    finally:
+        if agmt is not None:
+            agmt.delete()
+
+
+
+if __name__ == '__main__':
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main(["-s", CURRENT_FILE])
\ No newline at end of file
diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 1121aef35..7b591e072 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -39,6 +39,89 @@ static void write_audit_file(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
 
 static const char *modrdn_changes[4];
 
+/* Helper function to check if an attribute is a password that needs masking */
+static int
+is_password_attribute(const char *attr_name)
+{
+    return (strcasecmp(attr_name, SLAPI_USERPWD_ATTR) == 0 ||
+            strcasecmp(attr_name, CONFIG_ROOTPW_ATTRIBUTE) == 0 ||
+            strcasecmp(attr_name, SLAPI_MB_CREDENTIALS) == 0 ||
+            strcasecmp(attr_name, SLAPI_REP_CREDENTIALS) == 0 ||
+            strcasecmp(attr_name, SLAPI_REP_BOOTSTRAP_CREDENTIALS) == 0);
+}
+
+/* Helper function to create a masked string representation of an entry */
+static char *
+create_masked_entry_string(Slapi_Entry *original_entry, int *len)
+{
+    Slapi_Attr *attr = NULL;
+    char *entry_str = NULL;
+    char *current_pos = NULL;
+    char *line_start = NULL;
+    char *next_line = NULL;
+    char *colon_pos = NULL;
+    int has_password_attrs = 0;
+
+    if (original_entry == NULL) {
+        return NULL;
+    }
+
+    /* Single pass through attributes to check for password attributes */
+    for (slapi_entry_first_attr(original_entry, &attr); attr != NULL;
+         slapi_entry_next_attr(original_entry, attr, &attr)) {
+
+        char *attr_name = NULL;
+        slapi_attr_get_type(attr, &attr_name);
+
+        if (is_password_attribute(attr_name)) {
+            has_password_attrs = 1;
+            break;
+        }
+    }
+
+    /* If no password attributes, return original string - no masking needed */
+    entry_str = slapi_entry2str(original_entry, len);
+    if (!has_password_attrs) {
+        return entry_str;
+    }
+
+    /* Process the string in-place, replacing password values */
+    current_pos = entry_str;
+    while ((line_start = current_pos) != NULL && *line_start != '\0') {
+        /* Find the end of current line */
+        next_line = strchr(line_start, '\n');
+        if (next_line != NULL) {
+            *next_line = '\0'; /* Temporarily terminate line */
+            current_pos = next_line + 1;
+        } else {
+            current_pos = NULL; /* Last line */
+        }
+
+        /* Find the colon that separates attribute name from value */
+        colon_pos = strchr(line_start, ':');
+        if (colon_pos != NULL) {
+            char saved_colon = *colon_pos;
+            *colon_pos = '\0'; /* Temporarily null-terminate attribute name */
+
+            /* Check if this is a password attribute that needs masking */
+            if (is_password_attribute(line_start)) {
+                strcpy(colon_pos + 1, " **********************");
+            }
+
+            *colon_pos = saved_colon; /* Restore colon */
+        }
+
+        /* Restore newline if it was there */
+        if (next_line != NULL) {
+            *next_line = '\n';
+        }
+    }
+
+    /* Update length since we may have shortened the string */
+    *len = strlen(entry_str);
+    return entry_str; /* Return the modified original string */
+}
+
 void
 write_audit_log_entry(Slapi_PBlock *pb)
 {
@@ -282,10 +365,31 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object
 {
     slapi_entry_attr_find(entry, req_attr, &entry_attr);
     if (entry_attr) {
-        if (use_json) {
-            log_entry_attr_json(entry_attr, req_attr, id_list);
+        if (strcmp(req_attr, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) {
+            /* Do not write the unhashed clear-text password */
+            continue;
+        }
+
+        /* Check if this is a password attribute that needs masking */
+        if (is_password_attribute(req_attr)) {
+            /* userpassword/rootdn password - mask the value */
+            if (use_json) {
+                json_object *secret_obj = json_object_new_object();
+                json_object_object_add(secret_obj, req_attr,
+                                       json_object_new_string("**********************"));
+                json_object_array_add(id_list, secret_obj);
+            } else {
+                addlenstr(l, "#");
+                addlenstr(l, req_attr);
+                addlenstr(l, ": **********************\n");
+            }
         } else {
-            log_entry_attr(entry_attr, req_attr, l);
+            /* Regular attribute - log normally */
+            if (use_json) {
+                log_entry_attr_json(entry_attr, req_attr, id_list);
+            } else {
+                log_entry_attr(entry_attr, req_attr, l);
+            }
         }
     }
 }
@@ -300,9 +404,7 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object
             continue;
         }
 
-        if (strcasecmp(attr, SLAPI_USERPWD_ATTR) == 0 ||
-            strcasecmp(attr, CONFIG_ROOTPW_ATTRIBUTE) == 0)
-        {
+        if (is_password_attribute(attr)) {
             /* userpassword/rootdn password - mask the value */
             if (use_json) {
                 json_object *secret_obj = json_object_new_object();
@@ -312,7 +414,7 @@ add_entry_attrs_ext(Slapi_Entry *entry, lenstr *l, PRBool use_json, json_object
             } else {
                 addlenstr(l, "#");
                 addlenstr(l, attr);
-                addlenstr(l, ": ****************************\n");
+                addlenstr(l, ": **********************\n");
             }
             continue;
         }
@@ -481,6 +583,9 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
             }
         }
 
+        /* Check if this is a password attribute that needs masking */
+        int is_password_attr = is_password_attribute(mods[j]->mod_type);
+
         mod = json_object_new_object();
         switch (operationtype) {
         case LDAP_MOD_ADD:
@@ -505,7 +610,12 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
             json_object *val_list = NULL;
             val_list = json_object_new_array();
             for (size_t i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) {
-                json_object_array_add(val_list, json_object_new_string(mods[j]->mod_bvalues[i]->bv_val));
+                if (is_password_attr) {
+                    /* Mask password values */
+                    json_object_array_add(val_list, json_object_new_string("**********************"));
+                } else {
+                    json_object_array_add(val_list, json_object_new_string(mods[j]->mod_bvalues[i]->bv_val));
+                }
             }
             json_object_object_add(mod, "values", val_list);
         }
@@ -517,8 +627,11 @@ write_audit_file_json(Slapi_PBlock *pb, Slapi_Entry *entry, int logtype,
     }
     case SLAPI_OPERATION_ADD: {
         int len;
+
         e = change;
-        tmp = slapi_entry2str(e, &len);
+
+        /* Create a masked string representation for password attributes */
+        tmp = create_masked_entry_string(e, &len);
         tmpsave = tmp;
         while ((tmp = strchr(tmp, '\n')) != NULL) {
             tmp++;
@@ -665,6 +778,10 @@ write_audit_file(
             break;
         }
     }
+
+    /* Check if this is a password attribute that needs masking */
+    int is_password_attr = is_password_attribute(mods[j]->mod_type);
+
     switch (operationtype) {
     case LDAP_MOD_ADD:
         addlenstr(l, "add: ");
@@ -689,18 +806,27 @@ write_audit_file(
         break;
     }
     if (operationtype != LDAP_MOD_IGNORE) {
-        for (i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) {
-            char *buf, *bufp;
-            len = strlen(mods[j]->mod_type);
-            len = LDIF_SIZE_NEEDED(len, mods[j]->mod_bvalues[i]->bv_len) + 1;
-            buf = slapi_ch_malloc(len);
-            bufp = buf;
-            slapi_ldif_put_type_and_value_with_options(&bufp, mods[j]->mod_type,
-                                                       mods[j]->mod_bvalues[i]->bv_val,
-                                                       mods[j]->mod_bvalues[i]->bv_len, 0);
-            *bufp = '\0';
-            addlenstr(l, buf);
-            slapi_ch_free((void **)&buf);
+        if (is_password_attr) {
+            /* Add masked password */
+            for (i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) {
+                addlenstr(l, mods[j]->mod_type);
+                addlenstr(l, ": **********************\n");
+            }
+        } else {
+            /* Add actual values for non-password attributes */
+            for (i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++) {
+                char *buf, *bufp;
+                len = strlen(mods[j]->mod_type);
+                len = LDIF_SIZE_NEEDED(len, mods[j]->mod_bvalues[i]->bv_len) + 1;
+                buf = slapi_ch_malloc(len);
+                bufp = buf;
+                slapi_ldif_put_type_and_value_with_options(&bufp, mods[j]->mod_type,
+                                                           mods[j]->mod_bvalues[i]->bv_val,
+                                                           mods[j]->mod_bvalues[i]->bv_len, 0);
+                *bufp = '\0';
+                addlenstr(l, buf);
+                slapi_ch_free((void **)&buf);
+            }
         }
     }
     addlenstr(l, "-\n");
@@ -711,7 +837,7 @@ write_audit_file(
         e = change;
         addlenstr(l, attr_changetype);
         addlenstr(l, ": add\n");
-        tmp = slapi_entry2str(e, &len);
+        tmp = create_masked_entry_string(e, &len);
         tmpsave = tmp;
         while ((tmp = strchr(tmp, '\n')) != NULL) {
             tmp++;
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index e9abf8b75..02f22fd2d 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -848,6 +848,7 @@ void task_cleanup(void);
 /* for reversible encyrption */
 #define SLAPI_MB_CREDENTIALS "nsmultiplexorcredentials"
 #define SLAPI_REP_CREDENTIALS "nsds5ReplicaCredentials"
+#define SLAPI_REP_BOOTSTRAP_CREDENTIALS "nsds5ReplicaBootstrapCredentials"
 int pw_rever_encode(Slapi_Value **vals, char *attr_name);
 int pw_rever_decode(char *cipher, char **plain, const char *attr_name);
 
diff --git a/src/lib389/lib389/chaining.py b/src/lib389/lib389/chaining.py
index 533b83ebf..33ae78c8b 100644
--- a/src/lib389/lib389/chaining.py
+++ b/src/lib389/lib389/chaining.py
@@ -134,7 +134,7 @@ class ChainingLink(DSLdapObject):
         """
 
         # Create chaining entry
-        super(ChainingLink, self).create(rdn, properties, basedn)
+        link = super(ChainingLink, self).create(rdn, properties, basedn)
 
         # Create mapping tree entry
         dn_comps = ldap.explode_dn(properties['nsslapd-suffix'][0])
@@ -149,6 +149,7 @@ class ChainingLink(DSLdapObject):
             self._mts.ensure_state(properties=mt_properties)
         except ldap.ALREADY_EXISTS:
             pass
+        return link


 class ChainingLinks(DSLdapObjects):
--
2.49.0

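[Editor's note: illustrative sketch, not part of the patch series. The test above asserts that clear-text secrets never reach the audit log; a minimal standalone scan of an audit log for a leaked secret looks like the following, where the log path is the usual default location and an assumption here.]

MASK = "**********************"

def secret_leaked(log_path, secret):
    # True if the clear-text secret ever reached the audit log
    with open(log_path) as f:
        return any(secret in line for line in f)

print(secret_leaked('/var/log/dirsrv/slapd-supplier1/audit', 'MySecret123'))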
1719
0028-Issue-6897-Fix-disk-monitoring-test-failures-and-imp.patch
Normal file
File diff suppressed because it is too large
262
0029-Issue-6778-Memory-leak-in-roles_cache_create_object_.patch
Normal file
@ -0,0 +1,262 @@
From c80554be0cea0eb5f2ab6d1e6e1fcef098304f69 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Wed, 16 Jul 2025 11:22:30 +0200
Subject: [PATCH] Issue 6778 - Memory leak in
 roles_cache_create_object_from_entry part 2

Bug Description:
Every time a role with scope DN is processed, we leak rolescopeDN.

Fix Description:
* Initialize all pointer variables to NULL
* Add additional NULL checks
* Free rolescopeDN
* Move test_rewriter_with_invalid_filter before the DB contains 90k entries
* Use task.wait() for import task completion instead of parsing logs,
  increase the timeout

Fixes: https://github.com/389ds/389-ds-base/issues/6778

Reviewed by: @progier389 (Thanks!)
---
 dirsrvtests/tests/suites/roles/basic_test.py | 164 +++++++++----------
 ldap/servers/plugins/roles/roles_cache.c | 10 +-
 2 files changed, 82 insertions(+), 92 deletions(-)

diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py
index d92d6f0c3..ec208bae9 100644
--- a/dirsrvtests/tests/suites/roles/basic_test.py
+++ b/dirsrvtests/tests/suites/roles/basic_test.py
@@ -510,6 +510,76 @@ def test_vattr_on_managed_role(topo, request):
 
     request.addfinalizer(fin)
 
+def test_rewriter_with_invalid_filter(topo, request):
+    """Test that server does not crash when having
+    invalid filter in filtered role
+
+    :id: 5013b0b2-0af6-11f0-8684-482ae39447e5
+    :setup: standalone server
+    :steps:
+        1. Setup filtered role with good filter
+        2. Setup nsrole rewriter
+        3. Restart the server
+        4. Search for entries
+        5. Setup filtered role with bad filter
+        6. Search for entries
+    :expectedresults:
+        1. Operation should succeed
+        2. Operation should succeed
+        3. Operation should succeed
+        4. Operation should succeed
+        5. Operation should succeed
+        6. Operation should succeed
+    """
+    inst = topo.standalone
+    entries = []
+
+    def fin():
+        inst.start()
+        for entry in entries:
+            entry.delete()
+    request.addfinalizer(fin)
+
+    # Setup filtered role
+    roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}')
+    filter_ko = '(&((objectClass=top)(objectClass=nsPerson))'
+    filter_ok = '(&(objectClass=top)(objectClass=nsPerson))'
+    role_properties = {
+        'cn': 'TestFilteredRole',
+        'nsRoleFilter': filter_ok,
+        'description': 'Test good filter',
+    }
+    role = roles.create(properties=role_properties)
+    entries.append(role)
+
+    # Setup nsrole rewriter
+    rewriters = Rewriters(inst)
+    rewriter_properties = {
+        "cn": "nsrole",
+        "nsslapd-libpath": 'libroles-plugin',
+        "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter',
+    }
+    rewriter = rewriters.ensure_state(properties=rewriter_properties)
+    entries.append(rewriter)
+
+    # Restart the instance
+    inst.restart()
+
+    # Search for entries
+    entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
+
+    # Set bad filter
+    role_properties = {
+        'cn': 'TestFilteredRole',
+        'nsRoleFilter': filter_ko,
+        'description': 'Test bad filter',
+    }
+    role.ensure_state(properties=role_properties)
+
+    # Search for entries
+    entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
+
+
 def test_managed_and_filtered_role_rewrite(topo, request):
     """Test that filter components containing 'nsrole=xxx'
     are reworked if xxx is either a filtered role or a managed
@@ -581,17 +651,11 @@ def test_managed_and_filtered_role_rewrite(topo, request):
     PARENT="ou=people,%s" % DEFAULT_SUFFIX
     dbgen_users(topo.standalone, 90000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT)
 
-    # online import
+    # Online import
     import_task = ImportTask(topo.standalone)
     import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
-    # Check for up to 200sec that the completion
-    for i in range(1, 20):
-        if len(topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9000.*')) > 0:
-            break
-        time.sleep(10)
-    import_complete = topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9000.*')
-    assert (len(import_complete) == 1)
-
+    import_task.wait(timeout=400)
+    assert import_task.get_exit_code() == 0
     # Restart server
     topo.standalone.restart()
 
@@ -715,17 +779,11 @@ def test_not_such_entry_role_rewrite(topo, request):
     PARENT="ou=people,%s" % DEFAULT_SUFFIX
     dbgen_users(topo.standalone, 91000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT)
 
-    # online import
+    # Online import
     import_task = ImportTask(topo.standalone)
     import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
-    # Check for up to 200sec that the completion
-    for i in range(1, 20):
-        if len(topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9100.*')) > 0:
-            break
-        time.sleep(10)
-    import_complete = topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9100.*')
-    assert (len(import_complete) == 1)
-
+    import_task.wait(timeout=400)
+    assert import_task.get_exit_code() == 0
     # Restart server
     topo.standalone.restart()
 
@@ -769,76 +827,6 @@ def test_not_such_entry_role_rewrite(topo, request):
     request.addfinalizer(fin)
 
 
-def test_rewriter_with_invalid_filter(topo, request):
-    """Test that server does not crash when having
-    invalid filter in filtered role
-
-    :id: 5013b0b2-0af6-11f0-8684-482ae39447e5
-    :setup: standalone server
-    :steps:
-        1. Setup filtered role with good filter
-        2. Setup nsrole rewriter
-        3. Restart the server
-        4. Search for entries
-        5. Setup filtered role with bad filter
-        6. Search for entries
-    :expectedresults:
-        1. Operation should succeed
-        2. Operation should succeed
-        3. Operation should succeed
-        4. Operation should succeed
-        5. Operation should succeed
-        6. Operation should succeed
-    """
-    inst = topo.standalone
-    entries = []
-
-    def fin():
-        inst.start()
-        for entry in entries:
-            entry.delete()
-    request.addfinalizer(fin)
-
-    # Setup filtered role
-    roles = FilteredRoles(inst, f'ou=people,{DEFAULT_SUFFIX}')
-    filter_ko = '(&((objectClass=top)(objectClass=nsPerson))'
-    filter_ok = '(&(objectClass=top)(objectClass=nsPerson))'
-    role_properties = {
-        'cn': 'TestFilteredRole',
-        'nsRoleFilter': filter_ok,
-        'description': 'Test good filter',
-    }
-    role = roles.create(properties=role_properties)
-    entries.append(role)
-
-    # Setup nsrole rewriter
-    rewriters = Rewriters(inst)
-    rewriter_properties = {
-        "cn": "nsrole",
-        "nsslapd-libpath": 'libroles-plugin',
-        "nsslapd-filterrewriter": 'role_nsRole_filter_rewriter',
-    }
-    rewriter = rewriters.ensure_state(properties=rewriter_properties)
-    entries.append(rewriter)
-
-    # Restart thge instance
-    inst.restart()
-
-    # Search for entries
-    entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
-
-    # Set bad filter
-    role_properties = {
-        'cn': 'TestFilteredRole',
-        'nsRoleFilter': filter_ko,
-        'description': 'Test bad filter',
-    }
-    role.ensure_state(properties=role_properties)
-
-    # Search for entries
-    entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn)
-
-
 if __name__ == "__main__":
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s -v %s" % CURRENT_FILE)
diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c
index 3e1c5b429..05cabc3a3 100644
--- a/ldap/servers/plugins/roles/roles_cache.c
+++ b/ldap/servers/plugins/roles/roles_cache.c
@@ -1117,16 +1117,17 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
 
     rolescopeDN = slapi_entry_attr_get_charptr(role_entry, ROLE_SCOPE_DN);
     if (rolescopeDN) {
-        Slapi_DN *rolescopeSDN;
-        Slapi_DN *top_rolescopeSDN, *top_this_roleSDN;
+        Slapi_DN *rolescopeSDN = NULL;
+        Slapi_DN *top_rolescopeSDN = NULL;
+        Slapi_DN *top_this_roleSDN = NULL;
 
         /* Before accepting to use this scope, first check if it belongs to the same suffix */
         rolescopeSDN = slapi_sdn_new_dn_byref(rolescopeDN);
-        if ((strlen((char *)slapi_sdn_get_ndn(rolescopeSDN)) > 0) &&
+        if (rolescopeSDN && (strlen((char *)slapi_sdn_get_ndn(rolescopeSDN)) > 0) &&
             (slapi_dn_syntax_check(NULL, (char *)slapi_sdn_get_ndn(rolescopeSDN), 1) == 0)) {
             top_rolescopeSDN = roles_cache_get_top_suffix(rolescopeSDN);
             top_this_roleSDN = roles_cache_get_top_suffix(this_role->dn);
-            if (slapi_sdn_compare(top_rolescopeSDN, top_this_roleSDN) == 0) {
+            if (top_rolescopeSDN && top_this_roleSDN && slapi_sdn_compare(top_rolescopeSDN, top_this_roleSDN) == 0) {
                 /* rolescopeDN belongs to the same suffix as the role, we can use this scope */
                 this_role->rolescopedn = rolescopeSDN;
             } else {
@@ -1148,6 +1149,7 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu
                 rolescopeDN);
             slapi_sdn_free(&rolescopeSDN);
         }
+        slapi_ch_free_string(&rolescopeDN);
     }
 
     /* Depending upon role type, pull out the remaining information we need */
--
2.49.0

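[Editor's note: illustrative sketch, not part of the patch series. The core of the test-side fix above is replacing error-log polling with the Task API; a condensed sketch of the adopted pattern, using the same lib389 names the patch uses.]

# Illustrative only: wait on the task entry instead of grepping the error log.
from lib389.tasks import ImportTask

def run_online_import(inst, ldif_path, suffix):
    task = ImportTask(inst)
    task.import_suffix_from_ldif(ldiffile=ldif_path, suffix=suffix)
    task.wait(timeout=400)            # blocks until the task records its exit code
    assert task.get_exit_code() == 0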
@ -0,0 +1,64 @@
From 2988a4ad320b7a4870cfa055bf7afd009424a15f Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Mon, 28 Jul 2025 13:16:10 +0200
Subject: [PATCH] Issue 6901 - Update changelog trimming logging - fix tests

Description:
Update changelog_trimming_test for the new error message.

Fixes: https://github.com/389ds/389-ds-base/issues/6901

Reviewed by: @progier389, @aadhikar (Thanks!)
---
.../suites/replication/changelog_trimming_test.py | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/dirsrvtests/tests/suites/replication/changelog_trimming_test.py b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py
index 2d70d328e..27d19e8fd 100644
--- a/dirsrvtests/tests/suites/replication/changelog_trimming_test.py
+++ b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py
@@ -110,7 +110,7 @@ def test_max_age(topo, setup_max_age):
do_mods(supplier, 10)

time.sleep(1) # Trimming should not have occurred
- if supplier.searchErrorsLog("Trimmed") is True:
+ if supplier.searchErrorsLog("trimmed") is True:
log.fatal('Trimming event unexpectedly occurred')
assert False

@@ -120,12 +120,12 @@ def test_max_age(topo, setup_max_age):
cl.set_trim_interval('5')

time.sleep(3) # Trimming should not have occurred
- if supplier.searchErrorsLog("Trimmed") is True:
+ if supplier.searchErrorsLog("trimmed") is True:
log.fatal('Trimming event unexpectedly occurred')
assert False

time.sleep(3) # Trimming should have occurred
- if supplier.searchErrorsLog("Trimmed") is False:
+ if supplier.searchErrorsLog("trimmed") is False:
log.fatal('Trimming event did not occur')
assert False

@@ -159,7 +159,7 @@ def test_max_entries(topo, setup_max_entries):
do_mods(supplier, 10)

time.sleep(1) # Trimming should have occurred
- if supplier.searchErrorsLog("Trimmed") is True:
+ if supplier.searchErrorsLog("trimmed") is True:
log.fatal('Trimming event unexpectedly occurred')
assert False

@@ -169,7 +169,7 @@ def test_max_entries(topo, setup_max_entries):
cl.set_trim_interval('5')

time.sleep(6) # Trimming should have occurred
- if supplier.searchErrorsLog("Trimmed") is False:
+ if supplier.searchErrorsLog("trimmed") is False:
log.fatal('Trimming event did not occur')
assert False

--
2.49.0

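The patch above only swaps the expected keyword's case; a case-insensitive match would survive a future rewording of the server message as well. A minimal sketch of that alternative (the helper name and its text-based signature are illustrative, not part of the patch):

import re

def log_has_trim_event(errors_log_text):
    # Match "Trimmed"/"trimmed"/etc. regardless of case, so another
    # wording change in the server message does not break the test again.
    return re.search(r'trimmed', errors_log_text, re.IGNORECASE) is not None
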
@ -0,0 +1,32 @@
From 36c97c19dadda7f09a1e2b3d838e12fbdc39af23 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Mon, 28 Jul 2025 13:18:26 +0200
Subject: [PATCH] Issue 6181 - RFE - Allow system to manage uid/gid at startup

Description:
Expand CapabilityBoundingSet to include CAP_FOWNER

Relates: https://github.com/389ds/389-ds-base/issues/6181
Relates: https://github.com/389ds/389-ds-base/issues/6906

Reviewed by: @progier389 (Thanks!)
---
wrappers/systemd.template.service.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in
index ada608c86..8d2b96c7e 100644
--- a/wrappers/systemd.template.service.in
+++ b/wrappers/systemd.template.service.in
@@ -29,7 +29,7 @@ MemoryAccounting=yes

# Allow non-root instances to bind to low ports.
AmbientCapabilities=CAP_NET_BIND_SERVICE
-CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_SETUID CAP_SETGID CAP_DAC_OVERRIDE CAP_CHOWN
+CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_SETUID CAP_SETGID CAP_DAC_OVERRIDE CAP_CHOWN CAP_FOWNER

PrivateTmp=on
# https://en.opensuse.org/openSUSE:Security_Features#Systemd_hardening_effort
--
2.49.0

31
0032-Issue-6468-CLI-Fix-default-error-log-level.patch
Normal file
@ -0,0 +1,31 @@
From 70256cd0e90b91733516d4434428d04aa55b39bd Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Tue, 29 Jul 2025 08:00:00 +0200
Subject: [PATCH] Issue 6468 - CLI - Fix default error log level

Description:
Default error log level is 16384

Relates: https://github.com/389ds/389-ds-base/issues/6468

Reviewed by: @droideck (Thanks!)
---
src/lib389/lib389/cli_conf/logging.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lib389/lib389/cli_conf/logging.py b/src/lib389/lib389/cli_conf/logging.py
index 124556f1f..d9ae1ab16 100644
--- a/src/lib389/lib389/cli_conf/logging.py
+++ b/src/lib389/lib389/cli_conf/logging.py
@@ -44,7 +44,7 @@ ERROR_LEVELS = {
+ "methods used for a SASL bind"
},
"default": {
- "level": 6384,
+ "level": 16384,
"desc": "Default logging level"
},
"filter": {
--
2.49.0

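For context, 16384 is the server's stock nsslapd-errorlog-level (the old 6384 was a typo). A quick way to confirm this on a live instance, sketched with the same lib389 config API the test suites in this series use; the wiring of `inst` to a connected DirSrv object is assumed:

def assert_default_errorlog_level(inst):
    # `inst` is a connected lib389 DirSrv, e.g. topology_st.standalone.
    level = inst.config.get_attr_val_utf8('nsslapd-errorlog-level')
    # The CLI table above maps the "default" bucket to 16384.
    assert level == '16384'
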
@ -0,0 +1,97 @@
From d668c477158e962ebb6fb25ccabe6d9d09f30259 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Fri, 1 Aug 2025 13:27:02 +0100
Subject: [PATCH] Issue 6768 - ns-slapd crashes when a referral is added
(#6780)

Bug description: When a paged result search is successfully run on a referred
suffix, we retrieve the search result set from the pblock and try to release
it. In this case the search result set is NULL, which triggers a SEGV during
the release.

Fix description: If the search result code is LDAP_REFERRAL, skip deletion of
the search result set. Added test case.

Fixes: https://github.com/389ds/389-ds-base/issues/6768

Reviewed by: @tbordaz, @progier389 (Thank you)
---
.../paged_results/paged_results_test.py | 46 +++++++++++++++++++
ldap/servers/slapd/opshared.c | 4 +-
2 files changed, 49 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
index fca48db0f..1bb94b53a 100644
--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py
+++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
@@ -1271,6 +1271,52 @@ def test_search_stress_abandon(create_40k_users, create_user):
paged_search(conn, create_40k_users.suffix, [req_ctrl], search_flt, searchreq_attrlist, abandon_rate=abandon_rate)


+def test_search_referral(topology_st):
+ """Test a paged search on a referred suffix doesn't crash the server.
+
+ :id: c788bdbf-965b-4f12-ac24-d4d695e2cce2
+
+ :setup: Standalone instance
+
+ :steps:
+ 1. Configure a default referral.
+ 2. Create a paged result search control.
+ 3. Paged result search on referral suffix (doesn't exist on the instance, triggering a referral).
+ 4. Check the server is still running.
+ 5. Remove referral.
+
+ :expectedresults:
+ 1. Referral successfully set.
+ 2. Control created.
+ 3. Search returns ldap.REFERRAL (10).
+ 4. Server still running.
+ 5. Referral removed.
+ """
+
+ page_size = 5
+ SEARCH_SUFFIX = "dc=referme,dc=com"
+ REFERRAL = "ldap://localhost.localdomain:389/o%3dnetscaperoot"
+
+ log.info('Configuring referral')
+ topology_st.standalone.config.set('nsslapd-referral', REFERRAL)
+ referral = topology_st.standalone.config.get_attr_val_utf8('nsslapd-referral')
+ assert (referral == REFERRAL)
+
+ log.info('Create paged result search control')
+ req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
+
+ log.info('Perform a paged result search on referred suffix, no chase')
+ with pytest.raises(ldap.REFERRAL):
+ topology_st.standalone.search_ext_s(SEARCH_SUFFIX, ldap.SCOPE_SUBTREE, serverctrls=[req_ctrl])
+
+ log.info('Confirm instance is still running')
+ assert (topology_st.standalone.status())
+
+ log.info('Remove referral')
+ topology_st.standalone.config.remove_all('nsslapd-referral')
+ referral = topology_st.standalone.config.get_attr_val_utf8('nsslapd-referral')
+ assert (referral == None)
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 545518748..a5cddfd23 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -910,7 +910,9 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
/* Free the results if not "no_such_object" */
void *sr = NULL;
slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
- be->be_search_results_release(&sr);
+ if (be->be_search_results_release != NULL) {
+ be->be_search_results_release(&sr);
+ }
}
pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx);
rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1);
--
2.49.0

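The bug description above boils down to a single client sequence: a paged search that lands on a referral while referral chasing is disabled. A stand-alone repro sketch with python-ldap, where the URL, bind DN and password are placeholders:

import ldap
from ldap.controls import SimplePagedResultsControl

conn = ldap.initialize('ldap://localhost:389')           # placeholder URL
conn.set_option(ldap.OPT_REFERRALS, 0)                   # do not chase referrals
conn.simple_bind_s('cn=Directory Manager', 'password')   # placeholder creds
page_ctrl = SimplePagedResultsControl(True, size=5, cookie='')
try:
    conn.search_ext_s('dc=referme,dc=com', ldap.SCOPE_SUBTREE,
                      serverctrls=[page_ctrl])
except ldap.REFERRAL:
    # An unpatched server could SEGV while releasing the (NULL) search
    # result set; with the fix the client simply receives the referral.
    pass
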
222
0034-Issues-6913-6886-6250-Adjust-xfail-marks-6914.patch
Normal file
@ -0,0 +1,222 @@
From e0d9deaab662468e11b08105e1b155660076b5eb Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Fri, 1 Aug 2025 09:28:39 -0700
Subject: [PATCH] Issues 6913, 6886, 6250 - Adjust xfail marks (#6914)

Description: Some of the ACI invalid syntax issues were fixed,
so we need to remove xfail marks.
Disk space issue should have a 'skipif' mark.
Display all attrs (nsslapd-auditlog-display-attrs: *) fails because of a bug.
EntryUSN inconsistency and overflow bugs were exposed with the tests.

Related: https://github.com/389ds/389-ds-base/issues/6913
Related: https://github.com/389ds/389-ds-base/issues/6886
Related: https://github.com/389ds/389-ds-base/issues/6250

Reviewed by: @vashirov (Thanks!)
---
dirsrvtests/tests/suites/acl/syntax_test.py | 13 ++++++++--
.../tests/suites/import/regression_test.py | 18 +++++++-------
.../logging/audit_password_masking_test.py | 24 +++++++++----------
.../suites/plugins/entryusn_overflow_test.py | 2 ++
4 files changed, 34 insertions(+), 23 deletions(-)

diff --git a/dirsrvtests/tests/suites/acl/syntax_test.py b/dirsrvtests/tests/suites/acl/syntax_test.py
index 4edc7fa4b..ed9919ba3 100644
--- a/dirsrvtests/tests/suites/acl/syntax_test.py
+++ b/dirsrvtests/tests/suites/acl/syntax_test.py
@@ -190,10 +190,9 @@ FAILED = [('test_targattrfilters_18',
f'(all)userdn="ldap:///anyone";)'), ]


-@pytest.mark.xfail(reason='https://bugzilla.redhat.com/show_bug.cgi?id=1691473')
@pytest.mark.parametrize("real_value", [a[1] for a in FAILED],
ids=[a[0] for a in FAILED])
-def test_aci_invalid_syntax_fail(topo, real_value):
+def test_aci_invalid_syntax_fail(topo, real_value, request):
"""Try to set wrong ACI syntax.

:id: 83c40784-fff5-49c8-9535-7064c9c19e7e
@@ -206,6 +205,16 @@ def test_aci_invalid_syntax_fail(topo, real_value):
1. It should pass
2. It should not pass
"""
+ # Mark specific test cases as xfail
+ xfail_cases = [
+ 'test_targattrfilters_18',
+ 'test_targattrfilters_20',
+ 'test_bind_rule_set_with_more_than_three'
+ ]
+
+ if request.node.callspec.id in xfail_cases:
+ pytest.xfail("DS6913 - This test case is expected to fail")
+
domain = Domain(topo.standalone, DEFAULT_SUFFIX)
with pytest.raises(ldap.INVALID_SYNTAX):
domain.add("aci", real_value)
diff --git a/dirsrvtests/tests/suites/import/regression_test.py b/dirsrvtests/tests/suites/import/regression_test.py
index e6fef89cc..61fdf8559 100644
--- a/dirsrvtests/tests/suites/import/regression_test.py
+++ b/dirsrvtests/tests/suites/import/regression_test.py
@@ -320,7 +320,7 @@ ou: myDups00001
assert standalone.ds_error_log.match('.*Duplicated DN detected.*')

@pytest.mark.tier2
-@pytest.mark.xfail(not _check_disk_space(), reason="not enough disk space for lmdb map")
+@pytest.mark.skipif(not _check_disk_space(), reason="not enough disk space for lmdb map")
@pytest.mark.xfail(ds_is_older("1.3.10.1"), reason="bz1749595 not fixed on versions older than 1.3.10.1")
def test_large_ldif2db_ancestorid_index_creation(topo, _set_mdb_map_size):
"""Import with ldif2db a large file - check that the ancestorid index creation phase has a correct performance
@@ -396,39 +396,39 @@ def test_large_ldif2db_ancestorid_index_creation(topo, _set_mdb_map_size):
log.info('Starting the server')
topo.standalone.start()

- # With lmdb there is no more any special phase for ancestorid
+ # With lmdb there is no more any special phase for ancestorid
# because ancestorsid get updated on the fly while processing the
# entryrdn (by up the parents chain to compute the parentid
- #
+ #
# But there is still a numSubordinates generation phase
if get_default_db_lib() == "mdb":
log.info('parse the errors logs to check lines with "Generating numSubordinates complete." are present')
end_numsubordinates = str(topo.standalone.ds_error_log.match(r'.*Generating numSubordinates complete.*'))[1:-1]
assert len(end_numsubordinates) > 0
-
+
else:
log.info('parse the errors logs to check lines with "Starting sort of ancestorid" are present')
start_sort_str = str(topo.standalone.ds_error_log.match(r'.*Starting sort of ancestorid non-leaf IDs*'))[1:-1]
assert len(start_sort_str) > 0
-
+
log.info('parse the errors logs to check lines with "Finished sort of ancestorid" are present')
end_sort_str = str(topo.standalone.ds_error_log.match(r'.*Finished sort of ancestorid non-leaf IDs*'))[1:-1]
assert len(end_sort_str) > 0
-
+
log.info('parse the error logs for the line with "Gathering ancestorid non-leaf IDs"')
start_ancestorid_indexing_op_str = str(topo.standalone.ds_error_log.match(r'.*Gathering ancestorid non-leaf IDs*'))[1:-1]
assert len(start_ancestorid_indexing_op_str) > 0
-
+
log.info('parse the error logs for the line with "Created ancestorid index"')
end_ancestorid_indexing_op_str = str(topo.standalone.ds_error_log.match(r'.*Created ancestorid index*'))[1:-1]
assert len(end_ancestorid_indexing_op_str) > 0
-
+
log.info('get the ancestorid non-leaf IDs indexing start and end time from the collected strings')
# Collected lines look like : '[15/May/2020:05:30:27.245967313 -0400] - INFO - bdb_get_nonleaf_ids - import userRoot: Gathering ancestorid non-leaf IDs...'
# We are getting the sec.nanosec part of the date, '27.245967313' in the above example
start_time = (start_ancestorid_indexing_op_str.split()[0]).split(':')[3]
end_time = (end_ancestorid_indexing_op_str.split()[0]).split(':')[3]
-
+
log.info('Calculate the elapsed time for the ancestorid non-leaf IDs index creation')
etime = (Decimal(end_time) - Decimal(start_time))
# The time for the ancestorid index creation should be less than 10s for an offline import of an ldif file with 100000 entries / 5 entries per node
diff --git a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
index 3b6a54849..69a36cb5d 100644
--- a/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
+++ b/dirsrvtests/tests/suites/logging/audit_password_masking_test.py
@@ -117,10 +117,10 @@ def check_password_masked(inst, log_format, expected_password, actual_password):

@pytest.mark.parametrize("log_format,display_attrs", [
("default", None),
- ("default", "*"),
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
("default", "userPassword"),
("json", None),
- ("json", "*"),
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
("json", "userPassword")
])
def test_password_masking_add_operation(topo, log_format, display_attrs):
@@ -173,10 +173,10 @@ def test_password_masking_add_operation(topo, log_format, display_attrs):

@pytest.mark.parametrize("log_format,display_attrs", [
("default", None),
- ("default", "*"),
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
("default", "userPassword"),
("json", None),
- ("json", "*"),
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
("json", "userPassword")
])
def test_password_masking_modify_operation(topo, log_format, display_attrs):
@@ -242,10 +242,10 @@ def test_password_masking_modify_operation(topo, log_format, display_attrs):

@pytest.mark.parametrize("log_format,display_attrs", [
("default", None),
- ("default", "*"),
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
("default", "nsslapd-rootpw"),
("json", None),
- ("json", "*"),
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
("json", "nsslapd-rootpw")
])
def test_password_masking_rootpw_modify_operation(topo, log_format, display_attrs):
@@ -297,10 +297,10 @@ def test_password_masking_rootpw_modify_operation(topo, log_format, display_attr

@pytest.mark.parametrize("log_format,display_attrs", [
("default", None),
- ("default", "*"),
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
("default", "nsmultiplexorcredentials"),
("json", None),
- ("json", "*"),
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
("json", "nsmultiplexorcredentials")
])
def test_password_masking_multiplexor_credentials(topo, log_format, display_attrs):
@@ -368,10 +368,10 @@ def test_password_masking_multiplexor_credentials(topo, log_format, display_attr

@pytest.mark.parametrize("log_format,display_attrs", [
("default", None),
- ("default", "*"),
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
("default", "nsDS5ReplicaCredentials"),
("json", None),
- ("json", "*"),
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
("json", "nsDS5ReplicaCredentials")
])
def test_password_masking_replica_credentials(topo, log_format, display_attrs):
@@ -432,10 +432,10 @@ def test_password_masking_replica_credentials(topo, log_format, display_attrs):

@pytest.mark.parametrize("log_format,display_attrs", [
("default", None),
- ("default", "*"),
+ pytest.param("default", "*", marks=pytest.mark.xfail(reason="DS6886")),
("default", "nsDS5ReplicaBootstrapCredentials"),
("json", None),
- ("json", "*"),
+ pytest.param("json", "*", marks=pytest.mark.xfail(reason="DS6886")),
("json", "nsDS5ReplicaBootstrapCredentials")
])
def test_password_masking_bootstrap_credentials(topo, log_format, display_attrs):
diff --git a/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
index a23d734ca..8c3a537ab 100644
--- a/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
+++ b/dirsrvtests/tests/suites/plugins/entryusn_overflow_test.py
@@ -81,6 +81,7 @@ def setup_usn_test(topology_st, request):
return created_users


+@pytest.mark.xfail(reason="DS6250")
def test_entryusn_overflow_on_add_existing_entries(topology_st, setup_usn_test):
"""Test that reproduces entryUSN overflow when adding existing entries

@@ -232,6 +233,7 @@ def test_entryusn_overflow_on_add_existing_entries(topology_st, setup_usn_test):
log.info("EntryUSN overflow test completed successfully")


+@pytest.mark.xfail(reason="DS6250")
def test_entryusn_consistency_after_failed_adds(topology_st, setup_usn_test):
"""Test that entryUSN remains consistent after failed add operations

--
2.49.0

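Two pytest idioms carry most of the patch above: attaching an xfail mark to a single parametrized case via pytest.param, and xfailing at runtime from the generated test id. A generic sketch, with test and case names that are illustrative only:

import pytest

@pytest.mark.parametrize("value", [
    "good-case",
    pytest.param("broken-case", marks=pytest.mark.xfail(reason="DS6886")),
])
def test_values(value, request):
    # Runtime variant: consult the parametrized id and bail out as xfail,
    # useful when only some ids of a shared parametrize list are broken.
    if request.node.callspec.id == "broken-case":
        pytest.xfail("known issue for this case")
    assert value == "good-case"

Unlike skip, both variants still run (or at least record) the case, so an unexpected pass shows up as XPASS when the underlying bug is fixed.
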
378
0035-Issue-6875-Fix-dsidm-tests.patch
Normal file
@ -0,0 +1,378 @@
From e48f31b756509938d69a626744b8862fc26aa3ef Mon Sep 17 00:00:00 2001
From: Lenka Doudova <lryznaro@redhat.com>
Date: Tue, 15 Jul 2025 17:17:04 +0200
Subject: [PATCH] Issue 6875 - Fix dsidm tests

Description:
Adding testing of the "full_dn" option with 'dsidm list' command for all
relevant types of entries
Removing xfail markers in dsidm role tests since the issues were
resolved

Relates: #6875

Author: Lenka Doudova

Reviewed by: ???
---
.../tests/suites/clu/dsidm_group_test.py | 12 +++++++++-
.../clu/dsidm_organizational_unit_test.py | 13 ++++++++++-
.../tests/suites/clu/dsidm_posixgroup_test.py | 13 ++++++++++-
.../tests/suites/clu/dsidm_role_test.py | 23 +++++--------------
.../tests/suites/clu/dsidm_services_test.py | 13 ++++++++++-
.../suites/clu/dsidm_uniquegroup_test.py | 12 +++++++++-
.../tests/suites/clu/dsidm_user_test.py | 22 +++++++++++++++++-
7 files changed, 85 insertions(+), 23 deletions(-)

diff --git a/dirsrvtests/tests/suites/clu/dsidm_group_test.py b/dirsrvtests/tests/suites/clu/dsidm_group_test.py
index 36723a2d0..eba823d2d 100644
--- a/dirsrvtests/tests/suites/clu/dsidm_group_test.py
+++ b/dirsrvtests/tests/suites/clu/dsidm_group_test.py
@@ -17,7 +17,7 @@ from lib389.cli_idm.group import (list, get, get_dn, create, delete, modify, ren
members, add_member, remove_member)
from lib389.topologies import topology_st
from lib389.cli_base import FakeArgs
-from lib389.utils import ds_is_older, ensure_str
+from lib389.utils import ds_is_older, ensure_str, is_a_dn
from lib389.idm.group import Groups
from . import check_value_in_log_and_reset

@@ -198,6 +198,7 @@ def test_dsidm_group_list(topology_st, create_test_group):
standalone = topology_st.standalone
args = FakeArgs()
args.json = False
+ args.full_dn = False
json_list = ['type',
'list',
'items']
@@ -214,12 +215,21 @@ def test_dsidm_group_list(topology_st, create_test_group):
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=group_name)

+ log.info('Test full_dn option with list')
+ args.full_dn = True
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ result = topology_st.logcap.get_raw_outputs()
+ json_result = json.loads(result[0])
+ assert is_a_dn(json_result['items'][0])
+ args.full_dn = False
+
log.info('Delete the group')
groups = Groups(standalone, DEFAULT_SUFFIX)
testgroup = groups.get(group_name)
testgroup.delete()

log.info('Test empty dsidm group list with json')
+ topology_st.logcap.flush()
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=group_name)

diff --git a/dirsrvtests/tests/suites/clu/dsidm_organizational_unit_test.py b/dirsrvtests/tests/suites/clu/dsidm_organizational_unit_test.py
index ee908fb22..06556b227 100644
--- a/dirsrvtests/tests/suites/clu/dsidm_organizational_unit_test.py
+++ b/dirsrvtests/tests/suites/clu/dsidm_organizational_unit_test.py
@@ -11,12 +11,13 @@ import subprocess
import pytest
import logging
import os
+import json

from lib389 import DEFAULT_SUFFIX
from lib389.cli_idm.organizationalunit import get, get_dn, create, modify, delete, list, rename
from lib389.topologies import topology_st
from lib389.cli_base import FakeArgs
-from lib389.utils import ds_is_older
+from lib389.utils import ds_is_older, is_a_dn
from lib389.idm.organizationalunit import OrganizationalUnits
from . import check_value_in_log_and_reset

@@ -110,6 +111,7 @@ def test_dsidm_organizational_unit_list(topology_st, create_test_ou):
standalone = topology_st.standalone
args = FakeArgs()
args.json = False
+ args.full_dn = False
json_list = ['type',
'list',
'items']
@@ -126,7 +128,16 @@ def test_dsidm_organizational_unit_list(topology_st, create_test_ou):
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
check_value_in_log_and_reset(topology_st, check_value=ou_name)

+ log.info('Test full_dn option with list')
+ args.full_dn = True
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ result = topology_st.logcap.get_raw_outputs()
+ json_result = json.loads(result[0])
+ assert is_a_dn(json_result['items'][0])
+ args.full_dn = False
+
log.info('Delete the organizational unit')
+ topology_st.logcap.flush()
ous = OrganizationalUnits(standalone, DEFAULT_SUFFIX)
test_ou = ous.get(ou_name)
test_ou.delete()
diff --git a/dirsrvtests/tests/suites/clu/dsidm_posixgroup_test.py b/dirsrvtests/tests/suites/clu/dsidm_posixgroup_test.py
index ccafd3905..10799ee28 100644
--- a/dirsrvtests/tests/suites/clu/dsidm_posixgroup_test.py
+++ b/dirsrvtests/tests/suites/clu/dsidm_posixgroup_test.py
@@ -9,12 +9,13 @@
import pytest
import logging
import os
+import json

from lib389 import DEFAULT_SUFFIX
from lib389.cli_idm.posixgroup import list, get, get_dn, create, delete, modify, rename
from lib389.topologies import topology_st
from lib389.cli_base import FakeArgs
-from lib389.utils import ds_is_older, ensure_str
+from lib389.utils import ds_is_older, ensure_str, is_a_dn
from lib389.idm.posixgroup import PosixGroups
from . import check_value_in_log_and_reset

@@ -195,6 +196,7 @@ def test_dsidm_posixgroup_list(topology_st, create_test_posixgroup):
standalone = topology_st.standalone
args = FakeArgs()
args.json = False
+ args.full_dn = False
json_list = ['type',
'list',
'items']
@@ -211,12 +213,21 @@ def test_dsidm_posixgroup_list(topology_st, create_test_posixgroup):
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=posixgroup_name)

+ log.info('Test full_dn option with list')
+ args.full_dn = True
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ result = topology_st.logcap.get_raw_outputs()
+ json_result = json.loads(result[0])
+ assert is_a_dn(json_result['items'][0])
+ args.full_dn = False
+
log.info('Delete the posixgroup')
posixgroups = PosixGroups(standalone, DEFAULT_SUFFIX)
test_posixgroup = posixgroups.get(posixgroup_name)
test_posixgroup.delete()

log.info('Test empty dsidm posixgroup list with json')
+ topology_st.logcap.flush()
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=posixgroup_name)

diff --git a/dirsrvtests/tests/suites/clu/dsidm_role_test.py b/dirsrvtests/tests/suites/clu/dsidm_role_test.py
index eb5f692d7..094db2f78 100644
--- a/dirsrvtests/tests/suites/clu/dsidm_role_test.py
+++ b/dirsrvtests/tests/suites/clu/dsidm_role_test.py
@@ -67,6 +67,7 @@ def create_test_filtered_role(topology_st, request):

properties = FakeArgs()
properties.cn = filtered_role_name
+ properties.nsrolefilter = "(cn=*)"
create_filtered(topology_st.standalone, DEFAULT_SUFFIX, topology_st.logcap.log, properties)
test_filtered_role = filtered_roles.get(filtered_role_name)

@@ -92,7 +93,7 @@ def create_test_nested_role(topology_st, create_test_managed_role, request):

properties = FakeArgs()
properties.cn = nested_role_name
- properties.nsRoleDN = managed_role.dn
+ properties.nsroledn = managed_role.dn
create_nested(topology_st.standalone, DEFAULT_SUFFIX, topology_st.logcap.log, properties)
test_nested_role = nested_roles.get(nested_role_name)

@@ -341,14 +342,8 @@ def test_dsidm_role_list(topology_st, create_test_managed_role):
@pytest.mark.parametrize(
"role_name, fixture, objectclasses",
[(managed_role_name, 'create_test_managed_role', ['nsSimpleRoleDefinition', 'nsManagedRoleDefinition']),
- (pytest.param(filtered_role_name,
- create_test_filtered_role,
- ['nsComplexRoleDefinition', 'nsFilteredRoleDefinition'],
- marks=pytest.mark.xfail(reason="DS6492"))),
- (pytest.param(nested_role_name,
- create_test_nested_role,
- ['nsComplexRoleDefinition', 'nsNestedRoleDefinition'],
- marks=pytest.mark.xfail(reason="DS6493")))])
+ (filtered_role_name, 'create_test_filtered_role', ['nsComplexRoleDefinition', 'nsFilteredRoleDefinition']),
+ (nested_role_name, 'create_test_nested_role', ['nsComplexRoleDefinition', 'nsNestedRoleDefinition'])])
def test_dsidm_role_get(topology_st, role_name, fixture, objectclasses, request):
""" Test dsidm role get option for managed, filtered and nested role

@@ -422,14 +417,8 @@ def test_dsidm_role_get(topology_st, role_name, fixture, objectclasses, request)
@pytest.mark.parametrize(
"role_name, fixture, objectclasses",
[(managed_role_name, 'create_test_managed_role', ['nsSimpleRoleDefinition', 'nsManagedRoleDefinition']),
- (pytest.param(filtered_role_name,
- create_test_filtered_role,
- ['nsComplexRoleDefinition', 'nsFilteredRoleDefinition'],
- marks=pytest.mark.xfail(reason="DS6492"))),
- (pytest.param(nested_role_name,
- create_test_nested_role,
- ['nsComplexRoleDefinition', 'nsNestedRoleDefinition'],
- marks=pytest.mark.xfail(reason="DS6493")))])
+ (filtered_role_name, 'create_test_filtered_role', ['nsComplexRoleDefinition', 'nsFilteredRoleDefinition']),
+ (nested_role_name, 'create_test_nested_role', ['nsComplexRoleDefinition', 'nsNestedRoleDefinition'])])
def test_dsidm_role_get_by_dn(topology_st, role_name, fixture, objectclasses, request):
""" Test dsidm role get-by-dn option for managed, filtered and nested role

diff --git a/dirsrvtests/tests/suites/clu/dsidm_services_test.py b/dirsrvtests/tests/suites/clu/dsidm_services_test.py
index 61dd0ac11..f167b1c6f 100644
--- a/dirsrvtests/tests/suites/clu/dsidm_services_test.py
+++ b/dirsrvtests/tests/suites/clu/dsidm_services_test.py
@@ -11,12 +11,13 @@ import subprocess
import pytest
import logging
import os
+import json

from lib389 import DEFAULT_SUFFIX
from lib389.cli_idm.service import list, get, get_dn, create, delete, modify, rename
from lib389.topologies import topology_st
from lib389.cli_base import FakeArgs
-from lib389.utils import ds_is_older, ensure_str
+from lib389.utils import ds_is_older, ensure_str, is_a_dn
from lib389.idm.services import ServiceAccounts
from . import check_value_in_log_and_reset

@@ -73,6 +74,7 @@ def test_dsidm_service_list(topology_st, create_test_service):
standalone = topology_st.standalone
args = FakeArgs()
args.json = False
+ args.full_dn = False
service_value = 'test_service'
json_list = ['type',
'list',
@@ -90,12 +92,21 @@ def test_dsidm_service_list(topology_st, create_test_service):
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=service_value)

+ log.info('Test full_dn option with list')
+ args.full_dn = True
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ result = topology_st.logcap.get_raw_outputs()
+ json_result = json.loads(result[0])
+ assert is_a_dn(json_result['items'][0])
+ args.full_dn = False
+
log.info('Delete the service')
services = ServiceAccounts(topology_st.standalone, DEFAULT_SUFFIX)
testservice = services.get(service_value)
testservice.delete()

log.info('Test empty dsidm service list with json')
+ topology_st.logcap.flush()
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=service_value)

diff --git a/dirsrvtests/tests/suites/clu/dsidm_uniquegroup_test.py b/dirsrvtests/tests/suites/clu/dsidm_uniquegroup_test.py
index 0532791c1..4689ae34b 100644
--- a/dirsrvtests/tests/suites/clu/dsidm_uniquegroup_test.py
+++ b/dirsrvtests/tests/suites/clu/dsidm_uniquegroup_test.py
@@ -17,7 +17,7 @@ from lib389.cli_idm.uniquegroup import (list, get, get_dn, create, delete, modif
members, add_member, remove_member)
from lib389.topologies import topology_st
from lib389.cli_base import FakeArgs
-from lib389.utils import ds_is_older, ensure_str
+from lib389.utils import ds_is_older, ensure_str, is_a_dn
from lib389.idm.group import UniqueGroups
from . import check_value_in_log_and_reset

@@ -153,6 +153,7 @@ def test_dsidm_uniquegroup_list(topology_st, create_test_uniquegroup):
standalone = topology_st.standalone
args = FakeArgs()
args.json = False
+ args.full_dn = False
json_list = ['type',
'list',
'items']
@@ -169,12 +170,21 @@ def test_dsidm_uniquegroup_list(topology_st, create_test_uniquegroup):
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=uniquegroup_name)

+ log.info('Test full_dn option with list')
+ args.full_dn = True
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ result = topology_st.logcap.get_raw_outputs()
+ json_result = json.loads(result[0])
+ assert is_a_dn(json_result['items'][0])
+ args.full_dn = False
+
log.info('Delete the uniquegroup')
uniquegroups = UniqueGroups(standalone, DEFAULT_SUFFIX)
test_uniquegroup = uniquegroups.get(uniquegroup_name)
test_uniquegroup.delete()

log.info('Test empty dsidm uniquegroup list with json')
+ topology_st.logcap.flush()
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=uniquegroup_name)

diff --git a/dirsrvtests/tests/suites/clu/dsidm_user_test.py b/dirsrvtests/tests/suites/clu/dsidm_user_test.py
index 4b5491735..620e183ac 100644
--- a/dirsrvtests/tests/suites/clu/dsidm_user_test.py
+++ b/dirsrvtests/tests/suites/clu/dsidm_user_test.py
@@ -12,12 +12,13 @@ import pytest
import logging
import os
import ldap
+import json

from lib389 import DEFAULT_SUFFIX
from lib389.cli_idm.user import list, get, get_dn, create, delete, modify, rename
from lib389.topologies import topology_st
from lib389.cli_base import FakeArgs
-from lib389.utils import ds_is_older, ensure_str
+from lib389.utils import ds_is_older, ensure_str, is_a_dn
from lib389.idm.user import nsUserAccounts
from . import check_value_in_log_and_reset

@@ -74,6 +75,7 @@ def test_dsidm_user_list(topology_st, create_test_user):
standalone = topology_st.standalone
args = FakeArgs()
args.json = False
+ args.full_dn = False
user_value = 'test_user_1000'
json_list = ['type',
'list',
@@ -92,6 +94,15 @@ def test_dsidm_user_list(topology_st, create_test_user):
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=user_value)

+ log.info('Test full_dn option with list')
+ args.full_dn = True
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ result = topology_st.logcap.get_raw_outputs()
+ json_result = json.loads(result[0])
+ assert is_a_dn(json_result['items'][0])
+ args.full_dn = False
+ topology_st.logcap.flush()
+
log.info('Delete the user')
users = nsUserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
testuser = users.get(user_value)
@@ -777,6 +788,7 @@ def test_dsidm_user_list_rdn_after_rename(topology_st):
log.info('Test dsidm user list without json')
args = FakeArgs()
args.json = False
+ args.full_dn = False
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
# Should show the new name, not the original name
check_value_in_log_and_reset(topology_st, check_value=new_name, check_value_not=original_name)
@@ -787,6 +799,14 @@ def test_dsidm_user_list_rdn_after_rename(topology_st):
# Should show the new name in JSON output as well
check_value_in_log_and_reset(topology_st, check_value=new_name, check_value_not=original_name)

+ log.info('Test full_dn option with list')
+ args.full_dn = True
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ result = topology_st.logcap.get_raw_outputs()
+ json_result = json.loads(result[0])
+ assert is_a_dn(json_result['items'][0])
+ args.full_dn = False
+
log.info('Directly verify RDN extraction works correctly')
renamed_user = users.get(new_name)
rdn_value = renamed_user.get_rdn_from_dn(renamed_user.dn)
--
2.49.0

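The same full_dn check recurs verbatim in each suite touched above; a small shared helper along these lines (hypothetical, not part of the patch) would remove the duplication while keeping the exact assertions:

import json
from lib389.utils import is_a_dn

def assert_full_dn_list(inst, basedn, topology, list_func, args):
    """Run a dsidm `list` handler with full_dn=True and check the items are DNs."""
    args.full_dn = True
    list_func(inst, basedn, topology.logcap.log, args)
    raw = topology.logcap.get_raw_outputs()
    assert is_a_dn(json.loads(raw[0])['items'][0])
    args.full_dn = False
    topology.logcap.flush()   # leave the log capture clean for the next check
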
503
0036-Issue-6519-Add-basic-dsidm-account-tests.patch
Normal file
@ -0,0 +1,503 @@
From 10937417415577569bd777aacf7941803e96da21 Mon Sep 17 00:00:00 2001
From: Lenka Doudova <lryznaro@redhat.com>
Date: Mon, 20 Jan 2025 14:19:51 +0100
Subject: [PATCH] Issue 6519 - Add basic dsidm account tests

Automating basic dsidm account tests

Relates to: https://github.com/389ds/389-ds-base/issues/6519

Author: Lenka Doudova

Reviewed by: Simon Pichugin
---
.../tests/suites/clu/dsidm_account_test.py | 417 +++++++++++++++++-
src/lib389/lib389/cli_idm/account.py | 6 +-
2 files changed, 409 insertions(+), 14 deletions(-)

diff --git a/dirsrvtests/tests/suites/clu/dsidm_account_test.py b/dirsrvtests/tests/suites/clu/dsidm_account_test.py
index 4b48a11a5..c600e31fd 100644
--- a/dirsrvtests/tests/suites/clu/dsidm_account_test.py
+++ b/dirsrvtests/tests/suites/clu/dsidm_account_test.py
@@ -6,22 +6,19 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
+
import logging
import os
import json
import pytest
import ldap
from lib389 import DEFAULT_SUFFIX
-from lib389.cli_idm.account import (
- get_dn,
- lock,
- unlock,
- entry_status,
- subtree_status,
-)
+from lib389.cli_idm.account import list, get_dn, lock, unlock, delete, modify, rename, entry_status, \
+ subtree_status, reset_password, change_password
+from lib389.cli_idm.user import create
from lib389.topologies import topology_st
from lib389.cli_base import FakeArgs
-from lib389.utils import ds_is_older
+from lib389.utils import ds_is_older, is_a_dn
from lib389.idm.user import nsUserAccounts
from . import check_value_in_log_and_reset

@@ -30,13 +27,28 @@ pytestmark = pytest.mark.tier0
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

+test_user_name = 'test_user_1000'

@pytest.fixture(scope="function")
def create_test_user(topology_st, request):
log.info('Create test user')
users = nsUserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
- test_user = users.create_test_user()
- log.info('Created test user: %s', test_user.dn)
+
+ if users.exists(test_user_name):
+ test_user = users.get(test_user_name)
+ test_user.delete()
+
+ properties = FakeArgs()
+ properties.uid = test_user_name
+ properties.cn = test_user_name
+ properties.sn = test_user_name
+ properties.uidNumber = '1000'
+ properties.gidNumber = '2000'
+ properties.homeDirectory = '/home/test_user_1000'
+ properties.displayName = test_user_name
+
+ create(topology_st.standalone, DEFAULT_SUFFIX, topology_st.logcap.log, properties)
+ test_user = users.get(test_user_name)

def fin():
log.info('Delete test user')
@@ -74,7 +86,7 @@ def test_dsidm_account_entry_status_with_lock(topology_st, create_test_user):

standalone = topology_st.standalone
users = nsUserAccounts(standalone, DEFAULT_SUFFIX)
- test_user = users.get('test_user_1000')
+ test_user = users.get(test_user_name)

entry_list = ['Entry DN: {}'.format(test_user.dn),
'Entry Creation Date',
@@ -169,8 +181,389 @@ def test_dsidm_account_entry_get_by_dn(topology_st, create_test_user):
assert json_result['dn'] == user_dn


+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
+def test_dsidm_account_delete(topology_st, create_test_user):
+ """ Test dsidm account delete option
+
+ :id: a7960bc2-0282-4a82-8dfb-3af2088ec661
+ :setup: Standalone
+ :steps:
+ 1. Run dsidm account delete on a created account
+ 2. Check that a message is provided on deletion
+ 3. Check that the account no longer exists
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+
+ standalone = topology_st.standalone
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
+ test_account = accounts.get(test_user_name)
+ output = 'Successfully deleted {}'.format(test_account.dn)
+
+ args = FakeArgs()
+ args.dn = test_account.dn
+
+ log.info('Test dsidm account delete')
+ delete(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False)
+ check_value_in_log_and_reset(topology_st, check_value=output)
+
+ log.info('Check that the account no longer exists')
+ assert not test_account.exists()
+
+
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
+def test_dsidm_account_list(topology_st, create_test_user):
+ """ Test dsidm account list option
+
+ :id: 4d173a3e-ee36-4a8b-8d0d-4955c792faca
+ :setup: Standalone instance
+ :steps:
+ 1. Run dsidm account list without json
+ 2. Check the output content is correct
+ 3. Run dsidm account list with json
+ 4. Check the output content is correct
+ 5. Test full_dn option with list
+ 6. Delete the account
+ 7. Check the account is not in the list with json
+ 8. Check the account is not in the list without json
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ """
+
+ standalone = topology_st.standalone
+ args = FakeArgs()
+ args.json = False
+ args.full_dn = False
+ json_list = ['type',
+ 'list',
+ 'items']
+
+ log.info('Empty the log file to prevent false data to check about group')
+ topology_st.logcap.flush()
+
+ log.info('Test dsidm account list without json')
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ check_value_in_log_and_reset(topology_st, check_value=test_user_name)
+
+ log.info('Test dsidm account list with json')
+ args.json = True
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=test_user_name)
+
+ log.info('Test full_dn option with list')
+ args.full_dn = True
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ result = topology_st.logcap.get_raw_outputs()
+ json_result = json.loads(result[0])
+ assert is_a_dn(json_result['items'][0])
+ args.full_dn = False
+ topology_st.logcap.flush()
+
+ log.info('Delete the account')
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
+ test_account = accounts.get(test_user_name)
+ test_account.delete()
+
+ log.info('Test empty dsidm account list with json')
+ list(standalone,DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=test_user_name)
+
+ log.info('Test empty dsidm account list without json')
+ args.json = False
+ list(standalone,DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ check_value_in_log_and_reset(topology_st, check_value_not=test_user_name)
+
+
+@pytest.mark.xfail(reason='DS6515')
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
+def test_dsidm_account_get_by_dn(topology_st, create_test_user):
+ """ Test dsidm account get-by-dn option
+
+ :id: 07945577-2da0-4fd9-9237-43dd2823f7b8
+ :setup: Standalone instance
+ :steps:
+ 1. Run dsidm account get-by-dn for an account without json
+ 2. Check the output content is correct
+ 3. Run dsidm account get-by-dn for an account with json
+ 4. Check the output content is correct
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Success
+ """
+
+ standalone = topology_st.standalone
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
+ test_account = accounts.get(test_user_name)
+
+ args = FakeArgs()
+ args.dn = test_account.dn
+ args.json = False
+
+ account_content = ['dn: {}'.format(test_account.dn),
+ 'cn: {}'.format(test_account.rdn),
+ 'displayName: {}'.format(test_user_name),
+ 'gidNumber: 2000',
+ 'homeDirectory: /home/{}'.format(test_user_name),
+ 'objectClass: top',
+ 'objectClass: nsPerson',
+ 'objectClass: nsAccount',
+ 'objectClass: nsOrgPerson',
+ 'objectClass: posixAccount',
+ 'uid: {}'.format(test_user_name),
+ 'uidNumber: 1000']
+
+ json_content = ['attrs',
+ 'objectclass',
+ 'top',
+ 'nsPerson',
+ 'nsAccount',
+ 'nsOrgPerson',
+ 'posixAccount',
+ 'cn',
+ test_account.rdn,
+ 'gidnumber',
+ '2000',
+ 'homedirectory',
+ '/home/{}'.format(test_user_name),
+ 'displayname',
+ test_user_name,
+ 'uidnumber',
+ '1000',
+ 'creatorsname',
+ 'cn=directory manager',
+ 'modifiersname',
+ 'createtimestamp',
+ 'modifytimestamp',
+ 'nsuniqueid',
+ 'parentid',
+ 'entryid',
+ 'entryuuid',
+ 'dsentrydn',
+ 'entrydn',
+ test_account.dn]
+
+ log.info('Empty the log file to prevent false data to check about the account')
+ topology_st.logcap.flush()
+
+ log.info('Test dsidm account get-by-dn without json')
+ get_dn(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ check_value_in_log_and_reset(topology_st, content_list=account_content)
+
+ log.info('Test dsidm account get-by-dn with json')
+ args.json = True
+ get_dn(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ check_value_in_log_and_reset(topology_st, content_list=json_content)
+
+
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
+def test_dsidm_account_modify_by_dn(topology_st, create_test_user):
+ """ Test dsidm account modify-by-dn
+
+ :id: e7288f8c-f0a8-4d8d-a00f-1b243eb117bc
+ :setup: Standalone instance
+ :steps:
+ 1. Run dsidm account modify-by-dn add description value
+ 2. Run dsidm account modify-by-dn replace description value
+ 3. Run dsidm account modify-by-dn delete description value
+ :expectedresults:
+ 1. A description value is added
+ 2. The original description value is replaced and the previous is not present
+ 3. The replaced description value is deleted
+ """
+
+ standalone = topology_st.standalone
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
+ test_account = accounts.get(test_user_name)
+ output = 'Successfully modified {}'.format(test_account.dn)
+
+ args = FakeArgs()
+ args.dn = test_account.dn
+ args.changes = ['add:description:new_description']
+
+ log.info('Test dsidm account modify add')
+ modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False)
+ check_value_in_log_and_reset(topology_st, check_value=output)
+ assert test_account.present('description', 'new_description')
+
+ log.info('Test dsidm account modify replace')
+ args.changes = ['replace:description:replaced_description']
+ modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False)
+ check_value_in_log_and_reset(topology_st, check_value=output)
+ assert test_account.present('description', 'replaced_description')
+ assert not test_account.present('description', 'new_description')
+
+ log.info('Test dsidm account modify delete')
+ args.changes = ['delete:description:replaced_description']
+ modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False)
+ check_value_in_log_and_reset(topology_st, check_value=output)
+ assert not test_account.present('description', 'replaced_description')
+
+
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
+def test_dsidm_account_rename_by_dn(topology_st, create_test_user):
+ """ Test dsidm account rename-by-dn option
+
+ :id: f4b8e491-35b1-4113-b9c4-e0a80f8985f3
+ :setup: Standalone instance
+ :steps:
+ 1. Run dsidm account rename option on existing account
+ 2. Check the account does not have another uid attribute with the old rdn
+ 3. Check the old account is deleted
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+
+ standalone = topology_st.standalone
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
+ test_account = accounts.get(test_user_name)
+
+ args = FakeArgs()
+ args.dn = test_account.dn
+ args.new_name = 'renamed_account'
+ args.new_dn = 'uid=renamed_account,ou=people,{}'.format(DEFAULT_SUFFIX)
+ args.keep_old_rdn = False
+
+ log.info('Test dsidm account rename-by-dn')
+ rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ new_account = accounts.get(args.new_name)
+
+ try:
+ output = 'Successfully renamed to {}'.format(new_account.dn)
+ check_value_in_log_and_reset(topology_st, check_value=output)
+
+ log.info('Verify the new account does not have a uid attribute with the old rdn')
+ assert not new_account.present('uid', test_user_name)
+ assert new_account.present('displayName', test_user_name)
+
+ log.info('Verify the old account does not exist')
+ assert not test_account.exists()
+ finally:
+ log.info('Clean up')
+ new_account.delete()
+
+
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
+def test_dsidm_account_rename_by_dn_keep_old_rdn(topology_st, create_test_user):
+ """ Test dsidm account rename-by-dn option with keep-old-rdn
+
+ :id: a128bdbb-c0a4-4d9d-9a95-9be2d3780094
+ :setup: Standalone instance
+ :steps:
+ 1. Run dsidm account rename option on existing account
+ 2. Check the account has another uid attribute with the old rdn
+ 3. Check the old account is deleted
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+
+ standalone = topology_st.standalone
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
+ test_account = accounts.get(test_user_name)
+
+ args = FakeArgs()
+ args.dn = test_account.dn
+ args.new_name = 'renamed_account'
+ args.new_dn = 'uid=renamed_account,ou=people,{}'.format(DEFAULT_SUFFIX)
+ args.keep_old_rdn = True
+
+ log.info('Test dsidm account rename-by-dn')
+ rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ new_account = accounts.get(args.new_name)
+
+ try:
+ output = 'Successfully renamed to {}'.format(new_account.dn)
+ check_value_in_log_and_reset(topology_st, check_value=output)
+
+ log.info('Verify the new account does not have a uid attribute with the old rdn')
+ assert new_account.present('uid', test_user_name)
+ assert new_account.present('displayName', test_user_name)
+
+ log.info('Verify the old account does not exist')
+ assert not test_account.exists()
+ finally:
+ log.info('Clean up')
+ new_account.delete()
+
+
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
+def test_dsidm_account_reset_password(topology_st, create_test_user):
+ """ Test dsidm account reset_password option
+
+ :id: 02ffa044-08ae-40c5-9108-b02d0c3b0521
+ :setup: Standalone instance
+ :steps:
+ 1. Run dsidm account reset_password on an existing user
+ 2. Verify that the user has now userPassword attribute set
+ :expectedresults:
+ 1. Success
+ 2. Success
+ """
+
+ standalone = topology_st.standalone
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
+ test_account = accounts.get(test_user_name)
+
+ args = FakeArgs()
+ args.dn = test_account.dn
+ args.new_password = 'newpasswd'
+ output = 'reset password for {}'.format(test_account.dn)
+
+ log.info('Test dsidm account reset_password')
+ reset_password(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ check_value_in_log_and_reset(topology_st, check_value=output)
+
+ log.info('Verify the userPassword attribute is set')
+ assert test_account.present('userPassword')
+
+
+@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
+def test_dsidm_account_change_password(topology_st, create_test_user):
+ """ Test dsidm account change_password option
+
+ :id: 24c25b8f-df2b-4d43-a88e-47e24bc4ff36
+ :setup: Standalone instance
+ :steps:
+ 1. Run dsidm account change_password on an existing user
+ 2. Verify that the user has userPassword attribute set
+ :expectedresults:
+ 1. Success
+ 2. Success
+ """
+
+ standalone = topology_st.standalone
+ accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX)
+ test_account = accounts.get(test_user_name)
+
+ args = FakeArgs()
+ args.dn = test_account.dn
+ args.new_password = 'newpasswd'
+ output = 'changed password for {}'.format(test_account.dn)
+
+ log.info('Test dsidm account change_password')
+ change_password(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ check_value_in_log_and_reset(topology_st, check_value=output)
+
+ log.info('Verify the userPassword attribute is set')
+ assert test_account.present('userPassword')
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
+ pytest.main("-s {}".format(CURRENT_FILE))
\ No newline at end of file
diff --git a/src/lib389/lib389/cli_idm/account.py b/src/lib389/lib389/cli_idm/account.py
index 8b6f99549..9877c533a 100644
--- a/src/lib389/lib389/cli_idm/account.py
+++ b/src/lib389/lib389/cli_idm/account.py
@@ -12,10 +12,12 @@ import ldap
import math
from datetime import datetime
from lib389.idm.account import Account, Accounts, AccountState
-from lib389.cli_base import (
- _generic_get_dn,
+from lib389.cli_idm import (
_generic_list,
_generic_delete,
+ _generic_get_dn
+)
+from lib389.cli_base import (
_generic_modify_dn,
_get_arg,
_get_dn_arg,
--
2.49.0

268
0037-Issue-6940-dsconf-monitor-server-fails-with-ldapi-du.patch
Normal file
@@ -0,0 +1,268 @@
From 7423f0a0b90bac39a23b5ce54a1c61439d0ebcb6 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 19 Aug 2025 16:10:09 -0700
Subject: [PATCH] Issue 6940 - dsconf monitor server fails with ldapi:// due to
 absent server ID (#6941)

Description: The dsconf monitor server command fails when using the ldapi://
protocol because the server ID is not set, preventing PID retrieval from
defaults.inf. This causes the Web console to fail displaying the "Server
Version" field and potentially other CLI/WebUI issues.

The fix attempts to derive the server ID from the LDAPI socket path when
not explicitly provided. This covers the common case where the socket name
contains the instance name (e.g., slapd-instance.socket).
If that's not possible, it also attempts to derive the server ID from the
nsslapd-instancedir configuration attribute. The derived server ID
is validated against actual system instances to ensure it exists.
Note that socket names can vary and nsslapd-instancedir can be changed.
This is a best-effort approach for the common naming pattern.

Also fixes the LDAPI socket path extraction, which was incorrectly using
offset 9 instead of 8 for ldapi:// URIs.

The monitor command now handles missing PIDs gracefully, returning zero
values for process-specific stats instead of failing completely.

Fixes: https://github.com/389ds/389-ds-base/issues/6940

Reviewed by: @vashirov, @mreynolds389 (Thanks!!)
---
 src/lib389/lib389/__init__.py      | 93 +++++++++++++++++++++++++++---
 src/lib389/lib389/cli_base/dsrc.py |  4 +-
 src/lib389/lib389/monitor.py       | 50 ++++++++++++----
 3 files changed, 124 insertions(+), 23 deletions(-)

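For illustration, the core of the derivation this patch adds can be reduced to the sketch below (the regex is the one used in _extract_serverid_from_string in the diff; the socket path in the example is hypothetical). In the patch itself the candidate is additionally validated against get_instance_list() before it is accepted.

    import re

    def serverid_from_path(text):
        # Best effort: pull <serverid> out of 'slapd-<serverid>' or 'slapd-<serverid>.socket'
        match = re.search(r'slapd-([A-Za-z0-9._-]+?)(?:\.socket)?(?:$|/)', text or '')
        return match.group(1) if match else None

    print(serverid_from_path('/run/slapd-localhost.socket'))  # -> 'localhost'
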
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 0ddfca8ae..23a20739f 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -17,7 +17,7 @@

 import sys
 import os
-from urllib.parse import urlparse
+from urllib.parse import urlparse, unquote
 import stat
 import pwd
 import grp
@@ -67,7 +67,8 @@ from lib389.utils import (
     get_default_db_lib,
     selinux_present,
     selinux_label_port,
-    get_user_is_root)
+    get_user_is_root,
+    get_instance_list)
 from lib389.paths import Paths
 from lib389.nss_ssl import NssSsl
 from lib389.tasks import BackupTask, RestoreTask, Task
@@ -249,6 +250,57 @@ class DirSrv(SimpleLDAPObject, object):
         self.dbdir = self.ds_paths.db_dir
         self.changelogdir = os.path.join(os.path.dirname(self.dbdir), DEFAULT_CHANGELOG_DB)

+    def _extract_serverid_from_string(self, text):
+        """Extract serverid from a string containing 'slapd-<serverid>' pattern.
+        Returns the serverid or None if not found or validation fails.
+        Only attempts derivation if serverid is currently None.
+        """
+        if getattr(self, 'serverid', None) is not None:
+            return None
+        if not text:
+            return None
+
+        # Use regex to extract serverid from "slapd-<serverid>" or "slapd-<serverid>.socket"
+        match = re.search(r'slapd-([A-Za-z0-9._-]+?)(?:\.socket)?(?:$|/)', text)
+        if not match:
+            return None
+        candidate = match.group(1)
+
+        self.serverid = candidate
+        try:
+            insts = get_instance_list()
+        except Exception:
+            self.serverid = None
+            return None
+        if f'slapd-{candidate}' in insts or candidate in insts:
+            return candidate
+        # restore original and report failure
+        self.serverid = None
+        return None
+
+    def _derive_serverid_from_ldapi(self):
+        """Attempt to derive serverid from an LDAPI socket path or URI and
+        verify it exists on the system. Returns the serverid or None.
+        """
+        socket_path = None
+        if hasattr(self, 'ldapi_socket') and self.ldapi_socket:
+            socket_path = unquote(self.ldapi_socket)
+        elif hasattr(self, 'ldapuri') and isinstance(self.ldapuri, str) and self.ldapuri.startswith('ldapi://'):
+            socket_path = unquote(self.ldapuri[len('ldapi://'):])
+
+        return self._extract_serverid_from_string(socket_path)
+
+    def _derive_serverid_from_instancedir(self):
+        """Extract serverid from nsslapd-instancedir path like '/usr/lib64/dirsrv/slapd-<serverid>'"""
+        try:
+            from lib389.config import Config
+            config = Config(self)
+            instancedir = config.get_attr_val_utf8_l("nsslapd-instancedir")
+        except Exception:
+            return None
+
+        return self._extract_serverid_from_string(instancedir)
+
     def rebind(self):
         """Reconnect to the DS

@@ -528,6 +580,15 @@ class DirSrv(SimpleLDAPObject, object):
             self.ldapi_autobind = args.get(SER_LDAPI_AUTOBIND, 'off')
             self.isLocal = True
             self.log.debug("Allocate %s with %s", self.__class__, self.ldapi_socket)
+        elif self.ldapuri is not None and isinstance(self.ldapuri, str) and self.ldapuri.startswith('ldapi://'):
+            # Try to learn serverid from ldapi uri
+            try:
+                self.ldapi_enabled = 'on'
+                self.ldapi_socket = unquote(self.ldapuri[len('ldapi://'):])
+                self.ldapi_autobind = args.get(SER_LDAPI_AUTOBIND, 'off')
+                self.isLocal = True
+            except Exception:
+                pass
         # Settings from args of server attributes
         self.strict_hostname = args.get(SER_STRICT_HOSTNAME_CHECKING, False)
         if self.strict_hostname is True:
@@ -548,9 +609,16 @@ class DirSrv(SimpleLDAPObject, object):

         self.log.debug("Allocate %s with %s:%s", self.__class__, self.host, (self.sslport or self.port))

-        if SER_SERVERID_PROP in args:
-            self.ds_paths = Paths(serverid=args[SER_SERVERID_PROP], instance=self, local=self.isLocal)
+        # Try to determine serverid if not provided
+        if SER_SERVERID_PROP in args and args.get(SER_SERVERID_PROP) is not None:
             self.serverid = args.get(SER_SERVERID_PROP, None)
+        elif getattr(self, 'serverid', None) is None and self.isLocal:
+            sid = self._derive_serverid_from_ldapi()
+            if sid:
+                self.serverid = sid
+
+        if getattr(self, 'serverid', None):
+            self.ds_paths = Paths(serverid=self.serverid, instance=self, local=self.isLocal)
         else:
             self.ds_paths = Paths(instance=self, local=self.isLocal)

@@ -989,6 +1057,17 @@ class DirSrv(SimpleLDAPObject, object):
             self.__initPart2()
         self.state = DIRSRV_STATE_ONLINE
         # Now that we're online, some of our methods may try to query the version online.
+
+        # After transitioning online, attempt to derive serverid if still unknown.
+        # If we find it, refresh ds_paths and rerun __initPart2
+        if getattr(self, 'serverid', None) is None and self.isLocal:
+            sid = self._derive_serverid_from_instancedir()
+            if sid:
+                self.serverid = sid
+                # Reinitialize paths with the new serverid
+                self.ds_paths = Paths(serverid=self.serverid, instance=self, local=self.isLocal)
+                if not connOnly:
+                    self.__initPart2()
         self.__add_brookers__()

     def close(self):
@@ -3537,8 +3616,4 @@ class DirSrv(SimpleLDAPObject, object):
         """
         Get the pid of the running server
         """
-        pid = pid_from_file(self.pid_file())
-        if pid == 0 or pid is None:
-            return 0
-        else:
-            return pid
+        return pid_from_file(self.pid_file())
diff --git a/src/lib389/lib389/cli_base/dsrc.py b/src/lib389/lib389/cli_base/dsrc.py
index 84567b990..498228ce0 100644
--- a/src/lib389/lib389/cli_base/dsrc.py
+++ b/src/lib389/lib389/cli_base/dsrc.py
@@ -56,7 +56,7 @@ def dsrc_arg_concat(args, dsrc_inst):
         new_dsrc_inst['args'][SER_ROOT_DN] = new_dsrc_inst['binddn']
     if new_dsrc_inst['uri'][0:8] == 'ldapi://':
         new_dsrc_inst['args'][SER_LDAPI_ENABLED] = "on"
-        new_dsrc_inst['args'][SER_LDAPI_SOCKET] = new_dsrc_inst['uri'][9:]
+        new_dsrc_inst['args'][SER_LDAPI_SOCKET] = new_dsrc_inst['uri'][8:]
         new_dsrc_inst['args'][SER_LDAPI_AUTOBIND] = "on"

     # Make new
@@ -170,7 +170,7 @@ def dsrc_to_ldap(path, instance_name, log):
         dsrc_inst['args'][SER_ROOT_DN] = dsrc_inst['binddn']
     if dsrc_inst['uri'][0:8] == 'ldapi://':
         dsrc_inst['args'][SER_LDAPI_ENABLED] = "on"
-        dsrc_inst['args'][SER_LDAPI_SOCKET] = dsrc_inst['uri'][9:]
+        dsrc_inst['args'][SER_LDAPI_SOCKET] = dsrc_inst['uri'][8:]
        dsrc_inst['args'][SER_LDAPI_AUTOBIND] = "on"

     # Return the dict.
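A quick worked example of the off-by-one being fixed here, with a hypothetical URI: len('ldapi://') is 8, so slicing at offset 9 silently dropped the first character of the percent-encoded socket path.

    uri = 'ldapi://%2frun%2fslapd-localhost.socket'  # hypothetical dsrc URI
    assert len('ldapi://') == 8
    print(uri[9:])  # '2frun%2fslapd-localhost.socket' - old offset, leading '%' lost
    print(uri[8:])  # '%2frun%2fslapd-localhost.socket' - fixed offset, full path kept
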
diff --git a/src/lib389/lib389/monitor.py b/src/lib389/lib389/monitor.py
index 27b99a7e3..bf3e1df76 100644
--- a/src/lib389/lib389/monitor.py
+++ b/src/lib389/lib389/monitor.py
@@ -92,21 +92,47 @@ class Monitor(DSLdapObject):
         Get CPU and memory stats
         """
         stats = {}
-        pid = self._instance.get_pid()
+        try:
+            pid = self._instance.get_pid()
+        except Exception:
+            pid = None
         total_mem = psutil.virtual_memory()[0]
-        p = psutil.Process(pid)
-        memory_stats = p.memory_full_info()

-        # Get memory & CPU stats
+        # Always include total system memory
         stats['total_mem'] = [str(total_mem)]
-        stats['rss'] = [str(memory_stats[0])]
-        stats['vms'] = [str(memory_stats[1])]
-        stats['swap'] = [str(memory_stats[9])]
-        stats['mem_rss_percent'] = [str(round(p.memory_percent("rss")))]
-        stats['mem_vms_percent'] = [str(round(p.memory_percent("vms")))]
-        stats['mem_swap_percent'] = [str(round(p.memory_percent("swap")))]
-        stats['total_threads'] = [str(p.num_threads())]
-        stats['cpu_usage'] = [str(round(p.cpu_percent(interval=0.1)))]
+
+        # Process-specific stats - only if process is running (pid is not None)
+        if pid is not None:
+            try:
+                p = psutil.Process(pid)
+                memory_stats = p.memory_full_info()
+
+                # Get memory & CPU stats
+                stats['rss'] = [str(memory_stats[0])]
+                stats['vms'] = [str(memory_stats[1])]
+                stats['swap'] = [str(memory_stats[9])]
+                stats['mem_rss_percent'] = [str(round(p.memory_percent("rss")))]
+                stats['mem_vms_percent'] = [str(round(p.memory_percent("vms")))]
+                stats['mem_swap_percent'] = [str(round(p.memory_percent("swap")))]
+                stats['total_threads'] = [str(p.num_threads())]
+                stats['cpu_usage'] = [str(round(p.cpu_percent(interval=0.1)))]
+            except (psutil.NoSuchProcess, psutil.AccessDenied):
+                # Process exists in PID file but is not accessible or doesn't exist
+                pid = None
+
+        # If no valid PID, provide zero values for process stats
+        if pid is None:
+            stats['rss'] = ['0']
+            stats['vms'] = ['0']
+            stats['swap'] = ['0']
+            stats['mem_rss_percent'] = ['0']
+            stats['mem_vms_percent'] = ['0']
+            stats['mem_swap_percent'] = ['0']
+            stats['total_threads'] = ['0']
+            stats['cpu_usage'] = ['0']
+            stats['server_status'] = ['PID unavailable']
+        else:
+            stats['server_status'] = ['Server running']

         # Connections to DS
         if self._instance.port == "0":
--
2.49.0

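The graceful-degradation pattern used in the monitor change above, reduced to a self-contained sketch (the function name is ours, not lib389's):

    import psutil

    def process_stats(pid):
        # Return zeroed stats instead of raising when the PID is missing or inaccessible
        if pid is not None:
            try:
                p = psutil.Process(pid)
                return {'rss': p.memory_full_info().rss,
                        'threads': p.num_threads(),
                        'status': 'Server running'}
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                pass
        return {'rss': 0, 'threads': 0, 'status': 'PID unavailable'}
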
569
0038-Issue-6936-Make-user-subtree-policy-creation-idempot.patch
Normal file
@@ -0,0 +1,569 @@
From 594333d1a6a8bba4d485b8227c4474e4ca2aa6a4 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 19 Aug 2025 14:30:15 -0700
Subject: [PATCH] Issue 6936 - Make user/subtree policy creation idempotent
 (#6937)

Description: Correct the CLI mapping typo to use 'nsslapd-pwpolicy-local',
rework subtree policy detection to validate CoS templates, and add user-policy detection.
Make user/subtree policy creation idempotent via ensure_state, and improve the deletion
logic to distinguish subtree vs user policies and fail if none exist.

Add a test suite (pwp_history_local_override_test.py) exercising global-only and local-only
history enforcement, local overriding global counts, immediate effect of dsconf updates,
and fallback to global after removing a user policy, ensuring reliable behavior
and preventing regressions.

Fixes: https://github.com/389ds/389-ds-base/issues/6936

Reviewed by: @mreynolds389 (Thanks!)
---
 .../pwp_history_local_override_test.py | 351 ++++++++++++++++++
 src/lib389/lib389/cli_conf/pwpolicy.py |   4 +-
 src/lib389/lib389/pwpolicy.py          | 107 ++++--
 3 files changed, 424 insertions(+), 38 deletions(-)
 create mode 100644 dirsrvtests/tests/suites/password/pwp_history_local_override_test.py

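The heart of the idempotency change is swapping create() for ensure_state() on the DSLdapObjects collections, as the pwpolicy.py hunks further below show. A minimal sketch of the difference (surrounding setup assumed):

    pwp_entries = PwPolicyEntries(inst, pwp_container.dn)

    # Before: a second create_subtree_policy() call failed with ldap.ALREADY_EXISTS
    # pwp_entry = pwp_entries.create(properties=properties)

    # After: create the entry if missing, otherwise update it to match `properties`,
    # so repeated policy creation converges instead of failing
    pwp_entry = pwp_entries.ensure_state(properties=properties)
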
diff --git a/dirsrvtests/tests/suites/password/pwp_history_local_override_test.py b/dirsrvtests/tests/suites/password/pwp_history_local_override_test.py
new file mode 100644
index 000000000..6d72725fa
--- /dev/null
+++ b/dirsrvtests/tests/suites/password/pwp_history_local_override_test.py
@@ -0,0 +1,351 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2025 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import os
+import time
+import ldap
+import pytest
+import subprocess
+import logging
+
+from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD, DN_CONFIG
+from lib389.topologies import topology_st
+from lib389.idm.user import UserAccounts
+from lib389.idm.domain import Domain
+from lib389.pwpolicy import PwPolicyManager
+
+pytestmark = pytest.mark.tier1
+
+DEBUGGING = os.getenv("DEBUGGING", default=False)
+if DEBUGGING:
+    logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+    logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+OU_DN = f"ou=People,{DEFAULT_SUFFIX}"
+USER_ACI = '(targetattr="userpassword || passwordHistory")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)'
+
+
+@pytest.fixture(autouse=True, scope="function")
+def restore_global_policy(topology_st, request):
+    """Snapshot and restore the global password policy around each test in this file."""
+    inst = topology_st.standalone
+    inst.simple_bind_s(DN_DM, PASSWORD)
+
+    attrs = [
+        'nsslapd-pwpolicy-local',
+        'nsslapd-pwpolicy-inherit-global',
+        'passwordHistory',
+        'passwordInHistory',
+        'passwordChange',
+    ]
+
+    entry = inst.getEntry(DN_CONFIG, ldap.SCOPE_BASE, '(objectClass=*)', attrs)
+    saved = {attr: entry.getValue(attr) for attr in attrs}
+
+    def fin():
+        inst.simple_bind_s(DN_DM, PASSWORD)
+        for attr, value in saved.items():
+            inst.config.replace(attr, value)
+
+    request.addfinalizer(fin)
+
+
+@pytest.fixture(scope="function")
+def setup_entries(topology_st, request):
+    """Create a test OU and user, and install an ACI for self password changes."""
+
+    inst = topology_st.standalone
+
+    suffix = Domain(inst, DEFAULT_SUFFIX)
+    suffix.add('aci', USER_ACI)
+
+    users = UserAccounts(inst, DEFAULT_SUFFIX)
+    try:
+        user = users.create_test_user(uid=1)
+    except ldap.ALREADY_EXISTS:
+        user = users.get("test_user_1")
+
+    def fin():
+        pwp = PwPolicyManager(inst)
+        try:
+            pwp.delete_local_policy(OU_DN)
+        except Exception as e:
+            if "No password policy" in str(e):
+                pass
+            else:
+                raise e
+        try:
+            pwp.delete_local_policy(user.dn)
+        except Exception as e:
+            if "No password policy" in str(e):
+                pass
+            else:
+                raise e
+        suffix.remove('aci', USER_ACI)
+    request.addfinalizer(fin)
+
+    return user
+
+
+def set_user_password(inst, user, new_password, bind_as_user_password=None, expect_violation=False):
+    if bind_as_user_password is not None:
+        user.rebind(bind_as_user_password)
+    try:
+        user.reset_password(new_password)
+        if expect_violation:
+            pytest.fail("Password change unexpectedly succeeded")
+    except ldap.CONSTRAINT_VIOLATION:
+        if not expect_violation:
+            pytest.fail("Password change unexpectedly rejected with CONSTRAINT_VIOLATION")
+    finally:
+        inst.simple_bind_s(DN_DM, PASSWORD)
+        time.sleep(1)
+
+
+def set_global_history(inst, enabled: bool, count: int, inherit_global: str = 'on'):
+    inst.simple_bind_s(DN_DM, PASSWORD)
+    inst.config.replace('nsslapd-pwpolicy-local', 'on')
+    inst.config.replace('nsslapd-pwpolicy-inherit-global', inherit_global)
+    inst.config.replace('passwordHistory', 'on' if enabled else 'off')
+    inst.config.replace('passwordInHistory', str(count))
+    inst.config.replace('passwordChange', 'on')
+    time.sleep(1)
+
+
+def ensure_local_subtree_policy(inst, count: int, track_update_time: str = 'on'):
+    pwp = PwPolicyManager(inst)
+    pwp.create_subtree_policy(OU_DN, {
+        'passwordChange': 'on',
+        'passwordHistory': 'on',
+        'passwordInHistory': str(count),
+        'passwordTrackUpdateTime': track_update_time,
+    })
+    time.sleep(1)
+
+
+def set_local_history_via_cli(inst, count: int):
+    sbin_dir = inst.get_sbin_dir()
+    inst_name = inst.serverid
+    cmd = [f"{sbin_dir}/dsconf", inst_name, "localpwp", "set", f"--pwdhistorycount={count}", OU_DN]
+    rc = subprocess.call(cmd)
+    assert rc == 0, f"dsconf command failed rc={rc}: {' '.join(cmd)}"
+    time.sleep(1)
+
+
+def test_global_history_only_enforced(topology_st, setup_entries):
+    """Global-only history enforcement with count 2
+
+    :id: 3d8cf35b-4a33-4587-9814-ebe18b7a1f92
+    :setup: Standalone instance, test OU and user, ACI for self password changes
+    :steps:
+        1. Remove local policies
+        2. Set global policy: passwordHistory=on, passwordInHistory=2
+        3. Set password to Alpha1, then change to Alpha2 and Alpha3 as the user
+        4. Attempt to change to Alpha1 and Alpha2
+        5. Attempt to change to Alpha4
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Changes to Alpha1 and Alpha2 are rejected with CONSTRAINT_VIOLATION
+        5. Change to Alpha4 is accepted
+    """
+    inst = topology_st.standalone
+    inst.simple_bind_s(DN_DM, PASSWORD)
+
+    set_global_history(inst, enabled=True, count=2)
+
+    user = setup_entries
+    user.reset_password('Alpha1')
+    set_user_password(inst, user, 'Alpha2', bind_as_user_password='Alpha1')
+    set_user_password(inst, user, 'Alpha3', bind_as_user_password='Alpha2')
+
+    # Within last 2
+    set_user_password(inst, user, 'Alpha2', bind_as_user_password='Alpha3', expect_violation=True)
+    set_user_password(inst, user, 'Alpha1', bind_as_user_password='Alpha3', expect_violation=True)
+
+    # New password should be allowed
+    set_user_password(inst, user, 'Alpha4', bind_as_user_password='Alpha3', expect_violation=False)
+
+
+def test_local_overrides_global_history(topology_st, setup_entries):
+    """Local subtree policy (history=3) overrides global (history=1)
+
+    :id: 97c22f56-5ea6-40c1-8d8c-1cece3bf46fd
+    :setup: Standalone instance, test OU and user
+    :steps:
+        1. Set global policy passwordInHistory=1
+        2. Create local subtree policy on the OU with passwordInHistory=3
+        3. Set password to Bravo1, then change to Bravo2 and Bravo3 as the user
+        4. Attempt to change to Bravo1
+        5. Attempt to change to Bravo5
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Change to Bravo1 is rejected (local policy wins)
+        5. Change to Bravo5 is accepted
+    """
+    inst = topology_st.standalone
+    inst.simple_bind_s(DN_DM, PASSWORD)
+
+    set_global_history(inst, enabled=True, count=1, inherit_global='on')
+
+    ensure_local_subtree_policy(inst, count=3)
+
+    user = setup_entries
+    user.reset_password('Bravo1')
+    set_user_password(inst, user, 'Bravo2', bind_as_user_password='Bravo1')
+    set_user_password(inst, user, 'Bravo3', bind_as_user_password='Bravo2')
+
+    # Third prior should be rejected under local policy count=3
+    set_user_password(inst, user, 'Bravo1', bind_as_user_password='Bravo3', expect_violation=True)
+
+    # New password allowed
+    set_user_password(inst, user, 'Bravo5', bind_as_user_password='Bravo3', expect_violation=False)
+
+
+def test_change_local_history_via_cli_affects_enforcement(topology_st, setup_entries):
+    """Changing local policy via CLI is enforced immediately
+
+    :id: 5a6d0d14-4009-4bad-86e1-cde5000c43dc
+    :setup: Standalone instance, test OU and user, dsconf available
+    :steps:
+        1. Ensure local subtree policy passwordInHistory=3
+        2. Set password to Charlie1, then change to Charlie2 and Charlie3 as the user
+        3. Attempt to change to Charlie1 (within last 3)
+        4. Run: dsconf <inst> localpwp set --pwdhistorycount=1 "ou=People,<suffix>"
+        5. Attempt to change to Charlie1 again
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Change to Charlie1 is rejected
+        4. CLI command succeeds
+        5. Change to Charlie1 now succeeds (only the last 1 is disallowed)
+    """
+    inst = topology_st.standalone
+    inst.simple_bind_s(DN_DM, PASSWORD)
+
+    ensure_local_subtree_policy(inst, count=3)
+
+    user = setup_entries
+    user.reset_password('Charlie1')
+    set_user_password(inst, user, 'Charlie2', bind_as_user_password='Charlie1', expect_violation=False)
+    set_user_password(inst, user, 'Charlie3', bind_as_user_password='Charlie2', expect_violation=False)
+
+    # With count=3, Charlie1 is within history
+    set_user_password(inst, user, 'Charlie1', bind_as_user_password='Charlie3', expect_violation=True)
+
+    # Reduce local count to 1 via CLI to exercise CLI mapping and updated code
+    set_local_history_via_cli(inst, count=1)
+
+    # Now Charlie1 should be allowed
+    set_user_password(inst, user, 'Charlie1', bind_as_user_password='Charlie3', expect_violation=False)
+
+
+def test_history_local_only_enforced(topology_st, setup_entries):
+    """Local-only history enforcement with count 3
+
+    :id: af6ff34d-ac94-4108-a7b6-2b589c960154
+    :setup: Standalone instance, test OU and user
+    :steps:
+        1. Disable global password history (passwordHistory=off, passwordInHistory=0, inherit off)
+        2. Ensure local subtree policy with passwordInHistory=3
+        3. Set password to Delta1, then change to Delta2 and Delta3 as the user
+        4. Attempt to change to Delta1
+        5. Attempt to change to Delta5
+        6. Change once more to Delta6, then change to Delta1
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Change to Delta1 is rejected (within last 3)
+        5. Change to Delta5 is accepted
+        6. Delta1 is now older than the last 3 and is accepted
+    """
+    inst = topology_st.standalone
+    inst.simple_bind_s(DN_DM, PASSWORD)
+
+    set_global_history(inst, enabled=False, count=0, inherit_global='off')
+
+    ensure_local_subtree_policy(inst, count=3)
+
+    user = setup_entries
+    user.reset_password('Delta1')
+    set_user_password(inst, user, 'Delta2', bind_as_user_password='Delta1')
+    set_user_password(inst, user, 'Delta3', bind_as_user_password='Delta2')
+
+    # Within last 3
+    set_user_password(inst, user, 'Delta1', bind_as_user_password='Delta3', expect_violation=True)
+
+    # New password allowed
+    set_user_password(inst, user, 'Delta5', bind_as_user_password='Delta3', expect_violation=False)
+
+    # Now Delta1 is older than the last 3 after one more change
+    set_user_password(inst, user, 'Delta6', bind_as_user_password='Delta5', expect_violation=False)
+    set_user_password(inst, user, 'Delta1', bind_as_user_password='Delta6', expect_violation=False)
+
+
+def test_user_policy_detection_and_enforcement(topology_st, setup_entries):
+    """User local policy is detected and enforced; removal falls back to the global policy
+
+    :id: 2213126a-1f47-468c-8337-0d2ee5d2d585
+    :setup: Standalone instance, test OU and user
+    :steps:
+        1. Set global policy passwordInHistory=1
+        2. Create a user local password policy on the user with passwordInHistory=3
+        3. Verify is_user_policy(user.dn) is True
+        4. Set password to Echo1, then change to Echo2 and Echo3 as the user
+        5. Attempt to change to Echo1 (within last 3)
+        6. Delete the user local policy
+        7. Verify is_user_policy(user.dn) is False
+        8. Attempt to change to Echo1 again (now only the last 1 is disallowed by global)
+    :expectedresults:
+        1. Success
+        2. Success
+        3. is_user_policy returns True
+        4. Success
+        5. Change to Echo1 is rejected
+        6. Success
+        7. is_user_policy returns False
+        8. Change to Echo1 succeeds (two back is allowed by global=1)
+    """
+    inst = topology_st.standalone
+    inst.simple_bind_s(DN_DM, PASSWORD)
+
+    set_global_history(inst, enabled=True, count=1, inherit_global='on')
+
+    pwp = PwPolicyManager(inst)
+    user = setup_entries
+    pwp.create_user_policy(user.dn, {
+        'passwordChange': 'on',
+        'passwordHistory': 'on',
+        'passwordInHistory': '3',
+    })
+
+    assert pwp.is_user_policy(user.dn) is True
+
+    user.reset_password('Echo1')
+    set_user_password(inst, user, 'Echo2', bind_as_user_password='Echo1', expect_violation=False)
+    set_user_password(inst, user, 'Echo3', bind_as_user_password='Echo2', expect_violation=False)
+    set_user_password(inst, user, 'Echo1', bind_as_user_password='Echo3', expect_violation=True)
+
+    pwp.delete_local_policy(user.dn)
+    assert pwp.is_user_policy(user.dn) is False
+
+    # With only global=1, Echo1 (two back) is allowed
+    set_user_password(inst, user, 'Echo1', bind_as_user_password='Echo3', expect_violation=False)
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
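For context, set_local_history_via_cli() in the test above is a thin wrapper around a dsconf invocation of this shape (binary path, instance name, and suffix are illustrative):

    import subprocess

    subprocess.check_call([
        '/usr/sbin/dsconf', 'standalone1',
        'localpwp', 'set', '--pwdhistorycount=1',
        'ou=People,dc=example,dc=com',
    ])
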
diff --git a/src/lib389/lib389/cli_conf/pwpolicy.py b/src/lib389/lib389/cli_conf/pwpolicy.py
index 2d4ba9b21..a3e59a90c 100644
--- a/src/lib389/lib389/cli_conf/pwpolicy.py
+++ b/src/lib389/lib389/cli_conf/pwpolicy.py
@@ -1,5 +1,5 @@
 # --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2023 Red Hat, Inc.
+# Copyright (C) 2025 Red Hat, Inc.
 # All rights reserved.
 #
 # License: GPL (version 3 or any later version).
@@ -43,7 +43,7 @@ def _get_pw_policy(inst, targetdn, log, use_json=None):
         targetdn = 'cn=config'
         policydn = targetdn
         basedn = targetdn
-        attr_list.extend(['passwordisglobalpolicy', 'nsslapd-pwpolicy_local'])
+        attr_list.extend(['passwordisglobalpolicy', 'nsslapd-pwpolicy-local'])
         all_attrs = inst.config.get_attrs_vals_utf8(attr_list)
         attrs = {k: v for k, v in all_attrs.items() if len(v) > 0}
     else:
diff --git a/src/lib389/lib389/pwpolicy.py b/src/lib389/lib389/pwpolicy.py
index 6a47a44fe..539c230a9 100644
--- a/src/lib389/lib389/pwpolicy.py
+++ b/src/lib389/lib389/pwpolicy.py
@@ -1,5 +1,5 @@
 # --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2018 Red Hat, Inc.
+# Copyright (C) 2025 Red Hat, Inc.
 # All rights reserved.
 #
 # License: GPL (version 3 or any later version).
@@ -7,6 +7,7 @@
 # --- END COPYRIGHT BLOCK ---

 import ldap
+from ldap import filter as ldap_filter
 from lib389._mapped_object import DSLdapObject, DSLdapObjects
 from lib389.backend import Backends
 from lib389.config import Config
@@ -74,19 +75,56 @@ class PwPolicyManager(object):
         }

     def is_subtree_policy(self, dn):
-        """Check if the entry has a subtree password policy. If we can find a
-        template entry it is subtree policy
+        """Check if a subtree password policy exists for a given entry DN.

-        :param dn: Entry DN with PwPolicy set up
+        A subtree policy is indicated by the presence of any CoS template
+        (under `cn=nsPwPolicyContainer,<dn>`) that has a `pwdpolicysubentry`
+        attribute pointing to an existing entry with objectClass `passwordpolicy`.
+
+        :param dn: Entry DN to check for subtree policy
         :type dn: str

-        :returns: True if the entry has a subtree policy, False otherwise
+        :returns: True if a subtree policy exists, False otherwise
+        :rtype: bool
         """
-        cos_templates = CosTemplates(self._instance, 'cn=nsPwPolicyContainer,{}'.format(dn))
         try:
-            cos_templates.get('cn=nsPwTemplateEntry,%s' % dn)
-            return True
-        except:
+            container_basedn = 'cn=nsPwPolicyContainer,{}'.format(dn)
+            templates = CosTemplates(self._instance, container_basedn).list()
+            for tmpl in templates:
+                pwp_dn = tmpl.get_attr_val_utf8('pwdpolicysubentry')
+                if not pwp_dn:
+                    continue
+                # Validate that the referenced entry exists and is a passwordpolicy
+                pwp_entry = PwPolicyEntry(self._instance, pwp_dn)
+                if pwp_entry.exists() and pwp_entry.present('objectClass', 'passwordpolicy'):
+                    return True
+        except ldap.LDAPError:
+            pass
+        return False
+
+    def is_user_policy(self, dn):
+        """Check if the entry has a user password policy.
+
+        A user policy is indicated by the target entry having a
+        `pwdpolicysubentry` attribute that points to an existing
+        entry with objectClass `passwordpolicy`.
+
+        :param dn: Entry DN to check
+        :type dn: str
+
+        :returns: True if the entry has a user policy, False otherwise
+        :rtype: bool
+        """
+        try:
+            entry = Account(self._instance, dn)
+            if not entry.exists():
+                return False
+            pwp_dn = entry.get_attr_val_utf8('pwdpolicysubentry')
+            if not pwp_dn:
+                return False
+            pwp_entry = PwPolicyEntry(self._instance, pwp_dn)
+            return pwp_entry.exists() and pwp_entry.present('objectClass', 'passwordpolicy')
+        except ldap.LDAPError:
             return False

     def create_user_policy(self, dn, properties):
@@ -114,10 +152,10 @@ class PwPolicyManager(object):
         pwp_containers = nsContainers(self._instance, basedn=parentdn)
         pwp_container = pwp_containers.ensure_state(properties={'cn': 'nsPwPolicyContainer'})

-        # Create policy entry
+        # Create or update the policy entry
         properties['cn'] = 'cn=nsPwPolicyEntry_user,%s' % dn
         pwp_entries = PwPolicyEntries(self._instance, pwp_container.dn)
-        pwp_entry = pwp_entries.create(properties=properties)
+        pwp_entry = pwp_entries.ensure_state(properties=properties)
         try:
             # Add policy to the entry
             user_entry.replace('pwdpolicysubentry', pwp_entry.dn)
@@ -152,32 +190,27 @@ class PwPolicyManager(object):
         pwp_containers = nsContainers(self._instance, basedn=dn)
         pwp_container = pwp_containers.ensure_state(properties={'cn': 'nsPwPolicyContainer'})

-        # Create policy entry
-        pwp_entry = None
+        # Create or update the policy entry
         properties['cn'] = 'cn=nsPwPolicyEntry_subtree,%s' % dn
         pwp_entries = PwPolicyEntries(self._instance, pwp_container.dn)
-        pwp_entry = pwp_entries.create(properties=properties)
-        try:
-            # The CoS template entry (nsPwTemplateEntry) that has the pwdpolicysubentry
-            # value pointing to the above (nsPwPolicyEntry) entry
-            cos_template = None
-            cos_templates = CosTemplates(self._instance, pwp_container.dn)
-            cos_template = cos_templates.create(properties={'cosPriority': '1',
-                                                            'pwdpolicysubentry': pwp_entry.dn,
-                                                            'cn': 'cn=nsPwTemplateEntry,%s' % dn})
-
-            # The CoS specification entry at the subtree level
-            cos_pointer_defs = CosPointerDefinitions(self._instance, dn)
-            cos_pointer_defs.create(properties={'cosAttribute': 'pwdpolicysubentry default operational-default',
-                                                'cosTemplateDn': cos_template.dn,
-                                                'cn': 'nsPwPolicy_CoS'})
-        except ldap.LDAPError as e:
-            # Something went wrong, remove what we have done
-            if pwp_entry is not None:
-                pwp_entry.delete()
-            if cos_template is not None:
-                cos_template.delete()
-            raise e
+        pwp_entry = pwp_entries.ensure_state(properties=properties)
+
+        # Ensure the CoS template entry (nsPwTemplateEntry) that points to the
+        # password policy entry
+        cos_templates = CosTemplates(self._instance, pwp_container.dn)
+        cos_template = cos_templates.ensure_state(properties={
+            'cosPriority': '1',
+            'pwdpolicysubentry': pwp_entry.dn,
+            'cn': 'cn=nsPwTemplateEntry,%s' % dn
+        })
+
+        # Ensure the CoS specification entry at the subtree level
+        cos_pointer_defs = CosPointerDefinitions(self._instance, dn)
+        cos_pointer_defs.ensure_state(properties={
+            'cosAttribute': 'pwdpolicysubentry default operational-default',
+            'cosTemplateDn': cos_template.dn,
+            'cn': 'nsPwPolicy_CoS'
+        })

         # make sure that local policies are enabled
         self.set_global_policy({'nsslapd-pwpolicy-local': 'on'})
@@ -244,10 +277,12 @@ class PwPolicyManager(object):
         if self.is_subtree_policy(entry.dn):
             parentdn = dn
             subtree = True
-        else:
+        elif self.is_user_policy(entry.dn):
             dn_comps = ldap.dn.explode_dn(dn)
             dn_comps.pop(0)
             parentdn = ",".join(dn_comps)
+        else:
+            raise ValueError('The target entry dn does not have a password policy')

         # Starting deleting the policy, ignore the parts that might already have been removed
         pwp_container = nsContainer(self._instance, 'cn=nsPwPolicyContainer,%s' % parentdn)
--
2.49.0

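Taken together, the two detection helpers give delete_local_policy() an unambiguous dispatch between the two policy kinds. A hedged usage sketch (the instance and DNs are illustrative):

    from lib389.pwpolicy import PwPolicyManager

    pwp = PwPolicyManager(inst)  # `inst` is an existing DirSrv connection (assumed)

    # Subtree policy: a CoS template under cn=nsPwPolicyContainer,<ou> whose
    # pwdpolicysubentry points at a real passwordpolicy entry
    if pwp.is_subtree_policy('ou=People,dc=example,dc=com'):
        pwp.delete_local_policy('ou=People,dc=example,dc=com')

    # User policy: the target entry itself carries pwdpolicysubentry
    if pwp.is_user_policy('uid=test_user_1,ou=People,dc=example,dc=com'):
        pwp.delete_local_policy('uid=test_user_1,ou=People,dc=example,dc=com')
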
1460
0039-Issue-6919-numSubordinates-tombstoneNumSubordinates-.patch
Normal file
File diff suppressed because it is too large
574
0040-Issue-6910-Fix-latest-coverity-issues.patch
Normal file
@@ -0,0 +1,574 @@
From 5d2dc7f78f0a834e46d5665f0c12024da5ddda9e Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Mon, 28 Jul 2025 17:12:33 -0400
Subject: [PATCH] Issue 6910 - Fix latest coverity issues

Description:

Fix various coverity/ASAN warnings:

- CID 1618837: Out-of-bounds read (OVERRUN) - bdb_bdbreader_glue.c
- CID 1618831: Resource leak (RESOURCE_LEAK) - bdb_layer.c
- CID 1612606: Resource leak (RESOURCE_LEAK) - log.c
- CID 1611461: Uninitialized pointer read (UNINIT) - repl5_agmt.c
- CID 1568589: Dereference before null check (REVERSE_INULL) - repl5_agmt.c
- CID 1590353: Logically dead code (DEADCODE) - repl5_agmt.c
- CID 1611460: Logically dead code (DEADCODE) - control.c
- CID 1610568: Dereference after null check (FORWARD_NULL) - modify.c
- CID 1591259: Out-of-bounds read (OVERRUN) - memberof.c
- CID 1550231: Unsigned compared against 0 (NO_EFFECT) - memberof_config.c
- CID 1548904: Overflowed constant (INTEGER_OVERFLOW) - ch_malloc.c
- CID 1548902: Overflowed constant (INTEGER_OVERFLOW) - dse.c
- CID 1548900: Overflowed return value (INTEGER_OVERFLOW) - acct_util.c
- CID 1548898: Overflowed constant (INTEGER_OVERFLOW) - parents.c
- CID 1546849: Resource leak (RESOURCE_LEAK) - referint.c
- ASAN - Use after free - automember.c

Relates: http://github.com/389ds/389-ds-base/issues/6910

Reviewed by: progier & spichugi (Thanks!)
---
 ldap/servers/plugins/acctpolicy/acct_util.c   |  6 ++-
 ldap/servers/plugins/automember/automember.c  |  9 ++--
 ldap/servers/plugins/memberof/memberof.c      | 15 +++++--
 .../plugins/memberof/memberof_config.c        | 11 +++--
 ldap/servers/plugins/referint/referint.c      |  4 +-
 ldap/servers/plugins/replication/repl5_agmt.c | 41 ++++++++-----------
 .../slapd/back-ldbm/db-bdb/bdb_import.c       |  5 ++-
 .../back-ldbm/db-bdb/bdb_instance_config.c    |  3 +-
 .../slapd/back-ldbm/db-bdb/bdb_layer.c        | 13 ++++--
 ldap/servers/slapd/back-ldbm/parents.c        |  4 +-
 ldap/servers/slapd/ch_malloc.c                |  4 +-
 ldap/servers/slapd/control.c                  |  5 +--
 ldap/servers/slapd/dse.c                      |  4 +-
 ldap/servers/slapd/log.c                      |  5 ++-
 ldap/servers/slapd/modify.c                   |  6 +--
 ldap/servers/slapd/passwd_extop.c             |  2 +-
 ldap/servers/slapd/unbind.c                   | 12 ++++--
 17 files changed, 88 insertions(+), 61 deletions(-)

diff --git a/ldap/servers/plugins/acctpolicy/acct_util.c b/ldap/servers/plugins/acctpolicy/acct_util.c
index b27eeaff1..7735d10e6 100644
--- a/ldap/servers/plugins/acctpolicy/acct_util.c
+++ b/ldap/servers/plugins/acctpolicy/acct_util.c
@@ -17,7 +17,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 Contributors:
 Hewlett-Packard Development Company, L.P.

-Copyright (C) 2021 Red Hat, Inc.
+Copyright (C) 2025 Red Hat, Inc.
 ******************************************************************************/

 #include <stdio.h>
@@ -248,6 +248,10 @@ gentimeToEpochtime(char *gentimestr)

     /* Turn tm object into local epoch time */
     epochtime = mktime(&t);
+    if (epochtime == (time_t) -1) {
+        /* mktime failed */
+        return 0;
+    }

     /* Turn local epoch time into GMT epoch time */
     epochtime -= zone_offset;
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
index f900db7f2..9eade495e 100644
--- a/ldap/servers/plugins/automember/automember.c
+++ b/ldap/servers/plugins/automember/automember.c
@@ -1,5 +1,5 @@
 /** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2022 Red Hat, Inc.
+ * Copyright (C) 2025 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -1756,9 +1756,10 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char

     mod_pb = slapi_pblock_new();
     /* Do a single mod with error overrides for DEL/ADD */
-    result = slapi_single_modify_internal_override(mod_pb, slapi_sdn_new_dn_byval(group_dn), mods,
-                                                   automember_get_plugin_id(), 0);
-
+    Slapi_DN *sdn = slapi_sdn_new_normdn_byref(group_dn);
+    result = slapi_single_modify_internal_override(mod_pb, sdn, mods,
+                                                   automember_get_plugin_id(), 0);
+    slapi_sdn_free(&sdn);
     if(add){
         if (result != LDAP_SUCCESS) {
             slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index 073d8d938..cfda977f0 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -1,5 +1,5 @@
 /** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2021 Red Hat, Inc.
+ * Copyright (C) 2025 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -1655,6 +1655,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn
                 /* We already did the search for this backend, don't
                  * do it again when we fall through */
                 do_suffix_search = PR_FALSE;
+                slapi_pblock_init(search_pb);
             }
         }
     } else if (!all_backends) {
@@ -3763,6 +3764,10 @@ memberof_replace_list(Slapi_PBlock *pb, MemberOfConfig *config, Slapi_DN *group_

             pre_index++;
         } else {
+            if (pre_index >= pre_total || post_index >= post_total) {
+                /* Don't overrun pre_array/post_array */
+                break;
+            }
             /* decide what to do */
             int cmp = memberof_compare(
                 config,
@@ -4453,10 +4458,12 @@ memberof_add_memberof_attr(LDAPMod **mods, const char *dn, char *add_oc)

     while (1) {
         slapi_pblock_init(mod_pb);
-
+        Slapi_DN *sdn = slapi_sdn_new_normdn_byref(dn);
         /* Internal mod with error overrides for DEL/ADD */
-        rc = slapi_single_modify_internal_override(mod_pb, slapi_sdn_new_normdn_byref(dn), single_mod,
-                                                   memberof_get_plugin_id(), SLAPI_OP_FLAG_BYPASS_REFERRALS);
+        rc = slapi_single_modify_internal_override(mod_pb, sdn, single_mod,
+                                                   memberof_get_plugin_id(),
+                                                   SLAPI_OP_FLAG_BYPASS_REFERRALS);
+        slapi_sdn_free(&sdn);
         if (rc == LDAP_OBJECT_CLASS_VIOLATION) {
             if (!add_oc || added_oc) {
                 /*
diff --git a/ldap/servers/plugins/memberof/memberof_config.c b/ldap/servers/plugins/memberof/memberof_config.c
index 1e83ba6e0..e4da351d9 100644
--- a/ldap/servers/plugins/memberof/memberof_config.c
+++ b/ldap/servers/plugins/memberof/memberof_config.c
@@ -1,5 +1,5 @@
 /** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2021 Red Hat, Inc.
+ * Copyright (C) 2025 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -570,21 +570,24 @@ memberof_apply_config(Slapi_PBlock *pb __attribute__((unused)),
     if (num_groupattrs > 1) {
         size_t bytes_out = 0;
         size_t filter_str_len = groupattr_name_len + (num_groupattrs * 4) + 4;
+        int32_t rc = 0;

         /* Allocate enough space for the filter */
         filter_str = slapi_ch_malloc(filter_str_len);

         /* Add beginning of filter. */
-        bytes_out = snprintf(filter_str, filter_str_len - bytes_out, "(|");
-        if (bytes_out<0) {
+        rc = snprintf(filter_str, filter_str_len - bytes_out, "(|");
+        if (rc < 0) {
             slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM, "snprintf unexpectly failed in memberof_apply_config.\n");
             *returncode = LDAP_UNWILLING_TO_PERFORM;
             goto done;
+        } else {
+            bytes_out = rc;
         }

         /* Add filter section for each groupattr. */
         for (size_t i=0; theConfig.groupattrs && theConfig.groupattrs[i]; i++) {
-            size_t bytes_read = snprintf(filter_str + bytes_out, filter_str_len - bytes_out, "(%s=*)", theConfig.groupattrs[i]);
+            int32_t bytes_read = snprintf(filter_str + bytes_out, filter_str_len - bytes_out, "(%s=*)", theConfig.groupattrs[i]);
             if (bytes_read<0) {
                 slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM, "snprintf unexpectly failed in memberof_apply_config.\n");
                 *returncode = LDAP_UNWILLING_TO_PERFORM;
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
index 5d7f9e5dd..5746b913f 100644
--- a/ldap/servers/plugins/referint/referint.c
+++ b/ldap/servers/plugins/referint/referint.c
@@ -1,6 +1,6 @@
 /** BEGIN COPYRIGHT BLOCK
  * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2021 Red Hat, Inc.
+ * Copyright (C) 2025 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -1499,6 +1499,8 @@ referint_thread_func(void *arg __attribute__((unused)))
             slapi_sdn_free(&sdn);
             continue;
         }
+
+        slapi_sdn_free(&tmpsuperior);
         if (!strcasecmp(ptoken, "NULL")) {
             tmpsuperior = NULL;
         } else {
diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
index 0a81167b7..eed97578e 100644
--- a/ldap/servers/plugins/replication/repl5_agmt.c
+++ b/ldap/servers/plugins/replication/repl5_agmt.c
@@ -1,6 +1,6 @@
 /** BEGIN COPYRIGHT BLOCK
  * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2021 Red Hat, Inc.
+ * Copyright (C) 2025 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -202,7 +202,7 @@ agmt_init_session_id(Repl_Agmt *ra)
     char *host = NULL; /* e.g. localhost.domain */
     char port[10];     /* e.g. 389 */
     char sport[10];    /* e.g. 636 */
-    char *hash_in;
+    char *hash_in = NULL;
     int32_t max_str_sid = SESSION_ID_STR_SZ - 4;

     if (ra == NULL) {
@@ -2718,31 +2718,26 @@ agmt_update_init_status(Repl_Agmt *ra)
         mod_idx++;
     }

-    if (nb_mods) {
-        /* it is ok to release the lock here because we are done with the agreement data.
-           we have to do it before issuing the modify operation because it causes
-           agmtlist_notify_all to be called which uses the same lock - hence the deadlock */
-        PR_Unlock(ra->lock);
-
-        pb = slapi_pblock_new();
-        mods[nb_mods] = NULL;
+    /* it is ok to release the lock here because we are done with the agreement data.
+       we have to do it before issuing the modify operation because it causes
+       agmtlist_notify_all to be called which uses the same lock - hence the deadlock */
+    PR_Unlock(ra->lock);

-        slapi_modify_internal_set_pb_ext(pb, ra->dn, mods, NULL, NULL,
-                                         repl_get_plugin_identity(PLUGIN_MULTISUPPLIER_REPLICATION), 0);
-        slapi_modify_internal_pb(pb);
+    pb = slapi_pblock_new();
+    mods[nb_mods] = NULL;

-        slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
-        if (rc != LDAP_SUCCESS && rc != LDAP_NO_SUCH_ATTRIBUTE) {
-            slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "agmt_update_consumer_ruv - "
-                          "%s: agmt_update_consumer_ruv: "
-                          "failed to update consumer's RUV; LDAP error - %d\n",
-                          ra->long_name, rc);
-        }
+    slapi_modify_internal_set_pb_ext(pb, ra->dn, mods, NULL, NULL,
+                                     repl_get_plugin_identity(PLUGIN_MULTISUPPLIER_REPLICATION), 0);
+    slapi_modify_internal_pb(pb);

-        slapi_pblock_destroy(pb);
-    } else {
-        PR_Unlock(ra->lock);
+    slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
+    if (rc != LDAP_SUCCESS && rc != LDAP_NO_SUCH_ATTRIBUTE) {
+        slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "agmt_update_consumer_ruv - "
+                      "%s: agmt_update_consumer_ruv: failed to update consumer's RUV; LDAP error - %d\n",
+                      ra->long_name, rc);
     }
+
+    slapi_pblock_destroy(pb);
     slapi_ch_free((void **)&mods);
     slapi_mod_done(&smod_start_time);
     slapi_mod_done(&smod_end_time);
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
index 46c80ec3d..0127bf2f9 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
@@ -1,5 +1,5 @@
 /** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2020 Red Hat, Inc.
+ * Copyright (C) 2025 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -947,6 +947,7 @@ bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job)
                          EQ_PREFIX, (u_long)id);
         key.size++; /* include the null terminator */
         ret = NEW_IDL_NO_ALLID;
+        idl_free(&children);
         children = idl_fetch(be, db_pid, &key, txn, ai_pid, &ret);
         if (ret != 0) {
             ldbm_nasty("bdb_ancestorid_new_idl_create_index", sourcefile, 13070, ret);
@@ -957,6 +958,7 @@ bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job)
         if (job->flags & FLAG_ABORT) {
             import_log_notice(job, SLAPI_LOG_ERR, "bdb_ancestorid_new_idl_create_index",
                               "ancestorid creation aborted.");
+            idl_free(&children);
             ret = -1;
             break;
         }
@@ -1290,6 +1292,7 @@ bdb_update_subordinatecounts(backend *be, ImportJob *job, DB_TXN *txn)
     }
     bdb_close_subcount_cursor(&c_entryrdn);
     bdb_close_subcount_cursor(&c_objectclass);
+
     return ret;
 }

diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c
index bb515a23f..44a624fde 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c
@@ -1,5 +1,5 @@
 /** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2020 Red Hat, Inc.
+ * Copyright (C) 2025 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -261,6 +261,7 @@ bdb_instance_cleanup(struct ldbm_instance *inst)
     if (inst_dirp && *inst_dir) {
         return_value = env->remove(env, inst_dirp, 0);
     } else {
+        slapi_ch_free((void **)&env);
         return_value = -1;
     }
     if (return_value == EBUSY) {
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
index 53f1cde69..b1e44a919 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
@@ -1,5 +1,5 @@
 /** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2023 Red Hat, Inc.
+ * Copyright (C) 2025 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -2027,9 +2027,13 @@ bdb_pre_close(struct ldbminfo *li)
     conf = (bdb_config *)li->li_dblayer_config;
     bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;

+    if (pEnv == NULL) {
+        return;
+    }
+
     pthread_mutex_lock(&pEnv->bdb_thread_count_lock);

-    if (conf->bdb_stop_threads || !pEnv) {
+    if (conf->bdb_stop_threads) {
         /* already stopped. do nothing... */
         goto timeout_escape;
     }
@@ -2203,6 +2207,7 @@ bdb_remove_env(struct ldbminfo *li)
     }
     if (NULL == li) {
         slapi_log_err(SLAPI_LOG_ERR, "bdb_remove_env", "No ldbm info is given\n");
+        slapi_ch_free((void **)&env);
         return -1;
     }

@@ -2212,10 +2217,11 @@ bdb_remove_env(struct ldbminfo *li)
         if (rc) {
             slapi_log_err(SLAPI_LOG_ERR,
                           "bdb_remove_env", "Failed to remove DB environment files. "
-                          "Please remove %s/__db.00# (# is 1 through 6)\n",
+                                            "Please remove %s/__db.00# (# is 1 through 6)\n",
                           home_dir);
         }
     }
+    slapi_ch_free((void **)&env);
     return rc;
 }

@@ -6341,6 +6347,7 @@ bdb_back_ctrl(Slapi_Backend *be, int cmd, void *info)
             db->close(db, 0);
             rc = bdb_db_remove_ex((bdb_db_env *)priv->dblayer_env, path, NULL, PR_TRUE);
             inst->inst_changelog = NULL;
+            slapi_ch_free_string(&path);
             slapi_ch_free_string(&instancedir);
         }
     }
diff --git a/ldap/servers/slapd/back-ldbm/parents.c b/ldap/servers/slapd/back-ldbm/parents.c
index 31107591e..52c665ca4 100644
--- a/ldap/servers/slapd/back-ldbm/parents.c
+++ b/ldap/servers/slapd/back-ldbm/parents.c
@@ -1,6 +1,6 @@
 /** BEGIN COPYRIGHT BLOCK
  * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2025 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -123,7 +123,7 @@ parent_update_on_childchange(modify_context *mc, int op, size_t *new_sub_count)
     /* Now compute the new value */
     if ((PARENTUPDATE_ADD == op) || (PARENTUPDATE_RESURECT == op)) {
         current_sub_count++;
-    } else {
+    } else if (current_sub_count > 0) {
         current_sub_count--;
     }

diff --git a/ldap/servers/slapd/ch_malloc.c b/ldap/servers/slapd/ch_malloc.c
index cbab1d170..27ed546a5 100644
--- a/ldap/servers/slapd/ch_malloc.c
+++ b/ldap/servers/slapd/ch_malloc.c
@@ -1,6 +1,6 @@
 /** BEGIN COPYRIGHT BLOCK
  * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2025 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -254,7 +254,7 @@ slapi_ch_bvecdup(struct berval **v)
         ++i;
     newberval = (struct berval **)slapi_ch_malloc((i + 1) * sizeof(struct berval *));
     newberval[i] = NULL;
-    while (i-- > 0) {
+    while (i > 0 && i-- > 0) {
         newberval[i] = slapi_ch_bvdup(v[i]);
     }
 }
diff --git a/ldap/servers/slapd/control.c b/ldap/servers/slapd/control.c
index 7aeeba885..d661dc6e1 100644
--- a/ldap/servers/slapd/control.c
+++ b/ldap/servers/slapd/control.c
@@ -174,7 +174,6 @@ create_sessiontracking_ctrl(const char *session_tracking_id, LDAPControl **sessi
     char *undefined_sid = "undefined sid";
     const char *sid;
     int rc = 0;
-    int tag;
     LDAPControl *ctrl = NULL;

     if (session_tracking_id) {
@@ -183,9 +182,7 @@ create_sessiontracking_ctrl(const char *session_tracking_id, LDAPControl **sessi
         sid = undefined_sid;
     }
     ctrlber = ber_alloc();
-    tag = ber_printf( ctrlber, "{nnno}", sid, strlen(sid));
-    if (rc == LBER_ERROR) {
-        tag = -1;
+    if ((rc = ber_printf( ctrlber, "{nnno}", sid, strlen(sid)) == LBER_ERROR)) {
         goto done;
     }
     slapi_build_control(LDAP_CONTROL_X_SESSION_TRACKING, ctrlber, 0, &ctrl);
diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
index b788054db..bec3e32f4 100644
--- a/ldap/servers/slapd/dse.c
+++ b/ldap/servers/slapd/dse.c
@@ -1,6 +1,6 @@
 /** BEGIN COPYRIGHT BLOCK
  * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2025 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -637,7 +637,7 @@ dse_updateNumSubordinates(Slapi_Entry *entry, int op)
     /* Now compute the new value */
     if (SLAPI_OPERATION_ADD == op) {
         current_sub_count++;
-    } else {
+    } else if (current_sub_count > 0) {
         current_sub_count--;
     }
     {
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index 91ba23047..a9a5f3b3f 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -1,6 +1,6 @@
 /** BEGIN COPYRIGHT BLOCK
  * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005-2024 Red Hat, Inc.
+ * Copyright (C) 2025 Red Hat, Inc.
  * Copyright (C) 2010 Hewlett-Packard Development Company, L.P.
  * All rights reserved.
  *
@@ -201,6 +201,7 @@ compress_log_file(char *log_name, int32_t mode)

     if ((source = fopen(log_name, "r")) == NULL) {
         /* Failed to open log file */
+        /* coverity[leaked_storage] gzclose does close FD */
         gzclose(outfile);
         return -1;
     }
@@ -211,11 +212,13 @@ compress_log_file(char *log_name, int32_t mode)
         if (bytes_written == 0)
         {
             fclose(source);
+            /* coverity[leaked_storage] gzclose does close FD */
             gzclose(outfile);
             return -1;
         }
         bytes_read = fread(buf, 1, LOG_CHUNK, source);
     }
+    /* coverity[leaked_storage] gzclose does close FD */
     gzclose(outfile);
     fclose(source);
     PR_Delete(log_name); /* remove the old uncompressed log */
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
index 0a351d46a..9e5bce80b 100644
--- a/ldap/servers/slapd/modify.c
+++ b/ldap/servers/slapd/modify.c
@@ -1,6 +1,6 @@
 /** BEGIN COPYRIGHT BLOCK
  * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2009 Red Hat, Inc.
+ * Copyright (C) 2025 Red Hat, Inc.
  * Copyright (C) 2009, 2010 Hewlett-Packard Development Company, L.P.
  * All rights reserved.
  *
@@ -498,7 +498,7 @@ slapi_modify_internal_set_pb_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod
  *
  * Any other errors encountered during the operation will be returned as-is.
  */
-int
+int
 slapi_single_modify_internal_override(Slapi_PBlock *pb, const Slapi_DN *sdn, LDAPMod **mod, Slapi_ComponentId *plugin_id, int op_flags)
 {
     int rc = 0;
@@ -512,7 +512,7 @@ slapi_single_modify_internal_override(Slapi_PBlock *pb, const Slapi_DN *sdn, LDA
                       !pb ? "pb " : "",
                       !sdn ? "sdn " : "",
                       !mod ? "mod " : "",
-                      !mod[0] ? "mod[0] " : "");
+                      !mod || !mod[0] ? "mod[0] " : "");
|
||||
return LDAP_PARAM_ERROR;
|
||||
}
|
||||
diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c
|
||||
index 69bb3494c..5f05cf74e 100644
|
||||
--- a/ldap/servers/slapd/passwd_extop.c
|
||||
+++ b/ldap/servers/slapd/passwd_extop.c
|
||||
@@ -1,5 +1,5 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
diff --git a/ldap/servers/slapd/unbind.c b/ldap/servers/slapd/unbind.c
|
||||
index fa8cd649f..c4e7a5efd 100644
|
||||
--- a/ldap/servers/slapd/unbind.c
|
||||
+++ b/ldap/servers/slapd/unbind.c
|
||||
@@ -1,6 +1,6 @@
|
||||
/** BEGIN COPYRIGHT BLOCK
|
||||
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
||||
- * Copyright (C) 2005 Red Hat, Inc.
|
||||
+ * Copyright (C) 2025 Red Hat, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* License: GPL (version 3 or any later version).
|
||||
@@ -112,8 +112,12 @@ do_unbind(Slapi_PBlock *pb)
|
||||
/* pass the unbind to all backends */
|
||||
be_unbindall(pb_conn, operation);
|
||||
|
||||
-free_and_return:;
|
||||
+free_and_return:
|
||||
|
||||
- /* close the connection to the client */
|
||||
- disconnect_server(pb_conn, operation->o_connid, operation->o_opid, SLAPD_DISCONNECT_UNBIND, 0);
|
||||
+ /* close the connection to the client after refreshing the operation */
|
||||
+ slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
|
||||
+ disconnect_server(pb_conn,
|
||||
+ operation ? operation->o_connid : -1,
|
||||
+ operation ? operation->o_opid : -1,
|
||||
+ SLAPD_DISCONNECT_UNBIND, 0);
|
||||
}
|
||||
--
|
||||
2.49.0
|
||||
|
||||
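Editor's note: two hunks in the patch above (parents.c and dse.c) apply the same fix — the numSubordinates value is kept in an unsigned counter, so decrementing it at zero would wrap around to a huge value. A minimal standalone C sketch of that guard, with illustrative names that are not the server's own code:

#include <stdio.h>
#include <stddef.h>

/* Mirror of the "else if (count > 0)" pattern the patch adds:
 * never decrement an unsigned counter that is already zero. */
static size_t
update_sub_count(size_t current_sub_count, int adding)
{
    if (adding) {
        current_sub_count++;
    } else if (current_sub_count > 0) { /* guard against unsigned wraparound */
        current_sub_count--;
    }
    return current_sub_count;
}

int
main(void)
{
    size_t count = 0;
    count = update_sub_count(count, 0); /* stays 0 instead of wrapping to SIZE_MAX */
    printf("%zu\n", count);
    return 0;
}
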
@ -0,0 +1,35 @@
From ea62e862c8ca7e036f7d1e23ec3a27bffbc39bdf Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Mon, 11 Aug 2025 13:19:13 +0200
Subject: [PATCH] Issue 6929 - Compilation failure with rust-1.89 on Fedora ELN

Bug Description:
The `ValueArrayRefIter` struct has a lifetime parameter `'a`.
But in the `iter` method the return type doesn't specify the lifetime parameter.

Fix Description:
Make the lifetime explicit.

Fixes: https://github.com/389ds/389-ds-base/issues/6929

Reviewed by: @droideck (Thanks!)
---
 src/slapi_r_plugin/src/value.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/slapi_r_plugin/src/value.rs b/src/slapi_r_plugin/src/value.rs
index 2fd35c808..fec74ac25 100644
--- a/src/slapi_r_plugin/src/value.rs
+++ b/src/slapi_r_plugin/src/value.rs
@@ -61,7 +61,7 @@ impl ValueArrayRef {
         ValueArrayRef { raw_slapi_val }
     }

-    pub fn iter(&self) -> ValueArrayRefIter {
+    pub fn iter(&self) -> ValueArrayRefIter<'_> {
         ValueArrayRefIter {
             idx: 0,
             va_ref: &self,
--
2.49.0

4
389-ds-base-devel.README
Normal file
@ -0,0 +1,4 @@
For detailed information on developing plugins for 389 Directory Server visit

https://www.port389.org/docs/389ds/design/plugins.html
https://github.com/389ds/389-ds-base/blob/main/src/slapi_r_plugin/README.md
1100
389-ds-base.spec
Normal file
File diff suppressed because it is too large
3
389-ds-base.sysusers
Normal file
@ -0,0 +1,3 @@
#Type Name ID GECOS Home directory Shell
g dirsrv 389
u dirsrv 389:389 "user for 389-ds-base" /usr/share/dirsrv/ /sbin/nologin
File diff suppressed because it is too large
@ -1,119 +0,0 @@
From dddb14210b402f317e566b6387c76a8e659bf7fa Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Tue, 14 Feb 2023 13:34:10 +0100
Subject: [PATCH 1/2] issue 5647 - covscan: memory leak in audit log when
 adding entries (#5650)

covscan reported an issue about "vals" variable in auditlog.c:231 and indeed a charray_free is missing.
Issue: 5647
Reviewed by: @mreynolds389, @droideck
---
 ldap/servers/slapd/auditlog.c | 71 +++++++++++++++++++----------------
 1 file changed, 38 insertions(+), 33 deletions(-)

diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 68cbc674d..3128e0497 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -177,6 +177,40 @@ write_auditfail_log_entry(Slapi_PBlock *pb)
     slapi_ch_free_string(&audit_config);
 }

+/*
+ * Write the attribute values to the audit log as "comments"
+ *
+ *   Slapi_Attr *entry - the attribute begin logged.
+ *   char *attrname - the attribute name.
+ *   lenstr *l - the audit log buffer
+ *
+ * Resulting output in the log:
+ *
+ *   #ATTR: VALUE
+ *   #ATTR: VALUE
+ */
+static void
+log_entry_attr(Slapi_Attr *entry_attr, char *attrname, lenstr *l)
+{
+    Slapi_Value **vals = attr_get_present_values(entry_attr);
+    for(size_t i = 0; vals && vals[i]; i++) {
+        char log_val[256] = "";
+        const struct berval *bv = slapi_value_get_berval(vals[i]);
+        if (bv->bv_len >= 256) {
+            strncpy(log_val, bv->bv_val, 252);
+            strcpy(log_val+252, "...");
+        } else {
+            strncpy(log_val, bv->bv_val, bv->bv_len);
+            log_val[bv->bv_len] = 0;
+        }
+        addlenstr(l, "#");
+        addlenstr(l, attrname);
+        addlenstr(l, ": ");
+        addlenstr(l, log_val);
+        addlenstr(l, "\n");
+    }
+}
+
 /*
  * Write "requested" attributes from the entry to the audit log as "comments"
  *
@@ -212,21 +246,9 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
         for (req_attr = ldap_utf8strtok_r(display_attrs, ", ", &last); req_attr;
              req_attr = ldap_utf8strtok_r(NULL, ", ", &last))
         {
-            char **vals = slapi_entry_attr_get_charray(entry, req_attr);
-            for(size_t i = 0; vals && vals[i]; i++) {
-                char log_val[256] = {0};
-
-                if (strlen(vals[i]) > 256) {
-                    strncpy(log_val, vals[i], 252);
-                    strcat(log_val, "...");
-                } else {
-                    strcpy(log_val, vals[i]);
-                }
-                addlenstr(l, "#");
-                addlenstr(l, req_attr);
-                addlenstr(l, ": ");
-                addlenstr(l, log_val);
-                addlenstr(l, "\n");
+            slapi_entry_attr_find(entry, req_attr, &entry_attr);
+            if (entry_attr) {
+                log_entry_attr(entry_attr, req_attr, l);
             }
         }
     } else {
@@ -234,7 +256,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
         for (; entry_attr; entry_attr = entry_attr->a_next) {
             Slapi_Value **vals = attr_get_present_values(entry_attr);
             char *attr = NULL;
-            const char *val = NULL;

             slapi_attr_get_type(entry_attr, &attr);
             if (strcmp(attr, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) {
@@ -251,23 +272,7 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
                 addlenstr(l, ": ****************************\n");
                 continue;
             }
-
-            for(size_t i = 0; vals && vals[i]; i++) {
-                char log_val[256] = {0};
-
-                val = slapi_value_get_string(vals[i]);
-                if (strlen(val) > 256) {
-                    strncpy(log_val, val, 252);
-                    strcat(log_val, "...");
-                } else {
-                    strcpy(log_val, val);
-                }
-                addlenstr(l, "#");
-                addlenstr(l, attr);
-                addlenstr(l, ": ");
-                addlenstr(l, log_val);
-                addlenstr(l, "\n");
-            }
+            log_entry_attr(entry_attr, attr, l);
         }
     }
     slapi_ch_free_string(&display_attrs);
--
2.43.0

@ -1,27 +0,0 @@
From be7c2b82958e91ce08775bf6b5da3c311d3b00e5 Mon Sep 17 00:00:00 2001
From: progier389 <progier@redhat.com>
Date: Mon, 20 Feb 2023 16:14:05 +0100
Subject: [PATCH 2/2] Issue 5647 - Fix unused variable warning from previous
 commit (#5670)

* issue 5647 - memory leak in audit log when adding entries
* Issue 5647 - Fix unused variable warning from previous commit
---
 ldap/servers/slapd/auditlog.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 3128e0497..0597ecc6f 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -254,7 +254,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l)
     } else {
         /* Return all attributes */
         for (; entry_attr; entry_attr = entry_attr->a_next) {
-            Slapi_Value **vals = attr_get_present_values(entry_attr);
             char *attr = NULL;

             slapi_attr_get_type(entry_attr, &attr);
--
2.43.0

@ -1,147 +0,0 @@
From 692c4cec6cc5c0086cf58f83bcfa690c766c9887 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz <tbordaz@redhat.com>
Date: Fri, 2 Feb 2024 14:14:28 +0100
Subject: [PATCH] Issue 5407 - sync_repl crashes if enabled while dynamic
 plugin is enabled (#5411)

Bug description:
When dynamic plugin is enabled, if a MOD enables sync_repl plugin
then sync_repl init function registers the postop callback
that will be called for the MOD itself while the preop
has not been called.
postop expects preop to be called and so primary operation
to be set. When it is not set it crashes

Fix description:
If the primary operation is not set, just return

relates: #5407
---
 .../suites/syncrepl_plugin/basic_test.py | 68 +++++++++++++++++++
 ldap/servers/plugins/sync/sync_persist.c | 23 ++++++-
 2 files changed, 90 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
index eb3770b78..cdf35eeaa 100644
--- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
+++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py
@@ -592,6 +592,74 @@ def test_sync_repl_cenotaph(topo_m2, request):

     request.addfinalizer(fin)

+def test_sync_repl_dynamic_plugin(topology, request):
+    """Test sync_repl with dynamic plugin
+
+    :id: d4f84913-c18a-459f-8525-110f610ca9e6
+    :setup: install a standalone instance
+    :steps:
+        1. reset instance to standard (no retroCL, no sync_repl, no dynamic plugin)
+        2. Enable dynamic plugin
+        3. Enable retroCL/content_sync
+        4. Establish a sync_repl req
+    :expectedresults:
+        1. Should succeeds
+        2. Should succeeds
+        3. Should succeeds
+        4. Should succeeds
+    """
+
+    # Reset the instance in a default config
+    # Disable content sync plugin
+    topology.standalone.plugins.disable(name=PLUGIN_REPL_SYNC)
+
+    # Disable retro changelog
+    topology.standalone.plugins.disable(name=PLUGIN_RETRO_CHANGELOG)
+
+    # Disable dynamic plugins
+    topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'off')])
+    topology.standalone.restart()
+
+    # Now start the test
+    # Enable dynamic plugins
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')])
+    except ldap.LDAPError as e:
+        log.error('Failed to enable dynamic plugin! {}'.format(e.args[0]['desc']))
+        assert False
+
+    # Enable retro changelog
+    topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+
+    # Enbale content sync plugin
+    topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)
+
+    # create a sync repl client and wait 5 seconds to be sure it is running
+    sync_repl = Sync_persist(topology.standalone)
+    sync_repl.start()
+    time.sleep(5)
+
+    # create users
+    users = UserAccounts(topology.standalone, DEFAULT_SUFFIX)
+    users_set = []
+    for i in range(10001, 10004):
+        users_set.append(users.create_test_user(uid=i))
+
+    time.sleep(10)
+    # delete users, that automember/memberof will generate nested updates
+    for user in users_set:
+        user.delete()
+    # stop the server to get the sync_repl result set (exit from while loop).
+    # Only way I found to acheive that.
+    # and wait a bit to let sync_repl thread time to set its result before fetching it.
+    topology.standalone.stop()
+    sync_repl.get_result()
+    sync_repl.join()
+    log.info('test_sync_repl_dynamic_plugin: PASS\n')
+
+    # Success
+    log.info('Test complete')
+
 def test_sync_repl_invalid_cookie(topology, request):
     """Test sync_repl with invalid cookie

diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c
index d2210b64c..283607361 100644
--- a/ldap/servers/plugins/sync/sync_persist.c
+++ b/ldap/servers/plugins/sync/sync_persist.c
@@ -156,6 +156,17 @@ ignore_op_pl(Slapi_PBlock *pb)
      * This is the same for ident
      */
     prim_op = get_thread_primary_op();
+    if (prim_op == NULL) {
+        /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
+         * The only known case it happens is with dynamic plugin enabled and an
+         * update that enable the sync_repl plugin. In such case sync_repl registers
+         * the postop (sync_update_persist_op) that is called while the preop was not called
+         */
+        slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
+                      "ignore_op_pl - Operation without primary op set (0x%lx)\n",
+                      (ulong) op);
+        return;
+    }
     ident = sync_persist_get_operation_extension(pb);

     if (ident) {
@@ -232,8 +243,18 @@ sync_update_persist_op(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eprev, ber


     prim_op = get_thread_primary_op();
+    if (prim_op == NULL) {
+        /* This can happen if the PRE_OP (sync_update_persist_betxn_pre_op) was not called.
+         * The only known case it happens is with dynamic plugin enabled and an
+         * update that enable the sync_repl plugin. In such case sync_repl registers
+         * the postop (sync_update_persist_op) that is called while the preop was not called
+         */
+        slapi_log_err(SLAPI_LOG_PLUGIN, SYNC_PLUGIN_SUBSYSTEM,
+                      "sync_update_persist_op - Operation without primary op set (0x%lx)\n",
+                      (ulong) pb_op);
+        return;
+    }
     ident = sync_persist_get_operation_extension(pb);
-    PR_ASSERT(prim_op);

     if ((ident == NULL) && operation_is_flag_set(pb_op, OP_FLAG_NOOP)) {
         /* This happens for URP (add cenotaph, fixup rename, tombstone resurrect)
--
2.43.0

@ -1,840 +0,0 @@
From 8dc61a176323f0d41df730abd715ccff3034c2be Mon Sep 17 00:00:00 2001
From: Mark Reynolds <mreynolds@redhat.com>
Date: Sun, 27 Nov 2022 09:37:19 -0500
Subject: [PATCH] Issue 5547 - automember plugin improvements

Description:

Rebuild task has the following improvements:

- Only one task allowed at a time
- Do not cleanup previous members by default. Add new CLI option to intentionally
  cleanup memberships before rebuilding from scratch.
- Add better task logging to show fixup progress

To prevent automember from being called in a nested be_txn loop thread storage is
used to check and skip these loops.

relates: https://github.com/389ds/389-ds-base/issues/5547

Reviewed by: spichugi(Thanks!)
---
 .../automember_plugin/automember_mod_test.py | 43 +++-
 ldap/servers/plugins/automember/automember.c | 232 ++++++++++++++----
 ldap/servers/slapd/back-ldbm/ldbm_add.c | 11 +-
 ldap/servers/slapd/back-ldbm/ldbm_delete.c | 10 +-
 ldap/servers/slapd/back-ldbm/ldbm_modify.c | 11 +-
 .../lib389/cli_conf/plugins/automember.py | 10 +-
 src/lib389/lib389/plugins.py | 7 +-
 src/lib389/lib389/tasks.py | 9 +-
 8 files changed, 250 insertions(+), 83 deletions(-)

diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
index 8d25384bf..7a0ed3275 100644
--- a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
+++ b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py
@@ -5,12 +5,13 @@
 # License: GPL (version 3 or any later version).
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
-#
+import ldap
 import logging
 import pytest
 import os
+import time
 from lib389.utils import ds_is_older
-from lib389._constants import *
+from lib389._constants import DEFAULT_SUFFIX
 from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions
 from lib389.idm.user import UserAccounts
 from lib389.idm.group import Groups
@@ -41,6 +42,11 @@ def automember_fixture(topo, request):
     user_accts = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
     user = user_accts.create_test_user()

+    # Create extra users
+    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
+    for i in range(0, 100):
+        users.create_test_user(uid=i)
+
     # Create automember definitions and regex rules
     automember_prop = {
         'cn': 'testgroup_definition',
@@ -59,7 +65,7 @@ def automember_fixture(topo, request):
     automemberplugin.enable()
     topo.standalone.restart()

-    return (user, groups)
+    return user, groups


 def test_mods(automember_fixture, topo):
@@ -72,19 +78,21 @@ def test_mods(automember_fixture, topo):
         2. Update user that should add it to group[1]
         3. Update user that should add it to group[2]
         4. Update user that should add it to group[0]
-        5. Test rebuild task correctly moves user to group[1]
+        5. Test rebuild task adds user to group[1]
+        6. Test rebuild task cleanups groups and only adds it to group[1]
     :expectedresults:
         1. Success
        2. Success
        3. Success
        4. Success
        5. Success
+        6. Success
     """
     (user, groups) = automember_fixture

     # Update user which should go into group[0]
     user.replace('cn', 'whatever')
-    groups[0].is_member(user.dn)
+    assert groups[0].is_member(user.dn)
     if groups[1].is_member(user.dn):
         assert False
     if groups[2].is_member(user.dn):
@@ -92,7 +100,7 @@ def test_mods(automember_fixture, topo):

     # Update user0 which should go into group[1]
     user.replace('cn', 'mark')
-    groups[1].is_member(user.dn)
+    assert groups[1].is_member(user.dn)
     if groups[0].is_member(user.dn):
         assert False
     if groups[2].is_member(user.dn):
@@ -100,7 +108,7 @@ def test_mods(automember_fixture, topo):

     # Update user which should go into group[2]
     user.replace('cn', 'simon')
-    groups[2].is_member(user.dn)
+    assert groups[2].is_member(user.dn)
     if groups[0].is_member(user.dn):
         assert False
     if groups[1].is_member(user.dn):
@@ -108,7 +116,7 @@ def test_mods(automember_fixture, topo):

     # Update user which should go back into group[0] (full circle)
     user.replace('cn', 'whatever')
-    groups[0].is_member(user.dn)
+    assert groups[0].is_member(user.dn)
     if groups[1].is_member(user.dn):
         assert False
     if groups[2].is_member(user.dn):
@@ -128,12 +136,24 @@ def test_mods(automember_fixture, topo):
     automemberplugin.enable()
     topo.standalone.restart()

-    # Run rebuild task
+    # Run rebuild task (no cleanup)
     task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount")
+    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+        # test only one fixup task is allowed at a time
+        automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=top")
     task.wait()

-    # Test membership
-    groups[1].is_member(user.dn)
+    # Test membership (user should still be in groups[0])
+    assert groups[1].is_member(user.dn)
+    if not groups[0].is_member(user.dn):
+        assert False
+
+    # Run rebuild task with cleanup
+    task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount", cleanup=True)
+    task.wait()
+
+    # Test membership (user should only be in groups[1])
+    assert groups[1].is_member(user.dn)
     if groups[0].is_member(user.dn):
         assert False
     if groups[2].is_member(user.dn):
@@ -148,4 +168,3 @@ if __name__ == '__main__':
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main(["-s", CURRENT_FILE])
-
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
index 3494d0343..419adb052 100644
--- a/ldap/servers/plugins/automember/automember.c
+++ b/ldap/servers/plugins/automember/automember.c
@@ -1,5 +1,5 @@
 /** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2011 Red Hat, Inc.
+ * Copyright (C) 2022 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -14,7 +14,7 @@
  * Auto Membership Plug-in
  */
 #include "automember.h"
-
+#include <pthread.h>

 /*
  * Plug-in globals
@@ -22,7 +22,9 @@
 static PRCList *g_automember_config = NULL;
 static Slapi_RWLock *g_automember_config_lock = NULL;
 static uint64_t abort_rebuild_task = 0;
-
+static pthread_key_t td_automem_block_nested;
+static PRBool fixup_running = PR_FALSE;
+static PRLock *fixup_lock = NULL;
 static void *_PluginID = NULL;
 static Slapi_DN *_PluginDN = NULL;
 static Slapi_DN *_ConfigAreaDN = NULL;
@@ -93,9 +95,43 @@ static void automember_task_export_destructor(Slapi_Task *task);
 static void automember_task_map_destructor(Slapi_Task *task);

 #define DEFAULT_FILE_MODE PR_IRUSR | PR_IWUSR
+#define FIXUP_PROGRESS_LIMIT 1000
 static uint64_t plugin_do_modify = 0;
 static uint64_t plugin_is_betxn = 0;

+/* automember_plugin fixup task and add operations should block other be_txn
+ * plugins from calling automember_post_op_mod() */
+static int32_t
+slapi_td_block_nested_post_op(void)
+{
+    int32_t val = 12345;
+
+    if (pthread_setspecific(td_automem_block_nested, (void *)&val) != 0) {
+        return PR_FAILURE;
+    }
+    return PR_SUCCESS;
+}
+
+static int32_t
+slapi_td_unblock_nested_post_op(void)
+{
+    if (pthread_setspecific(td_automem_block_nested, NULL) != 0) {
+        return PR_FAILURE;
+    }
+    return PR_SUCCESS;
+}
+
+static int32_t
+slapi_td_is_post_op_nested(void)
+{
+    int32_t *value = pthread_getspecific(td_automem_block_nested);
+
+    if (value == NULL) {
+        return 0;
+    }
+    return 1;
+}
+
 /*
  * Config cache locking functions
  */
@@ -317,6 +353,14 @@ automember_start(Slapi_PBlock *pb)
         return -1;
     }

+    if (fixup_lock == NULL) {
+        if ((fixup_lock = PR_NewLock()) == NULL) {
+            slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+                          "automember_start - Failed to create fixup lock.\n");
+            return -1;
+        }
+    }
+
     /*
      * Get the plug-in target dn from the system
      * and store it for future use. */
@@ -360,6 +404,11 @@ automember_start(Slapi_PBlock *pb)
         }
     }

+    if (pthread_key_create(&td_automem_block_nested, NULL) != 0) {
+        slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+                      "automember_start - pthread_key_create failed\n");
+    }
+
     slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
                   "automember_start - ready for service\n");
     slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
@@ -394,6 +443,8 @@ automember_close(Slapi_PBlock *pb __attribute__((unused)))
     slapi_sdn_free(&_ConfigAreaDN);
     slapi_destroy_rwlock(g_automember_config_lock);
     g_automember_config_lock = NULL;
+    PR_DestroyLock(fixup_lock);
+    fixup_lock = NULL;

     slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
                   "<-- automember_close\n");
@@ -1619,7 +1670,6 @@ out:
     return rc;
 }

-
 /*
  * automember_update_member_value()
  *
@@ -1634,7 +1684,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
     LDAPMod *mods[2];
     char *vals[2];
     char *member_value = NULL;
-    int rc = 0;
+    int rc = LDAP_SUCCESS;
     Slapi_DN *group_sdn;

     /* First thing check that the group still exists */
@@ -1653,7 +1703,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char
                           "automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n",
                           group_dn, rc);
         }
-        return rc;
+        goto out;
     }

     /* If grouping_value is dn, we need to fetch the dn instead. */
@@ -1879,6 +1929,13 @@ automember_mod_post_op(Slapi_PBlock *pb)
     PRCList *list = NULL;
     int rc = SLAPI_PLUGIN_SUCCESS;

+    if (slapi_td_is_post_op_nested()) {
+        /* don't process op twice in the same thread */
+        return rc;
+    } else {
+        slapi_td_block_nested_post_op();
+    }
+
     slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
                   "--> automember_mod_post_op\n");

@@ -2005,6 +2062,7 @@ automember_mod_post_op(Slapi_PBlock *pb)
             }
         }
     }
+    slapi_td_unblock_nested_post_op();

     slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
                   "<-- automember_mod_post_op (%d)\n", rc);
@@ -2024,6 +2082,13 @@ automember_add_post_op(Slapi_PBlock *pb)
     slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM,
                   "--> automember_add_post_op\n");

+    if (slapi_td_is_post_op_nested()) {
+        /* don't process op twice in the same thread */
+        return rc;
+    } else {
+        slapi_td_block_nested_post_op();
+    }
+
     /* Reload config if a config entry was added. */
     if ((sdn = automember_get_sdn(pb))) {
         if (automember_dn_is_config(sdn)) {
@@ -2039,7 +2104,7 @@ automember_add_post_op(Slapi_PBlock *pb)

     /* If replication, just bail. */
     if (automember_isrepl(pb)) {
-        return SLAPI_PLUGIN_SUCCESS;
+        goto bail;
     }

     /* Get the newly added entry. */
@@ -2052,7 +2117,7 @@ automember_add_post_op(Slapi_PBlock *pb)
                                        tombstone);
     slapi_value_free(&tombstone);
     if (is_tombstone) {
-        return SLAPI_PLUGIN_SUCCESS;
+        goto bail;
     }

     /* Check if a config entry applies
@@ -2063,21 +2128,19 @@ automember_add_post_op(Slapi_PBlock *pb)
         list = PR_LIST_HEAD(g_automember_config);
         while (list != g_automember_config) {
             config = (struct configEntry *)list;
-
             /* Does the entry meet scope and filter requirements? */
             if (slapi_dn_issuffix(slapi_sdn_get_dn(sdn), config->scope) &&
-                (slapi_filter_test_simple(e, config->filter) == 0)) {
+                (slapi_filter_test_simple(e, config->filter) == 0))
+            {
                 /* Find out what membership changes are needed and make them. */
                 if (automember_update_membership(config, e, NULL) == SLAPI_PLUGIN_FAILURE) {
                     rc = SLAPI_PLUGIN_FAILURE;
                     break;
                 }
             }
-
             list = PR_NEXT_LINK(list);
         }
     }
-
     automember_config_unlock();
     } else {
         slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
@@ -2098,6 +2161,7 @@ bail:
         slapi_pblock_set(pb, SLAPI_RESULT_CODE, &result);
         slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, &errtxt);
     }
+    slapi_td_unblock_nested_post_op();

     return rc;
 }
@@ -2138,6 +2202,7 @@ typedef struct _task_data
     Slapi_DN *base_dn;
     char *bind_dn;
     int scope;
+    PRBool cleanup;
 } task_data;

 static void
@@ -2270,6 +2335,7 @@ automember_task_abort_thread(void *arg)
  *    basedn: dc=example,dc=com
  *    filter: (uid=*)
  *    scope: sub
+ *    cleanup: yes/on (default is off)
  *
  *    basedn and filter are required. If scope is omitted, the default is sub
  */
@@ -2284,9 +2350,22 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
     const char *base_dn;
     const char *filter;
     const char *scope;
+    const char *cleanup_str;
+    PRBool cleanup = PR_FALSE;

     *returncode = LDAP_SUCCESS;

+    PR_Lock(fixup_lock);
+    if (fixup_running) {
+        PR_Unlock(fixup_lock);
+        *returncode = LDAP_UNWILLING_TO_PERFORM;
+        slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+                      "automember_task_add - there is already a fixup task running\n");
+        rv = SLAPI_DSE_CALLBACK_ERROR;
+        goto out;
+    }
+    PR_Unlock(fixup_lock);
+
     /*
      * Grab the task params
      */
@@ -2300,6 +2379,12 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
         rv = SLAPI_DSE_CALLBACK_ERROR;
         goto out;
     }
+    if ((cleanup_str = slapi_entry_attr_get_ref(e, "cleanup"))) {
+        if (strcasecmp(cleanup_str, "yes") == 0 || strcasecmp(cleanup_str, "on")) {
+            cleanup = PR_TRUE;
+        }
+    }
+
     scope = slapi_fetch_attr(e, "scope", "sub");
     /*
      * setup our task data
@@ -2315,6 +2400,7 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
     mytaskdata->bind_dn = slapi_ch_strdup(bind_dn);
     mytaskdata->base_dn = slapi_sdn_new_dn_byval(base_dn);
     mytaskdata->filter_str = slapi_ch_strdup(filter);
+    mytaskdata->cleanup = cleanup;

     if (scope) {
         if (strcasecmp(scope, "sub") == 0) {
@@ -2334,6 +2420,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
     task = slapi_plugin_new_task(slapi_entry_get_ndn(e), arg);
     slapi_task_set_destructor_fn(task, automember_task_destructor);
     slapi_task_set_data(task, mytaskdata);
+    PR_Lock(fixup_lock);
+    fixup_running = PR_TRUE;
+    PR_Unlock(fixup_lock);
     /*
      * Start the task as a separate thread
      */
@@ -2345,6 +2434,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr
                       "automember_task_add - Unable to create task thread!\n");
         *returncode = LDAP_OPERATIONS_ERROR;
         slapi_task_finish(task, *returncode);
+        PR_Lock(fixup_lock);
+        fixup_running = PR_FALSE;
+        PR_Unlock(fixup_lock);
         rv = SLAPI_DSE_CALLBACK_ERROR;
     } else {
         rv = SLAPI_DSE_CALLBACK_OK;
@@ -2372,6 +2464,9 @@ automember_rebuild_task_thread(void *arg)
     PRCList *list = NULL;
     PRCList *include_list = NULL;
     int result = 0;
+    int64_t fixup_progress_count = 0;
+    int64_t fixup_progress_elapsed = 0;
+    int64_t fixup_start_time = 0;
     size_t i = 0;

     /* Reset abort flag */
@@ -2380,6 +2475,7 @@ automember_rebuild_task_thread(void *arg)
     if (!task) {
         return; /* no task */
     }
+
     slapi_task_inc_refcount(task);
     slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
                   "automember_rebuild_task_thread - Refcount incremented.\n");
@@ -2393,9 +2489,11 @@ automember_rebuild_task_thread(void *arg)
     slapi_task_log_status(task, "Automember rebuild task starting (base dn: (%s) filter (%s)...",
                           slapi_sdn_get_dn(td->base_dn), td->filter_str);
     /*
-     * Set the bind dn in the local thread data
+     * Set the bind dn in the local thread data, and block post op mods
      */
     slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));
+    slapi_td_block_nested_post_op();
+    fixup_start_time = slapi_current_rel_time_t();
     /*
      * Take the config lock now and search the database
      */
@@ -2426,6 +2524,21 @@ automember_rebuild_task_thread(void *arg)
      * Loop over the entries
      */
     for (i = 0; entries && (entries[i] != NULL); i++) {
+        fixup_progress_count++;
+        if (fixup_progress_count % FIXUP_PROGRESS_LIMIT == 0 ) {
+            slapi_task_log_notice(task,
+                                  "Processed %ld entries in %ld seconds (+%ld seconds)",
+                                  fixup_progress_count,
+                                  slapi_current_rel_time_t() - fixup_start_time,
+                                  slapi_current_rel_time_t() - fixup_progress_elapsed);
+            slapi_task_log_status(task,
+                                  "Processed %ld entries in %ld seconds (+%ld seconds)",
+                                  fixup_progress_count,
+                                  slapi_current_rel_time_t() - fixup_start_time,
+                                  slapi_current_rel_time_t() - fixup_progress_elapsed);
+            slapi_task_inc_progress(task);
+            fixup_progress_elapsed = slapi_current_rel_time_t();
+        }
         if (slapi_atomic_load_64(&abort_rebuild_task, __ATOMIC_ACQUIRE) == 1) {
             /* The task was aborted */
             slapi_task_log_notice(task, "Automember rebuild task was intentionally aborted");
@@ -2443,48 +2556,66 @@ automember_rebuild_task_thread(void *arg)
             if (slapi_dn_issuffix(slapi_entry_get_dn(entries[i]), config->scope) &&
                 (slapi_filter_test_simple(entries[i], config->filter) == 0))
             {
-                /* First clear out all the defaults groups */
-                for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
-                    if ((result = automember_update_member_value(entries[i], config->default_groups[ii],
-                        config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
-                    {
-                        slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
-                                                    "member from default group (%s) error (%d)",
-                                              config->default_groups[ii], result);
-                        slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
-                                                    "member from default group (%s) error (%d)",
-                                              config->default_groups[ii], result);
-                        slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
-                                      "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
-                                      config->default_groups[ii], result);
-                        goto out;
-                    }
-                }
-
-                /* Then clear out the non-default group */
-                if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
-                    include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
-                    while (include_list != (PRCList *)config->inclusive_rules) {
-                        struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
-                        if ((result = automember_update_member_value(entries[i], slapi_sdn_get_dn(curr_rule->target_group_dn),
-                            config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER)))
+                if (td->cleanup) {
+
+                    slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+                                  "automember_rebuild_task_thread - Cleaning up groups (config %s)\n",
+                                  config->dn);
+                    /* First clear out all the defaults groups */
+                    for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) {
+                        if ((result = automember_update_member_value(entries[i],
+                                                                     config->default_groups[ii],
+                                                                     config->grouping_attr,
+                                                                     config->grouping_value,
+                                                                     NULL, DEL_MEMBER)))
                         {
                             slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
-                                                        "member from group (%s) error (%d)",
-                                                  slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+                                                        "member from default group (%s) error (%d)",
+                                                  config->default_groups[ii], result);
                             slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
-                                                        "member from group (%s) error (%d)",
-                                                  slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+                                                        "member from default group (%s) error (%d)",
+                                                  config->default_groups[ii], result);
                             slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
                                           "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
-                                          slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+                                          config->default_groups[ii], result);
                             goto out;
                         }
-                        include_list = PR_NEXT_LINK(include_list);
                     }
+
+                    /* Then clear out the non-default group */
+                    if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) {
+                        include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules);
+                        while (include_list != (PRCList *)config->inclusive_rules) {
+                            struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list;
+                            if ((result = automember_update_member_value(entries[i],
+                                                                         slapi_sdn_get_dn(curr_rule->target_group_dn),
+                                                                         config->grouping_attr,
+                                                                         config->grouping_value,
+                                                                         NULL, DEL_MEMBER)))
+                            {
+                                slapi_task_log_notice(task, "Automember rebuild membership task unable to delete "
+                                                            "member from group (%s) error (%d)",
+                                                      slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+                                slapi_task_log_status(task, "Automember rebuild membership task unable to delete "
+                                                            "member from group (%s) error (%d)",
+                                                      slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+                                slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+                                              "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n",
+                                              slapi_sdn_get_dn(curr_rule->target_group_dn), result);
+                                goto out;
+                            }
+                            include_list = PR_NEXT_LINK(include_list);
+                        }
+                    }
+                    slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+                                  "automember_rebuild_task_thread - Finished cleaning up groups (config %s)\n",
+                                  config->dn);
                 }

                 /* Update the memberships for this entries */
+                slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
+                              "automember_rebuild_task_thread - Updating membership (config %s)\n",
+                              config->dn);
                 if (slapi_is_shutting_down() ||
                     automember_update_membership(config, entries[i], NULL) == SLAPI_PLUGIN_FAILURE)
                 {
@@ -2508,15 +2639,22 @@ out:
         slapi_task_log_notice(task, "Automember rebuild task aborted. Error (%d)", result);
         slapi_task_log_status(task, "Automember rebuild task aborted. Error (%d)", result);
     } else {
-        slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
-        slapi_task_log_status(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i);
+        slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
+                              (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
+        slapi_task_log_status(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds",
+                              (int64_t)i, slapi_current_rel_time_t() - fixup_start_time);
     }
     slapi_task_inc_progress(task);
     slapi_task_finish(task, result);
     slapi_task_dec_refcount(task);
     slapi_atomic_store_64(&abort_rebuild_task, 0, __ATOMIC_RELEASE);
+    slapi_td_unblock_nested_post_op();
+    PR_Lock(fixup_lock);
+    fixup_running = PR_FALSE;
+    PR_Unlock(fixup_lock);
+
     slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM,
-                  "automember_rebuild_task_thread - Refcount decremented.\n");
+                  "automember_rebuild_task_thread - task finished, refcount decremented.\n");
 }

 /*
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
index ba2d73a84..ce4c314a1 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
@@ -1,6 +1,6 @@
 /** BEGIN COPYRIGHT BLOCK
  * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2022 Red Hat, Inc.
  * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
  * All rights reserved.
  *
@@ -1264,10 +1264,6 @@ ldbm_back_add(Slapi_PBlock *pb)
     goto common_return;

 error_return:
-    /* Revert the caches if this is the parent operation */
-    if (parent_op && betxn_callback_fails) {
-        revert_cache(inst, &parent_time);
-    }
     if (addingentry_id_assigned) {
         next_id_return(be, addingentry->ep_id);
     }
@@ -1376,6 +1372,11 @@ diskfull_return:
         if (!not_an_error) {
             rc = SLAPI_FAIL_GENERAL;
         }
+
+        /* Revert the caches if this is the parent operation */
+        if (parent_op && betxn_callback_fails) {
+            revert_cache(inst, &parent_time);
+        }
     }

 common_return:
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index de23190c3..27f0ac58a 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -1407,11 +1407,6 @@ commit_return:
     goto common_return;

 error_return:
-    /* Revert the caches if this is the parent operation */
-    if (parent_op && betxn_callback_fails) {
-        revert_cache(inst, &parent_time);
-    }
-
     if (tombstone) {
         if (cache_is_in_cache(&inst->inst_cache, tombstone)) {
             tomb_ep_id = tombstone->ep_id; /* Otherwise, tombstone might have been freed. */
@@ -1496,6 +1491,11 @@ error_return:
                     conn_id, op_id, parent_modify_c.old_entry, parent_modify_c.new_entry, myrc);
     }

+    /* Revert the caches if this is the parent operation */
+    if (parent_op && betxn_callback_fails) {
+        revert_cache(inst, &parent_time);
+    }
+
 common_return:
     if (orig_entry) {
         /* NOTE: #define SLAPI_DELETE_BEPREOP_ENTRY SLAPI_ENTRY_PRE_OP */
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index 537369055..64b293001 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -1,6 +1,6 @@
 /** BEGIN COPYRIGHT BLOCK
  * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2022 Red Hat, Inc.
  * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
  * All rights reserved.
  *
@@ -1043,11 +1043,6 @@ ldbm_back_modify(Slapi_PBlock *pb)
     goto common_return;

 error_return:
-    /* Revert the caches if this is the parent operation */
-    if (parent_op && betxn_callback_fails) {
-        revert_cache(inst, &parent_time);
-    }
-
     if (postentry != NULL) {
         slapi_entry_free(postentry);
         postentry = NULL;
@@ -1103,6 +1098,10 @@ error_return:
         if (!not_an_error) {
             rc = SLAPI_FAIL_GENERAL;
         }
+        /* Revert the caches if this is the parent operation */
+        if (parent_op && betxn_callback_fails) {
+            revert_cache(inst, &parent_time);
+        }
     }

     /* if ec is in cache, remove it, then add back e if we still have it */
diff --git a/src/lib389/lib389/cli_conf/plugins/automember.py b/src/lib389/lib389/cli_conf/plugins/automember.py
index 15b00c633..568586ad8 100644
--- a/src/lib389/lib389/cli_conf/plugins/automember.py
+++ b/src/lib389/lib389/cli_conf/plugins/automember.py
@@ -155,7 +155,7 @@ def fixup(inst, basedn, log, args):
     log.info('Attempting to add task entry... This will fail if Automembership plug-in is not enabled.')
     if not plugin.status():
         log.error("'%s' is disabled. Rebuild membership task can't be executed" % plugin.rdn)
-    fixup_task = plugin.fixup(args.DN, args.filter)
+    fixup_task = plugin.fixup(args.DN, args.filter, args.cleanup)
     if args.wait:
         log.info(f'Waiting for fixup task "{fixup_task.dn}" to complete. You can safely exit by pressing Control C ...')
         fixup_task.wait(timeout=args.timeout)
@@ -225,8 +225,8 @@ def create_parser(subparsers):
     subcommands = automember.add_subparsers(help='action')
     add_generic_plugin_parsers(subcommands, AutoMembershipPlugin)

-    list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
-    subcommands_list = list.add_subparsers(help='action')
+    automember_list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.')
+    subcommands_list = automember_list.add_subparsers(help='action')
     list_definitions = subcommands_list.add_parser('definitions', help='Lists Automembership definitions.')
     list_definitions.set_defaults(func=definition_list)
     list_regexes = subcommands_list.add_parser('regexes', help='List Automembership regex rules.')
@@ -269,6 +269,8 @@ def create_parser(subparsers):
     fixup_task.add_argument('-f', '--filter', required=True, help='Sets the LDAP filter for entries to fix up')
     fixup_task.add_argument('-s', '--scope', required=True, choices=['sub', 'base', 'one'], type=str.lower,
                             help='Sets the LDAP search scope for entries to fix up')
+    fixup_task.add_argument('--cleanup', action='store_true',
+                            help="Clean up previous group memberships before rebuilding")
     fixup_task.add_argument('--wait', action='store_true',
                             help="Wait for the task to finish, this could take a long time")
     fixup_task.add_argument('--timeout', default=0, type=int,
@@ -279,7 +281,7 @@ def create_parser(subparsers):
     fixup_status.add_argument('--dn', help="The task entry's DN")
     fixup_status.add_argument('--show-log', action='store_true', help="Display the task log")
     fixup_status.add_argument('--watch', action='store_true',
-                              help="Watch the task's status and wait for it to finish")
+                              help="Watch the task's status and wait for it to finish")

     abort_fixup = subcommands.add_parser('abort-fixup', help='Abort the rebuild membership task.')
     abort_fixup.set_defaults(func=abort)
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
index 52691a44c..a1ad0a45b 100644
--- a/src/lib389/lib389/plugins.py
+++ b/src/lib389/lib389/plugins.py
@@ -1141,13 +1141,15 @@ class AutoMembershipPlugin(Plugin):
     def __init__(self, instance, dn="cn=Auto Membership Plugin,cn=plugins,cn=config"):
         super(AutoMembershipPlugin, self).__init__(instance, dn)

-    def fixup(self, basedn, _filter=None):
+    def fixup(self, basedn, _filter=None, cleanup=False):
         """Create an automember rebuild membership task

         :param basedn: Basedn to fix up
         :type basedn: str
         :param _filter: a filter for entries to fix up
         :type _filter: str
+        :param cleanup: cleanup old group memberships
+        :type cleanup: boolean

         :returns: an instance of Task(DSLdapObject)
         """
@@ -1156,6 +1158,9 @@ class AutoMembershipPlugin(Plugin):
         task_properties = {'basedn': basedn}
         if _filter is not None:
             task_properties['filter'] = _filter
+        if cleanup:
+            task_properties['cleanup'] = "yes"
+
         task.create(properties=task_properties)

         return task
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
index 1a16bbb83..193805780 100644
--- a/src/lib389/lib389/tasks.py
+++ b/src/lib389/lib389/tasks.py
@@ -1006,12 +1006,13 @@ class Tasks(object):
         return exitCode

     def automemberRebuild(self, suffix=DEFAULT_SUFFIX, scope='sub',
-                          filterstr='objectclass=top', args=None):
+                          filterstr='objectclass=top', cleanup=False, args=None):
         '''
-        @param suffix - The suffix the task should examine - defualt is
+        @param suffix - The suffix the task should examine - default is
                         "dc=example,dc=com"
         @param scope - The scope of the search to find entries
-        @param fitlerstr - THe search filter to find entries
+        @param fitlerstr - The search filter to find entries
+        @param cleanup - reset/clear the old group mmeberships prior to rebuilding
        @param args - is a dictionary that contains modifier of the task
                       wait: True/[False] - If True, waits for the completion of
                                            the task before to return
@@ -1027,6 +1028,8 @@ class Tasks(object):
         entry.setValues('basedn', suffix)
         entry.setValues('filter', filterstr)
         entry.setValues('scope', scope)
+        if cleanup:
+            entry.setValues('cleanup', 'yes')

         # start the task and possibly wait for task completion
         try:
--
2.43.0

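Editor's note: the automember patch above blocks nested be_txn post-op processing with a per-thread flag managed through pthread_key_create/pthread_setspecific. A minimal standalone C sketch of that thread-local reentrancy-guard pattern (illustrative only, not the plugin source; compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_key_t in_post_op_key;
static int marker = 1; /* any non-NULL address works as the per-thread flag */

/* Returns non-zero when this thread is already inside a post-op. */
static int nested(void) { return pthread_getspecific(in_post_op_key) != NULL; }

static void
post_op(void)
{
    if (nested()) {
        return; /* nested invocation in the same thread: skip */
    }
    pthread_setspecific(in_post_op_key, &marker);
    printf("processing op\n");
    post_op(); /* a nested call is now a no-op */
    pthread_setspecific(in_post_op_key, NULL);
}

int
main(void)
{
    pthread_key_create(&in_post_op_key, NULL);
    post_op(); /* prints "processing op" exactly once */
    return 0;
}
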
@ -1,83 +0,0 @@
From 9319d5b022918f14cacb00e3faef85a6ab730a26 Mon Sep 17 00:00:00 2001
From: Simon Pichugin <spichugi@redhat.com>
Date: Tue, 27 Feb 2024 16:30:47 -0800
Subject: [PATCH] Issue 3527 - Support HAProxy and Instance on the same machine
 configuration (#6107)

Description: Improve how we handle HAProxy connections to work better when
the DS and HAProxy are on the same machine.
Ensure the client and header destination IPs are checked against the trusted IP list.

Additionally, this change will also allow configuration having
HAProxy is listening on a different subnet than the one used to forward the request.

Related: https://github.com/389ds/389-ds-base/issues/3527

Reviewed by: @progier389, @jchapma (Thanks!)
---
 ldap/servers/slapd/connection.c | 35 +++++++++++++++++++++++++--------
 1 file changed, 27 insertions(+), 8 deletions(-)

diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index d28a39bf7..10a8cc577 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1187,6 +1187,8 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
     char str_ip[INET6_ADDRSTRLEN + 1] = {0};
     char str_haproxy_ip[INET6_ADDRSTRLEN + 1] = {0};
     char str_haproxy_destip[INET6_ADDRSTRLEN + 1] = {0};
+    int trusted_matches_ip_found = 0;
+    int trusted_matches_destip_found = 0;
     struct berval **bvals = NULL;
     int proxy_connection = 0;

@@ -1245,21 +1247,38 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
                 normalize_IPv4(conn->cin_addr, buf_ip, sizeof(buf_ip), str_ip, sizeof(str_ip));
                 normalize_IPv4(&pr_netaddr_dest, buf_haproxy_destip, sizeof(buf_haproxy_destip),
                                str_haproxy_destip, sizeof(str_haproxy_destip));
+                size_t ip_len = strlen(buf_ip);
+                size_t destip_len = strlen(buf_haproxy_destip);

                 /* Now, reset RC and set it to 0 only if a match is found */
                 haproxy_rc = -1;

-                /* Allow only:
-                 * Trusted IP == Original Client IP == HAProxy Header Destination IP */
+                /*
+                 * We need to allow a configuration where DS instance and HAProxy are on the same machine.
+                 * In this case, we need to check if
+                 * the HAProxy client IP (which will be a loopback address) matches one of the the trusted IP addresses,
+                 * while still checking that
+                 * the HAProxy header destination IP address matches one of the trusted IP addresses.
+                 * Additionally, this change will also allow configuration having
+                 * HAProxy listening on a different subnet than one used to forward the request.
+                 */
                 for (size_t i = 0; bvals[i] != NULL; ++i) {
-                    if ((strlen(bvals[i]->bv_val) == strlen(buf_ip)) &&
-                        (strlen(bvals[i]->bv_val) == strlen(buf_haproxy_destip)) &&
-                        (strncasecmp(bvals[i]->bv_val, buf_ip, strlen(buf_ip)) == 0) &&
-                        (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, strlen(buf_haproxy_destip)) == 0)) {
-                        haproxy_rc = 0;
-                        break;
+                    size_t bval_len = strlen(bvals[i]->bv_val);
+
+                    /* Check if the Client IP (HAProxy's machine IP) address matches the trusted IP address */
+                    if (!trusted_matches_ip_found) {
+                        trusted_matches_ip_found = (bval_len == ip_len) && (strncasecmp(bvals[i]->bv_val, buf_ip, ip_len) == 0);
+                    }
+                    /* Check if the HAProxy header destination IP address matches the trusted IP address */
+                    if (!trusted_matches_destip_found) {
+                        trusted_matches_destip_found = (bval_len == destip_len) && (strncasecmp(bvals[i]->bv_val, buf_haproxy_destip, destip_len) == 0);
                     }
                 }
+
+                if (trusted_matches_ip_found && trusted_matches_destip_found) {
+                    haproxy_rc = 0;
+                }
+
                 if (haproxy_rc == -1) {
                     slapi_log_err(SLAPI_LOG_CONNS, "connection_read_operation", "HAProxy header received from unknown source.\n");
                     disconnect_server_nomutex(conn, conn->c_connid, -1, SLAPD_DISCONNECT_PROXY_UNKNOWN, EPROTO);
--
2.45.0

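The key change in the patch above is splitting one combined comparison into two independent ones: the client IP (a loopback address when HAProxy is co-located) and the PROXY-header destination IP must each match some trusted entry, no longer the same entry. A minimal Python sketch of that check, outside the server (names are illustrative, not from the patch):

    # Each address must independently match an entry in the trusted list;
    # comparisons are case-insensitive like the strncasecmp() calls above.
    def haproxy_trusted(client_ip: str, header_dest_ip: str,
                        trusted_ips: list) -> bool:
        trusted = {ip.lower() for ip in trusted_ips}
        return client_ip.lower() in trusted and header_dest_ip.lower() in trusted

    # Same-machine deployment: loopback client, public destination, both trusted.
    assert haproxy_trusted("127.0.0.1", "192.0.2.10",
                           ["127.0.0.1", "192.0.2.10"])
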
@ -1,108 +0,0 @@
From 016a2b6bd3e27cbff36609824a75b020dfd24823 Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 1 May 2024 15:01:33 +0100
Subject: [PATCH] CVE-2024-2199

---
 .../tests/suites/password/password_test.py | 56 +++++++++++++++++++
 ldap/servers/slapd/modify.c                |  8 ++-
 2 files changed, 62 insertions(+), 2 deletions(-)

diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py
index 38079476a..b3ff08904 100644
--- a/dirsrvtests/tests/suites/password/password_test.py
+++ b/dirsrvtests/tests/suites/password/password_test.py
@@ -65,6 +65,62 @@ def test_password_delete_specific_password(topology_st):
     log.info('test_password_delete_specific_password: PASSED')


+def test_password_modify_non_utf8(topology_st):
+    """Attempt a modify of the userPassword attribute with
+    an invalid non utf8 value
+
+    :id: a31af9d5-d665-42b9-8d6e-fea3d0837d36
+    :setup: Standalone instance
+    :steps:
+        1. Add a user if it doesnt exist and set its password
+        2. Verify password with a bind
+        3. Modify userPassword attr with invalid value
+        4. Attempt a bind with invalid password value
+        5. Verify original password with a bind
+    :expectedresults:
+        1. The user with userPassword should be added successfully
+        2. Operation should be successful
+        3. Server returns ldap.UNWILLING_TO_PERFORM
+        4. Server returns ldap.INVALID_CREDENTIALS
+        5. Operation should be successful
+    """
+
+    log.info('Running test_password_modify_non_utf8...')
+
+    # Create user and set password
+    standalone = topology_st.standalone
+    users = UserAccounts(standalone, DEFAULT_SUFFIX)
+    if not users.exists(TEST_USER_PROPERTIES['uid'][0]):
+        user = users.create(properties=TEST_USER_PROPERTIES)
+    else:
+        user = users.get(TEST_USER_PROPERTIES['uid'][0])
+    user.set('userpassword', PASSWORD)
+
+    # Verify password
+    try:
+        user.bind(PASSWORD)
+    except ldap.LDAPError as e:
+        log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
+        assert False
+
+    # Modify userPassword with an invalid value
+    password = b'tes\x82t-password'  # A non UTF-8 encoded password
+    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+        user.replace('userpassword', password)
+
+    # Verify a bind fails with invalid pasword
+    with pytest.raises(ldap.INVALID_CREDENTIALS):
+        user.bind(password)
+
+    # Verify we can still bind with original password
+    try:
+        user.bind(PASSWORD)
+    except ldap.LDAPError as e:
+        log.fatal('Failed to bind as {}, error: '.format(user.dn) + e.args[0]['desc'])
+        assert False
+
+    log.info('test_password_modify_non_utf8: PASSED')
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
index 5ca78539c..669bb104c 100644
--- a/ldap/servers/slapd/modify.c
+++ b/ldap/servers/slapd/modify.c
@@ -765,8 +765,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
      * flagged - leave mod attributes alone */
     if (!repl_op && !skip_modified_attrs && lastmod) {
         modify_update_last_modified_attr(pb, &smods);
+        slapi_pblock_set(pb, SLAPI_MODIFY_MODS, slapi_mods_get_ldapmods_byref(&smods));
     }

+
     if (0 == slapi_mods_get_num_mods(&smods)) {
         /* nothing to do - no mods - this is not an error - just
            send back LDAP_SUCCESS */
@@ -933,8 +935,10 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)

         /* encode password */
         if (pw_encodevals_ext(pb, sdn, va)) {
-            slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s.\n", slapi_entry_get_dn_const(e));
-            send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to store attribute \"userPassword\" correctly\n", 0, NULL);
+            slapi_log_err(SLAPI_LOG_CRIT, "op_shared_modify", "Unable to hash userPassword attribute for %s, "
+                          "check value is utf8 string.\n", slapi_entry_get_dn_const(e));
+            send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Unable to hash \"userPassword\" attribute, "
+                             "check value is utf8 string.\n", 0, NULL);
             valuearray_free(&va);
             goto free_and_return;
         }
--
2.45.0

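The regression test above pins down the contract the server now enforces: a userPassword value that is not valid UTF-8 cannot be hashed, so the modify is refused with unwillingToPerform instead of leaving the operation in a crash-prone state. A small sketch of the same validation done client-side, before ever sending the modify (the function is illustrative, not part of lib389):

    def validate_password_value(raw: bytes) -> None:
        # Mirrors the behavior the test exercises: non-UTF-8 values are
        # rejected up front rather than handed to the hashing plugin.
        try:
            raw.decode("utf-8")
        except UnicodeDecodeError as exc:
            raise ValueError("userPassword must be a UTF-8 string") from exc

    validate_password_value(b"secret12")            # accepted
    # validate_password_value(b"tes\x82t-password") # raises, like step 3 above
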
@ -1,213 +0,0 @@
From d5bbe52fbe84a7d3b5938bf82d5c4af15061a8e2 Mon Sep 17 00:00:00 2001
From: Pierre Rogier <progier@redhat.com>
Date: Wed, 17 Apr 2024 18:18:04 +0200
Subject: [PATCH] CVE-2024-3657

---
 .../tests/suites/filter/large_filter_test.py |  34 +++++-
 ldap/servers/slapd/back-ldbm/index.c         | 111 ++++++++++--------
 2 files changed, 92 insertions(+), 53 deletions(-)

diff --git a/dirsrvtests/tests/suites/filter/large_filter_test.py b/dirsrvtests/tests/suites/filter/large_filter_test.py
index ecc7bf979..40526bb16 100644
--- a/dirsrvtests/tests/suites/filter/large_filter_test.py
+++ b/dirsrvtests/tests/suites/filter/large_filter_test.py
@@ -13,19 +13,29 @@ verify and testing Filter from a search

 import os
 import pytest
+import ldap

-from lib389._constants import PW_DM
+from lib389._constants import PW_DM, DEFAULT_SUFFIX, ErrorLog
 from lib389.topologies import topology_st as topo
 from lib389.idm.user import UserAccounts, UserAccount
 from lib389.idm.account import Accounts
 from lib389.backend import Backends
 from lib389.idm.domain import Domain
+from lib389.utils import get_ldapurl_from_serverid

 SUFFIX = 'dc=anuj,dc=com'

 pytestmark = pytest.mark.tier1


+def open_new_ldapi_conn(dsinstance):
+    ldapurl, certdir = get_ldapurl_from_serverid(dsinstance)
+    assert 'ldapi://' in ldapurl
+    conn = ldap.initialize(ldapurl)
+    conn.sasl_interactive_bind_s("", ldap.sasl.external())
+    return conn
+
+
 @pytest.fixture(scope="module")
 def _create_entries(request, topo):
     """
@@ -160,6 +170,28 @@ def test_large_filter(topo, _create_entries, real_value):
         assert len(Accounts(conn, SUFFIX).filter(real_value)) == 3


+def test_long_filter_value(topo):
+    """Exercise large eq filter with dn syntax attributes
+
+    :id: b069ef72-fcc3-11ee-981c-482ae39447e5
+    :setup: Standalone
+    :steps:
+        1. Try to pass filter rules as per the condition.
+    :expectedresults:
+        1. Pass
+    """
+    inst = topo.standalone
+    conn = open_new_ldapi_conn(inst.serverid)
+    inst.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE,ErrorLog.SEARCH_FILTER))
+    filter_value = "a\x1Edmin" * 1025
+    conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
+    filter_value = "aAdmin" * 1025
+    conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
+    filter_value = "*"
+    conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f'(cn={filter_value})')
+    inst.config.loglevel(vals=(ErrorLog.DEFAULT,))
+
+
 if __name__ == '__main__':
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s -v %s" % CURRENT_FILE)
diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c
index 410db23d1..30fa09ebb 100644
--- a/ldap/servers/slapd/back-ldbm/index.c
+++ b/ldap/servers/slapd/back-ldbm/index.c
@@ -71,6 +71,32 @@ typedef struct _index_buffer_handle index_buffer_handle;
 #define INDEX_BUFFER_FLAG_SERIALIZE 1
 #define INDEX_BUFFER_FLAG_STATS 2

+/*
+ * space needed to encode a byte:
+ *  0x00-0x31 and 0x7f-0xff requires 3 bytes: \xx
+ *  0x22 and 0x5C requires 2 bytes: \" and \\
+ *  other requires 1 byte: c
+ */
+static char encode_size[] = {
+    /* 0x00 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+    /* 0x10 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+    /* 0x20 */ 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    /* 0x30 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    /* 0x40 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    /* 0x50 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1,
+    /* 0x60 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    /* 0x70 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3,
+    /* 0x80 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+    /* 0x90 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+    /* 0xA0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+    /* 0xB0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+    /* 0xC0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+    /* 0xD0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+    /* 0xE0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+    /* 0xF0 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+};
+
+
 /* Index buffering functions */

 static int
@@ -799,65 +825,46 @@ index_add_mods(

 /*
  * Convert a 'struct berval' into a displayable ASCII string
+ * returns the printable string
  */
-
-#define SPECIAL(c) (c < 32 || c > 126 || c == '\\' || c == '"')
-
 const char *
 encode(const struct berval *data, char buf[BUFSIZ])
 {
-    char *s;
-    char *last;
-    if (data == NULL || data->bv_len == 0)
-        return "";
-    last = data->bv_val + data->bv_len - 1;
-    for (s = data->bv_val; s < last; ++s) {
-        if (SPECIAL(*s)) {
-            char *first = data->bv_val;
-            char *bufNext = buf;
-            size_t bufSpace = BUFSIZ - 4;
-            while (1) {
-                /* printf ("%lu bytes ASCII\n", (unsigned long)(s - first)); */
-                if (bufSpace < (size_t)(s - first))
-                    s = first + bufSpace - 1;
-                if (s != first) {
-                    memcpy(bufNext, first, s - first);
-                    bufNext += (s - first);
-                    bufSpace -= (s - first);
-                }
-                do {
-                    if (bufSpace) {
-                        *bufNext++ = '\\';
-                        --bufSpace;
-                    }
-                    if (bufSpace < 2) {
-                        memcpy(bufNext, "..", 2);
-                        bufNext += 2;
-                        goto bail;
-                    }
-                    if (*s == '\\' || *s == '"') {
-                        *bufNext++ = *s;
-                        --bufSpace;
-                    } else {
-                        sprintf(bufNext, "%02x", (unsigned)*(unsigned char *)s);
-                        bufNext += 2;
-                        bufSpace -= 2;
-                    }
-                } while (++s <= last && SPECIAL(*s));
-                if (s > last)
-                    break;
-                first = s;
-                while (!SPECIAL(*s) && s <= last)
-                    ++s;
-            }
-        bail:
-            *bufNext = '\0';
-            /* printf ("%lu chars in buffer\n", (unsigned long)(bufNext - buf)); */
+    if (!data || !data->bv_val) {
+        strcpy(buf, "<NULL>");
+        return buf;
+    }
+    char *endbuff = &buf[BUFSIZ-4]; /* Reserve space to append "...\0" */
+    char *ptout = buf;
+    unsigned char *ptin = (unsigned char*) data->bv_val;
+    unsigned char *endptin = ptin+data->bv_len;
+
+    while (ptin < endptin) {
+        if (ptout >= endbuff) {
+            /*
+             * BUFSIZ(8K) > SLAPI_LOG_BUFSIZ(2K) so the error log message will be
+             * truncated anyway. So there is no real interrest to test if the original
+             * data contains no special characters and return it as is.
+             */
+            strcpy(endbuff, "...");
             return buf;
         }
+        switch (encode_size[*ptin]) {
+        case 1:
+            *ptout++ = *ptin++;
+            break;
+        case 2:
+            *ptout++ = '\\';
+            *ptout++ = *ptin++;
+            break;
+        case 3:
+            sprintf(ptout, "\\%02x", *ptin++);
+            ptout += 3;
+            break;
+        }
     }
-    /* printf ("%lu bytes, all ASCII\n", (unsigned long)(s - data->bv_val)); */
-    return data->bv_val;
+    *ptout = 0;
+    return buf;
 }

 static const char *
--
2.45.0

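The rewrite above replaces a rescanning escape loop with a single pass: every byte is classified once through the `encode_size` table, escaped into a bounded buffer, and the output is truncated with "..." when the reserved space runs out. A rough Python sketch of the same scheme (constants and names are illustrative):

    BUFSIZ = 8192

    def encode_value(data: bytes, bufsiz: int = BUFSIZ) -> str:
        out, used = [], 0
        for b in data:
            if b in (0x22, 0x5C):            # '"' and '\' -> 2 chars: \c
                piece = "\\" + chr(b)
            elif 0x20 <= b <= 0x7E:          # other printable ASCII -> 1 char
                piece = chr(b)
            else:                            # control / high bytes -> 3 chars: \xx
                piece = "\\%02x" % b
            if used + len(piece) > bufsiz - 4:   # reserve room for "...\0"
                out.append("...")
                break
            out.append(piece)
            used += len(piece)
        return "".join(out)

Because each input byte is touched exactly once, an attacker-supplied multi-kilobyte filter value can no longer drive the quadratic behavior that made the old encoder a denial-of-service vector.
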
@ -1,143 +0,0 @@
From 6e5f03d5872129963106024f53765234a282406c Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Fri, 16 Feb 2024 11:13:16 +0000
Subject: [PATCH] Issue 6096 - Improve connection timeout error logging (#6097)

Bug description: When a paged result search is run with a time limit,
if the time limit is exceed the server closes the connection with
closed IO timeout (nsslapd-ioblocktimeout) - T2. This error message
is incorrect as the reason the connection has been closed was because
the specified time limit on a paged result search has been exceeded.

Fix description: Correct error message

Relates: https://github.com/389ds/389-ds-base/issues/6096

Reviewed by: @tbordaz (Thank you)
---
 ldap/admin/src/logconv.pl                     | 24 ++++++++++++++++++-
 ldap/servers/slapd/daemon.c                   |  4 ++--
 ldap/servers/slapd/disconnect_error_strings.h |  1 +
 ldap/servers/slapd/disconnect_errors.h        |  2 +-
 4 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl
index 7698c383a..2a933c4a3 100755
--- a/ldap/admin/src/logconv.pl
+++ b/ldap/admin/src/logconv.pl
@@ -267,7 +267,7 @@ my $optimeAvg = 0;
 my %cipher = ();
 my @removefiles = ();

-my @conncodes = qw(A1 B1 B4 T1 T2 B2 B3 R1 P1 P2 U1);
+my @conncodes = qw(A1 B1 B4 T1 T2 T3 B2 B3 R1 P1 P2 U1);
 my %conn = ();
 map {$conn{$_} = $_} @conncodes;

@@ -355,6 +355,7 @@ $connmsg{"B1"} = "Bad Ber Tag Encountered";
 $connmsg{"B4"} = "Server failed to flush data (response) back to Client";
 $connmsg{"T1"} = "Idle Timeout Exceeded";
 $connmsg{"T2"} = "IO Block Timeout Exceeded or NTSSL Timeout";
+$connmsg{"T3"} = "Paged Search Time Limit Exceeded";
 $connmsg{"B2"} = "Ber Too Big";
 $connmsg{"B3"} = "Ber Peek";
 $connmsg{"R1"} = "Revents";
@@ -1723,6 +1724,10 @@ if ($usage =~ /j/i || $verb eq "yes"){
 		print "\n $recCount.  You have some coonections that are being closed by the ioblocktimeout setting. You may want to increase the ioblocktimeout.\n";
 		$recCount++;
 	}
+	if (defined($conncount->{"T3"}) and $conncount->{"T3"} > 0){
+		print "\n $recCount.  You have some connections that are being closed because a paged result search limit has been exceeded. You may want to increase the search time limit.\n";
+		$recCount++;
+	}
 	# compare binds to unbinds, if the difference is more than 30% of the binds, then report a issue
 	if (($bindCount - $unbindCount) > ($bindCount*.3)){
 		print "\n $recCount.  You have a significant difference between binds and unbinds.  You may want to investigate this difference.\n";
@@ -2366,6 +2371,7 @@ sub parseLineNormal
 		$brokenPipeCount++;
 		if (m/- T1/){ $hashes->{rc}->{"T1"}++; }
 		elsif (m/- T2/){ $hashes->{rc}->{"T2"}++; }
+		elsif (m/- T3/){ $hashes->{rc}->{"T3"}++; }
 		elsif (m/- A1/){ $hashes->{rc}->{"A1"}++; }
 		elsif (m/- B1/){ $hashes->{rc}->{"B1"}++; }
 		elsif (m/- B4/){ $hashes->{rc}->{"B4"}++; }
@@ -2381,6 +2387,7 @@ sub parseLineNormal
 		$connResetByPeerCount++;
 		if (m/- T1/){ $hashes->{src}->{"T1"}++; }
 		elsif (m/- T2/){ $hashes->{src}->{"T2"}++; }
+		elsif (m/- T3/){ $hashes->{src}->{"T3"}++; }
 		elsif (m/- A1/){ $hashes->{src}->{"A1"}++; }
 		elsif (m/- B1/){ $hashes->{src}->{"B1"}++; }
 		elsif (m/- B4/){ $hashes->{src}->{"B4"}++; }
@@ -2396,6 +2403,7 @@ sub parseLineNormal
 		$resourceUnavailCount++;
 		if (m/- T1/){ $hashes->{rsrc}->{"T1"}++; }
 		elsif (m/- T2/){ $hashes->{rsrc}->{"T2"}++; }
+		elsif (m/- T3/){ $hashes->{rsrc}->{"T3"}++; }
 		elsif (m/- A1/){ $hashes->{rsrc}->{"A1"}++; }
 		elsif (m/- B1/){ $hashes->{rsrc}->{"B1"}++; }
 		elsif (m/- B4/){ $hashes->{rsrc}->{"B4"}++; }
@@ -2494,6 +2502,20 @@ sub parseLineNormal
 			}
 		}
 	}
+	if (m/- T3/){
+		if ($_ =~ /conn= *([0-9A-Z]+)/i) {
+			$exc = "no";
+			$ip = getIPfromConn($1, $serverRestartCount);
+			for (my $xxx = 0; $xxx < $#excludeIP; $xxx++){
+				if ($ip eq $excludeIP[$xxx]){$exc = "yes";}
+			}
+			if ($exc ne "yes"){
+				$hashes->{T3}->{$ip}++;
+				$hashes->{conncount}->{"T3"}++;
+				$connCodeCount++;
+			}
+		}
+	}
 	if (m/- B2/){
 		if ($_ =~ /conn= *([0-9A-Z]+)/i) {
 			$exc = "no";
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 5a48aa66f..bb80dae36 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1599,9 +1599,9 @@ setup_pr_read_pds(Connection_Table *ct)
             int add_fd = 1;
             /* check timeout for PAGED RESULTS */
             if (pagedresults_is_timedout_nolock(c)) {
-                /* Exceeded the timelimit; disconnect the client */
+                /* Exceeded the paged search timelimit; disconnect the client */
                 disconnect_server_nomutex(c, c->c_connid, -1,
-                                          SLAPD_DISCONNECT_IO_TIMEOUT,
+                                          SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT,
                                           0);
                 connection_table_move_connection_out_of_active_list(ct,
                                                                     c);
diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
index f7a31d728..c2d9e283b 100644
--- a/ldap/servers/slapd/disconnect_error_strings.h
+++ b/ldap/servers/slapd/disconnect_error_strings.h
@@ -27,6 +27,7 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
 ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
 ER2(SLAPD_DISCONNECT_REVENTS, "R1")
 ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
 ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
 ER2(SLAPD_DISCONNECT_UNBIND, "U1")
 ER2(SLAPD_DISCONNECT_POLL, "P2")
diff --git a/ldap/servers/slapd/disconnect_errors.h b/ldap/servers/slapd/disconnect_errors.h
index a0484f1c2..e118f674c 100644
--- a/ldap/servers/slapd/disconnect_errors.h
+++ b/ldap/servers/slapd/disconnect_errors.h
@@ -35,6 +35,6 @@
 #define SLAPD_DISCONNECT_SASL_FAIL SLAPD_DISCONNECT_ERROR_BASE + 12
 #define SLAPD_DISCONNECT_PROXY_INVALID_HEADER SLAPD_DISCONNECT_ERROR_BASE + 13
 #define SLAPD_DISCONNECT_PROXY_UNKNOWN SLAPD_DISCONNECT_ERROR_BASE + 14
-
+#define SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT SLAPD_DISCONNECT_ERROR_BASE + 15

 #endif /* __DISCONNECT_ERRORS_H_ */
--
2.45.0

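The logconv.pl changes above all follow one pattern: recognize the new "- T3" marker in access log disconnect lines and tally it alongside the existing codes. A toy Python version of that counting loop, for readers who don't follow Perl (the code list is taken from the patch; the function itself is illustrative):

    import re

    CONN_CODES = ("A1", "B1", "B4", "T1", "T2", "T3",
                  "B2", "B3", "R1", "P1", "P2", "U1")

    def tally_disconnects(log_lines):
        # Count disconnect reason codes, including the new T3
        # (paged search time limit exceeded).
        counts = {code: 0 for code in CONN_CODES}
        pat = re.compile(r"- (%s)\b" % "|".join(CONN_CODES))
        for line in log_lines:
            m = pat.search(line)
            if m:
                counts[m.group(1)] += 1
        return counts
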
@ -1,44 +0,0 @@
From a112394af3a20787755029804684d57a9c3ffa9a Mon Sep 17 00:00:00 2001
From: James Chapman <jachapma@redhat.com>
Date: Wed, 21 Feb 2024 12:43:03 +0000
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap
 (#6104)

Bug description: A recent addition to the connection disconnect error
messaging, conflicts with how errormap.c maps error codes/strings.

Fix description: errormap expects error codes/strings to be in ascending
order. Moved the new error code to the bottom of the list.

Relates: https://github.com/389ds/389-ds-base/issues/6103

Reviewed by: @droideck. @progier389 (Thank you)
---
 ldap/servers/slapd/disconnect_error_strings.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
index c2d9e283b..f603a08ce 100644
--- a/ldap/servers/slapd/disconnect_error_strings.h
+++ b/ldap/servers/slapd/disconnect_error_strings.h
@@ -14,7 +14,8 @@
 /* disconnect_error_strings.h
  *
  * Strings describing the errors used in logging the reason a connection
- * was closed.
+ * was closed. Ensure definitions are in the same order as the error codes
+ * defined in disconnect_errors.h
  */
 #ifndef __DISCONNECT_ERROR_STRINGS_H_
 #define __DISCONNECT_ERROR_STRINGS_H_
@@ -35,6 +36,6 @@ ER2(SLAPD_DISCONNECT_NTSSL_TIMEOUT, "T2")
 ER2(SLAPD_DISCONNECT_SASL_FAIL, "S1")
 ER2(SLAPD_DISCONNECT_PROXY_INVALID_HEADER, "P3")
 ER2(SLAPD_DISCONNECT_PROXY_UNKNOWN, "P4")
-
+ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")

 #endif /* __DISCONNECT_ERROR_STRINGS_H_ */
--
2.45.0

@ -1,30 +0,0 @@
From edd9abc8901604dde1d739d87ca2906734d53dd3 Mon Sep 17 00:00:00 2001
From: Viktor Ashirov <vashirov@redhat.com>
Date: Thu, 13 Jun 2024 13:35:09 +0200
Subject: [PATCH] Issue 6103 - New connection timeout error breaks errormap

Description:
Remove duplicate SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT error code.

Fixes: https://github.com/389ds/389-ds-base/issues/6103

Reviewed by: @tbordaz (Thanks!)
---
 ldap/servers/slapd/disconnect_error_strings.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ldap/servers/slapd/disconnect_error_strings.h b/ldap/servers/slapd/disconnect_error_strings.h
index f603a08ce..d49cc79a2 100644
--- a/ldap/servers/slapd/disconnect_error_strings.h
+++ b/ldap/servers/slapd/disconnect_error_strings.h
@@ -28,7 +28,6 @@ ER2(SLAPD_DISCONNECT_BER_FLUSH, "B4")
 ER2(SLAPD_DISCONNECT_IDLE_TIMEOUT, "T1")
 ER2(SLAPD_DISCONNECT_REVENTS, "R1")
 ER2(SLAPD_DISCONNECT_IO_TIMEOUT, "T2")
-ER2(SLAPD_DISCONNECT_PAGED_SEARCH_LIMIT, "T3")
 ER2(SLAPD_DISCONNECT_PLUGIN, "P1")
 ER2(SLAPD_DISCONNECT_UNBIND, "U1")
 ER2(SLAPD_DISCONNECT_POLL, "P2")
--
2.45.0

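These two follow-up patches exist for the same underlying reason: errormap.c looks up disconnect codes assuming the ER2() entries appear in ascending error-code order, so inserting T3 mid-list (and later a duplicate of it) broke the mapping. A build-time invariant check would have caught both regressions; a sketch of one, with the codes standing in for the SLAPD_DISCONNECT_* offsets (the list values here are illustrative):

    def assert_ascending(codes):
        # Fail fast if the error table violates the ordering errormap relies on.
        for prev, cur in zip(codes, codes[1:]):
            if cur <= prev:
                raise AssertionError(f"error table not ascending: {prev} !< {cur}")

    assert_ascending([1, 2, 3, 12, 13, 14, 15])   # ok once T3 sits at offset 15
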
@ -1,220 +0,0 @@
From 8cf981c00ae18d3efaeb10819282cd991621e9a2 Mon Sep 17 00:00:00 2001
From: tbordaz <tbordaz@redhat.com>
Date: Wed, 22 May 2024 11:29:05 +0200
Subject: [PATCH] Issue 6172 - RFE: improve the performance of evaluation of
 filter component when tested against a large valueset (like group members)
 (#6173)

Bug description:
Before returning an entry (to a SRCH) the server checks that the entry matches the SRCH filter.
If a filter component (equality) is testing the value (ava) against a
large valueset (like uniquemember values), it takes a long time because
of the large number of values and required normalization of the values.
This can be improved taking benefit of sorted valueset. Those sorted
valueset were created to improve updates of large valueset (groups) but
at that time not implemented in SRCH path.

Fix description:
In case of LDAP_FILTER_EQUALITY component, the server can get
benefit of the sorted valuearray.
To limit the risk of regression, we use the sorted valuearray
only for the DN syntax attribute. Indeed the sorted valuearray was
designed for those type of attribute.
With those two limitations, there is no need of a toggle and
the call to plugin_call_syntax_filter_ava can be replaced by
a call to slapi_valueset_find.
In both cases, sorted valueset and plugin_call_syntax_filter_ava, ava and
values are normalized.
In sorted valueset, the values have been normalized to insert the index
in the sorted array and then comparison is done on normalized values.
In plugin_call_syntax_filter_ava, all values in valuearray (of valueset) are normalized
before comparison.

relates: #6172

Reviewed by: Pierre Rogier, Simon Pichugin (Big Thanks !!!)
---
 .../tests/suites/filter/filter_test.py | 125 ++++++++++++++++++
 ldap/servers/slapd/filterentry.c       |  22 ++-
 2 files changed, 146 insertions(+), 1 deletion(-)

diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py
index d6bfa5a3b..4baaf04a7 100644
--- a/dirsrvtests/tests/suites/filter/filter_test.py
+++ b/dirsrvtests/tests/suites/filter/filter_test.py
@@ -9,7 +9,11 @@
 import logging

 import pytest
+import time
+from lib389.dirsrv_log import DirsrvAccessLog
 from lib389.tasks import *
+from lib389.backend import Backends, Backend
+from lib389.dbgen import dbgen_users, dbgen_groups
 from lib389.topologies import topology_st
 from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX
 from lib389.utils import *
@@ -304,6 +308,127 @@ def test_extended_search(topology_st):
     ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
     assert len(ents) == 1

+def test_match_large_valueset(topology_st):
+    """Test that when returning a big number of entries
+    and that we need to match the filter from a large valueset
+    we get benefit to use the sorted valueset
+
+    :id: 7db5aa88-50e0-4c31-85dd-1d2072cb674c
+
+    :setup: Standalone instance
+
+    :steps:
+         1. Create a users and groups backends and tune them
+         2. Generate a test ldif (2k users and 1K groups with all users)
+         3. Import test ldif file using Offline import (ldif2db).
+         4. Prim the 'groups' entrycache with a "fast" search
+         5. Search the 'groups' with a difficult matching value
+         6. check that etime from step 5 is less than a second
+
+    :expectedresults:
+         1. Create a users and groups backends should PASS
+         2. Generate LDIF should PASS.
+         3. Offline import should PASS.
+         4. Priming should PASS.
+         5. Performance search should PASS.
+         6. Etime of performance search should PASS.
+    """
+
+    log.info('Running test_match_large_valueset...')
+    #
+    # Test online/offline LDIF imports
+    #
+    inst = topology_st.standalone
+    inst.start()
+    backends = Backends(inst)
+    users_suffix = "ou=users,%s" % DEFAULT_SUFFIX
+    users_backend = 'users'
+    users_ldif = 'users_import.ldif'
+    groups_suffix = "ou=groups,%s" % DEFAULT_SUFFIX
+    groups_backend = 'groups'
+    groups_ldif = 'groups_import.ldif'
+    groups_entrycache = '200000000'
+    users_number = 2000
+    groups_number = 1000
+
+
+    # For priming the cache we just want to be fast
+    # taking the first value in the valueset is good
+    # whether the valueset is sorted or not
+    priming_user_rdn = "user0001"
+
+    # For performance testing, this is important to use
+    # user1000 rather then user0001
+    # Because user0001 is the first value in the valueset
+    # whether we use the sorted valuearray or non sorted
+    # valuearray the performance will be similar.
+    # With middle value user1000, the performance boost of
+    # the sorted valuearray will make the difference.
+    perf_user_rdn = "user1000"
+
+    # Step 1. Prepare the backends and tune the groups entrycache
+    try:
+        be_users = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': users_suffix, 'name': users_backend})
+        be_groups = backends.create(properties={'parent': DEFAULT_SUFFIX, 'nsslapd-suffix': groups_suffix, 'name': groups_backend})
+
+        # set the entry cache to 200Mb as the 1K groups of 2K users require at least 170Mb
+        be_groups.replace('nsslapd-cachememsize', groups_entrycache)
+    except:
+        raise
+
+    # Step 2. Generate a test ldif (10k users entries)
+    log.info("Generating users LDIF...")
+    ldif_dir = inst.get_ldif_dir()
+    users_import_ldif = "%s/%s" % (ldif_dir, users_ldif)
+    groups_import_ldif = "%s/%s" % (ldif_dir, groups_ldif)
+    dbgen_users(inst, users_number, users_import_ldif, suffix=users_suffix, generic=True, parent=users_suffix)
+
+    # Generate a test ldif (800 groups with 10k members) that fit in 700Mb entry cache
+    props = {
+        "name": "group",
+        "suffix": groups_suffix,
+        "parent": groups_suffix,
+        "number": groups_number,
+        "numMembers": users_number,
+        "createMembers": False,
+        "memberParent": users_suffix,
+        "membershipAttr": "uniquemember",
+    }
+    dbgen_groups(inst, groups_import_ldif, props)
+
+    # Step 3. Do the both offline imports
+    inst.stop()
+    if not inst.ldif2db(users_backend, None, None, None, users_import_ldif):
+        log.fatal('test_basic_import_export: Offline users import failed')
+        assert False
+    if not inst.ldif2db(groups_backend, None, None, None, groups_import_ldif):
+        log.fatal('test_basic_import_export: Offline groups import failed')
+        assert False
+    inst.start()
+
+    # Step 4. first prime the cache
+    # Just request the 'DN'. We are interested by the time of matching not by the time of transfert
+    entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (priming_user_rdn, users_suffix), ['dn'])
+    assert len(entries) == groups_number
+
+    # Step 5. Now do the real performance checking it should take less than a second
+    # Just request the 'DN'. We are interested by the time of matching not by the time of transfert
+    search_start = time.time()
+    entries = topology_st.standalone.search_s(groups_suffix, ldap.SCOPE_SUBTREE, "(&(objectclass=groupOfUniqueNames)(uniquemember=uid=%s,%s))" % (perf_user_rdn, users_suffix), ['dn'])
+    duration = time.time() - search_start
+    log.info("Duration of the search was %f", duration)
+
+    # Step 6. Gather the etime from the access log
+    inst.stop()
+    access_log = DirsrvAccessLog(inst)
+    search_result = access_log.match(".*RESULT err=0 tag=101 nentries=%s.*" % groups_number)
+    log.info("Found patterns are %s", search_result[0])
+    log.info("Found patterns are %s", search_result[1])
+    etime = float(search_result[1].split('etime=')[1])
+    log.info("Duration of the search from access log was %f", etime)
+    assert len(entries) == groups_number
+    assert (etime < 1)
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
diff --git a/ldap/servers/slapd/filterentry.c b/ldap/servers/slapd/filterentry.c
index fd8fdda9f..cae5c7edc 100644
--- a/ldap/servers/slapd/filterentry.c
+++ b/ldap/servers/slapd/filterentry.c
@@ -296,7 +296,27 @@ test_ava_filter(
         rc = -1;
         for (; a != NULL; a = a->a_next) {
             if (slapi_attr_type_cmp(ava->ava_type, a->a_type, SLAPI_TYPE_CMP_SUBTYPE) == 0) {
-                rc = plugin_call_syntax_filter_ava(a, ftype, ava);
+                if ((ftype == LDAP_FILTER_EQUALITY) &&
+                    (slapi_attr_is_dn_syntax_type(a->a_type))) {
+                    /* This path is for a performance improvement */
+
+                    /* In case of equality filter we can get benefit of the
+                     * sorted valuearray (from valueset).
+                     * This improvement is limited to DN syntax attributes for
+                     * which the sorted valueset was designed.
+                     */
+                    Slapi_Value *sval = NULL;
+                    sval = slapi_value_new_berval(&ava->ava_value);
+                    if (slapi_valueset_find((const Slapi_Attr *)a, &a->a_present_values, sval)) {
+                        rc = 0;
+                    }
+                    slapi_value_free(&sval);
+                } else {
+                    /* When sorted valuearray optimization cannot be used
+                     * lets filter the value according to its syntax
+                     */
+                    rc = plugin_call_syntax_filter_ava(a, ftype, ava);
+                }
                 if (rc == 0) {
                     break;
                 }
--
2.46.0

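The win in the patch above comes from turning an equality test against a large group into a lookup in an already-sorted, already-normalized value array, instead of a linear scan that normalizes every member. A Python sketch of the idea using binary search; here normalization is just lower-casing, whereas the server uses the DN-syntax matching rule (names and data are illustrative):

    from bisect import bisect_left

    def valueset_find(sorted_norm_values, ava):
        # Binary search over pre-normalized values, like slapi_valueset_find()
        # on a sorted valueset: O(log n) instead of O(n) normalizations.
        needle = ava.lower()
        i = bisect_left(sorted_norm_values, needle)
        return i < len(sorted_norm_values) and sorted_norm_values[i] == needle

    members = sorted(f"uid=user{i:04d},ou=users,dc=example,dc=com"
                     for i in range(2000))
    assert valueset_find(members, "UID=user1000,ou=users,dc=example,dc=com")

This also explains the test's choice of user1000 over user0001: a hit on the very first value is cheap either way, so only a mid-array value exposes the difference between the two strategies.
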
@ -1,163 +0,0 @@
From 57051154bafaf50b83fc27dadbd89a49fd1c8c36 Mon Sep 17 00:00:00 2001
From: Pierre Rogier <progier@redhat.com>
Date: Fri, 14 Jun 2024 13:27:10 +0200
Subject: [PATCH] Security fix for CVE-2024-5953

Description:
A denial of service vulnerability was found in the 389 Directory Server.
This issue may allow an authenticated user to cause a server denial
of service while attempting to log in with a user with a malformed hash
in their password.

Fix Description:
To prevent buffer overflow when a bind request is processed, the bind fails
if the hash size is not coherent without even attempting to process further
the hashed password.

References:
- https://nvd.nist.gov/vuln/detail/CVE-2024-5953
- https://access.redhat.com/security/cve/CVE-2024-5953
- https://bugzilla.redhat.com/show_bug.cgi?id=2292104
---
 .../tests/suites/password/regression_test.py | 54 ++++++++++++++++++-
 ldap/servers/plugins/pwdstorage/md5_pwd.c    |  9 +++-
 ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c |  6 +++
 3 files changed, 66 insertions(+), 3 deletions(-)

diff --git a/dirsrvtests/tests/suites/password/regression_test.py b/dirsrvtests/tests/suites/password/regression_test.py
index 8f1facb6d..1fa581643 100644
--- a/dirsrvtests/tests/suites/password/regression_test.py
+++ b/dirsrvtests/tests/suites/password/regression_test.py
@@ -7,12 +7,14 @@
 #
 import pytest
 import time
+import glob
+import base64
 from lib389._constants import PASSWORD, DN_DM, DEFAULT_SUFFIX
 from lib389._constants import SUFFIX, PASSWORD, DN_DM, DN_CONFIG, PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DEFAULT_CHANGELOG_DB
 from lib389 import Entry
 from lib389.topologies import topology_m1 as topo_supplier
-from lib389.idm.user import UserAccounts
-from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer
+from lib389.idm.user import UserAccounts, UserAccount
+from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer, ds_supports_new_changelog
 from lib389.topologies import topology_st as topo
 from lib389.idm.organizationalunit import OrganizationalUnits

@@ -39,6 +41,13 @@ TEST_PASSWORDS += ['CNpwtest1ZZZZ', 'ZZZZZCNpwtest1',
 TEST_PASSWORDS2 = (
     'CN12pwtest31', 'SN3pwtest231', 'UID1pwtest123', 'MAIL2pwtest12@redhat.com', '2GN1pwtest123', 'People123')

+SUPPORTED_SCHEMES = (
+    "{SHA}", "{SSHA}", "{SHA256}", "{SSHA256}",
+    "{SHA384}", "{SSHA384}", "{SHA512}", "{SSHA512}",
+    "{crypt}", "{NS-MTA-MD5}", "{clear}", "{MD5}",
+    "{SMD5}", "{PBKDF2_SHA256}", "{PBKDF2_SHA512}",
+    "{GOST_YESCRYPT}", "{PBKDF2-SHA256}", "{PBKDF2-SHA512}" )
+
 def _check_unhashed_userpw(inst, user_dn, is_present=False):
     """Check if unhashed#user#password attribute is present or not in the changelog"""
     unhashed_pwd_attribute = 'unhashed#user#password'
@@ -319,6 +328,47 @@ def test_unhashed_pw_switch(topo_supplier):
         # Add debugging steps(if any)...
         pass

+@pytest.mark.parametrize("scheme", SUPPORTED_SCHEMES )
+def test_long_hashed_password(topo, create_user, scheme):
+    """Check that hashed password with very long value does not cause trouble
+
+    :id: 252a1f76-114b-11ef-8a7a-482ae39447e5
+    :setup: standalone Instance
+    :parametrized: yes
+    :steps:
+        1. Add a test user user
+        2. Set a long password with requested scheme
+        3. Bind on that user using a wrong password
+        4. Check that instance is still alive
+        5. Remove the added user
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Should get ldap.INVALID_CREDENTIALS exception
+        4. Success
+        5. Success
+    """
+    inst = topo.standalone
+    inst.simple_bind_s(DN_DM, PASSWORD)
+    users = UserAccounts(inst, DEFAULT_SUFFIX)
+    # Make sure that server is started as this test may crash it
+    inst.start()
+    # Adding Test user (It may already exists if previous test failed)
+    user2 = UserAccount(inst, dn='uid=test_user_1002,ou=People,dc=example,dc=com')
+    if not user2.exists():
+        user2 = users.create_test_user(uid=1002, gid=2002)
+    # Setting hashed password
+    passwd = 'A'*4000
+    hashed_passwd = scheme.encode('utf-8') + base64.b64encode(passwd.encode('utf-8'))
+    user2.replace('userpassword', hashed_passwd)
+    # Bind on that user using a wrong password
+    with pytest.raises(ldap.INVALID_CREDENTIALS):
+        conn = user2.bind(PASSWORD)
+    # Check that instance is still alive
+    assert inst.status()
+    # Remove the added user
+    user2.delete()
+

 if __name__ == '__main__':
     # Run isolated
diff --git a/ldap/servers/plugins/pwdstorage/md5_pwd.c b/ldap/servers/plugins/pwdstorage/md5_pwd.c
index 1e2cf58e7..b9a48d5ca 100644
--- a/ldap/servers/plugins/pwdstorage/md5_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/md5_pwd.c
@@ -37,6 +37,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
     unsigned char hash_out[MD5_HASH_LEN];
     unsigned char b2a_out[MD5_HASH_LEN * 2]; /* conservative */
     SECItem binary_item;
+    size_t dbpwd_len = strlen(dbpwd);

     ctx = PK11_CreateDigestContext(SEC_OID_MD5);
     if (ctx == NULL) {
@@ -45,6 +46,12 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
         goto loser;
     }

+    if (dbpwd_len >= sizeof b2a_out) {
+        slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
+                      "The hashed password stored in the user entry is longer than any valid md5 hash");
+        goto loser;
+    }
+
     /* create the hash */
     PK11_DigestBegin(ctx);
     PK11_DigestOp(ctx, (const unsigned char *)userpwd, strlen(userpwd));
@@ -57,7 +64,7 @@ md5_pw_cmp(const char *userpwd, const char *dbpwd)
     bver = NSSBase64_EncodeItem(NULL, (char *)b2a_out, sizeof b2a_out, &binary_item);
     /* bver points to b2a_out upon success */
     if (bver) {
-        rc = slapi_ct_memcmp(bver, dbpwd, strlen(dbpwd));
+        rc = slapi_ct_memcmp(bver, dbpwd, dbpwd_len);
     } else {
         slapi_log_err(SLAPI_LOG_PLUGIN, MD5_SUBSYSTEM_NAME,
                       "Could not base64 encode hashed value for password compare");
diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
index dcac4fcdd..82b8c9501 100644
--- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c
@@ -255,6 +255,12 @@ pbkdf2_sha256_pw_cmp(const char *userpwd, const char *dbpwd)
     passItem.data = (unsigned char *)userpwd;
     passItem.len = strlen(userpwd);

+    if (pwdstorage_base64_decode_len(dbpwd, dbpwd_len) > sizeof dbhash) {
+        /* Hashed value is too long and cannot match any value generated by pbkdf2_sha256_hash */
+        slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value. (hashed value is too long)\n");
+        return result;
+    }
+
     /* Decode the DBpwd to bytes from b64 */
     if (PL_Base64Decode(dbpwd, dbpwd_len, dbhash) == NULL) {
         slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to base64 decode dbpwd value\n");
--
2.46.0

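Both C guards above apply the same rule: before decoding or comparing a stored hash, reject it if its decoded size could not possibly fit the scheme's fixed-size buffer, so an oversized "hash" fails the bind instead of overflowing. A rough Python sketch of that length check (the 3/4 bound approximates pwdstorage_base64_decode_len(); the function and buffer size are illustrative):

    import base64

    def b64_decoded_len(s: str) -> int:
        # Upper bound on the decoded size of a base64 string.
        return (len(s) * 3) // 4

    def pw_cmp_guarded(dbpwd_b64: str, buf_len: int) -> bool:
        # A stored value too large for the fixed-size hash buffer is rejected
        # before any decode; the bind simply fails with invalid credentials.
        if b64_decoded_len(dbpwd_b64) > buf_len:
            return False
        decoded = base64.b64decode(dbpwd_b64 + "=" * (-len(dbpwd_b64) % 4))
        return len(decoded) <= buf_len

    assert pw_cmp_guarded("A" * 4000, 32) is False   # 4000-byte value rejected
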
@ -1,4 +0,0 @@
For detailed information on developing plugins for
389 Directory Server visit.

http://port389/wiki/Plugins
@ -1,16 +0,0 @@
#!/bin/bash

DATE=`date +%Y%m%d`
# use a real tag name here
VERSION=1.3.5.14
PKGNAME=389-ds-base
TAG=${TAG:-$PKGNAME-$VERSION}
URL="https://git.fedorahosted.org/git/?p=389/ds.git;a=snapshot;h=$TAG;sf=tgz"
SRCNAME=$PKGNAME-$VERSION

wget -O $SRCNAME.tar.gz "$URL"

echo convert tgz format to tar.bz2 format

gunzip $PKGNAME-$VERSION.tar.gz
bzip2 $PKGNAME-$VERSION.tar
File diff suppressed because it is too large
4	sources	Normal file
@ -0,0 +1,4 @@
SHA512 (389-ds-base-3.1.3.tar.bz2) = bd15c29dba5209ed828a2534e51fd000fdd5d32862fd07ea73339e73489b3c79f1991c91592c75dbb67384c696a03c82378f156bbea594e2e17421c95ca4c6be
SHA512 (jemalloc-5.3.0.tar.bz2) = 22907bb052096e2caffb6e4e23548aecc5cc9283dce476896a2b1127eee64170e3562fa2e7db9571298814a7a2c7df6e8d1fbe152bd3f3b0c1abec22a2de34b1
SHA512 (libdb-5.3.28-59.tar.bz2) = 731a434fa2e6487ebb05c458b0437456eb9f7991284beb08cb3e21931e23bdeddddbc95bfabe3a2f9f029fe69cd33a2d4f0f5ce6a9811e9c3b940cb6fde4bf79
SHA512 (vendor-3.1.3-1.tar.gz) = bf7f775da482a0164b5192e60cc335f32c65edf120ab94336835d98b2ea769eb116c808d06376e8ececb96e617194ec3febebf375821657e3d4751d9d8a0cf3c