autobuild v6.0-60

Related: rhbz#2055630
Resolves: bz#1668303 bz#1853631 bz#1901468 bz#1904137 bz#1911665
Resolves: bz#1962972 bz#1973566 bz#1994593 bz#1995029 bz#1997447
Resolves: bz#2006205
Signed-off-by: Gluster Jenkins <dkhandel+glusterjenkins@redhat.com>
Gluster Jenkins 2021-10-11 08:22:10 +00:00 committed by Tamar Shacked
parent 1b5eaf155e
commit 8c93bcfac1
25 changed files with 2228 additions and 19 deletions

From f72780b560ea8efe1508aa9ddc574e6dc066bf9a Mon Sep 17 00:00:00 2001
From: Csaba Henk <chenk@redhat.com>
Date: Wed, 29 Sep 2021 10:44:37 +0200
Subject: [PATCH 587/610] Update rfc.sh to rhgs-3.5.6
Signed-off-by: Csaba Henk <chenk@redhat.com>
---
rfc.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/rfc.sh b/rfc.sh
index daeff32..67798cb 100755
--- a/rfc.sh
+++ b/rfc.sh
@@ -18,7 +18,7 @@ done
shift $((OPTIND-1))
-branch="rhgs-3.5.5";
+branch="rhgs-3.5.6";
set_hooks_commit_msg()
{
--
1.8.3.1

From e3813685237dbdf8dc7cf28726fff2caf2288706 Mon Sep 17 00:00:00 2001
From: Xavi Hernandez <xhernandez@redhat.com>
Date: Mon, 19 Jul 2021 15:37:02 +0200
Subject: [PATCH 588/610] locks: Fix null gfid in lock contention notifications
This patch fixes 3 problems:
First problem:
After commit c0bd592e, the pl_inode_t object was also created in the
cbk of lookup requests. Lookup requests are a bit different than any
other request because the inode received may not be completely
initialized. In particular, inode->gfid may be null.
This caused the gfid stored in the pl_inode_t object to be null in
some cases. This gfid is used mostly for logs, but also to send lock
contention notifications. This meant that some notifications could be
sent with a null gfid, making it impossible for the client xlator to
correctly identify the contending inode, so the lock was not released
immediately when eager-lock was also enabled.
Second problem:
The feature introduced by c0bd592e needed to track the number of
hardlinks of each inode to detect when it was deleted. However, this
was done using the 'get-link-count' special xattr on lookup, while
posix only implements it for unlink and rename.
Also, the number of hardlinks was not incremented for mkdir, mknod,
rename, ..., so it didn't work correctly for directories.
Third problem:
When the last hardlink of an open file is deleted, all locks will be
denied with ESTALE error, but that's not correct. Access to the open
fd must succeed.
The first problem is fixed by avoiding the creation of pl_inode_t
objects during lookup. The second and third problems are fixed by
completely ignoring whether the file has been deleted or not. Even if
we grant a
lock on a non-existing file, the next operation done by the client
inside the lock will return the correct error, which should be enough.
Upstream patch:
> Upstream-patch-link: https://github.com/gluster/glusterfs/pull/2553
> Fixes: #2551
> Change-Id: Ic73e82f6b725b838c1600b6a128ea36a75f13253
> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
BUG: 1962972
Change-Id: Ic73e82f6b725b838c1600b6a128ea36a75f13253
Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/279192
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/bugs/locks/issue-2551.t | 58 ++++++++++++++++++
xlators/features/locks/src/common.c | 31 +++-------
xlators/features/locks/src/locks.h | 2 -
xlators/features/locks/src/posix.c | 118 +++---------------------------------
4 files changed, 74 insertions(+), 135 deletions(-)
create mode 100644 tests/bugs/locks/issue-2551.t
diff --git a/tests/bugs/locks/issue-2551.t b/tests/bugs/locks/issue-2551.t
new file mode 100644
index 0000000..a32af02
--- /dev/null
+++ b/tests/bugs/locks/issue-2551.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function check_time() {
+ local max="${1}"
+ local start="$(date +"%s")"
+
+ shift
+
+ if "${@}"; then
+ if [[ $(($(date +"%s") - ${start})) -lt ${max} ]]; then
+ return 0
+ fi
+ fi
+
+ return 1
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/brick{0..2}
+TEST $CLI volume set $V0 disperse.eager-lock on
+TEST $CLI volume set $V0 disperse.eager-lock-timeout 30
+TEST $CLI volume set $V0 features.locks-notify-contention on
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.quick-read off
+
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick2
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0 $M0
+
+TEST mkdir $M0/dir
+TEST dd if=/dev/zero of=$M0/dir/test bs=4k count=1
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick2
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0 $M0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0 $M1
+
+TEST dd if=/dev/zero of=$M0/dir/test bs=4k count=1 conv=notrunc
+TEST check_time 5 dd if=/dev/zero of=$M1/dir/test bs=4k count=1 conv=notrunc
diff --git a/xlators/features/locks/src/common.c b/xlators/features/locks/src/common.c
index cddbfa6..5403086 100644
--- a/xlators/features/locks/src/common.c
+++ b/xlators/features/locks/src/common.c
@@ -468,9 +468,7 @@ pl_inode_get(xlator_t *this, inode_t *inode, pl_local_t *local)
pl_inode->check_mlock_info = _gf_true;
pl_inode->mlock_enforced = _gf_false;
- /* -2 means never looked up. -1 means something went wrong and link
- * tracking is disabled. */
- pl_inode->links = -2;
+ pl_inode->remove_running = 0;
ret = __inode_ctx_put(inode, this, (uint64_t)(long)(pl_inode));
if (ret) {
@@ -1403,11 +1401,6 @@ pl_inode_remove_prepare(xlator_t *xl, call_frame_t *frame, loc_t *loc,
pthread_mutex_lock(&pl_inode->mutex);
- if (pl_inode->removed) {
- error = ESTALE;
- goto unlock;
- }
-
if (pl_inode_has_owners(xl, frame->root->client, pl_inode, &now, contend)) {
error = -1;
/* We skip the unlock here because the caller must create a stub when
@@ -1420,7 +1413,6 @@ pl_inode_remove_prepare(xlator_t *xl, call_frame_t *frame, loc_t *loc,
pl_inode->is_locked = _gf_true;
pl_inode->remove_running++;
-unlock:
pthread_mutex_unlock(&pl_inode->mutex);
done:
@@ -1490,20 +1482,18 @@ pl_inode_remove_cbk(xlator_t *xl, pl_inode_t *pl_inode, int32_t error)
pthread_mutex_lock(&pl_inode->mutex);
- if (error == 0) {
- if (pl_inode->links >= 0) {
- pl_inode->links--;
- }
- if (pl_inode->links == 0) {
- pl_inode->removed = _gf_true;
- }
- }
-
pl_inode->remove_running--;
if ((pl_inode->remove_running == 0) && list_empty(&pl_inode->waiting)) {
pl_inode->is_locked = _gf_false;
+ /* At this point it's possible that the inode has been deleted, but
+ * there could be open fd's still referencing it, so we can't prevent
+ * pending locks from being granted. If the file has really been
+ * deleted, whatever the client does once the lock is granted will
+ * fail with the appropriate error, so we don't need to worry about
+ * it here. */
+
list_for_each_entry(dom, &pl_inode->dom_list, inode_list)
{
__grant_blocked_inode_locks(xl, pl_inode, &granted, dom, &now,
@@ -1555,11 +1545,6 @@ pl_inode_remove_inodelk(pl_inode_t *pl_inode, pl_inode_lock_t *lock)
pl_dom_list_t *dom;
pl_inode_lock_t *ilock;
- /* If the inode has been deleted, we won't allow any lock. */
- if (pl_inode->removed) {
- return -ESTALE;
- }
-
/* We only synchronize with locks made for regular operations coming from
* the user. Locks done for internal purposes are hard to control and could
* lead to long delays or deadlocks quite easily. */
diff --git a/xlators/features/locks/src/locks.h b/xlators/features/locks/src/locks.h
index 6666feb..2406dcd 100644
--- a/xlators/features/locks/src/locks.h
+++ b/xlators/features/locks/src/locks.h
@@ -202,10 +202,8 @@ struct __pl_inode {
int fop_wind_count;
pthread_cond_t check_fop_wind_count;
- int32_t links; /* Number of hard links the inode has. */
uint32_t remove_running; /* Number of remove operations running. */
gf_boolean_t is_locked; /* Regular locks will be blocked. */
- gf_boolean_t removed; /* The inode has been deleted. */
};
typedef struct __pl_inode pl_inode_t;
diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c
index 22ef5b8..d5effef 100644
--- a/xlators/features/locks/src/posix.c
+++ b/xlators/features/locks/src/posix.c
@@ -2975,104 +2975,24 @@ out:
return ret;
}
-static int32_t
-pl_request_link_count(dict_t **pxdata)
-{
- dict_t *xdata;
-
- xdata = *pxdata;
- if (xdata == NULL) {
- xdata = dict_new();
- if (xdata == NULL) {
- return ENOMEM;
- }
- } else {
- dict_ref(xdata);
- }
-
- if (dict_set_uint32(xdata, GET_LINK_COUNT, 0) != 0) {
- dict_unref(xdata);
- return ENOMEM;
- }
-
- *pxdata = xdata;
-
- return 0;
-}
-
-static int32_t
-pl_check_link_count(dict_t *xdata)
-{
- int32_t count;
-
- /* In case we are unable to read the link count from xdata, we take a
- * conservative approach and return -2, which will prevent the inode from
- * being considered deleted. In fact it will cause link tracking for this
- * inode to be disabled completely to avoid races. */
-
- if (xdata == NULL) {
- return -2;
- }
-
- if (dict_get_int32(xdata, GET_LINK_COUNT, &count) != 0) {
- return -2;
- }
-
- return count;
-}
-
int32_t
pl_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
int32_t op_errno, inode_t *inode, struct iatt *buf, dict_t *xdata,
struct iatt *postparent)
{
- pl_inode_t *pl_inode;
-
- if (op_ret >= 0) {
- pl_inode = pl_inode_get(this, inode, NULL);
- if (pl_inode == NULL) {
- PL_STACK_UNWIND(lookup, xdata, frame, -1, ENOMEM, NULL, NULL, NULL,
- NULL);
- return 0;
- }
-
- pthread_mutex_lock(&pl_inode->mutex);
-
- /* We only update the link count if we previously didn't know it.
- * Doing it always can lead to races since lookup is not executed
- * atomically most of the times. */
- if (pl_inode->links == -2) {
- pl_inode->links = pl_check_link_count(xdata);
- if (buf->ia_type == IA_IFDIR) {
- /* Directories have at least 2 links. To avoid special handling
- * for directories, we simply decrement the value here to make
- * them equivalent to regular files. */
- pl_inode->links--;
- }
- }
-
- pthread_mutex_unlock(&pl_inode->mutex);
- }
-
PL_STACK_UNWIND(lookup, xdata, frame, op_ret, op_errno, inode, buf, xdata,
postparent);
+
return 0;
}
int32_t
pl_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
{
- int32_t error;
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
+ STACK_WIND(frame, pl_lookup_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lookup, loc, xdata);
- error = pl_request_link_count(&xdata);
- if (error == 0) {
- PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
- STACK_WIND(frame, pl_lookup_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->lookup, loc, xdata);
- dict_unref(xdata);
- } else {
- STACK_UNWIND_STRICT(lookup, frame, -1, error, NULL, NULL, NULL, NULL);
- }
return 0;
}
@@ -3881,9 +3801,7 @@ unlock:
__dump_posixlks(pl_inode);
}
- gf_proc_dump_write("links", "%d", pl_inode->links);
gf_proc_dump_write("removes_pending", "%u", pl_inode->remove_running);
- gf_proc_dump_write("removed", "%u", pl_inode->removed);
}
pthread_mutex_unlock(&pl_inode->mutex);
@@ -4508,21 +4426,9 @@ pl_link_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
int32_t op_errno, inode_t *inode, struct iatt *buf,
struct iatt *preparent, struct iatt *postparent, dict_t *xdata)
{
- pl_inode_t *pl_inode = (pl_inode_t *)cookie;
-
- if (op_ret >= 0) {
- pthread_mutex_lock(&pl_inode->mutex);
-
- /* TODO: can happen pl_inode->links == 0 ? */
- if (pl_inode->links >= 0) {
- pl_inode->links++;
- }
-
- pthread_mutex_unlock(&pl_inode->mutex);
- }
-
PL_STACK_UNWIND_FOR_CLIENT(link, xdata, frame, op_ret, op_errno, inode, buf,
preparent, postparent, xdata);
+
return 0;
}
@@ -4530,18 +4436,10 @@ int
pl_link(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
dict_t *xdata)
{
- pl_inode_t *pl_inode;
-
- pl_inode = pl_inode_get(this, oldloc->inode, NULL);
- if (pl_inode == NULL) {
- STACK_UNWIND_STRICT(link, frame, -1, ENOMEM, NULL, NULL, NULL, NULL,
- NULL);
- return 0;
- }
-
PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), oldloc, newloc);
- STACK_WIND_COOKIE(frame, pl_link_cbk, pl_inode, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->link, oldloc, newloc, xdata);
+ STACK_WIND(frame, pl_link_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->link, oldloc, newloc, xdata);
+
return 0;
}
--
1.8.3.1

From 0bb71e1492b1ad442758399eb8dcb5f087d77f12 Mon Sep 17 00:00:00 2001
From: Nikhil Ladha <nladha@redhat.com>
Date: Wed, 28 Apr 2021 02:14:27 +0530
Subject: [PATCH 589/610] extras: fix for postscript failure on logrotation of
snapd logs (#2310)
Issue:
On executing the logrotate command, the postrotate script runs as a separate
process, and grepping for the snapd process returns the PID of that
short-lived process as well; executing a kill on it throws an error.
A similar error can be seen if the killall used for brick log rotation
is replaced with a for loop over PIDs.
Fix:
Use the killall command on the list of snapd processes instead of
using the kill command to individually kill them.
>Fixes: #2360
>Change-Id: I1ad6e3e4d74128706e71900d02e715635294ff72
>Signed-off-by: nik-redhat <nladha@redhat.com>
Upstream patch: https://github.com/gluster/glusterfs/pull/2310
BUG: 1668303
Change-Id: I59910fc3660e11e131b1aa813848c2e19cbffefd
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/279533
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/glusterfs-logrotate | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/extras/glusterfs-logrotate b/extras/glusterfs-logrotate
index 75f700e..2b9028b 100644
--- a/extras/glusterfs-logrotate
+++ b/extras/glusterfs-logrotate
@@ -45,3 +45,22 @@
compress
delaycompress
}
+
+# Rotate snapd log
+/var/log/glusterfs/snaps/*/*.log {
+ sharedscripts
+ weekly
+ maxsize 10M
+ minsize 100k
+
+ # 6 months of logs are good enough
+ rotate 26
+
+ missingok
+ compress
+ delaycompress
+ notifempty
+ postrotate
+ /usr/bin/killall -HUP `pgrep -f "glusterfs.*snapd"` > /dev/null 2>&1 || true
+ endscript
+}
--
1.8.3.1

From 87138f86b8cb98d1c9d1a4c9a2393e7978d20b1d Mon Sep 17 00:00:00 2001
From: karthik-us <ksubrahm@redhat.com>
Date: Tue, 5 Oct 2021 12:33:01 +0530
Subject: [PATCH 590/610] cluster/afr: Don't check for stale entry-index
Problem:
In every entry index heal there is a check to see if the
index is stale or not.
1. If a file is created when the brick is down, this leads to an
extra index lookup because the name is not stale.
2. If a file is deleted when the brick is down, this also leads to an
extra index lookup because the name is not stale.
3. If a file is created and deleted when the brick is down, then the
index is stale and the check saves an entry heal, i.e. 2 entrylks and
2 lookups.
Since cases 1 and 2 happen significantly more often than case 3, this
is a bad tradeoff.
Fix:
Let the stale index be removed as part of normal entry heal itself,
via the 'name is already deleted' detection code path.
> Upstream patch: https://github.com/gluster/glusterfs/pull/2612
> fixes: gluster#2611
> Change-Id: I29bcc07f2480877a83b30dbd7e2e5631a74df8e8
> Signed-off-by: Pranith Kumar K <pranith.karampuri@phonepe.com>
BUG: 1994593
Change-Id: I29bcc07f2480877a83b30dbd7e2e5631a74df8e8
Signed-off-by: karthik-us <ksubrahm@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/279606
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/cluster/afr/src/afr-self-heal-entry.c | 46 +++++++--------------------
1 file changed, 11 insertions(+), 35 deletions(-)
diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c
index a17dd93..14b7417 100644
--- a/xlators/cluster/afr/src/afr-self-heal-entry.c
+++ b/xlators/cluster/afr/src/afr-self-heal-entry.c
@@ -933,37 +933,8 @@ afr_selfheal_entry_granular_dirent(xlator_t *subvol, gf_dirent_t *entry,
loc_t *parent, void *data)
{
int ret = 0;
- loc_t loc = {
- 0,
- };
- struct iatt iatt = {
- 0,
- };
afr_granular_esh_args_t *args = data;
- /* Look up the actual inode associated with entry. If the lookup returns
- * ESTALE or ENOENT, then it means we have a stale index. Remove it.
- * This is analogous to the check in afr_shd_index_heal() except that
- * here it is achieved through LOOKUP and in afr_shd_index_heal() through
- * a GETXATTR.
- */
-
- loc.inode = inode_new(args->xl->itable);
- loc.parent = inode_ref(args->heal_fd->inode);
- gf_uuid_copy(loc.pargfid, loc.parent->gfid);
- loc.name = entry->d_name;
-
- ret = syncop_lookup(args->xl, &loc, &iatt, NULL, NULL, NULL);
- if ((ret == -ENOENT) || (ret == -ESTALE)) {
- /* The name indices under the pgfid index dir are guaranteed
- * to be regular files. Hence the hardcoding.
- */
- afr_shd_entry_purge(subvol, parent->inode, entry->d_name, IA_IFREG);
- ret = 0;
- goto out;
- }
- /* TBD: afr_shd_zero_xattrop? */
-
ret = afr_selfheal_entry_dirent(args->frame, args->xl, args->heal_fd,
entry->d_name, parent->inode, subvol,
_gf_false);
@@ -974,8 +945,6 @@ afr_selfheal_entry_granular_dirent(xlator_t *subvol, gf_dirent_t *entry,
if (ret == -1)
args->mismatch = _gf_true;
-out:
- loc_wipe(&loc);
return ret;
}
@@ -1050,7 +1019,9 @@ afr_selfheal_entry_do(call_frame_t *frame, xlator_t *this, fd_t *fd, int source,
local = frame->local;
gf_msg(this->name, GF_LOG_INFO, 0, AFR_MSG_SELF_HEAL_INFO,
- "performing entry selfheal on %s", uuid_utoa(fd->inode->gfid));
+ "performing %s entry selfheal on %s",
+ (local->need_full_crawl ? "full" : "granular"),
+ uuid_utoa(fd->inode->gfid));
for (i = 0; i < priv->child_count; i++) {
/* Expunge */
@@ -1112,6 +1083,7 @@ __afr_selfheal_entry(call_frame_t *frame, xlator_t *this, fd_t *fd,
afr_local_t *local = NULL;
afr_private_t *priv = NULL;
gf_boolean_t did_sh = _gf_true;
+ char *heal_type = "granular entry";
priv = this->private;
local = frame->local;
@@ -1194,11 +1166,15 @@ postop_unlock:
afr_selfheal_unentrylk(frame, this, fd->inode, this->name, NULL,
postop_lock, NULL);
out:
- if (did_sh)
- afr_log_selfheal(fd->inode->gfid, this, ret, "entry", source, sources,
+ if (did_sh) {
+ if (local->need_full_crawl) {
+ heal_type = "full entry";
+ }
+ afr_log_selfheal(fd->inode->gfid, this, ret, heal_type, source, sources,
healed_sinks);
- else
+ } else {
ret = 1;
+ }
if (locked_replies)
afr_replies_wipe(locked_replies, priv->child_count);
--
1.8.3.1

From 19460ebc988795eeabaeb8e25d6eba9a3cf2864b Mon Sep 17 00:00:00 2001
From: karthik-us <ksubrahm@redhat.com>
Date: Mon, 4 Oct 2021 12:44:21 +0530
Subject: [PATCH 591/610] afr: check for valid iatt
Problem:
If the entry being processed by afr_shd_anon_inode_cleaner() is no
longer present, gfid lookup fails with ENOENT on all bricks and iatt
will never be assigned, causing a crash due to null dereference.
Fix:
Add a null-check for iatt.
> Upstream patch: https://github.com/gluster/glusterfs/pull/2660
> Fixes: gluster#2659
> Change-Id: I6abfc8063677861ce9388ca4efdf491ec956dc74
> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
BUG: 1995029
Change-Id: I6abfc8063677861ce9388ca4efdf491ec956dc74
Signed-off-by: karthik-us <ksubrahm@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/279529
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/cluster/afr/src/afr-self-heald.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c
index 18aed93..bc720cf 100644
--- a/xlators/cluster/afr/src/afr-self-heald.c
+++ b/xlators/cluster/afr/src/afr-self-heald.c
@@ -870,7 +870,7 @@ afr_shd_anon_inode_cleaner(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
}
/*Inode is deleted from subvol*/
- if (count == 1 || (iatt->ia_type != IA_IFDIR && multiple_links)) {
+ if (count == 1 || (iatt && iatt->ia_type != IA_IFDIR && multiple_links)) {
gf_msg(healer->this->name, GF_LOG_WARNING, 0,
AFR_MSG_EXPUNGING_FILE_OR_DIR, "expunging %s %s/%s on %s", type,
priv->anon_inode_name, entry->d_name, subvol->name);
--
1.8.3.1

From be3448ed5d9d59752cff4df8325ee67eb7d41531 Mon Sep 17 00:00:00 2001
From: Xavi Hernandez <xhernandez@redhat.com>
Date: Mon, 19 Jul 2021 06:56:18 +0200
Subject: [PATCH 592/610] md-cache: fix integer signedness mismatch
md-cache uses a mechanism based on a generation number to detect
modifications made by other clients to the entries and invalidate
the cached data.
This generation number is a 32 bit integer. When it overflows,
special management is done to avoid problems. This overflow condition
is tracked with a single bit.
For many fops, when they are received, the overflow bit and the
current generation number are recorded in a single 64-bit value
which is used later in the cbk.
This is the problematic function:
    uint64_t
    __mdc_get_generation(xlator_t *this, struct md_cache *mdc)
    {
        uint64_t gen = 0, rollover;
        struct mdc_conf *conf = NULL;

        conf = this->private;

        gen = GF_ATOMIC_INC(conf->generation);
        if (gen == 0) {
            gf_log("MDC", GF_LOG_NOTICE, "%p Reset 1", mdc);
            mdc->gen_rollover = !mdc->gen_rollover;
            gen = GF_ATOMIC_INC(conf->generation);
            mdc->ia_time = 0;
            mdc->generation = 0;
            mdc->invalidation_time = gen - 1;
        }
        rollover = mdc->gen_rollover;
        gen |= (rollover << 32);

        return gen;
    }
'conf->generation' is declared as an atomic signed 32-bit integer,
and 'gen' is an unsigned 64-bit value. When 'gen' is assigned from
a signed int, the sign bit is extended to fill the high 32 bits of
'gen'. If the counter has overflown the maximum signed positive
value, it will become negative (sign bit = 1).
In this case, when 'rollover' is later combined with 'gen', all the
high bits remain at '1'.
This value is used later in 'mdc_inode_iatt_set_validate' during
callback processing. The overflow condition and generation numbers
from when the operation was received are recovered this way:
rollover = incident_time >> 32;
incident_time = (incident_time & 0xffffffff);
('incident_time' is the saved value from '__mdc_get_generation').
So here rollover will be 0xffffffff, when it's expected to be 0
or 1 only. When this is compared later with the cached overflow
bit, it doesn't match, which prevents updating the cached info.
This is bad in general, but it's even worse when an entry is not
cached and 'rollover' is 0xffffffff the first time. When md-cache
doesn't have cached data, it assumes everything is 0. This causes
a mismatch, which sends an invalidation request to the kernel, but
since the 'rollover' doesn't match, the cached data is not updated.
So the next time the cached data is checked, it will also send an
invalidation to the kernel, indefinitely.
This patch fixes two things:
1. The 'generation' field is made unsigned to avoid sign extension.
2. Invalidation requests are only sent if we already had valid cached
data. Otherwise it doesn't make sense to send an invalidation.
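For illustration, a minimal standalone sketch of the sign-extension
behavior described above (illustrative values only, not the md-cache
code itself):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t generation = -5;   /* a counter past INT32_MAX reads as negative */
        uint64_t gen = generation; /* sign bit extends into the high 32 bits */
        uint64_t rollover = 1;

        gen |= (rollover << 32);   /* the high bits were already all 1s */

        /* prints 0xffffffff instead of the expected 0 or 1 */
        printf("recovered rollover = 0x%" PRIx64 "\n", gen >> 32);
        return 0;
    }

Declaring the counter as uint32_t makes the conversion zero-extend, so
the recovered rollover is always 0 or 1.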
Upstream patch:
> Upstream-patch-link: https://github.com/gluster/glusterfs/pull/2619
> Fixes: #2617
> Change-Id: Ie40e68288cf143e1bc1a40f46da98f51bb2d6864
> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
BUG: 1904137
Change-Id: Ie40e68288cf143e1bc1a40f46da98f51bb2d6864
Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/279188
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/performance/md-cache/src/md-cache.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/xlators/performance/md-cache/src/md-cache.c b/xlators/performance/md-cache/src/md-cache.c
index bbbee3b..e0256d6 100644
--- a/xlators/performance/md-cache/src/md-cache.c
+++ b/xlators/performance/md-cache/src/md-cache.c
@@ -79,7 +79,7 @@ struct mdc_conf {
gf_boolean_t cache_statfs;
struct mdc_statfs_cache statfs_cache;
char *mdc_xattr_str;
- gf_atomic_int32_t generation;
+ gf_atomic_uint32_t generation;
};
struct mdc_local;
@@ -537,7 +537,7 @@ mdc_inode_iatt_set_validate(xlator_t *this, inode_t *inode, struct iatt *prebuf,
(iatt->ia_mtime_nsec != mdc->md_mtime_nsec) ||
(iatt->ia_ctime != mdc->md_ctime) ||
(iatt->ia_ctime_nsec != mdc->md_ctime_nsec)) {
- if (conf->global_invalidation &&
+ if (conf->global_invalidation && mdc->valid &&
(!prebuf || (prebuf->ia_mtime != mdc->md_mtime) ||
(prebuf->ia_mtime_nsec != mdc->md_mtime_nsec) ||
(prebuf->ia_ctime != mdc->md_ctime) ||
--
1.8.3.1

From 76c9faf5c750428e5eb69462b82ee0c12cbdabc0 Mon Sep 17 00:00:00 2001
From: nik-redhat <nladha@redhat.com>
Date: Fri, 25 Sep 2020 18:39:51 +0530
Subject: [PATCH 593/610] dht: explicit null dereference
Added a null check for uuid_list_copy, to avoid
null dereference in strtok_r() in case of strdup()
failure.
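For context, a minimal sketch of the guarded pattern applied here
(hypothetical names, not the DHT code): strdup() returns NULL on
allocation failure, and strtok_r() would then dereference that NULL.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Walk a space-separated list without modifying the original. */
    static int walk_uuids(const char *uuid_list)
    {
        char *saveptr = NULL;
        char *copy = strdup(uuid_list);

        if (!copy)      /* guard: without it, strtok_r() would crash */
            return -1;

        for (char *tok = strtok_r(copy, " ", &saveptr); tok;
             tok = strtok_r(NULL, " ", &saveptr))
            printf("uuid: %s\n", tok);

        free(copy);
        return 0;
    }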
CID: 1325612
CID: 1274223
>Updates: #1060
>Change-Id: I641a5068cd76d7b2ed92eccf39e7f97d6f7b2480
>Signed-off-by: nik-redhat <nladha@redhat.com>
Upstream link: https://review.gluster.org/c/glusterfs/+/25046
BUG: 1997447
Change-Id: I576b4ce610948bdb84eb30377a684c54df718bdc
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280063
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/cluster/dht/src/dht-common.c | 2 ++
xlators/cluster/dht/src/dht-shared.c | 2 ++
2 files changed, 4 insertions(+)
diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c
index edfc6e7..e6a16ff 100644
--- a/xlators/cluster/dht/src/dht-common.c
+++ b/xlators/cluster/dht/src/dht-common.c
@@ -4296,6 +4296,8 @@ dht_find_local_subvol_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
index = conf->local_subvols_cnt;
uuid_list_copy = gf_strdup(uuid_list);
+ if (!uuid_list_copy)
+ goto unlock;
for (uuid_str = strtok_r(uuid_list, " ", &saveptr); uuid_str;
uuid_str = next_uuid_str) {
diff --git a/xlators/cluster/dht/src/dht-shared.c b/xlators/cluster/dht/src/dht-shared.c
index 58e3339..cca272a 100644
--- a/xlators/cluster/dht/src/dht-shared.c
+++ b/xlators/cluster/dht/src/dht-shared.c
@@ -567,6 +567,8 @@ gf_defrag_pattern_list_fill(xlator_t *this, gf_defrag_info_t *defrag,
pattern_str = strtok_r(data, ",", &tmp_str);
while (pattern_str) {
dup_str = gf_strdup(pattern_str);
+ if (!dup_str)
+ goto out;
pattern_list = GF_CALLOC(1, sizeof(gf_defrag_pattern_list_t), 1);
if (!pattern_list) {
goto out;
--
1.8.3.1

From 663df92f9b4b9f35ae10f84487494829987e2f58 Mon Sep 17 00:00:00 2001
From: nik-redhat <nladha@redhat.com>
Date: Fri, 25 Sep 2020 17:56:19 +0530
Subject: [PATCH 594/610] glusterd: resource leaks
Issue:
iobref was not freed before exiting the function.
Fix:
Modified the code to free iobref before exiting.
CID: 1430107
>Updates: #1060
>Change-Id: I89351b3aa645792eb8dda6292d1e559057b02d8b
>Signed-off-by: nik-redhat <nladha@redhat.com>
Upstream link: https://review.gluster.org/c/glusterfs/+/25042
BUG: 1997447
Change-Id: Iea56afca015a7c0f15ab32f490ea27f5ea323a07
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280066
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-utils.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 6d40be5..c037933 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -6042,7 +6042,6 @@ send_attach_req(xlator_t *this, struct rpc_clnt *rpc, char *path,
GF_ATOMIC_INC(conf->blockers);
ret = rpc_clnt_submit(rpc, &gd_brick_prog, op, cbkfn, &iov, 1, NULL, 0,
iobref, frame, NULL, 0, NULL, 0, NULL);
- return ret;
free_iobref:
iobref_unref(iobref);
@@ -6051,7 +6050,7 @@ maybe_free_iobuf:
iobuf_unref(iobuf);
}
err:
- return -1;
+ return ret;
}
extern size_t
--
1.8.3.1

From 025718f1734655c411475ea338cee1659d96763e Mon Sep 17 00:00:00 2001
From: nik-redhat <nladha@redhat.com>
Date: Thu, 3 Sep 2020 15:42:45 +0530
Subject: [PATCH 595/610] glusterd: use after free (coverity issue)
Issue:
dict_unref is called again on the same dict
in the out label of the code, which causes the
use-after-free issue.
Fix:
Set the dict to NULL after the unref, to avoid the
use-after-free issue.
CID: 1430127
>Updates: #1060
>Change-Id: Ide9a5cbc5f496705c671e72b0260da6d4c06f16d
>Signed-off-by: nik-redhat <nladha@redhat.com>
Upstream link: https://review.gluster.org/c/glusterfs/+/24946
BUG: 1997447
Change-Id: Id1e58cd6226b9329ad49bd5b75ee96a3a5ec5ab7
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280067
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
index 386eed2..b0fa490 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
@@ -2039,8 +2039,9 @@ glusterd_update_snaps_synctask(void *opaque)
"Failed to remove snap %s", snap->snapname);
goto out;
}
- if (dict)
- dict_unref(dict);
+
+ dict_unref(dict);
+ dict = NULL;
}
snprintf(buf, sizeof(buf), "%s.accept_peer_data", prefix);
ret = dict_get_int32(peer_data, buf, &val);
--
1.8.3.1

From 099fcac6fecef6fc367d8fcae8442195f3f174db Mon Sep 17 00:00:00 2001
From: nik-redhat <nladha@redhat.com>
Date: Fri, 25 Sep 2020 18:19:39 +0530
Subject: [PATCH 596/610] locks: null dereference
Added a null check before executing the strtok_r()
to avoid null dereference in case of strdup() failure.
CID: 1407938
>Updates: #1060
>Change-Id: Iec6e72ae8cb54f6d0a287615c43756325b2026ec
>Signed-off-by: nik-redhat <nladha@redhat.com>
Upstream link: https://review.gluster.org/c/glusterfs/+/25045
BUG: 1997447
Change-Id: I47e6e2402badaf4103607b4164f19142a99a2f71
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280065
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/features/locks/src/posix.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c
index d5effef..03c4907 100644
--- a/xlators/features/locks/src/posix.c
+++ b/xlators/features/locks/src/posix.c
@@ -494,6 +494,9 @@ pl_inodelk_xattr_fill_multiple(dict_t *this, char *key, data_t *value,
char *save_ptr = NULL;
tmp_key = gf_strdup(key);
+ if (!tmp_key)
+ return -1;
+
strtok_r(tmp_key, ":", &save_ptr);
if (!*save_ptr) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, EINVAL,
--
1.8.3.1

From 59c05230c0df58765e30553c66bbcc0c9965d362 Mon Sep 17 00:00:00 2001
From: nik-redhat <nladha@redhat.com>
Date: Tue, 11 Aug 2020 23:12:26 +0530
Subject: [PATCH 597/610] glusterd: memory deallocated twice
Issue:
If the pointer tmpiter is destroyed inside the function,
the out label still checks for it and tries to destroy
the same pointer again.
Fix:
Pass the pointer by reference instead of by value. When
the pointer is freed and set to NULL inside the function,
the change persists in the calling function, so the next
time gf_store_iter_destroy() is called it won't try to
free the pointer again.
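A minimal sketch of the double-free pattern and the pointer-to-pointer
fix (hypothetical names, not the store code):

    #include <stdlib.h>

    typedef struct iter { int pos; } iter_t;

    /* Destroying through a pointer-to-pointer lets the callee clear the
     * caller's copy, so a second destroy call becomes a harmless no-op. */
    static void iter_destroy(iter_t **iter)
    {
        if (!*iter)
            return;        /* already destroyed */
        free(*iter);
        *iter = NULL;      /* the caller's pointer is cleared too */
    }

    int main(void)
    {
        iter_t *it = calloc(1, sizeof(*it));
        iter_destroy(&it); /* frees and nulls */
        iter_destroy(&it); /* safe: *iter is NULL now */
        return 0;
    }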
CID: 1430122
>Updates: #1060
>Change-Id: I019cea8e301c7cc87be792c03b58722fc96f04ef
>Signed-off-by: nik-redhat <nladha@redhat.com>
Upstream link: https://review.gluster.org/c/glusterfs/+/24855
BUG: 1997447
Change-Id: Ib403efd08d47a69d25f291ae61c9cbfcaaa05da8
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280076
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/glusterfs/store.h | 2 +-
libglusterfs/src/store.c | 12 +++++++-----
xlators/mgmt/glusterd/src/glusterd-store.c | 16 ++++++++--------
3 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/libglusterfs/src/glusterfs/store.h b/libglusterfs/src/glusterfs/store.h
index 68a20ad..76af2df 100644
--- a/libglusterfs/src/glusterfs/store.h
+++ b/libglusterfs/src/glusterfs/store.h
@@ -93,7 +93,7 @@ int32_t
gf_store_iter_get_matching(gf_store_iter_t *iter, char *key, char **value);
int32_t
-gf_store_iter_destroy(gf_store_iter_t *iter);
+gf_store_iter_destroy(gf_store_iter_t **iter);
char *
gf_store_strerror(gf_store_op_errno_t op_errno);
diff --git a/libglusterfs/src/store.c b/libglusterfs/src/store.c
index 3af627a..e4931bf 100644
--- a/libglusterfs/src/store.c
+++ b/libglusterfs/src/store.c
@@ -606,23 +606,25 @@ out:
}
int32_t
-gf_store_iter_destroy(gf_store_iter_t *iter)
+gf_store_iter_destroy(gf_store_iter_t **iter)
{
int32_t ret = -1;
- if (!iter)
+ if (!(*iter))
return 0;
/* gf_store_iter_new will not return a valid iter object with iter->file
* being NULL*/
- ret = fclose(iter->file);
+ ret = fclose((*iter)->file);
if (ret)
gf_msg("", GF_LOG_ERROR, errno, LG_MSG_FILE_OP_FAILED,
"Unable"
" to close file: %s, ret: %d",
- iter->filepath, ret);
+ (*iter)->filepath, ret);
+
+ GF_FREE(*iter);
+ *iter = NULL;
- GF_FREE(iter);
return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index a8651d8..e027575 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -2576,7 +2576,7 @@ glusterd_store_retrieve_snapd(glusterd_volinfo_t *volinfo)
ret = 0;
out:
- if (gf_store_iter_destroy(iter)) {
+ if (gf_store_iter_destroy(&iter)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
"Failed to destroy store iter");
ret = -1;
@@ -2895,13 +2895,13 @@ glusterd_store_retrieve_bricks(glusterd_volinfo_t *volinfo)
ret = 0;
out:
- if (gf_store_iter_destroy(tmpiter)) {
+ if (gf_store_iter_destroy(&tmpiter)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
"Failed to destroy store iter");
ret = -1;
}
- if (gf_store_iter_destroy(iter)) {
+ if (gf_store_iter_destroy(&iter)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
"Failed to destroy store iter");
ret = -1;
@@ -3067,7 +3067,7 @@ glusterd_store_retrieve_node_state(glusterd_volinfo_t *volinfo)
ret = 0;
out:
- if (gf_store_iter_destroy(iter)) {
+ if (gf_store_iter_destroy(&iter)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
"Failed to destroy store iter");
ret = -1;
@@ -3379,7 +3379,7 @@ glusterd_store_update_volinfo(glusterd_volinfo_t *volinfo)
ret = 0;
out:
- if (gf_store_iter_destroy(iter)) {
+ if (gf_store_iter_destroy(&iter)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
"Failed to destroy store iter");
ret = -1;
@@ -3574,7 +3574,7 @@ glusterd_store_retrieve_options(xlator_t *this)
goto out;
ret = 0;
out:
- (void)gf_store_iter_destroy(iter);
+ (void)gf_store_iter_destroy(&iter);
gf_store_handle_destroy(shandle);
return ret;
}
@@ -4026,7 +4026,7 @@ glusterd_store_update_snap(glusterd_snap_t *snap)
ret = 0;
out:
- if (gf_store_iter_destroy(iter)) {
+ if (gf_store_iter_destroy(&iter)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
"Failed to destroy store iter");
ret = -1;
@@ -4774,7 +4774,7 @@ glusterd_store_retrieve_peers(xlator_t *this)
is_ok = _gf_true;
next:
- (void)gf_store_iter_destroy(iter);
+ (void)gf_store_iter_destroy(&iter);
if (!is_ok) {
gf_log(this->name, GF_LOG_WARNING,
--
1.8.3.1

From 84aaaded4e958a10c7492233c053e3c681f2d575 Mon Sep 17 00:00:00 2001
From: nik-redhat <nladha@redhat.com>
Date: Thu, 2 Jul 2020 18:10:32 +0530
Subject: [PATCH 598/610] glusterd: null dereference
Issue:
There has been either an explicit null
dereference or a dereference after null
check in some cases.
Fix:
Added the proper conditions for the null checks
and fixed the null dereferencing.
CID: 1430106 : Dereference after null check
CID: 1430120 : Explicit null dereferenced
CID: 1430132 : Dereference after null check
CID: 1430134 : Dereference after null check
>Change-Id: I7e795cf9f7146a633097c26a766f16b159881fa3
>Updates: #1060
>Signed-off-by: nik-redhat <nladha@redhat.com>
Upstream link: https://review.gluster.org/c/glusterfs/+/24664
BUG: 1997447
Change-Id: I2b2632c93094d0e7b9fbd65a2ca2b0eaf6212d79
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280083
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-syncop.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 05c9e11..f1807cd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1797,7 +1797,7 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
pending_node = NULL;
ret = 0;
out:
- if (pending_node)
+ if (pending_node && pending_node->node)
glusterd_pending_node_put_rpc(pending_node);
if (rsp_dict)
--
1.8.3.1

From 4186f81596a481a5c0c5a707fc9b2358ee8f49f0 Mon Sep 17 00:00:00 2001
From: nik-redhat <nladha@redhat.com>
Date: Fri, 3 Jul 2020 17:18:33 +0530
Subject: [PATCH 599/610] afr: null dereference & negative value
Added a check for NULL before dereferencing
the object as it may be NULL in a few cases
inside the function. Also, added a check for
the negative value of gfid_idx.
CID: 1430140
CID: 1430145
>Change-Id: Ib7d23459b48bbc471dbcccab6d20572261882d11
>Updates: #1060
>Signed-off-by: nik-redhat <nladha@redhat.com>
Upstream link: https://review.gluster.org/c/glusterfs/+/24671
BUG: 1997447
Change-Id: I7e705a106d97001b67f5cde8589413c0c24ee507
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280085
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/cluster/afr/src/afr-self-heal-common.c | 2 +-
xlators/cluster/afr/src/afr-self-heal-name.c | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
index 0954d2c..cbd5117 100644
--- a/xlators/cluster/afr/src/afr-self-heal-common.c
+++ b/xlators/cluster/afr/src/afr-self-heal-common.c
@@ -140,7 +140,7 @@ heal:
}
}
out:
- if (gfid_idx && (*gfid_idx == -1) && (ret == 0)) {
+ if (gfid_idx && (*gfid_idx == -1) && (ret == 0) && local) {
ret = -afr_final_errno(local, priv);
}
loc_wipe(&loc);
diff --git a/xlators/cluster/afr/src/afr-self-heal-name.c b/xlators/cluster/afr/src/afr-self-heal-name.c
index 9ec2066..c5ab8d7 100644
--- a/xlators/cluster/afr/src/afr-self-heal-name.c
+++ b/xlators/cluster/afr/src/afr-self-heal-name.c
@@ -353,7 +353,7 @@ __afr_selfheal_name_do(call_frame_t *frame, xlator_t *this, inode_t *parent,
ret = __afr_selfheal_assign_gfid(this, parent, pargfid, bname, inode,
replies, gfid, locked_on, source, sources,
is_gfid_absent, &gfid_idx);
- if (ret)
+ if (ret || (gfid_idx < 0))
return ret;
ret = __afr_selfheal_name_impunge(frame, this, parent, pargfid, bname,
--
1.8.3.1

From 1cd16553d436fa703f5e18d71c35108d0e179e8b Mon Sep 17 00:00:00 2001
From: nik-redhat <nladha@redhat.com>
Date: Thu, 9 Apr 2020 11:36:34 +0530
Subject: [PATCH 600/610] dht xlator: integer handling issue
Issue: The ret value was passed to the log function
instead of the proper errno value.
Fix: Pass the errno generated at the failure site
to the log function.
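A minimal sketch of the out-parameter errno pattern introduced by the
fix (hypothetical helper, not the DHT code):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Report failure via the return value, and hand the caller a real
     * errno through an out parameter instead of the raw ret value. */
    static int heal_xattr(int *op_errno)
    {
        void *copy = malloc(64);
        if (!copy) {
            *op_errno = ENOMEM;  /* a meaningful errno, not a negative ret */
            return -1;
        }
        free(copy);
        return 0;
    }

    int main(void)
    {
        int op_errno = 0;
        if (heal_xattr(&op_errno) != 0)
            fprintf(stderr, "xattr heal failed: %s\n", strerror(op_errno));
        return 0;
    }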
CID: 1415824 : Improper use of negative value
CID: 1420205 : Improper use of negative value
>Change-Id: Iaa7407ebd03eda46a2c027695e6bf0f598b371b2
>Updates: #1060
>Signed-off-by: nik-redhat <nladha@redhat.com>
Upstream link: https://review.gluster.org/c/glusterfs/+/24314
BUG: 1997447
Change-Id: Ibb7f432dbcc9ffd8dff6be6f984a6705894d6bef
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280086
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/cluster/dht/src/dht-common.c | 12 ++++++++----
xlators/cluster/dht/src/dht-common.h | 2 +-
xlators/cluster/dht/src/dht-helper.c | 9 ++++++---
xlators/cluster/dht/src/dht-selfheal.c | 8 +++++---
4 files changed, 20 insertions(+), 11 deletions(-)
diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c
index e6a16ff..5eaaa1e 100644
--- a/xlators/cluster/dht/src/dht-common.c
+++ b/xlators/cluster/dht/src/dht-common.c
@@ -672,13 +672,14 @@ dht_discover_complete(xlator_t *this, call_frame_t *discover_frame)
if (local->need_xattr_heal && !heal_path) {
local->need_xattr_heal = 0;
- ret = dht_dir_xattr_heal(this, local);
- if (ret)
- gf_msg(this->name, GF_LOG_ERROR, ret,
+ ret = dht_dir_xattr_heal(this, local, &op_errno);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, op_errno,
DHT_MSG_DIR_XATTR_HEAL_FAILED,
"xattr heal failed for "
"directory gfid is %s ",
gfid_local);
+ }
}
}
@@ -1205,7 +1206,7 @@ dht_dict_get_array(dict_t *dict, char *key, int32_t value[], int32_t size,
to non hashed subvol
*/
int
-dht_dir_xattr_heal(xlator_t *this, dht_local_t *local)
+dht_dir_xattr_heal(xlator_t *this, dht_local_t *local, int *op_errno)
{
dht_local_t *copy_local = NULL;
call_frame_t *copy = NULL;
@@ -1217,6 +1218,7 @@ dht_dir_xattr_heal(xlator_t *this, dht_local_t *local)
"No gfid exists for path %s "
"so healing xattr is not possible",
local->loc.path);
+ *op_errno = EIO;
goto out;
}
@@ -1230,6 +1232,7 @@ dht_dir_xattr_heal(xlator_t *this, dht_local_t *local)
"Memory allocation failed "
"for path %s gfid %s ",
local->loc.path, gfid_local);
+ *op_errno = ENOMEM;
DHT_STACK_DESTROY(copy);
} else {
copy_local->stbuf = local->stbuf;
@@ -1244,6 +1247,7 @@ dht_dir_xattr_heal(xlator_t *this, dht_local_t *local)
"Synctask creation failed to heal xattr "
"for path %s gfid %s ",
local->loc.path, gfid_local);
+ *op_errno = ENOMEM;
DHT_STACK_DESTROY(copy);
}
}
diff --git a/xlators/cluster/dht/src/dht-common.h b/xlators/cluster/dht/src/dht-common.h
index b856c68..1cb1c0c 100644
--- a/xlators/cluster/dht/src/dht-common.h
+++ b/xlators/cluster/dht/src/dht-common.h
@@ -1493,7 +1493,7 @@ dht_dir_set_heal_xattr(xlator_t *this, dht_local_t *local, dict_t *dst,
dict_t *src, int *uret, int *uflag);
int
-dht_dir_xattr_heal(xlator_t *this, dht_local_t *local);
+dht_dir_xattr_heal(xlator_t *this, dht_local_t *local, int *op_errno);
int32_t
dht_dict_get_array(dict_t *dict, char *key, int32_t value[], int32_t size,
diff --git a/xlators/cluster/dht/src/dht-helper.c b/xlators/cluster/dht/src/dht-helper.c
index 4c3940a..d3444b3 100644
--- a/xlators/cluster/dht/src/dht-helper.c
+++ b/xlators/cluster/dht/src/dht-helper.c
@@ -2105,6 +2105,7 @@ dht_heal_full_path_done(int op_ret, call_frame_t *heal_frame, void *data)
dht_local_t *local = NULL;
xlator_t *this = NULL;
int ret = -1;
+ int op_errno = 0;
local = heal_frame->local;
main_frame = local->main_frame;
@@ -2114,10 +2115,12 @@ dht_heal_full_path_done(int op_ret, call_frame_t *heal_frame, void *data)
dht_set_fixed_dir_stat(&local->postparent);
if (local->need_xattr_heal) {
local->need_xattr_heal = 0;
- ret = dht_dir_xattr_heal(this, local);
- if (ret)
- gf_msg(this->name, GF_LOG_ERROR, ret, DHT_MSG_DIR_XATTR_HEAL_FAILED,
+ ret = dht_dir_xattr_heal(this, local, &op_errno);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, op_errno,
+ DHT_MSG_DIR_XATTR_HEAL_FAILED,
"xattr heal failed for directory %s ", local->loc.path);
+ }
}
DHT_STACK_UNWIND(lookup, main_frame, 0, 0, local->inode, &local->stbuf,
diff --git a/xlators/cluster/dht/src/dht-selfheal.c b/xlators/cluster/dht/src/dht-selfheal.c
index 8af7301..2da9817 100644
--- a/xlators/cluster/dht/src/dht-selfheal.c
+++ b/xlators/cluster/dht/src/dht-selfheal.c
@@ -1471,6 +1471,7 @@ dht_selfheal_dir_mkdir(call_frame_t *frame, loc_t *loc, dht_layout_t *layout,
{
int missing_dirs = 0;
int i = 0;
+ int op_errno = 0;
int ret = -1;
dht_local_t *local = NULL;
xlator_t *this = NULL;
@@ -1493,13 +1494,14 @@ dht_selfheal_dir_mkdir(call_frame_t *frame, loc_t *loc, dht_layout_t *layout,
if (!__is_root_gfid(local->stbuf.ia_gfid)) {
if (local->need_xattr_heal) {
local->need_xattr_heal = 0;
- ret = dht_dir_xattr_heal(this, local);
- if (ret)
- gf_msg(this->name, GF_LOG_ERROR, ret,
+ ret = dht_dir_xattr_heal(this, local, &op_errno);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, op_errno,
DHT_MSG_DIR_XATTR_HEAL_FAILED,
"%s:xattr heal failed for "
"directory (gfid = %s)",
local->loc.path, local->gfid);
+ }
} else {
if (!gf_uuid_is_null(local->gfid))
gf_uuid_copy(loc->gfid, local->gfid);
--
1.8.3.1

From 6d7049a19029331266f70f68d860bbccef01a35d Mon Sep 17 00:00:00 2001
From: Nikhil Ladha <nladha@redhat.com>
Date: Thu, 8 Jul 2021 11:26:54 +0530
Subject: [PATCH 601/610] coverity: resource leak (#2321)
Issue:
Variable `arg` is not freed before the function exits,
which leads to a resource leak.
Fix:
Free the arg variable if the status of the function call
`glusterd_compare_friend_volume` is
`GLUSTERD_VOL_COMP_UPDATE_REQ`, or if `glusterd_launch_synctask`
fails to start the process.
Also, added a check for the return value of
`glusterd_launch_synctask`, and exit if the
thread creation fails.
CID: 1401716
>Updates: #1060
>Change-Id: I4abd621771f88853d8d01e9039cdee2f3d862c4f
>Signed-off-by: nik-redhat <nladha@redhat.com>
Upstream link: https://github.com/gluster/glusterfs/pull/2321
BUG: 1997447
Change-Id: Ida81dfcd58c5ef45d3ae036d6bd6b36dc6693538
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280090
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-utils.c | 10 +++++++---
xlators/mgmt/glusterd/src/glusterd-utils.h | 2 +-
2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index c037933..cec9c20 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -5371,6 +5371,7 @@ glusterd_compare_friend_data(dict_t *peer_data, dict_t *cmp, int32_t *status,
if (GLUSTERD_VOL_COMP_RJT == *status) {
ret = 0;
+ update = _gf_false;
goto out;
}
if (GLUSTERD_VOL_COMP_UPDATE_REQ == *status) {
@@ -5385,11 +5386,12 @@ glusterd_compare_friend_data(dict_t *peer_data, dict_t *cmp, int32_t *status,
* first brick to come up before attaching the subsequent bricks
* in case brick multiplexing is enabled
*/
- glusterd_launch_synctask(glusterd_import_friend_volumes_synctask, arg);
+ ret = glusterd_launch_synctask(glusterd_import_friend_volumes_synctask,
+ arg);
}
out:
- if (ret && arg) {
+ if ((ret || !update) && arg) {
dict_unref(arg->peer_data);
dict_unref(arg->peer_ver_data);
GF_FREE(arg);
@@ -13115,7 +13117,7 @@ gd_default_synctask_cbk(int ret, call_frame_t *frame, void *opaque)
return ret;
}
-void
+int
glusterd_launch_synctask(synctask_fn_t fn, void *opaque)
{
xlator_t *this = NULL;
@@ -13131,6 +13133,8 @@ glusterd_launch_synctask(synctask_fn_t fn, void *opaque)
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_SPAWN_SVCS_FAIL,
"Failed to spawn bricks"
" and other volume related services");
+
+ return ret;
}
/*
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 4541471..3f4f3b8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -681,7 +681,7 @@ int32_t
glusterd_take_lvm_snapshot(glusterd_brickinfo_t *brickinfo,
char *origin_brick_path);
-void
+int
glusterd_launch_synctask(synctask_fn_t fn, void *opaque);
int
--
1.8.3.1

From 2ff83650a5f05e3f06853df6d79d3b18f88dfb23 Mon Sep 17 00:00:00 2001
From: Nikhil Ladha <nladha@redhat.com>
Date: Thu, 6 May 2021 10:45:46 +0530
Subject: [PATCH 602/610] coverity: null dereference (#2395)
Fix:
Updated the code to make it more readable and fixed
the NULL dereferencing.
CID: 1234622
>Updates: #1060
>Change-Id: I05bd203bc46fe84be86398bd664a3485409c3bfe
>Signed-off-by: nik-redhat <nladha@redhat.com>
Upstream link: https://github.com/gluster/glusterfs/pull/2395
BUG: 1997447
Change-Id: If39cc85115de673a83b6c97137ea8d1f0f825245
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280093
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/cluster/dht/src/dht-lock.c | 32 +++++++++++++++-----------------
1 file changed, 15 insertions(+), 17 deletions(-)
diff --git a/xlators/cluster/dht/src/dht-lock.c b/xlators/cluster/dht/src/dht-lock.c
index f9bac4f..6474dfa 100644
--- a/xlators/cluster/dht/src/dht-lock.c
+++ b/xlators/cluster/dht/src/dht-lock.c
@@ -914,37 +914,35 @@ dht_nonblocking_inodelk_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
dht_local_t *local = NULL;
int lk_index = 0, call_cnt = 0;
char gfid[GF_UUID_BUF_SIZE] = {0};
+ dht_ilock_wrap_t *my_layout;
local = frame->local;
lk_index = (long)cookie;
+ my_layout = &(local->lock[0].layout.my_layout);
+
if (op_ret == -1) {
- local->lock[0].layout.my_layout.op_ret = -1;
- local->lock[0].layout.my_layout.op_errno = op_errno;
-
- if (local && local->lock[0].layout.my_layout.locks[lk_index]) {
- uuid_utoa_r(local->lock[0]
- .layout.my_layout.locks[lk_index]
- ->loc.inode->gfid,
- gfid);
-
- gf_msg_debug(
- this->name, op_errno,
- "inodelk failed on gfid: %s "
- "subvolume: %s",
- gfid,
- local->lock[0].layout.my_layout.locks[lk_index]->xl->name);
+ my_layout->op_ret = -1;
+ my_layout->op_errno = op_errno;
+
+ if (my_layout->locks[lk_index]) {
+ uuid_utoa_r(my_layout->locks[lk_index]->loc.inode->gfid, gfid);
+
+ gf_msg_debug(this->name, op_errno,
+ "inodelk failed on gfid: %s "
+ "subvolume: %s",
+ gfid, my_layout->locks[lk_index]->xl->name);
}
goto out;
}
- local->lock[0].layout.my_layout.locks[lk_index]->locked = _gf_true;
+ my_layout->locks[lk_index]->locked = _gf_true;
out:
call_cnt = dht_frame_return(frame);
if (is_last_call(call_cnt)) {
- if (local->lock[0].layout.my_layout.op_ret < 0) {
+ if (my_layout->op_ret < 0) {
dht_inodelk_cleanup(frame);
return 0;
}
--
1.8.3.1

From 015e6cac71b0a0c330f1e4792f9d60214b191f45 Mon Sep 17 00:00:00 2001
From: karthik-us <ksubrahm@redhat.com>
Date: Thu, 7 Oct 2021 21:07:46 +0530
Subject: [PATCH 603/610] Coverity: Resource leak fix (CID: 1356547)
Issue:
In function gf_svc_readdirp() there is a chance that 'local' will be
allocated but not released in the failure path.
Fix:
Assign 'local' to 'frame->local' immediately after the successful allocation, so
it will be released by the existing failure path code itself.
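A minimal sketch of this "attach before anything else can fail"
ownership pattern (hypothetical types, not the snapview-client code):

    #include <stdlib.h>

    typedef struct frame { void *local; } frame_t;

    /* The single cleanup path frees whatever is attached to the frame. */
    static void frame_destroy(frame_t *frame)
    {
        free(frame->local);
        frame->local = NULL;
    }

    static int readdirp(frame_t *frame)
    {
        void *local = calloc(1, 64);
        if (!local)
            goto err;
        /* attach immediately: any later failure path that destroys the
         * frame also releases local, so no leak is possible */
        frame->local = local;

        /* ... later steps may 'goto err' without leaking ... */
        return 0;
    err:
        frame_destroy(frame);
        return -1;
    }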
> Upstream patch: https://github.com/gluster/glusterfs/pull/2362/
> Change-Id: I4474dc4d4be5432d169cb7d434728f211054997e
> Signed-off-by: karthik-us <ksubrahm@redhat.com>
> Updates: gluster#1060
BUG: 1997447
Change-Id: I4474dc4d4be5432d169cb7d434728f211054997e
Signed-off-by: karthik-us <ksubrahm@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280100
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/features/snapview-client/src/snapview-client.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/xlators/features/snapview-client/src/snapview-client.c b/xlators/features/snapview-client/src/snapview-client.c
index 9c789ae..e97db89 100644
--- a/xlators/features/snapview-client/src/snapview-client.c
+++ b/xlators/features/snapview-client/src/snapview-client.c
@@ -2156,6 +2156,7 @@ gf_svc_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
"failed to allocate local");
goto out;
}
+ frame->local = local;
/*
* This is mainly for samba shares (or windows clients). As part of
@@ -2184,7 +2185,6 @@ gf_svc_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
local->subvolume = subvolume;
local->fd = fd_ref(fd);
- frame->local = local;
STACK_WIND(frame, gf_svc_readdirp_cbk, subvolume, subvolume->fops->readdirp,
fd, size, off, xdata);
--
1.8.3.1

From dee1c932df22ee12fe4568b40e58a475309e62fd Mon Sep 17 00:00:00 2001
From: karthik-us <ksubrahm@redhat.com>
Date: Thu, 7 Oct 2021 21:18:49 +0530
Subject: [PATCH 604/610] Coverity: Fix dereference before null check (CID:
1391415)
Problem:
In function gf_client_dump_inodes_to_dict() there is a null check for
a variable which is already dereferenced in the previous line. This
means there is a chance that this variable can be null, yet it
is not validated for null before it is dereferenced in the first
place.
Fix:
Added a null check before the variable is dereferenced for the first
time.
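A minimal sketch of the reordered check (hypothetical types); the
short-circuit && guarantees the pointer is validated before any member
access:

    #include <string.h>

    struct xlator { const char *name; void *itable; };
    struct client { struct xlator *bound_xl; };

    static int bound_to(struct client *client, const char *name)
    {
        /* null check first, member access second */
        return client->bound_xl &&
               strcmp(client->bound_xl->name, name) == 0;
    }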
> Upstream patch: https://github.com/gluster/glusterfs/pull/2369/
> Change-Id: I988b0e93542782353a8059e33db1522b6a5e55f8
> Signed-off-by: karthik-us <ksubrahm@redhat.com>
> Updates: gluster#1060
BUG: 1997447
Change-Id: I988b0e93542782353a8059e33db1522b6a5e55f8
Signed-off-by: karthik-us <ksubrahm@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280103
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/client_t.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/libglusterfs/src/client_t.c b/libglusterfs/src/client_t.c
index e875c8b..216900a 100644
--- a/libglusterfs/src/client_t.c
+++ b/libglusterfs/src/client_t.c
@@ -828,8 +828,9 @@ gf_client_dump_inodes_to_dict(xlator_t *this, dict_t *dict)
clienttable->cliententries[count].next_free)
continue;
client = clienttable->cliententries[count].client;
- if (!strcmp(client->bound_xl->name, this->name)) {
- if (client->bound_xl && client->bound_xl->itable) {
+ if (client->bound_xl &&
+ !strcmp(client->bound_xl->name, this->name)) {
+ if (client->bound_xl->itable) {
/* Presently every brick contains only
* one bound_xl for all connections.
* This will lead to duplicating of
--
1.8.3.1

From 25fc2530f7ee6d7267e2ccc1b75a47a3ae539dff Mon Sep 17 00:00:00 2001
From: karthik-us <ksubrahm@redhat.com>
Date: Thu, 7 Oct 2021 21:29:27 +0530
Subject: [PATCH 605/610] Coverity: Fix copy into fixed size buffer (CID:
1325542)
Problem:
In __mnt3_fresh_lookup() mres->resolveloc.path is copied into
a fixed-size buffer, mres->remainingdir, with strncpy without checking
the size of the source string. This could lead to a buffer overflow.
Fix:
Copy at most the destination buffer size and check whether the source
string would overflow it. If so, log an error message and return.
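A minimal sketch of the snprintf-based check used by the fix: snprintf
never writes past the buffer and returns the length it would have
needed, so truncation is detectable:

    #include <stdio.h>

    static int copy_path(char *dst, size_t dstsize, const char *src)
    {
        /* always NUL-terminates; returns the untruncated length */
        if ((size_t)snprintf(dst, dstsize, "%s", src) >= dstsize) {
            fprintf(stderr, "path too long: %s\n", src);
            return -1;
        }
        return 0;
    }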
> Upstream patch: https://github.com/gluster/glusterfs/pull/2474/
> Change-Id: I26dd0653d2636c667ad4e356d12d3d51956c77c3
> Signed-off-by: karthik-us <ksubrahm@redhat.com>
> Updates: gluster#1060
BUG: 1997447
Change-Id: I26dd0653d2636c667ad4e356d12d3d51956c77c3
Signed-off-by: karthik-us <ksubrahm@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280106
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/nfs/server/src/mount3.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/xlators/nfs/server/src/mount3.c b/xlators/nfs/server/src/mount3.c
index 734453c..3951b9e 100644
--- a/xlators/nfs/server/src/mount3.c
+++ b/xlators/nfs/server/src/mount3.c
@@ -1104,8 +1104,13 @@ __mnt3_fresh_lookup(mnt3_resolve_t *mres)
{
inode_unlink(mres->resolveloc.inode, mres->resolveloc.parent,
mres->resolveloc.name);
- strncpy(mres->remainingdir, mres->resolveloc.path,
- strlen(mres->resolveloc.path));
+ if (snprintf(mres->remainingdir, sizeof(mres->remainingdir), "%s",
+ mres->resolveloc.path) >= sizeof(mres->remainingdir)) {
+ gf_msg(GF_MNT, GF_LOG_ERROR, EFAULT, NFS_MSG_RESOLVE_INODE_FAIL,
+ "Failed to copy resolve path: %s", mres->resolveloc.path);
+ nfs_loc_wipe(&mres->resolveloc);
+ return -EFAULT;
+ }
nfs_loc_wipe(&mres->resolveloc);
return __mnt3_resolve_subdir(mres);
}
--
1.8.3.1

View File

@@ -0,0 +1,69 @@
From a6ba95b73469ad81d8c5a27293f8d09cc26928a3 Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Fri, 18 Dec 2020 16:28:29 +0530
Subject: [PATCH 606/610] dht: handle DHT_SUBVOL_STATUS_KEY in dht_pt_getxattr
(#1934)
In non-distribute volumes (plain replicate, ec), DHT uses pass-through
FOPs (dht_pt_getxattr) instead of the usual FOPs (dht_getxattr). The
pass-through FOP was not handling the DHT_SUBVOL_STATUS_KEY virtual
xattr, because of which the geo-rep session was going into a faulty
state. Fixing it now.
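A minimal sketch of the interception pattern the patch adds (the macro
value and the stub functions below are assumptions for illustration; the
real key is defined in the DHT headers):

    #include <stdio.h>
    #include <string.h>

    #define DHT_SUBVOL_STATUS_KEY "dht.subvol.status"  /* illustrative value */
    #define SLEN(s) (sizeof(s) - 1)

    static void answer_virtual_xattr(const char *key) { printf("handled by DHT: %s\n", key); }
    static void wind_to_child(const char *key)        { printf("wound to child: %s\n", key); }

    /* Pass-through getxattr must still intercept DHT's own virtual
     * xattrs instead of winding them down, mirroring dht_getxattr(). */
    static void pt_getxattr(const char *key)
    {
        if (key && strncmp(key, DHT_SUBVOL_STATUS_KEY,
                           SLEN(DHT_SUBVOL_STATUS_KEY)) == 0) {
            answer_virtual_xattr(key);
            return;
        }
        wind_to_child(key);
    }

    int main(void)
    {
        pt_getxattr(DHT_SUBVOL_STATUS_KEY); /* answered locally */
        pt_getxattr("user.foo");            /* passed through */
        return 0;
    }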
> updates: #1925
> Change-Id: I766b5b5c047c954a9957ab78aca680eedef1ff1f
> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Upstream patch: https://github.com/gluster/glusterfs/pull/1934
BUG: 2006205
Change-Id: I766b5b5c047c954a9957ab78aca680eedef1ff1f
Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280112
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/cluster/dht/src/dht-common.c | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+)
diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c
index 5eaaa1e..c8980e5 100644
--- a/xlators/cluster/dht/src/dht-common.c
+++ b/xlators/cluster/dht/src/dht-common.c
@@ -11584,9 +11584,33 @@ int
dht_pt_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
const char *key, dict_t *xdata)
{
+ int op_errno = -1;
+ dht_local_t *local = NULL;
+
+ VALIDATE_OR_GOTO(frame, err);
+ VALIDATE_OR_GOTO(this, err);
+ VALIDATE_OR_GOTO(loc, err);
+ VALIDATE_OR_GOTO(loc->inode, err);
+ VALIDATE_OR_GOTO(this->private, err);
+
+ local = dht_local_init(frame, loc, NULL, GF_FOP_GETXATTR);
+ if (!local) {
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ if (key &&
+ strncmp(key, DHT_SUBVOL_STATUS_KEY, SLEN(DHT_SUBVOL_STATUS_KEY)) == 0) {
+ dht_vgetxattr_subvol_status(frame, this, key);
+ return 0;
+ }
+
STACK_WIND(frame, dht_pt_getxattr_cbk, FIRST_CHILD(this),
FIRST_CHILD(this)->fops->getxattr, loc, key, xdata);
return 0;
+err:
+ DHT_STACK_UNWIND(getxattr, frame, -1, op_errno, NULL, NULL);
+ return 0;
}
static int
--
1.8.3.1

View File

@@ -0,0 +1,121 @@
From 4b65ff0d1a3d70fcf3cfa8ab769135ae12f529d8 Mon Sep 17 00:00:00 2001
From: nik-redhat <nladha@redhat.com>
Date: Thu, 7 Oct 2021 22:02:32 +0530
Subject: [PATCH 607/610] SELinux: Fix boolean management
* Remove %triggerun ganesha
This trigger shouldn't be needed to begin with: removing
selinux-policy-targeted means that the user is either switching SELinux
off or switching the policy (to "mls" or "minimum"), and in either case
the current boolean setting is not going to be used any more. The last
option, removal of glusterfs-ganesha, is covered by '%postun ganesha'.
But more importantly, the trigger is called every time
selinux-policy-targeted is updated (which can be avoided).
%triggerun is executed after %triggerin -
https://docs.fedoraproject.org/en-US/packaging-guidelines/Scriptlets/#ordering
So when selinux-policy-targeted is updated, the new version is installed
first triggering `semanage boolean -m ganesha_use_fusefs --on`,
and then the old version is uninstalled triggering
`semanage boolean -m ganesha_use_fusefs --off`.
* use selinux_[un]set_booleans instead of "semanage boolean"
The macro pair properly manages SELinux stores and doesn't disable the
boolean in case it was enabled before ${name}-ganesha was installed.
* Only change booleans when the package is first installed or
uninstalled
Updating ${name}-ganesha would disable the boolean because %postun is
called after %post (same issue as with the triggers).
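For context, a hedged sketch of the scriptlet-argument semantics the new
guards rely on: in RPM scriptlets, $1 is the number of package instances
that remain after the transaction, which is what lets %post and %postun
distinguish first install, upgrade, and final removal.

    %post ganesha
    # $1 == 1: first install; $1 >= 2: upgrade, where the old package's
    # %postun would otherwise undo the boolean after this %post has run
    if [ $1 -eq 1 ]; then
        %selinux_set_booleans ganesha_use_fusefs=1
    fi

    %postun ganesha
    # $1 == 0: last instance erased; restore the pre-install value
    if [ $1 -eq 0 ]; then
        %selinux_unset_booleans ganesha_use_fusefs=1
    fi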
Signed-off-by: Vit Mojzis <vmojzis@redhat.com>
Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
Change-Id: Ibb926ffbe00c9f000bd740708c0a4b3435ee7871
PR: https://github.com/gluster/glusterfs/pull/2833
Issue: https://github.com/gluster/glusterfs/issues/2522
Resolves: rhbz#1973566
Resolves: rhbz#1975400
BUG: 1973566
Change-Id: Idef6cbd6bce35151518d6f76e5b74774e5756fc9
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280114
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Kaleb Keithley <kkeithle@redhat.com>
---
glusterfs.spec.in | 34 +++++++++++++++++++++-------------
1 file changed, 21 insertions(+), 13 deletions(-)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 424f4ab..a9a83b1 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -954,7 +954,10 @@ exit 0
%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%post ganesha
-semanage boolean -m ganesha_use_fusefs --on
+# first install
+if [ $1 -eq 1 ]; then
+ %selinux_set_booleans ganesha_use_fusefs=1
+fi
exit 0
%endif
%endif
@@ -962,7 +965,9 @@ exit 0
%if ( 0%{!?_without_georeplication:1} )
%post geo-replication
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
-%selinux_set_booleans %{selinuxbooleans}
+if [ $1 -eq 1 ]; then
+ %selinux_set_booleans %{selinuxbooleans}
+fi
%endif
if [ $1 -ge 1 ]; then
%systemd_postun_with_restart glusterd
@@ -1089,29 +1094,32 @@ exit 0
%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%postun ganesha
-semanage boolean -m ganesha_use_fusefs --off
+if [ $1 -eq 0 ]; then
+ # use the value of ganesha_use_fusefs from before glusterfs-ganesha was installed
+ %selinux_unset_booleans ganesha_use_fusefs=1
+fi
exit 0
%endif
%endif
-##-----------------------------------------------------------------------------
-## All %%trigger should be placed here and keep them sorted
-##
-%if ( 0%{!?_without_server:1} )
-%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
-%trigger ganesha -- selinux-policy-targeted
-semanage boolean -m ganesha_use_fusefs --on
+%if ( 0%{!?_without_georeplication:1} )
+%postun geo-replication
+%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
+if [ $1 -eq 0 ]; then
+ %selinux_unset_booleans %{selinuxbooleans}
+fi
exit 0
%endif
%endif
##-----------------------------------------------------------------------------
-## All %%triggerun should be placed here and keep them sorted
+## All %%trigger should be placed here and keep them sorted
##
%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
-%triggerun ganesha -- selinux-policy-targeted
-semanage boolean -m ganesha_use_fusefs --off
+# ensure ganesha_use_fusefs is on in case of policy mode switch (eg. mls->targeted)
+%triggerin ganesha -- selinux-policy-targeted
+semanage boolean -m ganesha_use_fusefs --on -S targeted
exit 0
%endif
%endif
--
1.8.3.1

View File

@@ -0,0 +1,143 @@
From d806760f1d4c78a2519b01f1c2d07aba0c533755 Mon Sep 17 00:00:00 2001
From: Pranith Kumar K <pkarampu@redhat.com>
Date: Fri, 28 Aug 2020 16:03:54 +0530
Subject: [PATCH 608/610] cluster/ec: Track heal statistics in shd
With this change we should be able to inspect the number of heals
attempted and completed by each shd.
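As a standalone illustration of the completion check introduced below
(the status format comes from the code comment in the patch; the sample
strings are hypothetical):

    #include <stdio.h>
    #include <string.h>

    /* Heal status has the form "Good: <bits>, Bad: <bits>". The heal is
     * complete when every Good bit is 1, i.e. the first '0' in the
     * string (if any) appears after the last ':' (the one after "Bad"). */
    static int heal_completed(const char *status)
    {
        const char *zero = strchr(status, '0');
        const char *bad = strrchr(status, ':');
        if (!zero || !bad)
            return 0; /* malformed, or no '0' anywhere */
        return zero > bad;
    }

    int main(void)
    {
        printf("%d\n", heal_completed("Good: 111, Bad: 000")); /* 1: completed */
        printf("%d\n", heal_completed("Good: 101, Bad: 010")); /* 0: still healing */
        return 0;
    }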
> Upstream patch: https://review.gluster.org/#/c/glusterfs/+/24926/
> fixes: #1453
> Change-Id: I10f5d86efcc0a8e4d648da808751d37725682c39
> Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
BUG: 1853631
Change-Id: I10f5d86efcc0a8e4d648da808751d37725682c39
Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280208
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/cluster/ec/src/ec-heald.c | 49 ++++++++++++++++++++++++++++++++++++++-
xlators/cluster/ec/src/ec-types.h | 5 ++++
xlators/cluster/ec/src/ec.c | 6 +++++
3 files changed, 59 insertions(+), 1 deletion(-)
diff --git a/xlators/cluster/ec/src/ec-heald.c b/xlators/cluster/ec/src/ec-heald.c
index 4f4b6aa..cd4d3ad 100644
--- a/xlators/cluster/ec/src/ec-heald.c
+++ b/xlators/cluster/ec/src/ec-heald.c
@@ -152,15 +152,58 @@ ec_shd_index_purge(xlator_t *subvol, inode_t *inode, char *name)
return ret;
}
+static gf_boolean_t
+ec_is_heal_completed(char *status)
+{
+ char *bad_pos = NULL;
+ char *zero_pos = NULL;
+
+ if (!status) {
+ return _gf_false;
+ }
+
+ /*Logic:
+ * Status will be of the form Good: <binary>, Bad: <binary>
+ * If heal completes, if we do strchr for '0' it should be present after
+ * 'Bad:' i.e. strRchr for ':'
+ * */
+
+ zero_pos = strchr(status, '0');
+ bad_pos = strrchr(status, ':');
+ if (!zero_pos || !bad_pos) {
+ /*malformed status*/
+ return _gf_false;
+ }
+
+ if (zero_pos > bad_pos) {
+ return _gf_true;
+ }
+
+ return _gf_false;
+}
+
int
ec_shd_selfheal(struct subvol_healer *healer, int child, loc_t *loc,
gf_boolean_t full)
{
dict_t *xdata = NULL;
+ dict_t *dict = NULL;
uint32_t count;
int32_t ret;
+ char *heal_status = NULL;
+ ec_t *ec = healer->this->private;
+
+ GF_ATOMIC_INC(ec->stats.shd.attempted);
+ ret = syncop_getxattr(healer->this, loc, &dict, EC_XATTR_HEAL, NULL,
+ &xdata);
+ if (ret == 0) {
+ if (dict && (dict_get_str(dict, EC_XATTR_HEAL, &heal_status) == 0)) {
+ if (ec_is_heal_completed(heal_status)) {
+ GF_ATOMIC_INC(ec->stats.shd.completed);
+ }
+ }
+ }
- ret = syncop_getxattr(healer->this, loc, NULL, EC_XATTR_HEAL, NULL, &xdata);
if (!full && (loc->inode->ia_type == IA_IFDIR)) {
/* If we have just healed a directory, it's possible that
* other index entries have appeared to be healed. */
@@ -179,6 +222,10 @@ ec_shd_selfheal(struct subvol_healer *healer, int child, loc_t *loc,
dict_unref(xdata);
}
+ if (dict) {
+ dict_unref(dict);
+ }
+
return ret;
}
diff --git a/xlators/cluster/ec/src/ec-types.h b/xlators/cluster/ec/src/ec-types.h
index 700dc39..ef7a7fe 100644
--- a/xlators/cluster/ec/src/ec-types.h
+++ b/xlators/cluster/ec/src/ec-types.h
@@ -626,6 +626,11 @@ struct _ec_statistics {
requests. (Basically memory allocation
errors). */
} stripe_cache;
+ struct {
+ gf_atomic_t attempted; /*Number of heals attempted on
+ files/directories*/
+ gf_atomic_t completed; /*Number of heals complted on files/directories*/
+ } shd;
};
struct _ec {
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index 047cdd8..24de9e8 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -649,6 +649,8 @@ ec_statistics_init(ec_t *ec)
GF_ATOMIC_INIT(ec->stats.stripe_cache.evicts, 0);
GF_ATOMIC_INIT(ec->stats.stripe_cache.allocs, 0);
GF_ATOMIC_INIT(ec->stats.stripe_cache.errors, 0);
+ GF_ATOMIC_INIT(ec->stats.shd.attempted, 0);
+ GF_ATOMIC_INIT(ec->stats.shd.completed, 0);
}
int32_t
@@ -1445,6 +1447,10 @@ ec_dump_private(xlator_t *this)
GF_ATOMIC_GET(ec->stats.stripe_cache.allocs));
gf_proc_dump_write("errors", "%" GF_PRI_ATOMIC,
GF_ATOMIC_GET(ec->stats.stripe_cache.errors));
+ gf_proc_dump_write("heals-attempted", "%" GF_PRI_ATOMIC,
+ GF_ATOMIC_GET(ec->stats.shd.attempted));
+ gf_proc_dump_write("heals-completed", "%" GF_PRI_ATOMIC,
+ GF_ATOMIC_GET(ec->stats.shd.completed));
return 0;
}
--
1.8.3.1

View File

@@ -0,0 +1,43 @@
From 89cdfb40264c12105a1b4990fa9b45290aa6cef0 Mon Sep 17 00:00:00 2001
From: Vinayakswami Hariharmath <vharihar@redhat.com>
Date: Fri, 8 Oct 2021 09:40:41 +0530
Subject: [PATCH 609/610] feature/shard: wrong dname results in dentry not
found error
A wrong dname passed to inode_unlink() in
shard_evicted_inode_fsync_cbk() resulted in a "dentry not found"
error.
This patch addresses the issue.
> upstream patch: https://github.com/gluster/glusterfs/pull/2475
> Fixes: #2470
> Change-Id: I6c479980ae3fa7ba558327055a9e5e5c2d2a850f
> Signed-off-by: Vinayakswami Hariharmath vharihar@redhat.com
BUG: 1911665
Change-Id: I96aa5f57303b69a08990de039ddeecad7e7ae6af
Signed-off-by: Vinayakswami Hariharmath <vharihar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280202
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/features/shard/src/shard.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c
index b828ff9..882373f 100644
--- a/xlators/features/shard/src/shard.c
+++ b/xlators/features/shard/src/shard.c
@@ -950,7 +950,7 @@ shard_evicted_inode_fsync_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
{
__shard_inode_ctx_get(shard_inode, this, &ctx);
if ((list_empty(&ctx->to_fsync_list)) && (list_empty(&ctx->ilist))) {
- shard_make_block_bname(ctx->block_num, shard_inode->gfid,
+ shard_make_block_bname(ctx->block_num, ctx->base_gfid,
block_bname, sizeof(block_bname));
inode_unlink(shard_inode, priv->dot_shard_inode, block_bname);
/* The following unref corresponds to the ref held by
--
1.8.3.1

View File

@@ -0,0 +1,51 @@
From b3e86a66de224107f6760157a7cb692227e42954 Mon Sep 17 00:00:00 2001
From: Shwetha Acharya <sacharya@redhat.com>
Date: Mon, 30 Aug 2021 18:54:15 +0530
Subject: [PATCH 610/610] glusterfs.spec.in: remove condtionals from tar
dependency (#2734)
* glusterfs.spec.in: remove condtionals from tar dependency
The conditional on the RHEL minor version fails, so tar is not marked
as required. As there is no universal macro that exposes the minor
release, remove the conditionals around the "Requires: tar" statement.
With this change tar is marked as required for geo-rep irrespective of
the RHEL version (8.3 and above or otherwise).
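The failure mode, sketched: %{?macro} expands to nothing when the macro
is undefined, so on a host where rhel_minor_version is not defined only
the guard digit remains and the comparison is always false:

    # with rhel_minor_version undefined:
    #   0%{?rhel_minor_version} >= 3   expands to   0 >= 3   (false)
    # so on such RHEL 8 hosts the conditional body was skipped and
    # "Requires: tar" was never emitted
    %if ( 0%{?rhel} && ( ( 0%{?rhel} == 8 && 0%{?rhel_minor_version} >= 3 ) || 0%{?rhel} >= 9 ) )
    Requires: tar
    %endif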
> Change-Id: Id1e3320a0b1a245fc9cd8c7acb09cc119fca18b8
> Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
Upstream patch: https://github.com/gluster/glusterfs/pull/2734
BUG: 1901468
Change-Id: Id1e3320a0b1a245fc9cd8c7acb09cc119fca18b8
Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/280116
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
glusterfs.spec.in | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index a9a83b1..8b6646f 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -521,9 +521,8 @@ Requires: python%{_pythonver}-gluster = %{version}-%{release}
Requires: rsync
Requires: util-linux
Requires: %{name}-libs%{?_isa} = %{version}-%{release}
-%if ( 0%{?rhel} && ( ( 0%{?rhel} == 8 && 0%{?rhel_minor_version} >= 3 ) || 0%{?rhel} >= 9 ) )
Requires: tar
-%endif
+
# required for setting selinux bools
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
Requires(post): policycoreutils-python-utils
--
1.8.3.1

View File

@@ -237,7 +237,7 @@ Release: 0.2%{?prereltag:.%{prereltag}}%{?dist}
%else
Name: glusterfs
Version: 6.0
-Release: 59%{?dist}
+Release: 60%{?dist}
ExcludeArch: i686
%endif
License: GPLv2 or LGPLv3+
@@ -903,6 +903,30 @@ Patch0583: 0583-protocol-client-Initialize-list-head-to-prevent-NULL.patch
Patch0584: 0584-dht-fixing-xattr-inconsistency.patch
Patch0585: 0585-ganesha_ha-ganesha_grace-RA-fails-in-start-and-or-fa.patch
Patch0586: 0586-protocol-client-Do-not-reopen-fd-post-handshake-if-p.patch
+Patch0587: 0587-Update-rfc.sh-to-rhgs-3.5.6.patch
+Patch0588: 0588-locks-Fix-null-gfid-in-lock-contention-notifications.patch
+Patch0589: 0589-extras-fix-for-postscript-failure-on-logrotation-of-.patch
+Patch0590: 0590-cluster-afr-Don-t-check-for-stale-entry-index.patch
+Patch0591: 0591-afr-check-for-valid-iatt.patch
+Patch0592: 0592-md-cache-fix-integer-signedness-mismatch.patch
+Patch0593: 0593-dht-explicit-null-dereference.patch
+Patch0594: 0594-glusterd-resource-leaks.patch
+Patch0595: 0595-glusterd-use-after-free-coverity-issue.patch
+Patch0596: 0596-locks-null-dereference.patch
+Patch0597: 0597-glusterd-memory-deallocated-twice.patch
+Patch0598: 0598-glusterd-null-dereference.patch
+Patch0599: 0599-afr-null-dereference-nagative-value.patch
+Patch0600: 0600-dht-xlator-integer-handling-issue.patch
+Patch0601: 0601-coverity-resource-leak-2321.patch
+Patch0602: 0602-coverity-null-dereference-2395.patch
+Patch0603: 0603-Coverity-Resource-leak-fix-CID-1356547.patch
+Patch0604: 0604-Coverity-Fix-dereference-before-null-check-CID-13914.patch
+Patch0605: 0605-Coverity-Fix-copy-into-fixed-size-buffer-CID-1325542.patch
+Patch0606: 0606-dht-handle-DHT_SUBVOL_STATUS_KEY-in-dht_pt_getxattr-.patch
+Patch0607: 0607-SELinux-Fix-boolean-management.patch
+Patch0608: 0608-cluster-ec-Track-heal-statistics-in-shd.patch
+Patch0609: 0609-feature-shard-wrong-dname-results-in-dentry-not-foun.patch
+Patch0610: 0610-glusterfs.spec.in-remove-condtionals-from-tar-depend.patch
%description
GlusterFS is a distributed file-system capable of scaling to several
@@ -1111,9 +1135,8 @@ Requires: python%{_pythonver}-gluster = %{version}-%{release}
Requires: rsync
Requires: util-linux
Requires: %{name}-libs%{?_isa} = %{version}-%{release}
-%if ( 0%{?rhel} && ( ( 0%{?rhel} == 8 && 0%{?rhel_minor_version} >= 3 ) || 0%{?rhel} >= 9 ) )
Requires: tar
-%endif
+
# required for setting selinux bools
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
Requires(post): policycoreutils-python-utils
@@ -1617,7 +1640,10 @@ exit 0
%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%post ganesha
-semanage boolean -m ganesha_use_fusefs --on
+# first install
+if [ $1 -eq 1 ]; then
+  %selinux_set_booleans ganesha_use_fusefs=1
+fi
exit 0
%endif
%endif
@@ -1625,7 +1651,9 @@ exit 0
%if ( 0%{!?_without_georeplication:1} )
%post geo-replication
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
-%selinux_set_booleans %{selinuxbooleans}
+if [ $1 -eq 1 ]; then
+  %selinux_set_booleans %{selinuxbooleans}
+fi
%endif
if [ $1 -ge 1 ]; then
%systemd_postun_with_restart glusterd
@@ -1752,7 +1780,20 @@ exit 0
%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%postun ganesha
-semanage boolean -m ganesha_use_fusefs --off
+if [ $1 -eq 0 ]; then
+  # use the value of ganesha_use_fusefs from before glusterfs-ganesha was installed
+  %selinux_unset_booleans ganesha_use_fusefs=1
+fi
exit 0
%endif
%endif
+
+%if ( 0%{!?_without_georeplication:1} )
+%postun geo-replication
+%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
+if [ $1 -eq 0 ]; then
+  %selinux_unset_booleans %{selinuxbooleans}
+fi
+exit 0
+%endif
+%endif
@@ -1762,19 +1803,9 @@ exit 0
##
%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
-%trigger ganesha -- selinux-policy-targeted
-semanage boolean -m ganesha_use_fusefs --on
-exit 0
-%endif
-%endif
-
-##-----------------------------------------------------------------------------
-## All %%triggerun should be placed here and keep them sorted
-##
-%if ( 0%{!?_without_server:1} )
-%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
-%triggerun ganesha -- selinux-policy-targeted
-semanage boolean -m ganesha_use_fusefs --off
+# ensure ganesha_use_fusefs is on in case of policy mode switch (eg. mls->targeted)
+%triggerin ganesha -- selinux-policy-targeted
+semanage boolean -m ganesha_use_fusefs --on -S targeted
exit 0
%endif
%endif
@@ -2649,6 +2680,7 @@ fi
%endif
%changelog
* Mon Aug 09 2021 Mohan Boddu <mboddu@redhat.com> - 6.0-57.4
- Rebuilt for IMA sigs, glibc 2.34, aarch64 flags
Related: rhbz#1991688
@@ -2662,6 +2694,10 @@ fi
- Fix changlog chronological order by removing unneeded changelogs
- fixes bug bz#1939340
+* Mon Oct 11 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-60
+- fixes bugs bz#1668303 bz#1853631 bz#1901468 bz#1904137 bz#1911665
+  bz#1962972 bz#1973566 bz#1994593 bz#1995029 bz#1997447 bz#2006205
+
* Tue Jul 06 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-59
- fixes bugs bz#1689375