autobuild v6.0-46

Resolves: bz#1873469 bz#1881823
Signed-off-by: Gluster Jenkins <dkhandel+glusterjenkins@redhat.com>
Gluster Jenkins 2020-10-21 08:34:07 +00:00
parent 859811e556
commit 46b769fe94
5 changed files with 317 additions and 1 deletion

0470-extras-snap_scheduler-changes-in-gluster-shared-stor.patch

@@ -0,0 +1,63 @@
From 8e427716f4e2855093b1a1a0e3a9ec79ebac7faf Mon Sep 17 00:00:00 2001
From: Shwetha K Acharya <sacharya@redhat.com>
Date: Thu, 10 Sep 2020 13:49:09 +0530
Subject: [PATCH 470/473] extras/snap_scheduler: changes in
gluster-shared-storage mount path
The patch https://review.gluster.org/#/c/glusterfs/+/24934/ changes the mount point
of gluster_shared_storage from /var/run to /run, to address the issue of a symlink
in the mount path recorded in fstab.
NOTE: the mount point /var/run is a symlink to /run
The required changes to the gluster_shared_storage mount path are
introduced in snap_scheduler with this patch.
>Fixes: #1476
>Change-Id: I9ce88c2f624c6aa5982de04edfee2d0a9f160d62
>Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
backport of https://review.gluster.org/#/c/glusterfs/+/24971/
BUG: 1873469
Change-Id: I9ce88c2f624c6aa5982de04edfee2d0a9f160d62
Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/211391
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/snap_scheduler/gcron.py | 4 ++--
extras/snap_scheduler/snap_scheduler.py | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/extras/snap_scheduler/gcron.py b/extras/snap_scheduler/gcron.py
index cc16310..0e4df77 100755
--- a/extras/snap_scheduler/gcron.py
+++ b/extras/snap_scheduler/gcron.py
@@ -19,10 +19,10 @@ import logging.handlers
import fcntl
-GCRON_TASKS = "/var/run/gluster/shared_storage/snaps/glusterfs_snap_cron_tasks"
+GCRON_TASKS = "/run/gluster/shared_storage/snaps/glusterfs_snap_cron_tasks"
GCRON_CROND_TASK = "/etc/cron.d/glusterfs_snap_cron_tasks"
GCRON_RELOAD_FLAG = "/var/run/gluster/crond_task_reload_flag"
-LOCK_FILE_DIR = "/var/run/gluster/shared_storage/snaps/lock_files/"
+LOCK_FILE_DIR = "/run/gluster/shared_storage/snaps/lock_files/"
log = logging.getLogger("gcron-logger")
start_time = 0.0
diff --git a/extras/snap_scheduler/snap_scheduler.py b/extras/snap_scheduler/snap_scheduler.py
index 5a29d41..e8fcc44 100755
--- a/extras/snap_scheduler/snap_scheduler.py
+++ b/extras/snap_scheduler/snap_scheduler.py
@@ -67,7 +67,7 @@ except ImportError:
SCRIPT_NAME = "snap_scheduler"
scheduler_enabled = False
log = logging.getLogger(SCRIPT_NAME)
-SHARED_STORAGE_DIR="/var/run/gluster/shared_storage"
+SHARED_STORAGE_DIR="/run/gluster/shared_storage"
GCRON_DISABLED = SHARED_STORAGE_DIR+"/snaps/gcron_disabled"
GCRON_ENABLED = SHARED_STORAGE_DIR+"/snaps/gcron_enabled"
GCRON_TASKS = SHARED_STORAGE_DIR+"/snaps/glusterfs_snap_cron_tasks"
--
1.8.3.1
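
For background, here is a minimal Python sketch of the canonical-path problem that this and the following mount-path patches address. It is an illustration only, not part of any patch; the paths are the ones named above.

    # Illustration only: a path is canonical when resolving its symlinks
    # changes nothing. On RHEL 8, /var/run is a symlink to /run, so the
    # old mount point fails this test, which is what fstab "complains"
    # about at boot.
    import os

    def is_canonical(path):
        return os.path.realpath(path) == path

    print(is_canonical("/var/run/gluster/shared_storage"))  # False on RHEL 8
    print(is_canonical("/run/gluster/shared_storage"))      # True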

0471-nfs-ganesha-gluster_shared_storage-fails-to-automoun.patch

@@ -0,0 +1,73 @@
From d23ad767281af85cf07f5c3f63de482d40ee1953 Mon Sep 17 00:00:00 2001
From: Shwetha K Acharya <sacharya@redhat.com>
Date: Thu, 10 Sep 2020 13:16:12 +0530
Subject: [PATCH 471/473] nfs-ganesha: gluster_shared_storage fails to
automount on node reboot on rhel 8
The patch https://review.gluster.org/#/c/glusterfs/+/24934/ changes the mount point
of gluster_shared_storage from /var/run to /run, to address the issue of a symlink
in the mount path recorded in fstab.
NOTE: the mount point /var/run is a symlink to /run
The required changes to the gluster_shared_storage mount path are
introduced in nfs-ganesha with this patch.
>Fixes: #1475
>Change-Id: I9c7677a053e1291f71476d47ba6fa2e729f59625
>Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
backport of https://review.gluster.org/#/c/glusterfs/+/24970/
BUG: 1873469
Change-Id: I9c7677a053e1291f71476d47ba6fa2e729f59625
Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/211392
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/ganesha/ocf/ganesha_nfsd | 2 +-
extras/ganesha/scripts/ganesha-ha.sh | 2 +-
extras/hook-scripts/start/post/S31ganesha-start.sh | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/extras/ganesha/ocf/ganesha_nfsd b/extras/ganesha/ocf/ganesha_nfsd
index 93fc8be..f91e8b6 100644
--- a/extras/ganesha/ocf/ganesha_nfsd
+++ b/extras/ganesha/ocf/ganesha_nfsd
@@ -36,7 +36,7 @@ else
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
fi
-OCF_RESKEY_ha_vol_mnt_default="/var/run/gluster/shared_storage"
+OCF_RESKEY_ha_vol_mnt_default="/run/gluster/shared_storage"
: ${OCF_RESKEY_ha_vol_mnt=${OCF_RESKEY_ha_vol_mnt_default}}
ganesha_meta_data() {
diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index a6814b1..9790a71 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -24,7 +24,7 @@ GANESHA_HA_SH=$(realpath $0)
HA_NUM_SERVERS=0
HA_SERVERS=""
HA_VOL_NAME="gluster_shared_storage"
-HA_VOL_MNT="/var/run/gluster/shared_storage"
+HA_VOL_MNT="/run/gluster/shared_storage"
HA_CONFDIR=$HA_VOL_MNT"/nfs-ganesha"
SERVICE_MAN="DISTRO_NOT_FOUND"
diff --git a/extras/hook-scripts/start/post/S31ganesha-start.sh b/extras/hook-scripts/start/post/S31ganesha-start.sh
index 90ba6bc..7ad6f23 100755
--- a/extras/hook-scripts/start/post/S31ganesha-start.sh
+++ b/extras/hook-scripts/start/post/S31ganesha-start.sh
@@ -4,7 +4,7 @@ OPTSPEC="volname:,gd-workdir:"
VOL=
declare -i EXPORT_ID
ganesha_key="ganesha.enable"
-GANESHA_DIR="/var/run/gluster/shared_storage/nfs-ganesha"
+GANESHA_DIR="/run/gluster/shared_storage/nfs-ganesha"
CONF1="$GANESHA_DIR/ganesha.conf"
GLUSTERD_WORKDIR=
--
1.8.3.1

0472-geo-rep-gluster_shared_storage-fails-to-automount-on.patch

@@ -0,0 +1,98 @@
From ccd45222c46b91b4d0cd57db9ea8b1515c97ada0 Mon Sep 17 00:00:00 2001
From: Shwetha K Acharya <sacharya@redhat.com>
Date: Mon, 31 Aug 2020 20:08:39 +0530
Subject: [PATCH 472/473] geo-rep: gluster_shared_storage fails to automount on
node reboot on rhel 8.
Issue: On reboot, all mounts are wiped out; only the mounts listed
in /etc/fstab are mounted automatically during boot/reboot.
But /etc/fstab complains about not getting a canonical path
(it gets a path containing a symlink).
This is because gluster_shared_storage is mounted on
/var/run, which is a symlink to /run. This is a general practice
followed by most operating systems.
[root@ ~]# ls -lsah /var/run
0 lrwxrwxrwx. 1 root root 6 Jul 22 19:39 /var/run -> ../run
Fix: Mount gluster_shared_storage on /run.
(It is also seen that /var/run is mostly used by older or
legacy systems, so it is good practice to update /var/run to /run.)
>fixes: #1459
>Change-Id: I8c16653be8cd746c84f01abf0eea19284fb97c77
>Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
backport of https://review.gluster.org/#/c/glusterfs/+/24934/
BUG: 1873469
Change-Id: I8c16653be8cd746c84f01abf0eea19284fb97c77
Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/211387
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
.../set/post/S32gluster_enable_shared_storage.sh | 18 +++++++++---------
geo-replication/gsyncd.conf.in | 2 +-
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
index 885ed03..3bae37c 100755
--- a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
+++ b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
@@ -79,9 +79,9 @@ done
if [ "$option" == "disable" ]; then
# Unmount the volume on all the nodes
- umount /var/run/gluster/shared_storage
- cat /etc/fstab | grep -v "gluster_shared_storage /var/run/gluster/shared_storage/" > /var/run/gluster/fstab.tmp
- mv /var/run/gluster/fstab.tmp /etc/fstab
+ umount /run/gluster/shared_storage
+ cat /etc/fstab | grep -v "gluster_shared_storage /run/gluster/shared_storage/" > /run/gluster/fstab.tmp
+ mv /run/gluster/fstab.tmp /etc/fstab
fi
if [ "$is_originator" == 1 ]; then
@@ -105,7 +105,7 @@ function check_volume_status()
}
mount_cmd="mount -t glusterfs $local_node_hostname:/gluster_shared_storage \
- /var/run/gluster/shared_storage"
+ /run/gluster/shared_storage"
if [ "$option" == "enable" ]; then
retry=0;
@@ -120,10 +120,10 @@ if [ "$option" == "enable" ]; then
status=$(check_volume_status)
done
# Mount the volume on all the nodes
- umount /var/run/gluster/shared_storage
- mkdir -p /var/run/gluster/shared_storage
+ umount /run/gluster/shared_storage
+ mkdir -p /run/gluster/shared_storage
$mount_cmd
- cp /etc/fstab /var/run/gluster/fstab.tmp
- echo "$local_node_hostname:/gluster_shared_storage /var/run/gluster/shared_storage/ glusterfs defaults 0 0" >> /var/run/gluster/fstab.tmp
- mv /var/run/gluster/fstab.tmp /etc/fstab
+ cp /etc/fstab /run/gluster/fstab.tmp
+ echo "$local_node_hostname:/gluster_shared_storage /run/gluster/shared_storage/ glusterfs defaults 0 0" >> /run/gluster/fstab.tmp
+ mv /run/gluster/fstab.tmp /etc/fstab
fi
diff --git a/geo-replication/gsyncd.conf.in b/geo-replication/gsyncd.conf.in
index 11e57fd..9688c79 100644
--- a/geo-replication/gsyncd.conf.in
+++ b/geo-replication/gsyncd.conf.in
@@ -123,7 +123,7 @@ type=bool
help=Use this to set Active Passive mode to meta-volume.
[meta-volume-mnt]
-value=/var/run/gluster/shared_storage
+value=/run/gluster/shared_storage
help=Meta Volume or Shared Volume mount path
[allow-network]
--
1.8.3.1
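
As a rough sketch of what the enable/disable branches of S32gluster_enable_shared_storage.sh do to /etc/fstab, here is a hypothetical Python rendering. It is not part of the patch; the hostname argument is a placeholder, and the temp-file path mirrors the script. Note the script matches the exact mount path, which is why every occurrence had to move from /var/run to /run consistently.

    # Hypothetical sketch of the fstab edits done by the hook script;
    # requires root, illustration only.
    import shutil

    FSTAB = "/etc/fstab"
    TMP = "/run/gluster/fstab.tmp"
    MNT = "/run/gluster/shared_storage"

    def enable(local_node_hostname):
        # Append an entry so the volume is remounted automatically on reboot.
        entry = ("%s:/gluster_shared_storage %s/ glusterfs defaults 0 0\n"
                 % (local_node_hostname, MNT))
        with open(FSTAB) as src, open(TMP, "w") as tmp:
            tmp.write(src.read() + entry)
        shutil.move(TMP, FSTAB)  # the script does the same with cp/echo/mv

    def disable():
        # Filter the shared-storage entry out, again via temp file and mv.
        with open(FSTAB) as src:
            kept = [l for l in src
                    if "gluster_shared_storage %s/" % MNT not in l]
        with open(TMP, "w") as tmp:
            tmp.writelines(kept)
        shutil.move(TMP, FSTAB)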

0473-glusterd-Fix-Add-brick-with-increasing-replica-count.patch

@@ -0,0 +1,75 @@
From 80f1b3aedcde02ae25b341519857ba9a5b2fa722 Mon Sep 17 00:00:00 2001
From: Sheetal Pamecha <spamecha@redhat.com>
Date: Thu, 24 Sep 2020 19:43:29 +0530
Subject: [PATCH 473/473] glusterd: Fix Add-brick with increasing replica count
failure
Problem: the add-brick operation fails with a "multiple bricks on the same
server" error when the replica count is increased.
This happened because of extra iterations in a loop comparing hostnames:
when fewer bricks than the replica count were supplied, a brick would get
compared with itself, resulting in the above error.
>Upstream-patch: https://review.gluster.org/#/c/glusterfs/+/25029
>Fixes: #1508
BUG: 1881823
Change-Id: I8668e964340b7bf59728bb838525d2db062197ed
Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/213064
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/bugs/glusterd/brick-order-check-add-brick.t | 21 +++++++++++++++++++++
xlators/mgmt/glusterd/src/glusterd-utils.c | 4 ++++
2 files changed, 25 insertions(+)
diff --git a/tests/bugs/glusterd/brick-order-check-add-brick.t b/tests/bugs/glusterd/brick-order-check-add-brick.t
index 29f0ed1..0be31da 100644
--- a/tests/bugs/glusterd/brick-order-check-add-brick.t
+++ b/tests/bugs/glusterd/brick-order-check-add-brick.t
@@ -37,4 +37,25 @@ EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5 force
EXPECT '4 x 3 = 12' volinfo_field $V0 'Number of Bricks'
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$L1/${V0}1 $H2:$L2/${V0}1
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks'
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#Add-brick with Increasing replica count
+TEST $CLI_1 volume add-brick $V0 replica 3 $H3:$L3/${V0}1
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+
+#Add-brick with Increasing replica count from same host should fail
+TEST ! $CLI_1 volume add-brick $V0 replica 5 $H1:$L1/${V0}2 $H1:$L1/${V0}3
+
+#adding multiple bricks from same host should fail the brick order check
+TEST ! $CLI_1 volume add-brick $V0 replica 3 $H1:$L1/${V0}{4..6} $H2:$L2/${V0}{7..9}
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+
cleanup
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 545e688..d25fc8a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -14908,6 +14908,10 @@ glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type,
i = 0;
ai_list_tmp1 = cds_list_entry(ai_list->list.next, addrinfo_list_t, list);
+ if (brick_count < sub_count) {
+ sub_count = brick_count;
+ }
+
/* Check for bad brick order */
while (i < brick_count) {
++i;
--
1.8.3.1
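
To make the failure mode concrete, here is a simplified Python model of the brick-order check (hypothetical; the real logic is glusterd_check_brick_order() in C), including the clamp this patch adds:

    # Simplified model: bricks are grouped into replica sets of sub_count
    # bricks, and no two bricks within one set may share a host.
    def check_brick_order(brick_hosts, sub_count):
        # The fix: when fewer bricks than the replica count are supplied
        # (e.g. an add-brick that only raises the replica count), shrink
        # the window; in the C code the comparison otherwise wrapped
        # around and matched a brick against itself.
        if len(brick_hosts) < sub_count:
            sub_count = len(brick_hosts)
        for start in range(0, len(brick_hosts), sub_count):
            replica_set = brick_hosts[start:start + sub_count]
            if len(set(replica_set)) != len(replica_set):
                return False  # two bricks of one set on the same host
        return True

    # Raising replica 2 -> 3 supplies a single brick; with the clamp this
    # now passes, matching the new cases in the .t test above.
    assert check_brick_order(["host3.example.com"], 3)
    assert not check_brick_order(["h1", "h1", "h2", "h2"], 2)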

glusterfs.spec

@@ -237,7 +237,7 @@ Release: 0.1%{?prereltag:.%{prereltag}}%{?dist}
%else
Name: glusterfs
Version: 6.0
-Release: 45%{?dist}
+Release: 46%{?dist}
ExcludeArch: i686
%endif
License: GPLv2 or LGPLv3+
@@ -784,6 +784,10 @@ Patch0466: 0466-cluster-ec-Change-stale-index-handling.patch
Patch0467: 0467-build-Added-dependency-for-glusterfs-selinux.patch
Patch0468: 0468-build-Update-the-glusterfs-selinux-version.patch
Patch0469: 0469-cluster-ec-Don-t-trigger-heal-for-stale-index.patch
+Patch0470: 0470-extras-snap_scheduler-changes-in-gluster-shared-stor.patch
+Patch0471: 0471-nfs-ganesha-gluster_shared_storage-fails-to-automoun.patch
+Patch0472: 0472-geo-rep-gluster_shared_storage-fails-to-automount-on.patch
+Patch0473: 0473-glusterd-Fix-Add-brick-with-increasing-replica-count.patch
%description
GlusterFS is a distributed file-system capable of scaling to several
@@ -2527,6 +2531,9 @@ fi
%endif
%changelog
+* Wed Oct 21 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-46
+- fixes bugs bz#1873469 bz#1881823
* Wed Sep 09 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-45
- fixes bugs bz#1785714