autobuild v6.0-1

Resolves: bz#1493284 bz#1578703 bz#1600918 bz#1670415 bz#1691620
Resolves: bz#1693935 bz#1695057
Signed-off-by: Milind Changire <mchangir@redhat.com>
Milind Changire 2019-04-07 23:04:00 -04:00
parent 4341631c3a
commit 66d73904d1
55 changed files with 8207 additions and 2459 deletions

.gitignore

@@ -1 +1,2 @@
/glusterfs-3.12.2.tar.gz
/glusterfs-6.0.tar.gz


@@ -0,0 +1,26 @@
From d6ae2eb7fa7431db2108173c08b9e4455dd06005 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Thu, 21 Mar 2019 12:22:43 +0530
Subject: [PATCH 01/52] Update rfc.sh to rhgs-3.5.0
Signed-off-by: Milind Changire <mchangir@redhat.com>
---
rfc.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/rfc.sh b/rfc.sh
index 764205c..94c92ef 100755
--- a/rfc.sh
+++ b/rfc.sh
@@ -18,7 +18,7 @@ done
shift $((OPTIND-1))
-branch="release-6";
+branch="rhgs-3.5.0";
set_hooks_commit_msg()
{
--
1.8.3.1

File diff suppressed because it is too large (patch 02/52 is not shown).


@@ -0,0 +1,51 @@
From 9b58731c83bc1ee9c5f2a3cd58a8f845cf09ee82 Mon Sep 17 00:00:00 2001
From: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
Date: Mon, 21 Mar 2016 13:54:19 +0530
Subject: [PATCH 03/52] rpc: set bind-insecure to off by default
Commit 243a5b429f225acb8e7132264fe0a0835ff013d5 turned allow-insecure and
bind-insecure 'ON' by default.
Problem:
With newer versions, bind-insecure is 'ON' by default. While upgrading a
subset of nodes in a trusted storage pool, nodes running older versions of
glusterfs expect connections from secure ports only (since they still have
bind-insecure off) and therefore reject connections from the upgraded nodes,
which now use insecure ports. This leads to connection issues between peers.
Solution:
This patch turns bind-insecure 'OFF' by default to avoid the problem
explained above.
Label: DOWNSTREAM ONLY
Change-Id: Id7a19b4872399d3b019243b0857c9c7af75472f7
Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/70313
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: Atin Mukherjee <amukherj@redhat.com>
---
rpc/rpc-lib/src/rpc-transport.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/rpc/rpc-lib/src/rpc-transport.c b/rpc/rpc-lib/src/rpc-transport.c
index f9cbdf1..4beaaf9 100644
--- a/rpc/rpc-lib/src/rpc-transport.c
+++ b/rpc/rpc-lib/src/rpc-transport.c
@@ -269,8 +269,8 @@ rpc_transport_load(glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
else
trans->bind_insecure = 0;
} else {
- /* By default allow bind insecure */
- trans->bind_insecure = 1;
+ /* Turning off bind insecure by default*/
+ trans->bind_insecure = 0;
}
ret = dict_get_str(options, "transport-type", &type);
--
1.8.3.1
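
With bind-insecure off by default again, clients bind to privileged (<1024) source ports. For deployments that still need unprivileged ports during a mixed-version upgrade, the usual knobs are shown below as a minimal sketch; VOLNAME is a placeholder and the exact service command depends on the platform.

    # Allow one volume's bricks to accept client connections from unprivileged ports.
    gluster volume set VOLNAME server.allow-insecure on

    # For glusterd itself, add the following line inside the "volume management"
    # section of /etc/glusterfs/glusterd.vol, then restart glusterd:
    #     option rpc-auth-allow-insecure on
    systemctl restart glusterd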


@@ -0,0 +1,47 @@
From aa73240892a7072be68772370fd95173e6e77d10 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Mon, 21 Mar 2016 17:07:00 +0530
Subject: [PATCH 04/52] glusterd/spec: fixing autogen issue
Backport of https://code.engineering.redhat.com/gerrit/#/c/59463/
Because of the incorrect build section, autogen.sh wasn't re-run during the rpm
build process. The `extras/Makefile.in` was not regenerated with the changes
made to `extras/Makefile.am` in the firewalld patch. This meant that
`extras/Makefile` was generated without the firewalld changes. So the firewalld
config wasn't installed during `make install`, and rpmbuild later failed
because it could not find `/usr/lib/firewalld/glusterfs.xml`.
Label: DOWNSTREAM ONLY
>Reviewed-on: https://code.engineering.redhat.com/gerrit/59463
Change-Id: I498bcceeacbd839640282eb6467c9f1464505697
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/70343
Reviewed-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 7 +------
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index c655f16..f5c1f79 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -700,12 +700,7 @@ done
%build
-# RHEL6 and earlier need to manually replace config.guess and config.sub
-%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
-./autogen.sh
-%endif
-
-%configure \
+./autogen.sh && %configure \
%{?_with_asan} \
%{?_with_cmocka} \
%{?_with_debug} \
--
1.8.3.1
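
A quick way to confirm the effect of the %build change above is to rebuild the package and check that the firewalld service file is actually packaged. A rough sketch, assuming a standard rpmbuild tree and that the file lands in the server subpackage:

    # Rebuild and verify that the firewalld config made it into the payload.
    rpmbuild -ba glusterfs.spec
    rpm -qlp ~/rpmbuild/RPMS/x86_64/glusterfs-server-*.rpm | grep firewalld/glusterfs.xml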


@@ -0,0 +1,36 @@
From 44f758a56c5c5ad340ebc6d6a6478e8712c2c101 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Mon, 21 Mar 2016 22:31:02 +0530
Subject: [PATCH 05/52] libglusterfs/glusterd: Fix compilation errors
1. Removed duplicate definition of GD_OP_VER_PERSISTENT_AFR_XATTRS introduced in
d367a88 where GD_OP_VER_PERSISTENT_AFR_XATTRS was redefined
2. Fixed incorrect op-version
Label: DOWNSTREAM ONLY
Change-Id: Icfa3206e8a41a11875641f57523732b80837f8f6
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/70384
Reviewed-by: Nithya Balachandran <nbalacha@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-store.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index 64447e7..51ca3d1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -967,7 +967,7 @@ glusterd_volume_exclude_options_write(int fd, glusterd_volinfo_t *volinfo)
goto out;
}
- if (conf->op_version >= GD_OP_VERSION_RHS_3_0) {
+ if (conf->op_version >= GD_OP_VERSION_3_7_0) {
snprintf(buf, sizeof(buf), "%d", volinfo->disperse_count);
ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_DISPERSE_CNT, buf);
if (ret)
--
1.8.3.1


@@ -0,0 +1,58 @@
From 1f28e008825ae291208a9e6c714dd642f715a2a1 Mon Sep 17 00:00:00 2001
From: "Bala.FA" <barumuga@redhat.com>
Date: Mon, 7 Apr 2014 15:24:10 +0530
Subject: [PATCH 06/52] build: remove ghost directory entries
oVirt requires hook directories for gluster management, so these
directories are no longer packaged as ghost entries.
Label: DOWNSTREAM ONLY
Change-Id: Iaf1066ba0655619024f87eaaa039f0010578c567
Signed-off-by: Bala.FA <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/60133
Tested-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 19 +++++++++++++++++--
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index f5c1f79..6be492e 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -799,15 +799,30 @@ install -D -p -m 0644 extras/glusterfs-logrotate \
%{buildroot}%{_sysconfdir}/logrotate.d/glusterfs
%if ( 0%{!?_without_georeplication:1} )
-# geo-rep ghosts
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication
touch %{buildroot}%{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
install -D -p -m 0644 extras/glusterfs-georep-logrotate \
%{buildroot}%{_sysconfdir}/logrotate.d/glusterfs-georep
%endif
+%if ( 0%{!?_without_syslog:1} )
+%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
+install -D -p -m 0644 extras/gluster-rsyslog-7.2.conf \
+ %{buildroot}%{_sysconfdir}/rsyslog.d/gluster.conf.example
+%endif
+
+%if ( 0%{?rhel} && 0%{?rhel} == 6 )
+install -D -p -m 0644 extras/gluster-rsyslog-5.8.conf \
+ %{buildroot}%{_sysconfdir}/rsyslog.d/gluster.conf.example
+%endif
+
+%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
+install -D -p -m 0644 extras/logger.conf.example \
+ %{buildroot}%{_sysconfdir}/glusterfs/logger.conf.example
+%endif
+%endif
+
%if ( 0%{!?_without_server:1} )
-# the rest of the ghosts
touch %{buildroot}%{_sharedstatedir}/glusterd/glusterd.info
touch %{buildroot}%{_sharedstatedir}/glusterd/options
subdirs=(add-brick create copy-file delete gsync-create remove-brick reset set start stop)
--
1.8.3.1


@@ -0,0 +1,620 @@
From 7744475550cd27f58f536741e9c50c639d3b02d8 Mon Sep 17 00:00:00 2001
From: "Bala.FA" <barumuga@redhat.com>
Date: Thu, 6 Dec 2018 20:06:27 +0530
Subject: [PATCH 07/52] build: add RHGS specific changes
Label: DOWNSTREAM ONLY
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1074947
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1097782
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1115267
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1221743
Change-Id: I08333334745adf2350e772c6454ffcfe9c08cb89
Reviewed-on: https://code.engineering.redhat.com/gerrit/24983
Reviewed-on: https://code.engineering.redhat.com/gerrit/25451
Reviewed-on: https://code.engineering.redhat.com/gerrit/25518
Reviewed-on: https://code.engineering.redhat.com/gerrit/25983
Signed-off-by: Bala.FA <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/60134
Tested-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 485 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 481 insertions(+), 4 deletions(-)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 6be492e..eb04491 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -95,9 +95,16 @@
%{?_without_server:%global _without_server --without-server}
# disable server components forcefully as rhel <= 6
-%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
+%if ( 0%{?rhel} )
+%if ( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" )
+%global _without_server %{nil}
+%else
%global _without_server --without-server
%endif
+%endif
+
+%global _without_extra_xlators 1
+%global _without_regression_tests 1
# syslog
# if you wish to build rpms without syslog logging, compile like this
@@ -229,7 +236,8 @@ Release: 0.1%{?prereltag:.%{prereltag}}%{?dist}
%else
Name: @PACKAGE_NAME@
Version: @PACKAGE_VERSION@
-Release: 0.@PACKAGE_RELEASE@%{?dist}
+Release: @PACKAGE_RELEASE@%{?dist}
+ExcludeArch: i686
%endif
License: GPLv2 or LGPLv3+
URL: http://docs.gluster.org/
@@ -243,8 +251,6 @@ Source8: glusterfsd.init
Source0: @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz
%endif
-BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
-
Requires(pre): shadow-utils
%if ( 0%{?_with_systemd:1} )
BuildRequires: systemd
@@ -384,7 +390,9 @@ This package provides cloudsync plugins for archival feature.
Summary: Development Libraries
Requires: %{name}%{?_isa} = %{version}-%{release}
# Needed for the Glupy examples to work
+%if ( 0%{!?_without_extra_xlators:1} )
Requires: %{name}-extra-xlators%{?_isa} = %{version}-%{release}
+%endif
%description devel
GlusterFS is a distributed file-system capable of scaling to several
@@ -397,6 +405,7 @@ is in user space and easily manageable.
This package provides the development libraries and include files.
+%if ( 0%{!?_without_extra_xlators:1} )
%package extra-xlators
Summary: Extra Gluster filesystem Translators
# We need python-gluster rpm for gluster module's __init__.py in Python
@@ -415,6 +424,7 @@ is in user space and easily manageable.
This package provides extra filesystem Translators, such as Glupy,
for GlusterFS.
+%endif
%package fuse
Summary: Fuse client
@@ -440,6 +450,30 @@ is in user space and easily manageable.
This package provides support to FUSE based clients and inlcudes the
glusterfs(d) binary.
+%if ( 0%{!?_without_server:1} )
+%package ganesha
+Summary: NFS-Ganesha configuration
+Group: Applications/File
+
+Requires: %{name}-server%{?_isa} = %{version}-%{release}
+Requires: nfs-ganesha-gluster, pcs, dbus
+%if ( 0%{?rhel} && 0%{?rhel} == 6 )
+Requires: cman, pacemaker, corosync
+%endif
+
+%description ganesha
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over Infiniband RDMA
+or TCP/IP interconnect into one large parallel network file
+system. GlusterFS is one of the most sophisticated file systems in
+terms of features and extensibility. It borrows a powerful concept
+called Translators from GNU Hurd kernel. Much of the code in GlusterFS
+is in user space and easily manageable.
+
+This package provides the configuration and related files for using
+NFS-Ganesha as the NFS server using GlusterFS
+%endif
+
%if ( 0%{!?_without_georeplication:1} )
%package geo-replication
Summary: GlusterFS Geo-replication
@@ -541,6 +575,7 @@ is in user space and easily manageable.
This package provides support to ib-verbs library.
%endif
+%if ( 0%{!?_without_regression_tests:1} )
%package regression-tests
Summary: Development Tools
Requires: %{name}%{?_isa} = %{version}-%{release}
@@ -556,6 +591,7 @@ Requires: nfs-utils xfsprogs yajl psmisc bc
%description regression-tests
The Gluster Test Framework, is a suite of scripts used for
regression testing of Gluster.
+%endif
%if ( 0%{!?_without_ocf:1} )
%package resource-agents
@@ -1092,6 +1128,16 @@ exit 0
%if 0%{?_tmpfilesdir:1} && 0%{!?_without_server:1}
%{_tmpfilesdir}/gluster.conf
%endif
+%if ( 0%{?_without_extra_xlators:1} )
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/rot-13.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/performance/symlink-cache.so
+%endif
+%if ( 0%{?_without_regression_tests:1} )
+%exclude %{_datadir}/glusterfs/run-tests.sh
+%exclude %{_datadir}/glusterfs/tests
+%endif
%files api
%exclude %{_libdir}/*.so
@@ -1134,12 +1180,14 @@ exit 0
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so
+%if ( 0%{!?_without_extra_xlators:1} )
%files extra-xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
+%endif
%files fuse
# glusterfs is a symlink to glusterfsd, -server depends on -fuse.
@@ -1239,11 +1287,13 @@ exit 0
%{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
%endif
+%if ( 0%{!?_without_regression_tests:1} )
%files regression-tests
%dir %{_datadir}/glusterfs
%{_datadir}/glusterfs/run-tests.sh
%{_datadir}/glusterfs/tests
%exclude %{_datadir}/glusterfs/tests/vagrant
+%endif
%if ( 0%{!?_without_ocf:1} )
%files resource-agents
@@ -1424,6 +1474,433 @@ exit 0
%endif
%endif
+##-----------------------------------------------------------------------------
+## All %pretrans should be placed here and keep them sorted
+##
+%if 0%{!?_without_server:1}
+%pretrans -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ echo "ERROR: Distribute volumes detected. In-service rolling upgrade requires distribute volume(s) to be stopped."
+ echo "ERROR: Please stop distribute volume(s) before proceeding... exiting!"
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ echo "WARNING: Updating glusterfs requires its processes to be killed. This action does NOT incur downtime."
+ echo "WARNING: Ensure to wait for the upgraded server to finish healing before proceeding."
+ echo "WARNING: Refer upgrade section of install guide for more details"
+ echo "Please run # service glusterd stop; pkill glusterfs; pkill glusterfsd; pkill gsyncd.py;"
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans api -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-api_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans api-devel -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-api-devel_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans devel -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-devel_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans fuse -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-fuse_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
+
+
+%if 0%{?_can_georeplicate}
+%if ( 0%{!?_without_georeplication:1} )
+%pretrans geo-replication -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-geo-replication_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+%endif
+%endif
+
+
+
+%pretrans libs -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-libs_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
+
+
+%if ( 0%{!?_without_rdma:1} )
+%pretrans rdma -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-rdma_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+%endif
+
+
+
+%if ( 0%{!?_without_ocf:1} )
+%pretrans resource-agents -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-resource-agents_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+%endif
+
+
+
+%pretrans server -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-server_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+%endif
+
%changelog
* Wed Mar 6 2019 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- remove unneeded ldconfig in scriptlets
--
1.8.3.1


@@ -0,0 +1,35 @@
From 0ab54c5b274f29fcdd4787325c7183a84e875bbc Mon Sep 17 00:00:00 2001
From: "Bala.FA" <barumuga@redhat.com>
Date: Thu, 22 May 2014 08:37:27 +0530
Subject: [PATCH 08/52] secalert: remove setuid bit for fusermount-glusterfs
glusterfs-fuse: File /usr/bin/fusermount-glusterfs on x86_64 is setuid
root but is not on the setxid whitelist
Label: DOWNSTREAM ONLY
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=989480
Change-Id: Icf6e5db72ae15ccc60b02be6713fb6c4f4c8a15f
Signed-off-by: Bala.FA <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/25453
Signed-off-by: Bala.FA <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/60135
Tested-by: Milind Changire <mchangir@redhat.com>
---
contrib/fuse-util/Makefile.am | 1 -
1 file changed, 1 deletion(-)
diff --git a/contrib/fuse-util/Makefile.am b/contrib/fuse-util/Makefile.am
index abbc10e..a071c81 100644
--- a/contrib/fuse-util/Makefile.am
+++ b/contrib/fuse-util/Makefile.am
@@ -9,6 +9,5 @@ AM_CFLAGS = -Wall $(GF_CFLAGS)
install-exec-hook:
-chown root $(DESTDIR)$(bindir)/fusermount-glusterfs
- chmod u+s $(DESTDIR)$(bindir)/fusermount-glusterfs
CLEANFILES =
--
1.8.3.1


@@ -0,0 +1,57 @@
From 2adb5d540e9344149ae2591811ad34928775e6fd Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Wed, 3 Jun 2015 11:09:21 +0530
Subject: [PATCH 09/52] build: introduce security hardening flags in gluster
This patch introduces two security hardening compiler flags, RELRO & PIE, in
the gluster codebase. Setting _hardened_build to 1 doesn't guarantee the
existence of these flags in the compilation, as different versions of RHEL
ship different redhat-rpm-config macros. So the idea is to export these flags
at the spec file level.
Label: DOWNSTREAM ONLY
Change-Id: I0a1a56d0a8f54f110d306ba5e55e39b1b073dc84
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/49780
Reviewed-by: Balamurugan Arumugam <barumuga@redhat.com>
Tested-by: Balamurugan Arumugam <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/60137
Tested-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index eb04491..8a31a98 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -736,6 +736,25 @@ done
%build
+# In RHEL7 few hardening flags are available by default, however the RELRO
+# default behaviour is partial, convert to full
+%if ( 0%{?rhel} && 0%{?rhel} >= 7 )
+LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
+export LDFLAGS
+%else
+%if ( 0%{?rhel} && 0%{?rhel} == 6 )
+CFLAGS="$RPM_OPT_FLAGS -fPIE -DPIE"
+LDFLAGS="$RPM_LD_FLAGS -pie -Wl,-z,relro,-z,now"
+%else
+#It appears that with gcc-4.1.2 in RHEL5 there is an issue using both -fPIC and
+ # -fPIE that makes -z relro not work; -fPIE seems to undo what -fPIC does
+CFLAGS="$CFLAGS $RPM_OPT_FLAGS"
+LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
+%endif
+export CFLAGS
+export LDFLAGS
+%endif
+
./autogen.sh && %configure \
%{?_with_asan} \
%{?_with_cmocka} \
--
1.8.3.1
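
Whether RELRO and PIE really end up in the built binaries can be verified on an installed system with readelf; a small sketch, with /usr/sbin/glusterfsd assumed as the binary to inspect:

    # Full RELRO requires both a GNU_RELRO segment and the BIND_NOW dynamic flag.
    readelf -lW /usr/sbin/glusterfsd | grep GNU_RELRO
    readelf -dW /usr/sbin/glusterfsd | grep BIND_NOW
    # A PIE executable reports its ELF type as DYN rather than EXEC.
    readelf -hW /usr/sbin/glusterfsd | grep 'Type:'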


@@ -0,0 +1,100 @@
From bf5906cbc9bf986c7495db792d098001e28c47e3 Mon Sep 17 00:00:00 2001
From: Niels de Vos <ndevos@redhat.com>
Date: Wed, 22 Apr 2015 15:39:59 +0200
Subject: [PATCH 10/52] spec: fix/add pre-transaction scripts for geo-rep and
cli packages
The cli subpackage never had a %pretrans script; this has been added now.
The %pretrans script for geo-replication was never included in the RPM
package because it was disabled by an undefined macro (_can_georeplicate).
This macro is not used/set anywhere else, and _without_georeplication
should take care of it anyway.
Note: This is a Red Hat Gluster Storage specific patch. Upstream
packaging guidelines do not allow these kind of 'features'.
Label: DOWNSTREAM ONLY
Change-Id: I16aab5bba72f1ed178f3bcac47f9d8ef767cfcef
Signed-off-by: Niels de Vos <ndevos@redhat.com>
Signed-off-by: Bala.FA <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/50491
Reviewed-on: https://code.engineering.redhat.com/gerrit/60138
Tested-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 43 +++++++++++++++++++++++++++++++++++++++++--
1 file changed, 41 insertions(+), 2 deletions(-)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 8a31a98..b70dbfc 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1626,6 +1626,47 @@ end
+%pretrans cli -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-cli_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
+
+
%pretrans devel -p <lua>
if not posix.access("/bin/bash", "x") then
-- initial installation, no shell, no running glusterfsd
@@ -1708,7 +1749,6 @@ end
-%if 0%{?_can_georeplicate}
%if ( 0%{!?_without_georeplication:1} )
%pretrans geo-replication -p <lua>
if not posix.access("/bin/bash", "x") then
@@ -1749,7 +1789,6 @@ if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
%endif
-%endif
--
1.8.3.1


@@ -0,0 +1,138 @@
From 40eb62a8872ce061416e899fb6c0784b6253ab16 Mon Sep 17 00:00:00 2001
From: Niels de Vos <ndevos@redhat.com>
Date: Fri, 7 Dec 2018 14:05:21 +0530
Subject: [PATCH 11/52] rpm: glusterfs-devel for client-builds should not
depend on -server
glusterfs-devel for client-side packages should *not* include the
libgfdb.so symlink and libgfdb.pc file or any of the libchangelog
ones.
Label: DOWNSTREAM ONLY
Change-Id: Ifb4a9cf48841e5af5dd0a98b6de51e2ee469fc56
Signed-off-by: Niels de Vos <ndevos@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/51019
Reviewed-by: Balamurugan Arumugam <barumuga@redhat.com>
Tested-by: Balamurugan Arumugam <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/60139
Tested-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 86 +++++++++++++++++++++++++++++++++++++++----------------
1 file changed, 62 insertions(+), 24 deletions(-)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index b70dbfc..1c631db 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -450,30 +450,6 @@ is in user space and easily manageable.
This package provides support to FUSE based clients and inlcudes the
glusterfs(d) binary.
-%if ( 0%{!?_without_server:1} )
-%package ganesha
-Summary: NFS-Ganesha configuration
-Group: Applications/File
-
-Requires: %{name}-server%{?_isa} = %{version}-%{release}
-Requires: nfs-ganesha-gluster, pcs, dbus
-%if ( 0%{?rhel} && 0%{?rhel} == 6 )
-Requires: cman, pacemaker, corosync
-%endif
-
-%description ganesha
-GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
-
-This package provides the configuration and related files for using
-NFS-Ganesha as the NFS server using GlusterFS
-%endif
-
%if ( 0%{!?_without_georeplication:1} )
%package geo-replication
Summary: GlusterFS Geo-replication
@@ -1157,6 +1133,62 @@ exit 0
%exclude %{_datadir}/glusterfs/run-tests.sh
%exclude %{_datadir}/glusterfs/tests
%endif
+%if 0%{?_without_server:1}
+%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-5.8.conf
+%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-7.2.conf
+%exclude %{_sysconfdir}/glusterfs/glusterd.vol
+%exclude %{_sysconfdir}/glusterfs/glusterfs-georep-logrotate
+%exclude %{_sysconfdir}/glusterfs/glusterfs-logrotate
+%exclude %{_sysconfdir}/glusterfs/group-db-workload
+%exclude %{_sysconfdir}/glusterfs/group-distributed-virt
+%exclude %{_sysconfdir}/glusterfs/group-gluster-block
+%exclude %{_sysconfdir}/glusterfs/group-metadata-cache
+%exclude %{_sysconfdir}/glusterfs/group-nl-cache
+%exclude %{_sysconfdir}/glusterfs/group-virt.example
+%exclude %{_sysconfdir}/glusterfs/logger.conf.example
+%exclude %{_sysconfdir}/rsyslog.d/gluster.conf.example
+%exclude %{_prefix}/bin/glusterfind
+%exclude %{_prefix}/lib/firewalld/services/glusterfs.xml
+%exclude %{_prefix}/lib/systemd/system/glusterd.service
+%exclude %{_prefix}/lib/systemd/system/glusterfssharedstorage.service
+%exclude %{_prefix}/lib/tmpfiles.d/gluster.conf
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix-locks.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quotad.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/sdfs.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/selinux.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/thin-arbiter.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt/glusterd.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage/posix.so
+%exclude %{_libexecdir}/glusterfs/*
+%exclude %{_sbindir}/conf.py
+%exclude %{_sbindir}/gcron.py
+%exclude %{_sbindir}/gf_attach
+%exclude %{_sbindir}/gfind_missing_files
+%exclude %{_sbindir}/glfsheal
+%exclude %{_sbindir}/gluster
+%exclude %{_sbindir}/gluster-setgfid2path
+%exclude %{_sbindir}/glusterd
+%exclude %{_sbindir}/snap_scheduler.py
+%exclude %{_datadir}/glusterfs/scripts/control-cpu-load.sh
+%exclude %{_datadir}/glusterfs/scripts/control-mem.sh
+%exclude %{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh
+%exclude %{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh
+%exclude %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
+%exclude %{_sharedstatedir}/glusterd/*
+%endif
%files api
%exclude %{_libdir}/*.so
@@ -1190,7 +1222,13 @@ exit 0
%exclude %{_includedir}/glusterfs/api
%exclude %{_libdir}/libgfapi.so
%{_libdir}/*.so
+%if ( 0%{?_without_server:1} )
+%exclude %{_libdir}/pkgconfig/libgfchangelog.pc
+%exclude %{_libdir}/libgfchangelog.so
+%else
%{_libdir}/pkgconfig/libgfchangelog.pc
+%{_libdir}/libgfchangelog.so
+%endif
%files client-xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
--
1.8.3.1


@@ -0,0 +1,73 @@
From f054086daf4549a6227196fe37a57a7e49aa5849 Mon Sep 17 00:00:00 2001
From: "Bala.FA" <barumuga@redhat.com>
Date: Fri, 7 Dec 2018 14:13:40 +0530
Subject: [PATCH 12/52] build: add pretrans check
This patch adds pretrans check for client-xlators
NOTE: ganesha and python-gluster sub-packages are now obsolete
Label: DOWNSTREAM ONLY
Change-Id: I454016319832c11902c0ca79a79fbbcf8ac0a121
Signed-off-by: Bala.FA <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/50967
Reviewed-on: https://code.engineering.redhat.com/gerrit/60140
Tested-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 39 +++++++++++++++++++++++++++++++++++++++
1 file changed, 39 insertions(+)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 1c631db..a1ff6e0 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1704,6 +1704,45 @@ if not (ok == 0) then
end
+%pretrans client-xlators -p <lua>
+if not posix.access("/bin/bash", "x") then
+ -- initial installation, no shell, no running glusterfsd
+ return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+ pushd . > /dev/null 2>&1
+ for volume in /var/lib/glusterd/vols/*; do cd $volume;
+ vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+ volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+ if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+ exit 1;
+ fi
+ done
+
+ popd > /dev/null 2>&1
+ exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-client-xlators_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+ error("Detected running glusterfs processes", ok)
+end
+
%pretrans devel -p <lua>
if not posix.access("/bin/bash", "x") then
--
1.8.3.1


@@ -0,0 +1,50 @@
From 39932e6bbc8de25813387bb1394cc7942b79ef46 Mon Sep 17 00:00:00 2001
From: anand <anekkunt@redhat.com>
Date: Wed, 18 Nov 2015 16:13:46 +0530
Subject: [PATCH 13/52] glusterd: fix info file checksum mismatch during
upgrade
Peers move to the Rejected state when upgrading from RHS 2.1 to RHGS 3.1.2
due to a checksum mismatch.
Label: DOWNSTREAM ONLY
Change-Id: Ifea6b7dfe8477c7f17eefc5ca87ced58aaa21c84
Signed-off-by: anand <anekkunt@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/61774
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: Atin Mukherjee <amukherj@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-store.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index 51ca3d1..fb52957 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -1009,10 +1009,18 @@ glusterd_volume_exclude_options_write(int fd, glusterd_volinfo_t *volinfo)
goto out;
}
- snprintf(buf, sizeof(buf), "%d", volinfo->op_version);
- ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_OP_VERSION, buf);
- if (ret)
- goto out;
+ if (conf->op_version >= GD_OP_VERSION_RHS_3_0) {
+ snprintf (buf, sizeof (buf), "%d", volinfo->op_version);
+ ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_VOL_OP_VERSION, buf);
+ if (ret)
+ goto out;
+
+ snprintf (buf, sizeof (buf), "%d", volinfo->client_op_version);
+ ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_VOL_CLIENT_OP_VERSION,
+ buf);
+ if (ret)
+ goto out;
+ }
snprintf(buf, sizeof(buf), "%d", volinfo->client_op_version);
ret = gf_store_save_value(fd, GLUSTERD_STORE_KEY_VOL_CLIENT_OP_VERSION,
--
1.8.3.1
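
The values written above end up in each volume's info file under /var/lib/glusterd, and the checksum that peers compare is derived from it. A hedged way to inspect both on a node; VOLNAME is a placeholder, and the key names shown are assumptions based on the store keys referenced in the diff:

    # op-version keys recorded for a volume
    grep -E '^(op-version|client-op-version)=' /var/lib/glusterd/vols/VOLNAME/info

    # checksum that peers compare during the handshake
    cat /var/lib/glusterd/vols/VOLNAME/cksum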


@@ -0,0 +1,72 @@
From f76d2370160c50a1f59d08a03a444254c289da60 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Fri, 7 Dec 2018 16:18:07 +0530
Subject: [PATCH 14/52] build: spec file conflict resolution
Missed conflict resolution for removing references to gluster.conf.example,
as mentioned in the patch titled
"packaging: gratuitous dependencies on rsyslog-mm{count,jsonparse}" by Kaleb.
References to hook scripts S31ganesha-start.sh and S31ganesha-reset.sh got
lost in the downstream-only patch conflict resolution.
Commented out the blanket reference to %{_sharedstatedir}/glusterd/* in the
%files server section to avoid rpmbuild warnings about multiple references
to hook scripts and other files under /var/lib/glusterd.
Label: DOWNSTREAM ONLY
Change-Id: I9d409f1595ab985ed9f79d9d4f4298877609ba17
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/70535
Reviewed-by: Rajesh Joseph <rjoseph@redhat.com>
Tested-by: Rajesh Joseph <rjoseph@redhat.com>
---
glusterfs.spec.in | 21 +--------------------
1 file changed, 1 insertion(+), 20 deletions(-)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index a1ff6e0..8c57f57 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -96,9 +96,7 @@
# disable server components forcefully as rhel <= 6
%if ( 0%{?rhel} )
-%if ( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" )
-%global _without_server %{nil}
-%else
+%if (!(( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" )))
%global _without_server --without-server
%endif
%endif
@@ -836,23 +834,6 @@ install -D -p -m 0644 extras/glusterfs-georep-logrotate \
%{buildroot}%{_sysconfdir}/logrotate.d/glusterfs-georep
%endif
-%if ( 0%{!?_without_syslog:1} )
-%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
-install -D -p -m 0644 extras/gluster-rsyslog-7.2.conf \
- %{buildroot}%{_sysconfdir}/rsyslog.d/gluster.conf.example
-%endif
-
-%if ( 0%{?rhel} && 0%{?rhel} == 6 )
-install -D -p -m 0644 extras/gluster-rsyslog-5.8.conf \
- %{buildroot}%{_sysconfdir}/rsyslog.d/gluster.conf.example
-%endif
-
-%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
-install -D -p -m 0644 extras/logger.conf.example \
- %{buildroot}%{_sysconfdir}/glusterfs/logger.conf.example
-%endif
-%endif
-
%if ( 0%{!?_without_server:1} )
touch %{buildroot}%{_sharedstatedir}/glusterd/glusterd.info
touch %{buildroot}%{_sharedstatedir}/glusterd/options
--
1.8.3.1


@@ -0,0 +1,198 @@
From 3d0e09400dc21dbb5f76fd9ca4bfce3edad0d626 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Fri, 14 Oct 2016 12:53:27 +0530
Subject: [PATCH 15/52] build: randomize temp file names in pretrans scriptlets
Security issue CVE-2015-1795 mentions the possibility of a file name
spoofing attack on the %pretrans server scriptlet.
Since %pretrans scriptlets are executed only for server builds, we can
use os.tmpname() to randomize temporary file names for all %pretrans
scriptlets using this mechanism.
Label: DOWNSTREAM ONLY
Change-Id: Ic82433897432794b6d311d836355aa4bad886369
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/86187
Reviewed-by: Siddharth Sharma <siddharth@redhat.com>
Reviewed-by: Niels de Vos <ndevos@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
glusterfs.spec.in | 84 +++++++++++++++++++++++++++++++------------------------
1 file changed, 48 insertions(+), 36 deletions(-)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 8c57f57..3a98822 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1549,9 +1549,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1590,9 +1591,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-api_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1631,9 +1633,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-api-devel_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1672,9 +1675,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-cli_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1712,9 +1716,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-client-xlators_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1752,9 +1757,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-devel_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1793,9 +1799,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-fuse_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1835,9 +1842,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-geo-replication_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1877,9 +1885,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-libs_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1919,9 +1928,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-rdma_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -1962,9 +1972,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-resource-agents_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
@@ -2004,9 +2015,10 @@ if [ $? -eq 0 ]; then
fi
]]
--- rpm in RHEL5 does not have os.tmpname()
--- io.tmpfile() can not be resolved to a filename to pass to bash :-/
-tmpname = "/tmp/glusterfs-server_pretrans_" .. os.date("%s")
+-- Since we run pretrans scripts only for RPMs built for a server build,
+-- we can now use os.tmpname() since it is available on RHEL6 and later
+-- platforms which are server platforms.
+tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
--
1.8.3.1
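
The point of the change above is to stop deriving the temporary path from the current time, which an attacker can predict. The shell equivalent of the before/after, shown purely for illustration (the patch itself uses Lua's os.tmpname()):

    # Before: predictable name, open to symlink/name-spoofing attacks
    tmpname=/tmp/glusterfs_pretrans_$(date +%s)

    # After: mktemp picks an unpredictable name and creates the file exclusively
    tmpname=$(mktemp /tmp/glusterfs_pretrans_XXXXXX)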


@@ -0,0 +1,42 @@
From c283f15ac9bfb1c98ce95ed0000ebed81cd3b318 Mon Sep 17 00:00:00 2001
From: Poornima G <pgurusid@redhat.com>
Date: Wed, 26 Apr 2017 14:07:58 +0530
Subject: [PATCH 16/52] glusterd, parallel-readdir: Change the op-version of
parallel-readdir to 31100
Issue: Downstream 3.2 was released with op-version 31001; the parallel-readdir
feature was released upstream in 3.10 and hence with op-version 31000.
With this, parallel-readdir would be allowed in 3.2 clusters/clients as well,
but 3.2 didn't have the parallel-readdir feature backported.
Fix:
Increase the op-version of the parallel-readdir feature, only in downstream,
to 31100 (the highest 3.3 op-version).
Label: DOWNSTREAM ONLY
Change-Id: I2640520985627f3a1cb4fb96e28350f8bb9b146c
Signed-off-by: Poornima G <pgurusid@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/104403
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: Atin Mukherjee <amukherj@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index d07fc10..a31ecda 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2718,7 +2718,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.option = "parallel-readdir",
.value = "off",
.type = DOC,
- .op_version = GD_OP_VERSION_3_10_0,
+ .op_version = GD_OP_VERSION_3_11_0,
.validate_fn = validate_parallel_readdir,
.description = "If this option is enabled, the readdir operation "
"is performed in parallel on all the bricks, thus "
--
1.8.3.1


@@ -0,0 +1,37 @@
From 5d3315a53611f23a69f88bc8266448e258e2e10f Mon Sep 17 00:00:00 2001
From: Samikshan Bairagya <sbairagy@redhat.com>
Date: Mon, 10 Jul 2017 11:54:52 +0530
Subject: [PATCH 17/52] glusterd: Revert op-version for
"cluster.max-brick-per-process"
The op-version for the "cluster.max-brick-per-process" option was
set to 3.12.0 in the upstream patch and was backported here:
https://code.engineering.redhat.com/gerrit/#/c/111799. This commit
reverts the op-version for this option to 3.11.1 instead.
Label: DOWNSTREAM ONLY
Change-Id: I23639cef43d41915eea0394d019b1e0796a99d7b
Signed-off-by: Samikshan Bairagya <sbairagy@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/111804
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index a31ecda..9a6fe9f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2794,7 +2794,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
{.key = GLUSTERD_BRICKMUX_LIMIT_KEY,
.voltype = "mgmt/glusterd",
.value = GLUSTERD_BRICKMUX_LIMIT_DFLT_VALUE,
- .op_version = GD_OP_VERSION_3_12_0,
+ .op_version = GD_OP_VERSION_3_11_1,
.validate_fn = validate_mux_limit,
.type = GLOBAL_DOC,
.description = "This option can be used to limit the number of brick "
--
1.8.3.1


@@ -0,0 +1,56 @@
From 539626a64e5b8cfe05d42f5398073e8a57644073 Mon Sep 17 00:00:00 2001
From: Samikshan Bairagya <sbairagy@redhat.com>
Date: Wed, 9 Aug 2017 14:32:59 +0530
Subject: [PATCH 18/52] cli: Add message for user before modifying
brick-multiplex option
Users should be notified that the brick-multiplexing feature is
supported only for container workloads (CNS/CRS). It should also be
made known to users that it is advisable to either have all volumes
in the stopped state or have no bricks running before modifying the
"brick-multiplex" option. This commit makes sure these messages
are displayed to the user before brick-multiplexing is enabled or
disabled.
Label: DOWNSTREAM ONLY
Change-Id: Ic40294b26c691ea03185c4d1fce840ef23f95718
Signed-off-by: Samikshan Bairagya <sbairagy@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/114793
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
cli/src/cli-cmd-parser.c | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index d9913f6..f148c59 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -1698,6 +1698,24 @@ cli_cmd_volume_set_parse(struct cli_state *state, const char **words,
}
}
+ if ((strcmp (key, "cluster.brick-multiplex") == 0)) {
+ question = "Brick-multiplexing is supported only for "
+ "container workloads (CNS/CRS). Also it is "
+ "advised to make sure that either all "
+ "volumes are in stopped state or no bricks "
+ "are running before this option is modified."
+ "Do you still want to continue?";
+
+ answer = cli_cmd_get_confirmation (state, question);
+ if (GF_ANSWER_NO == answer) {
+ gf_log ("cli", GF_LOG_ERROR, "Operation "
+ "cancelled, exiting");
+ *op_errstr = gf_strdup ("Aborted by user.");
+ ret = -1;
+ goto out;
+ }
+ }
+
ret = dict_set_int32(dict, "count", wordcount - 3);
if (ret)
--
1.8.3.1
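A sketch of the resulting interaction, assuming the prompt wording added above (question text abbreviated):

$ gluster volume set all cluster.brick-multiplex on
Brick-multiplexing is supported only for container workloads (CNS/CRS). ...
Do you still want to continue? (y/n) y
volume set: success

# Scripts that cannot answer interactively can pre-feed the confirmation,
# which is exactly what a later patch in this series does for a regression test:
$ yes | gluster volume set all cluster.brick-multiplex on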

View File

@@ -0,0 +1,99 @@
From 8a3035bf612943694a3cd1c6a857bd009e84f55d Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Tue, 10 Oct 2017 09:58:24 +0530
Subject: [PATCH 19/52] build: launch glusterd upgrade after all new bits are
installed
Problem:
glusterd upgrade mode needs new bits from glusterfs-rdma, which is
optional and causes the dependency graph to break since it is
not tied into the glusterfs-server requirements
Solution:
Run glusterd upgrade mode after all new bits are installed,
i.e. in the %posttrans server section (a condensed sketch follows this patch)
Label: DOWNSTREAM ONLY
Change-Id: I356e02d0bf0eaaef43c20ce07b388262f63093a4
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/120094
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Raghavendra Talur <rtalur@redhat.com>
---
glusterfs.spec.in | 51 +++++++++++++++++++++++++++++----------------------
1 file changed, 29 insertions(+), 22 deletions(-)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 3a98822..208a82d 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -946,28 +946,6 @@ fi
%firewalld_reload
%endif
-pidof -c -o %PPID -x glusterd &> /dev/null
-if [ $? -eq 0 ]; then
- kill -9 `pgrep -f gsyncd.py` &> /dev/null
-
- killall --wait glusterd &> /dev/null
- glusterd --xlator-option *.upgrade=on -N
-
- #Cleaning leftover glusterd socket file which is created by glusterd in
- #rpm_script_t context.
- rm -f %{_rundir}/glusterd.socket
-
- # glusterd _was_ running, we killed it, it exited after *.upgrade=on,
- # so start it again
- %service_start glusterd
-else
- glusterd --xlator-option *.upgrade=on -N
-
- #Cleaning leftover glusterd socket file which is created by glusterd in
- #rpm_script_t context.
- rm -f %{_rundir}/glusterd.socket
-fi
-exit 0
%endif
##-----------------------------------------------------------------------------
@@ -2027,6 +2005,35 @@ os.remove(tmpname)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
+
+%posttrans server
+pidof -c -o %PPID -x glusterd &> /dev/null
+if [ $? -eq 0 ]; then
+ kill -9 `pgrep -f gsyncd.py` &> /dev/null
+
+ killall --wait -SIGTERM glusterd &> /dev/null
+
+ if [ "$?" != "0" ]; then
+ echo "killall failed while killing glusterd"
+ fi
+
+ glusterd --xlator-option *.upgrade=on -N
+
+ #Cleaning leftover glusterd socket file which is created by glusterd in
+ #rpm_script_t context.
+ rm -rf /var/run/glusterd.socket
+
+ # glusterd _was_ running, we killed it, it exited after *.upgrade=on,
+ # so start it again
+ %service_start glusterd
+else
+ glusterd --xlator-option *.upgrade=on -N
+
+ #Cleaning leftover glusterd socket file which is created by glusterd in
+ #rpm_script_t context.
+ rm -rf /var/run/glusterd.socket
+fi
+
%endif
%changelog
--
1.8.3.1
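A condensed sketch of what the relocated scriptlet does once every new package is unpacked; this mirrors the %posttrans block above and is not the literal spec text (%service_start typically expands to a systemctl or service call):

# if glusterd was running before the upgrade:
kill -9 $(pgrep -f gsyncd.py)                # stop geo-rep workers
killall --wait -SIGTERM glusterd             # stop the old daemon
glusterd --xlator-option *.upgrade=on -N     # regenerate volfiles with the new bits, then exit
rm -rf /var/run/glusterd.socket              # drop the socket created under rpm_script_t
%service_start glusterd                      # restart, since it had been running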

View File

@@ -0,0 +1,38 @@
From 968e5e698a070f9e6905a86c9c8338c36fcfa339 Mon Sep 17 00:00:00 2001
From: moagrawa <moagrawa@redhat.com>
Date: Mon, 15 Jan 2018 18:21:27 +0530
Subject: [PATCH 20/52] spec: unpackaged files found for RHEL-7 client build
Problem: unpackaged files found for the RHEL-7 client build
Solution: Update glusterfs.spec.in to exclude the unpackaged files
Label: DOWNSTREAM ONLY
Change-Id: I761188a6a8447105b53bf3334ded963c645cab5b
Signed-off-by: moagrawa <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/127758
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Milind Changire <mchangir@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 2 ++
1 file changed, 2 insertions(+)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 208a82d..ec06176 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1141,8 +1141,10 @@ exit 0
%exclude %{_sbindir}/gluster-setgfid2path
%exclude %{_sbindir}/glusterd
%exclude %{_sbindir}/snap_scheduler.py
+%if ( 0%{?_with_systemd:1} )
%exclude %{_datadir}/glusterfs/scripts/control-cpu-load.sh
%exclude %{_datadir}/glusterfs/scripts/control-mem.sh
+%endif
%exclude %{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh
%exclude %{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh
%exclude %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
--
1.8.3.1
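A quick way to reproduce the client-only packaging check; the rpmbuild switch is an assumption based on the %{?_without_server} conditionals this spec already uses:

# Build only the client bits and let rpmbuild flag any unpackaged files
rpmbuild -ba glusterfs.spec --without server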

View File

@@ -0,0 +1,66 @@
From fbc7f0e5ac8c292b865a8e02ceed2efa101d145c Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Mon, 12 Mar 2018 19:47:11 +0530
Subject: [PATCH 21/52] cli/glusterfsd: remove copyright information
There's no point of dumping upstream copyright information in --version.
Label: DOWNSTREAM ONLY
Change-Id: I3a10e30878698e1d53082936bbf22bca560a3896
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/132445
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Milind Changire <mchangir@redhat.com>
---
cli/src/cli.c | 11 +----------
glusterfsd/src/glusterfsd.c | 11 +----------
2 files changed, 2 insertions(+), 20 deletions(-)
diff --git a/cli/src/cli.c b/cli/src/cli.c
index 84ce0f4..08f117e 100644
--- a/cli/src/cli.c
+++ b/cli/src/cli.c
@@ -65,16 +65,7 @@ extern int connected;
/* using argp for command line parsing */
const char *argp_program_version =
- "" PACKAGE_NAME " " PACKAGE_VERSION
- "\nRepository revision: " GLUSTERFS_REPOSITORY_REVISION
- "\n"
- "Copyright (c) 2006-2016 Red Hat, Inc. "
- "<https://www.gluster.org/>\n"
- "GlusterFS comes with ABSOLUTELY NO WARRANTY.\n"
- "It is licensed to you under your choice of the GNU Lesser\n"
- "General Public License, version 3 or any later version (LGPLv3\n"
- "or later), or the GNU General Public License, version 2 (GPLv2),\n"
- "in all cases as published by the Free Software Foundation.";
+ PACKAGE_NAME" "PACKAGE_VERSION;
const char *argp_program_bug_address = "<" PACKAGE_BUGREPORT ">";
struct rpc_clnt *global_quotad_rpc;
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index 5d46b3d..c983882 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -86,16 +86,7 @@ static char argp_doc[] =
"--volfile-server=SERVER [MOUNT-POINT]\n"
"--volfile=VOLFILE [MOUNT-POINT]";
const char *argp_program_version =
- "" PACKAGE_NAME " " PACKAGE_VERSION
- "\nRepository revision: " GLUSTERFS_REPOSITORY_REVISION
- "\n"
- "Copyright (c) 2006-2016 Red Hat, Inc. "
- "<https://www.gluster.org/>\n"
- "GlusterFS comes with ABSOLUTELY NO WARRANTY.\n"
- "It is licensed to you under your choice of the GNU Lesser\n"
- "General Public License, version 3 or any later version (LGPLv3\n"
- "or later), or the GNU General Public License, version 2 (GPLv2),\n"
- "in all cases as published by the Free Software Foundation.";
+ PACKAGE_NAME" "PACKAGE_VERSION;
const char *argp_program_bug_address = "<" PACKAGE_BUGREPORT ">";
static error_t
--
1.8.3.1
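With argp_program_version reduced to name and version, the banner shrinks to a single line; a sketch of the expected output (the exact version string depends on the build):

$ gluster --version
glusterfs 6.0
$ glusterfsd --version
glusterfs 6.0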

View File

@@ -0,0 +1,40 @@
From 00db0c44d109e6f3e394487bf76ff28ba2eee7de Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Thu, 15 Mar 2018 12:56:02 +0530
Subject: [PATCH 22/52] cli: Remove upstream doc reference
...that is displayed while creating replica 2 volumes.
Label: DOWNSTREAM ONLY
Change-Id: I16b45c8ad3a33cdd2a464d84f51d006d8f568b23
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/132744
Reviewed-by: Karthik Subrahmanya <ksubrahm@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
cli/src/cli-cmd-parser.c | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index f148c59..760a10c 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -606,11 +606,8 @@ cli_cmd_volume_create_parse(struct cli_state *state, const char **words,
"Replica 2 volumes are prone"
" to split-brain. Use "
"Arbiter or Replica 3 to "
- "avoid this. See: "
- "http://docs.gluster.org/en/latest/"
- "Administrator%20Guide/"
- "Split%20brain%20and%20ways%20to%20deal%20with%20it/."
- "\nDo you still want to "
+ "avoid this.\n"
+ "Do you still want to "
"continue?\n";
answer = cli_cmd_get_confirmation(state, question);
if (GF_ANSWER_NO == answer) {
--
1.8.3.1

View File

@@ -0,0 +1,148 @@
From 421743b7cfa6a249544f6abb4cca5a612bd20ea1 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Tue, 11 Dec 2018 16:21:43 +0530
Subject: [PATCH 23/52] hooks: remove selinux hooks
Label: DOWNSTREAM ONLY
Change-Id: I810466a0ca99ab21f5a8eac8cdffbb18333d10ad
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/135800
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Jiffin Thottan <jthottan@redhat.com>
Reviewed-by: Milind Changire <mchangir@redhat.com>
---
configure.ac | 20 --------------------
extras/hook-scripts/Makefile.am | 2 +-
extras/hook-scripts/create/Makefile.am | 1 -
extras/hook-scripts/create/post/Makefile.am | 8 --------
extras/hook-scripts/delete/Makefile.am | 1 -
extras/hook-scripts/delete/pre/Makefile.am | 8 --------
glusterfs.spec.in | 2 --
7 files changed, 1 insertion(+), 41 deletions(-)
delete mode 100644 extras/hook-scripts/create/Makefile.am
delete mode 100644 extras/hook-scripts/create/post/Makefile.am
delete mode 100644 extras/hook-scripts/delete/Makefile.am
delete mode 100644 extras/hook-scripts/delete/pre/Makefile.am
diff --git a/configure.ac b/configure.ac
index 2f341de..0d06f5a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -214,10 +214,6 @@ AC_CONFIG_FILES([Makefile
extras/hook-scripts/add-brick/Makefile
extras/hook-scripts/add-brick/pre/Makefile
extras/hook-scripts/add-brick/post/Makefile
- extras/hook-scripts/create/Makefile
- extras/hook-scripts/create/post/Makefile
- extras/hook-scripts/delete/Makefile
- extras/hook-scripts/delete/pre/Makefile
extras/hook-scripts/start/Makefile
extras/hook-scripts/start/post/Makefile
extras/hook-scripts/set/Makefile
@@ -909,21 +905,6 @@ fi
AM_CONDITIONAL([BUILD_CLOUDSYNC], [test "x$BUILD_CLOUDSYNC" = "xyes"])
dnl end cloudsync section
-dnl SELinux feature enablement
-case $host_os in
- linux*)
- AC_ARG_ENABLE([selinux],
- AC_HELP_STRING([--disable-selinux],
- [Disable SELinux features]),
- [USE_SELINUX="${enableval}"], [USE_SELINUX="yes"])
- ;;
- *)
- USE_SELINUX=no
- ;;
-esac
-AM_CONDITIONAL(USE_SELINUX, test "x${USE_SELINUX}" = "xyes")
-dnl end of SELinux feature enablement
-
AC_CHECK_HEADERS([execinfo.h], [have_backtrace=yes])
if test "x${have_backtrace}" = "xyes"; then
AC_DEFINE(HAVE_BACKTRACE, 1, [define if found backtrace])
@@ -1599,7 +1580,6 @@ echo "XML output : $BUILD_XML_OUTPUT"
echo "Unit Tests : $BUILD_UNITTEST"
echo "Track priv ports : $TRACK_PRIVPORTS"
echo "POSIX ACLs : $BUILD_POSIX_ACLS"
-echo "SELinux features : $USE_SELINUX"
echo "firewalld-config : $BUILD_FIREWALLD"
echo "Events : $BUILD_EVENTS"
echo "EC dynamic support : $EC_DYNAMIC_SUPPORT"
diff --git a/extras/hook-scripts/Makefile.am b/extras/hook-scripts/Makefile.am
index 26059d7..771b37e 100644
--- a/extras/hook-scripts/Makefile.am
+++ b/extras/hook-scripts/Makefile.am
@@ -1,5 +1,5 @@
EXTRA_DIST = S40ufo-stop.py S56glusterd-geo-rep-create-post.sh
-SUBDIRS = add-brick create delete set start stop reset
+SUBDIRS = add-brick set start stop reset
scriptsdir = $(GLUSTERD_WORKDIR)/hooks/1/gsync-create/post/
if USE_GEOREP
diff --git a/extras/hook-scripts/create/Makefile.am b/extras/hook-scripts/create/Makefile.am
deleted file mode 100644
index b083a91..0000000
--- a/extras/hook-scripts/create/Makefile.am
+++ /dev/null
@@ -1 +0,0 @@
-SUBDIRS = post
diff --git a/extras/hook-scripts/create/post/Makefile.am b/extras/hook-scripts/create/post/Makefile.am
deleted file mode 100644
index fd1892e..0000000
--- a/extras/hook-scripts/create/post/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-EXTRA_DIST = S10selinux-label-brick.sh
-
-scriptsdir = $(GLUSTERD_WORKDIR)/hooks/1/create/post/
-if WITH_SERVER
-if USE_SELINUX
-scripts_SCRIPTS = S10selinux-label-brick.sh
-endif
-endif
diff --git a/extras/hook-scripts/delete/Makefile.am b/extras/hook-scripts/delete/Makefile.am
deleted file mode 100644
index c98a05d..0000000
--- a/extras/hook-scripts/delete/Makefile.am
+++ /dev/null
@@ -1 +0,0 @@
-SUBDIRS = pre
diff --git a/extras/hook-scripts/delete/pre/Makefile.am b/extras/hook-scripts/delete/pre/Makefile.am
deleted file mode 100644
index 4fbfbe7..0000000
--- a/extras/hook-scripts/delete/pre/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-EXTRA_DIST = S10selinux-del-fcontext.sh
-
-scriptsdir = $(GLUSTERD_WORKDIR)/hooks/1/delete/pre/
-if WITH_SERVER
-if USE_SELINUX
-scripts_SCRIPTS = S10selinux-del-fcontext.sh
-endif
-endif
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index ec06176..db50b8e 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1413,7 +1413,6 @@ exit 0
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre/S28Quota-enable-root-xattr-heal.sh
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/post
- %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/post/S10selinux-label-brick.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/pre
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/post
@@ -1422,7 +1421,6 @@ exit 0
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/post
%{_sharedstatedir}/glusterd/hooks/1/delete/post/S57glusterfind-delete-post
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/pre
- %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/pre/S10selinux-del-fcontext.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/pre
--
1.8.3.1

View File

@@ -0,0 +1,50 @@
From 79c19f0c6d02228aa8cf4b9299afeb7e0b2ad0da Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Mon, 16 Apr 2018 17:44:19 +0530
Subject: [PATCH 24/52] glusterd: Make localtime-logging option invisible in
downstream
Label: DOWNSTREAM ONLY
Change-Id: Ie631edebb7e19152392bfd3c369a96e88796bd75
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/135754
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 2 +-
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index dd3f9eb..cbbb5d9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -86,7 +86,7 @@ glusterd_all_vol_opts valid_all_vol_opts[] = {
* TBD: Discuss the default value for this. Maybe this should be a
* dynamic value depending on the memory specifications per node */
{GLUSTERD_BRICKMUX_LIMIT_KEY, GLUSTERD_BRICKMUX_LIMIT_DFLT_VALUE},
- {GLUSTERD_LOCALTIME_LOGGING_KEY, "disable"},
+ /*{GLUSTERD_LOCALTIME_LOGGING_KEY, "disable"},*/
{GLUSTERD_DAEMON_LOG_LEVEL_KEY, "INFO"},
{NULL},
};
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 9a6fe9f..fed2864 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2850,10 +2850,11 @@ struct volopt_map_entry glusterd_volopt_map[] = {
"to have enabled when clients and/or bricks support "
"SELinux."},
{.key = GLUSTERD_LOCALTIME_LOGGING_KEY,
+ /*{.key = GLUSTERD_LOCALTIME_LOGGING_KEY,
.voltype = "mgmt/glusterd",
.type = GLOBAL_DOC,
.op_version = GD_OP_VERSION_3_12_0,
- .validate_fn = validate_boolean},
+ .validate_fn = validate_boolean},*/
{.key = GLUSTERD_DAEMON_LOG_LEVEL_KEY,
.voltype = "mgmt/glusterd",
.type = GLOBAL_NO_DOC,
--
1.8.3.1

View File

@@ -0,0 +1,45 @@
From 12ae1a9a62c2c94af44f55b03575ab8806bd22ee Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Mon, 23 Apr 2018 13:16:30 +0530
Subject: [PATCH 25/52] build: make RHGS version available for server
Make /usr/share/glusterfs/release available in the glusterfs-server package.
This file contains the RHGS release number for the release.
Label: DOWNSTREAM ONLY
Change-Id: I7485f77cfb8ca7f0f8363a20124900ae9ae8a528
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/137139
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
glusterfs.spec.in | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index db50b8e..bdb47ba 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -862,6 +862,10 @@ install -p -m 0744 -D extras/command-completion/gluster.bash \
%{buildroot}%{_sysconfdir}/bash_completion.d/gluster
%endif
+%if ( 0%{!?_without_server:1} )
+echo "RHGS 3.5" > %{buildroot}%{_datadir}/glusterfs/release
+%endif
+
%clean
rm -rf %{buildroot}
@@ -1452,6 +1456,7 @@ exit 0
# Extra utility script
%dir %{_libexecdir}/glusterfs
+ %{_datadir}/glusterfs/release
%dir %{_datadir}/glusterfs/scripts
%{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
%if ( 0%{?_with_systemd:1} )
--
1.8.3.1
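Once glusterfs-server is installed, the marker can be read directly; path and content follow the spec changes above:

$ cat /usr/share/glusterfs/release
RHGS 3.5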

View File

@@ -0,0 +1,68 @@
From a3538a7d1fb7674acdf0934847f4004d8fbc4709 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Tue, 11 Dec 2018 17:57:50 +0530
Subject: [PATCH 26/52] glusterd: Introduce daemon-log-level cluster wide
option
This option, applicable to the node-level daemons, can be very helpful in
controlling the log level of these services. Please note that any daemon
which is started prior to setting the specific value of this option (if
not INFO) will need to go through a restart for this change to take
effect (a usage sketch follows this patch).
> upstream patch : https://review.gluster.org/#/c/20442/
Please note there's a difference in the downstream delta. The op-version
against this option is already tagged as 3_11_2 in RHGS 3.3.1 and hence
the same is retained. This patch carries the DOWNSTREAM ONLY label because of that difference.
Label: DOWNSTREAM ONLY
IMPORTANT:
This patch only sets .op_version in glusterd-volume-set.c to
GD_OP_VERSION_3_11_2 as per Atin's recommendation on
Tue, Dec 11, 2018 5:46pm IST
>Change-Id: I7f6d2620bab2b094c737f5cc816bc093e9c9c4c9
>fixes: bz#1597473
>Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Change-Id: I7f6d2620bab2b094c737f5cc816bc093e9c9c4c9
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/143137
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sanju Rakonde <srakonde@redhat.com>
---
libglusterfs/src/glusterfs/globals.h | 2 ++
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
index b9da872..a278f18 100644
--- a/libglusterfs/src/glusterfs/globals.h
+++ b/libglusterfs/src/glusterfs/globals.h
@@ -104,6 +104,8 @@
#define GD_OP_VERSION_3_11_1 31101 /* Op-version for GlusterFS 3.11.1 */
+#define GD_OP_VERSION_3_11_2 31102 /* Op-version for GlusterFS 3.11.2 */
+
#define GD_OP_VERSION_3_12_0 31200 /* Op-version for GlusterFS 3.12.0 */
#define GD_OP_VERSION_3_12_2 31202 /* Op-version for GlusterFS 3.12.2 */
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index fed2864..84f2705 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2859,7 +2859,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.voltype = "mgmt/glusterd",
.type = GLOBAL_NO_DOC,
.value = "INFO",
- .op_version = GD_OP_VERSION_5_0},
+ .op_version = GD_OP_VERSION_3_11_2},
{.key = "debug.delay-gen",
.voltype = "debug/delay-gen",
.option = "!debug",
--
1.8.3.1
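A hypothetical usage sketch: the option is cluster wide, so it is set against "all"; daemons that were already running when the value changed keep the old level until they are restarted:

# Raise the log level for the node-level daemons (self-heal daemon, quotad, ...)
gluster volume set all cluster.daemon-log-level DEBUG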

View File

@@ -0,0 +1,50 @@
From 9be3c4745b161f1815f77cd19b550ac9795845f5 Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Thu, 20 Sep 2018 22:01:05 +0530
Subject: [PATCH 27/52] glusterd: change op-version of fips-mode-rchecksum
..to GD_OP_VERSION_3_13_3 since GD_OP_VERSION_4_0_0 is not present in
rhgs-3.4.1
Label: DOWNSTREAM ONLY
Change-Id: I759272748177d174b15123faffc2305f7a5ec58f
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/150714
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/glusterfs/globals.h | 2 ++
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
index a278f18..4a82889 100644
--- a/libglusterfs/src/glusterfs/globals.h
+++ b/libglusterfs/src/glusterfs/globals.h
@@ -118,6 +118,8 @@
#define GD_OP_VERSION_3_13_2 31302 /* Op-version for GlusterFS 3.13.2 */
+#define GD_OP_VERSION_3_13_3 31303 /* Op-version for GlusterFS 3.13.3 */
+
#define GD_OP_VERSION_4_0_0 40000 /* Op-version for GlusterFS 4.0.0 */
#define GD_OP_VERSION_4_1_0 40100 /* Op-version for GlusterFS 4.1.0 */
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 84f2705..2bd0a9c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2329,7 +2329,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.key = "storage.fips-mode-rchecksum",
.type = NO_DOC,
.voltype = "storage/posix",
- .op_version = GD_OP_VERSION_4_0_0,
+ .op_version = GD_OP_VERSION_3_13_3,
},
{
.option = "force-create-mode",
--
1.8.3.1
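With the op-version lowered to 31303, the option becomes settable on rhgs-3.4.1 clusters; a hedged usage sketch (the volume name is a placeholder):

# Enable FIPS-compliant rchecksum on an existing volume
gluster volume set <volname> storage.fips-mode-rchecksum on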

View File

@@ -0,0 +1,52 @@
From 64ffcf770c5c0087f8937b5235ed0ad5b0efe7f2 Mon Sep 17 00:00:00 2001
From: Krutika Dhananjay <kdhananj@redhat.com>
Date: Wed, 12 Sep 2018 21:41:35 +0530
Subject: [PATCH 28/52] glusterd: Reset op-version for
"features.shard-deletion-rate"
The op-version for the "features.shard-deletion-rate" option was set to
4.2.0 in the upstream patch and backported at
e75be952569eb69325d5f505f7ab94aace31be52.
This commit reverts the op-version for this option to 3.13.4 (GD_OP_VERSION_3_13_4).
Label: DOWNSTREAM ONLY
Change-Id: Ie3d12f3119ad7a4b40d81bd8bd6ed591658e8371
Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/154865
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/glusterfs/globals.h | 2 ++
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
index 4a82889..4d95f75 100644
--- a/libglusterfs/src/glusterfs/globals.h
+++ b/libglusterfs/src/glusterfs/globals.h
@@ -120,6 +120,8 @@
#define GD_OP_VERSION_3_13_3 31303 /* Op-version for GlusterFS 3.13.3 */
+#define GD_OP_VERSION_3_13_4 31304 /* Op-version for GlusterFS 3.13.4 */
+
#define GD_OP_VERSION_4_0_0 40000 /* Op-version for GlusterFS 4.0.0 */
#define GD_OP_VERSION_4_1_0 40100 /* Op-version for GlusterFS 4.1.0 */
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 2bd0a9c..2f3271f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2552,7 +2552,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
},
{.key = "features.shard-deletion-rate",
.voltype = "features/shard",
- .op_version = GD_OP_VERSION_5_0,
+ .op_version = GD_OP_VERSION_3_13_4,
.flags = VOLOPT_FLAG_CLIENT_OPT},
{
.key = "features.scrub-throttle",
--
1.8.3.1

View File

@@ -0,0 +1,39 @@
From b504052d003aa41fbd44eec286d1733b6f2a168e Mon Sep 17 00:00:00 2001
From: Krutika Dhananjay <kdhananj@redhat.com>
Date: Tue, 6 Nov 2018 18:44:55 +0530
Subject: [PATCH 29/52] glusterd: Reset op-version for
"features.shard-lru-limit"
The op-version for the "features.shard-lru-limit" option was set to
4.2.0 in the upstream patch and backported at
41e7e33c6512e98a1567e5a5532d3898b59cfa98
This commit reverts the op-version for this option to 3.13.4.
Label: DOWNSTREAM ONLY
Change-Id: I7d3ed6b373851267c78fc6815a83bee2c0906413
Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/155127
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Xavi Hernandez <xhernandez@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 2f3271f..4bf89a6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2546,7 +2546,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
{
.key = "features.shard-lru-limit",
.voltype = "features/shard",
- .op_version = GD_OP_VERSION_5_0,
+ .op_version = GD_OP_VERSION_3_13_4,
.flags = VOLOPT_FLAG_CLIENT_OPT,
.type = NO_DOC,
},
--
1.8.3.1

View File

@@ -0,0 +1,42 @@
From 1d2d29396ee25f09c7d379a992ac9bd244e89c39 Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Thu, 13 Dec 2018 14:28:57 +0530
Subject: [PATCH 30/52] selinux/glusterd : add "features.selinux" to
glusterd-volume-set.c
updates: #593
Change-Id: I38675ba4d47c8ba7f94cfb4734692683ddb3dcfd
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 4bf89a6..11265bf 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -1203,10 +1203,9 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.voltype = "performance/io-threads",
.option = "pass-through",
.op_version = GD_OP_VERSION_4_1_0},
- {.key = "performance.least-rate-limit",
- .voltype = "performance/io-threads",
- .op_version = 1
- },
+ {.key = "performance.least-rate-limit",
+ .voltype = "performance/io-threads",
+ .op_version = 1},
/* Other perf xlators' options */
{.key = "performance.io-cache-pass-through",
@@ -2849,7 +2848,6 @@ struct volopt_map_entry glusterd_volopt_map[] = {
"trusted.gluster.selinux on the bricks. Recommended "
"to have enabled when clients and/or bricks support "
"SELinux."},
- {.key = GLUSTERD_LOCALTIME_LOGGING_KEY,
/*{.key = GLUSTERD_LOCALTIME_LOGGING_KEY,
.voltype = "mgmt/glusterd",
.type = GLOBAL_DOC,
--
1.8.3.1

View File

@@ -0,0 +1,34 @@
From c3176144e531e22bfe97d0fef3b0e3e449fb1d32 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Mon, 16 Apr 2018 13:47:12 +0530
Subject: [PATCH 31/52] glusterd: turn off selinux feature in downstream
In RHGS 3.4.0 selinux feature was never meant to be qualified.
Label: DOWNSTREAM ONLY
Change-Id: I0cd5eb5207a757c8b6ef789980c061f211410bd5
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/135716
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 11265bf..d1244e4 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -2842,7 +2842,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
{.key = VKEY_FEATURES_SELINUX,
.voltype = "features/selinux",
.type = NO_DOC,
- .value = "on",
+ .value = "off",
.op_version = GD_OP_VERSION_3_11_0,
.description = "Convert security.selinux xattrs to "
"trusted.gluster.selinux on the bricks. Recommended "
--
1.8.3.1

View File

@@ -0,0 +1,29 @@
From bfa7055c3901b34a49f7933ea9edcf6465843de1 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Wed, 23 Jan 2019 14:22:00 +0530
Subject: [PATCH 32/52] glusterd: update gd-op-version to 3_7_0
Label: DOWNSTREAM ONLY
Change-Id: Ia6456134cd7e544a415692d09cd1ccbb6e02dd82
Signed-off-by: Milind Changire <mchangir@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-rebalance.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index 6365b6e..e20e3c4 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -1174,7 +1174,7 @@ glusterd_op_stage_rebalance(dict_t *dict, char **op_errstr)
* 'force'
*/
ret = glusterd_check_client_op_version_support(
- volname, GD_OP_VERSION_3_6_0, NULL);
+ volname, GD_OP_VERSION_3_7_0, NULL);
if (ret) {
ret = gf_asprintf(op_errstr,
"Volume %s has one or "
--
1.8.3.1

View File

@@ -0,0 +1,83 @@
From 52e2d75c2c8e32d2e4f69840e34d21b39279284a Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Thu, 13 Dec 2018 12:46:56 +0530
Subject: [PATCH 33/52] build: add missing explicit package dependencies
Add explicit dependencies on glusterfs-libs and other packages to the subpackages that need them.
This is an Errata Tool whine.
Label: DOWNSTREAM ONLY
Change-Id: Ieaadb6e4ffa84d1811aa740f7891855568ecbcbb
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/158501
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index bdb47ba..9cd4372 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -323,6 +323,7 @@ and client framework.
Summary: GlusterFS api library
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
+Requires: %{name}-libs%{?_isa} = %{version}-%{release}
%description api
GlusterFS is a distributed file-system capable of scaling to several
@@ -340,6 +341,7 @@ Summary: Development Libraries
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-devel%{?_isa} = %{version}-%{release}
Requires: libacl-devel
+Requires: %{name}-api%{?_isa} = %{version}-%{release}
%description api-devel
GlusterFS is a distributed file-system capable of scaling to several
@@ -391,6 +393,8 @@ Requires: %{name}%{?_isa} = %{version}-%{release}
%if ( 0%{!?_without_extra_xlators:1} )
Requires: %{name}-extra-xlators%{?_isa} = %{version}-%{release}
%endif
+Requires: %{name}-libs%{?_isa} = %{version}-%{release}
+Requires: %{name}-server%{?_isa} = %{version}-%{release}
%description devel
GlusterFS is a distributed file-system capable of scaling to several
@@ -435,6 +439,7 @@ Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
Obsoletes: %{name}-client < %{version}-%{release}
Provides: %{name}-client = %{version}-%{release}
+Requires: %{name}-libs%{?_isa} = %{version}-%{release}
%description fuse
GlusterFS is a distributed file-system capable of scaling to several
@@ -459,6 +464,7 @@ Requires: python%{_pythonver}-gluster = %{version}-%{release}
Requires: rsync
Requires: util-linux
+Requires: %{name}-libs%{?_isa} = %{version}-%{release}
%description geo-replication
GlusterFS is a distributed file-system capable of scaling to several
@@ -536,6 +542,7 @@ BuildRequires: libibverbs-devel
BuildRequires: librdmacm-devel >= 1.0.15
%endif
Requires: %{name}%{?_isa} = %{version}-%{release}
+Requires: %{name}-libs%{?_isa} = %{version}-%{release}
%description rdma
GlusterFS is a distributed file-system capable of scaling to several
@@ -664,6 +671,7 @@ This package provides the glusterfs thin-arbiter translator.
%package client-xlators
Summary: GlusterFS client-side translators
+Requires: %{name}-libs%{?_isa} = %{version}-%{release}
%description client-xlators
GlusterFS is a distributed file-system capable of scaling to several
--
1.8.3.1
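A quick sanity check that the rebuilt subpackages now carry the explicit dependency; the package file name and version-release shown are placeholders:

$ rpm -qp --requires glusterfs-api-6.0-1.el7.x86_64.rpm | grep glusterfs-libs
glusterfs-libs(x86-64) = 6.0-1.el7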

View File

@@ -0,0 +1,59 @@
From 463a920541a7579f2407f22597e4014494422804 Mon Sep 17 00:00:00 2001
From: Sanju Rakonde <srakonde@redhat.com>
Date: Mon, 17 Dec 2018 14:07:01 +0530
Subject: [PATCH 34/52] glusterd: introduce a new op-version for rhgs-3.4.3
This patch introduces a new op-version 31305 for rhgs-3.4.3 and
sets the max op-version to 31305.
For migrating profile commands (commit e68845ff7018e5d81d7979684b18e6eda449b088)
we used GD_OP_VERSION_6_0 in upstream. We are changing
it to GD_OP_VERSION_3_13_5 here.
Label: DOWNSTREAM ONLY
Change-Id: Ie3a05c70eb4e406889c468343f54e999b1218f19
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/158795
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
libglusterfs/src/glusterfs/globals.h | 2 ++
xlators/mgmt/glusterd/src/glusterd-handler.c | 4 ++--
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
index 4d95f75..6642ba0 100644
--- a/libglusterfs/src/glusterfs/globals.h
+++ b/libglusterfs/src/glusterfs/globals.h
@@ -122,6 +122,8 @@
#define GD_OP_VERSION_3_13_4 31304 /* Op-version for GlusterFS 3.13.4 */
+#define GD_OP_VERSION_3_13_5 31305 /* Op-version for GlusterFS 3.13.5 */
+
#define GD_OP_VERSION_4_0_0 40000 /* Op-version for GlusterFS 4.0.0 */
#define GD_OP_VERSION_4_1_0 40100 /* Op-version for GlusterFS 4.1.0 */
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 387643d..de44af7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -3085,12 +3085,12 @@ __glusterd_handle_cli_profile_volume(rpcsvc_request_t *req)
goto out;
}
- if (conf->op_version < GD_OP_VERSION_6_0) {
+ if (conf->op_version < GD_OP_VERSION_3_13_5) {
gf_msg_debug(this->name, 0,
"The cluster is operating at "
"version less than %d. Falling back "
"to op-sm framework.",
- GD_OP_VERSION_6_0);
+ GD_OP_VERSION_3_13_5);
ret = glusterd_op_begin(req, cli_op, dict, err_str, sizeof(err_str));
glusterd_friend_sm();
glusterd_op_sm();
--
1.8.3.1
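After all peers run the rhgs-3.4.3 bits, the administrator raises the cluster to this new maximum so the mgmt_v3 path for profile (and, per the next patch, rebalance) is taken; the value matches the define added above:

gluster volume set all cluster.op-version 31305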

View File

@@ -0,0 +1,41 @@
From 254033a80d85460675c921c272fb94bb7e9f67d4 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Tue, 18 Dec 2018 17:57:25 +0530
Subject: [PATCH 35/52] glusterd: tag rebalance mgmt_v3 command to op-version
31305
In upstream, migrating the rebalance command is tagged to op-version 60000,
but in downstream the latest op-version is 31305.
Label: DOWNSTREAM ONLY
Change-Id: I30bbad3efca29bf42b9a750581eb1aebc8a30ff9
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/158943
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-rebalance.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index e20e3c4..ed5ded5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -573,12 +573,12 @@ __glusterd_handle_defrag_volume(rpcsvc_request_t *req)
} else
op = GD_OP_REBALANCE;
- if (priv->op_version < GD_OP_VERSION_6_0) {
+ if (priv->op_version < GD_OP_VERSION_3_13_5) {
gf_msg_debug(this->name, 0,
"The cluster is operating at "
"version less than %d. Falling back "
"to op-sm framework.",
- GD_OP_VERSION_6_0);
+ GD_OP_VERSION_3_13_5);
ret = glusterd_op_begin(req, op, dict, msg, sizeof(msg));
glusterd_friend_sm();
glusterd_op_sm();
--
1.8.3.1

View File

@@ -0,0 +1,47 @@
From d6458c40706d8886187bd9c2016087a3a1eee882 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Wed, 19 Dec 2018 13:17:42 +0530
Subject: [PATCH 36/52] build: add conditional dependency on server for devel
Add a conditional dependency on the server package for glusterfs-devel
Label: DOWNSTREAM ONLY
Change-Id: Icc45df3db137dbc03d240c1ac774b5c8735c5f2f
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/159030
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 9cd4372..9db5a34 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -394,7 +394,9 @@ Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-extra-xlators%{?_isa} = %{version}-%{release}
%endif
Requires: %{name}-libs%{?_isa} = %{version}-%{release}
+%if ( 0%{!?_without_server:1} )
Requires: %{name}-server%{?_isa} = %{version}-%{release}
+%endif
%description devel
GlusterFS is a distributed file-system capable of scaling to several
@@ -2067,6 +2069,11 @@ fi
* Thu Feb 21 2019 Jiffin Tony Thottan <jthottan@redhat.com>
- Obsoleting gluster-gnfs package
+* Wed Dec 19 2018 Milind Changire <mchangir@redhat.com>
+- Add explicit package dependencies (#1656357)
+- Remove absolute paths from spec file (#1350745)
+- Do not package crypt.so for FIPS compliance (#1653224)
+
* Wed Nov 28 2018 Krutika Dhananjay <kdhananj@redhat.com>
- Install /var/lib/glusterd/groups/distributed-virt by default
--
1.8.3.1

View File

@@ -0,0 +1,35 @@
From 7e0342e0d01204f136b0bd28931a6313ea216649 Mon Sep 17 00:00:00 2001
From: Sanju Rakonde <srakonde@redhat.com>
Date: Wed, 6 Feb 2019 19:06:45 +0530
Subject: [PATCH 37/52] cli: change the warning message
This patch changes the warning message the user gets when enabling brick
multiplexing, so that it refers to OCS instead of CNS/CRS.
Label: DOWNSTREAM ONLY
Change-Id: Id5fd87955d5a692f8e57560245f8b0cf9882e1da
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/162405
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
cli/src/cli-cmd-parser.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index 760a10c..541dc62 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -1697,7 +1697,7 @@ cli_cmd_volume_set_parse(struct cli_state *state, const char **words,
if ((strcmp (key, "cluster.brick-multiplex") == 0)) {
question = "Brick-multiplexing is supported only for "
- "container workloads (CNS/CRS). Also it is "
+ "OCS converged or independent mode. Also it is "
"advised to make sure that either all "
"volumes are in stopped state or no bricks "
"are running before this option is modified."
--
1.8.3.1

View File

@@ -0,0 +1,230 @@
From a577dd0a3cbf435681f10d095a0dca0595c6a354 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Sat, 9 Feb 2019 14:01:28 +0530
Subject: [PATCH 38/52] spec: avoid creation of temp file in lua script
Avoiding the creation of a temporary file to execute a bash script from a
Lua scriptlet improves install-time security.
Label: DOWNSTREAM ONLY
Change-Id: Ie5b9035f292402b18dea768aca8bc82a1e7fa615
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/162621
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 120 ++++++------------------------------------------------
1 file changed, 12 insertions(+), 108 deletions(-)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 9db5a34..df8d116 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1542,15 +1542,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1584,15 +1576,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1626,15 +1610,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1668,15 +1644,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1709,15 +1677,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1750,15 +1710,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1792,15 +1744,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1835,15 +1779,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1878,15 +1814,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1921,15 +1849,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -1965,15 +1885,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
@@ -2008,15 +1920,7 @@ if [ $? -eq 0 ]; then
fi
]]
--- Since we run pretrans scripts only for RPMs built for a server build,
--- we can now use os.tmpname() since it is available on RHEL6 and later
--- platforms which are server platforms.
-tmpname = os.tmpname()
-tmpfile = io.open(tmpname, "w")
-tmpfile:write(script)
-tmpfile:close()
-ok, how, val = os.execute("/bin/bash " .. tmpname)
-os.remove(tmpname)
+ok, how, val = os.execute(script)
if not (ok == 0) then
error("Detected running glusterfs processes", ok)
end
--
1.8.3.1

View File

@@ -0,0 +1,61 @@
From ec707e099d4e4338d1ea21560d367b02e6339532 Mon Sep 17 00:00:00 2001
From: Sunil Kumar Acharya <sheggodu@redhat.com>
Date: Mon, 1 Apr 2019 16:16:47 +0530
Subject: [PATCH 39/52] cli: fix query to user during brick-mux selection
Label: DOWNSTREAM ONLY
Change-Id: I59472066b917ea2b23de72bcd91dc3e275d5e055
Signed-off-by: Sunil Kumar Acharya <sheggodu@redhat.com>
---
cli/src/cli-cmd-parser.c | 33 +++++++++++++++++----------------
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index 541dc62..d9ccba1 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -1693,23 +1693,24 @@ cli_cmd_volume_set_parse(struct cli_state *state, const char **words,
goto out;
}
}
- }
-
- if ((strcmp (key, "cluster.brick-multiplex") == 0)) {
- question = "Brick-multiplexing is supported only for "
- "OCS converged or independent mode. Also it is "
- "advised to make sure that either all "
- "volumes are in stopped state or no bricks "
- "are running before this option is modified."
- "Do you still want to continue?";
- answer = cli_cmd_get_confirmation (state, question);
- if (GF_ANSWER_NO == answer) {
- gf_log ("cli", GF_LOG_ERROR, "Operation "
- "cancelled, exiting");
- *op_errstr = gf_strdup ("Aborted by user.");
- ret = -1;
- goto out;
+ if ((strcmp (key, "cluster.brick-multiplex") == 0)) {
+ question =
+ "Brick-multiplexing is supported only for "
+ "OCS converged or independent mode. Also it is "
+ "advised to make sure that either all "
+ "volumes are in stopped state or no bricks "
+ "are running before this option is modified."
+ "Do you still want to continue?";
+
+ answer = cli_cmd_get_confirmation (state, question);
+ if (GF_ANSWER_NO == answer) {
+ gf_log ("cli", GF_LOG_ERROR, "Operation "
+ "cancelled, exiting");
+ *op_errstr = gf_strdup ("Aborted by user.");
+ ret = -1;
+ goto out;
+ }
}
}
--
1.8.3.1

View File

@@ -0,0 +1,136 @@
From 79c74009892804419dce264399f3fde357d5b1c3 Mon Sep 17 00:00:00 2001
From: Susant Palai <spalai@redhat.com>
Date: Tue, 2 Apr 2019 11:07:03 +0530
Subject: [PATCH 40/52] build: Remove unsupported test cases failing
consistently
The following two test cases fail consistently in downstream regression runs.
Hence they are removed, as they are not supported downstream.
tests/basic/cloudsync-sanity.t
tests/bugs/distribute/bug-882278.t
Label: DOWNSTREAM ONLY
Change-Id: Ie4b506639a017ec9910e44df1b721d9bfadf07b3
Signed-off-by: Susant Palai <spalai@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166662
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
Tested-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/basic/cloudsync-sanity.t | 22 ------------
tests/bugs/distribute/bug-882278.t | 73 --------------------------------------
2 files changed, 95 deletions(-)
delete mode 100644 tests/basic/cloudsync-sanity.t
delete mode 100755 tests/bugs/distribute/bug-882278.t
diff --git a/tests/basic/cloudsync-sanity.t b/tests/basic/cloudsync-sanity.t
deleted file mode 100644
index 3cf719d..0000000
--- a/tests/basic/cloudsync-sanity.t
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6,7,8,9};
-TEST $CLI volume set $V0 features.cloudsync enable;
-TEST $CLI volume start $V0;
-
-## Mount FUSE
-TEST $GFS -s $H0 --volfile-id $V0 $M1;
-
-# This test covers lookup, mkdir, mknod, symlink, link, rename,
-# create operations
-TEST $(dirname $0)/rpc-coverage.sh $M1
-
-cleanup;
diff --git a/tests/bugs/distribute/bug-882278.t b/tests/bugs/distribute/bug-882278.t
deleted file mode 100755
index 8cb5147..0000000
--- a/tests/bugs/distribute/bug-882278.t
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-cleanup
-
-# Is there a good reason to require --fqdn elsewhere? It's worse than useless
-# here.
-H0=$(hostname -s)
-
-function recreate {
- # The rm is necessary so we don't get fooled by leftovers from old runs.
- rm -rf $1 && mkdir -p $1
-}
-
-function count_lines {
- grep "$1" $2/* | wc -l
-}
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-## Start and create a volume
-TEST recreate ${B0}/${V0}-0
-TEST recreate ${B0}/${V0}-1
-TEST $CLI volume create $V0 $H0:$B0/${V0}-{0,1}
-TEST $CLI volume set $V0 cluster.nufa on
-
-function volinfo_field()
-{
- local vol=$1;
- local field=$2;
-
- $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
-}
-
-
-## Verify volume is created
-EXPECT "$V0" volinfo_field $V0 'Volume Name';
-EXPECT 'Created' volinfo_field $V0 'Status';
-
-## Start volume and verify
-TEST $CLI volume start $V0;
-EXPECT 'Started' volinfo_field $V0 'Status';
-
-## Mount native
-special_option="--xlator-option ${V0}-dht.local-volume-name=${V0}-client-1"
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $special_option $M0
-
-## Create a bunch of test files.
-for i in $(seq 0 99); do
- echo hello > $(printf $M0/file%02d $i)
-done
-
-## Make sure the files went to the right place. There might be link files in
-## the other brick, but they won't have any contents.
-EXPECT "0" count_lines hello ${B0}/${V0}-0
-EXPECT "100" count_lines hello ${B0}/${V0}-1
-
-if [ "$EXIT_EARLY" = "1" ]; then
- exit 0;
-fi
-
-## Finish up
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-TEST $CLI volume stop $V0;
-EXPECT 'Stopped' volinfo_field $V0 'Status';
-
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
--
1.8.3.1

View File

@@ -0,0 +1,43 @@
From c8f0ac9b429e1ff73a3e87247193c35c66212540 Mon Sep 17 00:00:00 2001
From: Shwetha K Acharya <sacharya@redhat.com>
Date: Tue, 2 Apr 2019 12:06:53 +0530
Subject: [PATCH 41/52] tests/geo-rep: Build failed in Jenkins for test
bug-1600145.t
Problem: the ((strcmp (key, "cluster.brick-multiplex") == 0))
comparison in cli/src/cli-cmd-parser.c expects
a yes or no confirmation from the cli, which is not handled
in bug-1600145.t, causing the test to wait until timeout and
then fail.
Solution: Pipe yes into
`gluster v set all cluster.brick-multiplex on` in bug-1600145.t
Label: DOWNSTREAM ONLY
Change-Id: I1a6c2a992b65380cea145fd1c46d22ec1251c77e
Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166694
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
Tested-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
Reviewed-by: Sunny Kumar <sunkumar@redhat.com>
---
tests/00-geo-rep/bug-1600145.t | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/00-geo-rep/bug-1600145.t b/tests/00-geo-rep/bug-1600145.t
index 1d38bf9..359bc4f 100644
--- a/tests/00-geo-rep/bug-1600145.t
+++ b/tests/00-geo-rep/bug-1600145.t
@@ -29,7 +29,7 @@ slave_mnt=$M1
##create_and_start_master_volume
TEST $CLI volume create $GMV0 replica 2 $H0:$B0/${GMV0}{1,2};
-gluster v set all cluster.brick-multiplex on
+yes | gluster v set all cluster.brick-multiplex on
TEST $CLI volume start $GMV0
##create_and_start_slave_volume
--
1.8.3.1

View File

@@ -0,0 +1,123 @@
From f25a92028ecc2018953a6375bba43a21d3a93566 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawa@redhat.com>
Date: Thu, 4 Apr 2019 16:18:51 +0530
Subject: [PATCH 42/52] spec: (client|server) Builds are failing on rhel-6
Problem: 1) For the server rpm the gluster build throws an
"undefined reference to `dlclose`" error on RHEL 6
2) For the server rpm the build reports
"File not found" for rot-13.so and symlink-cache.so
3) For the client rpms the build throws
"File not found" errors for all files listed with
%exclude under the without_server check
Solution: 1) For the server rpm add the LIB_DL link in the Makefile
and remove the references to rot-13.so and symlink-cache.so
from glusterfs.spec.in
2) Remove the exclude-files list, as those files are not
being built
Label: DOWNSTREAM ONLY
Change-Id: I2b41604cbc8525b91231b0c5caee588c5d5d6b08
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166962
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 54 -----------------------------------
xlators/mgmt/glusterd/src/Makefile.am | 2 +-
2 files changed, 1 insertion(+), 55 deletions(-)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index df8d116..7c7f7c0 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1097,72 +1097,18 @@ exit 0
%{_tmpfilesdir}/gluster.conf
%endif
%if ( 0%{?_without_extra_xlators:1} )
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/rot-13.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/performance/symlink-cache.so
%endif
%if ( 0%{?_without_regression_tests:1} )
%exclude %{_datadir}/glusterfs/run-tests.sh
%exclude %{_datadir}/glusterfs/tests
%endif
%if 0%{?_without_server:1}
-%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-5.8.conf
-%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-7.2.conf
-%exclude %{_sysconfdir}/glusterfs/glusterd.vol
-%exclude %{_sysconfdir}/glusterfs/glusterfs-georep-logrotate
-%exclude %{_sysconfdir}/glusterfs/glusterfs-logrotate
-%exclude %{_sysconfdir}/glusterfs/group-db-workload
-%exclude %{_sysconfdir}/glusterfs/group-distributed-virt
-%exclude %{_sysconfdir}/glusterfs/group-gluster-block
-%exclude %{_sysconfdir}/glusterfs/group-metadata-cache
-%exclude %{_sysconfdir}/glusterfs/group-nl-cache
-%exclude %{_sysconfdir}/glusterfs/group-virt.example
-%exclude %{_sysconfdir}/glusterfs/logger.conf.example
-%exclude %{_sysconfdir}/rsyslog.d/gluster.conf.example
-%exclude %{_prefix}/bin/glusterfind
-%exclude %{_prefix}/lib/firewalld/services/glusterfs.xml
-%exclude %{_prefix}/lib/systemd/system/glusterd.service
-%exclude %{_prefix}/lib/systemd/system/glusterfssharedstorage.service
-%exclude %{_prefix}/lib/tmpfiles.d/gluster.conf
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix-locks.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quotad.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/sdfs.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/selinux.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/thin-arbiter.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt/glusterd.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server.so
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage/posix.so
-%exclude %{_libexecdir}/glusterfs/*
-%exclude %{_sbindir}/conf.py
-%exclude %{_sbindir}/gcron.py
-%exclude %{_sbindir}/gf_attach
-%exclude %{_sbindir}/gfind_missing_files
-%exclude %{_sbindir}/glfsheal
-%exclude %{_sbindir}/gluster
-%exclude %{_sbindir}/gluster-setgfid2path
-%exclude %{_sbindir}/glusterd
-%exclude %{_sbindir}/snap_scheduler.py
%if ( 0%{?_with_systemd:1} )
%exclude %{_datadir}/glusterfs/scripts/control-cpu-load.sh
%exclude %{_datadir}/glusterfs/scripts/control-mem.sh
%endif
-%exclude %{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh
-%exclude %{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh
-%exclude %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
-%exclude %{_sharedstatedir}/glusterd/*
%endif
%files api
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
index 6d09e37..c8dd8e3 100644
--- a/xlators/mgmt/glusterd/src/Makefile.am
+++ b/xlators/mgmt/glusterd/src/Makefile.am
@@ -6,7 +6,7 @@ xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/mgmt
glusterd_la_CPPFLAGS = $(AM_CPPFLAGS) \
-DFILTERDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/filter\" \
-DXLATORDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator\"
-glusterd_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+glusterd_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS) $(LIB_DL)
glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \
glusterd-op-sm.c glusterd-utils.c glusterd-rpc-ops.c \
glusterd-store.c glusterd-handshake.c glusterd-pmap.c \
--
1.8.3.1
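
The undefined-reference error above is a linking problem rather than a code problem: on RHEL 6 the dl* symbols live in libdl, so any object that calls dlclose() must be linked with $(LIB_DL)/-ldl. A minimal, self-contained sketch of that pattern (plain C, illustrative only, not glusterd code):

/* dl_sketch.c -- build with: gcc dl_sketch.c -ldl
 * On RHEL 6 the dlopen/dlsym/dlclose symbols live in libdl, so omitting
 * -ldl ($(LIB_DL) in the Makefile) fails at link time with
 * "undefined reference to `dlclose'". */
#include <dlfcn.h>
#include <stdio.h>

int main(void)
{
    /* any shared object will do; libm is used purely as an example */
    void *handle = dlopen("libm.so.6", RTLD_NOW);
    if (!handle) {
        fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
    }

    double (*cosine)(double) = (double (*)(double))dlsym(handle, "cos");
    if (cosine)
        printf("cos(0) = %f\n", cosine(0.0));

    dlclose(handle); /* the call the server build could not resolve */
    return 0;
}

Leaving out -ldl when compiling this sketch reproduces the same link failure that the Makefile.am change fixes.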

View File

@ -0,0 +1,137 @@
From 416dfc70ef87400e1ddfd70e5b6e512d330b54a6 Mon Sep 17 00:00:00 2001
From: Sheetal Pamecha <sheetal.pamecha08@gmail.com>
Date: Tue, 2 Apr 2019 23:25:11 +0530
Subject: [PATCH 43/52] inode: don't dump the whole table to CLI
Dumping the whole inode table to the screen serves no
purpose. The CLI should report only top-level details, and
anyone who wants to debug further should take a
'statedump' to get the full details.
Patch on upstream master: https://review.gluster.org/#/c/glusterfs/+/22347/
BUG: 1578703
Change-Id: Ie7e7f5a67c1606e3c18ce21ee6df6c7e4550c211
Signed-off-by: Sheetal Pamecha <spamecha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166768
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
cli/src/cli-rpc-ops.c | 23 ++++++++++++++++++++++-
libglusterfs/src/inode.c | 13 +++++++++++++
2 files changed, 35 insertions(+), 1 deletion(-)
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 78043cd..12e7fcc 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -7606,15 +7606,24 @@ cli_print_volume_status_itables(dict_t *dict, char *prefix)
uint32_t active_size = 0;
uint32_t lru_size = 0;
uint32_t purge_size = 0;
+ uint32_t lru_limit = 0;
int i = 0;
GF_ASSERT(dict);
GF_ASSERT(prefix);
+ snprintf(key, sizeof(key), "%s.lru_limit", prefix);
+ ret = dict_get_uint32(dict, key, &lru_limit);
+ if (ret)
+ goto out;
+ cli_out("LRU limit : %u", lru_limit);
+
snprintf(key, sizeof(key), "%s.active_size", prefix);
ret = dict_get_uint32(dict, key, &active_size);
if (ret)
goto out;
+
+#ifdef DEBUG
if (active_size != 0) {
cli_out("Active inodes:");
cli_out("%-40s %14s %14s %9s", "GFID", "Lookups", "Ref", "IA type");
@@ -7626,10 +7635,16 @@ cli_print_volume_status_itables(dict_t *dict, char *prefix)
}
cli_out(" ");
+#else
+ cli_out("Active Inodes : %u", active_size);
+
+#endif
snprintf(key, sizeof(key), "%s.lru_size", prefix);
ret = dict_get_uint32(dict, key, &lru_size);
if (ret)
goto out;
+
+#ifdef DEBUG
if (lru_size != 0) {
cli_out("LRU inodes:");
cli_out("%-40s %14s %14s %9s", "GFID", "Lookups", "Ref", "IA type");
@@ -7640,11 +7655,15 @@ cli_print_volume_status_itables(dict_t *dict, char *prefix)
cli_print_volume_status_inode_entry(dict, key);
}
cli_out(" ");
+#else
+ cli_out("LRU Inodes : %u", lru_size);
+#endif
snprintf(key, sizeof(key), "%s.purge_size", prefix);
ret = dict_get_uint32(dict, key, &purge_size);
if (ret)
goto out;
+#ifdef DEBUG
if (purge_size != 0) {
cli_out("Purged inodes:");
cli_out("%-40s %14s %14s %9s", "GFID", "Lookups", "Ref", "IA type");
@@ -7654,7 +7673,9 @@ cli_print_volume_status_itables(dict_t *dict, char *prefix)
snprintf(key, sizeof(key), "%s.purge%d", prefix, i);
cli_print_volume_status_inode_entry(dict, key);
}
-
+#else
+ cli_out("Purge Inodes : %u", purge_size);
+#endif
out:
return;
}
diff --git a/libglusterfs/src/inode.c b/libglusterfs/src/inode.c
index 87f74e0..96ddea5 100644
--- a/libglusterfs/src/inode.c
+++ b/libglusterfs/src/inode.c
@@ -2598,6 +2598,11 @@ inode_table_dump_to_dict(inode_table_t *itable, char *prefix, dict_t *dict)
if (ret)
return;
+ snprintf(key, sizeof(key), "%s.itable.lru_limit", prefix);
+ ret = dict_set_uint32(dict, key, itable->lru_limit);
+ if (ret)
+ goto out;
+
snprintf(key, sizeof(key), "%s.itable.active_size", prefix);
ret = dict_set_uint32(dict, key, itable->active_size);
if (ret)
@@ -2613,6 +2618,13 @@ inode_table_dump_to_dict(inode_table_t *itable, char *prefix, dict_t *dict)
if (ret)
goto out;
+#ifdef DEBUG
+ /* Dumping inode details in dictionary and sending it to CLI is not
+ required as when a developer (or support team) asks for this command
+ output, they just want to get top level detail of inode table.
+ If one wants to debug, let them take statedump and debug, this
+ wouldn't be available in CLI during production setup.
+ */
list_for_each_entry(inode, &itable->active, list)
{
snprintf(key, sizeof(key), "%s.itable.active%d", prefix, count++);
@@ -2632,6 +2644,7 @@ inode_table_dump_to_dict(inode_table_t *itable, char *prefix, dict_t *dict)
snprintf(key, sizeof(key), "%s.itable.purge%d", prefix, count++);
inode_dump_to_dict(inode, key, dict);
}
+#endif
out:
pthread_mutex_unlock(&itable->lock);
--
1.8.3.1
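
A small standalone sketch of the output pattern the patch above applies in cli-rpc-ops.c: production builds print only the top-level counts, while the per-entry dump compiles in only under DEBUG (names and layout here are illustrative, not the actual CLI code):

/* itable_summary.c -- gcc itable_summary.c          (summary counts only)
 *                     gcc -DDEBUG itable_summary.c  (full per-entry dump)  */
#include <stdio.h>

struct inode_entry {
    const char *gfid;
    unsigned lookups;
};

static void print_itable(const struct inode_entry *active,
                         unsigned active_size, unsigned lru_limit)
{
    printf("LRU limit     : %u\n", lru_limit);
#ifdef DEBUG
    printf("Active inodes:\n");
    for (unsigned i = 0; i < active_size; i++)
        printf("  %-40s %u lookups\n", active[i].gfid, active[i].lookups);
#else
    /* production builds get the count only; full detail comes from statedump */
    (void)active;
    printf("Active Inodes : %u\n", active_size);
#endif
}

int main(void)
{
    struct inode_entry e[] = {{"11111111-aaaa-4bde-9047-000000000001", 3},
                              {"22222222-bbbb-4bde-9047-000000000002", 1}};
    print_itable(e, 2, 65536);
    return 0;
}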

View File

@ -0,0 +1,360 @@
From bc6588890ce94101a63b861178cf38db5549d8a8 Mon Sep 17 00:00:00 2001
From: Ashish Pandey <aspandey@redhat.com>
Date: Wed, 28 Nov 2018 11:22:52 +0530
Subject: [PATCH 44/52] cluster/ec: Don't enqueue an entry if it is already
healing
Problem:
1 - heal-wait-qlength is 128 by default. If shd is disabled
and files need healing, client-side heal is required;
accessing those files triggers the heal.
However, it has been observed that a file can be enqueued
multiple times in the heal wait queue, which in turn fills
the queue and prevents other files from being enqueued.
2 - While a file is being healed and a write fop from the
mount comes in on that file, the write is sent to all the bricks,
including the healing one. At the end, version and size are updated
on all the bricks. However, the dirty flag is not unset on all the
bricks, even if this write fop succeeded on all of them.
After healing completes, this dirty flag remains set and never
gets cleaned up if SHD is disabled.
Solution:
1 - If an entry is already queued or going through the heal process,
don't enqueue the next client-side request to heal the same file.
2 - Unset dirty on all the bricks at the end if the fop has succeeded on
all the bricks, even if some of the bricks are going through heal.
backport of : https://review.gluster.org/#/c/glusterfs/+/21744/
Change-Id: Ia61ffe230c6502ce6cb934425d55e2f40dd1a727
BUG: 1600918
Signed-off-by: Ashish Pandey <aspandey@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166296
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/bugs/ec/bug-1236065.t | 1 -
xlators/cluster/ec/src/ec-common.c | 43 +++++++++------
xlators/cluster/ec/src/ec-common.h | 8 +++
xlators/cluster/ec/src/ec-heal.c | 104 +++++++++++++++++++++++++++++++-----
xlators/cluster/ec/src/ec-helpers.c | 1 +
xlators/cluster/ec/src/ec-types.h | 1 +
6 files changed, 127 insertions(+), 31 deletions(-)
diff --git a/tests/bugs/ec/bug-1236065.t b/tests/bugs/ec/bug-1236065.t
index 76d25d7..9181e73 100644
--- a/tests/bugs/ec/bug-1236065.t
+++ b/tests/bugs/ec/bug-1236065.t
@@ -85,7 +85,6 @@ TEST pidof glusterd
EXPECT "$V0" volinfo_field $V0 'Volume Name'
EXPECT 'Started' volinfo_field $V0 'Status'
EXPECT '7' online_brick_count
-
## cleanup
cd
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c
index 8d65670..5183680 100644
--- a/xlators/cluster/ec/src/ec-common.c
+++ b/xlators/cluster/ec/src/ec-common.c
@@ -313,14 +313,15 @@ ec_check_status(ec_fop_data_t *fop)
gf_msg(fop->xl->name, GF_LOG_WARNING, 0, EC_MSG_OP_FAIL_ON_SUBVOLS,
"Operation failed on %d of %d subvolumes.(up=%s, mask=%s, "
- "remaining=%s, good=%s, bad=%s)",
+ "remaining=%s, good=%s, bad=%s, %s)",
gf_bits_count(ec->xl_up & ~(fop->remaining | fop->good)), ec->nodes,
ec_bin(str1, sizeof(str1), ec->xl_up, ec->nodes),
ec_bin(str2, sizeof(str2), fop->mask, ec->nodes),
ec_bin(str3, sizeof(str3), fop->remaining, ec->nodes),
ec_bin(str4, sizeof(str4), fop->good, ec->nodes),
ec_bin(str5, sizeof(str5), ec->xl_up & ~(fop->remaining | fop->good),
- ec->nodes));
+ ec->nodes),
+ ec_msg_str(fop));
if (fop->use_fd) {
if (fop->fd != NULL) {
ec_fheal(NULL, fop->xl, -1, EC_MINIMUM_ONE, ec_heal_report, NULL,
@@ -2371,37 +2372,47 @@ ec_update_info(ec_lock_link_t *link)
uint64_t dirty[2] = {0, 0};
uint64_t size;
ec_t *ec = NULL;
+ uintptr_t mask;
lock = link->lock;
ctx = lock->ctx;
ec = link->fop->xl->private;
/* pre_version[*] will be 0 if have_version is false */
- version[0] = ctx->post_version[0] - ctx->pre_version[0];
- version[1] = ctx->post_version[1] - ctx->pre_version[1];
+ version[EC_DATA_TXN] = ctx->post_version[EC_DATA_TXN] -
+ ctx->pre_version[EC_DATA_TXN];
+ version[EC_METADATA_TXN] = ctx->post_version[EC_METADATA_TXN] -
+ ctx->pre_version[EC_METADATA_TXN];
size = ctx->post_size - ctx->pre_size;
/* If we set the dirty flag for update fop, we have to unset it.
* If fop has failed on some bricks, leave the dirty as marked. */
+
if (lock->unlock_now) {
+ if (version[EC_DATA_TXN]) {
+ /*A data fop will have difference in post and pre version
+ *and for data fop we send writes on healing bricks also */
+ mask = lock->good_mask | lock->healing;
+ } else {
+ mask = lock->good_mask;
+ }
/* Ensure that nodes are up while doing final
* metadata update.*/
- if (!(ec->node_mask & ~lock->good_mask) &&
- !(ec->node_mask & ~ec->xl_up)) {
- if (ctx->dirty[0] != 0) {
- dirty[0] = -1;
+ if (!(ec->node_mask & ~(mask)) && !(ec->node_mask & ~ec->xl_up)) {
+ if (ctx->dirty[EC_DATA_TXN] != 0) {
+ dirty[EC_DATA_TXN] = -1;
}
- if (ctx->dirty[1] != 0) {
- dirty[1] = -1;
+ if (ctx->dirty[EC_METADATA_TXN] != 0) {
+ dirty[EC_METADATA_TXN] = -1;
}
/*If everything is fine and we already
*have version xattr set on entry, there
*is no need to update version again*/
- if (ctx->pre_version[0]) {
- version[0] = 0;
+ if (ctx->pre_version[EC_DATA_TXN]) {
+ version[EC_DATA_TXN] = 0;
}
- if (ctx->pre_version[1]) {
- version[1] = 0;
+ if (ctx->pre_version[EC_METADATA_TXN]) {
+ version[EC_METADATA_TXN] = 0;
}
} else {
link->optimistic_changelog = _gf_false;
@@ -2410,8 +2421,8 @@ ec_update_info(ec_lock_link_t *link)
memset(ctx->dirty, 0, sizeof(ctx->dirty));
}
- if ((version[0] != 0) || (version[1] != 0) || (dirty[0] != 0) ||
- (dirty[1] != 0)) {
+ if ((version[EC_DATA_TXN] != 0) || (version[EC_METADATA_TXN] != 0) ||
+ (dirty[EC_DATA_TXN] != 0) || (dirty[EC_METADATA_TXN] != 0)) {
ec_update_size_version(link, version, size, dirty);
return _gf_true;
}
diff --git a/xlators/cluster/ec/src/ec-common.h b/xlators/cluster/ec/src/ec-common.h
index 115e147..54aaa77 100644
--- a/xlators/cluster/ec/src/ec-common.h
+++ b/xlators/cluster/ec/src/ec-common.h
@@ -190,4 +190,12 @@ ec_lock_unlocked(call_frame_t *frame, void *cookie, xlator_t *this,
void
ec_update_fd_status(fd_t *fd, xlator_t *xl, int child_index,
int32_t ret_status);
+gf_boolean_t
+ec_is_entry_healing(ec_fop_data_t *fop);
+void
+ec_set_entry_healing(ec_fop_data_t *fop);
+void
+ec_reset_entry_healing(ec_fop_data_t *fop);
+char *
+ec_msg_str(ec_fop_data_t *fop);
#endif /* __EC_COMMON_H__ */
diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
index eaf80e0..1ca12c1 100644
--- a/xlators/cluster/ec/src/ec-heal.c
+++ b/xlators/cluster/ec/src/ec-heal.c
@@ -103,6 +103,48 @@ ec_sh_key_match(dict_t *dict, char *key, data_t *val, void *mdata)
}
/* FOP: heal */
+void
+ec_set_entry_healing(ec_fop_data_t *fop)
+{
+ ec_inode_t *ctx = NULL;
+ loc_t *loc = NULL;
+
+ if (!fop)
+ return;
+
+ loc = &fop->loc[0];
+ LOCK(&loc->inode->lock);
+ {
+ ctx = __ec_inode_get(loc->inode, fop->xl);
+ if (ctx) {
+ ctx->heal_count += 1;
+ }
+ }
+ UNLOCK(&loc->inode->lock);
+}
+
+void
+ec_reset_entry_healing(ec_fop_data_t *fop)
+{
+ ec_inode_t *ctx = NULL;
+ loc_t *loc = NULL;
+ int32_t heal_count = 0;
+ if (!fop)
+ return;
+
+ loc = &fop->loc[0];
+ LOCK(&loc->inode->lock);
+ {
+ ctx = __ec_inode_get(loc->inode, fop->xl);
+ if (ctx) {
+ ctx->heal_count += -1;
+ heal_count = ctx->heal_count;
+ }
+ }
+ UNLOCK(&loc->inode->lock);
+ GF_ASSERT(heal_count >= 0);
+}
+
uintptr_t
ec_heal_check(ec_fop_data_t *fop, uintptr_t *pgood)
{
@@ -2507,17 +2549,6 @@ ec_heal_do(xlator_t *this, void *data, loc_t *loc, int32_t partial)
"Heal is not required for : %s ", uuid_utoa(loc->gfid));
goto out;
}
-
- msources = alloca0(ec->nodes);
- mhealed_sinks = alloca0(ec->nodes);
- ret = ec_heal_metadata(frame, ec, loc->inode, msources, mhealed_sinks);
- if (ret == 0) {
- mgood = ec_char_array_to_mask(msources, ec->nodes);
- mbad = ec_char_array_to_mask(mhealed_sinks, ec->nodes);
- } else {
- op_ret = -1;
- op_errno = -ret;
- }
sources = alloca0(ec->nodes);
healed_sinks = alloca0(ec->nodes);
if (IA_ISREG(loc->inode->ia_type)) {
@@ -2538,8 +2569,19 @@ ec_heal_do(xlator_t *this, void *data, loc_t *loc, int32_t partial)
op_ret = -1;
op_errno = -ret;
}
+ msources = alloca0(ec->nodes);
+ mhealed_sinks = alloca0(ec->nodes);
+ ret = ec_heal_metadata(frame, ec, loc->inode, msources, mhealed_sinks);
+ if (ret == 0) {
+ mgood = ec_char_array_to_mask(msources, ec->nodes);
+ mbad = ec_char_array_to_mask(mhealed_sinks, ec->nodes);
+ } else {
+ op_ret = -1;
+ op_errno = -ret;
+ }
out:
+ ec_reset_entry_healing(fop);
if (fop->cbks.heal) {
fop->cbks.heal(fop->req_frame, fop, fop->xl, op_ret, op_errno,
ec_char_array_to_mask(participants, ec->nodes),
@@ -2650,11 +2692,33 @@ ec_handle_healers_done(ec_fop_data_t *fop)
ec_launch_heal(ec, heal_fop);
}
+gf_boolean_t
+ec_is_entry_healing(ec_fop_data_t *fop)
+{
+ ec_inode_t *ctx = NULL;
+ int32_t heal_count = 0;
+ loc_t *loc = NULL;
+
+ loc = &fop->loc[0];
+
+ LOCK(&loc->inode->lock);
+ {
+ ctx = __ec_inode_get(loc->inode, fop->xl);
+ if (ctx) {
+ heal_count = ctx->heal_count;
+ }
+ }
+ UNLOCK(&loc->inode->lock);
+ GF_ASSERT(heal_count >= 0);
+ return heal_count;
+}
+
void
ec_heal_throttle(xlator_t *this, ec_fop_data_t *fop)
{
gf_boolean_t can_heal = _gf_true;
ec_t *ec = this->private;
+ ec_fop_data_t *fop_rel = NULL;
if (fop->req_frame == NULL) {
LOCK(&ec->lock);
@@ -2662,8 +2726,13 @@ ec_heal_throttle(xlator_t *this, ec_fop_data_t *fop)
if ((ec->background_heals > 0) &&
(ec->heal_wait_qlen + ec->background_heals) >
(ec->heal_waiters + ec->healers)) {
- list_add_tail(&fop->healer, &ec->heal_waiting);
- ec->heal_waiters++;
+ if (!ec_is_entry_healing(fop)) {
+ list_add_tail(&fop->healer, &ec->heal_waiting);
+ ec->heal_waiters++;
+ ec_set_entry_healing(fop);
+ } else {
+ fop_rel = fop;
+ }
fop = __ec_dequeue_heals(ec);
} else {
can_heal = _gf_false;
@@ -2673,8 +2742,12 @@ ec_heal_throttle(xlator_t *this, ec_fop_data_t *fop)
}
if (can_heal) {
- if (fop)
+ if (fop) {
+ if (fop->req_frame != NULL) {
+ ec_set_entry_healing(fop);
+ }
ec_launch_heal(ec, fop);
+ }
} else {
gf_msg_debug(this->name, 0,
"Max number of heals are "
@@ -2682,6 +2755,9 @@ ec_heal_throttle(xlator_t *this, ec_fop_data_t *fop)
ec_fop_set_error(fop, EBUSY);
ec_heal_fail(ec, fop);
}
+ if (fop_rel) {
+ ec_heal_done(0, NULL, fop_rel);
+ }
}
void
diff --git a/xlators/cluster/ec/src/ec-helpers.c b/xlators/cluster/ec/src/ec-helpers.c
index e6b0359..43f6e3b 100644
--- a/xlators/cluster/ec/src/ec-helpers.c
+++ b/xlators/cluster/ec/src/ec-helpers.c
@@ -717,6 +717,7 @@ __ec_inode_get(inode_t *inode, xlator_t *xl)
memset(ctx, 0, sizeof(*ctx));
INIT_LIST_HEAD(&ctx->heal);
INIT_LIST_HEAD(&ctx->stripe_cache.lru);
+ ctx->heal_count = 0;
value = (uint64_t)(uintptr_t)ctx;
if (__inode_ctx_set(inode, xl, &value) != 0) {
GF_FREE(ctx);
diff --git a/xlators/cluster/ec/src/ec-types.h b/xlators/cluster/ec/src/ec-types.h
index f3d63ca..6ae4a2b 100644
--- a/xlators/cluster/ec/src/ec-types.h
+++ b/xlators/cluster/ec/src/ec-types.h
@@ -171,6 +171,7 @@ struct _ec_inode {
gf_boolean_t have_config;
gf_boolean_t have_version;
gf_boolean_t have_size;
+ int32_t heal_count;
ec_config_t config;
uint64_t pre_version[2];
uint64_t post_version[2];
--
1.8.3.1
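
The heart of the fix above is the per-inode heal_count, checked under the inode lock before another client-side heal is enqueued. A minimal pthread-based sketch of that check-then-enqueue idea, using simplified stand-in types rather than the ec xlator structures:

/* heal_gate.c -- gcc heal_gate.c -lpthread */
#include <pthread.h>
#include <stdio.h>

struct inode_ctx {
    pthread_mutex_t lock;
    int heal_count; /* > 0 while a heal for this inode is queued or running */
};

/* returns 1 if the caller should enqueue a heal, 0 if one is already pending */
static int try_enqueue_heal(struct inode_ctx *ctx)
{
    int enqueue = 0;

    pthread_mutex_lock(&ctx->lock);
    if (ctx->heal_count == 0) {
        ctx->heal_count++; /* roughly ec_set_entry_healing() */
        enqueue = 1;
    }
    pthread_mutex_unlock(&ctx->lock);
    return enqueue;
}

static void heal_done(struct inode_ctx *ctx)
{
    pthread_mutex_lock(&ctx->lock);
    ctx->heal_count--; /* roughly ec_reset_entry_healing() */
    pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
    struct inode_ctx ctx = {PTHREAD_MUTEX_INITIALIZER, 0};

    printf("first request enqueued:  %d\n", try_enqueue_heal(&ctx)); /* 1 */
    printf("duplicate enqueued:      %d\n", try_enqueue_heal(&ctx)); /* 0 */
    heal_done(&ctx);
    printf("after completion, retry: %d\n", try_enqueue_heal(&ctx)); /* 1 */
    return 0;
}

In the patch the counter is incremented when the heal is queued or launched and decremented when the heal finishes; the sketch collapses that into two helpers guarding a single counter.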

View File

@ -0,0 +1,126 @@
From 6c004c6c8b8f98f56e186740881520b8364e6f85 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Mon, 18 Mar 2019 16:08:04 +0530
Subject: [PATCH 45/52] glusterd: fix txn-id mem leak
This commit ensures the following:
1. Don't send the commit op request to the remote nodes when "gluster v
status all" is executed: for the status-all transaction the local
commit gets the names of the volumes, and the remote commit ops are
technically a no-op, so there is no need for additional rpc requests.
2. In the op state machine flow, if the transaction is in the staged state and
op_info.skip_locking is true, there is no need to set the txn id in the
priv->glusterd_txn_opinfo dictionary, which otherwise never gets freed.
> Fixes: bz#1691164
> Change-Id: Ib6a9300ea29633f501abac2ba53fb72ff648c822
> Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
upstream patch: https://review.gluster.org/#/c/glusterfs/+/22388/
BUG: 1670415
Change-Id: Ib6a9300ea29633f501abac2ba53fb72ff648c822
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166449
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 26 ++++++++++++++++++++------
xlators/mgmt/glusterd/src/glusterd-syncop.c | 16 ++++++++++++++++
2 files changed, 36 insertions(+), 6 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index cbbb5d9..12d857a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -5652,6 +5652,9 @@ glusterd_op_ac_stage_op(glusterd_op_sm_event_t *event, void *ctx)
dict_t *dict = NULL;
xlator_t *this = NULL;
uuid_t *txn_id = NULL;
+ glusterd_op_info_t txn_op_info = {
+ {0},
+ };
this = THIS;
GF_ASSERT(this);
@@ -5686,6 +5689,7 @@ glusterd_op_ac_stage_op(glusterd_op_sm_event_t *event, void *ctx)
ret = -1;
goto out;
}
+ ret = glusterd_get_txn_opinfo(&event->txn_id, &txn_op_info);
ret = dict_set_bin(rsp_dict, "transaction_id", txn_id, sizeof(*txn_id));
if (ret) {
@@ -5704,6 +5708,12 @@ out:
gf_msg_debug(this->name, 0, "Returning with %d", ret);
+ /* for no volname transactions, the txn_opinfo needs to be cleaned up
+ * as there's no unlock event triggered
+ */
+ if (txn_op_info.skip_locking)
+ ret = glusterd_clear_txn_opinfo(txn_id);
+
if (rsp_dict)
dict_unref(rsp_dict);
@@ -8159,12 +8169,16 @@ glusterd_op_sm()
"Unable to clear "
"transaction's opinfo");
} else {
- ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
- if (ret)
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
+ if (!(event_type == GD_OP_EVENT_STAGE_OP &&
+ opinfo.state.state == GD_OP_STATE_STAGED &&
+ opinfo.skip_locking)) {
+ ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
+ }
}
glusterd_destroy_op_event_ctx(event);
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 1741cf8..618d8bc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1392,6 +1392,8 @@ gd_commit_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
char *errstr = NULL;
struct syncargs args = {0};
int type = GF_QUOTA_OPTION_TYPE_NONE;
+ uint32_t cmd = 0;
+ gf_boolean_t origin_glusterd = _gf_false;
this = THIS;
GF_ASSERT(this);
@@ -1449,6 +1451,20 @@ commit_done:
gd_syncargs_init(&args, op_ctx);
synctask_barrier_init((&args));
peer_cnt = 0;
+ origin_glusterd = is_origin_glusterd(req_dict);
+
+ if (op == GD_OP_STATUS_VOLUME) {
+ ret = dict_get_uint32(req_dict, "cmd", &cmd);
+ if (ret)
+ goto out;
+
+ if (origin_glusterd) {
+ if ((cmd & GF_CLI_STATUS_ALL)) {
+ ret = 0;
+ goto out;
+ }
+ }
+ }
RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
--
1.8.3.1
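
The leak fixed above follows a common pattern: per-transaction state is stored on a path that never produces the event which would clear it. A small illustrative C sketch of the guard, where a toy fixed-size table stands in for priv->glusterd_txn_opinfo (everything here is simplified, not glusterd code):

/* txn_store.c -- gcc txn_store.c */
#include <stdio.h>

#define MAX_TXN 16

struct opinfo {
    char txn_id[64];
    int staged;
    int skip_locking;
    int used;
};

/* toy stand-in for the priv->glusterd_txn_opinfo dictionary */
static struct opinfo store[MAX_TXN];

static void set_opinfo(const char *txn_id, int staged, int skip_locking)
{
    /* the guard from the patch: a skip_locking transaction in STAGED state
     * never triggers the unlock event that would clear this entry, so
     * storing it would leak -- skip the store entirely */
    if (staged && skip_locking)
        return;

    for (int i = 0; i < MAX_TXN; i++) {
        if (!store[i].used) {
            store[i].used = 1;
            store[i].staged = staged;
            store[i].skip_locking = skip_locking;
            snprintf(store[i].txn_id, sizeof(store[i].txn_id), "%s", txn_id);
            return;
        }
    }
}

static int live_entries(void)
{
    int n = 0;
    for (int i = 0; i < MAX_TXN; i++)
        n += store[i].used;
    return n;
}

int main(void)
{
    set_opinfo("txn-1", 1, 0); /* normal txn: stored, cleared later on unlock */
    set_opinfo("txn-2", 1, 1); /* "status all" style txn: would leak, skipped */
    printf("stored entries: %d\n", live_entries()); /* prints 1, not 2 */
    return 0;
}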

View File

@ -0,0 +1,98 @@
From a0661449cd8ba7b851fec473191733767f4541b8 Mon Sep 17 00:00:00 2001
From: Pranith Kumar K <pkarampu@redhat.com>
Date: Thu, 28 Mar 2019 17:55:54 +0530
Subject: [PATCH 46/52] protocol/client: Do not fallback to anon-fd if fd is
not open
If a file is opened while a brick is down and, after the brick comes back up,
a fop comes in on that fd, the client xlator would still wind the fop on the
anon-fd, leading to wrong behavior of the fop in some cases.
Example:
If an lk fop is issued on the fd just after the brick is up in the scenario above,
the lk fop is sent on the anon-fd instead of being failed on that client xlator.
This lock is never freed upon close of the fd, because flush on the anon-fd is
invalid and is not wound below the server xlator.
As a fix, fail the fop unless the fd has the FALLBACK_TO_ANON_FD flag.
>Upstream-patch: https://review.gluster.org/c/glusterfs/+/15804
>Change-Id: I77692d056660b2858e323bdabdfe0a381807cccc
>fixes bz#1390914
BUG: 1695057
Change-Id: Id656bea8dde14327212fbe7ecc97519dc5b32098
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166833
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/bugs/protocol/bug-1390914.t | 36 ++++++++++++++++++++++++++++
xlators/protocol/client/src/client-helpers.c | 8 ++++++-
2 files changed, 43 insertions(+), 1 deletion(-)
create mode 100644 tests/bugs/protocol/bug-1390914.t
diff --git a/tests/bugs/protocol/bug-1390914.t b/tests/bugs/protocol/bug-1390914.t
new file mode 100644
index 0000000..e3dab92
--- /dev/null
+++ b/tests/bugs/protocol/bug-1390914.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+cleanup;
+
+#test that fops are not wound on anon-fd when fd is not open on that brick
+TEST glusterd;
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3};
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST $GFS -s $H0 --volfile-id=$V0 --direct-io-mode=enable $M0;
+
+TEST touch $M0/1
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST fd_open 200 'w' "$M0/1"
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#lk should only happen on 2 bricks, if there is a bug, it will plant a lock
+#with anon-fd on first-brick which will never be released because flush won't
+#be wound below server xlator for anon-fd
+TEST flock -x -n 200
+TEST fd_close 200
+
+TEST fd_open 200 'w' "$M0/1"
+#this lock will fail if there is a stale lock
+TEST flock -x -n 200
+TEST fd_close 200
+cleanup;
diff --git a/xlators/protocol/client/src/client-helpers.c b/xlators/protocol/client/src/client-helpers.c
index 55e87b3..2dd7106 100644
--- a/xlators/protocol/client/src/client-helpers.c
+++ b/xlators/protocol/client/src/client-helpers.c
@@ -419,7 +419,13 @@ client_get_remote_fd(xlator_t *this, fd_t *fd, int flags, int64_t *remote_fd)
{
fdctx = this_fd_get_ctx(fd, this);
if (!fdctx) {
- *remote_fd = GF_ANON_FD_NO;
+ if (fd->anonymous) {
+ *remote_fd = GF_ANON_FD_NO;
+ } else {
+ *remote_fd = -1;
+ gf_msg_debug(this->name, EBADF, "not a valid fd for gfid: %s",
+ uuid_utoa(fd->inode->gfid));
+ }
} else {
if (__is_fd_reopen_in_progress(fdctx))
*remote_fd = -1;
--
1.8.3.1
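
The behavioral change above is narrow: only an fd that was opened as an anonymous fd may fall back to the anonymous remote fd; a regular fd with no per-brick context (not yet reopened on that brick) now fails instead of silently using the anon-fd. A simplified, self-contained sketch of that decision, with stand-in types and an illustrative sentinel rather than the client xlator code:

/* remote_fd.c -- gcc remote_fd.c */
#include <errno.h>
#include <stdio.h>

#define ANON_FD_SENTINEL (-2) /* illustrative stand-in for GF_ANON_FD_NO */

struct xfd {
    int anonymous;      /* fd was opened as an anonymous fd */
    int has_ctx;        /* fd has been (re)opened on this brick */
    long ctx_remote_fd; /* remote fd stored in the per-brick context */
};

/* returns 0 and fills *remote_fd, or -EBADF when the fop must fail */
static int get_remote_fd(const struct xfd *fd, long *remote_fd)
{
    if (!fd->has_ctx) {
        if (fd->anonymous) {
            *remote_fd = ANON_FD_SENTINEL; /* explicit anon-fd: fallback is ok */
            return 0;
        }
        return -EBADF; /* not an anon-fd and never reopened here: fail the fop */
    }
    *remote_fd = fd->ctx_remote_fd;
    return 0;
}

int main(void)
{
    struct xfd plain = {0, 0, 0};
    struct xfd anon = {1, 0, 0};
    long rfd = 0;

    printf("plain fd without ctx -> %d\n", get_remote_fd(&plain, &rfd)); /* -EBADF */
    printf("anon fd without ctx  -> %d (rfd=%ld)\n",
           get_remote_fd(&anon, &rfd), rfd);
    return 0;
}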

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,115 @@
From 2449a1824c6f7b57889335caaeb09f4c5cb3efce Mon Sep 17 00:00:00 2001
From: Soumya Koduri <skoduri@redhat.com>
Date: Thu, 28 Mar 2019 14:59:00 +0530
Subject: [PATCH 48/52] gfapi: Unblock epoll thread for upcall processing
With commit#ad35193, we made changes to offload
processing of upcall notifications to a synctask so as not
to block epoll threads. However, it seems the issue wasn't
fully addressed.
In "glfs_cbk_upcall_data" -> "synctask_new1", after creating the synctask,
if no callback is defined the thread waits on synctask_join
until the syncfn finishes. So even with those changes,
epoll threads are blocked until the upcalls are processed.
Hence the right fix now is to define a callback function for that
synctask ("glfs_cbk_upcall_syncop") so as to unblock the epoll/notify threads
completely and let the upcall processing happen in parallel in synctask
threads.
Upstream references-
mainline : https://review.gluster.org/22436
release-6.0 : https://review.gluster.org/22459
Change-Id: I4d8645e3588fab2c3ca534e0112773aaab68a5dd
fixes: bz#1694565
Signed-off-by: Soumya Koduri <skoduri@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/166586
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
api/src/glfs-fops.c | 42 ++++++++++++++++++++++++++++++++++--------
1 file changed, 34 insertions(+), 8 deletions(-)
diff --git a/api/src/glfs-fops.c b/api/src/glfs-fops.c
index 88cd32b..01ba60b 100644
--- a/api/src/glfs-fops.c
+++ b/api/src/glfs-fops.c
@@ -5714,6 +5714,16 @@ out:
}
static int
+glfs_upcall_syncop_cbk(int ret, call_frame_t *frame, void *opaque)
+{
+ struct upcall_syncop_args *args = opaque;
+
+ GF_FREE(args->upcall_data);
+ GF_FREE(args);
+ return 0;
+}
+
+static int
glfs_cbk_upcall_syncop(void *opaque)
{
struct upcall_syncop_args *args = opaque;
@@ -5770,15 +5780,13 @@ out:
GLFS_FREE(up_arg);
}
- return ret;
+ return 0;
}
static void
glfs_cbk_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
{
- struct upcall_syncop_args args = {
- 0,
- };
+ struct upcall_syncop_args *args = NULL;
int ret = -1;
if (!fs || !upcall_data)
@@ -5789,16 +5797,34 @@ glfs_cbk_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
goto out;
}
- args.fs = fs;
- args.upcall_data = upcall_data;
+ args = GF_CALLOC(1, sizeof(struct upcall_syncop_args),
+ glfs_mt_upcall_entry_t);
+ if (!args) {
+ gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
+ "Upcall syncop args allocation failed.");
+ goto out;
+ }
+
+ /* Note: we are not taking any ref on fs here.
+ * Ideally applications have to unregister for upcall events
+ * or stop polling for upcall events before performing
+ * glfs_fini. And as for outstanding synctasks created, we wait
+ * for all syncenv threads to finish tasks before cleaning up the
+ * fs->ctx. Hence it seems safe to process these callback
+ * notification without taking any lock/ref.
+ */
+ args->fs = fs;
+ args->upcall_data = gf_memdup(upcall_data, sizeof(*upcall_data));
- ret = synctask_new(THIS->ctx->env, glfs_cbk_upcall_syncop, NULL, NULL,
- &args);
+ ret = synctask_new(THIS->ctx->env, glfs_cbk_upcall_syncop,
+ glfs_upcall_syncop_cbk, NULL, args);
/* should we retry incase of failure? */
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_UPCALL_SYNCOP_FAILED,
"Synctak for Upcall event_type(%d) and gfid(%s) failed",
upcall_data->event_type, (char *)(upcall_data->gfid));
+ GF_FREE(args->upcall_data);
+ GF_FREE(args);
}
out:
--
1.8.3.1
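
The essence of the fix above is an ownership rule for fire-and-forget offloading: the dispatcher copies the upcall data into heap-allocated arguments and returns immediately, and the completion side frees them, so the epoll thread never waits on the task. A minimal pthread sketch of that pattern (plain threads, not the gfapi/synctask API):

/* offload.c -- gcc offload.c -lpthread */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct upcall_args {
    char payload[64];
};

static void *worker(void *opaque)
{
    struct upcall_args *args = opaque;

    printf("processing upcall: %s\n", args->payload);
    free(args); /* the completion side owns and frees the arguments */
    return NULL;
}

/* called from the "epoll" thread: must return without waiting for the worker */
static int dispatch_upcall(const char *payload)
{
    struct upcall_args *args = malloc(sizeof(*args));
    pthread_t t;

    if (!args)
        return -1;
    /* copy the data: the caller's buffer may be gone before the worker runs */
    snprintf(args->payload, sizeof(args->payload), "%s", payload);

    if (pthread_create(&t, NULL, worker, args) != 0) {
        free(args); /* on dispatch failure the dispatcher frees, as in the patch */
        return -1;
    }
    pthread_detach(t); /* fire and forget: no join, the epoll thread returns */
    return 0;
}

int main(void)
{
    dispatch_upcall("cache invalidation for gfid 1234");
    sleep(1); /* only so the detached worker gets to run before this demo exits */
    return 0;
}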

View File

@ -0,0 +1,49 @@
From e205516ef874d617e4756856098bf10e17b0ea3d Mon Sep 17 00:00:00 2001
From: Raghavendra G <rgowdapp@redhat.com>
Date: Fri, 22 Mar 2019 10:40:45 +0530
Subject: [PATCH 49/52] transport/socket: log shutdown msg occasionally
>Change-Id: If3fc0884e7e2f45de2d278b98693b7a473220a5f
>Signed-off-by: Raghavendra G <rgowdapp@redhat.com>
>Fixes: bz#1691616
BUG: 1691620
Change-Id: If3fc0884e7e2f45de2d278b98693b7a473220a5f
Signed-off-by: Sunil Kumar Acharya <sheggodu@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167088
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
rpc/rpc-transport/socket/src/socket.c | 4 ++--
rpc/rpc-transport/socket/src/socket.h | 1 +
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c
index 121d46b..f6de1d3 100644
--- a/rpc/rpc-transport/socket/src/socket.c
+++ b/rpc/rpc-transport/socket/src/socket.c
@@ -807,8 +807,8 @@ __socket_shutdown(rpc_transport_t *this)
gf_log(this->name, GF_LOG_DEBUG, "shutdown() returned %d. %s", ret,
strerror(errno));
} else {
- gf_log(this->name, GF_LOG_INFO, "intentional socket shutdown(%d)",
- priv->sock);
+ GF_LOG_OCCASIONALLY(priv->shutdown_log_ctr, this->name, GF_LOG_INFO,
+ "intentional socket shutdown(%d)", priv->sock);
}
return ret;
diff --git a/rpc/rpc-transport/socket/src/socket.h b/rpc/rpc-transport/socket/src/socket.h
index 32339d3..e1ccae2 100644
--- a/rpc/rpc-transport/socket/src/socket.h
+++ b/rpc/rpc-transport/socket/src/socket.h
@@ -219,6 +219,7 @@ typedef struct {
int keepalivecnt;
int timeout;
int log_ctr;
+ int shutdown_log_ctr;
/* ssl_error_required is used only during the SSL connection setup
* phase.
* It holds the error code returned by SSL_get_error() and is used to
--
1.8.3.1
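
GF_LOG_OCCASIONALLY used above rate-limits a message through a per-callsite counter so that a repetitive event is logged only every Nth occurrence. A small self-contained approximation of the idea (the real macro is defined in the glusterfs logging headers; the interval used here is an assumption for illustration):

/* log_occasionally.c -- gcc log_occasionally.c */
#include <stdio.h>

/* log the 1st, 43rd, 85th ... occurrence tracked by the given counter;
 * 42 is an illustrative interval, not necessarily the one glusterfs uses */
#define LOG_OCCASIONALLY(ctr, fmt, ...)                                        \
    do {                                                                       \
        if ((ctr)++ % 42 == 0)                                                 \
            fprintf(stderr, fmt "\n", __VA_ARGS__);                            \
    } while (0)

int main(void)
{
    int shutdown_log_ctr = 0; /* plays the role of priv->shutdown_log_ctr */

    for (int i = 0; i < 100; i++)
        LOG_OCCASIONALLY(shutdown_log_ctr, "intentional socket shutdown(%d)", 5);
    return 0; /* only 3 of the 100 messages are actually emitted */
}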

View File

@ -0,0 +1,142 @@
From 161a039f8088bf8ce7000d8175e3793219525179 Mon Sep 17 00:00:00 2001
From: Kotresh HR <khiremat@redhat.com>
Date: Thu, 28 Mar 2019 07:17:16 -0400
Subject: [PATCH 50/52] geo-rep: Fix syncing multiple rename of symlink
Problem:
Geo-rep fails to sync the rename of a symlink if it is
renamed multiple times and the creation and renames
happened in quick succession.
Worker crash at slave:
Traceback (most recent call last):
File "/usr/libexec/glusterfs/python/syncdaemon/repce.py", in worker
res = getattr(self.obj, rmeth)(*in_data[2:])
File "/usr/libexec/glusterfs/python/syncdaemon/resource.py", in entry_ops
[ESTALE, EINVAL, EBUSY])
File "/usr/libexec/glusterfs/python/syncdaemon/syncdutils.py", in errno_wrap
return call(*arg)
File "/usr/libexec/glusterfs/python/syncdaemon/libcxattr.py", in lsetxattr
cls.raise_oserr()
File "/usr/libexec/glusterfs/python/syncdaemon/libcxattr.py", in raise_oserr
raise OSError(errn, os.strerror(errn))
OSError: [Errno 12] Cannot allocate memory
Geo-rep Behaviour:
1. SYMLINK doesn't record the target path in the changelog.
So while syncing a SYMLINK, readlink is done on the
master to get the target path.
2. Geo-rep creates the destination if the source is not
present while syncing a RENAME. Hence, while syncing a
RENAME of a SYMLINK, the target path is collected from the
destination.
Cause:
If a symlink is created and renamed multiple times, the creation of
the symlink is ignored, as it's no longer present on the master at
that path. When the symlink has been renamed multiple times on the master,
while syncing the first RENAME of the SYMLINK, neither source nor
destination is present, hence the target path is not known. In this case,
while creating the destination directly on the slave, regular-file
attributes were encoded into the blob instead of symlink attributes,
causing a failure in the gfid-access translator while decoding the
blob.
Solution:
While syncing a RENAME of a SYMLINK, when the target is not known
and neither src nor destination is present on the master,
don't create the destination. Ignore the rename. It's ok to ignore:
if it's unlinked, that's fine; if it's renamed to something else,
it will be synced then.
Backport of:
> Patch: https://review.gluster.org/22438
> Change-Id: Ibdfa495513b7c05b5370ab0b89c69a6802338d87
> BUG: 1693648
> Signed-off-by: Kotresh HR <khiremat@redhat.com>
Change-Id: Ibdfa495513b7c05b5370ab0b89c69a6802338d87
fixes: bz#1670429
Signed-off-by: Kotresh HR <khiremat@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167122
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
geo-replication/syncdaemon/resource.py | 23 ++++++++++++++---------
tests/00-geo-rep/georep-basic-dr-rsync.t | 1 +
tests/geo-rep.rc | 12 ++++++++++++
3 files changed, 27 insertions(+), 9 deletions(-)
diff --git a/geo-replication/syncdaemon/resource.py b/geo-replication/syncdaemon/resource.py
index a2d0b16..c290d86 100644
--- a/geo-replication/syncdaemon/resource.py
+++ b/geo-replication/syncdaemon/resource.py
@@ -625,15 +625,20 @@ class Server(object):
# exist with different gfid.
if not matching_disk_gfid(gfid, entry):
if e['stat'] and not stat.S_ISDIR(e['stat']['mode']):
- if stat.S_ISLNK(e['stat']['mode']) and \
- e['link'] is not None:
- st1 = lstat(en)
- if isinstance(st1, int):
- (pg, bname) = entry2pb(en)
- blob = entry_pack_symlink(cls, gfid, bname,
- e['link'], e['stat'])
- elif not matching_disk_gfid(gfid, en):
- collect_failure(e, EEXIST, uid, gid, True)
+ if stat.S_ISLNK(e['stat']['mode']):
+ # src is not present, so don't sync symlink as
+ # we don't know target. It's ok to ignore. If
+ # it's unliked, it's fine. If it's renamed to
+ # something else, it will be synced then.
+ if e['link'] is not None:
+ st1 = lstat(en)
+ if isinstance(st1, int):
+ (pg, bname) = entry2pb(en)
+ blob = entry_pack_symlink(cls, gfid, bname,
+ e['link'],
+ e['stat'])
+ elif not matching_disk_gfid(gfid, en):
+ collect_failure(e, EEXIST, uid, gid, True)
else:
slink = os.path.join(pfx, gfid)
st = lstat(slink)
diff --git a/tests/00-geo-rep/georep-basic-dr-rsync.t b/tests/00-geo-rep/georep-basic-dr-rsync.t
index 4a03930..8b64370 100644
--- a/tests/00-geo-rep/georep-basic-dr-rsync.t
+++ b/tests/00-geo-rep/georep-basic-dr-rsync.t
@@ -110,6 +110,7 @@ EXPECT_WITHIN $GEO_REP_TIMEOUT 0 chown_file_ok ${slave_mnt}/hybrid_chown_f1
#Check History Crawl.
TEST $GEOREP_CLI $master $slave stop
TEST create_data "history"
+TEST create_rename_symlink_case
TEST $GEOREP_CLI $master $slave start
EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Active"
EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Passive"
diff --git a/tests/geo-rep.rc b/tests/geo-rep.rc
index 396b4c4..d723129 100644
--- a/tests/geo-rep.rc
+++ b/tests/geo-rep.rc
@@ -19,6 +19,18 @@ function check_common_secret_file()
echo $?
}
+function create_rename_symlink_case()
+{
+ mkdir ${mastermnt}/MUL_REN_SYMLINK
+ cd ${mastermnt}/MUL_REN_SYMLINK
+ mkdir sym_dir1
+ ln -s "sym_dir1" sym1
+ mv sym1 sym2
+ mv sym2 sym3
+ mv sym3 sym4
+ cd -
+}
+
function create_data()
{
prefix=$1
--
1.8.3.1
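
The actual fix lives in the Python entry_ops path shown above; as a language-neutral illustration of the same decision, here is a small POSIX-level C sketch: replay a rename of a symlink normally when the source still exists, treat an existing destination as already synced, and skip the entry entirely when neither end exists and the link target therefore cannot be known (a simplified analogue, not geo-rep code):

/* replay_rename.c -- gcc replay_rename.c */
#include <limits.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* replay "rename src -> dst" recorded for a symlink; returns 0 when handled */
static int replay_symlink_rename(const char *src, const char *dst)
{
    struct stat st;
    char target[PATH_MAX];

    if (lstat(src, &st) == 0)
        return rename(src, dst); /* source still present: a plain rename works */

    if (readlink(dst, target, sizeof(target) - 1) >= 0)
        return 0; /* destination already exists: nothing left to do */

    /* Neither src nor dst exists, so the link target cannot be determined.
     * Skip the entry instead of fabricating one; if the symlink was renamed
     * again or unlinked, that later change will be replayed on its own. */
    fprintf(stderr, "skipping rename %s -> %s: target unknown\n", src, dst);
    return 0;
}

int main(void)
{
    return replay_symlink_rename("/tmp/does-not-exist-sym1",
                                 "/tmp/does-not-exist-sym2");
}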

View File

@ -0,0 +1,67 @@
From 71f4d55770287288f39b31a0435916ac3d9f742b Mon Sep 17 00:00:00 2001
From: Sunil Kumar Acharya <sheggodu@redhat.com>
Date: Fri, 5 Apr 2019 22:27:52 +0530
Subject: [PATCH 51/52] spec: update rpm install condition
Update code to allow rpm install without gluster process shutdown.
Label: DOWNSTREAM ONLY
BUG: 1493284
Change-Id: I308e7e4629a2428927a6df34536e3cd645a54f8c
Signed-off-by: Sunil Kumar Acharya <sheggodu@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167089
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Milind Changire <mchangir@redhat.com>
---
glusterfs.spec.in | 34 ----------------------------------
1 file changed, 34 deletions(-)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 7c7f7c0..0d57b49 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1629,40 +1629,6 @@ if not (ok == 0) then
end
-%pretrans devel -p <lua>
-if not posix.access("/bin/bash", "x") then
- -- initial installation, no shell, no running glusterfsd
- return 0
-end
-
--- TODO: move this completely to a lua script
--- For now, we write a temporary bash script and execute that.
-
-script = [[#!/bin/sh
-pidof -c -o %PPID -x glusterfsd &>/dev/null
-
-if [ $? -eq 0 ]; then
- pushd . > /dev/null 2>&1
- for volume in /var/lib/glusterd/vols/*; do cd $volume;
- vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
- volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
- if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
- exit 1;
- fi
- done
-
- popd > /dev/null 2>&1
- exit 1;
-fi
-]]
-
-ok, how, val = os.execute(script)
-if not (ok == 0) then
- error("Detected running glusterfs processes", ok)
-end
-
-
-
%pretrans fuse -p <lua>
if not posix.access("/bin/bash", "x") then
-- initial installation, no shell, no running glusterfsd
--
1.8.3.1

View File

@ -0,0 +1,299 @@
From d7bb933742f4d9135621590bf13713633c549af1 Mon Sep 17 00:00:00 2001
From: Aravinda VK <avishwan@redhat.com>
Date: Thu, 14 Mar 2019 20:06:54 +0530
Subject: [PATCH 52/52] geo-rep: IPv6 support
`address_family=inet6` needs to be added while mounting the master and
slave volumes in the gverify script.
A new option (`--inet6`) is introduced to the gluster cli; it is used
internally by geo-rep while calling `gluster volume info
--remote-host=<ipv6>`.
Backport of https://review.gluster.org/22363
Fixes: bz#1688231
Change-Id: I1e0d42cae07158df043e64a2f991882d8c897837
Signed-off-by: Aravinda VK <avishwan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/167120
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
cli/src/cli.c | 11 ++++++++++
cli/src/cli.h | 1 +
geo-replication/src/gverify.sh | 22 ++++++++++++++++----
geo-replication/syncdaemon/argsupgrade.py | 13 +++++++++++-
geo-replication/syncdaemon/gsyncd.py | 1 +
geo-replication/syncdaemon/subcmds.py | 9 +++++++--
xlators/mgmt/glusterd/src/glusterd-geo-rep.c | 30 ++++++++++++++++++++++++++--
7 files changed, 78 insertions(+), 9 deletions(-)
diff --git a/cli/src/cli.c b/cli/src/cli.c
index 08f117e..c33d152 100644
--- a/cli/src/cli.c
+++ b/cli/src/cli.c
@@ -433,6 +433,12 @@ cli_opt_parse(char *opt, struct cli_state *state)
return 0;
}
+ oarg = strtail(opt, "inet6");
+ if (oarg) {
+ state->address_family = "inet6";
+ return 0;
+ }
+
oarg = strtail(opt, "log-file=");
if (oarg) {
state->log_file = oarg;
@@ -679,6 +685,11 @@ cli_rpc_init(struct cli_state *state)
this = THIS;
cli_rpc_prog = &cli_prog;
+ /* If address family specified in CLI */
+ if (state->address_family) {
+ addr_family = state->address_family;
+ }
+
/* Connect to glusterd using the specified method, giving preference
* to a unix socket connection. If nothing is specified, connect to
* the default glusterd socket.
diff --git a/cli/src/cli.h b/cli/src/cli.h
index 5df86d5..b79a0a2 100644
--- a/cli/src/cli.h
+++ b/cli/src/cli.h
@@ -136,6 +136,7 @@ struct cli_state {
gf_loglevel_t log_level;
char *glusterd_sock;
+ char *address_family;
};
struct cli_local {
diff --git a/geo-replication/src/gverify.sh b/geo-replication/src/gverify.sh
index d048de0..7c88f9f 100755
--- a/geo-replication/src/gverify.sh
+++ b/geo-replication/src/gverify.sh
@@ -94,6 +94,7 @@ echo $cmd_line;
function master_stats()
{
MASTERVOL=$1;
+ local inet6=$2;
local d;
local i;
local disk_size;
@@ -102,7 +103,12 @@ function master_stats()
local m_status;
d=$(mktemp -d -t ${0##*/}.XXXXXX 2>/dev/null);
- glusterfs -s localhost --xlator-option="*dht.lookup-unhashed=off" --volfile-id $MASTERVOL -l $master_log_file $d;
+ if [ "$inet6" = "inet6" ]; then
+ glusterfs -s localhost --xlator-option="*dht.lookup-unhashed=off" --xlator-option="transport.address-family=inet6" --volfile-id $MASTERVOL -l $master_log_file $d;
+ else
+ glusterfs -s localhost --xlator-option="*dht.lookup-unhashed=off" --volfile-id $MASTERVOL -l $master_log_file $d;
+ fi
+
i=$(get_inode_num $d);
if [[ "$i" -ne "1" ]]; then
echo 0:0;
@@ -124,12 +130,18 @@ function slave_stats()
SLAVEUSER=$1;
SLAVEHOST=$2;
SLAVEVOL=$3;
+ local inet6=$4;
local cmd_line;
local ver;
local status;
d=$(mktemp -d -t ${0##*/}.XXXXXX 2>/dev/null);
- glusterfs --xlator-option="*dht.lookup-unhashed=off" --volfile-server $SLAVEHOST --volfile-id $SLAVEVOL -l $slave_log_file $d;
+ if [ "$inet6" = "inet6" ]; then
+ glusterfs --xlator-option="*dht.lookup-unhashed=off" --xlator-option="transport.address-family=inet6" --volfile-server $SLAVEHOST --volfile-id $SLAVEVOL -l $slave_log_file $d;
+ else
+ glusterfs --xlator-option="*dht.lookup-unhashed=off" --volfile-server $SLAVEHOST --volfile-id $SLAVEVOL -l $slave_log_file $d;
+ fi
+
i=$(get_inode_num $d);
if [[ "$i" -ne "1" ]]; then
echo 0:0;
@@ -167,6 +179,8 @@ function main()
log_file=$6
> $log_file
+ inet6=$7
+
# Use FORCE_BLOCKER flag in the error message to differentiate
# between the errors which the force command should bypass
@@ -204,8 +218,8 @@ function main()
fi;
ERRORS=0;
- master_data=$(master_stats $1);
- slave_data=$(slave_stats $2 $3 $4);
+ master_data=$(master_stats $1 ${inet6});
+ slave_data=$(slave_stats $2 $3 $4 ${inet6});
master_disk_size=$(echo $master_data | cut -f1 -d':');
slave_disk_size=$(echo $slave_data | cut -f1 -d':');
master_used_size=$(echo $master_data | cut -f2 -d':');
diff --git a/geo-replication/syncdaemon/argsupgrade.py b/geo-replication/syncdaemon/argsupgrade.py
index 4018143..7af4063 100644
--- a/geo-replication/syncdaemon/argsupgrade.py
+++ b/geo-replication/syncdaemon/argsupgrade.py
@@ -84,6 +84,10 @@ def upgrade():
# fail when it does stat to check the existence.
init_gsyncd_template_conf()
+ inet6 = False
+ if "--inet6" in sys.argv:
+ inet6 = True
+
if "--monitor" in sys.argv:
# python gsyncd.py --path=/bricks/b1
# --monitor -c gsyncd.conf
@@ -147,8 +151,11 @@ def upgrade():
user, hname = remote_addr.split("@")
+ if not inet6:
+ hname = gethostbyname(hname)
+
print(("ssh://%s@%s:gluster://127.0.0.1:%s" % (
- user, gethostbyname(hname), vol)))
+ user, hname, vol)))
sys.exit(0)
elif "--normalize-url" in sys.argv:
@@ -346,3 +353,7 @@ def upgrade():
if pargs.reset_sync_time:
sys.argv.append("--reset-sync-time")
+
+ if inet6:
+ # Add `--inet6` as first argument
+ sys.argv = [sys.argv[0], "--inet6"] + sys.argv[1:]
diff --git a/geo-replication/syncdaemon/gsyncd.py b/geo-replication/syncdaemon/gsyncd.py
index 037f351..effe0ce 100644
--- a/geo-replication/syncdaemon/gsyncd.py
+++ b/geo-replication/syncdaemon/gsyncd.py
@@ -47,6 +47,7 @@ def main():
sys.exit(0)
parser = ArgumentParser()
+ parser.add_argument("--inet6", action="store_true")
sp = parser.add_subparsers(dest="subcmd")
# Monitor Status File update
diff --git a/geo-replication/syncdaemon/subcmds.py b/geo-replication/syncdaemon/subcmds.py
index 30050ec..4ece7e0 100644
--- a/geo-replication/syncdaemon/subcmds.py
+++ b/geo-replication/syncdaemon/subcmds.py
@@ -110,8 +110,13 @@ def subcmd_voluuidget(args):
ParseError = XET.ParseError if hasattr(XET, 'ParseError') else SyntaxError
- po = Popen(['gluster', '--xml', '--remote-host=' + args.host,
- 'volume', 'info', args.volname], bufsize=0,
+ cmd = ['gluster', '--xml', '--remote-host=' + args.host,
+ 'volume', 'info', args.volname]
+
+ if args.inet6:
+ cmd.append("--inet6")
+
+ po = Popen(cmd, bufsize=0,
stdin=None, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
diff --git a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
index 49baa58..0f40bea 100644
--- a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
+++ b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
@@ -76,6 +76,19 @@ static char *gsync_reserved_opts[] = {"gluster-command",
static char *gsync_no_restart_opts[] = {"checkpoint", "log_rsync_performance",
"log-rsync-performance", NULL};
+void
+set_gsyncd_inet6_arg(runner_t *runner)
+{
+ xlator_t *this = NULL;
+ char *af;
+ int ret;
+
+ this = THIS;
+ ret = dict_get_str(this->options, "transport.address-family", &af);
+ if (ret == 0)
+ runner_argprintf(runner, "--%s", af);
+}
+
int
__glusterd_handle_sys_exec(rpcsvc_request_t *req)
{
@@ -384,6 +397,7 @@ glusterd_urltransform_init(runner_t *runner, const char *transname)
{
runinit(runner);
runner_add_arg(runner, GSYNCD_PREFIX "/gsyncd");
+ set_gsyncd_inet6_arg(runner);
runner_argprintf(runner, "--%s-url", transname);
}
@@ -725,6 +739,7 @@ glusterd_get_slave_voluuid(char *slave_host, char *slave_vol, char *vol_uuid)
runinit(&runner);
runner_add_arg(&runner, GSYNCD_PREFIX "/gsyncd");
+ set_gsyncd_inet6_arg(&runner);
runner_add_arg(&runner, "--slavevoluuid-get");
runner_argprintf(&runner, "%s::%s", slave_host, slave_vol);
@@ -788,6 +803,7 @@ glusterd_gsync_get_config(char *master, char *slave, char *conf_path,
runinit(&runner);
runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL);
runner_argprintf(&runner, "%s", conf_path);
+ set_gsyncd_inet6_arg(&runner);
runner_argprintf(&runner, "--iprefix=%s", DATADIR);
runner_argprintf(&runner, ":%s", master);
runner_add_args(&runner, slave, "--config-get-all", NULL);
@@ -917,6 +933,7 @@ glusterd_gsync_get_status(char *master, char *slave, char *conf_path,
runinit(&runner);
runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL);
runner_argprintf(&runner, "%s", conf_path);
+ set_gsyncd_inet6_arg(&runner);
runner_argprintf(&runner, "--iprefix=%s", DATADIR);
runner_argprintf(&runner, ":%s", master);
runner_add_args(&runner, slave, "--status-get", NULL);
@@ -937,6 +954,7 @@ glusterd_gsync_get_param_file(char *prmfile, const char *param, char *master,
runinit(&runner);
runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL);
runner_argprintf(&runner, "%s", conf_path);
+ set_gsyncd_inet6_arg(&runner);
runner_argprintf(&runner, "--iprefix=%s", DATADIR);
runner_argprintf(&runner, ":%s", master);
runner_add_args(&runner, slave, "--config-get", NULL);
@@ -2811,6 +2829,7 @@ glusterd_verify_slave(char *volname, char *slave_url, char *slave_vol,
char *slave_ip = NULL;
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
+ char *af = NULL;
this = THIS;
GF_ASSERT(this);
@@ -2852,9 +2871,16 @@ glusterd_verify_slave(char *volname, char *slave_url, char *slave_vol,
runner_argprintf(&runner, "%s", slave_vol);
runner_argprintf(&runner, "%d", ssh_port);
runner_argprintf(&runner, "%s", log_file_path);
- gf_msg_debug(this->name, 0, "gverify Args = %s %s %s %s %s %s %s",
+ ret = dict_get_str(this->options, "transport.address-family", &af);
+ if (ret)
+ af = "-";
+
+ runner_argprintf(&runner, "%s", af);
+
+ gf_msg_debug(this->name, 0, "gverify Args = %s %s %s %s %s %s %s %s",
runner.argv[0], runner.argv[1], runner.argv[2], runner.argv[3],
- runner.argv[4], runner.argv[5], runner.argv[6]);
+ runner.argv[4], runner.argv[5], runner.argv[6],
+ runner.argv[7]);
runner_redir(&runner, STDOUT_FILENO, RUN_PIPE);
synclock_unlock(&priv->big_lock);
ret = runner_run(&runner);
--
1.8.3.1
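
On the CLI side the new --inet6 switch just records an address family that later constrains name resolution. A minimal standalone sketch of that effect using the standard sockets API (getaddrinfo with an AF_INET6 hint; glusterd's 24007 port is used as the service, everything else is illustrative and not the gluster CLI code):

/* resolve6.c -- gcc resolve6.c
 * usage: ./a.out <host> [--inet6] */
#include <netdb.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static int resolve(const char *host, int family)
{
    struct addrinfo hints, *res = NULL;
    char buf[128];
    int ret;

    memset(&hints, 0, sizeof(hints));
    hints.ai_family = family; /* AF_INET6 when --inet6 was requested */
    hints.ai_socktype = SOCK_STREAM;

    ret = getaddrinfo(host, "24007", &hints, &res); /* 24007: glusterd port */
    if (ret != 0) {
        fprintf(stderr, "getaddrinfo(%s): %s\n", host, gai_strerror(ret));
        return -1;
    }

    getnameinfo(res->ai_addr, res->ai_addrlen, buf, sizeof(buf), NULL, 0,
                NI_NUMERICHOST);
    printf("%s resolved to %s\n", host, buf);
    freeaddrinfo(res);
    return 0;
}

int main(int argc, char *argv[])
{
    int family = (argc > 2 && strcmp(argv[2], "--inet6") == 0) ? AF_INET6
                                                               : AF_UNSPEC;
    return resolve(argc > 1 ? argv[1] : "localhost", family);
}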

File diff suppressed because it is too large Load Diff

View File

@ -1 +1 @@
5119d330c92f155af7f161a8fa40a942 glusterfs-3.12.2.tar.gz
a313e3e38e9476b2ebb21bbdbabb2190 glusterfs-6.0.tar.gz