autobuild v6.0-8
Resolves: bz#1471742 bz#1698435 bz#1704562 bz#1712591 bz#1715447
Resolves: bz#1720488 bz#1721802 bz#1722209 bz#1722512 bz#1724089
Resolves: bz#1726991 bz#1727785 bz#1729108
Signed-off-by: Sunil Kumar Acharya <sheggodu@redhat.com>
parent 911cf6c7be
commit 52ad549263
109 0222-cluster-dht-Fixed-a-memleak-in-dht_rename_cbk.patch Normal file
@@ -0,0 +1,109 @@
From 5f304e003cc24ff7877ab51bdfded0dbf8ec581b Mon Sep 17 00:00:00 2001
From: N Balachandran <nbalacha@redhat.com>
Date: Fri, 21 Jun 2019 09:04:19 +0530
Subject: [PATCH 222/255] cluster/dht: Fixed a memleak in dht_rename_cbk

Fixed a memleak in dht_rename_cbk when creating
a linkto file.

upstream: https://review.gluster.org/#/c/glusterfs/+/22912/

>Change-Id: I705adef3cb79e33806520fc2b15558e90e2c211c
>fixes: bz#1722698
>Signed-off-by: N Balachandran <nbalacha@redhat.com>

BUG: 1722512
Change-Id: I8450cac82a0e1611e698ffac476ea5516e614236
Signed-off-by: N Balachandran <nbalacha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175181
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Susant Palai <spalai@redhat.com>
---
 xlators/cluster/dht/src/dht-rename.c | 44 +++++++++++++++++++++++++++---------
 1 file changed, 33 insertions(+), 11 deletions(-)

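The shape of the fix, visible in the diff below: the linkto-file creation runs on its own copied frame, and the creation callback resumes the rename on the saved main frame before destroying only its own copy, so neither frame leaks or is double-freed. A minimal standalone sketch of that ownership pattern, in plain C with hypothetical stand-in types rather than the real gluster call_frame_t API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for call_frame_t and dht_local_t: the point
 * here is ownership, not the gluster API. */
struct frame {
    struct frame *main_frame; /* borrowed: resumed, never freed here */
    char *scratch;            /* owned by this frame */
};

/* The callback owns its copied frame: it resumes work on the main
 * frame, then destroys only its own copy - this is what plugs the leak. */
static void worker_cbk(struct frame *copy) {
    struct frame *main_frame = copy->main_frame;
    printf("resuming on main frame %p\n", (void *)main_frame);
    free(copy->scratch); /* everything the copy owns dies with it */
    free(copy);          /* analogous to DHT_STACK_DESTROY(frame) */
}

static void start_worker(struct frame *main_frame) {
    struct frame *copy = malloc(sizeof(*copy)); /* analogous to copy_frame() */
    if (!copy)
        return; /* fall back to the unlink path, as the real patch does */
    copy->main_frame = main_frame;
    copy->scratch = malloc(64);
    worker_cbk(copy); /* in gluster this fires asynchronously */
}

int main(void) {
    struct frame main_frame = {0};
    start_worker(&main_frame);
    return 0;
}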
diff --git a/xlators/cluster/dht/src/dht-rename.c b/xlators/cluster/dht/src/dht-rename.c
index 893b451..5ba2373 100644
--- a/xlators/cluster/dht/src/dht-rename.c
+++ b/xlators/cluster/dht/src/dht-rename.c
@@ -1009,9 +1009,11 @@ dht_rename_links_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
 {
     xlator_t *prev = NULL;
     dht_local_t *local = NULL;
+    call_frame_t *main_frame = NULL;

     prev = cookie;
     local = frame->local;
+    main_frame = local->main_frame;

     /* TODO: Handle this case in lookup-optimize */
     if (op_ret == -1) {
@@ -1024,7 +1026,8 @@ dht_rename_links_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
         dht_linkfile_attr_heal(frame, this);
     }

-    dht_rename_unlink(frame, this);
+    dht_rename_unlink(main_frame, this);
+    DHT_STACK_DESTROY(frame);
     return 0;
 }

@@ -1040,7 +1043,8 @@ dht_rename_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
     xlator_t *src_cached = NULL;
     xlator_t *dst_hashed = NULL;
     xlator_t *dst_cached = NULL;
-    loc_t link_loc = {0};
+    call_frame_t *link_frame = NULL;
+    dht_local_t *link_local = NULL;

     local = frame->local;
     prev = cookie;
@@ -1110,18 +1114,36 @@ dht_rename_cbk(call_frame_t *frame, void *cookie, xlator_t *this,

     /* Create the linkto file for the dst file */
     if ((src_cached == dst_cached) && (dst_hashed != dst_cached)) {
-        loc_copy(&link_loc, &local->loc2);
-        if (link_loc.inode)
-            inode_unref(link_loc.inode);
-        link_loc.inode = inode_ref(local->loc.inode);
-        gf_uuid_copy(local->gfid, local->loc.inode->gfid);
-        gf_uuid_copy(link_loc.gfid, local->loc.inode->gfid);
-
-        dht_linkfile_create(frame, dht_rename_links_create_cbk, this,
-                            src_cached, dst_hashed, &link_loc);
+        link_frame = copy_frame(frame);
+        if (!link_frame) {
+            goto unlink;
+        }
+
+        /* fop value sent as maxvalue because it is not used
+         * anywhere in this case */
+        link_local = dht_local_init(link_frame, &local->loc2, NULL,
+                                    GF_FOP_MAXVALUE);
+        if (!link_local) {
+            goto unlink;
+        }
+
+        if (link_local->loc.inode)
+            inode_unref(link_local->loc.inode);
+        link_local->loc.inode = inode_ref(local->loc.inode);
+        link_local->main_frame = frame;
+        link_local->stbuf = local->stbuf;
+        gf_uuid_copy(link_local->gfid, local->loc.inode->gfid);
+
+        dht_linkfile_create(link_frame, dht_rename_links_create_cbk, this,
+                            src_cached, dst_hashed, &link_local->loc);
         return 0;
     }

+unlink:
+
+    if (link_frame) {
+        DHT_STACK_DESTROY(link_frame);
+    }
     dht_rename_unlink(frame, this);
     return 0;

--
1.8.3.1
123 0223-change-get_real_filename-implementation-to-use-ENOAT.patch Normal file
@@ -0,0 +1,123 @@
From 36b0bd86321436a951f225fcf2e921390ed8dc33 Mon Sep 17 00:00:00 2001
From: Michael Adam <obnox@samba.org>
Date: Thu, 20 Jun 2019 13:09:37 +0200
Subject: [PATCH 223/255] change get_real_filename implementation to use
 ENOATTR instead of ENOENT

get_real_filename is implemented as a virtual extended attribute to help
Samba implement the case-insensitive but case preserving SMB protocol
more efficiently. It is implemented as a getxattr call on the parent directory
with the virtual key of "get_real_filename:<entryname>", looking for a
spelling with different case for the provided file/dir name (<entryname>)
and returning this correct spelling as a result if the entry is found.
Originally (05aaec645a6262d431486eb5ac7cd702646cfcfb), the
implementation used the ENOENT errno to return the authoritative answer
that <entryname> does not exist in any case folding.

This implementation is actually a misuse of the defined API for the
getxattr call, which returns ENOENT for the case that the directory
the call is made against does not exist, and ENOATTR (or the synonym
ENODATA) for the case that the xattr does not exist.

This was not a problem until the gluster fuse-bridge was changed
to map ENOENT to ESTALE in 59629f1da9dca670d5dcc6425f7f89b3e96b46bf,
after which the getxattr call for get_real_filename returned
ESTALE instead of ENOENT, breaking the expectation in Samba.

It is an independent problem that ESTALE should not leak out to user
space but is intended to trigger retries between fuse and gluster.
But nevertheless, the semantics seem to be incorrect here and should
be changed.

This patch changes the implementation of the get_real_filename virtual
xattr to correctly return ENOATTR instead of ENOENT if the file/directory
being looked up is not found.

The Samba glusterfs_fuse vfs module, which takes advantage of
get_real_filename over a fuse mount, will receive a corresponding change
to map ENOATTR to ENOENT. Without this change, it will still work
correctly, but the performance optimization for non-existing files is
lost. On the other hand, this change removes the distinction between
the old not-implemented case and the implemented case, so a Samba that
has been changed to treat ENOATTR like ENOENT will no longer work
correctly against old servers that don't implement get_real_filename:
existing files will be reported as non-existing.

Backport of https://review.gluster.org/c/glusterfs/+/22925

Change-Id: I971b427ab8410636d5d201157d9af70e0d075b67
fixes: bz#1724089
Signed-off-by: Michael Adam <obnox@samba.org>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175012
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
 xlators/cluster/dht/src/dht-common.c | 8 ++++----
 xlators/storage/posix/src/posix-inode-fd-ops.c | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)

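The contract being restored here is visible at the plain getxattr(2) level: ENOENT means the file the call is made against is missing, while ENOATTR (spelled ENODATA on Linux) means only the attribute is missing. A standalone sketch of a caller relying on that distinction; the exact virtual-key prefix and the entry name "foo" are illustrative assumptions, not the literal gluster key:

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>

#ifndef ENOATTR
#define ENOATTR ENODATA /* Linux spells ENOATTR as ENODATA */
#endif

int main(int argc, char **argv) {
    const char *dir = argc > 1 ? argv[1] : ".";
    /* Virtual key looked up on the parent directory, as described in
     * the commit message; "foo" is a hypothetical entry name. */
    const char *key = "get_real_filename:foo";
    char value[256];

    ssize_t n = lgetxattr(dir, key, value, sizeof(value));
    if (n >= 0) {
        printf("real spelling: %.*s\n", (int)n, value);
    } else if (errno == ENOATTR) {
        puts("authoritative: no entry in any case folding");
    } else if (errno == ENOENT) {
        puts("the directory itself does not exist");
    } else if (errno == EOPNOTSUPP) {
        puts("server does not implement get_real_filename");
    } else {
        perror("lgetxattr");
    }
    return 0;
}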
diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c
index 9a6ea5b..219b072 100644
--- a/xlators/cluster/dht/src/dht-common.c
+++ b/xlators/cluster/dht/src/dht-common.c
@@ -4618,7 +4618,7 @@ dht_getxattr_get_real_filename_cbk(call_frame_t *frame, void *cookie,

     LOCK(&frame->lock);
     {
-        if (local->op_errno == ENODATA || local->op_errno == EOPNOTSUPP) {
+        if (local->op_errno == EOPNOTSUPP) {
             /* Nothing to do here, we have already found
              * a subvol which does not have the get_real_filename
              * optimization. If condition is for simple logic.
@@ -4627,7 +4627,7 @@ dht_getxattr_get_real_filename_cbk(call_frame_t *frame, void *cookie,
         }

         if (op_ret == -1) {
-            if (op_errno == ENODATA || op_errno == EOPNOTSUPP) {
+            if (op_errno == EOPNOTSUPP) {
                 /* This subvol does not have the optimization.
                  * Better let the user know we don't support it.
                  * Remove previous results if any.
@@ -4655,7 +4655,7 @@ dht_getxattr_get_real_filename_cbk(call_frame_t *frame, void *cookie,
                 goto post_unlock;
             }

-            if (op_errno == ENOENT) {
+            if (op_errno == ENOATTR) {
                 /* Do nothing, our defaults are set to this.
                  */
                 goto unlock;
@@ -4723,7 +4723,7 @@ dht_getxattr_get_real_filename(call_frame_t *frame, xlator_t *this, loc_t *loc,
     cnt = local->call_cnt = layout->cnt;

     local->op_ret = -1;
-    local->op_errno = ENOENT;
+    local->op_errno = ENOATTR;

     for (i = 0; i < cnt; i++) {
         subvol = layout->list[i].xlator;
diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c
index c949f68..ea3b69c 100644
--- a/xlators/storage/posix/src/posix-inode-fd-ops.c
+++ b/xlators/storage/posix/src/posix-inode-fd-ops.c
@@ -2954,7 +2954,7 @@ posix_xattr_get_real_filename(call_frame_t *frame, xlator_t *this, loc_t *loc,
     (void)sys_closedir(fd);

     if (!found)
-        return -ENOENT;
+        return -ENOATTR;

     ret = dict_set_dynstr(dict, (char *)key, found);
     if (ret) {
@@ -3422,7 +3422,7 @@ posix_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
     if (ret < 0) {
         op_ret = -1;
         op_errno = -ret;
-        if (op_errno == ENOENT) {
+        if (op_errno == ENOATTR) {
             gf_msg_debug(this->name, 0,
                          "Failed to get "
                          "real filename (%s, %s)",
--
1.8.3.1
53 0224-core-replace-inet_addr-with-inet_pton.patch Normal file
@@ -0,0 +1,53 @@
From 3528c4fb59ca4d3efda2cf0689b7549e449bb91b Mon Sep 17 00:00:00 2001
From: Rinku Kothiya <rkothiya@redhat.com>
Date: Fri, 14 Jun 2019 07:53:06 +0000
Subject: [PATCH 224/255] core: replace inet_addr with inet_pton

Fixes a warning raised by RPMDiff on the use of inet_addr, which may
impact IPv6 support.

> upstream patch : https://review.gluster.org/#/c/glusterfs/+/22866/

>fixes: bz#1721385
>Change-Id: Id2d9afa1747efa64bc79d90dd2566bff54deedeb
>Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>

BUG: 1698435
Change-Id: Id2d9afa1747efa64bc79d90dd2566bff54deedeb
Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175318
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 libglusterfs/src/events.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

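The behavioral difference the patch relies on: inet_addr() can only signal failure by returning INADDR_NONE, which is itself a valid address, and it handles only IPv4; inet_pton() returns 1 on success, 0 for unparsable input, and -1 for an unsupported address family, and also accepts AF_INET6. A small standalone sketch of the same check:

#include <arpa/inet.h>
#include <stdio.h>

int main(void) {
    struct in_addr v4;
    struct in6_addr v6;

    /* Returns 1 on success, 0 if the string is not parsable for the
     * family, -1 (with errno set) if the family is unsupported. */
    if (inet_pton(AF_INET, "192.168.1.10", &v4) != 1)
        fprintf(stderr, "not a valid IPv4 address\n");

    if (inet_pton(AF_INET6, "fe80::1", &v6) != 1)
        fprintf(stderr, "not a valid IPv6 address\n");

    /* "999.1.1.1" fails cleanly here; inet_addr() has no unambiguous
     * way to report it. */
    if (inet_pton(AF_INET, "999.1.1.1", &v4) == 0)
        fprintf(stderr, "rejected malformed input\n");
    return 0;
}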
diff --git a/libglusterfs/src/events.c b/libglusterfs/src/events.c
index 9b3a226..2509767 100644
--- a/libglusterfs/src/events.c
+++ b/libglusterfs/src/events.c
@@ -41,6 +41,7 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
     char *host = NULL;
     struct addrinfo hints;
     struct addrinfo *result = NULL;
+    xlator_t *this = THIS;

     /* Global context */
     ctx = THIS->ctx;
@@ -82,7 +83,12 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
     /* Socket Configurations */
     server.sin_family = AF_INET;
     server.sin_port = htons(EVENT_PORT);
-    server.sin_addr.s_addr = inet_addr(host);
+    ret = inet_pton(server.sin_family, host, &server.sin_addr);
+    if (ret <= 0) {
+        gf_msg(this->name, GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
+               "inet_pton failed with return code %d", ret);
+        goto out;
+    }
     memset(&server.sin_zero, '\0', sizeof(server.sin_zero));

     va_start(arguments, fmt);
--
1.8.3.1
448 0225-tests-utils-Fix-py2-py3-util-python-scripts.patch Normal file
@@ -0,0 +1,448 @@
From 9d10b1fd102dc2d5bfa71891ded52b7a8f5e08d8 Mon Sep 17 00:00:00 2001
From: Kotresh HR <khiremat@redhat.com>
Date: Thu, 6 Jun 2019 12:54:04 +0530
Subject: [PATCH 225/255] tests/utils: Fix py2/py3 util python scripts

The following files are fixed:

tests/bugs/distribute/overlap.py
tests/utils/changelogparser.py
tests/utils/create-files.py
tests/utils/gfid-access.py
tests/utils/libcxattr.py

> upstream patch link : https://review.gluster.org/#/c/glusterfs/+/22829/

>Change-Id: I3db857cc19e19163d368d913eaec1269fbc37140
>updates: bz#1193929
>Signed-off-by: Kotresh HR <khiremat@redhat.com>

Change-Id: I3db857cc19e19163d368d913eaec1269fbc37140
BUG: 1704562
Signed-off-by: Kotresh HR <khiremat@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175483
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
 tests/bugs/distribute/overlap.py | 2 +-
 tests/bugs/glusterfs/bug-902610.t | 2 +-
 tests/utils/changelogparser.py | 5 +-
 tests/utils/create-files.py | 9 +-
 tests/utils/gfid-access.py | 62 +++++++----
 tests/utils/libcxattr.py | 22 +++--
 tests/utils/py2py3.py | 186 ++++++++++++++++++++++++++++++++++++++
 7 files changed, 258 insertions(+), 30 deletions(-)
 create mode 100644 tests/utils/py2py3.py

diff --git a/tests/bugs/distribute/overlap.py b/tests/bugs/distribute/overlap.py
index 0941d37..2813979 100755
--- a/tests/bugs/distribute/overlap.py
+++ b/tests/bugs/distribute/overlap.py
@@ -17,7 +17,7 @@ def calculate_one (ov, nv):

 def calculate_all (values):
     total = 0
-    nv_index = len(values) / 2
+    nv_index = len(values) // 2
     for old_val in values[:nv_index]:
         new_val = values[nv_index]
         nv_index += 1
diff --git a/tests/bugs/glusterfs/bug-902610.t b/tests/bugs/glusterfs/bug-902610.t
index b45e92b..112c947 100755
--- a/tests/bugs/glusterfs/bug-902610.t
+++ b/tests/bugs/glusterfs/bug-902610.t
@@ -28,7 +28,7 @@ function get_layout()
     fi

     # Figure out where the join point is.
-    target=$( $PYTHON -c "print '%08x' % (0x$layout1_e + 1)")
+    target=$( $PYTHON -c "print('%08x' % (0x$layout1_e + 1))")
     #echo "target for layout2 = $target" > /dev/tty

     # The second layout should cover everything that the first doesn't.
diff --git a/tests/utils/changelogparser.py b/tests/utils/changelogparser.py
index e8e252d..3b8f81d 100644
--- a/tests/utils/changelogparser.py
+++ b/tests/utils/changelogparser.py
@@ -125,7 +125,10 @@ class Record(object):
         return repr(self.__dict__)

     def __str__(self):
-        return unicode(self).encode('utf-8')
+        if sys.version_info >= (3,):
+            return self.__unicode__()
+        else:
+            return unicode(self).encode('utf-8')


 def get_num_tokens(data, tokens, version=Version.V11):
diff --git a/tests/utils/create-files.py b/tests/utils/create-files.py
index b2a1961..04736e9 100755
--- a/tests/utils/create-files.py
+++ b/tests/utils/create-files.py
@@ -19,6 +19,11 @@ import argparse
 datsiz = 0
 timr = 0

+def get_ascii_upper_alpha_digits():
+    if sys.version_info > (3,0):
+        return string.ascii_uppercase+string.digits
+    else:
+        return string.uppercase+string.digits

 def setLogger(filename):
     global logger
@@ -111,7 +116,7 @@ def create_tar_file(fil, size, mins, maxs, rand):

 def get_filename(flen):
     size = flen
-    char = string.uppercase+string.digits
+    char = get_ascii_upper_alpha_digits()
     st = ''.join(random.choice(char) for i in range(size))
     ti = str((hex(int(str(time.time()).split('.')[0])))[2:])
     return ti+"%%"+st
@@ -175,7 +180,7 @@ def tar_files(files, file_count, inter, size, mins, maxs,


 def setxattr_files(files, randname, dir_path):
-    char = string.uppercase+string.digits
+    char = get_ascii_upper_alpha_digits()
     if not randname:
         for k in range(files):
             v = ''.join(random.choice(char) for i in range(10))
diff --git a/tests/utils/gfid-access.py b/tests/utils/gfid-access.py
index 556d2b4..c35c122 100755
--- a/tests/utils/gfid-access.py
+++ b/tests/utils/gfid-access.py
@@ -33,23 +33,51 @@ def _fmt_mkdir(l):
 def _fmt_symlink(l1, l2):
     return "!II%dsI%ds%ds" % (37, l1+1, l2+1)

-def entry_pack_reg(gf, bn, mo, uid, gid):
-    blen = len(bn)
-    return struct.pack(_fmt_mknod(blen),
-                       uid, gid, gf, mo, bn,
-                       stat.S_IMODE(mo), 0, umask())
-
-def entry_pack_dir(gf, bn, mo, uid, gid):
-    blen = len(bn)
-    return struct.pack(_fmt_mkdir(blen),
-                       uid, gid, gf, mo, bn,
-                       stat.S_IMODE(mo), umask())
-
-def entry_pack_symlink(gf, bn, lnk, mo, uid, gid):
-    blen = len(bn)
-    llen = len(lnk)
-    return struct.pack(_fmt_symlink(blen, llen),
-                       uid, gid, gf, mo, bn, lnk)
+
+if sys.version_info > (3,):
+    def entry_pack_reg(gf, bn, mo, uid, gid):
+        bn_encoded = bn.encode()
+        blen = len(bn_encoded)
+        return struct.pack(_fmt_mknod(blen),
+                           uid, gid, gf.encode(), mo, bn_encoded,
+                           stat.S_IMODE(mo), 0, umask())
+
+    # mkdir
+    def entry_pack_dir(gf, bn, mo, uid, gid):
+        bn_encoded = bn.encode()
+        blen = len(bn_encoded)
+        return struct.pack(_fmt_mkdir(blen),
+                           uid, gid, gf.encode(), mo, bn_encoded,
+                           stat.S_IMODE(mo), umask())
+    # symlink
+    def entry_pack_symlink(gf, bn, lnk, st):
+        bn_encoded = bn.encode()
+        blen = len(bn_encoded)
+        lnk_encoded = lnk.encode()
+        llen = len(lnk_encoded)
+        return struct.pack(_fmt_symlink(blen, llen),
+                           st['uid'], st['gid'],
+                           gf.encode(), st['mode'], bn_encoded,
+                           lnk_encoded)
+
+else:
+    def entry_pack_reg(gf, bn, mo, uid, gid):
+        blen = len(bn)
+        return struct.pack(_fmt_mknod(blen),
+                           uid, gid, gf, mo, bn,
+                           stat.S_IMODE(mo), 0, umask())
+
+    def entry_pack_dir(gf, bn, mo, uid, gid):
+        blen = len(bn)
+        return struct.pack(_fmt_mkdir(blen),
+                           uid, gid, gf, mo, bn,
+                           stat.S_IMODE(mo), umask())
+
+    def entry_pack_symlink(gf, bn, lnk, mo, uid, gid):
+        blen = len(bn)
+        llen = len(lnk)
+        return struct.pack(_fmt_symlink(blen, llen),
+                           uid, gid, gf, mo, bn, lnk)

 if __name__ == '__main__':
     if len(sys.argv) < 9:
diff --git a/tests/utils/libcxattr.py b/tests/utils/libcxattr.py
index fd0b083..3f3ed1f 100644
--- a/tests/utils/libcxattr.py
+++ b/tests/utils/libcxattr.py
@@ -10,7 +10,9 @@

 import os
 import sys
-from ctypes import CDLL, c_int, create_string_buffer
+from ctypes import CDLL, c_int
+from py2py3 import bytearray_to_str, gr_create_string_buffer
+from py2py3 import gr_query_xattr, gr_lsetxattr, gr_lremovexattr


 class Xattr(object):
@@ -47,20 +49,23 @@ class Xattr(object):
     @classmethod
     def _query_xattr(cls, path, siz, syscall, *a):
         if siz:
-            buf = create_string_buffer('\0' * siz)
+            buf = gr_create_string_buffer(siz)
         else:
             buf = None
         ret = getattr(cls.libc, syscall)(*((path,) + a + (buf, siz)))
         if ret == -1:
             cls.raise_oserr()
         if siz:
-            return buf.raw[:ret]
+            # py2 and py3 compatibility. Convert bytes array
+            # to string
+            result = bytearray_to_str(buf.raw)
+            return result[:ret]
         else:
             return ret

     @classmethod
     def lgetxattr(cls, path, attr, siz=0):
-        return cls._query_xattr(path, siz, 'lgetxattr', attr)
+        return gr_query_xattr(cls, path, siz, 'lgetxattr', attr)

     @classmethod
     def lgetxattr_buf(cls, path, attr):
@@ -74,20 +79,21 @@ class Xattr(object):

     @classmethod
     def llistxattr(cls, path, siz=0):
-        ret = cls._query_xattr(path, siz, 'llistxattr')
+        ret = gr_query_xattr(cls, path, siz, 'llistxattr')
         if isinstance(ret, str):
-            ret = ret.split('\0')
+            ret = ret.strip('\0')
+            ret = ret.split('\0') if ret else []
         return ret

     @classmethod
     def lsetxattr(cls, path, attr, val):
-        ret = cls.libc.lsetxattr(path, attr, val, len(val), 0)
+        ret = gr_lsetxattr(cls, path, attr, val)
         if ret == -1:
             cls.raise_oserr()

     @classmethod
     def lremovexattr(cls, path, attr):
-        ret = cls.libc.lremovexattr(path, attr)
+        ret = gr_lremovexattr(cls, path, attr)
         if ret == -1:
             cls.raise_oserr()

diff --git a/tests/utils/py2py3.py b/tests/utils/py2py3.py
new file mode 100644
index 0000000..63aca10
--- /dev/null
+++ b/tests/utils/py2py3.py
@@ -0,0 +1,186 @@
+#
+# Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+# This file is part of GlusterFS.
+
+# This file is licensed to you under your choice of the GNU Lesser
+# General Public License, version 3 or any later version (LGPLv3 or
+# later), or the GNU General Public License, version 2 (GPLv2), in all
+# cases as published by the Free Software Foundation.
+#
+
+# All python2/python3 compatibility routines
+
+import sys
+import os
+import stat
+import struct
+from ctypes import create_string_buffer
+
+def umask():
+    return os.umask(0)
+
+if sys.version_info >= (3,):
+    def pipe():
+        (r, w) = os.pipe()
+        os.set_inheritable(r, True)
+        os.set_inheritable(w, True)
+        return (r, w)
+
+    # Raw conversion of bytearray to string. Used in the cases where
+    # buffer is created by create_string_buffer which is an 8-bit char
+    # array and passed to syscalls to fetch results. Using encode/decode
+    # doesn't work as it converts to string altering the size.
+    def bytearray_to_str(byte_arr):
+        return ''.join([chr(b) for b in byte_arr])
+
+    # Raw conversion of string to bytes. This is required to convert
+    # back the string into a bytearray (C char array) to use in struct
+    # pack/unpacking. Again encode/decode can't be used as it alters
+    # the size.
+    def str_to_bytearray(string):
+        return bytes([ord(c) for c in string])
+
+    def gr_create_string_buffer(size):
+        return create_string_buffer(b'\0', size)
+
+    def gr_query_xattr(cls, path, size, syscall, attr=None):
+        if attr:
+            return cls._query_xattr(path.encode(), size, syscall,
+                                    attr.encode())
+        else:
+            return cls._query_xattr(path.encode(), size, syscall)
+
+    def gr_lsetxattr(cls, path, attr, val):
+        return cls.libc.lsetxattr(path.encode(), attr.encode(), val,
+                                  len(val), 0)
+
+    def gr_lremovexattr(cls, path, attr):
+        return cls.libc.lremovexattr(path.encode(), attr.encode())
+
+    def gr_cl_register(cls, brick, path, log_file, log_level, retries):
+        return cls._get_api('gf_changelog_register')(brick.encode(),
+                                                     path.encode(),
+                                                     log_file.encode(),
+                                                     log_level, retries)
+
+    def gr_cl_done(cls, clfile):
+        return cls._get_api('gf_changelog_done')(clfile.encode())
+
+    def gr_cl_history_changelog(cls, changelog_path, start, end, num_parallel,
+                                actual_end):
+        return cls._get_api('gf_history_changelog')(changelog_path.encode(),
+                                                    start, end, num_parallel,
+                                                    actual_end)
+
+    def gr_cl_history_done(cls, clfile):
+        return cls._get_api('gf_history_changelog_done')(clfile.encode())
+
+    # regular file
+
+    def entry_pack_reg(cls, gf, bn, mo, uid, gid):
+        bn_encoded = bn.encode()
+        blen = len(bn_encoded)
+        return struct.pack(cls._fmt_mknod(blen),
+                           uid, gid, gf.encode(), mo, bn_encoded,
+                           stat.S_IMODE(mo), 0, umask())
+
+    def entry_pack_reg_stat(cls, gf, bn, st):
+        bn_encoded = bn.encode()
+        blen = len(bn_encoded)
+        mo = st['mode']
+        return struct.pack(cls._fmt_mknod(blen),
+                           st['uid'], st['gid'],
+                           gf.encode(), mo, bn_encoded,
+                           stat.S_IMODE(mo), 0, umask())
+    # mkdir
+
+    def entry_pack_mkdir(cls, gf, bn, mo, uid, gid):
+        bn_encoded = bn.encode()
+        blen = len(bn_encoded)
+        return struct.pack(cls._fmt_mkdir(blen),
+                           uid, gid, gf.encode(), mo, bn_encoded,
+                           stat.S_IMODE(mo), umask())
+    # symlink
+
+    def entry_pack_symlink(cls, gf, bn, lnk, st):
+        bn_encoded = bn.encode()
+        blen = len(bn_encoded)
+        lnk_encoded = lnk.encode()
+        llen = len(lnk_encoded)
+        return struct.pack(cls._fmt_symlink(blen, llen),
+                           st['uid'], st['gid'],
+                           gf.encode(), st['mode'], bn_encoded,
+                           lnk_encoded)
+else:
+    def pipe():
+        (r, w) = os.pipe()
+        return (r, w)
+
+    # Raw conversion of bytearray to string
+    def bytearray_to_str(byte_arr):
+        return byte_arr
+
+    # Raw conversion of string to bytearray
+    def str_to_bytearray(string):
+        return string
+
+    def gr_create_string_buffer(size):
+        return create_string_buffer('\0', size)
+
+    def gr_query_xattr(cls, path, size, syscall, attr=None):
+        if attr:
+            return cls._query_xattr(path, size, syscall, attr)
+        else:
+            return cls._query_xattr(path, size, syscall)
+
+    def gr_lsetxattr(cls, path, attr, val):
+        return cls.libc.lsetxattr(path, attr, val, len(val), 0)
+
+    def gr_lremovexattr(cls, path, attr):
+        return cls.libc.lremovexattr(path, attr)
+
+    def gr_cl_register(cls, brick, path, log_file, log_level, retries):
+        return cls._get_api('gf_changelog_register')(brick, path, log_file,
+                                                     log_level, retries)
+
+    def gr_cl_done(cls, clfile):
+        return cls._get_api('gf_changelog_done')(clfile)
+
+    def gr_cl_history_changelog(cls, changelog_path, start, end, num_parallel,
+                                actual_end):
+        return cls._get_api('gf_history_changelog')(changelog_path, start, end,
+                                                    num_parallel, actual_end)
+
+    def gr_cl_history_done(cls, clfile):
+        return cls._get_api('gf_history_changelog_done')(clfile)
+
+    # regular file
+
+    def entry_pack_reg(cls, gf, bn, mo, uid, gid):
+        blen = len(bn)
+        return struct.pack(cls._fmt_mknod(blen),
+                           uid, gid, gf, mo, bn,
+                           stat.S_IMODE(mo), 0, umask())
+
+    def entry_pack_reg_stat(cls, gf, bn, st):
+        blen = len(bn)
+        mo = st['mode']
+        return struct.pack(cls._fmt_mknod(blen),
+                           st['uid'], st['gid'],
+                           gf, mo, bn,
+                           stat.S_IMODE(mo), 0, umask())
+    # mkdir
+
+    def entry_pack_mkdir(cls, gf, bn, mo, uid, gid):
+        blen = len(bn)
+        return struct.pack(cls._fmt_mkdir(blen),
+                           uid, gid, gf, mo, bn,
+                           stat.S_IMODE(mo), umask())
+    # symlink
+
+    def entry_pack_symlink(cls, gf, bn, lnk, st):
+        blen = len(bn)
+        llen = len(lnk)
+        return struct.pack(cls._fmt_symlink(blen, llen),
+                           st['uid'], st['gid'],
+                           gf, st['mode'], bn, lnk)
--
1.8.3.1
@@ -0,0 +1,92 @@
From 1c55f3633f748629cd0484f79b6c49101eb2df82 Mon Sep 17 00:00:00 2001
From: Sunny Kumar <sunkumar@redhat.com>
Date: Mon, 8 Jul 2019 11:47:28 +0530
Subject: [PATCH 226/255] geo-rep : fix gluster command path for non-root
 session

Problem:
gluster command not found.

Cause:
In the Volinfo class we issue the command 'gluster vol info' to get
information about a volume, such as its brick_root, in order to perform
various operations. When a geo-rep session is configured for a non-root
user, the Volinfo class fails to issue the gluster command because the
gluster binary path is not available for the non-root user.

Solution:
Use the config value 'slave-gluster-command-dir'/'gluster-command-dir' to
get the path for the gluster command based on the caller.

>Backport of:
>Upstream Patch: https://review.gluster.org/#/c/glusterfs/+/22920/.
>fixes: bz#1722740
>Change-Id: I4ec46373da01f5d00ecd160c4e8c6239da8b3859
>Signed-off-by: Sunny Kumar <sunkumar@redhat.com>

BUG: 1712591
Change-Id: Ifea2927253a9521fa459fea6de8a60085c3413f6
Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175485
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 geo-replication/syncdaemon/monitor.py | 4 ++--
 geo-replication/syncdaemon/syncdutils.py | 12 +++++++++---
 2 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/geo-replication/syncdaemon/monitor.py b/geo-replication/syncdaemon/monitor.py
index c45ef24..234f3f1 100644
--- a/geo-replication/syncdaemon/monitor.py
+++ b/geo-replication/syncdaemon/monitor.py
@@ -369,7 +369,7 @@ def distribute(master, slave):
     if rconf.args.use_gconf_volinfo:
         mvol = VolinfoFromGconf(master.volume, master=True)
     else:
-        mvol = Volinfo(master.volume, master.host)
+        mvol = Volinfo(master.volume, master.host, master=True)
     logging.debug('master bricks: ' + repr(mvol.bricks))
     prelude = []
     slave_host = None
@@ -385,7 +385,7 @@ def distribute(master, slave):
     if rconf.args.use_gconf_volinfo:
         svol = VolinfoFromGconf(slave.volume, master=False)
     else:
-        svol = Volinfo(slave.volume, "localhost", prelude)
+        svol = Volinfo(slave.volume, "localhost", prelude, master=False)

     sbricks = svol.bricks
     suuid = svol.uuid
diff --git a/geo-replication/syncdaemon/syncdutils.py b/geo-replication/syncdaemon/syncdutils.py
index 3f41b5f..2ee10ac 100644
--- a/geo-replication/syncdaemon/syncdutils.py
+++ b/geo-replication/syncdaemon/syncdutils.py
@@ -672,7 +672,7 @@ def get_slv_dir_path(slv_host, slv_volume, gfid):
     dir_path = ENOENT

     if not slv_bricks:
-        slv_info = Volinfo(slv_volume, slv_host)
+        slv_info = Volinfo(slv_volume, slv_host, master=False)
         slv_bricks = slv_info.bricks
     # Result of readlink would be of format as below.
     # readlink = "../../pgfid[0:2]/pgfid[2:4]/pgfid/basename"
@@ -854,8 +854,14 @@ class Popen(subprocess.Popen):

 class Volinfo(object):

-    def __init__(self, vol, host='localhost', prelude=[]):
-        po = Popen(prelude + ['gluster', '--xml', '--remote-host=' + host,
+    def __init__(self, vol, host='localhost', prelude=[], master=True):
+        if master:
+            gluster_cmd_dir = gconf.get("gluster-command-dir")
+        else:
+            gluster_cmd_dir = gconf.get("slave-gluster-command-dir")
+
+        gluster_cmd = os.path.join(gluster_cmd_dir, 'gluster')
+        po = Popen(prelude + [gluster_cmd, '--xml', '--remote-host=' + host,
                    'volume', 'info', vol],
                    stdout=PIPE, stderr=PIPE, universal_newlines=True)
         vix = po.stdout.read()
--
1.8.3.1
914 0227-glusterd-svc-update-pid-of-mux-volumes-from-the-shd-.patch Normal file
@@ -0,0 +1,914 @@
From b0815b8a84a07d17a1215c55afc38888ee9fc37c Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Mon, 24 Jun 2019 12:00:20 +0530
Subject: [PATCH 227/255] glusterd/svc: update pid of mux volumes from the shd
 process

For a normal volume, we update the pid from the process while we
do a daemonization, or at the end of init if it is in no-daemon
mode. Along with updating the pid we also lock the file, to make
sure that the process is running fine.

With brick mux, we were updating the pidfile from glusterd
after an attach/detach request.

There are two problems with this approach.
1) We are not holding a pidlock for any file other than the parent
process.
2) There is a chance of race conditions with attach/detach.
For example, an shd start and a volume stop could race. Let's say
we are starting an shd and it is attached to a volume.
While we are trying to link the pid file to the running process,
it could already have been deleted by the thread doing the volume stop.

> upstream patch : https://review.gluster.org/#/c/glusterfs/+/22935/

>Change-Id: I29a00352102877ce09ea3f376ca52affceb5cf1a
>Updates: bz#1722541
>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>

Change-Id: I29a00352102877ce09ea3f376ca52affceb5cf1a
BUG: 1721802
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175723
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
 glusterfsd/src/gf_attach.c | 2 +
 glusterfsd/src/glusterfsd-mgmt.c | 66 +++++++--
 libglusterfs/src/glusterfs/glusterfs.h | 2 +-
 libglusterfs/src/glusterfs/libglusterfs-messages.h | 3 +-
 libglusterfs/src/graph.c | 154 ++++++++++++++++++++-
 rpc/xdr/src/glusterd1-xdr.x | 1 +
 xlators/mgmt/glusterd/src/glusterd-handler.c | 2 +
 xlators/mgmt/glusterd/src/glusterd-handshake.c | 42 +++++-
 xlators/mgmt/glusterd/src/glusterd-op-sm.c | 4 +
 .../mgmt/glusterd/src/glusterd-shd-svc-helper.c | 25 ++++
 .../mgmt/glusterd/src/glusterd-shd-svc-helper.h | 3 +
 xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 8 +-
 xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 57 ++++----
 xlators/mgmt/glusterd/src/glusterd-syncop.c | 2 +
 xlators/mgmt/glusterd/src/glusterd-utils.c | 6 +-
 15 files changed, 325 insertions(+), 52 deletions(-)

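The pidfile handling added below reduces to one sequence: open the pidfile, take a non-blocking lock with lockf(F_TLOCK) so a live owner keeps others out, and rewrite the pid only when it changed, keeping the file open so the lock stays held. A condensed standalone sketch of that sequence (plain libc; the path is a hypothetical example and error handling is simplified relative to the patch):

#include <stdio.h>
#include <unistd.h>

/* Open and lock a pidfile, then record our pid in it - the same
 * open/lockf(F_TLOCK)/truncate/write/flush sequence the patch uses. */
static int pidfile_update(const char *path)
{
    FILE *fp = fopen(path, "a+");
    if (!fp)
        return -1;

    /* Non-blocking lock: fails immediately if another live process
     * already holds the pidfile. */
    if (lockf(fileno(fp), F_TLOCK, 0) != 0) {
        fclose(fp);
        return -1;
    }

    rewind(fp);
    int old_pid = 0;
    if (fscanf(fp, "%d", &old_pid) == 1 && old_pid == (int)getpid())
        return 0; /* already recorded; keep the lock held */

    if (ftruncate(fileno(fp), 0) != 0 ||
        fprintf(fp, "%d\n", (int)getpid()) <= 0 || fflush(fp) != 0) {
        fclose(fp);
        return -1;
    }
    return 0; /* keep fp open: closing it would drop the lock */
}

int main(void)
{
    return pidfile_update("/tmp/example-shd.pid") == 0 ? 0 : 1;
}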
diff --git a/glusterfsd/src/gf_attach.c b/glusterfsd/src/gf_attach.c
index 6293b9b..1bff854 100644
--- a/glusterfsd/src/gf_attach.c
+++ b/glusterfsd/src/gf_attach.c
@@ -65,6 +65,8 @@ send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
     brick_req.name = path;
     brick_req.input.input_val = NULL;
     brick_req.input.input_len = 0;
+    brick_req.dict.dict_val = NULL;
+    brick_req.dict.dict_len = 0;

     req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req);
     iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size);
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 1d2cd1a..f930e0a 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -50,13 +50,16 @@ int
 emancipate(glusterfs_ctx_t *ctx, int ret);
 int
 glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
-                                   char *volfile_id, char *checksum);
+                                   char *volfile_id, char *checksum,
+                                   dict_t *dict);
 int
 glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
-                                  gf_volfile_t *volfile_obj, char *checksum);
+                                  gf_volfile_t *volfile_obj, char *checksum,
+                                  dict_t *dict);
 int
 glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
-                                   char *volfile_id, char *checksum);
+                                   char *volfile_id, char *checksum,
+                                   dict_t *dict);
 int
 glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj);

@@ -75,7 +78,8 @@ mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data)
 }

 int
-mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id)
+mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id,
+                     dict_t *dict)
 {
     glusterfs_ctx_t *ctx = NULL;
     int ret = 0;
@@ -145,11 +149,11 @@ mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id)
         * the volfile
         */
        ret = glusterfs_process_svc_attach_volfp(ctx, tmpfp, volfile_id,
-                                                sha256_hash);
+                                                sha256_hash, dict);
        goto unlock;
    }
    ret = glusterfs_mux_volfile_reconfigure(tmpfp, ctx, volfile_obj,
-                                           sha256_hash);
+                                           sha256_hash, dict);
    if (ret < 0) {
        gf_msg_debug("glusterfsd-mgmt", EINVAL, "Reconfigure failed !!");
    }
@@ -387,6 +391,8 @@ err:
     UNLOCK(&ctx->volfile_lock);
     if (xlator_req.input.input_val)
         free(xlator_req.input.input_val);
+    if (xlator_req.dict.dict_val)
+        free(xlator_req.dict.dict_val);
     free(xlator_req.name);
     xlator_req.name = NULL;
     return 0;
@@ -561,6 +567,8 @@ out:

     free(xlator_req.name);
     free(xlator_req.input.input_val);
+    if (xlator_req.dict.dict_val)
+        free(xlator_req.dict.dict_val);
     if (output)
         dict_unref(output);
     if (dict)
@@ -982,6 +990,8 @@ out:
     if (input)
         dict_unref(input);
     free(xlator_req.input.input_val); /*malloced by xdr*/
+    if (xlator_req.dict.dict_val)
+        free(xlator_req.dict.dict_val);
     if (output)
         dict_unref(output);
     free(xlator_req.name);
@@ -1062,6 +1072,8 @@ glusterfs_handle_attach(rpcsvc_request_t *req)
 out:
         UNLOCK(&ctx->volfile_lock);
     }
+    if (xlator_req.dict.dict_val)
+        free(xlator_req.dict.dict_val);
     free(xlator_req.input.input_val);
     free(xlator_req.name);

@@ -1077,6 +1089,7 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req)
     };
     xlator_t *this = NULL;
     glusterfs_ctx_t *ctx = NULL;
+    dict_t *dict = NULL;

     GF_ASSERT(req);
     this = THIS;
@@ -1091,20 +1104,41 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req)
         req->rpc_err = GARBAGE_ARGS;
         goto out;
     }
+
     gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41,
            "received attach "
            "request for volfile-id=%s",
            xlator_req.name);
+
+    dict = dict_new();
+    if (!dict) {
+        ret = -1;
+        errno = ENOMEM;
+        goto out;
+    }
+
+    ret = dict_unserialize(xlator_req.dict.dict_val, xlator_req.dict.dict_len,
+                           &dict);
+    if (ret) {
+        gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42,
+               "failed to unserialize xdata to dictionary");
+        goto out;
+    }
+    dict->extra_stdfree = xlator_req.dict.dict_val;
+
     ret = 0;

     if (ctx->active) {
         ret = mgmt_process_volfile(xlator_req.input.input_val,
-                                   xlator_req.input.input_len, xlator_req.name);
+                                   xlator_req.input.input_len, xlator_req.name,
+                                   dict);
     } else {
         gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42,
                "got attach for %s but no active graph", xlator_req.name);
     }
 out:
+    if (dict)
+        dict_unref(dict);
     if (xlator_req.input.input_val)
         free(xlator_req.input.input_val);
     if (xlator_req.name)
@@ -1241,6 +1275,8 @@ out:
     GF_FREE(filepath);
     if (xlator_req.input.input_val)
         free(xlator_req.input.input_val);
+    if (xlator_req.dict.dict_val)
+        free(xlator_req.dict.dict_val);

     return ret;
 }
@@ -1313,6 +1349,8 @@ out:
     if (dict)
         dict_unref(dict);
     free(xlator_req.input.input_val); // malloced by xdr
+    if (xlator_req.dict.dict_val)
+        free(xlator_req.dict.dict_val);
     if (output)
         dict_unref(output);
     free(xlator_req.name); // malloced by xdr
@@ -1461,6 +1499,8 @@ out:
     if (output)
         dict_unref(output);
     free(brick_req.input.input_val);
+    if (brick_req.dict.dict_val)
+        free(brick_req.dict.dict_val);
     free(brick_req.name);
     GF_FREE(xname);
     GF_FREE(msg);
@@ -1654,6 +1694,8 @@ out:
     if (dict)
         dict_unref(dict);
     free(node_req.input.input_val);
+    if (node_req.dict.dict_val)
+        free(node_req.dict.dict_val);
     GF_FREE(msg);
     GF_FREE(rsp.output.output_val);
     GF_FREE(node_name);
@@ -1757,6 +1799,8 @@ glusterfs_handle_nfs_profile(rpcsvc_request_t *req)

 out:
     free(nfs_req.input.input_val);
+    if (nfs_req.dict.dict_val)
+        free(nfs_req.dict.dict_val);
     if (dict)
         dict_unref(dict);
     if (output)
@@ -1835,6 +1879,8 @@ out:
     if (dict)
         dict_unref(dict);
     free(xlator_req.input.input_val); // malloced by xdr
+    if (xlator_req.dict.dict_val)
+        free(xlator_req.dict.dict_val);
     if (output)
         dict_unref(output);
     free(xlator_req.name); // malloced by xdr
@@ -1963,7 +2009,8 @@ out:
     if (dict)
         dict_unref(dict);
     free(brick_req.input.input_val);
-
+    if (brick_req.dict.dict_val)
+        free(brick_req.dict.dict_val);
     gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
     return ret;
 }
@@ -2213,7 +2260,8 @@ volfile:
         size = rsp.op_ret;
         volfile_id = frame->local;
         if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) {
-            ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id);
+            ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id,
+                                       dict);
             goto post_graph_mgmt;
         }

diff --git a/libglusterfs/src/glusterfs/glusterfs.h b/libglusterfs/src/glusterfs/glusterfs.h
index 9ec2365..b6327b8 100644
--- a/libglusterfs/src/glusterfs/glusterfs.h
+++ b/libglusterfs/src/glusterfs/glusterfs.h
@@ -744,7 +744,7 @@ typedef struct {
     char vol_id[NAME_MAX + 1];
     struct list_head volfile_list;
    glusterfs_graph_t *graph;
-
+    FILE *pidfp;
 } gf_volfile_t;

 glusterfs_ctx_t *
diff --git a/libglusterfs/src/glusterfs/libglusterfs-messages.h b/libglusterfs/src/glusterfs/libglusterfs-messages.h
index ea2aa60..7e0eebb 100644
--- a/libglusterfs/src/glusterfs/libglusterfs-messages.h
+++ b/libglusterfs/src/glusterfs/libglusterfs-messages.h
@@ -111,6 +111,7 @@ GLFS_MSGID(
     LG_MSG_PTHREAD_NAMING_FAILED, LG_MSG_SYSCALL_RETURNS_WRONG,
     LG_MSG_XXH64_TO_GFID_FAILED, LG_MSG_ASYNC_WARNING, LG_MSG_ASYNC_FAILURE,
     LG_MSG_GRAPH_CLEANUP_FAILED, LG_MSG_GRAPH_SETUP_FAILED,
-    LG_MSG_GRAPH_DETACH_STARTED, LG_MSG_GRAPH_ATTACH_FAILED);
+    LG_MSG_GRAPH_DETACH_STARTED, LG_MSG_GRAPH_ATTACH_FAILED,
+    LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED);

 #endif /* !_LG_MESSAGES_H_ */
diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c
index 172dc61..05f76bf 100644
--- a/libglusterfs/src/graph.c
+++ b/libglusterfs/src/graph.c
@@ -1467,6 +1467,21 @@ out:
 }

 int
+glusterfs_svc_mux_pidfile_cleanup(gf_volfile_t *volfile_obj)
+{
+    if (!volfile_obj || !volfile_obj->pidfp)
+        return 0;
+
+    gf_msg_trace("glusterfsd", 0, "pidfile %s cleanup", volfile_obj->vol_id);
+
+    lockf(fileno(volfile_obj->pidfp), F_ULOCK, 0);
+    fclose(volfile_obj->pidfp);
+    volfile_obj->pidfp = NULL;
+
+    return 0;
+}
+
+int
 glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj)
 {
     xlator_t *last_xl = NULL;
@@ -1502,6 +1517,7 @@ glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj)

     list_del_init(&volfile_obj->volfile_list);
     glusterfs_mux_xlator_unlink(parent_graph->top, xl);
+    glusterfs_svc_mux_pidfile_cleanup(volfile_obj);
     parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph);
     parent_graph->xl_count -= graph->xl_count;
     parent_graph->leaf_count -= graph->leaf_count;
@@ -1531,8 +1547,126 @@ out:
 }

 int
+glusterfs_svc_mux_pidfile_setup(gf_volfile_t *volfile_obj, const char *pid_file)
+{
+    int ret = -1;
+    FILE *pidfp = NULL;
+
+    if (!pid_file || !volfile_obj)
+        goto out;
+
+    if (volfile_obj->pidfp) {
+        ret = 0;
+        goto out;
+    }
+    pidfp = fopen(pid_file, "a+");
+    if (!pidfp) {
+        goto out;
+    }
+    volfile_obj->pidfp = pidfp;
+
+    ret = lockf(fileno(pidfp), F_TLOCK, 0);
+    if (ret) {
+        ret = 0;
+        goto out;
+    }
+out:
+    return ret;
+}
+
+int
+glusterfs_svc_mux_pidfile_update(gf_volfile_t *volfile_obj,
+                                 const char *pid_file, pid_t pid)
+{
+    int ret = 0;
+    FILE *pidfp = NULL;
+    int old_pid;
+
+    if (!volfile_obj->pidfp) {
+        ret = glusterfs_svc_mux_pidfile_setup(volfile_obj, pid_file);
+        if (ret == -1)
+            goto out;
+    }
+    pidfp = volfile_obj->pidfp;
+    ret = fscanf(pidfp, "%d", &old_pid);
+    if (ret <= 0) {
+        goto update;
+    }
+    if (old_pid == pid) {
+        ret = 0;
+        goto out;
+    } else {
+        gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED,
+               "Old pid=%d found in pidfile %s. Cleaning the old pid and "
+               "Updating new pid=%d",
+               old_pid, pid_file, pid);
+    }
+update:
+    ret = sys_ftruncate(fileno(pidfp), 0);
+    if (ret) {
+        gf_msg("glusterfsd", GF_LOG_ERROR, errno,
+               LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED,
+               "pidfile %s truncation failed", pid_file);
+        goto out;
+    }
+
+    ret = fprintf(pidfp, "%d\n", pid);
+    if (ret <= 0) {
+        gf_msg("glusterfsd", GF_LOG_ERROR, errno,
+               LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, "pidfile %s write failed",
+               pid_file);
+        goto out;
+    }
+
+    ret = fflush(pidfp);
+    if (ret) {
+        gf_msg("glusterfsd", GF_LOG_ERROR, errno,
+               LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, "pidfile %s write failed",
+               pid_file);
+        goto out;
+    }
+out:
+    return ret;
+}
+
+int
+glusterfs_update_mux_pid(dict_t *dict, gf_volfile_t *volfile_obj)
+{
+    char *file = NULL;
+    int ret = -1;
+
+    GF_VALIDATE_OR_GOTO("graph", dict, out);
+    GF_VALIDATE_OR_GOTO("graph", volfile_obj, out);
+
+    ret = dict_get_str(dict, "pidfile", &file);
+    if (ret < 0) {
+        gf_msg("mgmt", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_SETUP_FAILED,
+               "Failed to get pidfile from dict for volfile_id=%s",
+               volfile_obj->vol_id);
+    }
+
+    ret = glusterfs_svc_mux_pidfile_update(volfile_obj, file, getpid());
+    if (ret < 0) {
+        ret = -1;
+        gf_msg("mgmt", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_SETUP_FAILED,
+               "Failed to update "
+               "the pidfile for volfile_id=%s",
+               volfile_obj->vol_id);
+
+        goto out;
+    }
+
+    if (ret == 1)
+        gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED,
+               "PID %d updated in pidfile=%s", getpid(), file);
+    ret = 0;
+out:
+    return ret;
+}
+int
 glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
-                                   char *volfile_id, char *checksum)
+                                   char *volfile_id, char *checksum,
+                                   dict_t *dict)
 {
     glusterfs_graph_t *graph = NULL;
     glusterfs_graph_t *parent_graph = NULL;
@@ -1615,18 +1749,25 @@ glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
         ret = -1;
         goto out;
     }
+    volfile_obj->pidfp = NULL;
+    snprintf(volfile_obj->vol_id, sizeof(volfile_obj->vol_id), "%s",
+             volfile_id);
+
+    if (strcmp(ctx->cmd_args.process_name, "glustershd") == 0) {
+        ret = glusterfs_update_mux_pid(dict, volfile_obj);
+        if (ret == -1) {
+            goto out;
+        }
+    }

     graph->used = 1;
     parent_graph->id++;
     list_add(&graph->list, &ctx->graphs);
     INIT_LIST_HEAD(&volfile_obj->volfile_list);
     volfile_obj->graph = graph;
-    snprintf(volfile_obj->vol_id, sizeof(volfile_obj->vol_id), "%s",
-             volfile_id);
     memcpy(volfile_obj->volfile_checksum, checksum,
            sizeof(volfile_obj->volfile_checksum));
     list_add_tail(&volfile_obj->volfile_list, &ctx->volfile_list);
-
     gf_log_dump_graph(fp, graph);
     graph = NULL;

@@ -1654,7 +1795,8 @@ out:

 int
 glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
-                                  gf_volfile_t *volfile_obj, char *checksum)
+                                  gf_volfile_t *volfile_obj, char *checksum,
+                                  dict_t *dict)
 {
     glusterfs_graph_t *oldvolfile_graph = NULL;
     glusterfs_graph_t *newvolfile_graph = NULL;
@@ -1703,7 +1845,7 @@ glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
         }
         volfile_obj = NULL;
         ret = glusterfs_process_svc_attach_volfp(ctx, newvolfile_fp, vol_id,
-                                                 checksum);
+                                                 checksum, dict);
         goto out;
     }

diff --git a/rpc/xdr/src/glusterd1-xdr.x b/rpc/xdr/src/glusterd1-xdr.x
index 9b36d34..02ebec2 100644
--- a/rpc/xdr/src/glusterd1-xdr.x
+++ b/rpc/xdr/src/glusterd1-xdr.x
@@ -132,6 +132,7 @@ struct gd1_mgmt_brick_op_req {
     string name<>;
     int op;
     opaque input<>;
+    opaque dict<>;
 } ;

 struct gd1_mgmt_brick_op_rsp {
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index af8a8a4..cc1f1df 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -5423,6 +5423,8 @@ glusterd_print_client_details(FILE *fp, dict_t *dict,

     brick_req->op = GLUSTERD_BRICK_STATUS;
     brick_req->name = "";
+    brick_req->dict.dict_val = NULL;
+    brick_req->dict.dict_len = 0;

     ret = dict_set_strn(dict, "brick-name", SLEN("brick-name"),
                         brickinfo->path);
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index 1ba58c3..86dec82 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -203,7 +203,7 @@ out:

 size_t
 build_volfile_path(char *volume_id, char *path, size_t path_len,
-                   char *trusted_str)
+                   char *trusted_str, dict_t *dict)
 {
     struct stat stbuf = {
         0,
@@ -340,11 +340,19 @@ build_volfile_path(char *volume_id, char *path, size_t path_len,

         ret = glusterd_volinfo_find(volid_ptr, &volinfo);
         if (ret == -1) {
-            gf_log(this->name, GF_LOG_ERROR, "Couldn't find volinfo");
+            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+                   "Couldn't find volinfo for volid=%s", volid_ptr);
             goto out;
         }

         glusterd_svc_build_shd_volfile_path(volinfo, path, path_len);
+
+        ret = glusterd_svc_set_shd_pidfile(volinfo, dict);
+        if (ret == -1) {
+            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+                   "Couldn't set pidfile in dict for volid=%s", volid_ptr);
+            goto out;
+        }
         ret = 0;
         goto out;
     }
@@ -919,6 +927,7 @@ __server_getspec(rpcsvc_request_t *req)
     char addrstr[RPCSVC_PEER_STRLEN] = {0};
     peer_info_t *peerinfo = NULL;
     xlator_t *this = NULL;
+    dict_t *dict = NULL;

     this = THIS;
     GF_ASSERT(this);
@@ -971,6 +980,12 @@ __server_getspec(rpcsvc_request_t *req)
         goto fail;
     }

+    dict = dict_new();
+    if (!dict) {
+        ret = -ENOMEM;
+        goto fail;
+    }
+
     trans = req->trans;
     /* addrstr will be empty for cli socket connections */
     ret = rpcsvc_transport_peername(trans, (char *)&addrstr, sizeof(addrstr));
@@ -989,12 +1004,26 @@ __server_getspec(rpcsvc_request_t *req)
     */
    if (strlen(addrstr) == 0 || gf_is_local_addr(addrstr)) {
        ret = build_volfile_path(volume, filename, sizeof(filename),
-                                TRUSTED_PREFIX);
+                                TRUSTED_PREFIX, dict);
    } else {
-        ret = build_volfile_path(volume, filename, sizeof(filename), NULL);
+        ret = build_volfile_path(volume, filename, sizeof(filename), NULL,
+                                 dict);
    }

    if (ret == 0) {
+        if (dict->count > 0) {
+            ret = dict_allocate_and_serialize(dict, &rsp.xdata.xdata_val,
+                                              &rsp.xdata.xdata_len);
+            if (ret) {
+                gf_msg(this->name, GF_LOG_ERROR, 0,
+                       GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
+                       "Failed to serialize dict "
+                       "to request buffer");
+                goto fail;
+            }
+            dict->extra_free = rsp.xdata.xdata_val;
+        }
+
         /* to allocate the proper buffer to hold the file data */
         ret = sys_stat(filename, &stbuf);
         if (ret < 0) {
@@ -1036,7 +1065,6 @@ __server_getspec(rpcsvc_request_t *req)
             goto fail;
         }
     }
-
     /* convert to XDR */
 fail:
     if (spec_fd >= 0)
@@ -1056,6 +1084,10 @@ fail:
                                 (xdrproc_t)xdr_gf_getspec_rsp);
     free(args.key); // malloced by xdr
     free(rsp.spec);
+
+    if (dict)
+        dict_unref(dict);
+
     if (args.xdata.xdata_val)
         free(args.xdata.xdata_val);

diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 9ea695e..454877b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -655,6 +655,8 @@ glusterd_brick_op_build_payload(glusterd_op_t op,
             break;
     }

+    brick_req->dict.dict_len = 0;
+    brick_req->dict.dict_val = NULL;
     ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
                                       &brick_req->input.input_len);
     if (ret)
@@ -723,6 +725,8 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req,
         goto out;
     }

+    brick_req->dict.dict_len = 0;
+    brick_req->dict.dict_val = NULL;
     ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
                                       &brick_req->input.input_len);

diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
index 57ceda9..5661e39 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
@@ -126,3 +126,28 @@ glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd)
 out:
     return;
 }
+
+int
+glusterd_svc_set_shd_pidfile(glusterd_volinfo_t *volinfo, dict_t *dict)
+{
+    int ret = -1;
+    glusterd_svc_t *svc = NULL;
+    xlator_t *this = NULL;
+
+    this = THIS;
+    GF_VALIDATE_OR_GOTO("glusterd", this, out);
+    GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
+    GF_VALIDATE_OR_GOTO(this->name, dict, out);
+
+    svc = &(volinfo->shd.svc);
+
+    ret = dict_set_dynstr_with_alloc(dict, "pidfile", svc->proc.pidfile);
+    if (ret) {
+        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+               "Failed to set pidfile %s in dict", svc->proc.pidfile);
+        goto out;
+    }
+    ret = 0;
+out:
+    return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
index 59466ec..1f0984b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
@@ -36,4 +36,7 @@ glusterd_recover_shd_attach_failure(glusterd_volinfo_t *volinfo,
 int
 glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo);

+int
+glusterd_svc_set_shd_pidfile(glusterd_volinfo_t *volinfo, dict_t *dict);
+
 #endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
index 8ad90a9..590169f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
@@ -258,14 +258,20 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
     gf_boolean_t shd_restart = _gf_false;

     conf = THIS->private;
-    volinfo = data;
     GF_VALIDATE_OR_GOTO("glusterd", conf, out);
     GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+    volinfo = data;
     GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);

     if (volinfo)
         glusterd_volinfo_ref(volinfo);

+    if (volinfo->is_snap_volume) {
+        /* healing of a snap volume is not supported yet*/
+        ret = 0;
+        goto out;
+    }
+
     while (conf->restart_shd) {
         synclock_unlock(&conf->big_lock);
         sleep(2);
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
index 400826f..e106111 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
@@ -519,7 +519,7 @@ glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc)
         /* Take first entry from the process */
         parent_svc = cds_list_entry(mux_proc->svcs.next, glusterd_svc_t,
                                     mux_svc);
-        sys_link(parent_svc->proc.pidfile, svc->proc.pidfile);
+        glusterd_copy_file(parent_svc->proc.pidfile, svc->proc.pidfile);
|
||||
mux_conn = &parent_svc->conn;
|
||||
if (volinfo)
|
||||
volinfo->shd.attached = _gf_true;
|
||||
@@ -623,12 +623,9 @@ glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count,
|
||||
glusterd_volinfo_t *volinfo = NULL;
|
||||
glusterd_shdsvc_t *shd = NULL;
|
||||
glusterd_svc_t *svc = frame->cookie;
|
||||
- glusterd_svc_t *parent_svc = NULL;
|
||||
- glusterd_svc_proc_t *mux_proc = NULL;
|
||||
glusterd_conf_t *conf = NULL;
|
||||
int *flag = (int *)frame->local;
|
||||
xlator_t *this = THIS;
|
||||
- int pid = -1;
|
||||
int ret = -1;
|
||||
gf_getspec_rsp rsp = {
|
||||
0,
|
||||
@@ -679,27 +676,7 @@ glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count,
|
||||
}
|
||||
|
||||
if (rsp.op_ret == 0) {
|
||||
- pthread_mutex_lock(&conf->attach_lock);
|
||||
- {
|
||||
- if (!strcmp(svc->name, "glustershd")) {
|
||||
- mux_proc = svc->svc_proc;
|
||||
- if (mux_proc &&
|
||||
- !gf_is_service_running(svc->proc.pidfile, &pid)) {
|
||||
- /*
|
||||
- * When svc's are restarting, there is a chance that the
|
||||
- * attached svc might not have updated it's pid. Because
|
||||
- * it was at connection stage. So in that case, we need
|
||||
- * to retry the pid file copy.
|
||||
- */
|
||||
- parent_svc = cds_list_entry(mux_proc->svcs.next,
|
||||
- glusterd_svc_t, mux_svc);
|
||||
- if (parent_svc)
|
||||
- sys_link(parent_svc->proc.pidfile, svc->proc.pidfile);
|
||||
- }
|
||||
- }
|
||||
- svc->online = _gf_true;
|
||||
- }
|
||||
- pthread_mutex_unlock(&conf->attach_lock);
|
||||
+ svc->online = _gf_true;
|
||||
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_ATTACH_FAIL,
|
||||
"svc %s of volume %s attached successfully to pid %d", svc->name,
|
||||
volinfo->volname, glusterd_proc_get_pid(&svc->proc));
|
||||
@@ -726,7 +703,7 @@ out:
|
||||
|
||||
extern size_t
|
||||
build_volfile_path(char *volume_id, char *path, size_t path_len,
|
||||
- char *trusted_str);
|
||||
+ char *trusted_str, dict_t *dict);
|
||||
|
||||
int
|
||||
__glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
|
||||
@@ -751,6 +728,7 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
|
||||
ssize_t req_size = 0;
|
||||
call_frame_t *frame = NULL;
|
||||
gd1_mgmt_brick_op_req brick_req;
|
||||
+ dict_t *dict = NULL;
|
||||
void *req = &brick_req;
|
||||
void *errlbl = &&err;
|
||||
struct rpc_clnt_connection *conn;
|
||||
@@ -776,6 +754,8 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
|
||||
brick_req.name = volfile_id;
|
||||
brick_req.input.input_val = NULL;
|
||||
brick_req.input.input_len = 0;
|
||||
+ brick_req.dict.dict_val = NULL;
|
||||
+ brick_req.dict.dict_len = 0;
|
||||
|
||||
frame = create_frame(this, this->ctx->pool);
|
||||
if (!frame) {
|
||||
@@ -783,7 +763,13 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
|
||||
}
|
||||
|
||||
if (op == GLUSTERD_SVC_ATTACH) {
|
||||
- (void)build_volfile_path(volfile_id, path, sizeof(path), NULL);
|
||||
+ dict = dict_new();
|
||||
+ if (!dict) {
|
||||
+ ret = -ENOMEM;
|
||||
+ goto *errlbl;
|
||||
+ }
|
||||
+
|
||||
+ (void)build_volfile_path(volfile_id, path, sizeof(path), NULL, dict);
|
||||
|
||||
ret = sys_stat(path, &stbuf);
|
||||
if (ret < 0) {
|
||||
@@ -818,6 +804,18 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
|
||||
ret = -EIO;
|
||||
goto *errlbl;
|
||||
}
|
||||
+ if (dict->count > 0) {
|
||||
+ ret = dict_allocate_and_serialize(dict, &brick_req.dict.dict_val,
|
||||
+ &brick_req.dict.dict_len);
|
||||
+ if (ret) {
|
||||
+ gf_msg(this->name, GF_LOG_ERROR, 0,
|
||||
+ GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
|
||||
+ "Failed to serialize dict "
|
||||
+ "to request buffer");
|
||||
+ goto *errlbl;
|
||||
+ }
|
||||
+ dict->extra_free = brick_req.dict.dict_val;
|
||||
+ }
|
||||
|
||||
frame->cookie = svc;
|
||||
frame->local = GF_CALLOC(1, sizeof(int), gf_gld_mt_int);
|
||||
@@ -862,6 +860,8 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
|
||||
GF_ATOMIC_INC(conf->blockers);
|
||||
ret = rpc_clnt_submit(rpc, &gd_brick_prog, op, cbkfn, &iov, 1, NULL, 0,
|
||||
iobref, frame, NULL, 0, NULL, 0, NULL);
|
||||
+ if (dict)
|
||||
+ dict_unref(dict);
|
||||
GF_FREE(volfile_content);
|
||||
if (spec_fd >= 0)
|
||||
sys_close(spec_fd);
|
||||
@@ -874,6 +874,9 @@ maybe_free_iobuf:
|
||||
iobuf_unref(iobuf);
|
||||
}
|
||||
err:
|
||||
+ if (dict)
|
||||
+ dict_unref(dict);
|
||||
+
|
||||
GF_FREE(volfile_content);
|
||||
if (spec_fd >= 0)
|
||||
sys_close(spec_fd);
|
||||
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
|
||||
index 618d8bc..a8098df 100644
|
||||
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
|
||||
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
|
||||
@@ -143,6 +143,8 @@ gd_brick_op_req_free(gd1_mgmt_brick_op_req *req)
|
||||
if (!req)
|
||||
return;
|
||||
|
||||
+ if (req->dict.dict_val)
|
||||
+ GF_FREE(req->dict.dict_val);
|
||||
GF_FREE(req->input.input_val);
|
||||
GF_FREE(req);
|
||||
}
|
||||
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
|
||||
index 4c487d0..2eb5116 100644
|
||||
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
|
||||
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
|
||||
@@ -5914,6 +5914,8 @@ send_attach_req(xlator_t *this, struct rpc_clnt *rpc, char *path,
|
||||
brick_req.name = path;
|
||||
brick_req.input.input_val = NULL;
|
||||
brick_req.input.input_len = 0;
|
||||
+ brick_req.dict.dict_val = NULL;
|
||||
+ brick_req.dict.dict_len = 0;
|
||||
|
||||
req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req);
|
||||
iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size);
|
||||
@@ -5977,7 +5979,7 @@ err:
|
||||
|
||||
extern size_t
|
||||
build_volfile_path(char *volume_id, char *path, size_t path_len,
|
||||
- char *trusted_str);
|
||||
+ char *trusted_str, dict_t *dict);
|
||||
|
||||
static int
|
||||
attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo,
|
||||
@@ -6022,7 +6024,7 @@ attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo,
|
||||
goto out;
|
||||
}
|
||||
|
||||
- (void)build_volfile_path(full_id, path, sizeof(path), NULL);
|
||||
+ (void)build_volfile_path(full_id, path, sizeof(path), NULL, NULL);
|
||||
|
||||
for (tries = 15; tries > 0; --tries) {
|
||||
rpc = rpc_clnt_ref(other_brick->rpc);
|
||||
--
|
||||
1.8.3.1
|
||||
|
39
0228-locks-enable-notify-contention-by-default.patch
Normal file
@@ -0,0 +1,39 @@
From 21fe2ef700e76c8b7be40f21d3a4fb6b96eafaf0 Mon Sep 17 00:00:00 2001
From: Xavi Hernandez <xhernandez@redhat.com>
Date: Thu, 6 Jun 2019 08:12:34 +0200
Subject: [PATCH 228/255] locks: enable notify-contention by default

This patch enables the lock contention notification by default.

Upstream patch:
> Change-Id: I10131b026a7cb09fc7c93e1e6c8549988c1d7751
> Upstream patch link: https://review.gluster.org/c/glusterfs/+/22828
> BUG: 1717754
> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>

Change-Id: I10131b026a7cb09fc7c93e1e6c8549988c1d7751
Fixes: bz#1720488
Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/174655
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/features/locks/src/posix.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c
index adb0df5..9db5ac6 100644
--- a/xlators/features/locks/src/posix.c
+++ b/xlators/features/locks/src/posix.c
@@ -4796,7 +4796,7 @@ struct volume_options options[] = {
"be used in conjunction w/ revocation-clear-all."},
{.key = {"notify-contention"},
.type = GF_OPTION_TYPE_BOOL,
-     .default_value = "no",
+     .default_value = "yes",
.flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
.op_version = {GD_OP_VERSION_4_0_0},
.tags = {"locks", "contention"},
--
1.8.3.1
113
0229-glusterd-Show-the-correct-brick-status-in-get-state.patch
Normal file
@@ -0,0 +1,113 @@
From 4fc0a77db5b9760fa5c00d3803c6d11a28a00b74 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawal@redhat.com>
Date: Wed, 3 Jul 2019 15:22:38 +0530
Subject: [PATCH 229/255] glusterd: Show the correct brick status in get-state

Problem: get-state does not show correct brick status if brick
status is not Started, it always shows started if any value
is set brickinfo->status

Solution: Check the value of brickinfo->status to show correct status
in get-state

> Change-Id: I12a79619024c2cf59f338220d144f2f034059b3b
> fixes: bz#1726906
> (Cherry pick from commit af989db23d1db00e087f2b9d3dfc43b13ef17153)
> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/22983/)

BUG: 1726991
Change-Id: I12a79619024c2cf59f338220d144f2f034059b3b
Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175355
Tested-by: Mohit Agrawal <moagrawa@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-handler.c | 7 +++++--
xlators/mgmt/glusterd/src/glusterd-utils.c | 28 ++++++++++++++++++++++++++++
xlators/mgmt/glusterd/src/glusterd-utils.h | 4 ++++
3 files changed, 37 insertions(+), 2 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index cc1f1df..94e1be5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -5589,7 +5589,9 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
char vol_status_str[STATUS_STRLEN] = {
0,
};
-
+    char brick_status_str[STATUS_STRLEN] = {
+        0,
+    };
this = THIS;
GF_VALIDATE_OR_GOTO(THIS->name, this, out);

@@ -5852,8 +5854,9 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
brickinfo->rdma_port);
fprintf(fp, "Volume%d.Brick%d.port_registered: %d\n", count_bkp,
count, brickinfo->port_registered);
+            glusterd_brick_get_status_str(brickinfo, brick_status_str);
fprintf(fp, "Volume%d.Brick%d.status: %s\n", count_bkp, count,
-                    brickinfo->status ? "Started" : "Stopped");
+                    brick_status_str);

/*FIXME: This is a hacky way of figuring out whether a
* brick belongs to the hot or cold tier */
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 2eb5116..3bdfd49 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -13898,6 +13898,34 @@ out:
return ret;
}

+void
+glusterd_brick_get_status_str(glusterd_brickinfo_t *brickinfo, char *status_str)
+{
+    GF_VALIDATE_OR_GOTO(THIS->name, brickinfo, out);
+    GF_VALIDATE_OR_GOTO(THIS->name, status_str, out);
+
+    switch (brickinfo->status) {
+        case GF_BRICK_STOPPED:
+            sprintf(status_str, "%s", "Stopped");
+            break;
+        case GF_BRICK_STARTED:
+            sprintf(status_str, "%s", "Started");
+            break;
+        case GF_BRICK_STARTING:
+            sprintf(status_str, "%s", "Starting");
+            break;
+        case GF_BRICK_STOPPING:
+            sprintf(status_str, "%s", "Stopping");
+            break;
+        default:
+            sprintf(status_str, "%s", "None");
+            break;
+    }
+
+out:
+    return;
+}
+
int
glusterd_volume_get_transport_type_str(glusterd_volinfo_t *volinfo,
char *transport_type_str)
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 6ad8062..5c6a453 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -781,6 +781,10 @@ glusterd_volume_get_type_str(glusterd_volinfo_t *volinfo, char **vol_type_str);
int
glusterd_volume_get_status_str(glusterd_volinfo_t *volinfo, char *status_str);

+void
+glusterd_brick_get_status_str(glusterd_brickinfo_t *brickinfo,
+                              char *status_str);
+
int
glusterd_volume_get_transport_type_str(glusterd_volinfo_t *volinfo,
char *transport_type_str);
--
1.8.3.1
893
0230-Revert-glusterd-svc-update-pid-of-mux-volumes-from-t.patch
Normal file
@@ -0,0 +1,893 @@
From 308fe0d81dbef9f84bb1ad8e7309e3ffc28d6394 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:37:29 +0530
Subject: [PATCH 230/255] Revert "glusterd/svc: update pid of mux volumes from
the shd process"

This reverts commit b0815b8a84a07d17a1215c55afc38888ee9fc37c.
Label : DOWNSTREAM ONLY

BUG: 1471742
Change-Id: Iab11c686565e9a9c852f2b7c2d236fa1a348f96a
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175940
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfsd/src/gf_attach.c | 2 -
glusterfsd/src/glusterfsd-mgmt.c | 66 ++-------
libglusterfs/src/glusterfs/glusterfs.h | 2 +-
libglusterfs/src/glusterfs/libglusterfs-messages.h | 3 +-
libglusterfs/src/graph.c | 154 +--------------------
rpc/xdr/src/glusterd1-xdr.x | 1 -
xlators/mgmt/glusterd/src/glusterd-handler.c | 2 -
xlators/mgmt/glusterd/src/glusterd-handshake.c | 42 +-----
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 4 -
.../mgmt/glusterd/src/glusterd-shd-svc-helper.c | 25 ----
.../mgmt/glusterd/src/glusterd-shd-svc-helper.h | 3 -
xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 8 +-
xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 57 ++++----
xlators/mgmt/glusterd/src/glusterd-syncop.c | 2 -
xlators/mgmt/glusterd/src/glusterd-utils.c | 6 +-
15 files changed, 52 insertions(+), 325 deletions(-)

diff --git a/glusterfsd/src/gf_attach.c b/glusterfsd/src/gf_attach.c
index 1bff854..6293b9b 100644
--- a/glusterfsd/src/gf_attach.c
+++ b/glusterfsd/src/gf_attach.c
@@ -65,8 +65,6 @@ send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
brick_req.name = path;
brick_req.input.input_val = NULL;
brick_req.input.input_len = 0;
-    brick_req.dict.dict_val = NULL;
-    brick_req.dict.dict_len = 0;

req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req);
iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size);
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index f930e0a..1d2cd1a 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -50,16 +50,13 @@ int
emancipate(glusterfs_ctx_t *ctx, int ret);
int
glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
-                                   char *volfile_id, char *checksum,
-                                   dict_t *dict);
+                                   char *volfile_id, char *checksum);
int
glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
-                                  gf_volfile_t *volfile_obj, char *checksum,
-                                  dict_t *dict);
+                                  gf_volfile_t *volfile_obj, char *checksum);
int
glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
-                                   char *volfile_id, char *checksum,
-                                   dict_t *dict);
+                                   char *volfile_id, char *checksum);
int
glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj);

@@ -78,8 +75,7 @@ mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data)
}

int
-mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id,
-                     dict_t *dict)
+mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id)
{
glusterfs_ctx_t *ctx = NULL;
int ret = 0;
@@ -149,11 +145,11 @@ mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id,
* the volfile
*/
ret = glusterfs_process_svc_attach_volfp(ctx, tmpfp, volfile_id,
-                                                 sha256_hash, dict);
+                                                 sha256_hash);
goto unlock;
}
ret = glusterfs_mux_volfile_reconfigure(tmpfp, ctx, volfile_obj,
-                                            sha256_hash, dict);
+                                            sha256_hash);
if (ret < 0) {
gf_msg_debug("glusterfsd-mgmt", EINVAL, "Reconfigure failed !!");
}
@@ -391,8 +387,6 @@ err:
UNLOCK(&ctx->volfile_lock);
if (xlator_req.input.input_val)
free(xlator_req.input.input_val);
-    if (xlator_req.dict.dict_val)
-        free(xlator_req.dict.dict_val);
free(xlator_req.name);
xlator_req.name = NULL;
return 0;
@@ -567,8 +561,6 @@ out:

free(xlator_req.name);
free(xlator_req.input.input_val);
-    if (xlator_req.dict.dict_val)
-        free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
if (dict)
@@ -990,8 +982,6 @@ out:
if (input)
dict_unref(input);
free(xlator_req.input.input_val); /*malloced by xdr*/
-    if (xlator_req.dict.dict_val)
-        free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
free(xlator_req.name);
@@ -1072,8 +1062,6 @@ glusterfs_handle_attach(rpcsvc_request_t *req)
out:
UNLOCK(&ctx->volfile_lock);
}
-    if (xlator_req.dict.dict_val)
-        free(xlator_req.dict.dict_val);
free(xlator_req.input.input_val);
free(xlator_req.name);

@@ -1089,7 +1077,6 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req)
};
xlator_t *this = NULL;
glusterfs_ctx_t *ctx = NULL;
-    dict_t *dict = NULL;

GF_ASSERT(req);
this = THIS;
@@ -1104,41 +1091,20 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req)
req->rpc_err = GARBAGE_ARGS;
goto out;
}
-
gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41,
"received attach "
"request for volfile-id=%s",
xlator_req.name);
-
-    dict = dict_new();
-    if (!dict) {
-        ret = -1;
-        errno = ENOMEM;
-        goto out;
-    }
-
-    ret = dict_unserialize(xlator_req.dict.dict_val, xlator_req.dict.dict_len,
-                           &dict);
-    if (ret) {
-        gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42,
-               "failed to unserialize xdata to dictionary");
-        goto out;
-    }
-    dict->extra_stdfree = xlator_req.dict.dict_val;
-
ret = 0;

if (ctx->active) {
ret = mgmt_process_volfile(xlator_req.input.input_val,
-                                   xlator_req.input.input_len, xlator_req.name,
-                                   dict);
+                                   xlator_req.input.input_len, xlator_req.name);
} else {
gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42,
"got attach for %s but no active graph", xlator_req.name);
}
out:
-    if (dict)
-        dict_unref(dict);
if (xlator_req.input.input_val)
free(xlator_req.input.input_val);
if (xlator_req.name)
@@ -1275,8 +1241,6 @@ out:
GF_FREE(filepath);
if (xlator_req.input.input_val)
free(xlator_req.input.input_val);
-    if (xlator_req.dict.dict_val)
-        free(xlator_req.dict.dict_val);

return ret;
}
@@ -1349,8 +1313,6 @@ out:
if (dict)
dict_unref(dict);
free(xlator_req.input.input_val); // malloced by xdr
-    if (xlator_req.dict.dict_val)
-        free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
free(xlator_req.name); // malloced by xdr
@@ -1499,8 +1461,6 @@ out:
if (output)
dict_unref(output);
free(brick_req.input.input_val);
-    if (brick_req.dict.dict_val)
-        free(brick_req.dict.dict_val);
free(brick_req.name);
GF_FREE(xname);
GF_FREE(msg);
@@ -1694,8 +1654,6 @@ out:
if (dict)
dict_unref(dict);
free(node_req.input.input_val);
-    if (node_req.dict.dict_val)
-        free(node_req.dict.dict_val);
GF_FREE(msg);
GF_FREE(rsp.output.output_val);
GF_FREE(node_name);
@@ -1799,8 +1757,6 @@ glusterfs_handle_nfs_profile(rpcsvc_request_t *req)

out:
free(nfs_req.input.input_val);
-    if (nfs_req.dict.dict_val)
-        free(nfs_req.dict.dict_val);
if (dict)
dict_unref(dict);
if (output)
@@ -1879,8 +1835,6 @@ out:
if (dict)
dict_unref(dict);
free(xlator_req.input.input_val); // malloced by xdr
-    if (xlator_req.dict.dict_val)
-        free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
free(xlator_req.name); // malloced by xdr
@@ -2009,8 +1963,7 @@ out:
if (dict)
dict_unref(dict);
free(brick_req.input.input_val);
-    if (brick_req.dict.dict_val)
-        free(brick_req.dict.dict_val);
+
gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
@@ -2260,8 +2213,7 @@ volfile:
size = rsp.op_ret;
volfile_id = frame->local;
if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) {
-        ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id,
-                                   dict);
+        ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id);
goto post_graph_mgmt;
}

diff --git a/libglusterfs/src/glusterfs/glusterfs.h b/libglusterfs/src/glusterfs/glusterfs.h
index b6327b8..9ec2365 100644
--- a/libglusterfs/src/glusterfs/glusterfs.h
+++ b/libglusterfs/src/glusterfs/glusterfs.h
@@ -744,7 +744,7 @@ typedef struct {
char vol_id[NAME_MAX + 1];
struct list_head volfile_list;
glusterfs_graph_t *graph;
-    FILE *pidfp;
+
} gf_volfile_t;

glusterfs_ctx_t *
diff --git a/libglusterfs/src/glusterfs/libglusterfs-messages.h b/libglusterfs/src/glusterfs/libglusterfs-messages.h
index 7e0eebb..ea2aa60 100644
--- a/libglusterfs/src/glusterfs/libglusterfs-messages.h
+++ b/libglusterfs/src/glusterfs/libglusterfs-messages.h
@@ -111,7 +111,6 @@ GLFS_MSGID(
LG_MSG_PTHREAD_NAMING_FAILED, LG_MSG_SYSCALL_RETURNS_WRONG,
LG_MSG_XXH64_TO_GFID_FAILED, LG_MSG_ASYNC_WARNING, LG_MSG_ASYNC_FAILURE,
LG_MSG_GRAPH_CLEANUP_FAILED, LG_MSG_GRAPH_SETUP_FAILED,
-    LG_MSG_GRAPH_DETACH_STARTED, LG_MSG_GRAPH_ATTACH_FAILED,
-    LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED);
+    LG_MSG_GRAPH_DETACH_STARTED, LG_MSG_GRAPH_ATTACH_FAILED);

#endif /* !_LG_MESSAGES_H_ */
diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c
index 05f76bf..172dc61 100644
--- a/libglusterfs/src/graph.c
+++ b/libglusterfs/src/graph.c
@@ -1467,21 +1467,6 @@ out:
}

int
-glusterfs_svc_mux_pidfile_cleanup(gf_volfile_t *volfile_obj)
-{
-    if (!volfile_obj || !volfile_obj->pidfp)
-        return 0;
-
-    gf_msg_trace("glusterfsd", 0, "pidfile %s cleanup", volfile_obj->vol_id);
-
-    lockf(fileno(volfile_obj->pidfp), F_ULOCK, 0);
-    fclose(volfile_obj->pidfp);
-    volfile_obj->pidfp = NULL;
-
-    return 0;
-}
-
-int
glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj)
{
xlator_t *last_xl = NULL;
@@ -1517,7 +1502,6 @@ glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj)

list_del_init(&volfile_obj->volfile_list);
glusterfs_mux_xlator_unlink(parent_graph->top, xl);
-    glusterfs_svc_mux_pidfile_cleanup(volfile_obj);
parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph);
parent_graph->xl_count -= graph->xl_count;
parent_graph->leaf_count -= graph->leaf_count;
@@ -1547,126 +1531,8 @@ out:
}

int
-glusterfs_svc_mux_pidfile_setup(gf_volfile_t *volfile_obj, const char *pid_file)
-{
-    int ret = -1;
-    FILE *pidfp = NULL;
-
-    if (!pid_file || !volfile_obj)
-        goto out;
-
-    if (volfile_obj->pidfp) {
-        ret = 0;
-        goto out;
-    }
-    pidfp = fopen(pid_file, "a+");
-    if (!pidfp) {
-        goto out;
-    }
-    volfile_obj->pidfp = pidfp;
-
-    ret = lockf(fileno(pidfp), F_TLOCK, 0);
-    if (ret) {
-        ret = 0;
-        goto out;
-    }
-out:
-    return ret;
-}
-
-int
-glusterfs_svc_mux_pidfile_update(gf_volfile_t *volfile_obj,
-                                 const char *pid_file, pid_t pid)
-{
-    int ret = 0;
-    FILE *pidfp = NULL;
-    int old_pid;
-
-    if (!volfile_obj->pidfp) {
-        ret = glusterfs_svc_mux_pidfile_setup(volfile_obj, pid_file);
-        if (ret == -1)
-            goto out;
-    }
-    pidfp = volfile_obj->pidfp;
-    ret = fscanf(pidfp, "%d", &old_pid);
-    if (ret <= 0) {
-        goto update;
-    }
-    if (old_pid == pid) {
-        ret = 0;
-        goto out;
-    } else {
-        gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED,
-               "Old pid=%d found in pidfile %s. Cleaning the old pid and "
-               "Updating new pid=%d",
-               old_pid, pid_file, pid);
-    }
-update:
-    ret = sys_ftruncate(fileno(pidfp), 0);
-    if (ret) {
-        gf_msg("glusterfsd", GF_LOG_ERROR, errno,
-               LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED,
-               "pidfile %s truncation failed", pid_file);
-        goto out;
-    }
-
-    ret = fprintf(pidfp, "%d\n", pid);
-    if (ret <= 0) {
-        gf_msg("glusterfsd", GF_LOG_ERROR, errno,
-               LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, "pidfile %s write failed",
-               pid_file);
-        goto out;
-    }
-
-    ret = fflush(pidfp);
-    if (ret) {
-        gf_msg("glusterfsd", GF_LOG_ERROR, errno,
-               LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, "pidfile %s write failed",
-               pid_file);
-        goto out;
-    }
-out:
-    return ret;
-}
-
-int
-glusterfs_update_mux_pid(dict_t *dict, gf_volfile_t *volfile_obj)
-{
-    char *file = NULL;
-    int ret = -1;
-
-    GF_VALIDATE_OR_GOTO("graph", dict, out);
-    GF_VALIDATE_OR_GOTO("graph", volfile_obj, out);
-
-    ret = dict_get_str(dict, "pidfile", &file);
-    if (ret < 0) {
-        gf_msg("mgmt", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_SETUP_FAILED,
-               "Failed to get pidfile from dict for volfile_id=%s",
-               volfile_obj->vol_id);
-    }
-
-    ret = glusterfs_svc_mux_pidfile_update(volfile_obj, file, getpid());
-    if (ret < 0) {
-        ret = -1;
-        gf_msg("mgmt", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_SETUP_FAILED,
-               "Failed to update "
-               "the pidfile for volfile_id=%s",
-               volfile_obj->vol_id);
-
-        goto out;
-    }
-
-    if (ret == 1)
-        gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED,
-               "PID %d updated in pidfile=%s", getpid(), file);
-    ret = 0;
-out:
-    return ret;
-}
-int
glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
-                                   char *volfile_id, char *checksum,
-                                   dict_t *dict)
+                                   char *volfile_id, char *checksum)
{
glusterfs_graph_t *graph = NULL;
glusterfs_graph_t *parent_graph = NULL;
@@ -1749,25 +1615,18 @@ glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
ret = -1;
goto out;
}
-    volfile_obj->pidfp = NULL;
-    snprintf(volfile_obj->vol_id, sizeof(volfile_obj->vol_id), "%s",
-             volfile_id);
-
-    if (strcmp(ctx->cmd_args.process_name, "glustershd") == 0) {
-        ret = glusterfs_update_mux_pid(dict, volfile_obj);
-        if (ret == -1) {
-            goto out;
-        }
-    }

graph->used = 1;
parent_graph->id++;
list_add(&graph->list, &ctx->graphs);
INIT_LIST_HEAD(&volfile_obj->volfile_list);
volfile_obj->graph = graph;
+    snprintf(volfile_obj->vol_id, sizeof(volfile_obj->vol_id), "%s",
+             volfile_id);
memcpy(volfile_obj->volfile_checksum, checksum,
sizeof(volfile_obj->volfile_checksum));
list_add_tail(&volfile_obj->volfile_list, &ctx->volfile_list);
+
gf_log_dump_graph(fp, graph);
graph = NULL;

@@ -1795,8 +1654,7 @@ out:

int
glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
-                                  gf_volfile_t *volfile_obj, char *checksum,
-                                  dict_t *dict)
+                                  gf_volfile_t *volfile_obj, char *checksum)
{
glusterfs_graph_t *oldvolfile_graph = NULL;
glusterfs_graph_t *newvolfile_graph = NULL;
@@ -1845,7 +1703,7 @@ glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
}
volfile_obj = NULL;
ret = glusterfs_process_svc_attach_volfp(ctx, newvolfile_fp, vol_id,
-                                             checksum, dict);
+                                             checksum);
goto out;
}

diff --git a/rpc/xdr/src/glusterd1-xdr.x b/rpc/xdr/src/glusterd1-xdr.x
index 02ebec2..9b36d34 100644
--- a/rpc/xdr/src/glusterd1-xdr.x
+++ b/rpc/xdr/src/glusterd1-xdr.x
@@ -132,7 +132,6 @@ struct gd1_mgmt_brick_op_req {
string name<>;
int op;
opaque input<>;
-    opaque dict<>;
} ;

struct gd1_mgmt_brick_op_rsp {
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 94e1be5..ac788a0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -5423,8 +5423,6 @@ glusterd_print_client_details(FILE *fp, dict_t *dict,

brick_req->op = GLUSTERD_BRICK_STATUS;
brick_req->name = "";
-    brick_req->dict.dict_val = NULL;
-    brick_req->dict.dict_len = 0;

ret = dict_set_strn(dict, "brick-name", SLEN("brick-name"),
brickinfo->path);
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index 86dec82..1ba58c3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -203,7 +203,7 @@ out:

size_t
build_volfile_path(char *volume_id, char *path, size_t path_len,
-                   char *trusted_str, dict_t *dict)
+                   char *trusted_str)
{
struct stat stbuf = {
0,
@@ -340,19 +340,11 @@ build_volfile_path(char *volume_id, char *path, size_t path_len,

ret = glusterd_volinfo_find(volid_ptr, &volinfo);
if (ret == -1) {
-        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
-               "Couldn't find volinfo for volid=%s", volid_ptr);
+        gf_log(this->name, GF_LOG_ERROR, "Couldn't find volinfo");
goto out;
}

glusterd_svc_build_shd_volfile_path(volinfo, path, path_len);
-
-    ret = glusterd_svc_set_shd_pidfile(volinfo, dict);
-    if (ret == -1) {
-        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
-               "Couldn't set pidfile in dict for volid=%s", volid_ptr);
-        goto out;
-    }
ret = 0;
goto out;
}
@@ -927,7 +919,6 @@ __server_getspec(rpcsvc_request_t *req)
char addrstr[RPCSVC_PEER_STRLEN] = {0};
peer_info_t *peerinfo = NULL;
xlator_t *this = NULL;
-    dict_t *dict = NULL;

this = THIS;
GF_ASSERT(this);
@@ -980,12 +971,6 @@ __server_getspec(rpcsvc_request_t *req)
goto fail;
}

-    dict = dict_new();
-    if (!dict) {
-        ret = -ENOMEM;
-        goto fail;
-    }
-
trans = req->trans;
/* addrstr will be empty for cli socket connections */
ret = rpcsvc_transport_peername(trans, (char *)&addrstr, sizeof(addrstr));
@@ -1004,26 +989,12 @@ __server_getspec(rpcsvc_request_t *req)
*/
if (strlen(addrstr) == 0 || gf_is_local_addr(addrstr)) {
ret = build_volfile_path(volume, filename, sizeof(filename),
-                                 TRUSTED_PREFIX, dict);
+                                 TRUSTED_PREFIX);
} else {
-        ret = build_volfile_path(volume, filename, sizeof(filename), NULL,
-                                 dict);
+        ret = build_volfile_path(volume, filename, sizeof(filename), NULL);
}

if (ret == 0) {
-        if (dict->count > 0) {
-            ret = dict_allocate_and_serialize(dict, &rsp.xdata.xdata_val,
-                                              &rsp.xdata.xdata_len);
-            if (ret) {
-                gf_msg(this->name, GF_LOG_ERROR, 0,
-                       GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
-                       "Failed to serialize dict "
-                       "to request buffer");
-                goto fail;
-            }
-            dict->extra_free = rsp.xdata.xdata_val;
-        }
-
/* to allocate the proper buffer to hold the file data */
ret = sys_stat(filename, &stbuf);
if (ret < 0) {
@@ -1065,6 +1036,7 @@ __server_getspec(rpcsvc_request_t *req)
goto fail;
}
}
+
/* convert to XDR */
fail:
if (spec_fd >= 0)
@@ -1084,10 +1056,6 @@ fail:
(xdrproc_t)xdr_gf_getspec_rsp);
free(args.key); // malloced by xdr
free(rsp.spec);
-
-    if (dict)
-        dict_unref(dict);
-
if (args.xdata.xdata_val)
free(args.xdata.xdata_val);

diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 454877b..9ea695e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -655,8 +655,6 @@ glusterd_brick_op_build_payload(glusterd_op_t op,
break;
}

-    brick_req->dict.dict_len = 0;
-    brick_req->dict.dict_val = NULL;
ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
&brick_req->input.input_len);
if (ret)
@@ -725,8 +723,6 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req,
goto out;
}

-    brick_req->dict.dict_len = 0;
-    brick_req->dict.dict_val = NULL;
ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
&brick_req->input.input_len);

diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
index 5661e39..57ceda9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
@@ -126,28 +126,3 @@ glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd)
out:
return;
}
-
-int
-glusterd_svc_set_shd_pidfile(glusterd_volinfo_t *volinfo, dict_t *dict)
-{
-    int ret = -1;
-    glusterd_svc_t *svc = NULL;
-    xlator_t *this = NULL;
-
-    this = THIS;
-    GF_VALIDATE_OR_GOTO("glusterd", this, out);
-    GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
-    GF_VALIDATE_OR_GOTO(this->name, dict, out);
-
-    svc = &(volinfo->shd.svc);
-
-    ret = dict_set_dynstr_with_alloc(dict, "pidfile", svc->proc.pidfile);
-    if (ret) {
-        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
-               "Failed to set pidfile %s in dict", svc->proc.pidfile);
-        goto out;
-    }
-    ret = 0;
-out:
-    return ret;
-}
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
index 1f0984b..59466ec 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
@@ -36,7 +36,4 @@ glusterd_recover_shd_attach_failure(glusterd_volinfo_t *volinfo,
int
glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo);

-int
-glusterd_svc_set_shd_pidfile(glusterd_volinfo_t *volinfo, dict_t *dict);
-
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
index 590169f..8ad90a9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
@@ -258,20 +258,14 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
gf_boolean_t shd_restart = _gf_false;

conf = THIS->private;
+    volinfo = data;
GF_VALIDATE_OR_GOTO("glusterd", conf, out);
GF_VALIDATE_OR_GOTO("glusterd", svc, out);
-    volinfo = data;
GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);

if (volinfo)
glusterd_volinfo_ref(volinfo);

-    if (volinfo->is_snap_volume) {
-        /* healing of a snap volume is not supported yet*/
-        ret = 0;
-        goto out;
-    }
-
while (conf->restart_shd) {
synclock_unlock(&conf->big_lock);
sleep(2);
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
index e106111..400826f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
@@ -519,7 +519,7 @@ glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc)
/* Take first entry from the process */
parent_svc = cds_list_entry(mux_proc->svcs.next, glusterd_svc_t,
mux_svc);
-            glusterd_copy_file(parent_svc->proc.pidfile, svc->proc.pidfile);
+            sys_link(parent_svc->proc.pidfile, svc->proc.pidfile);
mux_conn = &parent_svc->conn;
if (volinfo)
volinfo->shd.attached = _gf_true;
@@ -623,9 +623,12 @@ glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count,
glusterd_volinfo_t *volinfo = NULL;
glusterd_shdsvc_t *shd = NULL;
glusterd_svc_t *svc = frame->cookie;
+    glusterd_svc_t *parent_svc = NULL;
+    glusterd_svc_proc_t *mux_proc = NULL;
glusterd_conf_t *conf = NULL;
int *flag = (int *)frame->local;
xlator_t *this = THIS;
+    int pid = -1;
int ret = -1;
gf_getspec_rsp rsp = {
0,
@@ -676,7 +679,27 @@ glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count,
}

if (rsp.op_ret == 0) {
-        svc->online = _gf_true;
+        pthread_mutex_lock(&conf->attach_lock);
+        {
+            if (!strcmp(svc->name, "glustershd")) {
+                mux_proc = svc->svc_proc;
+                if (mux_proc &&
+                    !gf_is_service_running(svc->proc.pidfile, &pid)) {
+                    /*
+                     * When svc's are restarting, there is a chance that the
+                     * attached svc might not have updated it's pid. Because
+                     * it was at connection stage. So in that case, we need
+                     * to retry the pid file copy.
+                     */
+                    parent_svc = cds_list_entry(mux_proc->svcs.next,
+                                                glusterd_svc_t, mux_svc);
+                    if (parent_svc)
+                        sys_link(parent_svc->proc.pidfile, svc->proc.pidfile);
+                }
+            }
+            svc->online = _gf_true;
+        }
+        pthread_mutex_unlock(&conf->attach_lock);
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_ATTACH_FAIL,
"svc %s of volume %s attached successfully to pid %d", svc->name,
volinfo->volname, glusterd_proc_get_pid(&svc->proc));
@@ -703,7 +726,7 @@ out:

extern size_t
build_volfile_path(char *volume_id, char *path, size_t path_len,
-                   char *trusted_str, dict_t *dict);
+                   char *trusted_str);

int
__glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
@@ -728,7 +751,6 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
ssize_t req_size = 0;
call_frame_t *frame = NULL;
gd1_mgmt_brick_op_req brick_req;
-    dict_t *dict = NULL;
void *req = &brick_req;
void *errlbl = &&err;
struct rpc_clnt_connection *conn;
@@ -754,8 +776,6 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
brick_req.name = volfile_id;
brick_req.input.input_val = NULL;
brick_req.input.input_len = 0;
-    brick_req.dict.dict_val = NULL;
-    brick_req.dict.dict_len = 0;

frame = create_frame(this, this->ctx->pool);
if (!frame) {
@@ -763,13 +783,7 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
}

if (op == GLUSTERD_SVC_ATTACH) {
-        dict = dict_new();
-        if (!dict) {
-            ret = -ENOMEM;
-            goto *errlbl;
-        }
-
-        (void)build_volfile_path(volfile_id, path, sizeof(path), NULL, dict);
+        (void)build_volfile_path(volfile_id, path, sizeof(path), NULL);

ret = sys_stat(path, &stbuf);
if (ret < 0) {
@@ -804,18 +818,6 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
ret = -EIO;
goto *errlbl;
}
-        if (dict->count > 0) {
-            ret = dict_allocate_and_serialize(dict, &brick_req.dict.dict_val,
-                                              &brick_req.dict.dict_len);
-            if (ret) {
-                gf_msg(this->name, GF_LOG_ERROR, 0,
-                       GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
-                       "Failed to serialize dict "
-                       "to request buffer");
-                goto *errlbl;
-            }
-            dict->extra_free = brick_req.dict.dict_val;
-        }

frame->cookie = svc;
frame->local = GF_CALLOC(1, sizeof(int), gf_gld_mt_int);
@@ -860,8 +862,6 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
GF_ATOMIC_INC(conf->blockers);
ret = rpc_clnt_submit(rpc, &gd_brick_prog, op, cbkfn, &iov, 1, NULL, 0,
iobref, frame, NULL, 0, NULL, 0, NULL);
-    if (dict)
-        dict_unref(dict);
GF_FREE(volfile_content);
if (spec_fd >= 0)
sys_close(spec_fd);
@@ -874,9 +874,6 @@ maybe_free_iobuf:
iobuf_unref(iobuf);
}
err:
-    if (dict)
-        dict_unref(dict);
-
GF_FREE(volfile_content);
if (spec_fd >= 0)
sys_close(spec_fd);
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index a8098df..618d8bc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -143,8 +143,6 @@ gd_brick_op_req_free(gd1_mgmt_brick_op_req *req)
if (!req)
return;

-    if (req->dict.dict_val)
-        GF_FREE(req->dict.dict_val);
GF_FREE(req->input.input_val);
GF_FREE(req);
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 3bdfd49..4525ec7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -5914,8 +5914,6 @@ send_attach_req(xlator_t *this, struct rpc_clnt *rpc, char *path,
brick_req.name = path;
brick_req.input.input_val = NULL;
brick_req.input.input_len = 0;
-    brick_req.dict.dict_val = NULL;
-    brick_req.dict.dict_len = 0;

req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req);
iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size);
@@ -5979,7 +5977,7 @@ err:

extern size_t
build_volfile_path(char *volume_id, char *path, size_t path_len,
-                   char *trusted_str, dict_t *dict);
+                   char *trusted_str);

static int
attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo,
@@ -6024,7 +6022,7 @@ attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo,
goto out;
}

-    (void)build_volfile_path(full_id, path, sizeof(path), NULL, NULL);
+    (void)build_volfile_path(full_id, path, sizeof(path), NULL);

for (tries = 15; tries > 0; --tries) {
rpc = rpc_clnt_ref(other_brick->rpc);
--
1.8.3.1
180
0231-Revert-graph-shd-Use-top-down-approach-while-cleanin.patch
Normal file
@@ -0,0 +1,180 @@
From 21f376939f03f91214218c485e7d3a2848dae4b2 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:43:44 +0530
Subject: [PATCH 231/255] Revert "graph/shd: Use top down approach while
cleaning xlator"

This reverts commit b963fa8bb71963127147d33bf609f439dd5bd107.

Label : DOWNSTREAM ONLY

BUG: 1471742
Change-Id: Ifb8056395c5988cf7c484891bea052f5415bf9da
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175941
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/graph.c | 10 +---------
xlators/features/bit-rot/src/stub/bit-rot-stub.c | 1 -
xlators/features/changelog/src/changelog.c | 1 -
xlators/features/cloudsync/src/cloudsync.c | 4 +---
xlators/features/index/src/index.c | 1 -
xlators/features/quiesce/src/quiesce.c | 1 -
xlators/features/read-only/src/worm.c | 1 -
xlators/features/sdfs/src/sdfs.c | 1 -
xlators/features/selinux/src/selinux.c | 2 --
xlators/features/trash/src/trash.c | 1 -
10 files changed, 2 insertions(+), 21 deletions(-)

diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c
index 172dc61..5b95fd6 100644
--- a/libglusterfs/src/graph.c
+++ b/libglusterfs/src/graph.c
@@ -1193,14 +1193,6 @@ glusterfs_graph_fini(glusterfs_graph_t *graph)
if (trav->init_succeeded) {
trav->cleanup_starting = 1;
trav->fini(trav);
-            if (trav->local_pool) {
-                mem_pool_destroy(trav->local_pool);
-                trav->local_pool = NULL;
-            }
-            if (trav->itable) {
-                inode_table_destroy(trav->itable);
-                trav->itable = NULL;
-            }
trav->init_succeeded = 0;
}
trav = trav->next;
@@ -1402,7 +1394,7 @@ glusterfs_graph_cleanup(void *arg)

pthread_mutex_lock(&ctx->cleanup_lock);
{
-        glusterfs_graph_fini(graph);
+        glusterfs_graph_deactivate(graph);
glusterfs_graph_destroy(graph);
}
pthread_mutex_unlock(&ctx->cleanup_lock);
diff --git a/xlators/features/bit-rot/src/stub/bit-rot-stub.c b/xlators/features/bit-rot/src/stub/bit-rot-stub.c
index 03446be..3f48a4b 100644
--- a/xlators/features/bit-rot/src/stub/bit-rot-stub.c
+++ b/xlators/features/bit-rot/src/stub/bit-rot-stub.c
@@ -185,7 +185,6 @@ cleanup_lock:
pthread_mutex_destroy(&priv->lock);
free_mempool:
mem_pool_destroy(priv->local_pool);
-    priv->local_pool = NULL;
free_priv:
GF_FREE(priv);
this->private = NULL;
diff --git a/xlators/features/changelog/src/changelog.c b/xlators/features/changelog/src/changelog.c
index 2862d1e..d9025f3 100644
--- a/xlators/features/changelog/src/changelog.c
+++ b/xlators/features/changelog/src/changelog.c
@@ -2790,7 +2790,6 @@ cleanup_options:
changelog_freeup_options(this, priv);
cleanup_mempool:
mem_pool_destroy(this->local_pool);
-    this->local_pool = NULL;
cleanup_priv:
GF_FREE(priv);
error_return:
diff --git a/xlators/features/cloudsync/src/cloudsync.c b/xlators/features/cloudsync/src/cloudsync.c
index 0ad987e..26e512c 100644
--- a/xlators/features/cloudsync/src/cloudsync.c
+++ b/xlators/features/cloudsync/src/cloudsync.c
@@ -200,10 +200,8 @@ cs_init(xlator_t *this)

out:
if (ret == -1) {
-        if (this->local_pool) {
+        if (this->local_pool)
mem_pool_destroy(this->local_pool);
-            this->local_pool = NULL;
-        }

cs_cleanup_private(priv);

diff --git a/xlators/features/index/src/index.c b/xlators/features/index/src/index.c
index 4ece7ff..2f2a6d0 100644
--- a/xlators/features/index/src/index.c
+++ b/xlators/features/index/src/index.c
@@ -2478,7 +2478,6 @@ out:
GF_FREE(priv);
this->private = NULL;
mem_pool_destroy(this->local_pool);
-        this->local_pool = NULL;
}

if (attr_inited)
diff --git a/xlators/features/quiesce/src/quiesce.c b/xlators/features/quiesce/src/quiesce.c
index 06f58c9..bfd1116 100644
--- a/xlators/features/quiesce/src/quiesce.c
+++ b/xlators/features/quiesce/src/quiesce.c
@@ -2536,7 +2536,6 @@ fini(xlator_t *this)
this->private = NULL;

mem_pool_destroy(priv->local_pool);
-    priv->local_pool = NULL;
LOCK_DESTROY(&priv->lock);
GF_FREE(priv);
out:
diff --git a/xlators/features/read-only/src/worm.c b/xlators/features/read-only/src/worm.c
index 7d13180..24196f8 100644
--- a/xlators/features/read-only/src/worm.c
+++ b/xlators/features/read-only/src/worm.c
@@ -569,7 +569,6 @@ fini(xlator_t *this)
mem_put(priv);
this->private = NULL;
mem_pool_destroy(this->local_pool);
-    this->local_pool = NULL;
out:
return;
}
diff --git a/xlators/features/sdfs/src/sdfs.c b/xlators/features/sdfs/src/sdfs.c
index 164c632..f0247fd 100644
--- a/xlators/features/sdfs/src/sdfs.c
+++ b/xlators/features/sdfs/src/sdfs.c
@@ -1429,7 +1429,6 @@ void
fini(xlator_t *this)
{
mem_pool_destroy(this->local_pool);
-    this->local_pool = NULL;
return;
}

diff --git a/xlators/features/selinux/src/selinux.c b/xlators/features/selinux/src/selinux.c
index ce5fc90..58b4c5d 100644
--- a/xlators/features/selinux/src/selinux.c
+++ b/xlators/features/selinux/src/selinux.c
@@ -256,7 +256,6 @@ out:
GF_FREE(priv);
}
mem_pool_destroy(this->local_pool);
-    this->local_pool = NULL;
}
return ret;
}
@@ -285,7 +284,6 @@ fini(xlator_t *this)
GF_FREE(priv);

mem_pool_destroy(this->local_pool);
-    this->local_pool = NULL;

return;
}
diff --git a/xlators/features/trash/src/trash.c b/xlators/features/trash/src/trash.c
index eb5007b..d668436 100644
--- a/xlators/features/trash/src/trash.c
+++ b/xlators/features/trash/src/trash.c
@@ -2523,7 +2523,6 @@ out:
GF_FREE(priv);
}
mem_pool_destroy(this->local_pool);
-        this->local_pool = NULL;
}
return ret;
}
--
1.8.3.1
228
0232-cluster-afr-Fix-incorrect-reporting-of-gfid-type-mis.patch
Normal file
@@ -0,0 +1,228 @@
From 3ddf12d0710e048878fcf8786d05efe18710c74c Mon Sep 17 00:00:00 2001
From: karthik-us <ksubrahm@redhat.com>
Date: Fri, 12 Jul 2019 16:44:20 +0530
Subject: [PATCH 232/255] cluster/afr: Fix incorrect reporting of gfid & type
mismatch

Backport of: https://review.gluster.org/#/c/glusterfs/+/22908/

Problems:
1. When checking for type and gfid mismatch, if the type or gfid
is unknown because of missing gfid handle and the gfid xattr
it will be reported as type or gfid mismatch and the heal will
not complete.

2. If the source selected during entry heal has null gfid the same
will be sent to afr_lookup_and_heal_gfid(). In this function when
we try to assign the gfid on the bricks where it does not exist,
we are considering the same gfid and try to assign that on those
bricks. This will fail in posix_gfid_set() since the gfid sent
is null.

Fix:
If the gfid sent to afr_lookup_and_heal_gfid() is null choose a
valid gfid before proceeding to assign the gfid on the bricks
where it is missing.

In afr_selfheal_detect_gfid_and_type_mismatch(), do not report
type/gfid mismatch if the type/gfid is unknown or not set.

Change-Id: Icdb4967c09a48e0a3a64ce4948d5fb0a06d7a7af
fixes: bz#1715447
Signed-off-by: karthik-us <ksubrahm@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175966
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
.../bug-1722507-type-mismatch-error-handling.t | 116 +++++++++++++++++++++
xlators/cluster/afr/src/afr-self-heal-common.c | 12 ++-
xlators/cluster/afr/src/afr-self-heal-entry.c | 13 +++
3 files changed, 139 insertions(+), 2 deletions(-)
create mode 100644 tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t

diff --git a/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
new file mode 100644
index 0000000..0aeaaaf
--- /dev/null
+++ b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume heal $V0 disable
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+
+##########################################################################################
+# GFID link file and the GFID is missing on one brick and all the bricks are being blamed.
+
+TEST touch $M0/dir/file
+#TEST kill_brick $V0 $H0 $B0/$V0"1"
+
+#B0 and B2 must blame B1
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Remove the gfid xattr and the link file on one brick.
+gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file)
+gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file)
+TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file
||||
+TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
|
||||
+
|
||||
+# Launch heal
|
||||
+TEST $CLI volume heal $V0 enable
|
||||
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
|
||||
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
|
||||
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
|
||||
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
|
||||
+
|
||||
+# Wait for 2 second to force posix to consider that this is a valid file but
|
||||
+# without gfid.
|
||||
+sleep 2
|
||||
+TEST $CLI volume heal $V0
|
||||
+
|
||||
+# Heal should not fail as the file is missing gfid xattr and the link file,
|
||||
+# which is not actually the gfid or type mismatch.
|
||||
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
|
||||
+
|
||||
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file
|
||||
+TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
|
||||
+rm -f $M0/dir/file
|
||||
+
|
||||
+
|
||||
+###########################################################################################
|
||||
+# GFID link file and the GFID is missing on two bricks and all the bricks are being blamed.
|
||||
+
|
||||
+TEST $CLI volume heal $V0 disable
|
||||
+TEST touch $M0/dir/file
|
||||
+#TEST kill_brick $V0 $H0 $B0/$V0"1"
|
||||
+
|
||||
+#B0 and B2 must blame B1
|
||||
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
|
||||
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
|
||||
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
|
||||
+
|
||||
+# Add entry to xattrop dir to trigger index heal.
|
||||
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
|
||||
+base_entry_b0=`ls $xattrop_dir0`
|
||||
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
|
||||
+ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
|
||||
+EXPECT "^1$" get_pending_heal_count $V0
|
||||
+
|
||||
+# Remove the gfid xattr and the link file on two bricks.
|
||||
+gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file)
|
||||
+gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file)
|
||||
+TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file
|
||||
+TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
|
||||
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir/file
|
||||
+TEST rm -f $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
|
||||
+
|
||||
+# Launch heal
|
||||
+TEST $CLI volume heal $V0 enable
|
||||
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
|
||||
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
|
||||
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
|
||||
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
|
||||
+
|
||||
+# Wait for 2 second to force posix to consider that this is a valid file but
|
||||
+# without gfid.
|
||||
+sleep 2
|
||||
+TEST $CLI volume heal $V0
|
||||
+
|
||||
+# Heal should not fail as the file is missing gfid xattr and the link file,
|
||||
+# which is not actually the gfid or type mismatch.
|
||||
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
|
||||
+
|
||||
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file
|
||||
+TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
|
||||
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}1/dir/file
|
||||
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
|
||||
+
|
||||
+cleanup
|
||||
diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
|
||||
index 5157e7d..b38085a 100644
|
||||
--- a/xlators/cluster/afr/src/afr-self-heal-common.c
|
||||
+++ b/xlators/cluster/afr/src/afr-self-heal-common.c
|
||||
@@ -55,7 +55,8 @@ afr_lookup_and_heal_gfid(xlator_t *this, inode_t *parent, const char *name,
|
||||
for (i = 0; i < priv->child_count; i++) {
|
||||
if (source == -1) {
|
||||
/* case (a) above. */
|
||||
- if (replies[i].valid && replies[i].op_ret == 0) {
|
||||
+ if (replies[i].valid && replies[i].op_ret == 0 &&
|
||||
+ replies[i].poststat.ia_type != IA_INVAL) {
|
||||
ia_type = replies[i].poststat.ia_type;
|
||||
break;
|
||||
}
|
||||
@@ -63,7 +64,8 @@ afr_lookup_and_heal_gfid(xlator_t *this, inode_t *parent, const char *name,
|
||||
/* case (b) above. */
|
||||
if (i == source)
|
||||
continue;
|
||||
- if (sources[i] && replies[i].valid && replies[i].op_ret == 0) {
|
||||
+ if (sources[i] && replies[i].valid && replies[i].op_ret == 0 &&
|
||||
+ replies[i].poststat.ia_type != IA_INVAL) {
|
||||
ia_type = replies[i].poststat.ia_type;
|
||||
break;
|
||||
}
|
||||
@@ -77,6 +79,12 @@ heal:
|
||||
for (i = 0; i < priv->child_count; i++) {
|
||||
if (!replies[i].valid || replies[i].op_ret != 0)
|
||||
continue;
|
||||
+
|
||||
+ if (gf_uuid_is_null(gfid) &&
|
||||
+ !gf_uuid_is_null(replies[i].poststat.ia_gfid) &&
|
||||
+ replies[i].poststat.ia_type == ia_type)
|
||||
+ gfid = replies[i].poststat.ia_gfid;
|
||||
+
|
||||
if (!gf_uuid_is_null(replies[i].poststat.ia_gfid) ||
|
||||
replies[i].poststat.ia_type != ia_type)
|
||||
continue;
|
||||
diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c
|
||||
index a6890fa..e07b521 100644
|
||||
--- a/xlators/cluster/afr/src/afr-self-heal-entry.c
|
||||
+++ b/xlators/cluster/afr/src/afr-self-heal-entry.c
|
||||
@@ -246,6 +246,19 @@ afr_selfheal_detect_gfid_and_type_mismatch(xlator_t *this,
|
||||
if (replies[i].op_ret != 0)
|
||||
continue;
|
||||
|
||||
+ if (gf_uuid_is_null(replies[i].poststat.ia_gfid))
|
||||
+ continue;
|
||||
+
|
||||
+ if (replies[i].poststat.ia_type == IA_INVAL)
|
||||
+ continue;
|
||||
+
|
||||
+ if (ia_type == IA_INVAL || gf_uuid_is_null(gfid)) {
|
||||
+ src_idx = i;
|
||||
+ ia_type = replies[src_idx].poststat.ia_type;
|
||||
+ gfid = &replies[src_idx].poststat.ia_gfid;
|
||||
+ continue;
|
||||
+ }
|
||||
+
|
||||
if (gf_uuid_compare(gfid, replies[i].poststat.ia_gfid) &&
|
||||
(ia_type == replies[i].poststat.ia_type)) {
|
||||
ret = afr_gfid_split_brain_source(this, replies, inode, pargfid,
|
||||
--
|
||||
1.8.3.1
|
||||
|
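Editor's note: the guard patch 232 adds boils down to one rule — while scanning replies, an entry whose gfid or type is still unknown is neither a heal source nor a mismatch, and the first fully-known reply becomes the comparison source. A minimal standalone sketch of that rule follows; the reply_t struct, is_null_gfid() helper, and the single combined mismatch check are simplified stand-ins for afr's struct afr_reply, gf_uuid_is_null(), and its separate gfid/type split-brain paths, not the real GlusterFS types.

#include <stdio.h>
#include <string.h>

#define GFID_LEN 16

typedef enum { IA_INVAL = 0, IA_IFREG, IA_IFDIR } ia_type_t;

typedef struct {
    int valid;
    int op_ret;
    unsigned char gfid[GFID_LEN];
    ia_type_t type;
} reply_t;

static int is_null_gfid(const unsigned char *g)
{
    static const unsigned char zero[GFID_LEN];
    return memcmp(g, zero, GFID_LEN) == 0;
}

/* Returns 1 only when two fully-known replies disagree; replies with a
 * null gfid or IA_INVAL type are treated as "unknown" and skipped, which
 * is the behaviour the patch describes. */
static int detect_mismatch(const reply_t *replies, int count)
{
    const unsigned char *gfid = NULL;
    ia_type_t ia_type = IA_INVAL;
    int i;

    for (i = 0; i < count; i++) {
        if (!replies[i].valid || replies[i].op_ret != 0)
            continue;
        if (is_null_gfid(replies[i].gfid) || replies[i].type == IA_INVAL)
            continue; /* unknown entry: neither source nor mismatch */
        if (ia_type == IA_INVAL) {
            gfid = replies[i].gfid;   /* first known reply becomes source */
            ia_type = replies[i].type;
            continue;
        }
        if (memcmp(gfid, replies[i].gfid, GFID_LEN) != 0 ||
            ia_type != replies[i].type)
            return 1;
    }
    return 0;
}

int main(void)
{
    reply_t replies[3] = {
        {1, 0, {1}, IA_IFREG},
        {1, 0, {0}, IA_INVAL}, /* brick missing the gfid xattr */
        {1, 0, {1}, IA_IFREG},
    };
    /* Prints 0: the unknown middle reply is no longer a false mismatch. */
    printf("mismatch: %d\n", detect_mismatch(replies, 3));
    return 0;
}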
@@ -0,0 +1,78 @@
From 5c85ce7363b658bc8fa643742626109efe3ade0c Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:44:04 +0530
Subject: [PATCH 233/255] Revert "graph/shd: Use glusterfs_graph_deactivate to
free the xl rec"

This reverts commit 8cc6d8af00303c445b94715c92fe9e3e01edb867.

BUG: 1471742
Change-Id: Ib90fe89b85f4143db29702338decec76c83872bc
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175942
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/graph.c | 2 +-
libglusterfs/src/xlator.c | 9 +--------
xlators/features/shard/src/shard.c | 3 ---
3 files changed, 2 insertions(+), 12 deletions(-)

diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c
index 5b95fd6..27d9335 100644
--- a/libglusterfs/src/graph.c
+++ b/libglusterfs/src/graph.c
@@ -1394,7 +1394,7 @@ glusterfs_graph_cleanup(void *arg)

pthread_mutex_lock(&ctx->cleanup_lock);
{
- glusterfs_graph_deactivate(graph);
+ glusterfs_graph_fini(graph);
glusterfs_graph_destroy(graph);
}
pthread_mutex_unlock(&ctx->cleanup_lock);
diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c
index d9d3441..71e1ed4 100644
--- a/libglusterfs/src/xlator.c
+++ b/libglusterfs/src/xlator.c
@@ -659,7 +659,6 @@ xlator_fini_rec(xlator_t *xl)
trav = trav->next;
}

- xl->cleanup_starting = 1;
if (xl->init_succeeded) {
if (xl->fini) {
old_THIS = THIS;
@@ -667,14 +666,8 @@ xlator_fini_rec(xlator_t *xl)

xl->fini(xl);

- if (xl->local_pool) {
+ if (xl->local_pool)
mem_pool_destroy(xl->local_pool);
- xl->local_pool = NULL;
- }
- if (xl->itable) {
- inode_table_destroy(xl->itable);
- xl->itable = NULL;
- }

THIS = old_THIS;
} else {
diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c
index 31c7eec..b248767 100644
--- a/xlators/features/shard/src/shard.c
+++ b/xlators/features/shard/src/shard.c
@@ -6785,9 +6785,6 @@ fini(xlator_t *this)

GF_VALIDATE_OR_GOTO("shard", this, out);

- /*Itable was not created by shard, hence setting to NULL.*/
- this->itable = NULL;
-
mem_pool_destroy(this->local_pool);
this->local_pool = NULL;

--
1.8.3.1

220
0234-Revert-glusterd-shd-Change-shd-logfile-to-a-unique-n.patch
Normal file
@@ -0,0 +1,220 @@
From feeee9a35c1219b2077ea07b6fd80976960bd181 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:44:42 +0530
Subject: [PATCH 234/255] Revert "glusterd/shd: Change shd logfile to a unique
name"

This reverts commit 541e1400ecaec5fea0f56e8ca18f00c229906d8a.

BUG: 1471742
Change-Id: I7e0371d77db6897981f7364c04d4b9b523b865ba
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175943
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
.../mgmt/glusterd/src/glusterd-shd-svc-helper.c | 12 ++++++++
.../mgmt/glusterd/src/glusterd-shd-svc-helper.h | 6 ++++
xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 14 ++++-----
xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 34 +++++-----------------
xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c | 4 +--
xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h | 4 ---
6 files changed, 34 insertions(+), 40 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
index 57ceda9..9196758 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
@@ -75,6 +75,18 @@ glusterd_svc_build_shd_volfile_path(glusterd_volinfo_t *volinfo, char *path,
}

void
+glusterd_svc_build_shd_logdir(char *logdir, char *volname, size_t len)
+{
+ snprintf(logdir, len, "%s/shd/%s", DEFAULT_LOG_FILE_DIRECTORY, volname);
+}
+
+void
+glusterd_svc_build_shd_logfile(char *logfile, char *logdir, size_t len)
+{
+ snprintf(logfile, len, "%s/shd.log", logdir);
+}
+
+void
glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd)
{
glusterd_svc_proc_t *svc_proc = NULL;
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
index 59466ec..c70702c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
@@ -27,6 +27,12 @@ glusterd_svc_build_shd_volfile_path(glusterd_volinfo_t *volinfo, char *path,
int path_len);

void
+glusterd_svc_build_shd_logdir(char *logdir, char *volname, size_t len);
+
+void
+glusterd_svc_build_shd_logfile(char *logfile, char *logdir, size_t len);
+
+void
glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd);

int
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
index 8ad90a9..dbe2560 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
@@ -90,8 +90,8 @@ glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn,
GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv);
glusterd_svc_create_rundir(rundir);

- glusterd_svc_build_logfile_path(shd_svc_name, DEFAULT_LOG_FILE_DIRECTORY,
- logfile, sizeof(logfile));
+ glusterd_svc_build_shd_logdir(logdir, volinfo->volname, sizeof(logdir));
+ glusterd_svc_build_shd_logfile(logfile, logdir, sizeof(logfile));

/* Initialize the connection mgmt */
if (mux_conn && mux_svc->rpc) {
@@ -104,7 +104,7 @@ glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn,
if (ret < 0)
goto out;
} else {
- ret = mkdir_p(DEFAULT_LOG_FILE_DIRECTORY, 0755, _gf_true);
+ ret = mkdir_p(logdir, 0755, _gf_true);
if ((ret == -1) && (EEXIST != errno)) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED,
"Unable to create logdir %s", logdir);
@@ -460,7 +460,6 @@ glusterd_shdsvc_start(glusterd_svc_t *svc, int flags)
return -1;

glusterd_volinfo_ref(volinfo);
-
if (!svc->inited) {
ret = glusterd_shd_svc_mux_init(volinfo, svc);
if (ret)
@@ -472,11 +471,12 @@ glusterd_shdsvc_start(glusterd_svc_t *svc, int flags)
/* Unref will happen from glusterd_svc_attach_cbk */
ret = glusterd_attach_svc(svc, volinfo, flags);
if (ret) {
+ glusterd_volinfo_unref(volinfo);
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
- "Failed to attach shd svc(volume=%s) to pid=%d",
+ "Failed to attach shd svc(volume=%s) to pid=%d. Starting"
+ "a new process",
volinfo->volname, glusterd_proc_get_pid(&svc->proc));
- glusterd_shd_svcproc_cleanup(&volinfo->shd);
- glusterd_volinfo_unref(volinfo);
+ ret = glusterd_recover_shd_attach_failure(volinfo, svc, flags);
}
goto out;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
index 400826f..a6e662f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
@@ -469,9 +469,6 @@ glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc)
glusterd_conf_t *conf = NULL;
glusterd_svc_t *parent_svc = NULL;
int pid = -1;
- char pidfile[PATH_MAX] = {
- 0,
- };

GF_VALIDATE_OR_GOTO("glusterd", svc, out);
GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);
@@ -481,26 +478,8 @@ glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc)

pthread_mutex_lock(&conf->attach_lock);
{
- if (svc->inited && !glusterd_proc_is_running(&(svc->proc))) {
- /* This is the case when shd process was abnormally killed */
- pthread_mutex_unlock(&conf->attach_lock);
- glusterd_shd_svcproc_cleanup(&volinfo->shd);
- pthread_mutex_lock(&conf->attach_lock);
- }
-
if (!svc->inited) {
- glusterd_svc_build_shd_pidfile(volinfo, pidfile, sizeof(pidfile));
- ret = snprintf(svc->proc.name, sizeof(svc->proc.name), "%s",
- "glustershd");
- if (ret < 0)
- goto unlock;
-
- ret = snprintf(svc->proc.pidfile, sizeof(svc->proc.pidfile), "%s",
- pidfile);
- if (ret < 0)
- goto unlock;
-
- if (gf_is_service_running(pidfile, &pid)) {
+ if (gf_is_service_running(svc->proc.pidfile, &pid)) {
/* Just connect is required, but we don't know what happens
* during the disconnect. So better to reattach.
*/
@@ -508,10 +487,10 @@ glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc)
}

if (!mux_proc) {
- if (pid != -1 && sys_access(pidfile, R_OK) == 0) {
+ if (pid != -1 && sys_access(svc->proc.pidfile, R_OK) == 0) {
/* stale pid file, stop and unlink it */
glusterd_proc_stop(&svc->proc, SIGTERM, PROC_STOP_FORCE);
- glusterd_unlink_file(pidfile);
+ glusterd_unlink_file(svc->proc.pidfile);
}
mux_proc = __gf_find_compatible_svc(GD_NODE_SHD);
}
@@ -705,10 +684,11 @@ glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count,
volinfo->volname, glusterd_proc_get_pid(&svc->proc));
} else {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL,
- "svc %s of volume %s failed to attach to pid %d", svc->name,
- volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+ "svc %s of volume %s failed to "
+ "attach to pid %d. Starting a new process",
+ svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc));
if (!strcmp(svc->name, "glustershd")) {
- glusterd_shd_svcproc_cleanup(&volinfo->shd);
+ glusterd_recover_shd_attach_failure(volinfo, svc, *flag);
}
}
out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
index fa316a6..f32dafc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
@@ -33,14 +33,14 @@ glusterd_svc_create_rundir(char *rundir)
return ret;
}

-void
+static void
glusterd_svc_build_logfile_path(char *server, char *logdir, char *logfile,
size_t len)
{
snprintf(logfile, len, "%s/%s.log", logdir, server);
}

-void
+static void
glusterd_svc_build_volfileid_path(char *server, char *volfileid, size_t len)
{
snprintf(volfileid, len, "gluster/%s", server);
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
index 5a5466a..fbc5225 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
@@ -74,10 +74,6 @@ glusterd_svc_build_volfile_path(char *server, char *workdir, char *volfile,
size_t len);

void
-glusterd_svc_build_logfile_path(char *server, char *logdir, char *logfile,
- size_t len);
-
-void
glusterd_svc_build_svcdir(char *server, char *workdir, char *path, size_t len);

void
--
1.8.3.1

@@ -0,0 +1,38 @@
From b2040d8404e0ac44742cb903e3c8da2c832b2925 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:45:11 +0530
Subject: [PATCH 235/255] Revert "glusterd/svc: Stop stale process using the
glusterd_proc_stop"

This reverts commit fe9159ee42f0f67b01e6a495df8105ea0f66738d.

BUG: 1471742
Change-Id: Id5ac0d21319724141ad9bcb9b66435803ebe5f47
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175944
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
index a6e662f..6a3ca52 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
@@ -488,9 +488,9 @@ glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc)

if (!mux_proc) {
if (pid != -1 && sys_access(svc->proc.pidfile, R_OK) == 0) {
- /* stale pid file, stop and unlink it */
- glusterd_proc_stop(&svc->proc, SIGTERM, PROC_STOP_FORCE);
- glusterd_unlink_file(svc->proc.pidfile);
+ /* stale pid file, unlink it. */
+ kill(pid, SIGTERM);
+ sys_unlink(svc->proc.pidfile);
}
mux_proc = __gf_find_compatible_svc(GD_NODE_SHD);
}
--
1.8.3.1

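Editor's note: both sides of the hunk above handle the same situation — a pid file naming a process that is gone or about to be replaced. A minimal sketch of that stale-pidfile check follows; reap_stale_pidfile() is an illustrative name, and gf_is_service_running() in glusterd plays roughly the probing role shown here, so this is an assumption-laden outline rather than the glusterd implementation.

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Returns 1 if the pidfile names a live process, 0 if it was absent or
 * stale. A stale file is removed, mirroring the unlink in both versions
 * of the hunk above. */
static int reap_stale_pidfile(const char *pidfile)
{
    FILE *fp = fopen(pidfile, "r");
    long pid = -1;

    if (!fp)
        return 0; /* no pidfile at all: nothing running */
    if (fscanf(fp, "%ld", &pid) != 1)
        pid = -1;
    fclose(fp);

    if (pid > 0 && kill((pid_t)pid, 0) == 0)
        return 1; /* signal 0 only probes: the process exists */

    unlink(pidfile); /* stale entry: drop it so a fresh start is clean */
    return 0;
}

int main(void)
{
    printf("running: %d\n", reap_stale_pidfile("/tmp/demo-shd.pid"));
    return 0;
}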
@@ -0,0 +1,35 @@
From 030b5681d47268c591a72035d5a2419234bd1f5f Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:44:55 +0530
Subject: [PATCH 236/255] Revert "shd/mux: Fix race between mux_proc unlink and
stop"

This reverts commit e386fb4f4baf834e6a8fc25cc2fbbb17eb0a7a56.

BUG: 1471742
Change-Id: I6c52835981389fc5bfeb43483feb581ad8507990
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175945
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 3 ---
1 file changed, 3 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
index dbe2560..d81d760 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
@@ -694,9 +694,6 @@ glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig)
gf_is_service_running(svc->proc.pidfile, &pid);
cds_list_del_init(&svc->mux_svc);
empty = cds_list_empty(&svc_proc->svcs);
- if (empty) {
- cds_list_del_init(&svc_proc->svc_proc_list);
- }
}
pthread_mutex_unlock(&conf->attach_lock);
if (empty) {
--
1.8.3.1

227
0237-Revert-ec-fini-Fix-race-between-xlator-cleanup-and-o.patch
Normal file
@@ -0,0 +1,227 @@
From f0c3af09fd919e3646aae2821b0d6bfe4e2fd89c Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:45:58 +0530
Subject: [PATCH 237/255] Revert "ec/fini: Fix race between xlator cleanup and
on going async fop"

This reverts commit 9fd966aa6879ac9867381629f82eca24b950d731.

BUG: 1471742
Change-Id: I557ec138174b01d8b8f8d090acd34c179e2c632d
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175946
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/cluster/ec/src/ec-common.c | 10 ----------
xlators/cluster/ec/src/ec-common.h | 2 --
xlators/cluster/ec/src/ec-data.c | 4 +---
xlators/cluster/ec/src/ec-heal.c | 17 ++---------------
xlators/cluster/ec/src/ec-types.h | 1 -
xlators/cluster/ec/src/ec.c | 37 ++++++++++++-------------------------
6 files changed, 15 insertions(+), 56 deletions(-)

diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c
index 35c2256..e2e582f 100644
--- a/xlators/cluster/ec/src/ec-common.c
+++ b/xlators/cluster/ec/src/ec-common.c
@@ -2956,13 +2956,3 @@ ec_manager(ec_fop_data_t *fop, int32_t error)

__ec_manager(fop, error);
}
-
-gf_boolean_t
-__ec_is_last_fop(ec_t *ec)
-{
- if ((list_empty(&ec->pending_fops)) &&
- (GF_ATOMIC_GET(ec->async_fop_count) == 0)) {
- return _gf_true;
- }
- return _gf_false;
-}
diff --git a/xlators/cluster/ec/src/ec-common.h b/xlators/cluster/ec/src/ec-common.h
index bf6c97d..e948342 100644
--- a/xlators/cluster/ec/src/ec-common.h
+++ b/xlators/cluster/ec/src/ec-common.h
@@ -204,6 +204,4 @@ void
ec_reset_entry_healing(ec_fop_data_t *fop);
char *
ec_msg_str(ec_fop_data_t *fop);
-gf_boolean_t
-__ec_is_last_fop(ec_t *ec);
#endif /* __EC_COMMON_H__ */
diff --git a/xlators/cluster/ec/src/ec-data.c b/xlators/cluster/ec/src/ec-data.c
index 8d2d9a1..6ef9340 100644
--- a/xlators/cluster/ec/src/ec-data.c
+++ b/xlators/cluster/ec/src/ec-data.c
@@ -202,13 +202,11 @@ ec_handle_last_pending_fop_completion(ec_fop_data_t *fop, gf_boolean_t *notify)
{
ec_t *ec = fop->xl->private;

- *notify = _gf_false;
-
if (!list_empty(&fop->pending_list)) {
LOCK(&ec->lock);
{
list_del_init(&fop->pending_list);
- *notify = __ec_is_last_fop(ec);
+ *notify = list_empty(&ec->pending_fops);
}
UNLOCK(&ec->lock);
}
diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
index 237fea2..8844c29 100644
--- a/xlators/cluster/ec/src/ec-heal.c
+++ b/xlators/cluster/ec/src/ec-heal.c
@@ -2814,20 +2814,8 @@ int
ec_replace_heal_done(int ret, call_frame_t *heal, void *opaque)
{
ec_t *ec = opaque;
- gf_boolean_t last_fop = _gf_false;

- if (GF_ATOMIC_DEC(ec->async_fop_count) == 0) {
- LOCK(&ec->lock);
- {
- last_fop = __ec_is_last_fop(ec);
- }
- UNLOCK(&ec->lock);
- }
gf_msg_debug(ec->xl->name, 0, "getxattr on bricks is done ret %d", ret);
-
- if (last_fop)
- ec_pending_fops_completed(ec);
-
return 0;
}

@@ -2881,15 +2869,14 @@ ec_launch_replace_heal(ec_t *ec)
{
int ret = -1;

+ if (!ec)
+ return ret;
ret = synctask_new(ec->xl->ctx->env, ec_replace_brick_heal_wrap,
ec_replace_heal_done, NULL, ec);
-
if (ret < 0) {
gf_msg_debug(ec->xl->name, 0, "Heal failed for replace brick ret = %d",
ret);
- ec_replace_heal_done(-1, NULL, ec);
}
-
return ret;
}

diff --git a/xlators/cluster/ec/src/ec-types.h b/xlators/cluster/ec/src/ec-types.h
index 4dbf4a3..1c295c0 100644
--- a/xlators/cluster/ec/src/ec-types.h
+++ b/xlators/cluster/ec/src/ec-types.h
@@ -643,7 +643,6 @@ struct _ec {
uintptr_t xl_notify; /* Bit flag representing
notification for bricks. */
uintptr_t node_mask;
- gf_atomic_t async_fop_count; /* Number of on going asynchronous fops. */
xlator_t **xl_list;
gf_lock_t lock;
gf_timer_t *timer;
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index f0d58c0..df5912c 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -355,7 +355,6 @@ ec_notify_cbk(void *data)
ec_t *ec = data;
glusterfs_event_t event = GF_EVENT_MAXVAL;
gf_boolean_t propagate = _gf_false;
- gf_boolean_t launch_heal = _gf_false;

LOCK(&ec->lock);
{
@@ -385,11 +384,6 @@ ec_notify_cbk(void *data)
* still bricks DOWN, they will be healed when they
* come up. */
ec_up(ec->xl, ec);
-
- if (ec->shd.iamshd && !ec->shutdown) {
- launch_heal = _gf_true;
- GF_ATOMIC_INC(ec->async_fop_count);
- }
}

propagate = _gf_true;
@@ -397,12 +391,13 @@ ec_notify_cbk(void *data)
unlock:
UNLOCK(&ec->lock);

- if (launch_heal) {
- /* We have just brought the volume UP, so we trigger
- * a self-heal check on the root directory. */
- ec_launch_replace_heal(ec);
- }
if (propagate) {
+ if ((event == GF_EVENT_CHILD_UP) && ec->shd.iamshd) {
+ /* We have just brought the volume UP, so we trigger
+ * a self-heal check on the root directory. */
+ ec_launch_replace_heal(ec);
+ }
+
default_notify(ec->xl, event, NULL);
}
}
@@ -430,7 +425,7 @@ ec_disable_delays(ec_t *ec)
{
ec->shutdown = _gf_true;

- return __ec_is_last_fop(ec);
+ return list_empty(&ec->pending_fops);
}

void
@@ -608,10 +603,7 @@ ec_notify(xlator_t *this, int32_t event, void *data, void *data2)
if (event == GF_EVENT_CHILD_UP) {
/* We need to trigger a selfheal if a brick changes
* to UP state. */
- if (ec_set_up_state(ec, mask, mask) && ec->shd.iamshd &&
- !ec->shutdown) {
- needs_shd_check = _gf_true;
- }
+ needs_shd_check = ec_set_up_state(ec, mask, mask);
} else if (event == GF_EVENT_CHILD_DOWN) {
ec_set_up_state(ec, mask, 0);
}
@@ -641,21 +633,17 @@ ec_notify(xlator_t *this, int32_t event, void *data, void *data2)
}
} else {
propagate = _gf_false;
- needs_shd_check = _gf_false;
- }
-
- if (needs_shd_check) {
- GF_ATOMIC_INC(ec->async_fop_count);
}
}
unlock:
UNLOCK(&ec->lock);

done:
- if (needs_shd_check) {
- ec_launch_replace_heal(ec);
- }
if (propagate) {
+ if (needs_shd_check && ec->shd.iamshd) {
+ ec_launch_replace_heal(ec);
+ }
+
error = default_notify(this, event, data);
}

@@ -717,7 +705,6 @@ init(xlator_t *this)
ec->xl = this;
LOCK_INIT(&ec->lock);

- GF_ATOMIC_INIT(ec->async_fop_count, 0);
INIT_LIST_HEAD(&ec->pending_fops);
INIT_LIST_HEAD(&ec->heal_waiting);
INIT_LIST_HEAD(&ec->healing);
--
1.8.3.1

@@ -0,0 +1,47 @@
From 96072cea4da1c2ba5bd87307f20b3ee2cbe6f63d Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:46:10 +0530
Subject: [PATCH 238/255] Revert "xlator/log: Add more logging in
xlator_is_cleanup_starting"

This reverts commit 9b94397a5a735910fab2a29670146a1feb6d890e.

BUG: 1471742
Change-Id: Icc3f0c8741ed780e265202adbb009063f657c7f7
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175947
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/xlator.c | 12 +++---------
1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c
index 71e1ed4..fbfbbe2 100644
--- a/libglusterfs/src/xlator.c
+++ b/libglusterfs/src/xlator.c
@@ -1494,18 +1494,12 @@ xlator_is_cleanup_starting(xlator_t *this)
glusterfs_graph_t *graph = NULL;
xlator_t *xl = NULL;

- if (!this) {
- gf_msg("xlator", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
- "xlator object is null, returning false");
+ if (!this)
goto out;
- }
-
graph = this->graph;
- if (!graph) {
- gf_msg("xlator", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
- "Graph is not set for xlator %s", this->name);
+
+ if (!graph)
goto out;
- }

xl = graph->first;
if (xl && xl->cleanup_starting)
--
1.8.3.1

128
0239-Revert-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch
Normal file
@@ -0,0 +1,128 @@
From ad40c0783e84e5e54a83aeb20a52f720cc881b0c Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:46:22 +0530
Subject: [PATCH 239/255] Revert "ec/fini: Fix race with ec_fini and ec_notify"

This reverts commit 998d9b8b5e271f407e1c654c34f45f0db36abc71.

BUG: 1471742
Change-Id: Ifccb8a22d9ef96c22b32dcb4b82bf4d21cf85484
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175948
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/glusterfs/xlator.h | 3 ---
libglusterfs/src/libglusterfs.sym | 1 -
libglusterfs/src/xlator.c | 21 ---------------------
xlators/cluster/ec/src/ec-heal.c | 4 ----
xlators/cluster/ec/src/ec-heald.c | 6 ------
xlators/cluster/ec/src/ec.c | 3 ---
6 files changed, 38 deletions(-)

diff --git a/libglusterfs/src/glusterfs/xlator.h b/libglusterfs/src/glusterfs/xlator.h
index 09e463e..8998976 100644
--- a/libglusterfs/src/glusterfs/xlator.h
+++ b/libglusterfs/src/glusterfs/xlator.h
@@ -1092,7 +1092,4 @@ gluster_graph_take_reference(xlator_t *tree);

gf_boolean_t
mgmt_is_multiplexed_daemon(char *name);
-
-gf_boolean_t
-xlator_is_cleanup_starting(xlator_t *this);
#endif /* _XLATOR_H */
diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym
index 86215d2..05f93b4 100644
--- a/libglusterfs/src/libglusterfs.sym
+++ b/libglusterfs/src/libglusterfs.sym
@@ -1160,4 +1160,3 @@ glusterfs_process_svc_attach_volfp
glusterfs_mux_volfile_reconfigure
glusterfs_process_svc_detach
mgmt_is_multiplexed_daemon
-xlator_is_cleanup_starting
diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c
index fbfbbe2..022c3ed 100644
--- a/libglusterfs/src/xlator.c
+++ b/libglusterfs/src/xlator.c
@@ -1486,24 +1486,3 @@ mgmt_is_multiplexed_daemon(char *name)
}
return _gf_false;
}
-
-gf_boolean_t
-xlator_is_cleanup_starting(xlator_t *this)
-{
- gf_boolean_t cleanup = _gf_false;
- glusterfs_graph_t *graph = NULL;
- xlator_t *xl = NULL;
-
- if (!this)
- goto out;
- graph = this->graph;
-
- if (!graph)
- goto out;
-
- xl = graph->first;
- if (xl && xl->cleanup_starting)
- cleanup = _gf_true;
-out:
- return cleanup;
-}
diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
index 8844c29..2fa1f11 100644
--- a/xlators/cluster/ec/src/ec-heal.c
+++ b/xlators/cluster/ec/src/ec-heal.c
@@ -2855,10 +2855,6 @@ ec_replace_brick_heal_wrap(void *opaque)
itable = ec->xl->itable;
else
goto out;
-
- if (xlator_is_cleanup_starting(ec->xl))
- goto out;
-
ret = ec_replace_heal(ec, itable->root);
out:
return ret;
diff --git a/xlators/cluster/ec/src/ec-heald.c b/xlators/cluster/ec/src/ec-heald.c
index 91512d7..edf5e11 100644
--- a/xlators/cluster/ec/src/ec-heald.c
+++ b/xlators/cluster/ec/src/ec-heald.c
@@ -444,9 +444,6 @@ unlock:
int
ec_shd_full_healer_spawn(xlator_t *this, int subvol)
{
- if (xlator_is_cleanup_starting(this))
- return -1;
-
return ec_shd_healer_spawn(this, NTH_FULL_HEALER(this, subvol),
ec_shd_full_healer);
}
@@ -454,9 +451,6 @@ ec_shd_full_healer_spawn(xlator_t *this, int subvol)
int
ec_shd_index_healer_spawn(xlator_t *this, int subvol)
{
- if (xlator_is_cleanup_starting(this))
- return -1;
-
return ec_shd_healer_spawn(this, NTH_INDEX_HEALER(this, subvol),
ec_shd_index_healer);
}
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index df5912c..264582a 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -486,9 +486,6 @@ ec_set_up_state(ec_t *ec, uintptr_t index_mask, uintptr_t new_state)
{
uintptr_t current_state = 0;

- if (xlator_is_cleanup_starting(ec->xl))
- return _gf_false;
-
if ((ec->xl_notify & index_mask) == 0) {
ec->xl_notify |= index_mask;
ec->xl_notify_count++;
--
1.8.3.1

@@ -0,0 +1,54 @@
From 9b3adb28207681f49ea97fc2c473634ff0f73db6 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:46:35 +0530
Subject: [PATCH 240/255] Revert "glusterd/shd: Optimize the glustershd manager
to send reconfigure"

This reverts commit 321080e55f0ae97115a9542ba5de8494e7610860.

BUG: 1471742
Change-Id: I5fa84baa3c3e72ca8eb605c7f1fafb53c68859f9
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175949
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/graph.c | 1 +
xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 9 ++++-----
2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c
index 27d9335..18fb2d9 100644
--- a/libglusterfs/src/graph.c
+++ b/libglusterfs/src/graph.c
@@ -1497,6 +1497,7 @@ glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj)
parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph);
parent_graph->xl_count -= graph->xl_count;
parent_graph->leaf_count -= graph->leaf_count;
+ default_notify(xl, GF_EVENT_PARENT_DOWN, xl);
parent_graph->id++;
ret = 0;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
index d81d760..981cc87 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
@@ -311,11 +311,10 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
*/
ret = svc->stop(svc, SIGTERM);
} else if (volinfo) {
- if (volinfo->status != GLUSTERD_STATUS_STARTED) {
- ret = svc->stop(svc, SIGTERM);
- if (ret)
- goto out;
- }
+ ret = svc->stop(svc, SIGTERM);
+ if (ret)
+ goto out;
+
if (volinfo->status == GLUSTERD_STATUS_STARTED) {
ret = svc->start(svc, flags);
if (ret)
--
1.8.3.1

@@ -0,0 +1,82 @@
From 066189add979d2e4c74463592e5021bd060d5a51 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:46:47 +0530
Subject: [PATCH 241/255] Revert "glusterd/svc: glusterd_svcs_stop should call
individual wrapper function"

This reverts commit 79fff98f9ca5f815cf0227312b9a997d555dad29.

BUG: 1471742
Change-Id: I258040ed9be6bc3b4498c76ed51d59258c55acff
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175950
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 12 ++----------
xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 10 +++++-----
2 files changed, 7 insertions(+), 15 deletions(-)

diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
index 981cc87..75f9a07 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
@@ -656,18 +656,10 @@ glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig)
int pid = -1;

conf = THIS->private;
- GF_VALIDATE_OR_GOTO("glusterd", conf, out);
GF_VALIDATE_OR_GOTO("glusterd", svc, out);
svc_proc = svc->svc_proc;
- if (!svc_proc) {
- /*
- * This can happen when stop was called on a volume that is not shd
- * compatible.
- */
- gf_msg_debug("glusterd", 0, "svc_proc is null, ie shd already stopped");
- ret = 0;
- goto out;
- }
+ GF_VALIDATE_OR_GOTO("glusterd", svc_proc, out);
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);

/* Get volinfo->shd from svc object */
shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
index 6a3ca52..f7be394 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
@@ -86,25 +86,25 @@ glusterd_svcs_stop(glusterd_volinfo_t *volinfo)
priv = this->private;
GF_ASSERT(priv);

- ret = priv->nfs_svc.stop(&(priv->nfs_svc), SIGKILL);
+ ret = glusterd_svc_stop(&(priv->nfs_svc), SIGKILL);
if (ret)
goto out;

- ret = priv->quotad_svc.stop(&(priv->quotad_svc), SIGTERM);
+ ret = glusterd_svc_stop(&(priv->quotad_svc), SIGTERM);
if (ret)
goto out;

if (volinfo) {
- ret = volinfo->shd.svc.stop(&(volinfo->shd.svc), SIGTERM);
+ ret = glusterd_svc_stop(&(volinfo->shd.svc), PROC_START_NO_WAIT);
if (ret)
goto out;
}

- ret = priv->bitd_svc.stop(&(priv->bitd_svc), SIGTERM);
+ ret = glusterd_svc_stop(&(priv->bitd_svc), SIGTERM);
if (ret)
goto out;

- ret = priv->scrub_svc.stop(&(priv->scrub_svc), SIGTERM);
+ ret = glusterd_svc_stop(&(priv->scrub_svc), SIGTERM);
out:
return ret;
}
--
1.8.3.1
427
0242-Revert-tests-shd-Add-test-coverage-for-shd-mux.patch
Normal file
@@ -0,0 +1,427 @@
From 48f7be493588fdf5e99dff0c3b91327e07da05f3 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:48:34 +0530
Subject: [PATCH 242/255] Revert "tests/shd: Add test coverage for shd mux"

This reverts commit b7f832288d2d2e57231d90765afc049ad7cb2f9d.

BUG: 1471742
Change-Id: Ifccac5150f07b98006714e43c77c5a4b1fd38cb8
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175951
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/basic/glusterd-restart-shd-mux.t | 96 ---------------------
tests/basic/shd-mux.t | 149 ---------------------------------
tests/basic/volume-scale-shd-mux.t | 112 -------------------------
tests/volume.rc | 15 ----
4 files changed, 372 deletions(-)
delete mode 100644 tests/basic/glusterd-restart-shd-mux.t
delete mode 100644 tests/basic/shd-mux.t
delete mode 100644 tests/basic/volume-scale-shd-mux.t

diff --git a/tests/basic/glusterd-restart-shd-mux.t b/tests/basic/glusterd-restart-shd-mux.t
deleted file mode 100644
index a50af9d..0000000
--- a/tests/basic/glusterd-restart-shd-mux.t
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TESTS_EXPECTED_IN_LOOP=20
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
-TEST $CLI volume set $V0 cluster.background-self-heal-count 0
-TEST $CLI volume set $V0 cluster.eager-lock off
-TEST $CLI volume set $V0 performance.flush-behind off
-TEST $CLI volume start $V0
-
-for i in $(seq 1 3); do
- TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
- TEST $CLI volume start ${V0}_afr$i
- TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
- TEST $CLI volume start ${V0}_ec$i
-done
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-#Stop the glusterd
-TEST pkill glusterd
-#Only stopping glusterd, so there will be one shd
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" shd_count
-TEST glusterd
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-#Check the thread count become to number of volumes*number of ec subvolume (3*6=18)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check the thread count become to number of volumes*number of afr subvolume (4*6=24)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-shd_pid=$(get_shd_mux_pid $V0)
-for i in $(seq 1 3); do
- afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
- EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
- ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
- EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
-done
-
-#Reboot a node scenario
-TEST pkill gluster
-#Only stopped glusterd, so there will be one shd
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
-
-TEST glusterd
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-#Check the thread count become to number of volumes*number of ec subvolume (3*6=18)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check the thread count become to number of volumes*number of afr subvolume (4*6=24)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-shd_pid=$(get_shd_mux_pid $V0)
-for i in $(seq 1 3); do
- afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
- EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
- ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
- EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
-done
-
-for i in $(seq 1 3); do
- TEST $CLI volume stop ${V0}_afr$i
- TEST $CLI volume stop ${V0}_ec$i
-done
-
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
-
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST kill_brick $V0 $H0 $B0/${V0}3
-
-TEST touch $M0/foo{1..100}
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^204$" get_pending_heal_count $V0
-
-TEST $CLI volume start ${V0} force
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-
-TEST rm -rf $M0/*
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-
-
-TEST $CLI volume stop ${V0}
-TEST $CLI volume delete ${V0}
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^0$" shd_count
-
-cleanup
diff --git a/tests/basic/shd-mux.t b/tests/basic/shd-mux.t
deleted file mode 100644
index e42a34a..0000000
--- a/tests/basic/shd-mux.t
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TESTS_EXPECTED_IN_LOOP=16
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
-TEST $CLI volume set $V0 cluster.background-self-heal-count 0
-TEST $CLI volume set $V0 cluster.eager-lock off
-TEST $CLI volume set $V0 performance.flush-behind off
-TEST $CLI volume start $V0
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
-
-shd_pid=$(get_shd_mux_pid $V0)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-#Create a one more volume
-TEST $CLI volume create ${V0}_1 replica 3 $H0:$B0/${V0}_1{0,1,2,3,4,5}
-TEST $CLI volume start ${V0}_1
-
-#Check whether the shd has multiplexed or not
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
-
-TEST $CLI volume set ${V0}_1 cluster.background-self-heal-count 0
-TEST $CLI volume set ${V0}_1 cluster.eager-lock off
-TEST $CLI volume set ${V0}_1 performance.flush-behind off
-TEST $GFS --volfile-id=/${V0}_1 --volfile-server=$H0 $M1
-
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST kill_brick $V0 $H0 $B0/${V0}4
-TEST kill_brick ${V0}_1 $H0 $B0/${V0}_10
-TEST kill_brick ${V0}_1 $H0 $B0/${V0}_14
-
-TEST touch $M0/foo{1..100}
-TEST touch $M1/foo{1..100}
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count ${V0}_1
-
-TEST $CLI volume start ${V0} force
-TEST $CLI volume start ${V0}_1 force
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_1
-
-TEST rm -rf $M0/*
-TEST rm -rf $M1/*
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
-
-#Stop the volume
-TEST $CLI volume stop ${V0}_1
-TEST $CLI volume delete ${V0}_1
-
-#Check the stop succeeded and detached the volume with out restarting it
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
-
-#Check the thread count become to earlier number after stopping
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-
-#Now create a ec volume and check mux works
-TEST $CLI volume create ${V0}_2 disperse 6 redundancy 2 $H0:$B0/${V0}_2{0,1,2,3,4,5}
-TEST $CLI volume start ${V0}_2
-
-#Check whether the shd has multiplexed or not
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
-
-TEST $CLI volume set ${V0}_2 cluster.background-self-heal-count 0
-TEST $CLI volume set ${V0}_2 cluster.eager-lock off
-TEST $CLI volume set ${V0}_2 performance.flush-behind off
-TEST $GFS --volfile-id=/${V0}_2 --volfile-server=$H0 $M1
-
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST kill_brick $V0 $H0 $B0/${V0}4
-TEST kill_brick ${V0}_2 $H0 $B0/${V0}_20
-TEST kill_brick ${V0}_2 $H0 $B0/${V0}_22
-
-TEST touch $M0/foo{1..100}
-TEST touch $M1/foo{1..100}
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^404$" get_pending_heal_count ${V0}_2
-
-TEST $CLI volume start ${V0} force
-TEST $CLI volume start ${V0}_2 force
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_2
-
-TEST rm -rf $M0/*
-TEST rm -rf $M1/*
-
-
-#Stop the volume
-TEST $CLI volume stop ${V0}_2
-TEST $CLI volume delete ${V0}_2
-
-#Check the stop succeeded and detached the volume with out restarting it
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
-
-#Check the thread count become to zero for ec related threads
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check the thread count become to earlier number after stopping
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-for i in $(seq 1 3); do
- TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
- TEST $CLI volume start ${V0}_afr$i
- TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
- TEST $CLI volume start ${V0}_ec$i
-done
-
-#Check the thread count become to number of volumes*number of ec subvolume (3*6=18)
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check the thread count become to number of volumes*number of afr subvolume (4*6=24)
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^24$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-#Delete the volumes
-for i in $(seq 1 3); do
- TEST $CLI volume stop ${V0}_afr$i
- TEST $CLI volume stop ${V0}_ec$i
- TEST $CLI volume delete ${V0}_afr$i
- TEST $CLI volume delete ${V0}_ec$i
-done
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $CLI volume stop ${V0}
-TEST $CLI volume delete ${V0}
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
-
-cleanup
diff --git a/tests/basic/volume-scale-shd-mux.t b/tests/basic/volume-scale-shd-mux.t
deleted file mode 100644
index dd9cf83..0000000
--- a/tests/basic/volume-scale-shd-mux.t
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TESTS_EXPECTED_IN_LOOP=6
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
-TEST $CLI volume set $V0 cluster.background-self-heal-count 0
-TEST $CLI volume set $V0 cluster.eager-lock off
-TEST $CLI volume set $V0 performance.flush-behind off
-TEST $CLI volume start $V0
-
-for i in $(seq 1 2); do
- TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
- TEST $CLI volume start ${V0}_afr$i
- TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
- TEST $CLI volume start ${V0}_ec$i
-done
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-#Check the thread count become to number of volumes*number of ec subvolume (2*6=12)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
-#Check the thread count become to number of volumes*number of afr subvolume (3*6=18)
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{6,7,8};
-#Check the thread count become to number of volumes*number of afr subvolume plus 3 additional threads from newly added bricks (3*6+3=21)
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^21$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
-#Remove the brick and check the detach is successful
-$CLI volume remove-brick $V0 $H0:$B0/${V0}{6,7,8} force
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
-
|
||||
-TEST $CLI volume add-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5};
|
||||
-#Check the thread count become to number of volumes*number of ec subvolume plus 2 additional threads from newly added bricks (2*6+6=18)
|
||||
-
|
||||
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
|
||||
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
|
||||
-
|
||||
-#Remove the brick and check the detach is successful
|
||||
-$CLI volume remove-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5} force
|
||||
-
|
||||
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "__ec_shd_healer_wait"
|
||||
-
|
||||
-
|
||||
-for i in $(seq 1 2); do
|
||||
- TEST $CLI volume stop ${V0}_afr$i
|
||||
- TEST $CLI volume stop ${V0}_ec$i
|
||||
-done
|
||||
-
|
||||
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
|
||||
-
|
||||
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
|
||||
-
|
||||
-TEST kill_brick $V0 $H0 $B0/${V0}0
|
||||
-TEST kill_brick $V0 $H0 $B0/${V0}4
|
||||
-
|
||||
-TEST touch $M0/foo{1..100}
|
||||
-
|
||||
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
|
||||
-
|
||||
-TEST $CLI volume start ${V0} force
|
||||
-
|
||||
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
|
||||
-
|
||||
-TEST rm -rf $M0/*
|
||||
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
|
||||
-shd_pid=$(get_shd_mux_pid $V0)
|
||||
-TEST $CLI volume create ${V0}_distribute1 $H0:$B0/${V0}_distribute10
|
||||
-TEST $CLI volume start ${V0}_distribute1
|
||||
-
|
||||
-#Creating a non-replicate/non-ec volume should not have any effect in shd
|
||||
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
|
||||
-EXPECT "^${shd_pid}$" get_shd_mux_pid $V0
|
||||
-
|
||||
-TEST mkdir $B0/add/
|
||||
-#Now convert the distributed volume to replicate
|
||||
-TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3}
|
||||
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^9$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
|
||||
-
|
||||
-#scale down the volume
|
||||
-TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
|
||||
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "__afr_shd_healer_wait"
|
||||
-
|
||||
-TEST $CLI volume stop ${V0}
|
||||
-TEST $CLI volume delete ${V0}
|
||||
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
|
||||
-
|
||||
-TEST rm -rf $B0/add/
|
||||
-TEST mkdir $B0/add/
|
||||
-#Now convert the distributed volume back to replicate and make sure that a new shd is spawned
|
||||
-TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3};
|
||||
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
|
||||
-EXPECT_WITHIN $HEAL_TIMEOUT "^3$" number_healer_threads_shd ${V0}_distribute1 "__afr_shd_healer_wait"
|
||||
-
|
||||
-#Now convert the replica volume to distribute again and make sure the shd is now stopped
|
||||
-TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
|
||||
-TEST rm -rf $B0/add/
|
||||
-
|
||||
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
|
||||
-
|
||||
-cleanup
|
||||
diff --git a/tests/volume.rc b/tests/volume.rc
|
||||
index 6a78c37..022d972 100644
|
||||
--- a/tests/volume.rc
|
||||
+++ b/tests/volume.rc
|
||||
@@ -913,21 +913,6 @@ function volgen_check_ancestry {
|
||||
fi
|
||||
}
|
||||
|
||||
-function get_shd_mux_pid {
|
||||
- local volume=$1
|
||||
- pid=`$CLI volume status $volume shd | awk '/Self-heal/{print $8}'`
|
||||
- echo $pid
|
||||
-}
|
||||
-
|
||||
-function shd_count {
|
||||
- ps aux | grep "glustershd" | grep -v grep | wc -l
|
||||
-}
|
||||
-
|
||||
-function number_healer_threads_shd {
|
||||
- local pid=$(get_shd_mux_pid $1)
|
||||
- pstack $pid | grep $2 | wc -l
|
||||
-}
|
||||
-
|
||||
function get_mtime {
|
||||
local time=$(get-mdata-xattr -m $1)
|
||||
if [ $time == "-1" ];
|
||||
--
|
||||
1.8.3.1
|
||||
|
154
0243-Revert-glusterfsd-cleanup-Protect-graph-object-under.patch
Normal file
@@ -0,0 +1,154 @@
|
||||
From 4d65506ddfa0245dcaa13b14ca13b2ea762df37d Mon Sep 17 00:00:00 2001
|
||||
From: Mohammed Rafi KC <rkavunga@redhat.com>
|
||||
Date: Thu, 11 Jul 2019 12:48:51 +0530
|
||||
Subject: [PATCH 243/255] Revert "glusterfsd/cleanup: Protect graph object
|
||||
under a lock"
|
||||
|
||||
This reverts commit 11b64d494c52004002f900888694d20ef8af6df6.
|
||||
|
||||
BUG: 1471742
|
||||
Change-Id: I2717207d87ad213722de33c24e451502ed4aff48
|
||||
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
|
||||
Reviewed-on: https://code.engineering.redhat.com/gerrit/175952
|
||||
Tested-by: RHGS Build Bot <nigelb@redhat.com>
|
||||
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
|
||||
---
|
||||
libglusterfs/src/graph.c | 58 ++++++++++---------------
|
||||
libglusterfs/src/statedump.c | 16 ++-----
|
||||
tests/bugs/glusterd/optimized-basic-testcases.t | 4 +-
|
||||
3 files changed, 28 insertions(+), 50 deletions(-)
|
||||
|
||||
diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c
|
||||
index 18fb2d9..4c8b02d 100644
|
||||
--- a/libglusterfs/src/graph.c
|
||||
+++ b/libglusterfs/src/graph.c
|
||||
@@ -1392,12 +1392,8 @@ glusterfs_graph_cleanup(void *arg)
|
||||
}
|
||||
pthread_mutex_unlock(&ctx->notify_lock);
|
||||
|
||||
- pthread_mutex_lock(&ctx->cleanup_lock);
|
||||
- {
|
||||
- glusterfs_graph_fini(graph);
|
||||
- glusterfs_graph_destroy(graph);
|
||||
- }
|
||||
- pthread_mutex_unlock(&ctx->cleanup_lock);
|
||||
+ glusterfs_graph_fini(graph);
|
||||
+ glusterfs_graph_destroy(graph);
|
||||
out:
|
||||
return NULL;
|
||||
}
|
||||
@@ -1472,37 +1468,31 @@ glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj)
|
||||
|
||||
if (!ctx || !ctx->active || !volfile_obj)
|
||||
goto out;
|
||||
+ parent_graph = ctx->active;
|
||||
+ graph = volfile_obj->graph;
|
||||
+ if (!graph)
|
||||
+ goto out;
|
||||
+ if (graph->first)
|
||||
+ xl = graph->first;
|
||||
|
||||
- pthread_mutex_lock(&ctx->cleanup_lock);
|
||||
- {
|
||||
- parent_graph = ctx->active;
|
||||
- graph = volfile_obj->graph;
|
||||
- if (!graph)
|
||||
- goto unlock;
|
||||
- if (graph->first)
|
||||
- xl = graph->first;
|
||||
-
|
||||
- last_xl = graph->last_xl;
|
||||
- if (last_xl)
|
||||
- last_xl->next = NULL;
|
||||
- if (!xl || xl->cleanup_starting)
|
||||
- goto unlock;
|
||||
+ last_xl = graph->last_xl;
|
||||
+ if (last_xl)
|
||||
+ last_xl->next = NULL;
|
||||
+ if (!xl || xl->cleanup_starting)
|
||||
+ goto out;
|
||||
|
||||
- xl->cleanup_starting = 1;
|
||||
- gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_DETACH_STARTED,
|
||||
- "detaching child %s", volfile_obj->vol_id);
|
||||
+ xl->cleanup_starting = 1;
|
||||
+ gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_DETACH_STARTED,
|
||||
+ "detaching child %s", volfile_obj->vol_id);
|
||||
|
||||
- list_del_init(&volfile_obj->volfile_list);
|
||||
- glusterfs_mux_xlator_unlink(parent_graph->top, xl);
|
||||
- parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph);
|
||||
- parent_graph->xl_count -= graph->xl_count;
|
||||
- parent_graph->leaf_count -= graph->leaf_count;
|
||||
- default_notify(xl, GF_EVENT_PARENT_DOWN, xl);
|
||||
- parent_graph->id++;
|
||||
- ret = 0;
|
||||
- }
|
||||
-unlock:
|
||||
- pthread_mutex_unlock(&ctx->cleanup_lock);
|
||||
+ list_del_init(&volfile_obj->volfile_list);
|
||||
+ glusterfs_mux_xlator_unlink(parent_graph->top, xl);
|
||||
+ parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph);
|
||||
+ parent_graph->xl_count -= graph->xl_count;
|
||||
+ parent_graph->leaf_count -= graph->leaf_count;
|
||||
+ default_notify(xl, GF_EVENT_PARENT_DOWN, xl);
|
||||
+ parent_graph->id++;
|
||||
+ ret = 0;
|
||||
out:
|
||||
if (!ret) {
|
||||
list_del_init(&volfile_obj->volfile_list);
|
||||
diff --git a/libglusterfs/src/statedump.c b/libglusterfs/src/statedump.c
|
||||
index 0d58f8f..0cf80c0 100644
|
||||
--- a/libglusterfs/src/statedump.c
|
||||
+++ b/libglusterfs/src/statedump.c
|
||||
@@ -805,17 +805,11 @@ gf_proc_dump_info(int signum, glusterfs_ctx_t *ctx)
|
||||
int brick_count = 0;
|
||||
int len = 0;
|
||||
|
||||
+ gf_proc_dump_lock();
|
||||
+
|
||||
if (!ctx)
|
||||
goto out;
|
||||
|
||||
- /*
|
||||
- * Multiplexed daemons can change the active graph when attach/detach
|
||||
- * is called. So this has to be protected with the cleanup lock.
|
||||
- */
|
||||
- if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name))
|
||||
- pthread_mutex_lock(&ctx->cleanup_lock);
|
||||
- gf_proc_dump_lock();
|
||||
-
|
||||
if (!mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name) &&
|
||||
(ctx && ctx->active)) {
|
||||
top = ctx->active->first;
|
||||
@@ -929,11 +923,7 @@ gf_proc_dump_info(int signum, glusterfs_ctx_t *ctx)
|
||||
out:
|
||||
GF_FREE(dump_options.dump_path);
|
||||
dump_options.dump_path = NULL;
|
||||
- if (ctx) {
|
||||
- gf_proc_dump_unlock();
|
||||
- if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name))
|
||||
- pthread_mutex_unlock(&ctx->cleanup_lock);
|
||||
- }
|
||||
+ gf_proc_dump_unlock();
|
||||
|
||||
return;
|
||||
}
|
||||
diff --git a/tests/bugs/glusterd/optimized-basic-testcases.t b/tests/bugs/glusterd/optimized-basic-testcases.t
|
||||
index 110f1b9..d700b5e 100644
|
||||
--- a/tests/bugs/glusterd/optimized-basic-testcases.t
|
||||
+++ b/tests/bugs/glusterd/optimized-basic-testcases.t
|
||||
@@ -289,9 +289,7 @@ mkdir -p /xyz/var/lib/glusterd/abc
|
||||
TEST $CLI volume create "test" $H0:/xyz/var/lib/glusterd/abc
|
||||
EXPECT 'Created' volinfo_field "test" 'Status';
|
||||
|
||||
-#While taking a statedump, there is a TRY_LOCK on call_frame, which might may cause
|
||||
-#failure. So Adding a EXPECT_WITHIN
|
||||
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" generate_statedump_and_check_for_glusterd_info
|
||||
+EXPECT "1" generate_statedump_and_check_for_glusterd_info
|
||||
|
||||
cleanup_statedump `pidof glusterd`
|
||||
cleanup
|
||||
--
|
||||
1.8.3.1
|
||||
|
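For context on what patch 0243 reverts: the original change serialized graph teardown against concurrent readers of the graph (such as the statedump path) behind ctx->cleanup_lock. A minimal sketch of that guard pattern follows; the types and the fini/destroy helpers are placeholders, not the real libglusterfs API.

#include <pthread.h>

/* Sketch only: 'graph_fini'/'graph_destroy' stand in for the real
 * teardown steps of a multiplexed daemon's graph. */
typedef struct {
    pthread_mutex_t cleanup_lock;
    void *active_graph;
} ctx_t;

static void graph_fini(void *graph)    { (void)graph; /* stop xlators */ }
static void graph_destroy(void *graph) { (void)graph; /* free memory  */ }

static void
graph_cleanup(ctx_t *ctx, void *graph)
{
    pthread_mutex_lock(&ctx->cleanup_lock);
    {
        /* no statedump or attach/detach can walk the graph here */
        graph_fini(graph);
        graph_destroy(graph);
    }
    pthread_mutex_unlock(&ctx->cleanup_lock);
}

The revert removes this guard (and the matching one in gf_proc_dump_info) because the lock ordering it introduced caused other problems tracked under bz#1471742.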
292
0244-Revert-ec-shd-Cleanup-self-heal-daemon-resources-dur.patch
Normal file
@@ -0,0 +1,292 @@
|
||||
From f6d967cd70ff41a0f93c54d50128c468e9d5dea9 Mon Sep 17 00:00:00 2001
|
||||
From: Mohammed Rafi KC <rkavunga@redhat.com>
|
||||
Date: Thu, 11 Jul 2019 12:49:21 +0530
|
||||
Subject: [PATCH 244/255] Revert "ec/shd: Cleanup self heal daemon resources
|
||||
during ec fini"
|
||||
|
||||
This reverts commit edc238e40060773f5f5fd59fcdad8ae27d65749f.
|
||||
|
||||
BUG: 1471742
|
||||
Change-Id: If6cb5941b964f005454a21a67938b354ef1a2037
|
||||
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
|
||||
Reviewed-on: https://code.engineering.redhat.com/gerrit/175953
|
||||
Tested-by: RHGS Build Bot <nigelb@redhat.com>
|
||||
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
|
||||
---
|
||||
libglusterfs/src/syncop-utils.c | 2 -
|
||||
xlators/cluster/afr/src/afr-self-heald.c | 5 ---
|
||||
xlators/cluster/ec/src/ec-heald.c | 77 +++++---------------------------
|
||||
xlators/cluster/ec/src/ec-heald.h | 3 --
|
||||
xlators/cluster/ec/src/ec-messages.h | 3 +-
|
||||
xlators/cluster/ec/src/ec.c | 47 -------------------
|
||||
6 files changed, 13 insertions(+), 124 deletions(-)
|
||||
|
||||
diff --git a/libglusterfs/src/syncop-utils.c b/libglusterfs/src/syncop-utils.c
|
||||
index 4167db4..b842142 100644
|
||||
--- a/libglusterfs/src/syncop-utils.c
|
||||
+++ b/libglusterfs/src/syncop-utils.c
|
||||
@@ -354,8 +354,6 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid,
|
||||
|
||||
if (frame) {
|
||||
this = frame->this;
|
||||
- } else {
|
||||
- this = THIS;
|
||||
}
|
||||
|
||||
/*For this functionality to be implemented in general, we need
|
||||
diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c
|
||||
index 522fe5d..8bc4720 100644
|
||||
--- a/xlators/cluster/afr/src/afr-self-heald.c
|
||||
+++ b/xlators/cluster/afr/src/afr-self-heald.c
|
||||
@@ -524,11 +524,6 @@ afr_shd_full_heal(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
|
||||
afr_private_t *priv = NULL;
|
||||
|
||||
priv = this->private;
|
||||
-
|
||||
- if (this->cleanup_starting) {
|
||||
- return -ENOTCONN;
|
||||
- }
|
||||
-
|
||||
if (!priv->shd.enabled)
|
||||
return -EBUSY;
|
||||
|
||||
diff --git a/xlators/cluster/ec/src/ec-heald.c b/xlators/cluster/ec/src/ec-heald.c
|
||||
index edf5e11..cba111a 100644
|
||||
--- a/xlators/cluster/ec/src/ec-heald.c
|
||||
+++ b/xlators/cluster/ec/src/ec-heald.c
|
||||
@@ -71,11 +71,6 @@ disabled_loop:
|
||||
break;
|
||||
}
|
||||
|
||||
- if (ec->shutdown) {
|
||||
- healer->running = _gf_false;
|
||||
- return -1;
|
||||
- }
|
||||
-
|
||||
ret = healer->rerun;
|
||||
healer->rerun = 0;
|
||||
|
||||
@@ -246,11 +241,9 @@ ec_shd_index_sweep(struct subvol_healer *healer)
|
||||
goto out;
|
||||
}
|
||||
|
||||
- _mask_cancellation();
|
||||
ret = syncop_mt_dir_scan(NULL, subvol, &loc, GF_CLIENT_PID_SELF_HEALD,
|
||||
healer, ec_shd_index_heal, xdata,
|
||||
ec->shd.max_threads, ec->shd.wait_qlength);
|
||||
- _unmask_cancellation();
|
||||
out:
|
||||
if (xdata)
|
||||
dict_unref(xdata);
|
||||
@@ -270,11 +263,6 @@ ec_shd_full_heal(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
|
||||
int ret = 0;
|
||||
|
||||
ec = this->private;
|
||||
-
|
||||
- if (this->cleanup_starting) {
|
||||
- return -ENOTCONN;
|
||||
- }
|
||||
-
|
||||
if (ec->xl_up_count <= ec->fragments) {
|
||||
return -ENOTCONN;
|
||||
}
|
||||
@@ -317,15 +305,11 @@ ec_shd_full_sweep(struct subvol_healer *healer, inode_t *inode)
|
||||
{
|
||||
ec_t *ec = NULL;
|
||||
loc_t loc = {0};
|
||||
- int ret = -1;
|
||||
|
||||
ec = healer->this->private;
|
||||
loc.inode = inode;
|
||||
- _mask_cancellation();
|
||||
- ret = syncop_ftw(ec->xl_list[healer->subvol], &loc,
|
||||
- GF_CLIENT_PID_SELF_HEALD, healer, ec_shd_full_heal);
|
||||
- _unmask_cancellation();
|
||||
- return ret;
|
||||
+ return syncop_ftw(ec->xl_list[healer->subvol], &loc,
|
||||
+ GF_CLIENT_PID_SELF_HEALD, healer, ec_shd_full_heal);
|
||||
}
|
||||
|
||||
void *
|
||||
@@ -333,16 +317,13 @@ ec_shd_index_healer(void *data)
|
||||
{
|
||||
struct subvol_healer *healer = NULL;
|
||||
xlator_t *this = NULL;
|
||||
- int run = 0;
|
||||
|
||||
healer = data;
|
||||
THIS = this = healer->this;
|
||||
ec_t *ec = this->private;
|
||||
|
||||
for (;;) {
|
||||
- run = ec_shd_healer_wait(healer);
|
||||
- if (run == -1)
|
||||
- break;
|
||||
+ ec_shd_healer_wait(healer);
|
||||
|
||||
if (ec->xl_up_count > ec->fragments) {
|
||||
gf_msg_debug(this->name, 0, "starting index sweep on subvol %s",
|
||||
@@ -371,12 +352,16 @@ ec_shd_full_healer(void *data)
|
||||
|
||||
rootloc.inode = this->itable->root;
|
||||
for (;;) {
|
||||
- run = ec_shd_healer_wait(healer);
|
||||
- if (run < 0) {
|
||||
- break;
|
||||
- } else if (run == 0) {
|
||||
- continue;
|
||||
+ pthread_mutex_lock(&healer->mutex);
|
||||
+ {
|
||||
+ run = __ec_shd_healer_wait(healer);
|
||||
+ if (!run)
|
||||
+ healer->running = _gf_false;
|
||||
}
|
||||
+ pthread_mutex_unlock(&healer->mutex);
|
||||
+
|
||||
+ if (!run)
|
||||
+ break;
|
||||
|
||||
if (ec->xl_up_count > ec->fragments) {
|
||||
gf_msg(this->name, GF_LOG_INFO, 0, EC_MSG_FULL_SWEEP_START,
|
||||
@@ -577,41 +562,3 @@ out:
|
||||
dict_del(output, this->name);
|
||||
return ret;
|
||||
}
|
||||
-
|
||||
-void
|
||||
-ec_destroy_healer_object(xlator_t *this, struct subvol_healer *healer)
|
||||
-{
|
||||
- if (!healer)
|
||||
- return;
|
||||
-
|
||||
- pthread_cond_destroy(&healer->cond);
|
||||
- pthread_mutex_destroy(&healer->mutex);
|
||||
-}
|
||||
-
|
||||
-void
|
||||
-ec_selfheal_daemon_fini(xlator_t *this)
|
||||
-{
|
||||
- struct subvol_healer *healer = NULL;
|
||||
- ec_self_heald_t *shd = NULL;
|
||||
- ec_t *priv = NULL;
|
||||
- int i = 0;
|
||||
-
|
||||
- priv = this->private;
|
||||
- if (!priv)
|
||||
- return;
|
||||
-
|
||||
- shd = &priv->shd;
|
||||
- if (!shd->iamshd)
|
||||
- return;
|
||||
-
|
||||
- for (i = 0; i < priv->nodes; i++) {
|
||||
- healer = &shd->index_healers[i];
|
||||
- ec_destroy_healer_object(this, healer);
|
||||
-
|
||||
- healer = &shd->full_healers[i];
|
||||
- ec_destroy_healer_object(this, healer);
|
||||
- }
|
||||
-
|
||||
- GF_FREE(shd->index_healers);
|
||||
- GF_FREE(shd->full_healers);
|
||||
-}
|
||||
diff --git a/xlators/cluster/ec/src/ec-heald.h b/xlators/cluster/ec/src/ec-heald.h
|
||||
index 8184cf4..2eda2a7 100644
|
||||
--- a/xlators/cluster/ec/src/ec-heald.h
|
||||
+++ b/xlators/cluster/ec/src/ec-heald.h
|
||||
@@ -24,7 +24,4 @@ ec_selfheal_daemon_init(xlator_t *this);
|
||||
void
|
||||
ec_shd_index_healer_wake(ec_t *ec);
|
||||
|
||||
-void
|
||||
-ec_selfheal_daemon_fini(xlator_t *this);
|
||||
-
|
||||
#endif /* __EC_HEALD_H__ */
|
||||
diff --git a/xlators/cluster/ec/src/ec-messages.h b/xlators/cluster/ec/src/ec-messages.h
|
||||
index ce299bb..7c28808 100644
|
||||
--- a/xlators/cluster/ec/src/ec-messages.h
|
||||
+++ b/xlators/cluster/ec/src/ec-messages.h
|
||||
@@ -55,7 +55,6 @@ GLFS_MSGID(EC, EC_MSG_INVALID_CONFIG, EC_MSG_HEAL_FAIL,
|
||||
EC_MSG_CONFIG_XATTR_INVALID, EC_MSG_EXTENSION, EC_MSG_EXTENSION_NONE,
|
||||
EC_MSG_EXTENSION_UNKNOWN, EC_MSG_EXTENSION_UNSUPPORTED,
|
||||
EC_MSG_EXTENSION_FAILED, EC_MSG_NO_GF, EC_MSG_MATRIX_FAILED,
|
||||
- EC_MSG_DYN_CREATE_FAILED, EC_MSG_DYN_CODEGEN_FAILED,
|
||||
- EC_MSG_THREAD_CLEANUP_FAILED);
|
||||
+ EC_MSG_DYN_CREATE_FAILED, EC_MSG_DYN_CODEGEN_FAILED);
|
||||
|
||||
#endif /* !_EC_MESSAGES_H_ */
|
||||
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
|
||||
index 264582a..3c8013e 100644
|
||||
--- a/xlators/cluster/ec/src/ec.c
|
||||
+++ b/xlators/cluster/ec/src/ec.c
|
||||
@@ -429,51 +429,6 @@ ec_disable_delays(ec_t *ec)
|
||||
}
|
||||
|
||||
void
|
||||
-ec_cleanup_healer_object(ec_t *ec)
|
||||
-{
|
||||
- struct subvol_healer *healer = NULL;
|
||||
- ec_self_heald_t *shd = NULL;
|
||||
- void *res = NULL;
|
||||
- int i = 0;
|
||||
- gf_boolean_t is_join = _gf_false;
|
||||
-
|
||||
- shd = &ec->shd;
|
||||
- if (!shd->iamshd)
|
||||
- return;
|
||||
-
|
||||
- for (i = 0; i < ec->nodes; i++) {
|
||||
- healer = &shd->index_healers[i];
|
||||
- pthread_mutex_lock(&healer->mutex);
|
||||
- {
|
||||
- healer->rerun = 1;
|
||||
- if (healer->running) {
|
||||
- pthread_cond_signal(&healer->cond);
|
||||
- is_join = _gf_true;
|
||||
- }
|
||||
- }
|
||||
- pthread_mutex_unlock(&healer->mutex);
|
||||
- if (is_join) {
|
||||
- pthread_join(healer->thread, &res);
|
||||
- is_join = _gf_false;
|
||||
- }
|
||||
-
|
||||
- healer = &shd->full_healers[i];
|
||||
- pthread_mutex_lock(&healer->mutex);
|
||||
- {
|
||||
- healer->rerun = 1;
|
||||
- if (healer->running) {
|
||||
- pthread_cond_signal(&healer->cond);
|
||||
- is_join = _gf_true;
|
||||
- }
|
||||
- }
|
||||
- pthread_mutex_unlock(&healer->mutex);
|
||||
- if (is_join) {
|
||||
- pthread_join(healer->thread, &res);
|
||||
- is_join = _gf_false;
|
||||
- }
|
||||
- }
|
||||
-}
|
||||
-void
|
||||
ec_pending_fops_completed(ec_t *ec)
|
||||
{
|
||||
if (ec->shutdown) {
|
||||
@@ -589,7 +544,6 @@ ec_notify(xlator_t *this, int32_t event, void *data, void *data2)
|
||||
/* If there aren't pending fops running after we have waken up
|
||||
* them, we immediately propagate the notification. */
|
||||
propagate = ec_disable_delays(ec);
|
||||
- ec_cleanup_healer_object(ec);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
@@ -805,7 +759,6 @@ failed:
|
||||
void
|
||||
fini(xlator_t *this)
|
||||
{
|
||||
- ec_selfheal_daemon_fini(this);
|
||||
__ec_destroy_private(this);
|
||||
}
|
||||
|
||||
--
|
||||
1.8.3.1
|
||||
|
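The ec_cleanup_healer_object() function removed by patch 0244 followed a standard condition-variable shutdown handshake: wake the healer under its mutex, then join it. A hedged sketch of that pattern; the struct layout here is illustrative, not the real subvol_healer.

#include <pthread.h>
#include <stdbool.h>

struct healer {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    bool running;
    int rerun;
};

static void
healer_shutdown(struct healer *h)
{
    bool join = false;

    pthread_mutex_lock(&h->mutex);
    {
        h->rerun = 1;              /* worker rechecks state on wakeup */
        if (h->running) {
            pthread_cond_signal(&h->cond);
            join = true;
        }
    }
    pthread_mutex_unlock(&h->mutex);

    if (join)
        pthread_join(h->thread, NULL); /* wait for the worker to exit */
}

The cond/mutex objects may only be destroyed after the join returns, which is why the reverted commit joined both the index and full healers before freeing them.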
151
0245-Revert-shd-glusterd-Serialize-shd-manager-to-prevent.patch
Normal file
@@ -0,0 +1,151 @@
|
||||
From 022701465f3e642cdb7942995647615baa266a35 Mon Sep 17 00:00:00 2001
|
||||
From: Mohammed Rafi KC <rkavunga@redhat.com>
|
||||
Date: Thu, 11 Jul 2019 12:49:31 +0530
|
||||
Subject: [PATCH 245/255] Revert "shd/glusterd: Serialize shd manager to
|
||||
prevent race condition"
|
||||
|
||||
This reverts commit 646292b4f73bf1b506d034b85787f794963d7196.
|
||||
|
||||
BUG: 1471742
|
||||
Change-Id: Ie21fbe18965d8bdea81f4276b57960a27a4db89d
|
||||
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
|
||||
Reviewed-on: https://code.engineering.redhat.com/gerrit/175954
|
||||
Tested-by: RHGS Build Bot <nigelb@redhat.com>
|
||||
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
|
||||
---
|
||||
.../serialize-shd-manager-glusterd-restart.t | 54 ----------------------
|
||||
xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 14 ------
|
||||
xlators/mgmt/glusterd/src/glusterd.c | 1 -
|
||||
xlators/mgmt/glusterd/src/glusterd.h | 3 --
|
||||
4 files changed, 72 deletions(-)
|
||||
delete mode 100644 tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
|
||||
|
||||
diff --git a/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
|
||||
deleted file mode 100644
|
||||
index 3a27c2a..0000000
|
||||
--- a/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
|
||||
+++ /dev/null
|
||||
@@ -1,54 +0,0 @@
|
||||
-#! /bin/bash
|
||||
-
|
||||
-. $(dirname $0)/../../include.rc
|
||||
-. $(dirname $0)/../../cluster.rc
|
||||
-
|
||||
-function check_peers {
|
||||
-count=`$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l`
|
||||
-echo $count
|
||||
-}
|
||||
-
|
||||
-function check_shd {
|
||||
-ps aux | grep $1 | grep glustershd | wc -l
|
||||
-}
|
||||
-
|
||||
-cleanup
|
||||
-
|
||||
-
|
||||
-TEST launch_cluster 6
|
||||
-
|
||||
-TESTS_EXPECTED_IN_LOOP=25
|
||||
-for i in $(seq 2 6); do
|
||||
- hostname="H$i"
|
||||
- TEST $CLI_1 peer probe ${!hostname}
|
||||
-done
|
||||
-
|
||||
-
|
||||
-EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers;
|
||||
-for i in $(seq 1 5); do
|
||||
-
|
||||
- TEST $CLI_1 volume create ${V0}_$i replica 3 $H1:$B1/${V0}_$i $H2:$B2/${V0}_$i $H3:$B3/${V0}_$i $H4:$B4/${V0}_$i $H5:$B5/${V0}_$i $H6:$B6/${V0}_$i
|
||||
- TEST $CLI_1 volume start ${V0}_$i force
|
||||
-
|
||||
-done
|
||||
-
|
||||
-#kill a node
|
||||
-TEST kill_node 3
|
||||
-
|
||||
-TEST $glusterd_3;
|
||||
-EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers
|
||||
-
|
||||
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_shd $H3
|
||||
-
|
||||
-for i in $(seq 1 5); do
|
||||
-
|
||||
- TEST $CLI_1 volume stop ${V0}_$i
|
||||
- TEST $CLI_1 volume delete ${V0}_$i
|
||||
-
|
||||
-done
|
||||
-
|
||||
-for i in $(seq 1 6); do
|
||||
- hostname="H$i"
|
||||
- EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 0 check_shd ${!hostname}
|
||||
-done
|
||||
-cleanup
|
||||
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
|
||||
index 75f9a07..a9eab42 100644
|
||||
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
|
||||
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
|
||||
@@ -254,26 +254,14 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
|
||||
{
|
||||
int ret = -1;
|
||||
glusterd_volinfo_t *volinfo = NULL;
|
||||
- glusterd_conf_t *conf = NULL;
|
||||
- gf_boolean_t shd_restart = _gf_false;
|
||||
|
||||
- conf = THIS->private;
|
||||
volinfo = data;
|
||||
- GF_VALIDATE_OR_GOTO("glusterd", conf, out);
|
||||
GF_VALIDATE_OR_GOTO("glusterd", svc, out);
|
||||
GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);
|
||||
|
||||
if (volinfo)
|
||||
glusterd_volinfo_ref(volinfo);
|
||||
|
||||
- while (conf->restart_shd) {
|
||||
- synclock_unlock(&conf->big_lock);
|
||||
- sleep(2);
|
||||
- synclock_lock(&conf->big_lock);
|
||||
- }
|
||||
- conf->restart_shd = _gf_true;
|
||||
- shd_restart = _gf_true;
|
||||
-
|
||||
ret = glusterd_shdsvc_create_volfile(volinfo);
|
||||
if (ret)
|
||||
goto out;
|
||||
@@ -322,8 +310,6 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
|
||||
}
|
||||
}
|
||||
out:
|
||||
- if (shd_restart)
|
||||
- conf->restart_shd = _gf_false;
|
||||
if (volinfo)
|
||||
glusterd_volinfo_unref(volinfo);
|
||||
if (ret)
|
||||
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
|
||||
index 6d7dd4a..c0973cb 100644
|
||||
--- a/xlators/mgmt/glusterd/src/glusterd.c
|
||||
+++ b/xlators/mgmt/glusterd/src/glusterd.c
|
||||
@@ -1819,7 +1819,6 @@ init(xlator_t *this)
|
||||
conf->rpc = rpc;
|
||||
conf->uds_rpc = uds_rpc;
|
||||
conf->gfs_mgmt = &gd_brick_prog;
|
||||
- conf->restart_shd = _gf_false;
|
||||
this->private = conf;
|
||||
/* conf->workdir and conf->rundir are smaller than PATH_MAX; gcc's
|
||||
* snprintf checking will throw an error here if sprintf is used.
|
||||
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
|
||||
index 7d07d33..0fbc9dd 100644
|
||||
--- a/xlators/mgmt/glusterd/src/glusterd.h
|
||||
+++ b/xlators/mgmt/glusterd/src/glusterd.h
|
||||
@@ -222,9 +222,6 @@ typedef struct {
|
||||
gf_atomic_t blockers;
|
||||
uint32_t mgmt_v3_lock_timeout;
|
||||
gf_boolean_t restart_bricks;
|
||||
- gf_boolean_t restart_shd; /* This flag prevents running two shd manager
|
||||
- simultaneously
|
||||
- */
|
||||
pthread_mutex_t attach_lock; /* Lock can be per process or a common one */
|
||||
pthread_mutex_t volume_lock; /* We release the big_lock from lot of places
|
||||
which might lead the modification of volinfo
|
||||
--
|
||||
1.8.3.1
|
||||
|
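Patch 0245 reverts a guard that serialized concurrent runs of the shd manager behind a restart_shd flag. The shape of that guard, sketched with a plain pthread mutex standing in for glusterd's synclock big_lock (names are placeholders):

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static bool shd_manager_busy = false;

void
shd_manager(void) /* caller is assumed to already hold big_lock */
{
    while (shd_manager_busy) {
        pthread_mutex_unlock(&big_lock); /* let the other run finish */
        sleep(2);
        pthread_mutex_lock(&big_lock);
    }
    shd_manager_busy = true;

    /* ... regenerate the volfile, start/stop/attach the daemon ... */

    shd_manager_busy = false; /* still under big_lock */
}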
@@ -0,0 +1,53 @@
|
||||
From bc5e3967864d6f6ea22deb22ba72aedca8367797 Mon Sep 17 00:00:00 2001
|
||||
From: Mohammed Rafi KC <rkavunga@redhat.com>
|
||||
Date: Thu, 11 Jul 2019 12:49:41 +0530
|
||||
Subject: [PATCH 246/255] Revert "glusterd/shd: Keep a ref on volinfo until
|
||||
attach rpc execute cbk"
|
||||
|
||||
This reverts commit c429d3c63601e6ea15af76aa684c30bbeb746467.
|
||||
|
||||
BUG: 1471742
|
||||
Change-Id: I614e8bdbcc5111dbf407aba047e7d2284bef8ac8
|
||||
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
|
||||
Reviewed-on: https://code.engineering.redhat.com/gerrit/175955
|
||||
Tested-by: RHGS Build Bot <nigelb@redhat.com>
|
||||
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
|
||||
---
|
||||
xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 3 ---
|
||||
xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 4 ----
|
||||
2 files changed, 7 deletions(-)
|
||||
|
||||
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
|
||||
index a9eab42..19eca9f 100644
|
||||
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
|
||||
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
|
||||
@@ -452,11 +452,8 @@ glusterd_shdsvc_start(glusterd_svc_t *svc, int flags)
|
||||
}
|
||||
|
||||
if (shd->attached) {
|
||||
- glusterd_volinfo_ref(volinfo);
|
||||
- /* Unref will happen from glusterd_svc_attach_cbk */
|
||||
ret = glusterd_attach_svc(svc, volinfo, flags);
|
||||
if (ret) {
|
||||
- glusterd_volinfo_unref(volinfo);
|
||||
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
|
||||
"Failed to attach shd svc(volume=%s) to pid=%d. Starting"
|
||||
"a new process",
|
||||
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
|
||||
index f7be394..02945b1 100644
|
||||
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
|
||||
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
|
||||
@@ -695,10 +695,6 @@ out:
|
||||
if (flag) {
|
||||
GF_FREE(flag);
|
||||
}
|
||||
-
|
||||
- if (volinfo)
|
||||
- glusterd_volinfo_unref(volinfo);
|
||||
-
|
||||
GF_ATOMIC_DEC(conf->blockers);
|
||||
STACK_DESTROY(frame->root);
|
||||
return 0;
|
||||
--
|
||||
1.8.3.1
|
||||
|
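Patch 0246 reverts the rule that an object handed to an asynchronous attach RPC must hold an extra reference until the callback runs. A self-contained sketch of that ref-across-async-call discipline; the names and the synchronous stand-in for the callback are illustrative only.

struct volinfo { int refcount; };

static void volinfo_ref(struct volinfo *v)   { v->refcount++; }
static void volinfo_unref(struct volinfo *v) { v->refcount--; }

static void
attach_cbk(struct volinfo *v)
{
    /* ... handle the RPC reply ... */
    volinfo_unref(v); /* release the reference taken at submit time */
}

static int
attach_svc(struct volinfo *v, int rpc_ok)
{
    volinfo_ref(v);    /* keep v alive until the callback runs */
    if (!rpc_ok) {     /* submission failed: the cbk will never run */
        volinfo_unref(v);
        return -1;
    }
    attach_cbk(v);     /* stands in for the async completion */
    return 0;
}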
144
0247-Revert-afr-shd-Cleanup-self-heal-daemon-resources-du.patch
Normal file
@@ -0,0 +1,144 @@
|
||||
From 33d59c74169192b4ba89abc915d8d785bc450fbb Mon Sep 17 00:00:00 2001
|
||||
From: Mohammed Rafi KC <rkavunga@redhat.com>
|
||||
Date: Thu, 11 Jul 2019 12:49:54 +0530
|
||||
Subject: [PATCH 247/255] Revert "afr/shd: Cleanup self heal daemon resources
|
||||
during afr fini"
|
||||
|
||||
This reverts commit faaaa3452ceec6afcc18cffc9beca3fe19841cce.
|
||||
|
||||
BUG: 1471742
|
||||
Change-Id: Id4a22ab45b89872684830f866ec4b589fca50a90
|
||||
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
|
||||
Reviewed-on: https://code.engineering.redhat.com/gerrit/175956
|
||||
Tested-by: RHGS Build Bot <nigelb@redhat.com>
|
||||
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
|
||||
---
|
||||
libglusterfs/src/syncop-utils.c | 8 -----
|
||||
xlators/cluster/afr/src/afr-self-heald.c | 2 --
|
||||
xlators/cluster/afr/src/afr.c | 57 --------------------------------
|
||||
3 files changed, 67 deletions(-)
|
||||
|
||||
diff --git a/libglusterfs/src/syncop-utils.c b/libglusterfs/src/syncop-utils.c
|
||||
index b842142..be03527 100644
|
||||
--- a/libglusterfs/src/syncop-utils.c
|
||||
+++ b/libglusterfs/src/syncop-utils.c
|
||||
@@ -350,11 +350,6 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid,
|
||||
gf_boolean_t cond_init = _gf_false;
|
||||
gf_boolean_t mut_init = _gf_false;
|
||||
gf_dirent_t entries;
|
||||
- xlator_t *this = NULL;
|
||||
-
|
||||
- if (frame) {
|
||||
- this = frame->this;
|
||||
- }
|
||||
|
||||
/*For this functionality to be implemented in general, we need
|
||||
* synccond_t infra which doesn't block the executing thread. Until then
|
||||
@@ -402,9 +397,6 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid,
|
||||
|
||||
list_for_each_entry_safe(entry, tmp, &entries.list, list)
|
||||
{
|
||||
- if (this && this->cleanup_starting)
|
||||
- goto out;
|
||||
-
|
||||
list_del_init(&entry->list);
|
||||
if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, "..")) {
|
||||
gf_dirent_entry_free(entry);
|
||||
diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c
|
||||
index 8bc4720..7eb1207 100644
|
||||
--- a/xlators/cluster/afr/src/afr-self-heald.c
|
||||
+++ b/xlators/cluster/afr/src/afr-self-heald.c
|
||||
@@ -373,7 +373,6 @@ afr_shd_sweep_prepare(struct subvol_healer *healer)
|
||||
|
||||
time(&event->start_time);
|
||||
event->end_time = 0;
|
||||
- _mask_cancellation();
|
||||
}
|
||||
|
||||
void
|
||||
@@ -395,7 +394,6 @@ afr_shd_sweep_done(struct subvol_healer *healer)
|
||||
|
||||
if (eh_save_history(shd->statistics[healer->subvol], history) < 0)
|
||||
GF_FREE(history);
|
||||
- _unmask_cancellation();
|
||||
}
|
||||
|
||||
int
|
||||
diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c
|
||||
index a0a7551..33258a0 100644
|
||||
--- a/xlators/cluster/afr/src/afr.c
|
||||
+++ b/xlators/cluster/afr/src/afr.c
|
||||
@@ -611,70 +611,13 @@ init(xlator_t *this)
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
-void
|
||||
-afr_destroy_healer_object(xlator_t *this, struct subvol_healer *healer)
|
||||
-{
|
||||
- int ret = -1;
|
||||
-
|
||||
- if (!healer)
|
||||
- return;
|
||||
-
|
||||
- if (healer->running) {
|
||||
- /*
|
||||
- * If there are any resources to cleanup, We need
|
||||
- * to do that gracefully using pthread_cleanup_push
|
||||
- */
|
||||
- ret = gf_thread_cleanup_xint(healer->thread);
|
||||
- if (ret)
|
||||
- gf_msg(this->name, GF_LOG_WARNING, 0, AFR_MSG_SELF_HEAL_FAILED,
|
||||
- "Failed to clean up healer threads.");
|
||||
- healer->thread = 0;
|
||||
- }
|
||||
- pthread_cond_destroy(&healer->cond);
|
||||
- pthread_mutex_destroy(&healer->mutex);
|
||||
-}
|
||||
-
|
||||
-void
|
||||
-afr_selfheal_daemon_fini(xlator_t *this)
|
||||
-{
|
||||
- struct subvol_healer *healer = NULL;
|
||||
- afr_self_heald_t *shd = NULL;
|
||||
- afr_private_t *priv = NULL;
|
||||
- int i = 0;
|
||||
-
|
||||
- priv = this->private;
|
||||
- if (!priv)
|
||||
- return;
|
||||
-
|
||||
- shd = &priv->shd;
|
||||
- if (!shd->iamshd)
|
||||
- return;
|
||||
-
|
||||
- for (i = 0; i < priv->child_count; i++) {
|
||||
- healer = &shd->index_healers[i];
|
||||
- afr_destroy_healer_object(this, healer);
|
||||
|
||||
- healer = &shd->full_healers[i];
|
||||
- afr_destroy_healer_object(this, healer);
|
||||
-
|
||||
- if (shd->statistics[i])
|
||||
- eh_destroy(shd->statistics[i]);
|
||||
- }
|
||||
- GF_FREE(shd->index_healers);
|
||||
- GF_FREE(shd->full_healers);
|
||||
- GF_FREE(shd->statistics);
|
||||
- if (shd->split_brain)
|
||||
- eh_destroy(shd->split_brain);
|
||||
-}
|
||||
void
|
||||
fini(xlator_t *this)
|
||||
{
|
||||
afr_private_t *priv = NULL;
|
||||
|
||||
priv = this->private;
|
||||
-
|
||||
- afr_selfheal_daemon_fini(this);
|
||||
-
|
||||
LOCK(&priv->lock);
|
||||
if (priv->timer != NULL) {
|
||||
gf_timer_call_cancel(this->ctx, priv->timer);
|
||||
--
|
||||
1.8.3.1
|
||||
|
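Among other things, patch 0247 removes the _mask_cancellation()/_unmask_cancellation() pair around heal sweeps. Assuming those helpers wrap pthread_setcancelstate(), the intent can be sketched as follows:

#include <pthread.h>

static void
sweep_directory(void)
{
    int old_state;

    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old_state);

    /* ... crawl entries; a pthread_cancel() aimed at this thread is
     * held pending here instead of killing it mid-scan, so no
     * half-updated heal state is left behind ... */

    pthread_setcancelstate(old_state, NULL); /* pending cancel may fire */
}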
151
0248-Revert-shd-mux-Fix-coverity-issues-introduced-by-shd.patch
Normal file
@@ -0,0 +1,151 @@
|
||||
From 469cb9e16d46f075caf609ddcb12a7c02d73ce8b Mon Sep 17 00:00:00 2001
|
||||
From: Mohammed Rafi KC <rkavunga@redhat.com>
|
||||
Date: Thu, 11 Jul 2019 12:50:24 +0530
|
||||
Subject: [PATCH 248/255] Revert "shd/mux: Fix coverity issues introduced by
|
||||
shd mux patch"
|
||||
|
||||
This reverts commit 0021a4bbc9af2bfe28d4a79f76c3cd33f23dd118.
|
||||
|
||||
BUG: 1471742
|
||||
Change-Id: I0749328412ed3cc7ae5d64baea7a90b63b489a08
|
||||
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
|
||||
Reviewed-on: https://code.engineering.redhat.com/gerrit/175957
|
||||
Tested-by: RHGS Build Bot <nigelb@redhat.com>
|
||||
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
|
||||
---
|
||||
libglusterfs/src/graph.c | 21 ++++++++-------------
|
||||
xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 6 ------
|
||||
xlators/mgmt/glusterd/src/glusterd-svc-helper.c | 24 +++++++-----------------
|
||||
3 files changed, 15 insertions(+), 36 deletions(-)
|
||||
|
||||
diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c
|
||||
index 4c8b02d..a492dd8 100644
|
||||
--- a/libglusterfs/src/graph.c
|
||||
+++ b/libglusterfs/src/graph.c
|
||||
@@ -1470,9 +1470,7 @@ glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj)
|
||||
goto out;
|
||||
parent_graph = ctx->active;
|
||||
graph = volfile_obj->graph;
|
||||
- if (!graph)
|
||||
- goto out;
|
||||
- if (graph->first)
|
||||
+ if (graph && graph->first)
|
||||
xl = graph->first;
|
||||
|
||||
last_xl = graph->last_xl;
|
||||
@@ -1593,10 +1591,12 @@ glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
|
||||
parent_graph->leaf_count += graph->leaf_count;
|
||||
parent_graph->id++;
|
||||
|
||||
- volfile_obj = GF_CALLOC(1, sizeof(gf_volfile_t), gf_common_volfile_t);
|
||||
if (!volfile_obj) {
|
||||
- ret = -1;
|
||||
- goto out;
|
||||
+ volfile_obj = GF_CALLOC(1, sizeof(gf_volfile_t), gf_common_volfile_t);
|
||||
+ if (!volfile_obj) {
|
||||
+ ret = -1;
|
||||
+ goto out;
|
||||
+ }
|
||||
}
|
||||
|
||||
graph->used = 1;
|
||||
@@ -1641,7 +1641,6 @@ glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
|
||||
{
|
||||
glusterfs_graph_t *oldvolfile_graph = NULL;
|
||||
glusterfs_graph_t *newvolfile_graph = NULL;
|
||||
- char vol_id[NAME_MAX + 1];
|
||||
|
||||
int ret = -1;
|
||||
|
||||
@@ -1673,9 +1672,6 @@ glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
|
||||
glusterfs_graph_prepare(newvolfile_graph, ctx, newvolfile_graph->first);
|
||||
|
||||
if (!is_graph_topology_equal(oldvolfile_graph, newvolfile_graph)) {
|
||||
- ret = snprintf(vol_id, sizeof(vol_id), "%s", volfile_obj->vol_id);
|
||||
- if (ret < 0)
|
||||
- goto out;
|
||||
ret = glusterfs_process_svc_detach(ctx, volfile_obj);
|
||||
if (ret) {
|
||||
gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL,
|
||||
@@ -1684,9 +1680,8 @@ glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
|
||||
"old graph. Aborting the reconfiguration operation");
|
||||
goto out;
|
||||
}
|
||||
- volfile_obj = NULL;
|
||||
- ret = glusterfs_process_svc_attach_volfp(ctx, newvolfile_fp, vol_id,
|
||||
- checksum);
|
||||
+ ret = glusterfs_process_svc_attach_volfp(ctx, newvolfile_fp,
|
||||
+ volfile_obj->vol_id, checksum);
|
||||
goto out;
|
||||
}
|
||||
|
||||
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
|
||||
index 19eca9f..4789843 100644
|
||||
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
|
||||
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
|
||||
@@ -101,8 +101,6 @@ glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn,
|
||||
svc->conn.rpc = rpc_clnt_ref(mux_svc->rpc);
|
||||
ret = snprintf(svc->conn.sockpath, sizeof(svc->conn.sockpath), "%s",
|
||||
mux_conn->sockpath);
|
||||
- if (ret < 0)
|
||||
- goto out;
|
||||
} else {
|
||||
ret = mkdir_p(logdir, 0755, _gf_true);
|
||||
if ((ret == -1) && (EEXIST != errno)) {
|
||||
@@ -675,10 +673,6 @@ glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig)
|
||||
glusterd_volinfo_ref(volinfo);
|
||||
svc_proc->data = volinfo;
|
||||
ret = glusterd_svc_stop(svc, sig);
|
||||
- if (ret) {
|
||||
- glusterd_volinfo_unref(volinfo);
|
||||
- goto out;
|
||||
- }
|
||||
}
|
||||
if (!empty && pid != -1) {
|
||||
ret = glusterd_detach_svc(svc, volinfo, sig);
|
||||
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
|
||||
index 02945b1..e42703c 100644
|
||||
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
|
||||
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
|
||||
@@ -411,14 +411,9 @@ __gf_find_compatible_svc(gd_node_type daemon)
|
||||
conf = THIS->private;
|
||||
GF_VALIDATE_OR_GOTO("glusterd", conf, out);
|
||||
|
||||
- switch (daemon) {
|
||||
- case GD_NODE_SHD: {
|
||||
- svc_procs = &conf->shd_procs;
|
||||
- if (!svc_procs)
|
||||
- goto out;
|
||||
- } break;
|
||||
- default:
|
||||
- /* Add support for other client daemons here */
|
||||
+ if (daemon == GD_NODE_SHD) {
|
||||
+ svc_procs = &conf->shd_procs;
|
||||
+ if (!svc_procs)
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -545,16 +540,11 @@ __gf_find_compatible_svc_from_pid(gd_node_type daemon, pid_t pid)
|
||||
if (!conf)
|
||||
return NULL;
|
||||
|
||||
- switch (daemon) {
|
||||
- case GD_NODE_SHD: {
|
||||
- svc_procs = &conf->shd_procs;
|
||||
- if (!svc_procs)
|
||||
- return NULL;
|
||||
- } break;
|
||||
- default:
|
||||
- /* Add support for other client daemons here */
|
||||
+ if (daemon == GD_NODE_SHD) {
|
||||
+ svc_procs = &conf->shd_procs;
|
||||
+ if (!svc_proc)
|
||||
return NULL;
|
||||
- }
|
||||
+ } /* Can be moved to switch when mux is implemented for other daemon; */
|
||||
|
||||
cds_list_for_each_entry(svc_proc, svc_procs, svc_proc_list)
|
||||
{
|
||||
--
|
||||
1.8.3.1
|
||||
|
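One of the coverity fixes undone by patch 0248 was checking the return value of snprintf() when copying the socket path. A hedged sketch of the fuller form of that check; the truncation test goes slightly beyond what the reverted hunk did.

#include <stdio.h>

static int
copy_sockpath(char *dst, size_t dst_size, const char *src)
{
    int ret = snprintf(dst, dst_size, "%s", src);
    if (ret < 0)
        return -1;                 /* encoding error */
    if ((size_t)ret >= dst_size)
        return -1;                 /* output would have been truncated */
    return 0;
}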
95
0249-Revert-client-fini-return-fini-after-rpc-cleanup.patch
Normal file
@@ -0,0 +1,95 @@
|
||||
From 1864a4f382f3031915e8126440a1561035487e49 Mon Sep 17 00:00:00 2001
|
||||
From: Mohammed Rafi KC <rkavunga@redhat.com>
|
||||
Date: Thu, 11 Jul 2019 12:53:20 +0530
|
||||
Subject: [PATCH 249/255] Revert "client/fini: return fini after rpc cleanup"
|
||||
|
||||
This reverts commit d79cb2cdff6fe8d962c9ac095a7541ddf500302b.
|
||||
|
||||
BUG: 1471742
|
||||
Change-Id: I15e6544d47fb7b6002c3b44de3fe0b2a13c84f51
|
||||
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
|
||||
Reviewed-on: https://code.engineering.redhat.com/gerrit/175958
|
||||
Tested-by: RHGS Build Bot <nigelb@redhat.com>
|
||||
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
|
||||
---
|
||||
xlators/protocol/client/src/client.c | 25 +++++--------------------
|
||||
xlators/protocol/client/src/client.h | 6 ------
|
||||
2 files changed, 5 insertions(+), 26 deletions(-)
|
||||
|
||||
diff --git a/xlators/protocol/client/src/client.c b/xlators/protocol/client/src/client.c
|
||||
index 95e4be5..532ef35 100644
|
||||
--- a/xlators/protocol/client/src/client.c
|
||||
+++ b/xlators/protocol/client/src/client.c
|
||||
@@ -49,12 +49,11 @@ client_fini_complete(xlator_t *this)
|
||||
if (!conf->destroy)
|
||||
return 0;
|
||||
|
||||
- pthread_mutex_lock(&conf->lock);
|
||||
- {
|
||||
- conf->fini_completed = _gf_true;
|
||||
- pthread_cond_broadcast(&conf->fini_complete_cond);
|
||||
- }
|
||||
- pthread_mutex_unlock(&conf->lock);
|
||||
+ this->private = NULL;
|
||||
+
|
||||
+ pthread_spin_destroy(&conf->fd_lock);
|
||||
+ pthread_mutex_destroy(&conf->lock);
|
||||
+ GF_FREE(conf);
|
||||
|
||||
out:
|
||||
return 0;
|
||||
@@ -2730,7 +2729,6 @@ init(xlator_t *this)
|
||||
goto out;
|
||||
|
||||
pthread_mutex_init(&conf->lock, NULL);
|
||||
- pthread_cond_init(&conf->fini_complete_cond, NULL);
|
||||
pthread_spin_init(&conf->fd_lock, 0);
|
||||
INIT_LIST_HEAD(&conf->saved_fds);
|
||||
|
||||
@@ -2789,7 +2787,6 @@ fini(xlator_t *this)
|
||||
if (!conf)
|
||||
return;
|
||||
|
||||
- conf->fini_completed = _gf_false;
|
||||
conf->destroy = 1;
|
||||
if (conf->rpc) {
|
||||
/* cleanup the saved-frames before last unref */
|
||||
@@ -2797,18 +2794,6 @@ fini(xlator_t *this)
|
||||
rpc_clnt_unref(conf->rpc);
|
||||
}
|
||||
|
||||
- pthread_mutex_lock(&conf->lock);
|
||||
- {
|
||||
- while (!conf->fini_completed)
|
||||
- pthread_cond_wait(&conf->fini_complete_cond, &conf->lock);
|
||||
- }
|
||||
- pthread_mutex_unlock(&conf->lock);
|
||||
-
|
||||
- pthread_spin_destroy(&conf->fd_lock);
|
||||
- pthread_mutex_destroy(&conf->lock);
|
||||
- pthread_cond_destroy(&conf->fini_complete_cond);
|
||||
- GF_FREE(conf);
|
||||
-
|
||||
/* Saved Fds */
|
||||
/* TODO: */
|
||||
|
||||
diff --git a/xlators/protocol/client/src/client.h b/xlators/protocol/client/src/client.h
|
||||
index 8dcd72f..f12fa61 100644
|
||||
--- a/xlators/protocol/client/src/client.h
|
||||
+++ b/xlators/protocol/client/src/client.h
|
||||
@@ -235,12 +235,6 @@ typedef struct clnt_conf {
|
||||
* up, disconnects can be
|
||||
* logged
|
||||
*/
|
||||
-
|
||||
- gf_boolean_t old_protocol; /* used only for old-protocol testing */
|
||||
- pthread_cond_t fini_complete_cond; /* Used to wait till we finsh the fini
|
||||
- compltely, ie client_fini_complete
|
||||
- to return*/
|
||||
- gf_boolean_t fini_completed;
|
||||
} clnt_conf_t;
|
||||
|
||||
typedef struct _client_fd_ctx {
|
||||
--
|
||||
1.8.3.1
|
||||
|
4572
0250-Revert-mgmt-shd-Implement-multiplexing-in-self-heal-.patch
Normal file
File diff suppressed because it is too large
@@ -0,0 +1,57 @@
|
||||
From 7a04fb9999f5d25c17f5593eed5e98d0f5a1932d Mon Sep 17 00:00:00 2001
|
||||
From: karthik-us <ksubrahm@redhat.com>
|
||||
Date: Mon, 15 Jul 2019 14:30:52 +0530
|
||||
Subject: [PATCH 251/255] tests: Fix
|
||||
bug-1717819-metadata-split-brain-detection.t failure
|
||||
|
||||
<Backport of: https://review.gluster.org/#/c/glusterfs/+/23043/>
|
||||
|
||||
Problem:
|
||||
tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t fails
|
||||
intermittently in test cases #49 & #50, which compare the user-set
|
||||
xattr values after enabling the heal. We are not waiting for
|
||||
the heal to complete before comparing those values, which might lead
|
||||
those tests to fail.
|
||||
|
||||
Fix:
|
||||
Wait till the HEAL_TIMEOUT before comparing the xattr values.
|
||||
Also check that the shd comes up and that the bricks connect to the shd
|
||||
process in the other test case.
|
||||
|
||||
Change-Id: I0021c2d5d251111c695e2bf18c63e8189e456114
|
||||
fixes: bz#1704562
|
||||
Signed-off-by: karthik-us <ksubrahm@redhat.com>
|
||||
Reviewed-on: https://code.engineering.redhat.com/gerrit/176071
|
||||
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
|
||||
Tested-by: RHGS Build Bot <nigelb@redhat.com>
|
||||
---
|
||||
tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t | 6 ++++++
|
||||
1 file changed, 6 insertions(+)
|
||||
|
||||
diff --git a/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t b/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
|
||||
index 94b8bf3..76d1f21 100644
|
||||
--- a/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
|
||||
+++ b/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
|
||||
@@ -76,6 +76,10 @@ EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
|
||||
|
||||
# Launch heal
|
||||
TEST $CLI volume heal $V0 enable
|
||||
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
|
||||
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
|
||||
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
|
||||
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
|
||||
TEST $CLI volume heal $V0
|
||||
EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
|
||||
|
||||
@@ -117,6 +121,8 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
|
||||
EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
|
||||
EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
|
||||
EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
|
||||
+TEST $CLI volume heal $V0
|
||||
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
|
||||
|
||||
B0_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}0/dir/file)
|
||||
B1_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}1/dir/file)
|
||||
--
|
||||
1.8.3.1
|
||||
|
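The fix in patch 0251 is the usual cure for racy tests: poll until the asynchronous heal settles instead of asserting immediately after kicking it off. For illustration only, a C analogue of the test framework's EXPECT_WITHIN loop:

#include <time.h>
#include <unistd.h>

static int
wait_within(int timeout_sec, int (*cond)(void *), void *arg)
{
    time_t deadline = time(NULL) + timeout_sec;

    while (time(NULL) < deadline) {
        if (cond(arg))
            return 0;  /* condition reached in time */
        sleep(1);      /* heal count etc. is refreshed by the daemon */
    }
    return -1;         /* timed out; the caller should fail the test */
}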
@@ -0,0 +1,63 @@
|
||||
From 5a35a996257d6aaa7fa55ff1e1aac407dd4824fe Mon Sep 17 00:00:00 2001
|
||||
From: Sanju Rakonde <srakonde@redhat.com>
|
||||
Date: Fri, 12 Jul 2019 16:28:04 +0530
|
||||
Subject: [PATCH 252/255] glusterd: do not mark skip_locking as true for
|
||||
geo-rep operations
|
||||
|
||||
We need to send the commit req to peers in case of geo-rep
|
||||
operations even though it is a no volname operation. In commit
|
||||
phase peers try to set the txn_opinfo which will fail because
|
||||
it is a no volname operation where we don't require a commit
|
||||
phase. We mark skip_locking as true for no volname operations,
|
||||
but we have to give an exception to geo-rep operations, so that
|
||||
they can set txn_opinfo in commit phase.
|
||||
|
||||
Please refer to detailed RCA at the bug: 1729463
|
||||
|
||||
> upstream patch : https://review.gluster.org/#/c/glusterfs/+/23034/
|
||||
|
||||
>fixes: bz#1729463
|
||||
>Change-Id: I9f2478b12a281f6e052035c0563c40543493a3fc
|
||||
>Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
|
||||
|
||||
Change-Id: I9f2478b12a281f6e052035c0563c40543493a3fc
|
||||
BUG: 1727785
|
||||
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
|
||||
Reviewed-on: https://code.engineering.redhat.com/gerrit/176032
|
||||
Tested-by: RHGS Build Bot <nigelb@redhat.com>
|
||||
Reviewed-by: Shwetha Acharya <sacharya@redhat.com>
|
||||
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
|
||||
---
|
||||
xlators/mgmt/glusterd/src/glusterd-handler.c | 9 +++++++--
|
||||
1 file changed, 7 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
|
||||
index cb2666b..2e73c98 100644
|
||||
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
|
||||
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
|
||||
@@ -1078,7 +1078,11 @@ __glusterd_handle_stage_op(rpcsvc_request_t *req)
|
||||
|
||||
/* In cases where there is no volname, the receivers won't have a
|
||||
* transaction opinfo created, as for those operations, the locking
|
||||
- * phase where the transaction opinfos are created, won't be called. */
|
||||
+ * phase where the transaction opinfos are created, won't be called.
|
||||
+ * skip_locking will be true for all such transaction and we clear
|
||||
+ * the txn_opinfo after the staging phase, except for geo-replication
|
||||
+ * operations where we need to access txn_opinfo in the later phases also.
|
||||
+ */
|
||||
ret = glusterd_get_txn_opinfo(txn_id, &txn_op_info);
|
||||
if (ret) {
|
||||
gf_msg_debug(this->name, 0, "No transaction's opinfo set");
|
||||
@@ -1087,7 +1091,8 @@ __glusterd_handle_stage_op(rpcsvc_request_t *req)
|
||||
glusterd_txn_opinfo_init(&txn_op_info, &state, &op_req.op,
|
||||
req_ctx->dict, req);
|
||||
|
||||
- txn_op_info.skip_locking = _gf_true;
|
||||
+ if (req_ctx->op != GD_OP_GSYNC_SET)
|
||||
+ txn_op_info.skip_locking = _gf_true;
|
||||
ret = glusterd_set_txn_opinfo(txn_id, &txn_op_info);
|
||||
if (ret) {
|
||||
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
|
||||
--
|
||||
1.8.3.1
|
||||
|
246
0253-core-fix-deadlock-between-statedump-and-fd_anonymous.patch
Normal file
@@ -0,0 +1,246 @@
|
||||
From ea7f11b989896d76b8d091d26bc0241bce9413f8 Mon Sep 17 00:00:00 2001
|
||||
From: Xavi Hernandez <xhernandez@redhat.com>
|
||||
Date: Thu, 4 Jul 2019 13:21:33 +0200
|
||||
Subject: [PATCH 253/255] core: fix deadlock between statedump and
|
||||
fd_anonymous()
|
||||
|
||||
There exists a deadlock between statedump generation and fd_anonymous()
|
||||
function because they are acquiring inode table lock and inode lock in
|
||||
reverse order.
|
||||
|
||||
This patch modifies fd_anonymous() so that it takes inode lock only when
|
||||
it's really necessary, avoiding the deadlock.
|
||||
|
||||
Upstream patch:
|
||||
> Change-Id: I24355447f0ea1b39e2546782ad07f0512cc381e7
|
||||
> Upstream patch link: https://review.gluster.org/c/glusterfs/+/22995
|
||||
> BUG: 1727068
|
||||
> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
|
||||
|
||||
Change-Id: I24355447f0ea1b39e2546782ad07f0512cc381e7
|
||||
Fixes: bz#1722209
|
||||
Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
|
||||
Reviewed-on: https://code.engineering.redhat.com/gerrit/176096
|
||||
Tested-by: RHGS Build Bot <nigelb@redhat.com>
|
||||
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
|
||||
---
|
||||
libglusterfs/src/fd.c | 137 ++++++++++++++++++++++----------------------------
|
||||
1 file changed, 61 insertions(+), 76 deletions(-)
|
||||
|
||||
diff --git a/libglusterfs/src/fd.c b/libglusterfs/src/fd.c
|
||||
index b8aac72..314546a 100644
|
||||
--- a/libglusterfs/src/fd.c
|
||||
+++ b/libglusterfs/src/fd.c
|
||||
@@ -532,7 +532,7 @@ fd_unref(fd_t *fd)
|
||||
return;
|
||||
}
|
||||
|
||||
-fd_t *
|
||||
+static fd_t *
|
||||
__fd_bind(fd_t *fd)
|
||||
{
|
||||
list_del_init(&fd->inode_list);
|
||||
@@ -562,9 +562,9 @@ fd_bind(fd_t *fd)
|
||||
}
|
||||
|
||||
static fd_t *
|
||||
-__fd_create(inode_t *inode, uint64_t pid)
|
||||
+fd_allocate(inode_t *inode, uint64_t pid)
|
||||
{
|
||||
- fd_t *fd = NULL;
|
||||
+ fd_t *fd;
|
||||
|
||||
if (inode == NULL) {
|
||||
gf_msg_callingfn("fd", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
|
||||
@@ -573,64 +573,67 @@ __fd_create(inode_t *inode, uint64_t pid)
|
||||
}
|
||||
|
||||
fd = mem_get0(inode->table->fd_mem_pool);
|
||||
- if (!fd)
|
||||
- goto out;
|
||||
+ if (fd == NULL) {
|
||||
+ return NULL;
|
||||
+ }
|
||||
|
||||
fd->xl_count = inode->table->xl->graph->xl_count + 1;
|
||||
|
||||
fd->_ctx = GF_CALLOC(1, (sizeof(struct _fd_ctx) * fd->xl_count),
|
||||
gf_common_mt_fd_ctx);
|
||||
- if (!fd->_ctx)
|
||||
- goto free_fd;
|
||||
+ if (fd->_ctx == NULL) {
|
||||
+ goto failed;
|
||||
+ }
|
||||
|
||||
fd->lk_ctx = fd_lk_ctx_create();
|
||||
- if (!fd->lk_ctx)
|
||||
- goto free_fd_ctx;
|
||||
-
|
||||
- fd->inode = inode_ref(inode);
|
||||
- fd->pid = pid;
|
||||
- INIT_LIST_HEAD(&fd->inode_list);
|
||||
-
|
||||
- LOCK_INIT(&fd->lock);
|
||||
-out:
|
||||
- return fd;
|
||||
+ if (fd->lk_ctx != NULL) {
|
||||
+ /* We need to take a reference from the inode, but we cannot do it
|
||||
+ * here because this function can be called with the inode lock taken
|
||||
+ * and inode_ref() takes the inode's table lock. This is the reverse
|
||||
+ * of the logical lock acquisition order and can cause a deadlock. So
|
||||
+ * we simply assign the inode here and we delegate the inode reference
|
||||
+ * responsibility to the caller (when this function succeeds and the
|
||||
+ * inode lock is released). This is safe because the caller must hold
|
||||
+ * a reference of the inode to use it, so it's guaranteed that the
|
||||
+ * number of references won't reach 0 before the caller finishes.
|
||||
+ *
|
||||
+ * TODO: minimize use of locks in favor of atomic operations to avoid
|
||||
+ * these dependencies. */
|
||||
+ fd->inode = inode;
|
||||
+ fd->pid = pid;
|
||||
+ INIT_LIST_HEAD(&fd->inode_list);
|
||||
+ LOCK_INIT(&fd->lock);
|
||||
+ GF_ATOMIC_INIT(fd->refcount, 1);
|
||||
+ return fd;
|
||||
+ }
|
||||
|
||||
-free_fd_ctx:
|
||||
GF_FREE(fd->_ctx);
|
||||
-free_fd:
|
||||
+
|
||||
+failed:
|
||||
mem_put(fd);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
fd_t *
|
||||
-fd_create(inode_t *inode, pid_t pid)
|
||||
+fd_create_uint64(inode_t *inode, uint64_t pid)
|
||||
{
|
||||
- fd_t *fd = NULL;
|
||||
-
|
||||
- fd = __fd_create(inode, (uint64_t)pid);
|
||||
- if (!fd)
|
||||
- goto out;
|
||||
+ fd_t *fd;
|
||||
|
||||
- fd = fd_ref(fd);
|
||||
+ fd = fd_allocate(inode, pid);
|
||||
+ if (fd != NULL) {
|
||||
+ /* fd_allocate() doesn't get a reference from the inode. We need to
|
||||
+ * take it here in case of success. */
|
||||
+ inode_ref(inode);
|
||||
+ }
|
||||
|
||||
-out:
|
||||
return fd;
|
||||
}
|
||||
|
||||
fd_t *
|
||||
-fd_create_uint64(inode_t *inode, uint64_t pid)
|
||||
+fd_create(inode_t *inode, pid_t pid)
|
||||
{
|
||||
- fd_t *fd = NULL;
|
||||
-
|
||||
- fd = __fd_create(inode, pid);
|
||||
- if (!fd)
|
||||
- goto out;
|
||||
-
|
||||
- fd = fd_ref(fd);
|
||||
-
|
||||
-out:
|
||||
- return fd;
|
||||
+ return fd_create_uint64(inode, (uint64_t)pid);
|
||||
}
|
||||
|
||||
static fd_t *
|
||||
@@ -719,10 +722,13 @@ __fd_lookup_anonymous(inode_t *inode, int32_t flags)
|
||||
return fd;
|
||||
}
|
||||
|
||||
-static fd_t *
|
||||
-__fd_anonymous(inode_t *inode, int32_t flags)
|
||||
+fd_t *
|
||||
+fd_anonymous_with_flags(inode_t *inode, int32_t flags)
|
||||
{
|
||||
fd_t *fd = NULL;
|
||||
+ bool ref = false;
|
||||
+
|
||||
+ LOCK(&inode->lock);
|
||||
|
||||
fd = __fd_lookup_anonymous(inode, flags);
|
||||
|
||||
@@ -730,54 +736,33 @@ __fd_anonymous(inode_t *inode, int32_t flags)
|
||||
__fd_lookup_anonymous(), so no need of one more fd_ref().
|
||||
if (!fd); then both create and bind won't bump up the ref
|
||||
count, so we have to call fd_ref() after bind. */
|
||||
- if (!fd) {
|
||||
- fd = __fd_create(inode, 0);
|
||||
-
|
||||
- if (!fd)
|
||||
- return NULL;
|
||||
-
|
||||
- fd->anonymous = _gf_true;
|
||||
- fd->flags = GF_ANON_FD_FLAGS | flags;
|
||||
+ if (fd == NULL) {
|
||||
+ fd = fd_allocate(inode, 0);
|
||||
+ if (fd != NULL) {
|
||||
+ fd->anonymous = _gf_true;
|
||||
+ fd->flags = GF_ANON_FD_FLAGS | (flags & O_DIRECT);
|
||||
|
||||
- __fd_bind(fd);
|
||||
+ __fd_bind(fd);
|
||||
|
||||
- __fd_ref(fd);
|
||||
+ ref = true;
|
||||
+ }
|
||||
}
|
||||
|
||||
- return fd;
|
||||
-}
|
||||
-
|
||||
-fd_t *
|
||||
-fd_anonymous(inode_t *inode)
|
||||
-{
|
||||
- fd_t *fd = NULL;
|
||||
+ UNLOCK(&inode->lock);
|
||||
|
||||
- LOCK(&inode->lock);
|
||||
- {
|
||||
- fd = __fd_anonymous(inode, GF_ANON_FD_FLAGS);
|
||||
+ if (ref) {
|
||||
+ /* fd_allocate() doesn't get a reference from the inode. We need to
|
||||
+ * take it here in case of success. */
|
||||
+ inode_ref(inode);
|
||||
}
|
||||
- UNLOCK(&inode->lock);
|
||||
|
||||
return fd;
|
||||
}
|
||||
|
||||
fd_t *
|
||||
-fd_anonymous_with_flags(inode_t *inode, int32_t flags)
|
||||
+fd_anonymous(inode_t *inode)
|
||||
{
|
||||
- fd_t *fd = NULL;
|
||||
-
|
||||
- if (flags & O_DIRECT)
|
||||
- flags = GF_ANON_FD_FLAGS | O_DIRECT;
|
||||
- else
|
||||
- flags = GF_ANON_FD_FLAGS;
|
||||
-
|
||||
- LOCK(&inode->lock);
|
||||
- {
|
||||
- fd = __fd_anonymous(inode, flags);
|
||||
- }
|
||||
- UNLOCK(&inode->lock);
|
||||
-
|
||||
- return fd;
|
||||
+ return fd_anonymous_with_flags(inode, 0);
|
||||
}
|
||||
|
||||
fd_t *
|
||||
--
|
||||
1.8.3.1
|
||||
|
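
The fix above hinges on a lock-ordering rule: the inode table's lock must be taken before an inode's lock, so code that already holds an inode lock must not call anything that grabs the table lock (such as taking an inode reference). Below is a minimal, self-contained C sketch of that pattern, not GlusterFS code; the types and helpers (struct table, struct inode, struct fd_obj, inode_ref_, fd_allocate_, fd_anonymous_) are invented for illustration:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy versions of the two objects involved. The ordering rule is:
 * table->lock may be taken first, then inode->lock, never the reverse. */
struct table {
    pthread_mutex_t lock;
};

struct inode {
    pthread_mutex_t lock;
    struct table *tbl;
    atomic_int refcount;
};

struct fd_obj {
    struct inode *inode;
};

/* Taking a reference needs table->lock, so it must never run while
 * inode->lock is held: that would invert the order and can deadlock
 * against a thread holding table->lock and waiting for inode->lock. */
static void inode_ref_(struct inode *in)
{
    pthread_mutex_lock(&in->tbl->lock);
    atomic_fetch_add(&in->refcount, 1);
    pthread_mutex_unlock(&in->tbl->lock);
}

/* Allocate under inode->lock WITHOUT referencing the inode; only store
 * the pointer. The caller already holds a reference, so the count cannot
 * drop to zero while this runs; the caller refs after unlocking. */
static struct fd_obj *fd_allocate_(struct inode *in)
{
    struct fd_obj *fd = malloc(sizeof(*fd));
    if (fd != NULL)
        fd->inode = in; /* deferred: no inode_ref_() here */
    return fd;
}

static struct fd_obj *fd_anonymous_(struct inode *in)
{
    pthread_mutex_lock(&in->lock);
    struct fd_obj *fd = fd_allocate_(in);
    pthread_mutex_unlock(&in->lock);

    if (fd != NULL)
        inode_ref_(in); /* safe now: inode->lock has been released */
    return fd;
}

int main(void)
{
    struct table tbl;
    struct inode in;

    pthread_mutex_init(&tbl.lock, NULL);
    pthread_mutex_init(&in.lock, NULL);
    in.tbl = &tbl;
    atomic_init(&in.refcount, 1); /* the caller's own reference */

    struct fd_obj *fd = fd_anonymous_(&in);
    /* Prints 2: the caller's reference plus the fd's reference. */
    printf("refcount = %d\n", atomic_load(&in.refcount));
    free(fd);
    return 0;
}

The same reasoning explains why fd_anonymous_with_flags() in the patch defers inode_ref() until after UNLOCK(&inode->lock): the reference is taken only on the success path, once no inode lock is held.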

43
0254-Detach-iot_worker-to-release-its-resources.patch
Normal file
@@ -0,0 +1,43 @@
From 2bbb097d087bb5ef142775500708f11ccd31bac0 Mon Sep 17 00:00:00 2001
From: Liguang Li <liguang.lee6@gmail.com>
Date: Fri, 21 Jun 2019 12:18:58 +0800
Subject: [PATCH 254/255] Detach iot_worker to release its resources

When an iot_worker thread terminates, its resources are not reaped,
which consumes a lot of memory over time.

Detach iot_worker threads so that their resources are automatically
released back to the system on exit.

> upstream patch : https://review.gluster.org/#/c/glusterfs/+/22918/

>fixes: bz#1729107
>Change-Id: I71fabb2940e76ad54dc56b4c41aeeead2644b8bb
>Signed-off-by: Liguang Li <liguang.lee6@gmail.com>

BUG:1729108
Change-Id: I71fabb2940e76ad54dc56b4c41aeeead2644b8bb
Signed-off-by: Liguang Li <liguang.lee6@gmail.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/176106
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
 xlators/performance/io-threads/src/io-threads.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/xlators/performance/io-threads/src/io-threads.c b/xlators/performance/io-threads/src/io-threads.c
index 060d923..1d96691 100644
--- a/xlators/performance/io-threads/src/io-threads.c
+++ b/xlators/performance/io-threads/src/io-threads.c
@@ -835,6 +835,7 @@ __iot_workers_scale(iot_conf_t *conf)
     ret = gf_thread_create(&thread, &conf->w_attr, iot_worker, conf,
                            "iotwr%03hx", conf->curr_count & 0x3ff);
     if (ret == 0) {
+        pthread_detach(thread);
         conf->curr_count++;
         gf_msg_debug(conf->this->name, 0,
                      "scaled threads to %d (queue_size=%d/%d)",
--
1.8.3.1
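
For context on the one-line fix above: a joinable pthread keeps its stack and control block allocated after it exits until someone calls pthread_join(); a detached thread returns them to the system as soon as it terminates. A minimal, self-contained sketch of the pattern (plain pthreads only, none of the io-threads machinery):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Worker that exits on its own; nothing will ever pthread_join() it. */
static void *worker(void *arg)
{
    (void)arg;
    /* ... perform one unit of work ... */
    return NULL;
}

int main(void)
{
    for (int i = 0; i < 4; i++) {
        pthread_t t;
        if (pthread_create(&t, NULL, worker, NULL) == 0) {
            /* Detach right after a successful create, exactly as the
             * patch does: the thread's resources are reclaimed when
             * worker() returns, with no join required. */
            pthread_detach(t);
        }
    }
    sleep(1); /* crude wait so the workers finish before the process exits */
    return 0;
}

An equivalent design is to create the thread detached from the start via pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); calling pthread_detach() immediately after a successful pthread_create() has the same effect.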

104
0255-Revert-tier-shd-glusterd-with-shd-mux-the-shd-volfil.patch
Normal file
@@ -0,0 +1,104 @@
From 684a4949552164d3469329b3f959de4369d54faa Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Sun, 14 Jul 2019 08:06:11 +0530
Subject: [PATCH 255/255] Revert "tier/shd/glusterd: with shd mux, the shd
 volfile path have to be updated for tier-heald.t"

This reverts commit 6e7d333625ecd9f7402c2e839338350fa86eaf45.

Updates: bz#1471742
Change-Id: I6c27634999f72b5bbb35d5d13cdebda7af072b01
Reviewed-on: https://code.engineering.redhat.com/gerrit/176017
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 tests/basic/tier/tier-heald.t | 35 +++++++++++++++--------------------
 1 file changed, 15 insertions(+), 20 deletions(-)

diff --git a/tests/basic/tier/tier-heald.t b/tests/basic/tier/tier-heald.t
index 0ec9e43..a8e634f 100644
--- a/tests/basic/tier/tier-heald.t
+++ b/tests/basic/tier/tier-heald.t
@@ -11,7 +11,7 @@ cleanup;
 TEST glusterd
 TEST pidof glusterd

-r2_volfile=$(gluster system:: getwd)"/vols/r2/r2-shd.vol"
+volfile=$(gluster system:: getwd)"/glustershd/glustershd-server.vol"

 # Commands should fail when both tiers are not of distribute type.
 # Glustershd shouldn't be running as long as there are no replicate/disperse
@@ -34,56 +34,51 @@ TEST $CLI volume tier r2 attach $H0:$B0/r2_hot
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
 TEST $CLI volume heal r2 enable
 EXPECT "enable" volume_option r2 "cluster.self-heal-daemon"
-EXPECT "enable" volgen_volume_option $r2_volfile r2-replicate-0 cluster replicate self-heal-daemon
+EXPECT "enable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
 TEST $CLI volume heal r2 disable
 EXPECT "disable" volume_option r2 "cluster.self-heal-daemon"
-EXPECT "disable" volgen_volume_option $r2_volfile r2-replicate-0 cluster replicate self-heal-daemon
+EXPECT "disable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
 # Commands should work on disperse volume.
 TEST $CLI volume create ec2 disperse 3 redundancy 1 $H0:$B0/ec2_0 $H0:$B0/ec2_1 $H0:$B0/ec2_2
 TEST $CLI volume start ec2

-ec2_volfile=$(gluster system:: getwd)"/vols/ec2/ec2-shd.vol"
-
 TEST $CLI volume tier ec2 attach replica 2 $H0:$B0/ec2_hot{1..4}
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
 TEST $CLI volume heal ec2 enable
 EXPECT "enable" volume_option ec2 "cluster.disperse-self-heal-daemon"
-EXPECT "enable" volgen_volume_option $ec2_volfile ec2-disperse-0 cluster disperse self-heal-daemon
+EXPECT "enable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
 TEST $CLI volume heal ec2 disable
 EXPECT "disable" volume_option ec2 "cluster.disperse-self-heal-daemon"
-EXPECT "disable" volgen_volume_option $ec2_volfile ec2-disperse-0 cluster disperse self-heal-daemon
+EXPECT "disable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid

 #Check that shd graph is rewritten correctly on volume stop/start
-EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse
-EXPECT "Y" volgen_volume_exists $r2_volfile r2-replicate-0 cluster replicate
+EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
+EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
 TEST $CLI volume stop r2
-EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse
-
-# Has been commented as the validations after stop using volfile dont hold true.
-#EXPECT "N" volgen_volume_exists $r2_volfile r2-replicate-0 cluster replicate
+EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
+EXPECT "N" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
 TEST $CLI volume stop ec2
 # When both the volumes are stopped glustershd volfile is not modified just the
 # process is stopped
 TEST "[ -z $(get_shd_process_pid) ]"

 TEST $CLI volume start r2
-# Has been commented as the validations after stop using volfile dont hold true.
-#EXPECT "N" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse
-EXPECT "Y" volgen_volume_exists $r2_volfile r2-replicate-0 cluster replicate
+EXPECT "N" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
+EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate

 TEST $CLI volume start ec2

-EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse
-EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-replicate-0 cluster replicate
+EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
+EXPECT "Y" volgen_volume_exists $volfile ec2-replicate-0 cluster replicate

 TEST $CLI volume tier ec2 detach force

-EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse
-EXPECT "N" volgen_volume_exists $ec2_volfile ec2-replicate-0 cluster replicate
+EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
+EXPECT "N" volgen_volume_exists $volfile ec2-replicate-0 cluster replicate

 TEST $CLI volume set r2 self-heal-daemon on
 TEST $CLI volume set r2 cluster.self-heal-daemon off
--
1.8.3.1

@@ -231,7 +231,7 @@ Release: 0.1%{?prereltag:.%{prereltag}}%{?dist}
 %else
 Name: glusterfs
 Version: 6.0
-Release: 7%{?dist}
+Release: 8%{?dist}
 ExcludeArch: i686
 %endif
 License: GPLv2 or LGPLv3+
@@ -530,6 +530,40 @@ Patch0218: 0218-graph-shd-Use-glusterfs_graph_deactivate-to-free-the.patch
 Patch0219: 0219-posix-add-posix_set_ctime-in-posix_ftruncate.patch
 Patch0220: 0220-graph-shd-Use-top-down-approach-while-cleaning-xlato.patch
 Patch0221: 0221-protocol-client-propagte-GF_EVENT_CHILD_PING-only-fo.patch
+Patch0222: 0222-cluster-dht-Fixed-a-memleak-in-dht_rename_cbk.patch
+Patch0223: 0223-change-get_real_filename-implementation-to-use-ENOAT.patch
+Patch0224: 0224-core-replace-inet_addr-with-inet_pton.patch
+Patch0225: 0225-tests-utils-Fix-py2-py3-util-python-scripts.patch
+Patch0226: 0226-geo-rep-fix-gluster-command-path-for-non-root-sessio.patch
+Patch0227: 0227-glusterd-svc-update-pid-of-mux-volumes-from-the-shd-.patch
+Patch0228: 0228-locks-enable-notify-contention-by-default.patch
+Patch0229: 0229-glusterd-Show-the-correct-brick-status-in-get-state.patch
+Patch0230: 0230-Revert-glusterd-svc-update-pid-of-mux-volumes-from-t.patch
+Patch0231: 0231-Revert-graph-shd-Use-top-down-approach-while-cleanin.patch
+Patch0232: 0232-cluster-afr-Fix-incorrect-reporting-of-gfid-type-mis.patch
+Patch0233: 0233-Revert-graph-shd-Use-glusterfs_graph_deactivate-to-f.patch
+Patch0234: 0234-Revert-glusterd-shd-Change-shd-logfile-to-a-unique-n.patch
+Patch0235: 0235-Revert-glusterd-svc-Stop-stale-process-using-the-glu.patch
+Patch0236: 0236-Revert-shd-mux-Fix-race-between-mux_proc-unlink-and-.patch
+Patch0237: 0237-Revert-ec-fini-Fix-race-between-xlator-cleanup-and-o.patch
+Patch0238: 0238-Revert-xlator-log-Add-more-logging-in-xlator_is_clea.patch
+Patch0239: 0239-Revert-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch
+Patch0240: 0240-Revert-glusterd-shd-Optimize-the-glustershd-manager-.patch
+Patch0241: 0241-Revert-glusterd-svc-glusterd_svcs_stop-should-call-i.patch
+Patch0242: 0242-Revert-tests-shd-Add-test-coverage-for-shd-mux.patch
+Patch0243: 0243-Revert-glusterfsd-cleanup-Protect-graph-object-under.patch
+Patch0244: 0244-Revert-ec-shd-Cleanup-self-heal-daemon-resources-dur.patch
+Patch0245: 0245-Revert-shd-glusterd-Serialize-shd-manager-to-prevent.patch
+Patch0246: 0246-Revert-glusterd-shd-Keep-a-ref-on-volinfo-until-atta.patch
+Patch0247: 0247-Revert-afr-shd-Cleanup-self-heal-daemon-resources-du.patch
+Patch0248: 0248-Revert-shd-mux-Fix-coverity-issues-introduced-by-shd.patch
+Patch0249: 0249-Revert-client-fini-return-fini-after-rpc-cleanup.patch
+Patch0250: 0250-Revert-mgmt-shd-Implement-multiplexing-in-self-heal-.patch
+Patch0251: 0251-tests-Fix-bug-1717819-metadata-split-brain-detection.patch
+Patch0252: 0252-glusterd-do-not-mark-skip_locking-as-true-for-geo-re.patch
+Patch0253: 0253-core-fix-deadlock-between-statedump-and-fd_anonymous.patch
+Patch0254: 0254-Detach-iot_worker-to-release-its-resources.patch
+Patch0255: 0255-Revert-tier-shd-glusterd-with-shd-mux-the-shd-volfil.patch

 %description
 GlusterFS is a distributed file-system capable of scaling to several
@@ -2238,6 +2272,10 @@ fi
 %endif

 %changelog
+* Tue Jul 16 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-8
+- fixes bugs bz#1698435 bz#1712591 bz#1715447 bz#1720488 bz#1722209
+  bz#1722512 bz#1724089 bz#1726991 bz#1727785 bz#1729108
+
 * Fri Jun 28 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-7
 - fixes bugs bz#1573077 bz#1600918 bz#1703423 bz#1704207 bz#1708064
   bz#1709301 bz#1713664 bz#1716760 bz#1717784 bz#1720163 bz#1720192