import glusterfs-6.0-56.4.el8

Authored by CentOS Sources on 2021-11-09 05:11:26 -05:00; committed by Stepan Oksanichenko
parent 70fe8ac334
commit 9c1244254d
66 changed files with 16278 additions and 38 deletions

@@ -1,33 +0,0 @@
From 346aa7cbc34b9bbbaca45180215a4d9ffd5055df Mon Sep 17 00:00:00 2001
From: Rinku Kothiya <rkothiya@redhat.com>
Date: Fri, 19 Feb 2021 06:19:07 +0000
Subject: [PATCH 481/481] RHGS-3.5.3 rebuild to ship with RHEL.
Label: DOWNSTREAM ONLY
BUG: 1930561
Change-Id: I9c7f30cc6bc616344b27072bfde056c7bba1e143
Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/228413
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 2 ++
1 file changed, 2 insertions(+)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 30d7162..52f9b40 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1983,6 +1983,8 @@ fi
%endif
%changelog
+* Fri Feb 19 2021 Rinku Kothiya <rkothiya@redhat.com>
+- Build RGHS clients for RHEL (#1930561)
* Mon May 11 2020 Sunny Kumar <sunkumar@redhat.com>
- added requires policycoreutils-python-utils on rhel8 for geo-replication
--
1.8.3.1

@@ -0,0 +1,26 @@
From 828be8e789db3c77587c708f930d7fe8c9456e3b Mon Sep 17 00:00:00 2001
From: Rinku Kothiya <rkothiya@redhat.com>
Date: Fri, 4 Dec 2020 05:18:45 +0530
Subject: [PATCH 481/511] Update rfc.sh to rhgs-3.5.4
Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
---
rfc.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/rfc.sh b/rfc.sh
index 1dca29f..c0559b9 100755
--- a/rfc.sh
+++ b/rfc.sh
@@ -18,7 +18,7 @@ done
shift $((OPTIND-1))
-branch="rhgs-3.5.3";
+branch="rhgs-3.5.4";
set_hooks_commit_msg()
{
--
1.8.3.1

@@ -0,0 +1,49 @@
From e43af5b15d14e43c3201fd0fb7bf02663e3e0127 Mon Sep 17 00:00:00 2001
From: Rinku Kothiya <rkothiya@redhat.com>
Date: Sat, 7 Nov 2020 12:09:36 +0530
Subject: [PATCH 482/511] logger: Always print errors in english
Upstream:
> Reviewed-on: https://github.com/gluster/glusterfs/pull/1657
> fixes: #1302
> Change-Id: If0e21f016155276a953c64a8dd13ff3eb281d09d
> Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
BUG: 1896425
Change-Id: If0e21f016155276a953c64a8dd13ff3eb281d09d
Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/219999
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/logging.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/libglusterfs/src/logging.c b/libglusterfs/src/logging.c
index 7f0eff6..5874c34 100644
--- a/libglusterfs/src/logging.c
+++ b/libglusterfs/src/logging.c
@@ -513,6 +513,7 @@ gf_openlog(const char *ident, int option, int facility)
{
int _option = option;
int _facility = facility;
+ char *language = NULL;
if (-1 == _option) {
_option = LOG_PID | LOG_NDELAY;
@@ -522,7 +523,10 @@ gf_openlog(const char *ident, int option, int facility)
}
/* TODO: Should check for errors here and return appropriately */
- setlocale(LC_ALL, "");
+ language = setlocale(LC_ALL, "en_US.UTF-8");
+ if (!language)
+ setlocale(LC_ALL, "");
+
setlocale(LC_NUMERIC, "C"); /* C-locale for strtod, ... */
/* close the previous syslog if open as we are changing settings */
closelog();
--
1.8.3.1
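
The fallback pattern this patch introduces is easy to demonstrate in isolation: prefer an English locale for messages, fall back to the environment's default if it is unavailable, and pin LC_NUMERIC to "C" so numeric parsing stays consistent. A minimal standalone sketch (plain libc, not the glusterfs source):

    #include <locale.h>
    #include <stdio.h>

    int main(void)
    {
        /* Prefer English messages; setlocale() returns NULL if the
         * requested locale is not installed on the system. */
        if (!setlocale(LC_ALL, "en_US.UTF-8"))
            setlocale(LC_ALL, ""); /* fall back to the environment */
        /* Keep numeric parsing locale-independent for strtod() etc. */
        setlocale(LC_NUMERIC, "C");
        printf("active locale: %s\n", setlocale(LC_ALL, NULL));
        return 0;
    }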

@@ -0,0 +1,150 @@
From 8c366f34a279a5ab2a6301bfd93534fe746a23e8 Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Mon, 7 Dec 2020 09:53:27 +0530
Subject: [PATCH 483/511] afr: more quorum checks in lookup and new entry
marking
Problem: See the upstream github issue for details.
Fix:
- In lookup, if the entry exists in 2 out of 3 bricks, don't fail the
lookup with ENOENT just because there is an entrylk on the parent.
Consider quorum before deciding.
- If an entry FOP does not succeed on a quorum no. of bricks, do not
perform the new-entry mark.
Upstream patch details:
> Reviewed-on: https://review.gluster.org/#/c/glusterfs/+/24499/
> Fixes: #1303
> Change-Id: I56df8c89ad53b29fa450c7930a7b7ccec9f4a6c5
> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
BUG: 1821599
Change-Id: If513e8a7d6088a676288927630d8e616269bf5d5
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220363
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
...20-mark-dirty-for-entry-txn-on-quorum-failure.t | 2 --
xlators/cluster/afr/src/afr-common.c | 24 ++++++++++++----------
xlators/cluster/afr/src/afr-dir-write.c | 8 ++++++++
xlators/cluster/afr/src/afr.h | 4 ++++
4 files changed, 25 insertions(+), 13 deletions(-)
diff --git a/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t b/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
index 26f9049..49c4dea 100644
--- a/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
+++ b/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
@@ -53,8 +53,6 @@ TEST ! ls $B0/${V0}1/file$i
TEST ls $B0/${V0}2/file$i
dirty=$(get_hex_xattr trusted.afr.dirty $B0/${V0}2)
TEST [ "$dirty" != "000000000000000000000000" ]
-EXPECT "000000010000000100000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2/file$i
-EXPECT "000000010000000100000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2/file$i
TEST $CLI volume set $V0 self-heal-daemon on
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
index 89e2483..851ccad 100644
--- a/xlators/cluster/afr/src/afr-common.c
+++ b/xlators/cluster/afr/src/afr-common.c
@@ -1236,7 +1236,7 @@ refresh_done:
return 0;
}
-static void
+void
afr_fill_success_replies(afr_local_t *local, afr_private_t *priv,
unsigned char *replies)
{
@@ -2290,6 +2290,7 @@ afr_lookup_done(call_frame_t *frame, xlator_t *this)
0,
};
gf_boolean_t locked_entry = _gf_false;
+ gf_boolean_t in_flight_create = _gf_false;
gf_boolean_t can_interpret = _gf_true;
inode_t *parent = NULL;
ia_type_t ia_type = IA_INVAL;
@@ -2333,17 +2334,12 @@ afr_lookup_done(call_frame_t *frame, xlator_t *this)
if (!replies[i].valid)
continue;
- if (locked_entry && replies[i].op_ret == -1 &&
- replies[i].op_errno == ENOENT) {
- /* Second, check entry is still
- "underway" in creation */
- local->op_ret = -1;
- local->op_errno = ENOENT;
- goto error;
- }
-
- if (replies[i].op_ret == -1)
+ if (replies[i].op_ret == -1) {
+ if (locked_entry && replies[i].op_errno == ENOENT) {
+ in_flight_create = _gf_true;
+ }
continue;
+ }
if (read_subvol == -1 || !readable[read_subvol]) {
read_subvol = i;
@@ -2353,6 +2349,12 @@ afr_lookup_done(call_frame_t *frame, xlator_t *this)
}
}
+ if (in_flight_create && !afr_has_quorum(success_replies, this, NULL)) {
+ local->op_ret = -1;
+ local->op_errno = ENOENT;
+ goto error;
+ }
+
if (read_subvol == -1)
goto error;
/* We now have a read_subvol, which is readable[] (if there
diff --git a/xlators/cluster/afr/src/afr-dir-write.c b/xlators/cluster/afr/src/afr-dir-write.c
index 84e2a34..416c19d 100644
--- a/xlators/cluster/afr/src/afr-dir-write.c
+++ b/xlators/cluster/afr/src/afr-dir-write.c
@@ -349,6 +349,7 @@ afr_mark_entry_pending_changelog(call_frame_t *frame, xlator_t *this)
afr_private_t *priv = NULL;
int pre_op_count = 0;
int failed_count = 0;
+ unsigned char *success_replies = NULL;
local = frame->local;
priv = this->private;
@@ -364,9 +365,16 @@ afr_mark_entry_pending_changelog(call_frame_t *frame, xlator_t *this)
failed_count = AFR_COUNT(local->transaction.failed_subvols,
priv->child_count);
+ /* FOP succeeded on all bricks. */
if (pre_op_count == priv->child_count && !failed_count)
return;
+ /* FOP did not succeed on quorum no. of bricks. */
+ success_replies = alloca0(priv->child_count);
+ afr_fill_success_replies(local, priv, success_replies);
+ if (!afr_has_quorum(success_replies, this, NULL))
+ return;
+
if (priv->thin_arbiter_count) {
/*Mark new entry using ta file*/
local->is_new_entry = _gf_true;
diff --git a/xlators/cluster/afr/src/afr.h b/xlators/cluster/afr/src/afr.h
index ff96246..ed5096e 100644
--- a/xlators/cluster/afr/src/afr.h
+++ b/xlators/cluster/afr/src/afr.h
@@ -1334,4 +1334,8 @@ afr_mark_new_entry_changelog(call_frame_t *frame, xlator_t *this);
void
afr_selfheal_childup(xlator_t *this, afr_private_t *priv);
+
+void
+afr_fill_success_replies(afr_local_t *local, afr_private_t *priv,
+ unsigned char *replies);
#endif /* __AFR_H__ */
--
1.8.3.1
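
For readers unfamiliar with AFR's quorum logic: with the common 'auto' quorum, a result only wins if it holds on a majority of the replicas. A hypothetical sketch of that rule (illustrative names only, not the afr_has_quorum() implementation, which also handles configured quorum types):

    #include <stdbool.h>

    /* success[i] is non-zero when brick i answered successfully. */
    static bool has_majority_quorum(const unsigned char *success,
                                    int child_count)
    {
        int ok = 0;
        for (int i = 0; i < child_count; i++)
            if (success[i])
                ok++;
        /* 2 of 3 bricks passes; 1 of 3 does not, so an in-flight-create
         * ENOENT from a single brick can no longer fail the lookup. */
        return ok > child_count / 2;
    }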

@@ -0,0 +1,90 @@
From 6c3b21ce5bb76b35856a6c270eb65d11f869061f Mon Sep 17 00:00:00 2001
From: Sanju Rakonde <srakonde@redhat.com>
Date: Fri, 26 Jun 2020 12:10:31 +0530
Subject: [PATCH 484/511] glusterd: rebalance status displays stats as 0 after
reboot
Problem: While a rebalance is in progress, if a node is
rebooted, rebalance status shows the stats of this node as
0 once the node is back.
Reason: When the node is rebooted, once it is back
glusterd_volume_defrag_restart() starts the rebalance and
creates the rpc. But due to a race, the rebalance process
sends a disconnect event, so the rpc object gets destroyed. As
the rpc object is null, the request for fetching the latest stats
is not sent to the rebalance process, and the stats are shown as
the default values, which are 0.
Solution: When the rpc object is null, we should create the rpc if
the rebalance process is up, so that the request can be sent to the
rebalance process using the rpc.
>fixes: #1339
>Change-Id: I1c7533fedd17dcaffc0f7a5a918c87356133a81c
>Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Upstream Patch : https://review.gluster.org/c/glusterfs/+/24641
BUG: 1832306
Change-Id: I1c7533fedd17dcaffc0f7a5a918c87356133a81c
Signed-off-by: Srijan Sivakumar <ssivakum@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220369
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-syncop.c | 29 ++++++++++++++++++++---------
1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index c78983a..df78fef 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1693,6 +1693,7 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
rpc_clnt_t *rpc = NULL;
dict_t *rsp_dict = NULL;
int32_t cmd = GF_OP_CMD_NONE;
+ glusterd_volinfo_t *volinfo = NULL;
this = THIS;
rsp_dict = dict_new();
@@ -1724,18 +1725,28 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
cds_list_for_each_entry_safe(pending_node, tmp, &selected, list)
{
rpc = glusterd_pending_node_get_rpc(pending_node);
+ /* In the case of rebalance if the rpc object is null, we try to
+ * create the rpc object. if the rebalance daemon is down, it returns
+ * -1. otherwise, rpc object will be created and referenced.
+ */
if (!rpc) {
- if (pending_node->type == GD_NODE_REBALANCE) {
- ret = 0;
- glusterd_defrag_volume_node_rsp(req_dict, NULL, op_ctx);
+ if (pending_node->type == GD_NODE_REBALANCE && pending_node->node) {
+ volinfo = pending_node->node;
+ ret = glusterd_rebalance_rpc_create(volinfo);
+ if (ret) {
+ ret = 0;
+ glusterd_defrag_volume_node_rsp(req_dict, NULL, op_ctx);
+ goto out;
+ } else {
+ rpc = glusterd_defrag_rpc_get(volinfo->rebal.defrag);
+ }
+ } else {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
+ "Brick Op failed "
+ "due to rpc failure.");
goto out;
}
-
- ret = -1;
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
- "Brick Op failed "
- "due to rpc failure.");
- goto out;
}
/* Redirect operation to be detach tier via rebalance flow. */
--
1.8.3.1
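
The shape of the fix is "retry construction before giving up": if the cached handle is gone, attempt to rebuild it, and only answer with default (zeroed) stats when the rebuild itself fails. A runnable toy version with stand-in functions (the real calls are glusterd_pending_node_get_rpc() and glusterd_rebalance_rpc_create()):

    #include <stdio.h>
    #include <stddef.h>

    static void *cached_rpc = NULL;  /* destroyed by the disconnect race */
    static int daemon_up = 1;        /* rebalance process state */

    static void *lookup_rpc(void) { return cached_rpc; }

    static int create_rpc(void)     /* -1 when the daemon is down */
    {
        if (!daemon_up)
            return -1;
        cached_rpc = &cached_rpc;   /* stand-in for a referenced rpc */
        return 0;
    }

    int main(void)
    {
        void *rpc = lookup_rpc();
        if (!rpc && create_rpc() == 0)
            rpc = lookup_rpc();     /* rebuilt: stats can be fetched */
        puts(rpc ? "send stats request over rpc"
                 : "fall back to default (zeroed) stats");
        return 0;
    }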

@@ -0,0 +1,87 @@
From 2e6a5e504e66bc95208420e4882e453a53ac9ea2 Mon Sep 17 00:00:00 2001
From: schaffung <ssivakum@redhat.com>
Date: Mon, 2 Nov 2020 11:18:01 +0530
Subject: [PATCH 485/511] cli-rpc: conditional init of global quota rpc (#1578)
Issue: The initialization of the rpc used to
connect with quotad is done in every glusterfs cli command,
irrespective of whether the quota feature is enabled or disabled.
This seems to be overkill.
Code change: The presence of the file /var/run/quotad/quotad.pid
signals that quotad is enabled. Hence we can add a conditional
check for whether this file exists, and if it doesn't we
just skip the initialization of the global quotad rpc.
This will reduce the extra rpc calls and operations
being performed in the kernel space.
>Fixes: #1577
>Change-Id: Icb69d35330f76ce95626f59af75a12726eb620ff
>Signed-off-by: srijan-sivakumar <ssivakumar@redhat.com>
Upstream Patch : https://github.com/gluster/glusterfs/pull/1578
BUG: 1885966
Change-Id: Icb69d35330f76ce95626f59af75a12726eb620ff
Signed-off-by: Srijan Sivakumar <ssivakum@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220371
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
cli/src/cli.c | 18 +++++++++++++-----
cli/src/cli.h | 3 +++
2 files changed, 16 insertions(+), 5 deletions(-)
diff --git a/cli/src/cli.c b/cli/src/cli.c
index 99a16a0..a76c5a2 100644
--- a/cli/src/cli.c
+++ b/cli/src/cli.c
@@ -64,8 +64,7 @@
extern int connected;
/* using argp for command line parsing */
-const char *argp_program_version =
- PACKAGE_NAME" "PACKAGE_VERSION;
+const char *argp_program_version = PACKAGE_NAME " " PACKAGE_VERSION;
const char *argp_program_bug_address = "<" PACKAGE_BUGREPORT ">";
struct rpc_clnt *global_quotad_rpc;
@@ -840,9 +839,18 @@ main(int argc, char *argv[])
if (!global_rpc)
goto out;
- global_quotad_rpc = cli_quotad_clnt_rpc_init();
- if (!global_quotad_rpc)
- goto out;
+ /*
+ * Now, one doesn't need to initialize global rpc
+ * for quota unless and until quota is enabled.
+ * So why not put a check to save all the rpc related
+ * ops here.
+ */
+ ret = sys_access(QUOTAD_PID_PATH, F_OK);
+ if (!ret) {
+ global_quotad_rpc = cli_quotad_clnt_rpc_init();
+ if (!global_quotad_rpc)
+ goto out;
+ }
ret = cli_cmds_register(&state);
if (ret)
diff --git a/cli/src/cli.h b/cli/src/cli.h
index 37e4d9d..c30ae9c 100644
--- a/cli/src/cli.h
+++ b/cli/src/cli.h
@@ -30,6 +30,9 @@
#define CLI_TAB_LENGTH 8
#define CLI_BRICK_STATUS_LINE_LEN 78
+// Quotad pid path.
+#define QUOTAD_PID_PATH "/var/run/gluster/quotad/quotad.pid"
+
/* Geo-rep command positional arguments' index */
#define GEO_REP_CMD_INDEX 1
#define GEO_REP_CMD_CONFIG_INDEX 4
--
1.8.3.1
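
The gate itself is a one-line existence probe. A standalone sketch using plain access(2) (glusterfs routes this through its sys_access() wrapper; QUOTAD_PID_PATH is the macro the patch adds to cli.h):

    #include <unistd.h>
    #include <stdio.h>

    #define QUOTAD_PID_PATH "/var/run/gluster/quotad/quotad.pid"

    int main(void)
    {
        /* Pid file present => quotad is running, so the rpc is worth
         * setting up; otherwise skip the whole initialization. */
        if (access(QUOTAD_PID_PATH, F_OK) == 0)
            puts("quotad enabled: initialize global quotad rpc");
        else
            puts("quotad not running: skip quotad rpc init");
        return 0;
    }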

@@ -0,0 +1,87 @@
From 9b19d4841fc3002d30ec3e44c85ec37682c11bfb Mon Sep 17 00:00:00 2001
From: schaffung <ssivakum@redhat.com>
Date: Thu, 22 Oct 2020 13:07:09 +0530
Subject: [PATCH 486/511] glusterd: brick sock file deleted, log error (#1560)
Issue: The status of the brick as tracked by glusterd is
'stopped' if the socket file corresponding to a running
brick process is absent in /var/run/gluster. glusterd
keeps trying to reconnect (rpc layer) but it fails.
Code change: Rather than registering the rpc connection
with the given sockfilepath, which is not even present,
and endlessly reconnecting, log this as an error and do
not try to reconnect using the non-existent sock file path.
>Fixes: #1526
>Change-Id: I6c81691ab1624c66dec74f5ffcc6c383201ac757
>Signed-off-by: srijan-sivakumar <ssivakumar@redhat.com>
Upstream Patch : https://github.com/gluster/glusterfs/pull/1560
BUG: 1882923
Change-Id: I6c81691ab1624c66dec74f5ffcc6c383201ac757
Signed-off-by: Srijan Sivakumar <ssivakum@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220376
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-utils.c | 27 +++++++++++++++++++++++++--
1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index d25fc8a..a72c494 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -6310,7 +6310,7 @@ find_compatible_brick(glusterd_conf_t *conf, glusterd_volinfo_t *volinfo,
check if passed pid is match with running glusterfs process
*/
-int
+static int
glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len)
{
char fname[128] = "";
@@ -6383,7 +6383,17 @@ glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len)
if (tmpsockpath[0]) {
strncpy(sockpath, tmpsockpath, i);
- ret = 0;
+ /*
+ * Condition to check if the brick socket file is present
+ * in the stated path or not. This helps in preventing
+ * constant re-connect triggered in the RPC layer and also
+ * a log message would help out the user.
+ */
+ ret = sys_access(sockpath, F_OK);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_NOT_FOUND,
+ "%s not found", sockpath, NULL);
+ }
}
return ret;
@@ -6581,7 +6591,20 @@ glusterd_brick_start(glusterd_volinfo_t *volinfo,
if (!is_brick_mx_enabled()) {
glusterd_set_brick_socket_filepath(
volinfo, brickinfo, socketpath, sizeof(socketpath));
+ /*
+ * Condition to check if the brick socket file is present
+ * in the stated path or not. This helps in preventing
+ * constant re-connect triggered in the RPC layer and also
+ * a log message would help out the user.
+ */
+ ret = sys_access(socketpath, F_OK);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_NOT_FOUND,
+ "%s not found", socketpath, NULL);
+ goto out;
+ }
}
+
gf_log(this->name, GF_LOG_DEBUG,
"Using %s as sockfile for brick %s of volume %s ",
socketpath, brickinfo->path, volinfo->volname);
--
1.8.3.1
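
The same existence probe appears here, but the consequence differs: instead of handing a dead socket path to the rpc layer (which would reconnect forever), the code logs once and bails out. A hypothetical condensed sketch (the socket path is made up for illustration):

    #include <unistd.h>
    #include <stdio.h>

    /* Returns -1 (and logs) when the brick socket file is missing, so
     * the caller aborts instead of entering an endless reconnect loop. */
    static int check_brick_socket(const char *sockpath)
    {
        if (access(sockpath, F_OK) != 0) {
            fprintf(stderr, "error: %s not found\n", sockpath);
            return -1;
        }
        return 0; /* safe to register the rpc connection */
    }

    int main(void)
    {
        return check_brick_socket("/var/run/gluster/example.socket") ? 1 : 0;
    }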

@@ -0,0 +1,56 @@
From c961ee1d7c1abb2552b79ed39ed7fd1bd1b3962f Mon Sep 17 00:00:00 2001
From: srijan-sivakumar <ssivakum@redhat.com>
Date: Fri, 7 Aug 2020 15:02:07 +0530
Subject: [PATCH 487/511] Events: Log file not re-opened after logrotate.
Issue: The logging is done to the same file
even after the logrotate utility has rotated the file.
This causes the logfile to grow indefinitely.
Code Changes: Use the WatchedFileHandler class instead
of the FileHandler class. It watches the file it is logging
into and, if the file changes, it is closed and reopened
using the file name. Hence, after a file rotation, a new file
will be used for logging instead of continuing with
the same old file.
>Fixes: #1289
>Change-Id: I773d04f17613a03709cb682692efb39fd8e664e2
>Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
Upstream Patch : https://review.gluster.org/c/glusterfs/+/24820
BUG: 1814744
Change-Id: I773d04f17613a03709cb682692efb39fd8e664e2
Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220370
Reviewed-by: Shwetha Acharya <sacharya@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
events/src/utils.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/events/src/utils.py b/events/src/utils.py
index 38b707a..6d4e079 100644
--- a/events/src/utils.py
+++ b/events/src/utils.py
@@ -13,6 +13,7 @@ import sys
import json
import os
import logging
+import logging.handlers
import fcntl
from errno import EBADF
from threading import Thread
@@ -98,7 +99,7 @@ def setup_logger():
logger.setLevel(logging.INFO)
# create the logging file handler
- fh = logging.FileHandler(LOG_FILE)
+ fh = logging.handlers.WatchedFileHandler(LOG_FILE)
formatter = logging.Formatter("[%(asctime)s] %(levelname)s "
"[%(module)s - %(lineno)s:%(funcName)s] "
--
1.8.3.1
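
WatchedFileHandler's trick is to stat() the log path before each write and reopen the stream when the device/inode pair no longer matches what it originally opened, which is exactly what happens after logrotate renames the file. A rough C rendition of the same idea (a sketch, not the Python implementation; the log path is an example):

    #include <stdio.h>
    #include <sys/stat.h>

    static FILE *logf;
    static dev_t log_dev;
    static ino_t log_ino;

    static void log_line(const char *path, const char *msg)
    {
        struct stat st;
        /* Reopen if never opened, if the path vanished, or if logrotate
         * swapped in a new file (different device/inode). */
        if (!logf || stat(path, &st) != 0 ||
            st.st_dev != log_dev || st.st_ino != log_ino) {
            if (logf)
                fclose(logf);
            logf = fopen(path, "a");
            if (logf && fstat(fileno(logf), &st) == 0) {
                log_dev = st.st_dev;
                log_ino = st.st_ino;
            }
        }
        if (logf) {
            fputs(msg, logf);
            fflush(logf);
        }
    }

    int main(void)
    {
        log_line("/tmp/demo.log", "first line\n");
        log_line("/tmp/demo.log", "written after any rotation\n");
        return 0;
    }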

@@ -0,0 +1,864 @@
From 0502383024cbf7e4776816e0a992dccc484a3cf2 Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Tue, 8 Dec 2020 17:23:22 +0530
Subject: [PATCH 488/511] glusterd/afr: enable granular-entry-heal by default
XXXXXXXXXXXXXXXXXXX
IMPORTANT:
XXXXXXXXXXXXXXXXXXXX
I see that for rhgs-3.5.3, GD_OP_VERSION_MAX is GD_OP_VERSION_7_0. Since
this patch should only act on new volumes in rhgs-3.5.4, I am bumping
the op-version to GD_OP_VERSION_7_1. In glusterfs upstream, the patch
acts only if op-version >= GD_OP_VERSION_9_0, as seen in the commit
message below.
Upstream patch details:
/------------------------------------------------------------------------------/
1. The option has been enabled and tested for quite some time now in RHHI-V
downstream and I think it is safe to make it 'on' by default. Since it
is not possible to simply change it from 'off' to 'on' without breaking
rolling upgrades, old clients etc., I have made it default only for new volumes
starting from op-version GD_OP_VERSION_9_0.
Note: If you do a volume reset, the option will be turned back off.
This is okay as the dir's gfid will be captured in 'xattrop' folder and heals
will proceed. There might be stale entries inside entry-changes' folder,
which will be removed when we enable the option again.
2. I encountered a customer issue where entry heal was pending on a directory
with 236436 files in it and the glustershd.log output was just stuck at
"performing entry selfheal", so I have added logs to give us
more info in DEBUG level about whether entry heal and data heal are
progressing (metadata heal doesn't take much time). That way, we have a
quick visual indication to say things are not 'stuck' if we briefly
enable debug logs, instead of taking statedumps or checking profile info
etc.
>Fixes: #1483
>Change-Id: I4f116f8c92f8cd33f209b758ff14f3c7e1981422
>Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Upstream Patch: https://github.com/gluster/glusterfs/pull/1621
/------------------------------------------------------------------------------/
BUG: 1890506
Change-Id: If449a1e873633616cfc508d74b5c22eb434b55ae
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220555
Tested-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/glusterfs/globals.h | 4 +-
libglusterfs/src/syncop-utils.c | 4 +-
tests/basic/afr/add-brick-self-heal-non-granular.t | 75 +++++++++++++
tests/basic/afr/add-brick-self-heal.t | 4 +-
tests/basic/afr/bug-1130892-non-granular.t | 77 ++++++++++++++
.../basic/afr/bug-1493415-gfid-heal-non-granular.t | 79 ++++++++++++++
...507-type-mismatch-error-handling-non-granular.t | 117 +++++++++++++++++++++
...1749322-entry-heal-not-happening-non-granular.t | 90 ++++++++++++++++
.../afr/replace-brick-self-heal-non-granular.t | 65 ++++++++++++
tests/basic/afr/replace-brick-self-heal.t | 2 +-
tests/bugs/replicate/bug-1130892.t | 2 +-
tests/bugs/replicate/bug-1493415-gfid-heal.t | 2 +-
.../bug-1722507-type-mismatch-error-handling.t | 26 +++--
.../bug-1749322-entry-heal-not-happening.t | 7 +-
xlators/cluster/afr/src/afr-self-heal-common.c | 5 +
xlators/cluster/afr/src/afr-self-heal-data.c | 3 +
xlators/cluster/afr/src/afr-self-heal-entry.c | 7 +-
xlators/mgmt/glusterd/src/glusterd-utils.c | 13 +++
18 files changed, 558 insertions(+), 24 deletions(-)
create mode 100644 tests/basic/afr/add-brick-self-heal-non-granular.t
create mode 100644 tests/basic/afr/bug-1130892-non-granular.t
create mode 100644 tests/basic/afr/bug-1493415-gfid-heal-non-granular.t
create mode 100644 tests/basic/afr/bug-1722507-type-mismatch-error-handling-non-granular.t
create mode 100644 tests/basic/afr/bug-1749322-entry-heal-not-happening-non-granular.t
create mode 100644 tests/basic/afr/replace-brick-self-heal-non-granular.t
diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
index 31717ed..cc145cd 100644
--- a/libglusterfs/src/glusterfs/globals.h
+++ b/libglusterfs/src/glusterfs/globals.h
@@ -50,7 +50,7 @@
1 /* MIN is the fresh start op-version, mostly \
should not change */
#define GD_OP_VERSION_MAX \
- GD_OP_VERSION_7_0 /* MAX VERSION is the maximum \
+ GD_OP_VERSION_7_1 /* MAX VERSION is the maximum \
count in VME table, should \
keep changing with \
introduction of newer \
@@ -138,6 +138,8 @@
#define GD_OP_VERSION_7_0 70000 /* Op-version for GlusterFS 7.0 */
+#define GD_OP_VERSION_7_1 70100 /* Op-version for GlusterFS 7.1 */
+
#include "glusterfs/xlator.h"
#include "glusterfs/options.h"
diff --git a/libglusterfs/src/syncop-utils.c b/libglusterfs/src/syncop-utils.c
index be03527..2269c76 100644
--- a/libglusterfs/src/syncop-utils.c
+++ b/libglusterfs/src/syncop-utils.c
@@ -495,9 +495,7 @@ syncop_dir_scan(xlator_t *subvol, loc_t *loc, int pid, void *data,
if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, ".."))
continue;
- ret = fn(subvol, entry, loc, data);
- if (ret)
- break;
+ ret |= fn(subvol, entry, loc, data);
}
gf_dirent_free(&entries);
if (ret)
diff --git a/tests/basic/afr/add-brick-self-heal-non-granular.t b/tests/basic/afr/add-brick-self-heal-non-granular.t
new file mode 100644
index 0000000..19caf24
--- /dev/null
+++ b/tests/basic/afr/add-brick-self-heal-non-granular.t
@@ -0,0 +1,75 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 cluster.granular-entry-heal off
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Create files
+for i in {1..5}
+do
+ echo $i > $M0/file$i.txt
+done
+
+# Metadata changes
+TEST setfattr -n user.test -v qwerty $M0/file5.txt
+
+# Add brick1
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+
+# New-brick should accuse the old-bricks (Simulating case for data-loss)
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}2/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}2/
+
+# Check if pending xattr and dirty-xattr are set for newly-added-brick
+EXPECT "000000000000000100000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
+EXPECT "000000000000000100000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.dirty $B0/${V0}2
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Check if entry-heal has happened
+TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}2 | sort)
+TEST diff <(ls $B0/${V0}1 | sort) <(ls $B0/${V0}2 | sort)
+
+# Test if data was healed
+TEST diff $B0/${V0}0/file1.txt $B0/${V0}2/file1.txt
+
+# Test if metadata was healed and exists on both the bricks
+EXPECT "qwerty" get_text_xattr user.test $B0/${V0}2/file5.txt
+EXPECT "qwerty" get_text_xattr user.test $B0/${V0}0/file5.txt
+
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}2
+
+cleanup;
diff --git a/tests/basic/afr/add-brick-self-heal.t b/tests/basic/afr/add-brick-self-heal.t
index c847e22..7ebf4f6 100644
--- a/tests/basic/afr/add-brick-self-heal.t
+++ b/tests/basic/afr/add-brick-self-heal.t
@@ -38,8 +38,8 @@ TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0
TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}2/
# Check if pending xattr and dirty-xattr are set for newly-added-brick
-EXPECT "000000000000000100000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
-EXPECT "000000000000000100000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
+EXPECT "000000010000000100000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
+EXPECT "000000010000000100000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.dirty $B0/${V0}2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
diff --git a/tests/basic/afr/bug-1130892-non-granular.t b/tests/basic/afr/bug-1130892-non-granular.t
new file mode 100644
index 0000000..3cdbc7d
--- /dev/null
+++ b/tests/basic/afr/bug-1130892-non-granular.t
@@ -0,0 +1,77 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Create a 1X2 replica
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 cluster.granular-entry-heal off
+
+# Disable self-heal daemon
+TEST gluster volume set $V0 self-heal-daemon off
+
+# Enable Client side heal
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+
+# Disable all perf-xlators
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+
+# Volume start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+# FUSE Mount
+TEST ${GFS} -s $H0 --volfile-id $V0 $M0
+
+# Create files and dirs
+TEST mkdir -p $M0/one/two/
+TEST `echo "Carpe diem" > $M0/one/two/three`
+
+# Simulate disk-replacement
+TEST kill_brick $V0 $H0 $B0/${V0}-1
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 1
+TEST rm -rf $B0/${V0}-1/one
+TEST rm -rf $B0/${V0}-1/.glusterfs
+
+#Ideally, disk replacement is done using reset-brick or replace-brick gluster CLI
+#which will create .glusterfs folder.
+mkdir $B0/${V0}-1/.glusterfs && chmod 600 $B0/${V0}-1/.glusterfs
+
+# Start force
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST stat $M0/one
+
+sleep 1
+
+# Check pending xattrs
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 data
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 metadata
+
+TEST gluster volume set $V0 self-heal-daemon on
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_file_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two/three
+
+cleanup;
diff --git a/tests/basic/afr/bug-1493415-gfid-heal-non-granular.t b/tests/basic/afr/bug-1493415-gfid-heal-non-granular.t
new file mode 100644
index 0000000..aff001c
--- /dev/null
+++ b/tests/basic/afr/bug-1493415-gfid-heal-non-granular.t
@@ -0,0 +1,79 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.granular-entry-heal off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST $CLI volume set $V0 self-heal-daemon off
+
+# Create base entry in indices/xattrop
+echo "Data" > $M0/FILE
+
+#------------------------------------------------------------------------------#
+TEST touch $M0/f1
+gfid_f1=$(gf_get_gfid_xattr $B0/${V0}0/f1)
+gfid_str_f1=$(gf_gfid_xattr_to_str $gfid_f1)
+
+# Remove gfid xattr and .glusterfs hard link from 2nd brick. This simulates a
+# brick crash at the point where file got created but no xattrs were set.
+TEST setfattr -x trusted.gfid $B0/${V0}1/f1
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+# storage/posix considers that a file without gfid changed less than a second
+# before doesn't exist, so we need to wait for a second to force posix to
+# consider that this is a valid file but without gfid.
+sleep 2
+
+# Assume there were no pending xattrs on parent dir due to 1st brick crashing
+# too. Then name heal from client must heal the gfid.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST stat $M0/f1
+EXPECT "$gfid_f1" gf_get_gfid_xattr $B0/${V0}1/f1
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+#------------------------------------------------------------------------------#
+TEST mkdir $M0/dir
+TEST touch $M0/dir/f2
+gfid_f2=$(gf_get_gfid_xattr $B0/${V0}0/dir/f2)
+gfid_str_f2=$(gf_gfid_xattr_to_str $gfid_f2)
+
+# Remove gfid xattr and .glusterfs hard link from 2nd brick. This simulates a
+# brick crash at the point where file got created but no xattrs were set.
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir/f2
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
+
+#Now simulate setting of pending entry xattr on parent dir of 1st brick.
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/dir
+create_brick_xattrop_entry $B0/${V0}0 dir
+
+# storage/posix considers that a file without gfid changed less than a second
+# before doesn't exist, so we need to wait for a second to force posix to
+# consider that this is a valid file but without gfid.
+sleep 2
+
+#Trigger entry-heal via shd
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$gfid_f2" gf_get_gfid_xattr $B0/${V0}1/dir/f2
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
+
+#------------------------------------------------------------------------------#
+cleanup;
diff --git a/tests/basic/afr/bug-1722507-type-mismatch-error-handling-non-granular.t b/tests/basic/afr/bug-1722507-type-mismatch-error-handling-non-granular.t
new file mode 100644
index 0000000..9079c93
--- /dev/null
+++ b/tests/basic/afr/bug-1722507-type-mismatch-error-handling-non-granular.t
@@ -0,0 +1,117 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume set $V0 cluster.granular-entry-heal off
+TEST $CLI volume start $V0;
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume heal $V0 disable
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+
+##########################################################################################
+# GFID link file and the GFID is missing on one brick and all the bricks are being blamed.
+
+TEST touch $M0/dir/file
+TEST `echo append>> $M0/dir/file`
+
+#B0 and B2 must blame B1
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Remove the gfid xattr and the link file on one brick.
+gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file)
+gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file)
+TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file
+TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+
+# Wait for 2 second to force posix to consider that this is a valid file but
+# without gfid.
+sleep 2
+TEST $CLI volume heal $V0
+
+# Heal should not fail as the file is missing gfid xattr and the link file,
+# which is not actually the gfid or type mismatch.
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file
+TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+rm -f $M0/dir/file
+
+
+###########################################################################################
+# GFID link file and the GFID is missing on two bricks and all the bricks are being blamed.
+
+TEST $CLI volume heal $V0 disable
+TEST touch $M0/dir/file
+#TEST kill_brick $V0 $H0 $B0/$V0"1"
+
+#B0 and B2 must blame B1
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Remove the gfid xattr and the link file on two bricks.
+gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file)
+gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file)
+TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file
+TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir/file
+TEST rm -f $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+
+# Wait for 2 second to force posix to consider that this is a valid file but
+# without gfid.
+sleep 2
+TEST $CLI volume heal $V0
+
+# Heal should not fail as the file is missing gfid xattr and the link file,
+# which is not actually the gfid or type mismatch.
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file
+TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}1/dir/file
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+
+cleanup
diff --git a/tests/basic/afr/bug-1749322-entry-heal-not-happening-non-granular.t b/tests/basic/afr/bug-1749322-entry-heal-not-happening-non-granular.t
new file mode 100644
index 0000000..4f27da4
--- /dev/null
+++ b/tests/basic/afr/bug-1749322-entry-heal-not-happening-non-granular.t
@@ -0,0 +1,90 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+function check_gfid_and_link_count
+{
+ local file=$1
+
+ file_gfid_b0=$(gf_get_gfid_xattr $B0/${V0}0/$file)
+ TEST [ ! -z $file_gfid_b0 ]
+ file_gfid_b1=$(gf_get_gfid_xattr $B0/${V0}1/$file)
+ file_gfid_b2=$(gf_get_gfid_xattr $B0/${V0}2/$file)
+ EXPECT $file_gfid_b0 echo $file_gfid_b1
+ EXPECT $file_gfid_b0 echo $file_gfid_b2
+
+ EXPECT "2" stat -c %h $B0/${V0}0/$file
+ EXPECT "2" stat -c %h $B0/${V0}1/$file
+ EXPECT "2" stat -c %h $B0/${V0}2/$file
+}
+TESTS_EXPECTED_IN_LOOP=18
+
+################################################################################
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume set $V0 cluster.granular-entry-heal off
+TEST $CLI volume start $V0;
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume heal $V0 disable
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST `echo "File 1 " > $M0/dir/file1`
+TEST touch $M0/dir/file{2..4}
+
+# Remove file2 from 1st & 3rd bricks
+TEST rm -f $B0/$V0"0"/dir/file2
+TEST rm -f $B0/$V0"2"/dir/file2
+
+# Remove file3 and the .glusterfs hardlink from 1st & 2nd bricks
+gfid_file3=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file3)
+gfid_str_file3=$(gf_gfid_xattr_to_str $gfid_file3)
+TEST rm $B0/$V0"0"/.glusterfs/${gfid_str_file3:0:2}/${gfid_str_file3:2:2}/$gfid_str_file3
+TEST rm $B0/$V0"1"/.glusterfs/${gfid_str_file3:0:2}/${gfid_str_file3:2:2}/$gfid_str_file3
+TEST rm -f $B0/$V0"0"/dir/file3
+TEST rm -f $B0/$V0"1"/dir/file3
+
+# Remove the .glusterfs hardlink and the gfid xattr of file4 on 3rd brick
+gfid_file4=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file4)
+gfid_str_file4=$(gf_gfid_xattr_to_str $gfid_file4)
+TEST rm $B0/$V0"2"/.glusterfs/${gfid_str_file4:0:2}/${gfid_str_file4:2:2}/$gfid_str_file4
+TEST setfattr -x trusted.gfid $B0/$V0"2"/dir/file4
+
+# B0 and B2 blame each other
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir on first brick.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# All the files must be present on all the bricks after conservative merge and
+# should have the gfid xattr and the .glusterfs hardlink.
+check_gfid_and_link_count dir/file1
+check_gfid_and_link_count dir/file2
+check_gfid_and_link_count dir/file3
+check_gfid_and_link_count dir/file4
+
+cleanup
diff --git a/tests/basic/afr/replace-brick-self-heal-non-granular.t b/tests/basic/afr/replace-brick-self-heal-non-granular.t
new file mode 100644
index 0000000..c86bff1
--- /dev/null
+++ b/tests/basic/afr/replace-brick-self-heal-non-granular.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.granular-entry-heal off
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Create files
+for i in {1..5}
+do
+ echo $i > $M0/file$i.txt
+done
+
+# Metadata changes
+TEST setfattr -n user.test -v qwerty $M0/file5.txt
+
+# Replace brick1
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1_new commit force
+
+# Replaced-brick should accuse the non-replaced-brick (Simulating case for data-loss)
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1_new/
+
+# Check if pending xattr and dirty-xattr are set for replaced-brick
+EXPECT "000000000000000100000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.dirty $B0/${V0}1_new
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+# Check if entry-heal has happened
+TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1_new | sort)
+
+# To make sure that files were not lost from brick0
+TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1 | sort)
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
+
+# Test if data was healed
+TEST diff $B0/${V0}0/file1.txt $B0/${V0}1_new/file1.txt
+# To make sure that data was not lost from brick0
+TEST diff $B0/${V0}0/file1.txt $B0/${V0}1/file1.txt
+
+# Test if metadata was healed and exists on both the bricks
+EXPECT "qwerty" get_text_xattr user.test $B0/${V0}1_new/file5.txt
+EXPECT "qwerty" get_text_xattr user.test $B0/${V0}0/file5.txt
+
+cleanup;
diff --git a/tests/basic/afr/replace-brick-self-heal.t b/tests/basic/afr/replace-brick-self-heal.t
index 0360db7..da31c87 100644
--- a/tests/basic/afr/replace-brick-self-heal.t
+++ b/tests/basic/afr/replace-brick-self-heal.t
@@ -30,7 +30,7 @@ TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1_new commit forc
TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1_new/
# Check if pending xattr and dirty-xattr are set for replaced-brick
-EXPECT "000000000000000100000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
+EXPECT "000000010000000100000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.dirty $B0/${V0}1_new
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
diff --git a/tests/bugs/replicate/bug-1130892.t b/tests/bugs/replicate/bug-1130892.t
index 0f57d66..e23eb26 100644
--- a/tests/bugs/replicate/bug-1130892.t
+++ b/tests/bugs/replicate/bug-1130892.t
@@ -56,7 +56,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
TEST stat $M0/one
# Check pending xattrs
-EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 data
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 data
EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 entry
EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 metadata
diff --git a/tests/bugs/replicate/bug-1493415-gfid-heal.t b/tests/bugs/replicate/bug-1493415-gfid-heal.t
index 125c35a..9714d5e 100644
--- a/tests/bugs/replicate/bug-1493415-gfid-heal.t
+++ b/tests/bugs/replicate/bug-1493415-gfid-heal.t
@@ -49,7 +49,7 @@ TEST setfattr -x trusted.gfid $B0/${V0}1/dir/f2
TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
#Now simulate setting of pending entry xattr on parent dir of 1st brick.
-TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/dir
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000001 $B0/${V0}0/dir
create_brick_xattrop_entry $B0/${V0}0 dir
#Trigger entry-heal via shd
diff --git a/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
index 0aeaaaf..1fdf7ea 100644
--- a/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
+++ b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
@@ -23,19 +23,21 @@ TEST mkdir $M0/dir
##########################################################################################
# GFID link file and the GFID is missing on one brick and all the bricks are being blamed.
-TEST touch $M0/dir/file
-#TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST `echo append>> $M0/dir/file`
#B0 and B2 must blame B1
-setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
-setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
-setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+# Set data part of the xattr also to 1 so that local->need_full_crawl is true.
+# Another way is to create the needed entries inside indices/entry-changes
+# folder.
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000001 $B0/$V0"0"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000001 $B0/$V0"0"/dir
# Add entry to xattrop dir to trigger index heal.
xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
base_entry_b0=`ls $xattrop_dir0`
gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
-ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
EXPECT "^1$" get_pending_heal_count $V0
# Remove the gfid xattr and the link file on one brick.
@@ -70,18 +72,20 @@ rm -f $M0/dir/file
TEST $CLI volume heal $V0 disable
TEST touch $M0/dir/file
-#TEST kill_brick $V0 $H0 $B0/$V0"1"
#B0 and B2 must blame B1
-setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
-setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
-setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+# Set data part of the xattr also to 1 so that local->need_full_crawl is true.
+# Another way is to create the needed entries inside indices/entry-changes
+# folder.
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000001 $B0/$V0"0"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000001 $B0/$V0"0"/dir
# Add entry to xattrop dir to trigger index heal.
xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
base_entry_b0=`ls $xattrop_dir0`
gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
-ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
EXPECT "^1$" get_pending_heal_count $V0
# Remove the gfid xattr and the link file on two bricks.
diff --git a/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t b/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t
index 9627908..3da873a 100644
--- a/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t
+++ b/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t
@@ -59,8 +59,11 @@ TEST rm $B0/$V0"2"/.glusterfs/${gfid_str_file4:0:2}/${gfid_str_file4:2:2}/$gfid_
TEST setfattr -x trusted.gfid $B0/$V0"2"/dir/file4
# B0 and B2 blame each other
-setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
-setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+# Set data part of the xattr also to 1 so that local->need_full_crawl is true.
+# Another way is to create the needed entries inside indices/entry-changes
+# folder.
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000001 $B0/$V0"0"/dir
# Add entry to xattrop dir on first brick.
xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
index 1608f75..36fd3a9 100644
--- a/xlators/cluster/afr/src/afr-self-heal-common.c
+++ b/xlators/cluster/afr/src/afr-self-heal-common.c
@@ -2549,6 +2549,11 @@ afr_selfheal_do(call_frame_t *frame, xlator_t *this, uuid_t gfid)
}
}
+ gf_msg_debug(
+ this->name, 0,
+ "heals needed for %s: [entry-heal=%d, metadata-heal=%d, data-heal=%d]",
+ uuid_utoa(gfid), entry_selfheal, metadata_selfheal, data_selfheal);
+
if (data_selfheal && priv->data_self_heal)
data_ret = afr_selfheal_data(frame, this, fd);
diff --git a/xlators/cluster/afr/src/afr-self-heal-data.c b/xlators/cluster/afr/src/afr-self-heal-data.c
index cdff4a5..b97c66b 100644
--- a/xlators/cluster/afr/src/afr-self-heal-data.c
+++ b/xlators/cluster/afr/src/afr-self-heal-data.c
@@ -239,6 +239,9 @@ afr_selfheal_data_block(call_frame_t *frame, xlator_t *this, fd_t *fd,
sink_count = AFR_COUNT(healed_sinks, priv->child_count);
data_lock = alloca0(priv->child_count);
+ gf_msg_debug(this->name, 0, "gfid:%s, offset=%jd, size=%zu",
+ uuid_utoa(fd->inode->gfid), offset, size);
+
ret = afr_selfheal_inodelk(frame, this, fd->inode, this->name, offset, size,
data_lock);
{
diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c
index 40be898..00b5b2d 100644
--- a/xlators/cluster/afr/src/afr-self-heal-entry.c
+++ b/xlators/cluster/afr/src/afr-self-heal-entry.c
@@ -206,8 +206,11 @@ __afr_selfheal_heal_dirent(call_frame_t *frame, xlator_t *this, fd_t *fd,
replies);
} else {
if (!gf_uuid_compare(replies[i].poststat.ia_gfid,
- replies[source].poststat.ia_gfid))
+ replies[source].poststat.ia_gfid)) {
+ gf_msg_debug(this->name, 0, "skipping %s, no heal needed.",
+ name);
continue;
+ }
ret = afr_selfheal_recreate_entry(frame, i, source, sources,
fd->inode, name, inode, replies);
@@ -839,7 +842,7 @@ afr_selfheal_entry_granular_dirent(xlator_t *subvol, gf_dirent_t *entry,
out:
loc_wipe(&loc);
- return 0;
+ return ret;
}
static int
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index a72c494..bd17a82 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -13181,6 +13181,19 @@ glusterd_enable_default_options(glusterd_volinfo_t *volinfo, char *option)
goto out;
}
}
+
+ if ((conf->op_version >= GD_OP_VERSION_7_1) &&
+ (volinfo->status == GLUSTERD_STATUS_NONE)) {
+ ret = dict_set_dynstr_with_alloc(volinfo->dict,
+ "cluster.granular-entry-heal", "on");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set option 'cluster.granular-entry-heal' "
+ "on volume %s",
+ volinfo->volname);
+ goto out;
+ }
+ }
out:
return ret;
}
--
1.8.3.1
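
On the op-version bump: the constants appear to encode version X.Y.Z as X*10000 + Y*100 + Z, which is why GD_OP_VERSION_7_0 is 70000 and the new GD_OP_VERSION_7_1 is 70100 (upstream's GD_OP_VERSION_9_0 would be 90000). A two-assert check of that reading:

    #include <assert.h>

    #define OP_VERSION(x, y, z) ((x) * 10000 + (y) * 100 + (z))

    int main(void)
    {
        assert(OP_VERSION(7, 0, 0) == 70000); /* GD_OP_VERSION_7_0 */
        assert(OP_VERSION(7, 1, 0) == 70100); /* GD_OP_VERSION_7_1 */
        return 0;
    }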

@@ -0,0 +1,141 @@
From 2d172144810956225eac3599c943416c4a7e25d0 Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Tue, 8 Dec 2020 20:30:23 +0530
Subject: [PATCH 489/511] glusterd: fix bug in enabling granular-entry-heal
Upstream patch details:
/------------------------------------------------------------------------------/
commit f5e1eb87d4af44be3b317b7f99ab88f89c2f0b1a meant to enable the
volume option only for replica volumes but inadvertently enabled
it for all volume types. Fixing it now.
Also found a bug in glusterd where disabling the option on plain
distribute was succeeding even though setting it in the first place
fails. Fixed that too.
>Fixes: #1483
>Change-Id: Icb6c169a8eec44cc4fb4dd636405d3b3485e91b4
>Reported-by: Sheetal Pamecha <spamecha@redhat.com>
>Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Upstream Patch: https://github.com/gluster/glusterfs/pull/1752
/------------------------------------------------------------------------------/
BUG: 1890506
Change-Id: Id63655dac08d2cfda4899d7ee0efe96e72cd6986
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220556
Tested-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/basic/afr/granular-esh/cli.t | 30 ++++++++++++++++++++-----
xlators/mgmt/glusterd/src/glusterd-utils.c | 3 ++-
xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 12 +++++-----
3 files changed, 34 insertions(+), 11 deletions(-)
diff --git a/tests/basic/afr/granular-esh/cli.t b/tests/basic/afr/granular-esh/cli.t
index 995d93e..5ab2e39 100644
--- a/tests/basic/afr/granular-esh/cli.t
+++ b/tests/basic/afr/granular-esh/cli.t
@@ -11,25 +11,38 @@ TESTS_EXPECTED_IN_LOOP=4
TEST glusterd
TEST pidof glusterd
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
-# Test that enabling the option should work on a newly created volume
-TEST $CLI volume set $V0 cluster.granular-entry-heal on
-TEST $CLI volume set $V0 cluster.granular-entry-heal off
-
#########################
##### DISPERSE TEST #####
#########################
# Execute the same command on a disperse volume and make sure it fails.
TEST $CLI volume create $V1 disperse 3 redundancy 1 $H0:$B0/${V1}{0,1,2}
+EXPECT "no" volume_get_field $V1 cluster.granular-entry-heal
+TEST $CLI volume start $V1
+TEST ! $CLI volume heal $V1 granular-entry-heal enable
+TEST ! $CLI volume heal $V1 granular-entry-heal disable
+
+TEST $CLI volume stop $V1
+TEST $CLI volume delete $V1
+
+#########################
+##### PLAIN DISTRIBUTE TEST #####
+#########################
+# Execute the same command on a distribute volume and make sure it fails.
+TEST $CLI volume create $V1 $H0:$B0/${V1}{0,1,2}
+EXPECT "no" volume_get_field $V1 cluster.granular-entry-heal
TEST $CLI volume start $V1
TEST ! $CLI volume heal $V1 granular-entry-heal enable
TEST ! $CLI volume heal $V1 granular-entry-heal disable
+TEST $CLI volume stop $V1
+TEST $CLI volume delete $V1
#######################
###### TIER TEST ######
#######################
# Execute the same command on a disperse + replicate tiered volume and make
# sure the option is set on the replicate leg of the volume
+TEST $CLI volume create $V1 disperse 3 redundancy 1 $H0:$B0/${V1}{0,1,2}
+TEST $CLI volume start $V1
TEST $CLI volume tier $V1 attach replica 2 $H0:$B0/${V1}{3,4}
TEST $CLI volume heal $V1 granular-entry-heal enable
EXPECT "enable" volume_get_field $V1 cluster.granular-entry-heal
@@ -52,10 +65,17 @@ TEST kill_brick $V1 $H0 $B0/${V1}3
# failed.
TEST ! $CLI volume heal $V1 granular-entry-heal enable
EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
+TEST $CLI volume stop $V1
+TEST $CLI volume delete $V1
######################
### REPLICATE TEST ###
######################
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+EXPECT "on" volume_get_field $V0 cluster.granular-entry-heal
+# Test that enabling the option should work on a newly created volume
+TEST $CLI volume set $V0 cluster.granular-entry-heal on
+TEST $CLI volume set $V0 cluster.granular-entry-heal off
TEST $CLI volume start $V0
TEST $CLI volume set $V0 cluster.data-self-heal off
TEST $CLI volume set $V0 cluster.metadata-self-heal off
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index bd17a82..ad3750e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -13183,7 +13183,8 @@ glusterd_enable_default_options(glusterd_volinfo_t *volinfo, char *option)
}
if ((conf->op_version >= GD_OP_VERSION_7_1) &&
- (volinfo->status == GLUSTERD_STATUS_NONE)) {
+ (volinfo->status == GLUSTERD_STATUS_NONE) &&
+ (volinfo->type == GF_CLUSTER_TYPE_REPLICATE)) {
ret = dict_set_dynstr_with_alloc(volinfo->dict,
"cluster.granular-entry-heal", "on");
if (ret) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 134b04c..09e6ead 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -621,11 +621,13 @@ glusterd_handle_heal_options_enable_disable(rpcsvc_request_t *req, dict_t *dict,
goto out;
}
- if (((heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) ||
- (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) &&
- (volinfo->type == GF_CLUSTER_TYPE_DISPERSE)) {
- ret = -1;
- goto out;
+ if ((heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) ||
+ (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) {
+ if ((volinfo->type != GF_CLUSTER_TYPE_REPLICATE) &&
+ (volinfo->type != GF_CLUSTER_TYPE_TIER)) {
+ ret = -1;
+ goto out;
+ }
}
if ((heal_op == GF_SHD_OP_HEAL_ENABLE) ||
--
1.8.3.1

View File

@ -0,0 +1,57 @@
From 5a110946b41619577b365cdceddc4da551ff49f0 Mon Sep 17 00:00:00 2001
From: kinsu <vpolakis@gmail.com>
Date: Thu, 19 Sep 2019 08:34:32 +0000
Subject: [PATCH 490/511] Segmentation fault occurs during truncate
Problem:
A segmentation fault occurs when bricks are nearly 100% full and a
truncate of a file is attempted in parallel (No space left on device).
A prerequisite is that performance xlators are activated
(read-ahead, write-behind etc).
During stack unwind of the frames following an error response
from the brick (No space left on device), frame->local holds a memory
location that was allocated not via mem_get but via calloc.
The destroyed frame is always ra_truncate_cbk winded from ra_ftruncate
and the inode ptr is copied to the frame local in the wb_ftruncate.
Fix:
An extra check is added for the pool pointer.
>Change-Id: Ic5d3bd0ab7011e40b2811c6dece063b256e4d9d1
>Fixes: bz#1797882
>Signed-off-by: kinsu <vpolakis@gmail.com>
Upstream-patch: https://review.gluster.org/c/glusterfs/+/23445
BUG: 1842449
Change-Id: Ic5d3bd0ab7011e40b2811c6dece063b256e4d9d1
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220540
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/mem-pool.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/libglusterfs/src/mem-pool.c b/libglusterfs/src/mem-pool.c
index 73503e0..1390747 100644
--- a/libglusterfs/src/mem-pool.c
+++ b/libglusterfs/src/mem-pool.c
@@ -857,6 +857,14 @@ mem_put(void *ptr)
/* Not one of ours; don't touch it. */
return;
}
+
+ if (!hdr->pool_list) {
+ gf_msg_callingfn("mem-pool", GF_LOG_CRITICAL, EINVAL,
+ LG_MSG_INVALID_ARG,
+ "invalid argument hdr->pool_list NULL");
+ return;
+ }
+
pool_list = hdr->pool_list;
pt_pool = &pool_list->pools[hdr->power_of_two - POOL_SMALLEST];
--
1.8.3.1
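
The defensive check above matters because a pointer can reach mem_put() even when it was allocated with plain calloc() and thus never got a pool header filled in. A minimal sketch of that validate-before-release pattern, with a hypothetical header layout (the real structures live in mem-pool.h):

    #include <stdio.h>

    /* hypothetical pooled-allocation header; the real one is in mem-pool.h */
    struct pool_hdr {
        unsigned int magic;      /* identifies pool-backed memory */
        void        *pool_list;  /* NULL if the block came from plain calloc() */
    };

    #define POOL_MAGIC 0xCAFEBABEu

    static void sketch_mem_put(void *ptr)
    {
        struct pool_hdr *hdr = (struct pool_hdr *)ptr - 1;

        if (hdr->magic != POOL_MAGIC)
            return;               /* not one of ours; don't touch it */

        if (!hdr->pool_list) {    /* the new check: catches calloc'd memory */
            fprintf(stderr, "invalid argument: hdr->pool_list is NULL\n");
            return;
        }
        /* ... safe to return the object to hdr->pool_list here ... */
    }

    int main(void)
    {
        /* mimics an allocation whose header was never wired to a pool */
        struct { struct pool_hdr h; char payload[32]; } blk = {
            { POOL_MAGIC, NULL }, {0}
        };

        sketch_mem_put(blk.payload);   /* rejected instead of crashing */
        return 0;
    }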

View File

@ -0,0 +1,56 @@
From 0fed8ca9c6c9e3a9041951bc748c7936d0abc8cf Mon Sep 17 00:00:00 2001
From: nik-redhat <nladha@redhat.com>
Date: Tue, 15 Sep 2020 16:20:19 +0530
Subject: [PATCH 491/511] glusterd: mount directory getting truncated on
mounting shared_storage
Issue:
In the case of a user-created volume the mount point
is the brick path (ex: /data/brick), but in the case of
shared_storage the mount point is '/'. Here
we increment the pointer by one so as to get the exact
path of the brick without the leading '/'. This works fine for other
volumes, as the brick_dir pointer is then
at '/', but for shared_storage it is at 'v' (the
first letter of the 'var' directory). So, on incrementing
the pointer, the path we get in the case of shared_storage starts from
'ar/lib/glusterd/...'.
Fix:
Only increment the pointer if the current character is '/';
otherwise the path will be wrong.
>Fixes: #1480
>Change-Id: Id31bb13f58134ae2099884fbc5984c4e055fb357
>Signed-off-by: nik-redhat <nladha@redhat.com>
Upstream patch: https://review.gluster.org/c/glusterfs/+/24989
BUG: 1878077
Change-Id: Id31bb13f58134ae2099884fbc5984c4e055fb357
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220536
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-utils.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index ad3750e..b343eee 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -1221,7 +1221,8 @@ glusterd_get_brick_mount_dir(char *brickpath, char *hostname, char *mount_dir)
}
brick_dir = &brickpath[strlen(mnt_pt)];
- brick_dir++;
+ if (brick_dir[0] == '/')
+ brick_dir++;
snprintf(mount_dir, VALID_GLUSTERD_PATHMAX, "/%s", brick_dir);
}
--
1.8.3.1
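
The one-line fix is easiest to see as plain pointer arithmetic over the brick path. A self-contained illustration (libc only; the paths are hypothetical examples):

    #include <stdio.h>
    #include <string.h>

    /* derive the brick directory relative to its mount point, in the
     * spirit of glusterd_get_brick_mount_dir() */
    static void get_brick_dir(const char *brickpath, const char *mnt_pt,
                              char *out, size_t outlen)
    {
        const char *brick_dir = brickpath + strlen(mnt_pt);

        if (brick_dir[0] == '/')   /* skip the separator only if it is one */
            brick_dir++;

        snprintf(out, outlen, "/%s", brick_dir);
    }

    int main(void)
    {
        char buf[256];

        get_brick_dir("/data/brick", "/data", buf, sizeof(buf));
        printf("%s\n", buf);   /* "/brick" */

        /* shared_storage case: mount point is "/", no separator to skip;
         * unconditional increment would have produced "ar/lib/..." */
        get_brick_dir("/var/lib/glusterd/ss_brick", "/", buf, sizeof(buf));
        printf("%s\n", buf);   /* "/var/lib/glusterd/ss_brick" */

        return 0;
    }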

View File

@ -0,0 +1,188 @@
From bde1ad97f8739f8370a2bbb92229b1b397ecd82c Mon Sep 17 00:00:00 2001
From: karthik-us <ksubrahm@redhat.com>
Date: Tue, 8 Dec 2020 19:06:03 +0530
Subject: [PATCH 492/511] afr/lookup: Pass xattr_req in while doing a selfheal
in lookup
xattr_req was not being passed when doing a name self-heal
as well as a metadata heal. Because of this, some xdata
was missing, which caused I/O errors.
Upstream patch details:
> Change-Id: Ibfb1205a7eb0195632dc3820116ffbbb8043545f
> Fixes: bz#1728770
> Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Upstream Patch : https://review.gluster.org/#/c/glusterfs/+/23024/
BUG: 1726673
Change-Id: Ibfb1205a7eb0195632dc3820116ffbbb8043545f
Signed-off-by: karthik-us <ksubrahm@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220538
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/bugs/replicate/bug-1728770-pass-xattrs.t | 52 ++++++++++++++++++++++++++
tests/include.rc | 1 +
xlators/cluster/afr/src/afr-common.c | 8 +++-
xlators/cluster/afr/src/afr-self-heal-common.c | 9 ++++-
xlators/cluster/afr/src/afr-self-heal.h | 2 +-
5 files changed, 67 insertions(+), 5 deletions(-)
create mode 100644 tests/bugs/replicate/bug-1728770-pass-xattrs.t
diff --git a/tests/bugs/replicate/bug-1728770-pass-xattrs.t b/tests/bugs/replicate/bug-1728770-pass-xattrs.t
new file mode 100644
index 0000000..159c4fc
--- /dev/null
+++ b/tests/bugs/replicate/bug-1728770-pass-xattrs.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+function fop_on_bad_disk {
+ local path=$1
+ mkdir $path/dir{1..1000} 2>/dev/null
+ mv $path/dir1 $path/newdir
+ touch $path/foo.txt
+ echo $?
+}
+
+function ls_fop_on_bad_disk {
+ local path=$1
+ ls $path
+ echo $?
+}
+
+TEST init_n_bricks 6;
+TEST setup_lvm 6;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 replica 3 $H0:$L1 $H0:$L2 $H0:$L3 $H0:$L4 $H0:$L5 $H0:$L6;
+TEST $CLI volume set $V0 health-check-interval 1000;
+
+TEST $CLI volume start $V0;
+
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+#corrupt last disk
+dd if=/dev/urandom of=/dev/mapper/patchy_snap_vg_6-brick_lvm bs=512K count=200 status=progress && sync
+
+
+# Test the disk is now returning EIO for touch and ls
+EXPECT_WITHIN $DISK_FAIL_TIMEOUT "^1$" fop_on_bad_disk "$L6"
+EXPECT_WITHIN $DISK_FAIL_TIMEOUT "^2$" ls_fop_on_bad_disk "$L6"
+
+TEST touch $M0/foo{1..100}
+TEST $CLI volume remove-brick $V0 replica 3 $H0:$L4 $H0:$L5 $H0:$L6 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$L4 $H0:$L5 $H0:$L6";
+
+#check that remove-brick status should not have any failed or skipped files
+var=`$CLI volume remove-brick $V0 $H0:$L4 $H0:$L5 $H0:$L6 status | grep completed`
+TEST [ `echo $var | awk '{print $5}'` = "0" ]
+TEST [ `echo $var | awk '{print $6}'` = "0" ]
+
+cleanup;
diff --git a/tests/include.rc b/tests/include.rc
index 762c5e2..c925941 100644
--- a/tests/include.rc
+++ b/tests/include.rc
@@ -89,6 +89,7 @@ GRAPH_SWITCH_TIMEOUT=10
UNLINK_TIMEOUT=5
MDC_TIMEOUT=5
IO_WAIT_TIMEOUT=5
+DISK_FAIL_TIMEOUT=80
LOGDIR=$(gluster --print-logdir)
diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
index 851ccad..fca2cd5 100644
--- a/xlators/cluster/afr/src/afr-common.c
+++ b/xlators/cluster/afr/src/afr-common.c
@@ -2609,6 +2609,10 @@ afr_lookup_sh_metadata_wrap(void *opaque)
dict = dict_new();
if (!dict)
goto out;
+ if (local->xattr_req) {
+ dict_copy(local->xattr_req, dict);
+ }
+
ret = dict_set_sizen_str_sizen(dict, "link-count", GF_XATTROP_INDEX_COUNT);
if (ret) {
gf_msg_debug(this->name, -ret, "Unable to set link-count in dict ");
@@ -2617,7 +2621,7 @@ afr_lookup_sh_metadata_wrap(void *opaque)
if (loc_is_nameless(&local->loc)) {
ret = afr_selfheal_unlocked_discover_on(frame, local->inode,
local->loc.gfid, local->replies,
- local->child_up);
+ local->child_up, dict);
} else {
inode = afr_selfheal_unlocked_lookup_on(frame, local->loc.parent,
local->loc.name, local->replies,
@@ -2791,7 +2795,7 @@ afr_lookup_selfheal_wrap(void *opaque)
inode = afr_selfheal_unlocked_lookup_on(frame, local->loc.parent,
local->loc.name, local->replies,
- local->child_up, NULL);
+ local->child_up, local->xattr_req);
if (inode)
inode_unref(inode);
diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
index 36fd3a9..9b6575f 100644
--- a/xlators/cluster/afr/src/afr-self-heal-common.c
+++ b/xlators/cluster/afr/src/afr-self-heal-common.c
@@ -1861,7 +1861,7 @@ afr_set_multi_dom_lock_count_request(xlator_t *this, dict_t *dict)
int
afr_selfheal_unlocked_discover_on(call_frame_t *frame, inode_t *inode,
uuid_t gfid, struct afr_reply *replies,
- unsigned char *discover_on)
+ unsigned char *discover_on, dict_t *dict)
{
loc_t loc = {
0,
@@ -1876,6 +1876,8 @@ afr_selfheal_unlocked_discover_on(call_frame_t *frame, inode_t *inode,
xattr_req = dict_new();
if (!xattr_req)
return -ENOMEM;
+ if (dict)
+ dict_copy(dict, xattr_req);
if (afr_xattr_req_prepare(frame->this, xattr_req) != 0) {
dict_unref(xattr_req);
@@ -1906,11 +1908,14 @@ afr_selfheal_unlocked_discover(call_frame_t *frame, inode_t *inode, uuid_t gfid,
struct afr_reply *replies)
{
afr_local_t *local = NULL;
+ dict_t *dict = NULL;
local = frame->local;
+ if (local && local->xattr_req)
+ dict = local->xattr_req;
return afr_selfheal_unlocked_discover_on(frame, inode, gfid, replies,
- local->child_up);
+ local->child_up, dict);
}
unsigned int
diff --git a/xlators/cluster/afr/src/afr-self-heal.h b/xlators/cluster/afr/src/afr-self-heal.h
index b39af02..8f6fb00 100644
--- a/xlators/cluster/afr/src/afr-self-heal.h
+++ b/xlators/cluster/afr/src/afr-self-heal.h
@@ -188,7 +188,7 @@ afr_selfheal_unlocked_discover(call_frame_t *frame, inode_t *inode, uuid_t gfid,
int
afr_selfheal_unlocked_discover_on(call_frame_t *frame, inode_t *inode,
uuid_t gfid, struct afr_reply *replies,
- unsigned char *discover_on);
+ unsigned char *discover_on, dict_t *dict);
inode_t *
afr_selfheal_unlocked_lookup_on(call_frame_t *frame, inode_t *parent,
const char *name, struct afr_reply *replies,
--
1.8.3.1

View File

@ -0,0 +1,283 @@
From 03de45e5fb1c8aa5369848ed9e52abd1365e1d21 Mon Sep 17 00:00:00 2001
From: Shwetha K Acharya <sacharya@redhat.com>
Date: Wed, 31 Jul 2019 11:34:19 +0530
Subject: [PATCH 493/511] geo-rep: Note section is required for ignore_deletes
There exists a window of 15 sec where deletes are picked up
by the history crawl when ignore_deletes is set to true,
and it eventually deletes file(s) from the slave which are not
supposed to be deleted. Though this works as per design, a
note regarding it is needed.
Added a warning message indicating the same.
Also, an info message is logged when the worker restarts after the
ignore-deletes option is set.
>fixes: bz#1708603
>Change-Id: I103be882fac18b4cef935efa355f5037a396f7c1
>Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
Upstream patch: https://review.gluster.org/c/glusterfs/+/22702
BUG: 1224906
Change-Id: I103be882fac18b4cef935efa355f5037a396f7c1
Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220757
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
cli/src/cli-cmd-parser.c | 45 ++++++++++++++++++++------
cli/src/cli-cmd-volume.c | 20 ++++++++----
cli/src/cli.h | 3 +-
geo-replication/syncdaemon/gsyncd.py | 2 +-
geo-replication/syncdaemon/master.py | 6 ++++
tests/00-geo-rep/bug-1708603.t | 63 ++++++++++++++++++++++++++++++++++++
6 files changed, 120 insertions(+), 19 deletions(-)
create mode 100644 tests/00-geo-rep/bug-1708603.t
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index 5fd05f4..34f17c9 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -2901,7 +2901,8 @@ out:
}
int32_t
-cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **options)
+cli_cmd_gsync_set_parse(struct cli_state *state, const char **words,
+ int wordcount, dict_t **options, char **errstr)
{
int32_t ret = -1;
dict_t *dict = NULL;
@@ -2918,6 +2919,8 @@ cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **options)
char *save_ptr = NULL;
char *slave_temp = NULL;
char *token = NULL;
+ gf_answer_t answer = GF_ANSWER_NO;
+ const char *question = NULL;
GF_ASSERT(words);
GF_ASSERT(options);
@@ -2990,8 +2993,10 @@ cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **options)
if (masteri && gsyncd_url_check(words[masteri]))
goto out;
- if (slavei && !glob && !gsyncd_url_check(words[slavei]))
+ if (slavei && !glob && !gsyncd_url_check(words[slavei])) {
+ gf_asprintf(errstr, "Invalid slave url: %s", words[slavei]);
goto out;
+ }
w = str_getunamb(words[cmdi], opwords);
if (!w)
@@ -3101,16 +3106,36 @@ cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **options)
}
if (!ret)
ret = dict_set_int32(dict, "type", type);
- if (!ret && type == GF_GSYNC_OPTION_TYPE_CONFIG)
+ if (!ret && type == GF_GSYNC_OPTION_TYPE_CONFIG) {
+ if (!strcmp((char *)words[wordcount - 2], "ignore-deletes") &&
+ !strcmp((char *)words[wordcount - 1], "true")) {
+ question =
+ "There exists ~15 seconds delay for the option to take"
+ " effect from stime of the corresponding brick. Please"
+ " check the log for the time, the option is effective."
+ " Proceed";
+
+ answer = cli_cmd_get_confirmation(state, question);
+
+ if (GF_ANSWER_NO == answer) {
+ gf_log("cli", GF_LOG_INFO,
+ "Operation "
+ "cancelled, exiting");
+ *errstr = gf_strdup("Aborted by user.");
+ ret = -1;
+ goto out;
+ }
+ }
+
ret = config_parse(words, wordcount, dict, cmdi, glob);
+ }
out:
if (slave_temp)
GF_FREE(slave_temp);
- if (ret) {
- if (dict)
- dict_unref(dict);
- } else
+ if (ret && dict)
+ dict_unref(dict);
+ else
*options = dict;
return ret;
@@ -5659,9 +5684,9 @@ cli_cmd_bitrot_parse(const char **words, int wordcount, dict_t **options)
int32_t ret = -1;
char *w = NULL;
char *volname = NULL;
- char *opwords[] = {
- "enable", "disable", "scrub-throttle", "scrub-frequency", "scrub",
- "signing-time", "signer-threads", NULL};
+ char *opwords[] = {"enable", "disable", "scrub-throttle",
+ "scrub-frequency", "scrub", "signing-time",
+ "signer-threads", NULL};
char *scrub_throt_values[] = {"lazy", "normal", "aggressive", NULL};
char *scrub_freq_values[] = {"hourly", "daily", "weekly", "biweekly",
"monthly", "minute", NULL};
diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
index 72504ca..6f5bf8b 100644
--- a/cli/src/cli-cmd-volume.c
+++ b/cli/src/cli-cmd-volume.c
@@ -2457,6 +2457,7 @@ cli_cmd_volume_gsync_set_cbk(struct cli_state *state, struct cli_cmd_word *word,
rpc_clnt_procedure_t *proc = NULL;
call_frame_t *frame = NULL;
cli_local_t *local = NULL;
+ char *errstr = NULL;
#if (USE_EVENTS)
int ret1 = -1;
int cmd_type = -1;
@@ -2468,16 +2469,21 @@ cli_cmd_volume_gsync_set_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GSYNC_SET];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (frame == NULL) {
- ret = -1;
+ ret = cli_cmd_gsync_set_parse(state, words, wordcount, &options, &errstr);
+ if (ret) {
+ if (errstr) {
+ cli_err("%s", errstr);
+ GF_FREE(errstr);
+ } else {
+ cli_usage_out(word->pattern);
+ }
+ parse_err = 1;
goto out;
}
- ret = cli_cmd_gsync_set_parse(words, wordcount, &options);
- if (ret) {
- cli_usage_out(word->pattern);
- parse_err = 1;
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (frame == NULL) {
+ ret = -1;
goto out;
}
diff --git a/cli/src/cli.h b/cli/src/cli.h
index c30ae9c..7b4f446 100644
--- a/cli/src/cli.h
+++ b/cli/src/cli.h
@@ -269,7 +269,8 @@ int32_t
cli_cmd_volume_reset_parse(const char **words, int wordcount, dict_t **opt);
int32_t
-cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **opt);
+cli_cmd_gsync_set_parse(struct cli_state *state, const char **words,
+ int wordcount, dict_t **opt, char **errstr);
int32_t
cli_cmd_quota_parse(const char **words, int wordcount, dict_t **opt);
diff --git a/geo-replication/syncdaemon/gsyncd.py b/geo-replication/syncdaemon/gsyncd.py
index 8940384..215c62d 100644
--- a/geo-replication/syncdaemon/gsyncd.py
+++ b/geo-replication/syncdaemon/gsyncd.py
@@ -315,7 +315,7 @@ def main():
# Log message for loaded config file
if config_file is not None:
- logging.info(lf("Using session config file", path=config_file))
+ logging.debug(lf("Using session config file", path=config_file))
set_term_handler()
excont = FreeObject(exval=0)
diff --git a/geo-replication/syncdaemon/master.py b/geo-replication/syncdaemon/master.py
index 08e98f8..98637e7 100644
--- a/geo-replication/syncdaemon/master.py
+++ b/geo-replication/syncdaemon/master.py
@@ -1549,6 +1549,12 @@ class GMasterChangeloghistoryMixin(GMasterChangelogMixin):
data_stime = self.get_data_stime()
end_time = int(time.time())
+
+ #as start of historical crawl marks Geo-rep worker restart
+ if gconf.get("ignore-deletes"):
+ logging.info(lf('ignore-deletes config option is set',
+ stime=data_stime))
+
logging.info(lf('starting history crawl',
turns=self.history_turns,
stime=data_stime,
diff --git a/tests/00-geo-rep/bug-1708603.t b/tests/00-geo-rep/bug-1708603.t
new file mode 100644
index 0000000..26913f1
--- /dev/null
+++ b/tests/00-geo-rep/bug-1708603.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../geo-rep.rc
+. $(dirname $0)/../env.rc
+
+SCRIPT_TIMEOUT=300
+
+##Cleanup and start glusterd
+cleanup;
+TEST glusterd;
+TEST pidof glusterd
+
+
+##Variables
+GEOREP_CLI="gluster volume geo-replication"
+master=$GMV0
+SH0="127.0.0.1"
+slave=${SH0}::${GSV0}
+num_active=2
+num_passive=2
+master_mnt=$M0
+slave_mnt=$M1
+
+############################################################
+#SETUP VOLUMES AND GEO-REPLICATION
+############################################################
+
+##create_and_start_master_volume
+TEST $CLI volume create $GMV0 replica 2 $H0:$B0/${GMV0}{1,2,3,4};
+TEST $CLI volume start $GMV0
+
+##create_and_start_slave_volume
+TEST $CLI volume create $GSV0 replica 2 $H0:$B0/${GSV0}{1,2,3,4};
+TEST $CLI volume start $GSV0
+
+##Mount master
+TEST glusterfs -s $H0 --volfile-id $GMV0 $M0
+
+##Mount slave
+TEST glusterfs -s $H0 --volfile-id $GSV0 $M1
+
+#Create geo-rep session
+TEST create_georep_session $master $slave
+
+echo n | $GEOREP_CLI $master $slave config ignore-deletes true >/dev/null 2>&1
+EXPECT "false" echo $($GEOREP_CLI $master $slave config ignore-deletes)
+echo y | $GEOREP_CLI $master $slave config ignore-deletes true
+EXPECT "true" echo $($GEOREP_CLI $master $slave config ignore-deletes)
+
+#Stop Geo-rep
+TEST $GEOREP_CLI $master $slave stop
+
+#Delete Geo-rep
+TEST $GEOREP_CLI $master $slave delete
+
+#Cleanup authorized keys
+sed -i '/^command=.*SSH_ORIGINAL_COMMAND#.*/d' ~/.ssh/authorized_keys
+sed -i '/^command=.*gsyncd.*/d' ~/.ssh/authorized_keys
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
--
1.8.3.1
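
The CLI change gates a risky config value behind an interactive yes/no prompt. A stripped-down sketch of such a confirmation gate (plain libc; the real code goes through cli_cmd_get_confirmation() and the GF_ANSWER_* values):

    #include <stdio.h>

    /* return 1 on an explicit "y"/"Y", 0 otherwise -- default to the
     * safe answer on EOF or anything ambiguous */
    static int confirm(const char *question)
    {
        char buf[16] = {0};

        printf("%s (y/n) ", question);
        if (!fgets(buf, sizeof(buf), stdin))
            return 0;
        return buf[0] == 'y' || buf[0] == 'Y';
    }

    int main(void)
    {
        if (!confirm("There exists ~15 seconds delay for the option to take"
                     " effect from stime of the corresponding brick. Proceed?")) {
            fprintf(stderr, "Aborted by user.\n");
            return 1;
        }
        printf("setting ignore-deletes=true\n");
        return 0;
    }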

View File

@ -0,0 +1,54 @@
From 1b24bc4319203128a9ff7f97fe14f4b3622c4eec Mon Sep 17 00:00:00 2001
From: Sanju Rakonde <srakonde@redhat.com>
Date: Wed, 26 Aug 2020 20:05:35 +0530
Subject: [PATCH 494/511] glusterd: start the brick on a different port
Problem: a brick fails to start when the port provided by
glusterd is in use by any other process.
Solution: glusterd should check the errno set by runner_run(),
and if it is set to EADDRINUSE, it should allocate a new
port to the brick and try to start it again.
Previously the ret value was checked instead of errno, so the
retry part never executed. Now, we initialize errno to 0
before calling the runner framework, and afterwards store
errno into ret to avoid modification of errno by subsequent
function calls.
>fixes: #1101
>Change-Id: I1aa048a77c5f8b035dece36976d60602d9753b1a
>Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
>Signed-off-by: nik-redhat <nladha@redhat.com>
Upstream patch: https://review.gluster.org/c/glusterfs/+/24923/
BUG: 1865796
Change-Id: I1aa048a77c5f8b035dece36976d60602d9753b1a
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220541
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-utils.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index b343eee..f7030fb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -2289,7 +2289,10 @@ retry:
if (wait) {
synclock_unlock(&priv->big_lock);
+ errno = 0;
ret = runner_run(&runner);
+ if (errno != 0)
+ ret = errno;
synclock_lock(&priv->big_lock);
if (ret == EADDRINUSE) {
--
1.8.3.1
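
The fix turns on the distinction between a callee's return value and the errno it leaves behind. A condensed, runnable sketch of the retry loop, with a hypothetical spawn_brick() standing in for runner_run():

    #include <errno.h>
    #include <stdio.h>

    /* stand-in for runner_run(): returns -1 and sets errno on failure */
    static int spawn_brick(int port)
    {
        if (port == 49152) {        /* pretend this port is already taken */
            errno = EADDRINUSE;
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        int port = 49152;
        int ret;

    retry:
        errno = 0;                  /* clear stale errno before the call */
        ret = spawn_brick(port);
        if (errno != 0)
            ret = errno;            /* capture errno before it is clobbered */

        if (ret == EADDRINUSE) {
            port++;                 /* allocate a fresh port and try again */
            goto retry;
        }
        printf("brick started on port %d\n", port);
        return ret;
    }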

View File

@ -0,0 +1,60 @@
From 17a2a880290d2038c913c23985df620e3c9741b3 Mon Sep 17 00:00:00 2001
From: Sunny Kumar <sunkumar@redhat.com>
Date: Mon, 16 Mar 2020 15:17:23 +0000
Subject: [PATCH 495/511] geo-rep: descriptive message when worker crashes due
to EIO
With this patch now you can notice log if it is due to EIO:
[2020-03-16 16:24:48.293837] E [syncdutils(worker /bricks/brick1/mbr3):348:log_raise_exception] <top>: Getting "Input/Output error" is most likely due to a. Brick is down or b. Split brain issue.
[2020-03-16 16:24:48.293915] E [syncdutils(worker /bricks/brick1/mbr3):352:log_raise_exception] <top>: This is expected as per design to keep the consistency of the file system. Once the above issue is resolved geo-rep would automatically proceed further.
>Change-Id: Ie33f2440bc96089731ce12afa8dab91d9550a7ca
>Fixes: #1104
>Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
>Upstream Patch : https://review.gluster.org/c/glusterfs/+/24228/
BUG: 1412494
Change-Id: Ie33f2440bc96089731ce12afa8dab91d9550a7ca
Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220874
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
geo-replication/syncdaemon/syncdutils.py | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/geo-replication/syncdaemon/syncdutils.py b/geo-replication/syncdaemon/syncdutils.py
index f43e13b..d5a94d4 100644
--- a/geo-replication/syncdaemon/syncdutils.py
+++ b/geo-replication/syncdaemon/syncdutils.py
@@ -22,7 +22,7 @@ import socket
from subprocess import PIPE
from threading import Lock, Thread as baseThread
from errno import EACCES, EAGAIN, EPIPE, ENOTCONN, ENOMEM, ECONNABORTED
-from errno import EINTR, ENOENT, ESTALE, EBUSY, ENODATA, errorcode
+from errno import EINTR, ENOENT, ESTALE, EBUSY, ENODATA, errorcode, EIO
from signal import signal, SIGTERM
import select as oselect
from os import waitpid as owaitpid
@@ -346,6 +346,17 @@ def log_raise_exception(excont):
ECONNABORTED):
logging.error(lf('Gluster Mount process exited',
error=errorcode[exc.errno]))
+ elif isinstance(exc, OSError) and exc.errno == EIO:
+ logging.error("Getting \"Input/Output error\" "
+ "is most likely due to "
+ "a. Brick is down or "
+ "b. Split brain issue.")
+ logging.error("This is expected as per design to "
+ "keep the consistency of the file system. "
+ "Once the above issue is resolved "
+ "geo-replication would automatically "
+ "proceed further.")
+ logtag = "FAIL"
else:
logtag = "FAIL"
if not logtag and logging.getLogger().isEnabledFor(logging.DEBUG):
--
1.8.3.1

View File

@ -0,0 +1,139 @@
From 5893e64ca8c147b7acfa12cd9824f254d53ee261 Mon Sep 17 00:00:00 2001
From: mohit84 <moagrawa@redhat.com>
Date: Wed, 4 Nov 2020 09:02:03 +0530
Subject: [PATCH 496/511] posix: Use MALLOC instead of alloca to allocate
memory for xattrs list (#1730)
If a file has huge xattrs on the backend, the brick process
crashes once the alloca(size) request crosses the 256k limit, because
the iot_worker stack size is 256k.
> Fixes: #1699
> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
> Change-Id: I100468234f83329a7d65b43cbe4e10450c1ccecd
> (Cherry pick from commit fd666caa35ac84dd1cba55399761982011b77112)
> (Reviewed on upstream link https://github.com/gluster/glusterfs/pull/1828)
Change-Id: I100468234f83329a7d65b43cbe4e10450c1ccecd
Bug: 1903468
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220872
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/storage/posix/src/posix-gfid-path.c | 5 ++++-
xlators/storage/posix/src/posix-helpers.c | 3 ++-
xlators/storage/posix/src/posix-inode-fd-ops.c | 12 +++++++++---
3 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/xlators/storage/posix/src/posix-gfid-path.c b/xlators/storage/posix/src/posix-gfid-path.c
index 64b5c6c..01315ac 100644
--- a/xlators/storage/posix/src/posix-gfid-path.c
+++ b/xlators/storage/posix/src/posix-gfid-path.c
@@ -195,7 +195,8 @@ posix_get_gfid2path(xlator_t *this, inode_t *inode, const char *real_path,
if (size == 0)
goto done;
}
- list = alloca(size);
+
+ list = GF_MALLOC(size, gf_posix_mt_char);
if (!list) {
*op_errno = errno;
goto err;
@@ -309,6 +310,7 @@ done:
GF_FREE(paths[j]);
}
ret = 0;
+ GF_FREE(list);
return ret;
err:
if (path)
@@ -317,5 +319,6 @@ err:
if (paths[j])
GF_FREE(paths[j]);
}
+ GF_FREE(list);
return ret;
}
diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c
index 73a44be..ceac52a 100644
--- a/xlators/storage/posix/src/posix-helpers.c
+++ b/xlators/storage/posix/src/posix-helpers.c
@@ -349,7 +349,7 @@ _posix_get_marker_all_contributions(posix_xattr_filler_t *filler)
goto out;
}
- list = alloca(size);
+ list = GF_MALLOC(size, gf_posix_mt_char);
if (!list) {
goto out;
}
@@ -379,6 +379,7 @@ _posix_get_marker_all_contributions(posix_xattr_filler_t *filler)
ret = 0;
out:
+ GF_FREE(list);
return ret;
}
diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c
index 21119ea..1d37aed 100644
--- a/xlators/storage/posix/src/posix-inode-fd-ops.c
+++ b/xlators/storage/posix/src/posix-inode-fd-ops.c
@@ -3305,7 +3305,7 @@ posix_get_ancestry_non_directory(xlator_t *this, inode_t *leaf_inode,
goto out;
}
- list = alloca(size);
+ list = GF_MALLOC(size, gf_posix_mt_char);
if (!list) {
*op_errno = errno;
goto out;
@@ -3385,6 +3385,7 @@ posix_get_ancestry_non_directory(xlator_t *this, inode_t *leaf_inode,
op_ret = 0;
out:
+ GF_FREE(list);
return op_ret;
}
@@ -3810,7 +3811,8 @@ posix_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
if (size == 0)
goto done;
}
- list = alloca(size);
+
+ list = GF_MALLOC(size, gf_posix_mt_char);
if (!list) {
op_errno = errno;
goto out;
@@ -3937,6 +3939,7 @@ out:
dict_unref(dict);
}
+ GF_FREE(list);
return 0;
}
@@ -4136,7 +4139,8 @@ posix_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
if (size == 0)
goto done;
}
- list = alloca(size + 1);
+
+ list = GF_MALLOC(size, gf_posix_mt_char);
if (!list) {
op_ret = -1;
op_errno = ENOMEM;
@@ -4240,6 +4244,8 @@ out:
if (dict)
dict_unref(dict);
+ GF_FREE(list);
+
return 0;
}
--
1.8.3.1
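
The pattern above — probe the xattr list size first, then allocate it on the heap — can be shown with plain libc. A minimal sketch (Linux llistxattr(); error handling trimmed):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/xattr.h>

    int main(int argc, char **argv)
    {
        const char *path = argc > 1 ? argv[1] : ".";
        ssize_t size = llistxattr(path, NULL, 0);  /* probe required size */
        char *list;

        if (size <= 0)
            return 0;

        /* heap, not alloca(): a huge xattr list must not eat the 256k
         * thread stack */
        list = malloc(size);
        if (!list)
            return 1;

        size = llistxattr(path, list, size);
        if (size > 0)
            printf("xattr list is %zd bytes\n", size);

        free(list);   /* unlike alloca(), this must be paired with free() */
        return 0;
    }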

View File

@ -0,0 +1,80 @@
From 85a5cce40dba0393e636c0eb5af9d8f8746f2315 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawal@redhat.com>
Date: Thu, 2 Jan 2020 10:23:52 +0530
Subject: [PATCH 497/511] socket: Use AES128 cipher in SSL if AES is supported
by CPU
SSL performance is improved after configuring the AES128 cipher,
so use AES128 as the default cipher on CPUs that have the AES
bits enabled; otherwise SSL uses the AES256 cipher.
> Change-Id: I91c50fe987cbb22ed76f8012094730c592c63506
> Fixes: #1050
> Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
> (Cherry pick from commit 177cc09d24515596eb51739ce0a276c26e3c52f1)
> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/23952/)
Change-Id: I91c50fe987cbb22ed76f8012094730c592c63506
Bug: 1612973
Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220870
Tested-by: Mohit Agrawal <moagrawa@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
rpc/rpc-transport/socket/src/socket.c | 32 ++++++++++++++++++++++++++++++++
1 file changed, 32 insertions(+)
diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c
index 54cd5df..1ee7320 100644
--- a/rpc/rpc-transport/socket/src/socket.c
+++ b/rpc/rpc-transport/socket/src/socket.c
@@ -4238,6 +4238,34 @@ static void __attribute__((destructor)) fini_openssl_mt(void)
ERR_free_strings();
}
+/* The function returns 0 if AES bit is enabled on the CPU */
+static int
+ssl_check_aes_bit(void)
+{
+ FILE *fp = fopen("/proc/cpuinfo", "r");
+ int ret = 1;
+ size_t len = 0;
+ char *line = NULL;
+ char *match = NULL;
+
+ GF_ASSERT(fp != NULL);
+
+ while (getline(&line, &len, fp) > 0) {
+ if (!strncmp(line, "flags", 5)) {
+ match = strstr(line, " aes");
+ if ((match != NULL) && ((match[4] == ' ') || (match[4] == 0))) {
+ ret = 0;
+ break;
+ }
+ }
+ }
+
+ free(line);
+ fclose(fp);
+
+ return ret;
+}
+
static int
ssl_setup_connection_params(rpc_transport_t *this)
{
@@ -4261,6 +4289,10 @@ ssl_setup_connection_params(rpc_transport_t *this)
return 0;
}
+ if (!ssl_check_aes_bit()) {
+ cipher_list = "AES128:" DEFAULT_CIPHER_LIST;
+ }
+
priv->ssl_own_cert = DEFAULT_CERT_PATH;
if (dict_get_str(this->options, SSL_OWN_CERT_OPT, &optstr) == 0) {
if (!priv->ssl_enabled) {
--
1.8.3.1
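
A note on the design choice: parsing /proc/cpuinfo works with any toolchain but is Linux-specific. On x86 with GCC or Clang, a hypothetical alternative probe (not what this patch ships) is the compiler builtin:

    #include <stdio.h>

    int main(void)
    {
    #if defined(__x86_64__) || defined(__i386__)
        /* GCC/Clang builtin: reads the CPUID feature bits directly */
        if (__builtin_cpu_supports("aes"))
            printf("AES-NI available: prefer the AES128 cipher list\n");
        else
    #endif
            printf("no AES-NI detected: keep the default cipher list\n");
        return 0;
    }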

View File

@ -0,0 +1,69 @@
From 11d648660b8bd246756f87b2f40c72fbabf084d1 Mon Sep 17 00:00:00 2001
From: Sunny Kumar <sunkumar@redhat.com>
Date: Tue, 19 May 2020 16:13:01 +0100
Subject: [PATCH 498/511] geo-rep: Fix corner case in rename on mkdir during
hybrid crawl
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Problem:
The issue is hit during hybrid mode while handling a rename on the slave.
In this special case the rename is recorded as mkdir, and geo-rep processes it
by resolving the path from the backend.
While resolving the backend path during this special handling, one corner case is not considered.
<snip>
Traceback (most recent call last):
  File "/usr/libexec/glusterfs/python/syncdaemon/repce.py", line 118, in worker
    res = getattr(self.obj, rmeth)(*in_data[2:])
  File "/usr/libexec/glusterfs/python/syncdaemon/resource.py", line 588, in entry_ops
    src_entry = get_slv_dir_path(slv_host, slv_volume, gfid)
  File "/usr/libexec/glusterfs/python/syncdaemon/syncdutils.py", line 710, in get_slv_dir_path
    dir_entry = os.path.join(pfx, pargfid, basename)
  File "/usr/lib64/python2.7/posixpath.py", line 75, in join
    if b.startswith('/'):
AttributeError: 'int' object has no attribute 'startswith'
In python3:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib64/python3.8/posixpath.py", line 90, in join
genericpath._check_arg_types('join', a, *p)
File "/usr/lib64/python3.8/genericpath.py", line 152, in _check_arg_types
raise TypeError(f'{funcname}() argument must be str, bytes, or '
TypeError: join() argument must be str, bytes, or os.PathLike object, not 'int'
</snip>
>Change-Id: I8b926899c60ad8c4ffc886d57028ba70fd21e332
>Fixes: #1250
>Signed-off-by: Sunny Kumar <sunkumar@redhat.com>
Upstream Patch: https://review.gluster.org/c/glusterfs/+/24468/
BUG: 1835229
Change-Id: I8b926899c60ad8c4ffc886d57028ba70fd21e332
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220867
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
geo-replication/syncdaemon/syncdutils.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/geo-replication/syncdaemon/syncdutils.py b/geo-replication/syncdaemon/syncdutils.py
index d5a94d4..26c79d0 100644
--- a/geo-replication/syncdaemon/syncdutils.py
+++ b/geo-replication/syncdaemon/syncdutils.py
@@ -732,6 +732,8 @@ def get_slv_dir_path(slv_host, slv_volume, gfid):
else:
dirpath = dirpath.strip("/")
pargfid = get_gfid_from_mnt(dirpath)
+ if isinstance(pargfid, int):
+ return None
dir_entry = os.path.join(pfx, pargfid, basename)
return dir_entry
--
1.8.3.1

View File

@ -0,0 +1,63 @@
From f78a5d86c55149d80b6efdf60eae7221c238654e Mon Sep 17 00:00:00 2001
From: Rinku Kothiya <rkothiya@redhat.com>
Date: Thu, 24 Sep 2020 12:43:51 +0000
Subject: [PATCH 499/511] gfapi: give appropriate error when size exceeds
This patch helps generate an appropriate error message
when gfapi tries to write data equal to or
greater than 1 GB, owing to the limitation at the
socket layer.
Upstream:
> Reviewed-on: https://github.com/gluster/glusterfs/pull/1557
> fixes: #1518
> Change-Id: I1234a0b5a6e675a0b20c6b1afe0f4390fd721f6f
> Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
BUG: 1691320
Change-Id: I1234a0b5a6e675a0b20c6b1afe0f4390fd721f6f
Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
Signed-off-by: Sunil Kumar Acharya <sheggodu@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/219998
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
api/src/gfapi-messages.h | 4 +++-
api/src/glfs-fops.c | 8 ++++++++
2 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/api/src/gfapi-messages.h b/api/src/gfapi-messages.h
index 68d1242..2ffd5ac 100644
--- a/api/src/gfapi-messages.h
+++ b/api/src/gfapi-messages.h
@@ -49,6 +49,8 @@ GLFS_MSGID(API, API_MSG_MEM_ACCT_INIT_FAILED, API_MSG_MASTER_XLATOR_INIT_FAILED,
API_MSG_INODE_LINK_FAILED, API_MSG_STATEDUMP_FAILED,
API_MSG_XREADDIRP_R_FAILED, API_MSG_LOCK_INSERT_MERGE_FAILED,
API_MSG_SETTING_LOCK_TYPE_FAILED, API_MSG_INODE_FIND_FAILED,
- API_MSG_FDCTX_SET_FAILED, API_MSG_UPCALL_SYNCOP_FAILED);
+ API_MSG_FDCTX_SET_FAILED, API_MSG_UPCALL_SYNCOP_FAILED,
+ API_MSG_INVALID_ARG);
+#define API_MSG_INVALID_ARG_STR "Invalid"
#endif /* !_GFAPI_MESSAGES_H__ */
diff --git a/api/src/glfs-fops.c b/api/src/glfs-fops.c
index e6adea5..051541f 100644
--- a/api/src/glfs-fops.c
+++ b/api/src/glfs-fops.c
@@ -1525,6 +1525,14 @@ glfs_pwritev_common(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
GF_REF_GET(glfd);
+ if (iovec->iov_len >= GF_UNIT_GB) {
+ ret = -1;
+ errno = EINVAL;
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ARG,
+ "size >= %llu is not allowed", GF_UNIT_GB, NULL);
+ goto out;
+ }
+
subvol = glfs_active_subvol(glfd->fs);
if (!subvol) {
ret = -1;
--
1.8.3.1
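
The new guard is a pre-flight size check on each write before it reaches the transport. A tiny sketch of the same idea (UNIT_GB here is a stand-in for GF_UNIT_GB):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/uio.h>

    #define UNIT_GB (1024ULL * 1024 * 1024)   /* stand-in for GF_UNIT_GB */

    static int checked_write(const struct iovec *iov)
    {
        if (iov->iov_len >= UNIT_GB) {
            errno = EINVAL;   /* fail fast instead of hitting the RPC cap */
            return -1;
        }
        /* ... hand the iovec to the actual write path here ... */
        return 0;
    }

    int main(void)
    {
        struct iovec big = { .iov_base = NULL, .iov_len = UNIT_GB };

        if (checked_write(&big) < 0)
            perror("write");   /* "write: Invalid argument" */
        return 0;
    }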

View File

@ -0,0 +1,104 @@
From 60789c658ea22063c26168cb4ce15ac5fd279e58 Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Mon, 14 Dec 2020 10:57:03 +0530
Subject: [PATCH 500/511] features/shard: Convert shard block indices to uint64
This patch fixes a crash in FOPs that operate on really large sharded
files, where the number of participant shards could sometimes exceed
the signed int32 max.
The patch also adds GF_ASSERTs to ensure that the number of participating
shards is always greater than 0 for files that do have more than one
shard.
Upstream:
> https://review.gluster.org/#/c/glusterfs/+/23407/
> Change-Id: I354de58796f350eb1aa42fcdf8092ca2e69ccbb6
> Fixes: #1348
> Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
BUG: 1752739
Change-Id: I354de58796f350eb1aa42fcdf8092ca2e69ccbb6
Signed-off-by: Vinayakswami Hariharmath <vharihar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/221061
Tested-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
---
xlators/features/shard/src/shard.c | 14 ++++++++------
xlators/features/shard/src/shard.h | 6 +++---
2 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c
index 16d557b..a967f35 100644
--- a/xlators/features/shard/src/shard.c
+++ b/xlators/features/shard/src/shard.c
@@ -1855,10 +1855,9 @@ int shard_truncate_last_shard(call_frame_t *frame, xlator_t *this,
*/
if (!inode) {
gf_msg_debug(this->name, 0,
- "Last shard to be truncated absent"
- " in backend: %s. Directly proceeding to update "
- "file size",
- uuid_utoa(inode->gfid));
+ "Last shard to be truncated absent in backend: " PRIu64
+ " of gfid: %s. Directly proceeding to update file size",
+ local->first_block, uuid_utoa(local->loc.inode->gfid));
shard_update_file_size(frame, this, NULL, &local->loc,
shard_post_update_size_truncate_handler);
return 0;
@@ -2389,6 +2388,7 @@ int shard_truncate_begin(call_frame_t *frame, xlator_t *this) {
get_highest_block(0, local->prebuf.ia_size, local->block_size);
local->num_blocks = local->last_block - local->first_block + 1;
+ GF_ASSERT(local->num_blocks > 0);
local->resolver_base_inode =
(local->fop == GF_FOP_TRUNCATE) ? local->loc.inode : local->fd->inode;
@@ -4809,6 +4809,7 @@ int shard_post_lookup_readv_handler(call_frame_t *frame, xlator_t *this) {
get_highest_block(local->offset, local->total_size, local->block_size);
local->num_blocks = local->last_block - local->first_block + 1;
+ GF_ASSERT(local->num_blocks > 0);
local->resolver_base_inode = local->loc.inode;
local->inode_list =
@@ -5266,6 +5267,7 @@ int shard_common_inode_write_post_lookup_handler(call_frame_t *frame,
local->last_block =
get_highest_block(local->offset, local->total_size, local->block_size);
local->num_blocks = local->last_block - local->first_block + 1;
+ GF_ASSERT(local->num_blocks > 0);
local->inode_list =
GF_CALLOC(local->num_blocks, sizeof(inode_t *), gf_shard_mt_inode_list);
if (!local->inode_list) {
@@ -5274,8 +5276,8 @@ int shard_common_inode_write_post_lookup_handler(call_frame_t *frame,
}
gf_msg_trace(
- this->name, 0, "%s: gfid=%s first_block=%" PRIu32 " "
- "last_block=%" PRIu32 " num_blocks=%" PRIu32
+ this->name, 0, "%s: gfid=%s first_block=%" PRIu64 " "
+ "last_block=%" PRIu64 " num_blocks=%" PRIu64
" offset=%" PRId64 " total_size=%zu flags=%" PRId32 "",
gf_fop_list[local->fop], uuid_utoa(local->resolver_base_inode->gfid),
local->first_block, local->last_block, local->num_blocks, local->offset,
diff --git a/xlators/features/shard/src/shard.h b/xlators/features/shard/src/shard.h
index 1721417..4fe181b 100644
--- a/xlators/features/shard/src/shard.h
+++ b/xlators/features/shard/src/shard.h
@@ -254,9 +254,9 @@ typedef int32_t (*shard_post_update_size_fop_handler_t)(call_frame_t *frame,
typedef struct shard_local {
int op_ret;
int op_errno;
- int first_block;
- int last_block;
- int num_blocks;
+ uint64_t first_block;
+ uint64_t last_block;
+ uint64_t num_blocks;
int call_count;
int eexist_count;
int create_count;
--
1.8.3.1
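
Why signed int32 is too narrow here: a shard index is the byte offset divided by the shard block size, and for large enough offsets the quotient no longer fits in 31 bits. A self-contained demonstration (4 MB shards, a hypothetical ~9 PB offset):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t block_size = 4ULL * 1024 * 1024;                 /* 4 MB */
        uint64_t offset = 9000ULL * 1024 * 1024 * 1024 * 1024;    /* 9000 TiB */

        int      narrow = (int)(offset / block_size);  /* doesn't fit */
        uint64_t wide   = offset / block_size;

        /* 2359296000 > INT32_MAX: typically wraps negative on
         * two's-complement targets */
        printf("as int:      %d\n", narrow);
        printf("as uint64_t: %" PRIu64 "\n", wide);    /* correct index */
        return 0;
    }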

View File

@ -0,0 +1,48 @@
From 070698ede9c3765c95364e8207c8311dbf895499 Mon Sep 17 00:00:00 2001
From: kiyer <kiyer@redhat.com>
Date: Tue, 8 Dec 2020 15:18:49 +0530
Subject: [PATCH 501/511] Cli: Removing old syntax of tier cmds from help menu
Remove old syntax of attach-tier and detach-tier
commands from help menu.
Label: DOWNSTREAM ONLY
BUG: 1813866
Change-Id: If86e4828b475fb593a5105ca8deac96374f9542d
Signed-off-by: kiyer <kiyer@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220510
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
cli/src/cli-cmd-volume.c | 13 -------------
1 file changed, 13 deletions(-)
diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
index 6f5bf8b..b6bef80 100644
--- a/cli/src/cli-cmd-volume.c
+++ b/cli/src/cli-cmd-volume.c
@@ -3331,19 +3331,6 @@ struct cli_cmd tier_cmds[] = {
{"volume tier <VOLNAME> detach <start|stop|status|commit|[force]>",
cli_cmd_volume_tier_cbk, "Detach the hot tier from <VOLNAME>"},
- {"volume attach-tier <VOLNAME> [<replica COUNT>] <NEW-BRICK>...",
- cli_cmd_volume_tier_cbk,
- "NOTE: this is old syntax, will be deprecated in next release. "
- "Please use gluster volume tier <vol> attach "
- "[<replica COUNT>] <NEW-BRICK>..."},
-
- {"volume detach-tier <VOLNAME> "
- "<start|stop|status|commit|force>",
- cli_cmd_volume_tier_cbk,
- "NOTE: this is old syntax, will be deprecated in next release. "
- "Please use gluster volume tier <vol> detach "
- "{start|stop|commit} [force]"},
-
{"volume tier <VOLNAME> status\n"
"volume tier <VOLNAME> start [force]\n"
"volume tier <VOLNAME> stop\n"
--
1.8.3.1

View File

@ -0,0 +1,225 @@
From 3f1eee125a35c33ecb078e5d3bfd80d80e63881d Mon Sep 17 00:00:00 2001
From: Barak Sason Rofman <bsasonro@redhat.com>
Date: Wed, 15 Jan 2020 12:02:05 +0200
Subject: [PATCH 502/511] dht - fixing a permission update issue
When bringing back a downed brick and performing lookup from the client
side, the permission on said brick aren't updated on the first lookup,
but only on the second.
This patch modifies permission update logic so the first lookup will
trigger a permission update on the downed brick.
LIMITATIONS OF THE PATCH:
As the choice of source depends on whether the directory has layout or not.
Even the directories on the newly added brick will have layout xattr[zeroed], but the same is not true for a root directory.
Hence, in case in the entire cluster only the newly added bricks are up [and others are down], then any change in permission during this time will be overwritten by the older permissions when the cluster is restarted.
Upstream:
> Reviewed-on: https://review.gluster.org/#/c/glusterfs/+/24020/
> fixes: #999
> Change-Id: Ieb70246d41e59f9cae9f70bc203627a433dfbd33
> Signed-off-by: Barak Sason Rofman <bsasonro@redhat.com>
BUG: 1663821
Change-Id: Ieb70246d41e59f9cae9f70bc203627a433dfbd33
Signed-off-by: Barak Sason Rofman <bsasonro@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/221116
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/bugs/bug-1064147.t | 71 ++++++++++++++++++++++++++++++++
xlators/cluster/dht/src/dht-common.c | 28 ++++++++++---
xlators/cluster/dht/src/dht-selfheal.c | 15 +++++--
xlators/storage/posix/src/posix-common.c | 16 +++----
4 files changed, 111 insertions(+), 19 deletions(-)
create mode 100755 tests/bugs/bug-1064147.t
diff --git a/tests/bugs/bug-1064147.t b/tests/bugs/bug-1064147.t
new file mode 100755
index 0000000..617a1aa
--- /dev/null
+++ b/tests/bugs/bug-1064147.t
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+# Initialize
+#------------------------------------------------------------
+cleanup;
+
+# Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+# Create a volume
+TEST $CLI volume create $V0 $H0:/${V0}{1,2};
+
+# Verify volume creation
+ EXPECT "$V0" volinfo_field $V0 'Volume Name';
+ EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Start volume and verify successful start
+ TEST $CLI volume start $V0;
+ EXPECT 'Started' volinfo_field $V0 'Status';
+ TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+#------------------------------------------------------------
+
+# Test case 1 - Subvolume down + Healing
+#------------------------------------------------------------
+# Kill 2nd brick process
+TEST kill -9 `ps aux | grep glusterfsd | grep ${V0}2 | grep -v grep | awk '{print $2}'`;
+
+# Change root permissions
+TEST chmod 444 $M0
+
+# Store permission for comparison
+TEST permission_new=`stat -c "%A" $M0`
+
+# Bring up the killed brick process
+TEST $CLI volume start $V0 force
+
+# Perform lookup
+sleep 5
+TEST ls $M0
+
+# Check brick permissions
+TEST brick_perm=`stat -c "%A" /${V0}2`
+TEST [ ${brick_perm} = ${permission_new} ]
+#------------------------------------------------------------
+
+# Test case 2 - Add-brick + Healing
+#------------------------------------------------------------
+# Change root permissions
+TEST chmod 777 $M0
+
+# Store permission for comparison
+TEST permission_new_2=`stat -c "%A" $M0`
+
+# Add a 3rd brick
+TEST $CLI volume add-brick $V0 $H0:/${V0}3
+
+# Perform lookup
+sleep 5
+TEST ls $M0
+
+# Check permissions on the new brick
+TEST brick_perm2=`stat -c "%A" /${V0}3`
+
+TEST [ ${brick_perm2} = ${permission_new_2} ]
+
+cleanup;
diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c
index 4db89df..fe1d0ee 100644
--- a/xlators/cluster/dht/src/dht-common.c
+++ b/xlators/cluster/dht/src/dht-common.c
@@ -1363,13 +1363,29 @@ dht_lookup_dir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
dht_aggregate_xattr(local->xattr, xattr);
}
+ if (__is_root_gfid(stbuf->ia_gfid)) {
+ ret = dht_dir_has_layout(xattr, conf->xattr_name);
+ if (ret >= 0) {
+ if (is_greater_time(local->prebuf.ia_ctime,
+ local->prebuf.ia_ctime_nsec,
+ stbuf->ia_ctime, stbuf->ia_ctime_nsec)) {
+ /* Choose source */
+ local->prebuf.ia_gid = stbuf->ia_gid;
+ local->prebuf.ia_uid = stbuf->ia_uid;
+
+ local->prebuf.ia_ctime = stbuf->ia_ctime;
+ local->prebuf.ia_ctime_nsec = stbuf->ia_ctime_nsec;
+ local->prebuf.ia_prot = stbuf->ia_prot;
+ }
+ }
+ }
+
if (local->stbuf.ia_type != IA_INVAL) {
/* This is not the first subvol to respond */
- if (!__is_root_gfid(stbuf->ia_gfid) &&
- ((local->stbuf.ia_gid != stbuf->ia_gid) ||
- (local->stbuf.ia_uid != stbuf->ia_uid) ||
- (is_permission_different(&local->stbuf.ia_prot,
- &stbuf->ia_prot)))) {
+ if ((local->stbuf.ia_gid != stbuf->ia_gid) ||
+ (local->stbuf.ia_uid != stbuf->ia_uid) ||
+ (is_permission_different(&local->stbuf.ia_prot,
+ &stbuf->ia_prot))) {
local->need_attrheal = 1;
}
}
@@ -10969,7 +10985,7 @@ dht_notify(xlator_t *this, int event, void *data, ...)
if ((cmd == GF_DEFRAG_CMD_STATUS) ||
(cmd == GF_DEFRAG_CMD_STATUS_TIER) ||
(cmd == GF_DEFRAG_CMD_DETACH_STATUS))
- gf_defrag_status_get(conf, output, _gf_false);
+ gf_defrag_status_get(conf, output, _gf_false);
else if (cmd == GF_DEFRAG_CMD_START_DETACH_TIER)
gf_defrag_start_detach_tier(defrag);
else if (cmd == GF_DEFRAG_CMD_DETACH_START)
diff --git a/xlators/cluster/dht/src/dht-selfheal.c b/xlators/cluster/dht/src/dht-selfheal.c
index f5dfff9..f4e17d1 100644
--- a/xlators/cluster/dht/src/dht-selfheal.c
+++ b/xlators/cluster/dht/src/dht-selfheal.c
@@ -2097,9 +2097,18 @@ dht_selfheal_directory(call_frame_t *frame, dht_selfheal_dir_cbk_t dir_cbk,
local->selfheal.dir_cbk = dir_cbk;
local->selfheal.layout = dht_layout_ref(this, layout);
- if (local->need_attrheal && !IA_ISINVAL(local->mds_stbuf.ia_type)) {
- /*Use the one in the mds_stbuf*/
- local->stbuf = local->mds_stbuf;
+ if (local->need_attrheal) {
+ if (__is_root_gfid(local->stbuf.ia_gfid)) {
+ local->stbuf.ia_gid = local->prebuf.ia_gid;
+ local->stbuf.ia_uid = local->prebuf.ia_uid;
+
+ local->stbuf.ia_ctime = local->prebuf.ia_ctime;
+ local->stbuf.ia_ctime_nsec = local->prebuf.ia_ctime_nsec;
+ local->stbuf.ia_prot = local->prebuf.ia_prot;
+
+ } else if (!IA_ISINVAL(local->mds_stbuf.ia_type)) {
+ local->stbuf = local->mds_stbuf;
+ }
}
if (!__is_root_gfid(local->stbuf.ia_gfid)) {
diff --git a/xlators/storage/posix/src/posix-common.c b/xlators/storage/posix/src/posix-common.c
index c5a43a1..e5c6e62 100644
--- a/xlators/storage/posix/src/posix-common.c
+++ b/xlators/storage/posix/src/posix-common.c
@@ -598,6 +598,7 @@ posix_init(xlator_t *this)
int force_directory = -1;
int create_mask = -1;
int create_directory_mask = -1;
+ char value;
dir_data = dict_get(this->options, "directory");
@@ -654,16 +655,11 @@ posix_init(xlator_t *this)
}
/* Check for Extended attribute support, if not present, log it */
- op_ret = sys_lsetxattr(dir_data->data, "trusted.glusterfs.test", "working",
- 8, 0);
- if (op_ret != -1) {
- ret = sys_lremovexattr(dir_data->data, "trusted.glusterfs.test");
- if (ret) {
- gf_msg(this->name, GF_LOG_DEBUG, errno, P_MSG_INVALID_OPTION,
- "failed to remove xattr: "
- "trusted.glusterfs.test");
- }
- } else {
+ size = sys_lgetxattr(dir_data->data, "user.x", &value, sizeof(value));
+
+ if ((size == -1) && (errno == EOPNOTSUPP)) {
+ gf_msg(this->name, GF_LOG_DEBUG, 0, P_MSG_XDATA_GETXATTR,
+ "getxattr returned %zd", size);
tmp_data = dict_get(this->options, "mandate-attribute");
if (tmp_data) {
if (gf_string2boolean(tmp_data->data, &tmp_bool) == -1) {
--
1.8.3.1
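
The source selection above keys off which copy of the root directory carries the newest ctime. A compact sketch of a newest-timestamp comparison of that shape (hypothetical struct; the real code compares ia_ctime/ia_ctime_nsec via is_greater_time()):

    #include <stdio.h>

    struct tstamp { long sec; long nsec; };

    /* true if a is strictly newer than b, seconds first, then nanoseconds */
    static int is_newer(struct tstamp a, struct tstamp b)
    {
        return (a.sec > b.sec) || (a.sec == b.sec && a.nsec > b.nsec);
    }

    int main(void)
    {
        struct tstamp downed  = { 100, 0 };
        struct tstamp healthy = { 250, 500 };

        /* the copy written most recently wins and drives the attr heal */
        printf("heal from the %s brick\n",
               is_newer(healthy, downed) ? "healthy" : "downed");
        return 0;
    }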

View File

@ -0,0 +1,179 @@
From 5946a6ec18976c0f52162fe0f47e9b5171af87ec Mon Sep 17 00:00:00 2001
From: Soumya Koduri <skoduri@redhat.com>
Date: Mon, 6 Apr 2020 12:36:44 +0530
Subject: [PATCH 503/511] gfapi: Suspend synctasks instead of blocking them
There are certain conditions which blocks the current
execution thread (like waiting on mutex lock or condition
variable or I/O response). In such cases, if it is a
synctask thread, we should suspend the task instead
of blocking it (like done in SYNCOP using synctask_yield)
This is to avoid deadlock like the one mentioned below -
1) synctaskA sets fs->migration_in_progress to 1 and
does I/O (LOOKUP)
2) Other synctask threads wait for fs->migration_in_progress
to be reset to 0 by synctaskA and hence blocked
3) but synctaskA cannot resume as all synctask threads are blocked
on (2).
Note: this same approach is already used by few other components
like syncbarrier etc.
>Change-Id: If90f870d663bb242c702a5b86ac52eeda67c6f0d
>Fixes: #1146
>Signed-off-by: Soumya Koduri <skoduri@redhat.com>
Upstream patch: https://review.gluster.org/c/glusterfs/+/24276
BUG: 1779238
Change-Id: If90f870d663bb242c702a5b86ac52eeda67c6f0d
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/221081
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
---
api/src/glfs-internal.h | 34 ++++++++++++++++++++++++++++++++--
api/src/glfs-resolve.c | 9 +++++++++
api/src/glfs.c | 9 +++++++++
3 files changed, 50 insertions(+), 2 deletions(-)
diff --git a/api/src/glfs-internal.h b/api/src/glfs-internal.h
index 55401b2..15cf0ee 100644
--- a/api/src/glfs-internal.h
+++ b/api/src/glfs-internal.h
@@ -16,6 +16,7 @@
#include <glusterfs/upcall-utils.h>
#include "glfs-handles.h"
#include <glusterfs/refcount.h>
+#include <glusterfs/syncop.h>
#define GLFS_SYMLINK_MAX_FOLLOW 2048
@@ -207,6 +208,7 @@ struct glfs {
glfs_upcall_cbk up_cbk; /* upcall cbk function to be registered */
void *up_data; /* Opaque data provided by application
* during upcall registration */
+ struct list_head waitq; /* waiting synctasks */
};
/* This enum is used to maintain the state of glfd. In case of async fops
@@ -442,6 +444,34 @@ glfs_process_upcall_event(struct glfs *fs, void *data)
THIS = glfd->fd->inode->table->xl->ctx->master; \
} while (0)
+#define __GLFS_LOCK_WAIT(fs) \
+ do { \
+ struct synctask *task = NULL; \
+ \
+ task = synctask_get(); \
+ \
+ if (task) { \
+ list_add_tail(&task->waitq, &fs->waitq); \
+ pthread_mutex_unlock(&fs->mutex); \
+ synctask_yield(task, NULL); \
+ pthread_mutex_lock(&fs->mutex); \
+ } else { \
+ /* non-synctask */ \
+ pthread_cond_wait(&fs->cond, &fs->mutex); \
+ } \
+ } while (0)
+
+#define __GLFS_SYNCTASK_WAKE(fs) \
+ do { \
+ struct synctask *waittask = NULL; \
+ \
+ while (!list_empty(&fs->waitq)) { \
+ waittask = list_entry(fs->waitq.next, struct synctask, waitq); \
+ list_del_init(&waittask->waitq); \
+ synctask_wake(waittask); \
+ } \
+ } while (0)
+
/*
By default all lock attempts from user context must
use glfs_lock() and glfs_unlock(). This allows
@@ -466,10 +496,10 @@ glfs_lock(struct glfs *fs, gf_boolean_t wait_for_migration)
pthread_mutex_lock(&fs->mutex);
while (!fs->init)
- pthread_cond_wait(&fs->cond, &fs->mutex);
+ __GLFS_LOCK_WAIT(fs);
while (wait_for_migration && fs->migration_in_progress)
- pthread_cond_wait(&fs->cond, &fs->mutex);
+ __GLFS_LOCK_WAIT(fs);
return 0;
}
diff --git a/api/src/glfs-resolve.c b/api/src/glfs-resolve.c
index 062b7dc..58b6ace 100644
--- a/api/src/glfs-resolve.c
+++ b/api/src/glfs-resolve.c
@@ -65,6 +65,9 @@ __glfs_first_lookup(struct glfs *fs, xlator_t *subvol)
fs->migration_in_progress = 0;
pthread_cond_broadcast(&fs->cond);
+ /* wake up other waiting tasks */
+ __GLFS_SYNCTASK_WAKE(fs);
+
return ret;
}
@@ -154,6 +157,9 @@ __glfs_refresh_inode(struct glfs *fs, xlator_t *subvol, inode_t *inode,
fs->migration_in_progress = 0;
pthread_cond_broadcast(&fs->cond);
+ /* wake up other waiting tasks */
+ __GLFS_SYNCTASK_WAKE(fs);
+
return newinode;
}
@@ -841,6 +847,9 @@ __glfs_migrate_fd(struct glfs *fs, xlator_t *newsubvol, struct glfs_fd *glfd)
fs->migration_in_progress = 0;
pthread_cond_broadcast(&fs->cond);
+ /* wake up other waiting tasks */
+ __GLFS_SYNCTASK_WAKE(fs);
+
return newfd;
}
diff --git a/api/src/glfs.c b/api/src/glfs.c
index f36616d..ae994fa 100644
--- a/api/src/glfs.c
+++ b/api/src/glfs.c
@@ -740,6 +740,7 @@ glfs_new_fs(const char *volname)
INIT_LIST_HEAD(&fs->openfds);
INIT_LIST_HEAD(&fs->upcall_list);
+ INIT_LIST_HEAD(&fs->waitq);
PTHREAD_MUTEX_INIT(&fs->mutex, NULL, fs->pthread_flags, GLFS_INIT_MUTEX,
err);
@@ -1228,6 +1229,7 @@ pub_glfs_fini(struct glfs *fs)
call_pool_t *call_pool = NULL;
int fs_init = 0;
int err = -1;
+ struct synctask *waittask = NULL;
DECLARE_OLD_THIS;
@@ -1249,6 +1251,13 @@ pub_glfs_fini(struct glfs *fs)
call_pool = fs->ctx->pool;
+ /* Wake up any suspended synctasks */
+ while (!list_empty(&fs->waitq)) {
+ waittask = list_entry(fs->waitq.next, struct synctask, waitq);
+ list_del_init(&waittask->waitq);
+ synctask_wake(waittask);
+ }
+
while (countdown--) {
/* give some time for background frames to finish */
pthread_mutex_lock(&fs->mutex);
--
1.8.3.1
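The waitq mechanism added above is easiest to read in isolation: a synctask
that must wait parks itself on fs->waitq and yields its carrier thread back
to the syncenv instead of blocking it, and whoever clears the blocking
condition drains the queue and reschedules every parked task. A condensed
view of the two halves, using the names from the patch (the list and
synctask APIs are glusterfs internals, so this is a reading aid rather than
standalone code):

    /* waiter side, under fs->mutex (from __GLFS_LOCK_WAIT) */
    struct synctask *task = synctask_get();       /* NULL for plain threads */
    if (task) {
        list_add_tail(&task->waitq, &fs->waitq);  /* park the task */
        pthread_mutex_unlock(&fs->mutex);
        synctask_yield(task, NULL);               /* free the carrier thread */
        pthread_mutex_lock(&fs->mutex);           /* resumed after a wake */
    } else {
        pthread_cond_wait(&fs->cond, &fs->mutex); /* non-synctask callers */
    }

    /* waker side (from __GLFS_SYNCTASK_WAKE): drain and reschedule */
    while (!list_empty(&fs->waitq)) {
        struct synctask *w = list_entry(fs->waitq.next, struct synctask, waitq);
        list_del_init(&w->waitq);
        synctask_wake(w);
    }

Because a parked synctask no longer occupies a syncenv worker thread,
synctaskA from the deadlock scenario above can always be scheduled to
finish its LOOKUP and wake the others.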

View File

@ -0,0 +1,109 @@
From baa566be8832a56fdea7068d84844ec1ec84d8d9 Mon Sep 17 00:00:00 2001
From: mohit84 <moagrawa@redhat.com>
Date: Thu, 15 Oct 2020 16:28:58 +0530
Subject: [PATCH 504/511] io-stats: Configure ios_sample_buf_size based on
sample_interval value (#1574)
The io-stats xlator allocates a 64k-entry ios_sample_buf object (~10M)
per xlator, but when sample_interval is 0 this big buffer is not
required, so allocate the default-sized buffer only while
sample_interval is not 0. The change helps reduce the RSS size of
brick and shd processes when the number of volumes is huge.
> Change-Id: I3e82cca92e40549355edfac32580169f3ce51af8
> Fixes: #1542
> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
> (Cherry picked from commit f71660eb879a9cd5761e5adbf10c783e959a990a)
> (Reviewed on upstream link https://github.com/gluster/glusterfs/issues/1542)
Change-Id: I3e82cca92e40549355edfac32580169f3ce51af8
BUG: 1898778
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/221183
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/bugs/glusterd/daemon-log-level-option.t | 8 ++++----
xlators/debug/io-stats/src/io-stats.c | 26 ++++++++++++++++++++++----
2 files changed, 26 insertions(+), 8 deletions(-)
diff --git a/tests/bugs/glusterd/daemon-log-level-option.t b/tests/bugs/glusterd/daemon-log-level-option.t
index 66e55e3..5352a63 100644
--- a/tests/bugs/glusterd/daemon-log-level-option.t
+++ b/tests/bugs/glusterd/daemon-log-level-option.t
@@ -61,8 +61,8 @@ rm -f /var/log/glusterfs/glustershd.log
TEST $CLI volume set all cluster.daemon-log-level WARNING
TEST $CLI volume start $V0
-# log should not have any info messages
-EXPECT 0 Info_messages_count "/var/log/glusterfs/glustershd.log"
+# log does have 1 info message specific to configure ios_sample_buf_size in io-stats xlator
+EXPECT 1 Info_messages_count "/var/log/glusterfs/glustershd.log"
# log should not have any debug messages
EXPECT 0 Debug_messages_count "/var/log/glusterfs/glustershd.log"
@@ -78,8 +78,8 @@ rm -f /var/log/glusterfs/glustershd.log
TEST $CLI volume set all cluster.daemon-log-level ERROR
TEST $CLI volume start $V0
-# log should not have any info messages
-EXPECT 0 Info_messages_count "/var/log/glusterfs/glustershd.log"
+# log does have 1 info message specific to configure ios_sample_buf_size in io-stats xlator
+EXPECT 1 Info_messages_count "/var/log/glusterfs/glustershd.log"
# log should not have any warning messages
EXPECT 0 Warning_messages_count "/var/log/glusterfs/glustershd.log"
diff --git a/xlators/debug/io-stats/src/io-stats.c b/xlators/debug/io-stats/src/io-stats.c
index aa91a0a..9b34895 100644
--- a/xlators/debug/io-stats/src/io-stats.c
+++ b/xlators/debug/io-stats/src/io-stats.c
@@ -3724,6 +3724,15 @@ xlator_set_loglevel(xlator_t *this, int log_level)
}
}
+void
+ios_sample_buf_size_configure(char *name, struct ios_conf *conf)
+{
+ conf->ios_sample_buf_size = 1024;
+ gf_log(name, GF_LOG_INFO,
+ "Configure ios_sample_buf "
+ " size is 1024 because ios_sample_interval is 0");
+}
+
int
reconfigure(xlator_t *this, dict_t *options)
{
@@ -3779,8 +3788,13 @@ reconfigure(xlator_t *this, dict_t *options)
int32, out);
GF_OPTION_RECONF("ios-dump-format", dump_format_str, options, str, out);
ios_set_log_format_code(conf, dump_format_str);
- GF_OPTION_RECONF("ios-sample-buf-size", conf->ios_sample_buf_size, options,
- int32, out);
+ if (conf->ios_sample_interval) {
+ GF_OPTION_RECONF("ios-sample-buf-size", conf->ios_sample_buf_size,
+ options, int32, out);
+ } else {
+ ios_sample_buf_size_configure(this->name, conf);
+ }
+
GF_OPTION_RECONF("sys-log-level", sys_log_str, options, str, out);
if (sys_log_str) {
sys_log_level = glusterd_check_log_level(sys_log_str);
@@ -3947,8 +3961,12 @@ init(xlator_t *this)
GF_OPTION_INIT("ios-dump-format", dump_format_str, str, out);
ios_set_log_format_code(conf, dump_format_str);
- GF_OPTION_INIT("ios-sample-buf-size", conf->ios_sample_buf_size, int32,
- out);
+ if (conf->ios_sample_interval) {
+ GF_OPTION_INIT("ios-sample-buf-size", conf->ios_sample_buf_size, int32,
+ out);
+ } else {
+ ios_sample_buf_size_configure(this->name, conf);
+ }
ret = ios_init_sample_buf(conf);
if (ret) {
--
1.8.3.1
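The change boils down to conditional sizing: honour the configured
ios-sample-buf-size only when sampling can actually happen, and fall back
to a small placeholder (1024 entries, per ios_sample_buf_size_configure())
otherwise. A self-contained toy showing the same decision (names here are
hypothetical, not the io-stats code):

    #include <stdio.h>

    #define DEFAULT_SAMPLE_BUF 65536   /* ~10M of samples when sampling is on */
    #define IDLE_SAMPLE_BUF    1024    /* small placeholder when interval == 0 */

    struct ios_conf {
        int    sample_interval;        /* 0 means sampling disabled */
        size_t sample_buf_size;
    };

    /* pick the buffer size the way the patch does: big only if sampling */
    static void conf_init(struct ios_conf *c, int interval, size_t configured)
    {
        c->sample_interval = interval;
        c->sample_buf_size = interval ? configured : IDLE_SAMPLE_BUF;
    }

    int main(void)
    {
        struct ios_conf on, off;
        conf_init(&on, 2, DEFAULT_SAMPLE_BUF);
        conf_init(&off, 0, DEFAULT_SAMPLE_BUF);
        printf("sampling on : %zu entries\n", on.sample_buf_size);
        printf("sampling off: %zu entries\n", off.sample_buf_size);
        return 0;
    }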

View File

@ -0,0 +1,107 @@
From 43a8e2c7441b14f5f238cb11d83f32f248b16abb Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawa@redhat.com>
Date: Tue, 13 Oct 2020 18:56:20 +0530
Subject: [PATCH 505/511] trash: Create inode_table only while feature is
enabled
Currently the trash xlator creates an inode table (1M) even if the
feature is not enabled. In a brick_mux environment, when 250 bricks
are attached to a single brick process and the feature is not
enabled, the brick process increases its RSS size unnecessarily.
Solution: Create the inode_table only while the feature is enabled.
The patch reduces the RSS size per brick process by 250M
if the trash feature is not enabled.
> Change-Id: I11a6fd2b8419fe2988f398be6ec30fb4f3b99a5d
> Fixes: #1543
> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
> (Cherry pick from commit 32f25e7b1b4b080ab2640e178b407c878e629376)
> (Reviewed on upstream link https://github.com/gluster/glusterfs/issues/1543)
Change-Id: I11a6fd2b8419fe2988f398be6ec30fb4f3b99a5d
BUG: 1898781
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/221184
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/features/trash/src/trash.c | 47 +++++++++++++++++++++++++++++++++++---
1 file changed, 44 insertions(+), 3 deletions(-)
diff --git a/xlators/features/trash/src/trash.c b/xlators/features/trash/src/trash.c
index f96ed73..93f020f 100644
--- a/xlators/features/trash/src/trash.c
+++ b/xlators/features/trash/src/trash.c
@@ -2235,16 +2235,47 @@ reconfigure(xlator_t *this, dict_t *options)
char trash_dir[PATH_MAX] = {
0,
};
+ gf_boolean_t active_earlier = _gf_false;
+ gf_boolean_t active_now = _gf_false;
priv = this->private;
GF_VALIDATE_OR_GOTO("trash", priv, out);
+ active_earlier = priv->state;
+ GF_OPTION_RECONF("trash", active_now, options, bool, out);
+
+ /* Disable of trash feature is not allowed at this point until
+ we are not able to find an approach to cleanup resource
+ gracefully. Here to disable the feature need to destroy inode
+ table and currently it is difficult to ensure inode is not
+ being used
+ */
+ if (active_earlier && !active_now) {
+ gf_log(this->name, GF_LOG_INFO,
+ "Disable of trash feature is not allowed "
+ "during graph reconfigure");
+ ret = 0;
+ goto out;
+ }
+
+ if (!active_earlier && active_now) {
+ if (!priv->trash_itable) {
+ priv->trash_itable = inode_table_new(0, this);
+ if (!priv->trash_itable) {
+ ret = -ENOMEM;
+ gf_log(this->name, GF_LOG_ERROR,
+ "failed to create trash inode_table"
+ " during graph reconfigure");
+ goto out;
+ }
+ }
+ priv->state = active_now;
+ }
+
GF_OPTION_RECONF("trash-internal-op", priv->internal, options, bool, out);
GF_OPTION_RECONF("trash-dir", tmp, options, str, out);
- GF_OPTION_RECONF("trash", priv->state, options, bool, out);
-
if (priv->state) {
ret = create_or_rename_trash_directory(this);
@@ -2501,7 +2532,17 @@ init(xlator_t *this)
goto out;
}
- priv->trash_itable = inode_table_new(0, this);
+ if (priv->state) {
+ priv->trash_itable = inode_table_new(0, this);
+ if (!priv->trash_itable) {
+ ret = -ENOMEM;
+ priv->state = _gf_false;
+ gf_log(this->name, GF_LOG_ERROR,
+ "failed to create trash inode_table disable trash");
+ goto out;
+ }
+ }
+
gf_log(this->name, GF_LOG_DEBUG, "brick path is%s", priv->brick_path);
this->private = (void *)priv;
--
1.8.3.1
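The pattern here is lazy allocation with a one-way switch: allocate the
table only on first enable, and refuse a live disable because the table
cannot be torn down safely while inodes may be in use. A small
self-contained sketch of that shape (hypothetical names, not the trash
xlator code):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    struct trash_priv {
        bool  enabled;
        void *itable;               /* stands in for inode_table_new(0, this) */
    };

    /* graph-reconfigure style toggle: allocate on first enable only */
    static int trash_reconfigure(struct trash_priv *p, bool want)
    {
        if (p->enabled && !want) {
            /* cannot prove no inode is in use, so a live disable is refused */
            fprintf(stderr, "disable not allowed during reconfigure\n");
            return 0;
        }
        if (!p->enabled && want) {
            if (!p->itable) {
                p->itable = malloc(1);   /* placeholder for the real table */
                if (!p->itable)
                    return -ENOMEM;
            }
            p->enabled = true;
        }
        return 0;
    }

    int main(void)
    {
        struct trash_priv p = {0};
        trash_reconfigure(&p, true);    /* allocates */
        trash_reconfigure(&p, false);   /* refused, table kept */
        printf("enabled=%d itable=%p\n", p.enabled, p.itable);
        free(p.itable);
        return 0;
    }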

View File

@ -0,0 +1,499 @@
From 17a9ce965ef2fec9ee5c8e4b76981bb7cbcf1352 Mon Sep 17 00:00:00 2001
From: mohit84 <moagrawa@redhat.com>
Date: Mon, 9 Nov 2020 17:15:42 +0530
Subject: [PATCH 506/511] posix: Attach a posix_spawn_disk_thread with
glusterfs_ctx (#1595)
Currently the posix xlator spawns one posix_disk_space thread per
brick; in a brick_mux environment, when glusterd attaches bricks at
the maximum level (250) to a single brick process, 250 threads are
spawned for all bricks and the brick process memory size also grows.
Solution: Attach a posix_disk_space thread to glusterfs_ctx to
spawn one thread per process instead of one per brick.
> Fixes: #1482
> Change-Id: I8dd88f252a950495b71742e2a7588bd5bb019ec7
> Cherry-picked from commit 3f93be77e1acf5baacafa97a320e91e6879d1c0e
> Reviewed on upstream link https://github.com/gluster/glusterfs/issues/1482
> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Change-Id: I8dd88f252a950495b71742e2a7588bd5bb019ec7
Bug: 1898776
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220366
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfsd/src/glusterfsd.c | 4 +
libglusterfs/src/glusterfs/glusterfs.h | 6 ++
xlators/storage/posix/src/posix-common.c | 68 +++++++++++--
xlators/storage/posix/src/posix-handle.h | 3 +-
xlators/storage/posix/src/posix-helpers.c | 131 ++++++++++++++-----------
xlators/storage/posix/src/posix-inode-fd-ops.c | 3 +-
xlators/storage/posix/src/posix-mem-types.h | 1 +
xlators/storage/posix/src/posix.h | 12 ++-
8 files changed, 160 insertions(+), 68 deletions(-)
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index 955bf1d..ac25255 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -1840,9 +1840,13 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
INIT_LIST_HEAD(&cmd_args->xlator_options);
INIT_LIST_HEAD(&cmd_args->volfile_servers);
ctx->pxl_count = 0;
+ ctx->diskxl_count = 0;
pthread_mutex_init(&ctx->fd_lock, NULL);
pthread_cond_init(&ctx->fd_cond, NULL);
INIT_LIST_HEAD(&ctx->janitor_fds);
+ pthread_mutex_init(&ctx->xl_lock, NULL);
+ pthread_cond_init(&ctx->xl_cond, NULL);
+ INIT_LIST_HEAD(&ctx->diskth_xl);
lim.rlim_cur = RLIM_INFINITY;
lim.rlim_max = RLIM_INFINITY;
diff --git a/libglusterfs/src/glusterfs/glusterfs.h b/libglusterfs/src/glusterfs/glusterfs.h
index bf6a987..d3400bf 100644
--- a/libglusterfs/src/glusterfs/glusterfs.h
+++ b/libglusterfs/src/glusterfs/glusterfs.h
@@ -740,7 +740,13 @@ struct _glusterfs_ctx {
pthread_t janitor;
/* The variable is use to save total posix xlator count */
uint32_t pxl_count;
+ uint32_t diskxl_count;
+ /* List of posix xlator use by disk thread*/
+ struct list_head diskth_xl;
+ pthread_mutex_t xl_lock;
+ pthread_cond_t xl_cond;
+ pthread_t disk_space_check;
char volume_id[GF_UUID_BUF_SIZE]; /* Used only in protocol/client */
};
typedef struct _glusterfs_ctx glusterfs_ctx_t;
diff --git a/xlators/storage/posix/src/posix-common.c b/xlators/storage/posix/src/posix-common.c
index e5c6e62..2c9030b 100644
--- a/xlators/storage/posix/src/posix-common.c
+++ b/xlators/storage/posix/src/posix-common.c
@@ -138,6 +138,36 @@ posix_inode(xlator_t *this)
return 0;
}
+static void
+delete_posix_diskxl(xlator_t *this)
+{
+ struct posix_private *priv = this->private;
+ struct posix_diskxl *pxl = priv->pxl;
+ glusterfs_ctx_t *ctx = this->ctx;
+ uint32_t count = 1;
+
+ if (pxl) {
+ pthread_mutex_lock(&ctx->xl_lock);
+ {
+ pxl->detach_notify = _gf_true;
+ while (pxl->is_use)
+ pthread_cond_wait(&pxl->cond, &ctx->xl_lock);
+ list_del_init(&pxl->list);
+ priv->pxl = NULL;
+ count = --ctx->diskxl_count;
+ if (count == 0)
+ pthread_cond_signal(&ctx->xl_cond);
+ }
+ pthread_mutex_unlock(&ctx->xl_lock);
+ pthread_cond_destroy(&pxl->cond);
+ GF_FREE(pxl);
+ if (count == 0) {
+ pthread_join(ctx->disk_space_check, NULL);
+ ctx->disk_space_check = 0;
+ }
+ }
+}
+
/**
* notify - when parent sends PARENT_UP, send CHILD_UP event from here
*/
@@ -194,6 +224,8 @@ posix_notify(xlator_t *this, int32_t event, void *data, ...)
}
pthread_mutex_unlock(&ctx->fd_lock);
+ delete_posix_diskxl(this);
+
gf_log(this->name, GF_LOG_INFO, "Sending CHILD_DOWN for brick %s",
victim->name);
default_notify(this->parents->xlator, GF_EVENT_CHILD_DOWN, data);
@@ -318,6 +350,7 @@ posix_reconfigure(xlator_t *this, dict_t *options)
int32_t force_directory_mode = -1;
int32_t create_mask = -1;
int32_t create_directory_mask = -1;
+ double old_disk_reserve = 0.0;
priv = this->private;
@@ -383,6 +416,7 @@ posix_reconfigure(xlator_t *this, dict_t *options)
" fallback to <hostname>:<export>");
}
+ old_disk_reserve = priv->disk_reserve;
GF_OPTION_RECONF("reserve", priv->disk_reserve, options, percent_or_size,
out);
/* option can be any one of percent or bytes */
@@ -390,11 +424,19 @@ posix_reconfigure(xlator_t *this, dict_t *options)
if (priv->disk_reserve < 100.0)
priv->disk_unit = 'p';
- if (priv->disk_reserve) {
+ /* Delete a pxl object from a list of disk_reserve while something
+ is changed for reserve option during graph reconfigure
+ */
+ if (old_disk_reserve != priv->disk_reserve) {
+ delete_posix_diskxl(this);
+ old_disk_reserve = 0;
+ }
+
+ if (!old_disk_reserve && priv->disk_reserve) {
ret = posix_spawn_disk_space_check_thread(this);
if (ret) {
gf_msg(this->name, GF_LOG_INFO, 0, P_MSG_DISK_SPACE_CHECK_FAILED,
- "Getting disk space check from thread failed");
+ "Getting disk space check from thread failed ");
goto out;
}
}
@@ -1008,13 +1050,13 @@ posix_init(xlator_t *this)
" fallback to <hostname>:<export>");
}
- _private->disk_space_check_active = _gf_false;
_private->disk_space_full = 0;
GF_OPTION_INIT("reserve", _private->disk_reserve, percent_or_size, out);
/* option can be any one of percent or bytes */
_private->disk_unit = 0;
+ pthread_cond_init(&_private->fd_cond, NULL);
if (_private->disk_reserve < 100.0)
_private->disk_unit = 'p';
@@ -1162,12 +1204,6 @@ posix_fini(xlator_t *this)
priv->health_check = 0;
}
- if (priv->disk_space_check) {
- priv->disk_space_check_active = _gf_false;
- (void)gf_thread_cleanup_xint(priv->disk_space_check);
- priv->disk_space_check = 0;
- }
-
if (priv->janitor) {
/*TODO: Make sure the synctask is also complete */
ret = gf_tw_del_timer(this->ctx->tw->timer_wheel, priv->janitor);
@@ -1192,10 +1228,24 @@ posix_fini(xlator_t *this)
pthread_join(ctx->janitor, NULL);
}
+ pthread_mutex_lock(&ctx->xl_lock);
+ {
+ count = --ctx->diskxl_count;
+ if (count == 0)
+ pthread_cond_signal(&ctx->xl_cond);
+ }
+ pthread_mutex_unlock(&ctx->xl_lock);
+
+ if (count == 0) {
+ pthread_join(ctx->disk_space_check, NULL);
+ ctx->disk_space_check = 0;
+ }
+
if (priv->fsyncer) {
(void)gf_thread_cleanup_xint(priv->fsyncer);
priv->fsyncer = 0;
}
+
/*unlock brick dir*/
if (priv->mount_lock)
(void)sys_closedir(priv->mount_lock);
diff --git a/xlators/storage/posix/src/posix-handle.h b/xlators/storage/posix/src/posix-handle.h
index c4d7cb1..8e4c719 100644
--- a/xlators/storage/posix/src/posix-handle.h
+++ b/xlators/storage/posix/src/posix-handle.h
@@ -206,5 +206,6 @@ int
posix_check_internal_writes(xlator_t *this, fd_t *fd, int sysfd, dict_t *xdata);
void
-posix_disk_space_check(xlator_t *this);
+posix_disk_space_check(struct posix_private* priv);
+
#endif /* !_POSIX_HANDLE_H */
diff --git a/xlators/storage/posix/src/posix-helpers.c b/xlators/storage/posix/src/posix-helpers.c
index ceac52a..110d383 100644
--- a/xlators/storage/posix/src/posix-helpers.c
+++ b/xlators/storage/posix/src/posix-helpers.c
@@ -2284,9 +2284,8 @@ unlock:
}
void
-posix_disk_space_check(xlator_t *this)
+posix_disk_space_check(struct posix_private *priv)
{
- struct posix_private *priv = NULL;
char *subvol_path = NULL;
int op_ret = 0;
double size = 0;
@@ -2295,16 +2294,14 @@ posix_disk_space_check(xlator_t *this)
double totsz = 0;
double freesz = 0;
- GF_VALIDATE_OR_GOTO(this->name, this, out);
- priv = this->private;
- GF_VALIDATE_OR_GOTO(this->name, priv, out);
+ GF_VALIDATE_OR_GOTO("posix-helpers", priv, out);
subvol_path = priv->base_path;
op_ret = sys_statvfs(subvol_path, &buf);
if (op_ret == -1) {
- gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_STATVFS_FAILED,
+ gf_msg("posix-disk", GF_LOG_ERROR, errno, P_MSG_STATVFS_FAILED,
"statvfs failed on %s", subvol_path);
goto out;
}
@@ -2328,78 +2325,102 @@ out:
}
static void *
-posix_disk_space_check_thread_proc(void *data)
+posix_ctx_disk_thread_proc(void *data)
{
- xlator_t *this = NULL;
struct posix_private *priv = NULL;
+ glusterfs_ctx_t *ctx = NULL;
uint32_t interval = 0;
- int ret = -1;
-
- this = data;
- priv = this->private;
+ struct posix_diskxl *pthis = NULL;
+ xlator_t *this = NULL;
+ struct timespec sleep_till = {
+ 0,
+ };
+ ctx = data;
interval = 5;
- gf_msg_debug(this->name, 0,
- "disk-space thread started, "
+
+ gf_msg_debug("glusterfs_ctx", 0,
+ "Ctx disk-space thread started, "
"interval = %d seconds",
interval);
- while (1) {
- /* aborting sleep() is a request to exit this thread, sleep()
- * will normally not return when cancelled */
- ret = sleep(interval);
- if (ret > 0)
- break;
- /* prevent thread errors while doing the health-check(s) */
- pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
-
- /* Do the disk-check.*/
- posix_disk_space_check(this);
- if (!priv->disk_space_check_active)
- goto out;
- pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
- }
-out:
- gf_msg_debug(this->name, 0, "disk space check thread exiting");
- LOCK(&priv->lock);
+ pthread_mutex_lock(&ctx->xl_lock);
{
- priv->disk_space_check_active = _gf_false;
+ while (ctx->diskxl_count > 0) {
+ list_for_each_entry(pthis, &ctx->diskth_xl, list)
+ {
+ pthis->is_use = _gf_true;
+ pthread_mutex_unlock(&ctx->xl_lock);
+
+ THIS = this = pthis->xl;
+ priv = this->private;
+
+ posix_disk_space_check(priv);
+
+ pthread_mutex_lock(&ctx->xl_lock);
+ pthis->is_use = _gf_false;
+ /* Send a signal to posix_notify function */
+ if (pthis->detach_notify)
+ pthread_cond_signal(&pthis->cond);
+ }
+
+ timespec_now_realtime(&sleep_till);
+ sleep_till.tv_sec += 5;
+ (void)pthread_cond_timedwait(&ctx->xl_cond, &ctx->xl_lock,
+ &sleep_till);
+ }
}
- UNLOCK(&priv->lock);
+ pthread_mutex_unlock(&ctx->xl_lock);
return NULL;
}
int
-posix_spawn_disk_space_check_thread(xlator_t *xl)
+posix_spawn_disk_space_check_thread(xlator_t *this)
{
- struct posix_private *priv = NULL;
- int ret = -1;
+ int ret = 0;
+ glusterfs_ctx_t *ctx = this->ctx;
+ struct posix_diskxl *pxl = NULL;
+ struct posix_private *priv = this->private;
- priv = xl->private;
+ pxl = GF_CALLOC(1, sizeof(struct posix_diskxl), gf_posix_mt_diskxl_t);
+ if (!pxl) {
+ ret = -ENOMEM;
+ gf_log(this->name, GF_LOG_ERROR,
+ "Calloc is failed to allocate "
+ "memory for diskxl object");
+ goto out;
+ }
+ pthread_cond_init(&pxl->cond, NULL);
- LOCK(&priv->lock);
+ pthread_mutex_lock(&ctx->xl_lock);
{
- /* cancel the running thread */
- if (priv->disk_space_check_active == _gf_true) {
- pthread_cancel(priv->disk_space_check);
- priv->disk_space_check_active = _gf_false;
- }
+ if (ctx->diskxl_count++ == 0) {
+ ret = gf_thread_create(&ctx->disk_space_check, NULL,
+ posix_ctx_disk_thread_proc, ctx,
+ "posixctxres");
- ret = gf_thread_create(&priv->disk_space_check, NULL,
- posix_disk_space_check_thread_proc, xl,
- "posix_reserve");
- if (ret) {
- priv->disk_space_check_active = _gf_false;
- gf_msg(xl->name, GF_LOG_ERROR, errno, P_MSG_DISK_SPACE_CHECK_FAILED,
- "unable to setup disk space check thread");
- goto unlock;
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, P_MSG_THREAD_FAILED,
+ "spawning disk space check thread failed");
+ ctx->diskxl_count--;
+ pthread_mutex_unlock(&ctx->xl_lock);
+ goto out;
+ }
}
+ pxl->xl = this;
+ priv->pxl = (void *)pxl;
+ list_add_tail(&pxl->list, &ctx->diskth_xl);
+ }
+ pthread_mutex_unlock(&ctx->xl_lock);
- priv->disk_space_check_active = _gf_true;
+out:
+ if (ret) {
+ if (pxl) {
+ pthread_cond_destroy(&pxl->cond);
+ GF_FREE(pxl);
+ }
}
-unlock:
- UNLOCK(&priv->lock);
return ret;
}
diff --git a/xlators/storage/posix/src/posix-inode-fd-ops.c b/xlators/storage/posix/src/posix-inode-fd-ops.c
index 1d37aed..761e018 100644
--- a/xlators/storage/posix/src/posix-inode-fd-ops.c
+++ b/xlators/storage/posix/src/posix-inode-fd-ops.c
@@ -37,6 +37,7 @@
#include <fcntl.h>
#endif /* HAVE_LINKAT */
+#include "posix-handle.h"
#include <glusterfs/glusterfs.h>
#include <glusterfs/checksum.h>
#include <glusterfs/dict.h>
@@ -713,7 +714,7 @@ posix_do_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags,
option behaviour
*/
if (priv->disk_reserve)
- posix_disk_space_check(this);
+ posix_disk_space_check(priv);
DISK_SPACE_CHECK_AND_GOTO(frame, priv, xdata, ret, ret, unlock);
diff --git a/xlators/storage/posix/src/posix-mem-types.h b/xlators/storage/posix/src/posix-mem-types.h
index 2253f38..bb4c56d 100644
--- a/xlators/storage/posix/src/posix-mem-types.h
+++ b/xlators/storage/posix/src/posix-mem-types.h
@@ -20,6 +20,7 @@ enum gf_posix_mem_types_ {
gf_posix_mt_paiocb,
gf_posix_mt_inode_ctx_t,
gf_posix_mt_mdata_attr,
+ gf_posix_mt_diskxl_t,
gf_posix_mt_end
};
#endif
diff --git a/xlators/storage/posix/src/posix.h b/xlators/storage/posix/src/posix.h
index 07f367b..4be979c 100644
--- a/xlators/storage/posix/src/posix.h
+++ b/xlators/storage/posix/src/posix.h
@@ -36,7 +36,6 @@
#include <glusterfs/compat.h>
#include <glusterfs/timer.h>
#include "posix-mem-types.h"
-#include "posix-handle.h"
#include <glusterfs/call-stub.h>
#ifdef HAVE_LIBAIO
@@ -138,6 +137,14 @@ struct posix_fd {
char _pad[4]; /* manual padding */
};
+struct posix_diskxl {
+ pthread_cond_t cond;
+ struct list_head list;
+ xlator_t *xl;
+ gf_boolean_t detach_notify;
+ gf_boolean_t is_use;
+};
+
struct posix_private {
char *base_path;
int32_t base_path_length;
@@ -207,6 +214,7 @@ struct posix_private {
pthread_mutex_t janitor_mutex;
pthread_cond_t janitor_cond;
pthread_cond_t fd_cond;
+ pthread_cond_t disk_cond;
int fsync_queue_count;
enum {
@@ -233,7 +241,6 @@ struct posix_private {
char disk_unit;
uint32_t disk_space_full;
pthread_t disk_space_check;
- gf_boolean_t disk_space_check_active;
#ifdef GF_DARWIN_HOST_OS
enum {
@@ -263,6 +270,7 @@ struct posix_private {
gf_boolean_t ctime;
gf_boolean_t janitor_task_stop;
uint32_t rel_fdcount;
+ void *pxl;
};
typedef struct {
--
1.8.3.1
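The mechanism is a single process-wide worker that sweeps a registered
list under a mutex: a counter decides when the thread is created and
joined, and a detach handshake (is_use / detach_notify in the patch)
guarantees an entry is never freed mid-scan. A self-contained toy of the
register/sweep/last-one-joins part, a minimal sketch with hypothetical
names (the detach handshake is omitted for brevity):

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    struct entry { struct entry *next; int id; };

    static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
    static struct entry   *head;
    static unsigned        count;   /* registered entries */
    static pthread_t       worker;

    static void *scan_proc(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lk);
        while (count > 0) {
            for (struct entry *e = head; e; e = e->next)
                printf("scan entry %d\n", e->id);  /* one check per entry */
            struct timespec t;
            clock_gettime(CLOCK_REALTIME, &t);
            t.tv_sec += 1;                         /* interval between sweeps */
            pthread_cond_timedwait(&cv, &lk, &t);  /* also wakes on shutdown */
        }
        pthread_mutex_unlock(&lk);
        return NULL;
    }

    static void reg(struct entry *e)
    {
        pthread_mutex_lock(&lk);
        e->next = head; head = e;
        if (count++ == 0)                          /* first entry spawns thread */
            pthread_create(&worker, NULL, scan_proc, NULL);
        pthread_mutex_unlock(&lk);
    }

    static void unreg_all(void)
    {
        pthread_mutex_lock(&lk);
        head = NULL; count = 0;
        pthread_cond_signal(&cv);                  /* let the worker exit */
        pthread_mutex_unlock(&lk);
        pthread_join(worker, NULL);                /* last one joins the thread */
    }

    int main(void)
    {
        struct entry a = { .id = 1 }, b = { .id = 2 };
        reg(&a); reg(&b);
        sleep(2);
        unreg_all();
        return 0;
    }

With this shape, 250 attached bricks cost one scanner thread instead of
250, which is the whole point of the patch.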

View File

@ -0,0 +1,764 @@
From b3a17b67a69142eef1b4adde3409d5e54dda1e0b Mon Sep 17 00:00:00 2001
From: Amar Tumballi <amarts@redhat.com>
Date: Sat, 9 Feb 2019 13:23:06 +0530
Subject: [PATCH 507/511] inode: make critical section smaller
Do all the 'static' tasks outside of the locked region.
* hash_dentry() and hash_gfid() are now called outside the locked region.
* remove the extra __dentry_hash exported in libglusterfs.sym
* avoid checks in locked functions if the check is already done in the
  calling function.
* implement dentry_destroy(), which handles freeing of the dentry
  separately from dentry_unset (which takes care of separating the
  dentry from the inode and table)
> Updates: bz#1670031
> Change-Id: I584213e0748464bb427fbdef3c4ab6615d7d5eb0
> Signed-off-by: Amar Tumballi <amarts@redhat.com>
> (Cherry pick from commit 8a90d346b9d3f69ff11241feb0011c90a8e57e30)
> (Review on upstream link https://review.gluster.org/#/c/glusterfs/+/22184/)
Change-Id: I584213e0748464bb427fbdef3c4ab6615d7d5eb0
BUG: 1898777
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/221189
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/glusterfs/inode.h | 3 -
libglusterfs/src/inode.c | 323 +++++++++++++------------------------
libglusterfs/src/libglusterfs.sym | 1 -
3 files changed, 111 insertions(+), 216 deletions(-)
diff --git a/libglusterfs/src/glusterfs/inode.h b/libglusterfs/src/glusterfs/inode.h
index 4421c47..c875653 100644
--- a/libglusterfs/src/glusterfs/inode.h
+++ b/libglusterfs/src/glusterfs/inode.h
@@ -167,9 +167,6 @@ inode_rename(inode_table_t *table, inode_t *olddir, const char *oldname,
inode_t *newdir, const char *newname, inode_t *inode,
struct iatt *stbuf);
-dentry_t *
-__dentry_grep(inode_table_t *table, inode_t *parent, const char *name);
-
inode_t *
inode_grep(inode_table_t *table, inode_t *parent, const char *name);
diff --git a/libglusterfs/src/inode.c b/libglusterfs/src/inode.c
index 4c3c546..71b2d2a 100644
--- a/libglusterfs/src/inode.c
+++ b/libglusterfs/src/inode.c
@@ -159,27 +159,15 @@ hash_dentry(inode_t *parent, const char *name, int mod)
static int
hash_gfid(uuid_t uuid, int mod)
{
- int ret = 0;
-
- ret = uuid[15] + (uuid[14] << 8);
-
- return ret;
+ return ((uuid[15] + (uuid[14] << 8)) % mod);
}
static void
-__dentry_hash(dentry_t *dentry)
+__dentry_hash(dentry_t *dentry, const int hash)
{
inode_table_t *table = NULL;
- int hash = 0;
-
- if (!dentry) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_DENTRY_NOT_FOUND,
- "dentry not found");
- return;
- }
table = dentry->inode->table;
- hash = hash_dentry(dentry->parent, dentry->name, table->hashsize);
list_del_init(&dentry->hash);
list_add(&dentry->hash, &table->name_hash[hash]);
@@ -188,49 +176,44 @@ __dentry_hash(dentry_t *dentry)
static int
__is_dentry_hashed(dentry_t *dentry)
{
- if (!dentry) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_DENTRY_NOT_FOUND,
- "dentry not found");
- return 0;
- }
-
return !list_empty(&dentry->hash);
}
static void
__dentry_unhash(dentry_t *dentry)
{
- if (!dentry) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_DENTRY_NOT_FOUND,
- "dentry not found");
- return;
- }
-
list_del_init(&dentry->hash);
}
static void
-__dentry_unset(dentry_t *dentry)
+dentry_destroy(dentry_t *dentry)
{
- if (!dentry) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_DENTRY_NOT_FOUND,
- "dentry not found");
+ if (!dentry)
return;
- }
+
+ GF_FREE(dentry->name);
+ dentry->name = NULL;
+ mem_put(dentry);
+
+ return;
+}
+
+static dentry_t *
+__dentry_unset(dentry_t *dentry)
+{
+ if (!dentry)
+ return NULL;
__dentry_unhash(dentry);
list_del_init(&dentry->inode_list);
- GF_FREE(dentry->name);
- dentry->name = NULL;
-
if (dentry->parent) {
__inode_unref(dentry->parent, false);
dentry->parent = NULL;
}
- mem_put(dentry);
+ return dentry;
}
static int
@@ -289,22 +272,14 @@ static int
__is_dentry_cyclic(dentry_t *dentry)
{
int ret = 0;
- inode_t *inode = NULL;
- char *name = "<nul>";
ret = __foreach_ancestor_dentry(dentry, __check_cycle, dentry->inode);
if (ret) {
- inode = dentry->inode;
-
- if (dentry->name)
- name = dentry->name;
-
gf_msg(dentry->inode->table->name, GF_LOG_CRITICAL, 0,
LG_MSG_DENTRY_CYCLIC_LOOP,
- "detected cyclic loop "
- "formation during inode linkage. inode (%s) linking "
- "under itself as %s",
- uuid_utoa(inode->gfid), name);
+ "detected cyclic loop formation during inode linkage. "
+ "inode (%s) linking under itself as %s",
+ uuid_utoa(dentry->inode->gfid), dentry->name);
}
return ret;
@@ -313,41 +288,19 @@ __is_dentry_cyclic(dentry_t *dentry)
static void
__inode_unhash(inode_t *inode)
{
- if (!inode) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
- "inode not found");
- return;
- }
-
list_del_init(&inode->hash);
}
static int
__is_inode_hashed(inode_t *inode)
{
- if (!inode) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
- "inode not found");
- return 0;
- }
-
return !list_empty(&inode->hash);
}
static void
-__inode_hash(inode_t *inode)
+__inode_hash(inode_t *inode, const int hash)
{
- inode_table_t *table = NULL;
- int hash = 0;
-
- if (!inode) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
- "inode not found");
- return;
- }
-
- table = inode->table;
- hash = hash_gfid(inode->gfid, 65536);
+ inode_table_t *table = inode->table;
list_del_init(&inode->hash);
list_add(&inode->hash, &table->inode_hash[hash]);
@@ -359,12 +312,6 @@ __dentry_search_for_inode(inode_t *inode, uuid_t pargfid, const char *name)
dentry_t *dentry = NULL;
dentry_t *tmp = NULL;
- if (!inode || !name) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
- "inode || name not found");
- return NULL;
- }
-
/* earlier, just the ino was sent, which could have been 0, now
we deal with gfid, and if sent gfid is null or 0, no need to
continue with the check */
@@ -390,12 +337,6 @@ __inode_ctx_free(inode_t *inode)
xlator_t *xl = NULL;
xlator_t *old_THIS = NULL;
- if (!inode) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
- "inode not found");
- return;
- }
-
if (!inode->_ctx) {
gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_CTX_NULL,
"_ctx not found");
@@ -424,12 +365,6 @@ noctx:
static void
__inode_destroy(inode_t *inode)
{
- if (!inode) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
- "inode not found");
- return;
- }
-
__inode_ctx_free(inode);
LOCK_DESTROY(&inode->lock);
@@ -472,9 +407,6 @@ inode_ctx_merge(fd_t *fd, inode_t *inode, inode_t *linked_inode)
static void
__inode_activate(inode_t *inode)
{
- if (!inode)
- return;
-
list_move(&inode->list, &inode->table->active);
inode->table->active_size++;
}
@@ -485,19 +417,13 @@ __inode_passivate(inode_t *inode)
dentry_t *dentry = NULL;
dentry_t *t = NULL;
- if (!inode) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
- "inode not found");
- return;
- }
-
list_move_tail(&inode->list, &inode->table->lru);
inode->table->lru_size++;
list_for_each_entry_safe(dentry, t, &inode->dentry_list, inode_list)
{
if (!__is_dentry_hashed(dentry))
- __dentry_unset(dentry);
+ dentry_destroy(__dentry_unset(dentry));
}
}
@@ -507,12 +433,6 @@ __inode_retire(inode_t *inode)
dentry_t *dentry = NULL;
dentry_t *t = NULL;
- if (!inode) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
- "inode not found");
- return;
- }
-
list_move_tail(&inode->list, &inode->table->purge);
inode->table->purge_size++;
@@ -520,7 +440,7 @@ __inode_retire(inode_t *inode)
list_for_each_entry_safe(dentry, t, &inode->dentry_list, inode_list)
{
- __dentry_unset(dentry);
+ dentry_destroy(__dentry_unset(dentry));
}
}
@@ -547,9 +467,6 @@ __inode_unref(inode_t *inode, bool clear)
xlator_t *this = NULL;
uint64_t nlookup = 0;
- if (!inode)
- return NULL;
-
/*
* Root inode should always be in active list of inode table. So unrefs
* on root inode are no-ops.
@@ -677,16 +594,10 @@ inode_ref(inode_t *inode)
}
static dentry_t *
-__dentry_create(inode_t *inode, inode_t *parent, const char *name)
+dentry_create(inode_t *inode, inode_t *parent, const char *name)
{
dentry_t *newd = NULL;
- if (!inode || !parent || !name) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
- "inode || parent || name not found");
- return NULL;
- }
-
newd = mem_get0(parent->table->dentry_pool);
if (newd == NULL) {
goto out;
@@ -702,10 +613,6 @@ __dentry_create(inode_t *inode, inode_t *parent, const char *name)
goto out;
}
- if (parent)
- newd->parent = __inode_ref(parent, false);
-
- list_add(&newd->inode_list, &inode->dentry_list);
newd->inode = inode;
out:
@@ -717,14 +624,6 @@ __inode_create(inode_table_t *table)
{
inode_t *newi = NULL;
- if (!table) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0,
- LG_MSG_INODE_TABLE_NOT_FOUND,
- "table not "
- "found");
- return NULL;
- }
-
newi = mem_get0(table->inode_pool);
if (!newi) {
goto out;
@@ -795,9 +694,6 @@ __inode_ref_reduce_by_n(inode_t *inode, uint64_t nref)
{
uint64_t nlookup = 0;
- if (!inode)
- return NULL;
-
GF_ASSERT(inode->ref >= nref);
inode->ref -= nref;
@@ -837,17 +733,12 @@ inode_forget_atomic(inode_t *inode, uint64_t nlookup)
}
dentry_t *
-__dentry_grep(inode_table_t *table, inode_t *parent, const char *name)
+__dentry_grep(inode_table_t *table, inode_t *parent, const char *name,
+ const int hash)
{
- int hash = 0;
dentry_t *dentry = NULL;
dentry_t *tmp = NULL;
- if (!table || !name || !parent)
- return NULL;
-
- hash = hash_dentry(parent, name, table->hashsize);
-
list_for_each_entry(tmp, &table->name_hash[hash], hash)
{
if (tmp->parent == parent && !strcmp(tmp->name, name)) {
@@ -872,15 +763,16 @@ inode_grep(inode_table_t *table, inode_t *parent, const char *name)
return NULL;
}
+ int hash = hash_dentry(parent, name, table->hashsize);
+
pthread_mutex_lock(&table->lock);
{
- dentry = __dentry_grep(table, parent, name);
-
- if (dentry)
+ dentry = __dentry_grep(table, parent, name, hash);
+ if (dentry) {
inode = dentry->inode;
-
- if (inode)
- __inode_ref(inode, false);
+ if (inode)
+ __inode_ref(inode, false);
+ }
}
pthread_mutex_unlock(&table->lock);
@@ -947,17 +839,18 @@ inode_grep_for_gfid(inode_table_t *table, inode_t *parent, const char *name,
return ret;
}
+ int hash = hash_dentry(parent, name, table->hashsize);
+
pthread_mutex_lock(&table->lock);
{
- dentry = __dentry_grep(table, parent, name);
-
- if (dentry)
+ dentry = __dentry_grep(table, parent, name, hash);
+ if (dentry) {
inode = dentry->inode;
-
- if (inode) {
- gf_uuid_copy(gfid, inode->gfid);
- *type = inode->ia_type;
- ret = 0;
+ if (inode) {
+ gf_uuid_copy(gfid, inode->gfid);
+ *type = inode->ia_type;
+ ret = 0;
+ }
}
}
pthread_mutex_unlock(&table->lock);
@@ -978,25 +871,14 @@ __is_root_gfid(uuid_t gfid)
}
inode_t *
-__inode_find(inode_table_t *table, uuid_t gfid)
+__inode_find(inode_table_t *table, uuid_t gfid, const int hash)
{
inode_t *inode = NULL;
inode_t *tmp = NULL;
- int hash = 0;
-
- if (!table) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0,
- LG_MSG_INODE_TABLE_NOT_FOUND,
- "table not "
- "found");
- goto out;
- }
if (__is_root_gfid(gfid))
return table->root;
- hash = hash_gfid(gfid, 65536);
-
list_for_each_entry(tmp, &table->inode_hash[hash], hash)
{
if (gf_uuid_compare(tmp->gfid, gfid) == 0) {
@@ -1005,7 +887,6 @@ __inode_find(inode_table_t *table, uuid_t gfid)
}
}
-out:
return inode;
}
@@ -1022,9 +903,11 @@ inode_find(inode_table_t *table, uuid_t gfid)
return NULL;
}
+ int hash = hash_gfid(gfid, 65536);
+
pthread_mutex_lock(&table->lock);
{
- inode = __inode_find(table, gfid);
+ inode = __inode_find(table, gfid, hash);
if (inode)
__inode_ref(inode, false);
}
@@ -1035,7 +918,7 @@ inode_find(inode_table_t *table, uuid_t gfid)
static inode_t *
__inode_link(inode_t *inode, inode_t *parent, const char *name,
- struct iatt *iatt)
+ struct iatt *iatt, const int dhash)
{
dentry_t *dentry = NULL;
dentry_t *old_dentry = NULL;
@@ -1043,16 +926,7 @@ __inode_link(inode_t *inode, inode_t *parent, const char *name,
inode_table_t *table = NULL;
inode_t *link_inode = NULL;
- if (!inode) {
- errno = EINVAL;
- return NULL;
- }
-
table = inode->table;
- if (!table) {
- errno = EINVAL;
- return NULL;
- }
if (parent) {
/* We should prevent inode linking between different
@@ -1090,14 +964,16 @@ __inode_link(inode_t *inode, inode_t *parent, const char *name,
return NULL;
}
- old_inode = __inode_find(table, iatt->ia_gfid);
+ int ihash = hash_gfid(iatt->ia_gfid, 65536);
+
+ old_inode = __inode_find(table, iatt->ia_gfid, ihash);
if (old_inode) {
link_inode = old_inode;
} else {
gf_uuid_copy(inode->gfid, iatt->ia_gfid);
inode->ia_type = iatt->ia_type;
- __inode_hash(inode);
+ __inode_hash(inode, ihash);
}
} else {
/* @old_inode serves another important purpose - it indicates
@@ -1112,22 +988,16 @@ __inode_link(inode_t *inode, inode_t *parent, const char *name,
old_inode = inode;
}
- if (name) {
- if (!strcmp(name, ".") || !strcmp(name, ".."))
- return link_inode;
-
- if (strchr(name, '/')) {
- GF_ASSERT(!"inode link attempted with '/' in name");
- return NULL;
- }
+ if (name && (!strcmp(name, ".") || !strcmp(name, ".."))) {
+ return link_inode;
}
/* use only link_inode beyond this point */
if (parent) {
- old_dentry = __dentry_grep(table, parent, name);
+ old_dentry = __dentry_grep(table, parent, name, dhash);
if (!old_dentry || old_dentry->inode != link_inode) {
- dentry = __dentry_create(link_inode, parent, name);
+ dentry = dentry_create(link_inode, parent, name);
if (!dentry) {
gf_msg_callingfn(
THIS->name, GF_LOG_ERROR, 0, LG_MSG_DENTRY_CREATE_FAILED,
@@ -1137,15 +1007,20 @@ __inode_link(inode_t *inode, inode_t *parent, const char *name,
errno = ENOMEM;
return NULL;
}
+
+ /* dentry linking needs to happen inside lock */
+ dentry->parent = __inode_ref(parent, false);
+ list_add(&dentry->inode_list, &link_inode->dentry_list);
+
if (old_inode && __is_dentry_cyclic(dentry)) {
errno = ELOOP;
- __dentry_unset(dentry);
+ dentry_destroy(__dentry_unset(dentry));
return NULL;
}
- __dentry_hash(dentry);
+ __dentry_hash(dentry, dhash);
if (old_dentry)
- __dentry_unset(old_dentry);
+ dentry_destroy(__dentry_unset(old_dentry));
}
}
@@ -1155,6 +1030,7 @@ __inode_link(inode_t *inode, inode_t *parent, const char *name,
inode_t *
inode_link(inode_t *inode, inode_t *parent, const char *name, struct iatt *iatt)
{
+ int hash = 0;
inode_table_t *table = NULL;
inode_t *linked_inode = NULL;
@@ -1166,10 +1042,18 @@ inode_link(inode_t *inode, inode_t *parent, const char *name, struct iatt *iatt)
table = inode->table;
+ if (parent && name) {
+ hash = hash_dentry(parent, name, table->hashsize);
+ }
+
+ if (name && strchr(name, '/')) {
+ GF_ASSERT(!"inode link attempted with '/' in name");
+ return NULL;
+ }
+
pthread_mutex_lock(&table->lock);
{
- linked_inode = __inode_link(inode, parent, name, iatt);
-
+ linked_inode = __inode_link(inode, parent, name, iatt, hash);
if (linked_inode)
__inode_ref(linked_inode, false);
}
@@ -1312,48 +1196,47 @@ inode_invalidate(inode_t *inode)
return ret;
}
-static void
+static dentry_t *
__inode_unlink(inode_t *inode, inode_t *parent, const char *name)
{
dentry_t *dentry = NULL;
char pgfid[64] = {0};
char gfid[64] = {0};
- if (!inode || !parent || !name)
- return;
-
dentry = __dentry_search_for_inode(inode, parent->gfid, name);
/* dentry NULL for corrupted backend */
if (dentry) {
- __dentry_unset(dentry);
+ dentry = __dentry_unset(dentry);
} else {
gf_msg("inode", GF_LOG_WARNING, 0, LG_MSG_DENTRY_NOT_FOUND,
"%s/%s: dentry not found in %s",
uuid_utoa_r(parent->gfid, pgfid), name,
uuid_utoa_r(inode->gfid, gfid));
}
+
+ return dentry;
}
void
inode_unlink(inode_t *inode, inode_t *parent, const char *name)
{
- inode_table_t *table = NULL;
+ inode_table_t *table;
+ dentry_t *dentry;
- if (!inode) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
- "inode not found");
+ if (!inode || !parent || !name)
return;
- }
table = inode->table;
pthread_mutex_lock(&table->lock);
{
- __inode_unlink(inode, parent, name);
+ dentry = __inode_unlink(inode, parent, name);
}
pthread_mutex_unlock(&table->lock);
+ dentry_destroy(dentry);
+
inode_table_prune(table);
}
@@ -1362,6 +1245,9 @@ inode_rename(inode_table_t *table, inode_t *srcdir, const char *srcname,
inode_t *dstdir, const char *dstname, inode_t *inode,
struct iatt *iatt)
{
+ int hash = 0;
+ dentry_t *dentry = NULL;
+
if (!inode) {
gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
"inode not found");
@@ -1370,13 +1256,26 @@ inode_rename(inode_table_t *table, inode_t *srcdir, const char *srcname,
table = inode->table;
+ if (dstname && strchr(dstname, '/')) {
+ GF_ASSERT(!"inode link attempted with '/' in name");
+ return -1;
+ }
+
+ if (dstdir && dstname) {
+ hash = hash_dentry(dstdir, dstname, table->hashsize);
+ }
+
pthread_mutex_lock(&table->lock);
{
- __inode_link(inode, dstdir, dstname, iatt);
- __inode_unlink(inode, srcdir, srcname);
+ __inode_link(inode, dstdir, dstname, iatt, hash);
+ /* pick the old dentry */
+ dentry = __inode_unlink(inode, srcdir, srcname);
}
pthread_mutex_unlock(&table->lock);
+ /* free the old dentry */
+ dentry_destroy(dentry);
+
inode_table_prune(table);
return 0;
@@ -1447,12 +1346,6 @@ inode_parent(inode_t *inode, uuid_t pargfid, const char *name)
static int
__inode_has_dentry(inode_t *inode)
{
- if (!inode) {
- gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
- "inode not found");
- return 0;
- }
-
return !list_empty(&inode->dentry_list);
}
@@ -1461,6 +1354,12 @@ inode_has_dentry(inode_t *inode)
{
int dentry_present = 0;
+ if (!inode) {
+ gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, LG_MSG_INODE_NOT_FOUND,
+ "inode not found");
+ return 0;
+ }
+
LOCK(&inode->lock);
{
dentry_present = __inode_has_dentry(inode);
@@ -1720,7 +1619,7 @@ __inode_table_init_root(inode_table_t *table)
iatt.ia_ino = 1;
iatt.ia_type = IA_IFDIR;
- __inode_link(root, NULL, NULL, &iatt);
+ __inode_link(root, NULL, NULL, &iatt, 0);
table->root = root;
}
diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym
index 5a721e0..d060292 100644
--- a/libglusterfs/src/libglusterfs.sym
+++ b/libglusterfs/src/libglusterfs.sym
@@ -357,7 +357,6 @@ default_copy_file_range
default_copy_file_range_cbk
default_copy_file_range_failure_cbk
default_copy_file_range_resume
-__dentry_grep
dht_is_linkfile
dict_add
dict_addn
--
1.8.3.1
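The core trick is mechanical: anything whose inputs are immutable, such
as the hash of a name or gfid, can be computed before taking the table
mutex, shrinking the serialized region to the list operations themselves.
A self-contained sketch of the same shape (hypothetical table, not the
glusterfs inode code):

    #include <pthread.h>
    #include <stdio.h>

    #define NBUCKETS 1024

    struct node { struct node *next; const char *name; };

    struct table {
        pthread_mutex_t lock;
        struct node    *bucket[NBUCKETS];
    };

    static unsigned hash_name(const char *s)   /* pure: safe outside lock */
    {
        unsigned h = 5381;
        while (*s)
            h = h * 33 + (unsigned char)*s++;
        return h % NBUCKETS;
    }

    static void table_insert(struct table *t, struct node *n)
    {
        /* do the 'static' work before locking, as the patch does */
        const unsigned h = hash_name(n->name);

        pthread_mutex_lock(&t->lock);
        n->next = t->bucket[h];                /* only list ops serialized */
        t->bucket[h] = n;
        pthread_mutex_unlock(&t->lock);
    }

    int main(void)
    {
        static struct table t = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct node n = { .name = "example" };
        table_insert(&t, &n);
        printf("inserted into bucket %u\n", hash_name(n.name));
        return 0;
    }

The dentry_destroy() split in the patch follows the same principle from
the other direction: unlinking happens under the lock, but the GF_FREE
and mem_put calls are deferred until after it is dropped.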

View File

@ -0,0 +1,232 @@
From 87b7689f7727a542c5afa22bdebd3781dd650a2f Mon Sep 17 00:00:00 2001
From: Csaba Henk <csaba@redhat.com>
Date: Fri, 17 Jul 2020 11:33:36 +0200
Subject: [PATCH 508/511] fuse: fetch arbitrary number of groups from
/proc/[pid]/status
Glusterfs so far constrained itself with an arbitrary limit (32)
for the number of groups read from /proc/[pid]/status (this was
the number of groups shown there prior to Linux commit
v3.7-9553-g8d238027b87e (v3.8-rc1~74^2~59); since this commit, all
groups are shown).
With this change we'll read groups up to the number Glusterfs
supports in general (64k).
Note: the actual number of groups that are made use of in a
regular Glusterfs setup shall still be capped at ~93 due to limitations
of the RPC transport. To be able to handle more groups than that,
brick side gid resolution (server.manage-gids option) can be used along
with NIS, LDAP or other such networked directory service (see
https://github.com/gluster/glusterdocs/blob/5ba15a2/docs/Administrator%20Guide/Handling-of-users-with-many-groups.md#limit-in-the-glusterfs-protocol
).
Also adding some diagnostic messages to frame_fill_groups().
Upstream:
> Reviewed-on: https://review.gluster.org/c/glusterfs/+/24721
> Change-Id: I271f3dc3e6d3c44d6d989c7a2073ea5f16c26ee0
> fixes: #1075
> Signed-off-by: Csaba Henk <csaba@redhat.com>
BUG: 1749304
Change-Id: I80bf99d34087fb95768bf2259d8c4774d9f5d0c5
Signed-off-by: Csaba Henk <csaba@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/220920
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/glusterfs/stack.h | 7 ++++
tests/bugs/fuse/many-groups-for-acl.t | 13 ++++++-
xlators/mount/fuse/src/fuse-helpers.c | 71 +++++++++++++++++++++++------------
3 files changed, 65 insertions(+), 26 deletions(-)
diff --git a/libglusterfs/src/glusterfs/stack.h b/libglusterfs/src/glusterfs/stack.h
index 1758550..bd466d8 100644
--- a/libglusterfs/src/glusterfs/stack.h
+++ b/libglusterfs/src/glusterfs/stack.h
@@ -429,6 +429,7 @@ call_stack_alloc_groups(call_stack_t *stack, int ngrps)
if (ngrps <= SMALL_GROUP_COUNT) {
stack->groups = stack->groups_small;
} else {
+ GF_FREE(stack->groups_large);
stack->groups_large = GF_CALLOC(ngrps, sizeof(gid_t),
gf_common_mt_groups_t);
if (!stack->groups_large)
@@ -442,6 +443,12 @@ call_stack_alloc_groups(call_stack_t *stack, int ngrps)
}
static inline int
+call_stack_groups_capacity(call_stack_t *stack)
+{
+ return max(stack->ngrps, SMALL_GROUP_COUNT);
+}
+
+static inline int
call_frames_count(call_stack_t *call_stack)
{
call_frame_t *pos;
diff --git a/tests/bugs/fuse/many-groups-for-acl.t b/tests/bugs/fuse/many-groups-for-acl.t
index d959f75..a51b1bc 100755
--- a/tests/bugs/fuse/many-groups-for-acl.t
+++ b/tests/bugs/fuse/many-groups-for-acl.t
@@ -38,6 +38,13 @@ do
done
TEST useradd -o -M -u ${NEW_UID} -g ${NEW_GID} -G ${NEW_USER}-${NEW_GIDS} ${NEW_USER}
+# Linux < 3.8 exports only first 32 gids of pid to userspace
+kernel_exports_few_gids=0
+if [ "$OSTYPE" = Linux ] && \
+ su -m ${NEW_USER} -c "grep ^Groups: /proc/self/status | wc -w | xargs -I@ expr @ - 1 '<' $LAST_GID - $NEW_GID + 1" > /dev/null; then
+ kernel_exports_few_gids=1
+fi
+
# preparation done, start the tests
TEST glusterd
@@ -48,6 +55,8 @@ TEST $CLI volume set $V0 nfs.disable off
TEST $CLI volume set ${V0} server.manage-gids off
TEST $CLI volume start ${V0}
+# This is just a synchronization hack to make sure the bricks are
+# up before going on.
EXPECT_WITHIN ${NFS_EXPORT_TIMEOUT} "1" is_nfs_export_available
# mount the volume with POSIX ACL support, without --resolve-gids
@@ -69,8 +78,8 @@ TEST [ $? -eq 0 ]
su -m ${NEW_USER} -c "touch ${M0}/first-32-gids-2/success > /dev/null"
TEST [ $? -eq 0 ]
-su -m ${NEW_USER} -c "touch ${M0}/gid-64/failure > /dev/null"
-TEST [ $? -ne 0 ]
+su -m ${NEW_USER} -c "touch ${M0}/gid-64/success--if-all-gids-exported > /dev/null"
+TEST [ $? -eq $kernel_exports_few_gids ]
su -m ${NEW_USER} -c "touch ${M0}/gid-120/failure > /dev/null"
TEST [ $? -ne 0 ]
diff --git a/xlators/mount/fuse/src/fuse-helpers.c b/xlators/mount/fuse/src/fuse-helpers.c
index 5bfc40c..6e04cd4 100644
--- a/xlators/mount/fuse/src/fuse-helpers.c
+++ b/xlators/mount/fuse/src/fuse-helpers.c
@@ -139,8 +139,6 @@ get_fuse_state(xlator_t *this, fuse_in_header_t *finh)
return state;
}
-#define FUSE_MAX_AUX_GROUPS \
- 32 /* We can get only up to 32 aux groups from /proc */
void
frame_fill_groups(call_frame_t *frame)
{
@@ -150,8 +148,6 @@ frame_fill_groups(call_frame_t *frame)
char filename[32];
char line[4096];
char *ptr = NULL;
- FILE *fp = NULL;
- int idx = 0;
long int id = 0;
char *saveptr = NULL;
char *endptr = NULL;
@@ -191,45 +187,72 @@ frame_fill_groups(call_frame_t *frame)
call_stack_set_groups(frame->root, ngroups, &mygroups);
} else {
+ FILE *fp = NULL;
+
ret = snprintf(filename, sizeof filename, "/proc/%d/status",
frame->root->pid);
- if (ret >= sizeof filename)
+ if (ret >= sizeof filename) {
+ gf_log(this->name, GF_LOG_ERROR, "procfs path exceeds buffer size");
goto out;
+ }
fp = fopen(filename, "r");
- if (!fp)
+ if (!fp) {
+ gf_log(this->name, GF_LOG_ERROR, "failed to open %s: %s", filename,
+ strerror(errno));
goto out;
+ }
- if (call_stack_alloc_groups(frame->root, ngroups) != 0)
- goto out;
+ for (;;) {
+ gf_boolean_t found_groups = _gf_false;
+ int idx = 0;
- while ((ptr = fgets(line, sizeof line, fp))) {
- if (strncmp(ptr, "Groups:", 7) != 0)
- continue;
+ if (call_stack_alloc_groups(frame->root, ngroups) != 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "failed to allocate gid buffer");
+ goto out;
+ }
+ while ((ptr = fgets(line, sizeof line, fp))) {
+ if (strncmp(ptr, "Groups:", 7) == 0) {
+ found_groups = _gf_true;
+ break;
+ }
+ }
+ if (!found_groups) {
+ gf_log(this->name, GF_LOG_ERROR, "cannot find gid list in %s",
+ filename);
+ break;
+ }
ptr = line + 8;
for (ptr = strtok_r(ptr, " \t\r\n", &saveptr); ptr;
ptr = strtok_r(NULL, " \t\r\n", &saveptr)) {
errno = 0;
id = strtol(ptr, &endptr, 0);
- if (errno == ERANGE)
- break;
- if (!endptr || *endptr)
+ if (errno == ERANGE || !endptr || *endptr) {
+ gf_log(this->name, GF_LOG_ERROR, "failed to parse %s",
+ filename);
break;
- frame->root->groups[idx++] = id;
- if (idx == FUSE_MAX_AUX_GROUPS)
+ }
+ if (idx < call_stack_groups_capacity(frame->root))
+ frame->root->groups[idx] = id;
+ idx++;
+ if (idx == GF_MAX_AUX_GROUPS)
break;
}
-
- frame->root->ngrps = idx;
- break;
+ if (idx > call_stack_groups_capacity(frame->root)) {
+ ngroups = idx;
+ rewind(fp);
+ } else {
+ frame->root->ngrps = idx;
+ break;
+ }
}
+ out:
+ if (fp)
+ fclose(fp);
}
-
-out:
- if (fp)
- fclose(fp);
#elif defined(GF_SOLARIS_HOST_OS)
char filename[32];
char scratch[128];
@@ -245,7 +268,7 @@ out:
fp = fopen(filename, "r");
if (fp != NULL) {
if (fgets(scratch, sizeof scratch, fp) != NULL) {
- ngrps = MIN(prcred->pr_ngroups, FUSE_MAX_AUX_GROUPS);
+ ngrps = MIN(prcred->pr_ngroups, GF_MAX_AUX_GROUPS);
if (call_stack_alloc_groups(frame->root, ngrps) != 0) {
fclose(fp);
return;
--
1.8.3.1
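The two-pass loop added to frame_fill_groups() is worth calling out: parse
the Groups: line while counting entries, and if the count exceeds the
current buffer capacity, grow the buffer and rewind(fp) for a second pass.
A self-contained, Linux-only toy of that loop, reading the caller's own
/proc/self/status (a sketch with a deliberately tiny initial capacity to
force the retry; names are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/types.h>

    int main(void)
    {
        FILE *fp = fopen("/proc/self/status", "r");
        if (!fp)
            return 1;

        size_t cap = 4;              /* deliberately small to force a retry */
        gid_t *groups = malloc(cap * sizeof *groups);
        if (!groups)
            return 1;
        char line[4096];
        size_t n = 0;

        for (;;) {
            int found = 0;
            n = 0;
            while (fgets(line, sizeof line, fp)) {
                if (strncmp(line, "Groups:", 7) == 0) { found = 1; break; }
            }
            if (!found)
                break;               /* no gid list in the file */
            char *save, *tok;
            for (tok = strtok_r(line + 7, " \t\r\n", &save); tok;
                 tok = strtok_r(NULL, " \t\r\n", &save)) {
                if (n < cap)
                    groups[n] = (gid_t)strtol(tok, NULL, 0);
                n++;                 /* keep counting past capacity */
            }
            if (n <= cap)
                break;               /* everything fit: done */
            cap = n;                 /* grow to the observed count ... */
            groups = realloc(groups, cap * sizeof *groups);
            if (!groups)
                return 1;
            rewind(fp);              /* ... and take a second pass */
        }

        for (size_t i = 0; i < n; i++)
            printf("gid %u\n", (unsigned)groups[i]);
        free(groups);
        fclose(fp);
        return 0;
    }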

View File

@ -0,0 +1,407 @@
From a18f03cbf2b5652f8617cb4dd236bb4ca9838d96 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawa@redhat.com>
Date: Tue, 6 Oct 2020 16:54:15 +0530
Subject: [PATCH 509/511] core: configure optimum inode table hash_size for shd
In a brick_mux environment an shd process consumes a lot of memory.
After printing the statedump I found that it allocates 1M per afr
xlator for all bricks. With 4k volumes configured it consumes almost
6G RSS in total, of which 4G is consumed by inode tables:
[cluster/replicate.test1-replicate-0 - usage-type gf_common_mt_list_head memusage]
size=1273488
num_allocs=2
max_size=1273488
max_num_allocs=2
total_allocs=2
The inode_table_new function allocates memory (1M) for the inode and
dentry hash lists. For shd the lru_limit is 1, so we don't need a big
hash table; to reduce the RSS size of the shd process, pass an optimum
bucket count at the time of creating the inode_table.
> Change-Id: I039716d42321a232fdee1ee8fd50295e638715bb
> Fixes: #1538
> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
> (Cherry pick from commit ca6bbc486e76fdb9a8e07119bb10d7fa45b2e93b)
> (Reviewed on upstream link https://github.com/gluster/glusterfs/issues/1538)
Change-Id: I039716d42321a232fdee1ee8fd50295e638715bb
BUG: 1898777
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/221191
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
api/src/glfs-master.c | 2 +-
libglusterfs/src/glusterfs/inode.h | 17 +++++----
libglusterfs/src/inode.c | 53 +++++++++++++++++---------
xlators/cluster/afr/src/afr.c | 10 ++++-
xlators/cluster/dht/src/dht-rebalance.c | 3 +-
xlators/cluster/ec/src/ec.c | 2 +-
xlators/features/bit-rot/src/bitd/bit-rot.c | 2 +-
xlators/features/quota/src/quotad-helpers.c | 2 +-
xlators/features/trash/src/trash.c | 4 +-
xlators/mount/fuse/src/fuse-bridge.c | 6 +--
xlators/nfs/server/src/nfs.c | 2 +-
xlators/protocol/server/src/server-handshake.c | 3 +-
12 files changed, 66 insertions(+), 40 deletions(-)
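The sizing logic in the hunks below reduces to two defaults with
overrides: a prime bucket count (14057) for the dentry hash, a power of
two (65536) for the gfid hash, and much smaller 131/128 tables for shd,
where the LRU limit is 1. A trivial sketch of the selection (hypothetical
wrapper, mirroring the patched inode_table_with_invalidator() defaults):

    #include <stdint.h>
    #include <stdio.h>

    struct itable_sizes { uint32_t dentry_buckets, inode_buckets; };

    /* 0 means "use the historical default" */
    static struct itable_sizes pick_sizes(uint32_t dhash, uint32_t ihash)
    {
        struct itable_sizes s;
        s.dentry_buckets = dhash ? dhash : 14057;   /* prime: even spread */
        s.inode_buckets  = ihash ? ihash : 65536;   /* power of two */
        return s;
    }

    int main(void)
    {
        struct itable_sizes def = pick_sizes(0, 0);     /* regular clients */
        struct itable_sizes shd = pick_sizes(131, 128); /* self-heal daemon */
        printf("default: %u/%u  shd: %u/%u\n", def.dentry_buckets,
               def.inode_buckets, shd.dentry_buckets, shd.inode_buckets);
        return 0;
    }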
diff --git a/api/src/glfs-master.c b/api/src/glfs-master.c
index b4473b1..9e604d3 100644
--- a/api/src/glfs-master.c
+++ b/api/src/glfs-master.c
@@ -45,7 +45,7 @@ graph_setup(struct glfs *fs, glusterfs_graph_t *graph)
}
if (!new_subvol->itable) {
- itable = inode_table_new(131072, new_subvol);
+ itable = inode_table_new(131072, new_subvol, 0, 0);
if (!itable) {
errno = ENOMEM;
ret = -1;
diff --git a/libglusterfs/src/glusterfs/inode.h b/libglusterfs/src/glusterfs/inode.h
index c875653..62c093d 100644
--- a/libglusterfs/src/glusterfs/inode.h
+++ b/libglusterfs/src/glusterfs/inode.h
@@ -35,11 +35,12 @@ typedef struct _dentry dentry_t;
struct _inode_table {
pthread_mutex_t lock;
- size_t hashsize; /* bucket size of inode hash and dentry hash */
- char *name; /* name of the inode table, just for gf_log() */
- inode_t *root; /* root directory inode, with number 1 */
- xlator_t *xl; /* xlator to be called to do purge */
- uint32_t lru_limit; /* maximum LRU cache size */
+ size_t dentry_hashsize; /* Number of buckets for dentry hash*/
+ size_t inode_hashsize; /* Size of inode hash table */
+ char *name; /* name of the inode table, just for gf_log() */
+ inode_t *root; /* root directory inode, with number 1 */
+ xlator_t *xl; /* xlator to be called to do purge */
+ uint32_t lru_limit; /* maximum LRU cache size */
struct list_head *inode_hash; /* buckets for inode hash table */
struct list_head *name_hash; /* buckets for dentry hash table */
struct list_head active; /* list of inodes currently active (in an fop) */
@@ -116,12 +117,14 @@ struct _inode {
#define GFID_STR_PFX_LEN (sizeof(GFID_STR_PFX) - 1)
inode_table_t *
-inode_table_new(uint32_t lru_limit, xlator_t *xl);
+inode_table_new(uint32_t lru_limit, xlator_t *xl, uint32_t dhash_size,
+ uint32_t inodehash_size);
inode_table_t *
inode_table_with_invalidator(uint32_t lru_limit, xlator_t *xl,
int32_t (*invalidator_fn)(xlator_t *, inode_t *),
- xlator_t *invalidator_xl);
+ xlator_t *invalidator_xl, uint32_t dentry_hashsize,
+ uint32_t inode_hashsize);
void
inode_table_destroy_all(glusterfs_ctx_t *ctx);
diff --git a/libglusterfs/src/inode.c b/libglusterfs/src/inode.c
index 71b2d2a..98f8ea6 100644
--- a/libglusterfs/src/inode.c
+++ b/libglusterfs/src/inode.c
@@ -763,7 +763,7 @@ inode_grep(inode_table_t *table, inode_t *parent, const char *name)
return NULL;
}
- int hash = hash_dentry(parent, name, table->hashsize);
+ int hash = hash_dentry(parent, name, table->dentry_hashsize);
pthread_mutex_lock(&table->lock);
{
@@ -839,7 +839,7 @@ inode_grep_for_gfid(inode_table_t *table, inode_t *parent, const char *name,
return ret;
}
- int hash = hash_dentry(parent, name, table->hashsize);
+ int hash = hash_dentry(parent, name, table->dentry_hashsize);
pthread_mutex_lock(&table->lock);
{
@@ -903,7 +903,7 @@ inode_find(inode_table_t *table, uuid_t gfid)
return NULL;
}
- int hash = hash_gfid(gfid, 65536);
+ int hash = hash_gfid(gfid, table->inode_hashsize);
pthread_mutex_lock(&table->lock);
{
@@ -964,7 +964,7 @@ __inode_link(inode_t *inode, inode_t *parent, const char *name,
return NULL;
}
- int ihash = hash_gfid(iatt->ia_gfid, 65536);
+ int ihash = hash_gfid(iatt->ia_gfid, table->inode_hashsize);
old_inode = __inode_find(table, iatt->ia_gfid, ihash);
@@ -1043,7 +1043,7 @@ inode_link(inode_t *inode, inode_t *parent, const char *name, struct iatt *iatt)
table = inode->table;
if (parent && name) {
- hash = hash_dentry(parent, name, table->hashsize);
+ hash = hash_dentry(parent, name, table->dentry_hashsize);
}
if (name && strchr(name, '/')) {
@@ -1262,7 +1262,7 @@ inode_rename(inode_table_t *table, inode_t *srcdir, const char *srcname,
}
if (dstdir && dstname) {
- hash = hash_dentry(dstdir, dstname, table->hashsize);
+ hash = hash_dentry(dstdir, dstname, table->dentry_hashsize);
}
pthread_mutex_lock(&table->lock);
@@ -1626,7 +1626,8 @@ __inode_table_init_root(inode_table_t *table)
inode_table_t *
inode_table_with_invalidator(uint32_t lru_limit, xlator_t *xl,
int32_t (*invalidator_fn)(xlator_t *, inode_t *),
- xlator_t *invalidator_xl)
+ xlator_t *invalidator_xl, uint32_t dentry_hashsize,
+ uint32_t inode_hashsize)
{
inode_table_t *new = NULL;
uint32_t mem_pool_size = lru_limit;
@@ -1644,7 +1645,19 @@ inode_table_with_invalidator(uint32_t lru_limit, xlator_t *xl,
new->invalidator_fn = invalidator_fn;
new->invalidator_xl = invalidator_xl;
- new->hashsize = 14057; /* TODO: Random Number?? */
+ if (dentry_hashsize == 0) {
+ /* Prime number for uniform distribution */
+ new->dentry_hashsize = 14057;
+ } else {
+ new->dentry_hashsize = dentry_hashsize;
+ }
+
+ if (inode_hashsize == 0) {
+ /* The size of hash table always should be power of 2 */
+ new->inode_hashsize = 65536;
+ } else {
+ new->inode_hashsize = inode_hashsize;
+ }
/* In case FUSE is initing the inode table. */
if (!mem_pool_size || (mem_pool_size > DEFAULT_INODE_MEMPOOL_ENTRIES))
@@ -1658,13 +1671,13 @@ inode_table_with_invalidator(uint32_t lru_limit, xlator_t *xl,
if (!new->dentry_pool)
goto out;
- new->inode_hash = (void *)GF_CALLOC(65536, sizeof(struct list_head),
- gf_common_mt_list_head);
+ new->inode_hash = (void *)GF_CALLOC(
+ new->inode_hashsize, sizeof(struct list_head), gf_common_mt_list_head);
if (!new->inode_hash)
goto out;
- new->name_hash = (void *)GF_CALLOC(new->hashsize, sizeof(struct list_head),
- gf_common_mt_list_head);
+ new->name_hash = (void *)GF_CALLOC(
+ new->dentry_hashsize, sizeof(struct list_head), gf_common_mt_list_head);
if (!new->name_hash)
goto out;
@@ -1675,11 +1688,11 @@ inode_table_with_invalidator(uint32_t lru_limit, xlator_t *xl,
if (!new->fd_mem_pool)
goto out;
- for (i = 0; i < 65536; i++) {
+ for (i = 0; i < new->inode_hashsize; i++) {
INIT_LIST_HEAD(&new->inode_hash[i]);
}
- for (i = 0; i < new->hashsize; i++) {
+ for (i = 0; i < new->dentry_hashsize; i++) {
INIT_LIST_HEAD(&new->name_hash[i]);
}
@@ -1717,10 +1730,12 @@ out:
}
inode_table_t *
-inode_table_new(uint32_t lru_limit, xlator_t *xl)
+inode_table_new(uint32_t lru_limit, xlator_t *xl, uint32_t dentry_hashsize,
+ uint32_t inode_hashsize)
{
/* Only fuse for now requires the inode table with invalidator */
- return inode_table_with_invalidator(lru_limit, xl, NULL, NULL);
+ return inode_table_with_invalidator(lru_limit, xl, NULL, NULL,
+ dentry_hashsize, inode_hashsize);
}
int
@@ -2439,8 +2454,10 @@ inode_table_dump(inode_table_t *itable, char *prefix)
return;
}
- gf_proc_dump_build_key(key, prefix, "hashsize");
- gf_proc_dump_write(key, "%" GF_PRI_SIZET, itable->hashsize);
+ gf_proc_dump_build_key(key, prefix, "dentry_hashsize");
+ gf_proc_dump_write(key, "%" GF_PRI_SIZET, itable->dentry_hashsize);
+ gf_proc_dump_build_key(key, prefix, "inode_hashsize");
+ gf_proc_dump_write(key, "%" GF_PRI_SIZET, itable->inode_hashsize);
gf_proc_dump_build_key(key, prefix, "name");
gf_proc_dump_write(key, "%s", itable->name);
diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c
index 8f9e71f..bfa464f 100644
--- a/xlators/cluster/afr/src/afr.c
+++ b/xlators/cluster/afr/src/afr.c
@@ -594,7 +594,15 @@ init(xlator_t *this)
goto out;
}
- this->itable = inode_table_new(SHD_INODE_LRU_LIMIT, this);
+ if (priv->shd.iamshd) {
+ /* The number of hash buckets should be a prime number, so declare 131
+ total dentry hash buckets
+ */
+ this->itable = inode_table_new(SHD_INODE_LRU_LIMIT, this, 131, 128);
+ } else {
+ this->itable = inode_table_new(SHD_INODE_LRU_LIMIT, this, 0, 0);
+ }
+
if (!this->itable) {
ret = -ENOMEM;
goto out;
diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
index 16ac16c..072896d 100644
--- a/xlators/cluster/dht/src/dht-rebalance.c
+++ b/xlators/cluster/dht/src/dht-rebalance.c
@@ -1168,7 +1168,6 @@ __dht_rebalance_migrate_data(xlator_t *this, gf_defrag_info_t *defrag,
break;
}
-
offset += ret;
total += ret;
@@ -2467,7 +2466,7 @@ dht_build_root_inode(xlator_t *this, inode_t **inode)
0,
};
- itable = inode_table_new(0, this);
+ itable = inode_table_new(0, this, 0, 0);
if (!itable)
return;
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index 3f31c74..4118c3b 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -734,7 +734,7 @@ init(xlator_t *this)
GF_OPTION_INIT("stripe-cache", ec->stripe_cache, uint32, failed);
GF_OPTION_INIT("quorum-count", ec->quorum_count, uint32, failed);
- this->itable = inode_table_new(EC_SHD_INODE_LRU_LIMIT, this);
+ this->itable = inode_table_new(EC_SHD_INODE_LRU_LIMIT, this, 0, 0);
if (!this->itable)
goto failed;
diff --git a/xlators/features/bit-rot/src/bitd/bit-rot.c b/xlators/features/bit-rot/src/bitd/bit-rot.c
index 424c0d5..4e0e798 100644
--- a/xlators/features/bit-rot/src/bitd/bit-rot.c
+++ b/xlators/features/bit-rot/src/bitd/bit-rot.c
@@ -1658,7 +1658,7 @@ notify(xlator_t *this, int32_t event, void *data, ...)
child->child_up = 1;
child->xl = subvol;
if (!child->table)
- child->table = inode_table_new(4096, subvol);
+ child->table = inode_table_new(4096, subvol, 0, 0);
_br_qchild_event(this, child, br_brick_connect);
pthread_cond_signal(&priv->cond);
diff --git a/xlators/features/quota/src/quotad-helpers.c b/xlators/features/quota/src/quotad-helpers.c
index d9f0351..46ac116 100644
--- a/xlators/features/quota/src/quotad-helpers.c
+++ b/xlators/features/quota/src/quotad-helpers.c
@@ -32,7 +32,7 @@ get_quotad_aggregator_state(xlator_t *this, rpcsvc_request_t *req)
UNLOCK(&priv->lock);
if (active_subvol->itable == NULL)
- active_subvol->itable = inode_table_new(4096, active_subvol);
+ active_subvol->itable = inode_table_new(4096, active_subvol, 0, 0);
state->itable = active_subvol->itable;
diff --git a/xlators/features/trash/src/trash.c b/xlators/features/trash/src/trash.c
index 93f020f..099c887 100644
--- a/xlators/features/trash/src/trash.c
+++ b/xlators/features/trash/src/trash.c
@@ -2261,7 +2261,7 @@ reconfigure(xlator_t *this, dict_t *options)
if (!active_earlier && active_now) {
if (!priv->trash_itable) {
- priv->trash_itable = inode_table_new(0, this);
+ priv->trash_itable = inode_table_new(0, this, 0, 0);
if (!priv->trash_itable) {
ret = -ENOMEM;
gf_log(this->name, GF_LOG_ERROR,
@@ -2533,7 +2533,7 @@ init(xlator_t *this)
}
if (priv->state) {
- priv->trash_itable = inode_table_new(0, this);
+ priv->trash_itable = inode_table_new(0, this, 0, 0);
if (!priv->trash_itable) {
ret = -ENOMEM;
priv->state = _gf_false;
diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c
index 1bddac2..919eea3 100644
--- a/xlators/mount/fuse/src/fuse-bridge.c
+++ b/xlators/mount/fuse/src/fuse-bridge.c
@@ -6298,10 +6298,10 @@ fuse_graph_setup(xlator_t *this, glusterfs_graph_t *graph)
}
#if FUSE_KERNEL_MINOR_VERSION >= 11
- itable = inode_table_with_invalidator(priv->lru_limit, graph->top,
- fuse_inode_invalidate_fn, this);
+ itable = inode_table_with_invalidator(
+ priv->lru_limit, graph->top, fuse_inode_invalidate_fn, this, 0, 0);
#else
- itable = inode_table_new(0, graph->top);
+ itable = inode_table_new(0, graph->top, 0, 0);
#endif
if (!itable) {
ret = -1;
diff --git a/xlators/nfs/server/src/nfs.c b/xlators/nfs/server/src/nfs.c
index ebded41..402be30 100644
--- a/xlators/nfs/server/src/nfs.c
+++ b/xlators/nfs/server/src/nfs.c
@@ -564,7 +564,7 @@ nfs_init_subvolume(struct nfs_state *nfs, xlator_t *xl)
return -1;
lrusize = nfs->memfactor * GF_NFS_INODE_LRU_MULT;
- xl->itable = inode_table_new(lrusize, xl);
+ xl->itable = inode_table_new(lrusize, xl, 0, 0);
if (!xl->itable) {
gf_msg(GF_NFS, GF_LOG_CRITICAL, ENOMEM, NFS_MSG_NO_MEMORY,
"Failed to allocate inode table");
diff --git a/xlators/protocol/server/src/server-handshake.c b/xlators/protocol/server/src/server-handshake.c
index 1d1177d..eeca73c 100644
--- a/xlators/protocol/server/src/server-handshake.c
+++ b/xlators/protocol/server/src/server-handshake.c
@@ -36,7 +36,6 @@ gf_compare_client_version(rpcsvc_request_t *req, int fop_prognum,
return ret;
}
-
int
server_getspec(rpcsvc_request_t *req)
{
@@ -629,7 +628,7 @@ server_setvolume(rpcsvc_request_t *req)
/* TODO: what is this ? */
client->bound_xl->itable = inode_table_new(conf->inode_lru_limit,
- client->bound_xl);
+ client->bound_xl, 0, 0);
}
}
UNLOCK(&conf->itable_lock);
--
1.8.3.1
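For reference, a minimal caller-side sketch of the extended interface, assuming the libglusterfs headers from this tree; the lru_limit of 16384 and the helper names are illustrative. Passing 0 for either bucket count keeps the defaults selected above (14057 dentry buckets, a prime, and 65536 inode buckets, a power of two):

#include "glusterfs/inode.h"

static inode_table_t *
build_default_table(xlator_t *this)
{
    /* 0/0 expands to the defaults inside inode_table_with_invalidator:
     * 14057 dentry buckets (prime), 65536 inode buckets (power of 2). */
    return inode_table_new(16384, this, 0, 0);
}

static inode_table_t *
build_small_table(xlator_t *this)
{
    /* Tuned small table, mirroring the self-heal daemon case below:
     * 131 dentry buckets (prime), 128 inode buckets (2^7). */
    return inode_table_new(16384, this, 131, 128);
}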

View File

@ -0,0 +1,784 @@
From 5294c82e0528059b10cbaab7805b20e76ffdd66b Mon Sep 17 00:00:00 2001
From: mohit84 <moagrawa@redhat.com>
Date: Mon, 30 Nov 2020 17:39:53 +0530
Subject: [PATCH 510/511] glusterd[brick_mux]: Optimize friend handshake code
to avoid call_bail (#1614)
During the glusterd handshake, glusterd receives a volume dictionary
from the peer end and compares it against its own volume dictionary data.
If the options differ, it sets a key to record that volume options have
changed and calls an import synctask to delete/start the volume. In a
brick_mux environment with a high volume count (5k), the dict API calls in
glusterd_compare_friend_volume take a long time because the function
glusterd_handle_friend_req saves all peer volume data in a single dictionary.
Because of the time spent in glusterd_handle_friend_req, RPC requests receive
a call_bail from the peer end and gluster (CLI) is unable to show volume status.
Solution: To optimize the code, the following changes were made
1) Populate a separate dictionary that holds only the peer end's
version-specific data, so the function can quickly decide whether the
peer end has any volume updates.
2) If a volume's version differs, set a bit in status_arr instead of
saving a key in a dictionary, which makes the operation faster.
Note: To validate the changes, the following procedure was used
1) Set up 5100 distributed 3x1 volumes
2) Enable brick_mux
3) Start all the volumes
4) Kill all gluster processes on the 3rd node
5) Run a loop to update a volume option on the 1st node
for i in {1..5100}; do gluster v set vol$i performance.open-behind off; done
6) Start the glusterd process on the 3rd node
7) Wait for the handshake to finish and check that there are no call_bail
messages in the logs
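The status_arr bitmap from change 2) can be sketched standalone; the helper names are illustrative, and only the bit arithmetic mirrors the patch (the XOR acts as a set because each bit starts at zero and is flipped at most once):

#include <stdint.h>
#include <stdio.h>

/* One bit per volume; word i covers volumes [64*i, 64*i + 63]. */
static void
mark_volume_updated(uint64_t *status_arr, int count)
{
    status_arr[count / 64] ^= 1UL << (count % 64);
}

static int
volume_needs_update(const uint64_t *status_arr, int count)
{
    return 1UL & (status_arr[count / 64] >> (count % 64));
}

int
main(void)
{
    uint64_t status_arr[80] = {0}; /* 80 words cover 5100 volumes */

    mark_volume_updated(status_arr, 4097);
    printf("vol4097=%d vol1=%d\n", volume_needs_update(status_arr, 4097),
           volume_needs_update(status_arr, 1)); /* prints vol4097=1 vol1=0 */
    return 0;
}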
> Change-Id: Ibad7c23988539cc369ecc39dea2ea6985470bee1
> Fixes: #1613
> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
> (Cherry pick from commit 12545d91eed27ff9abb0505a12c7d4e75b45a53e)
> (Reviewed on upstream link https://github.com/gluster/glusterfs/issues/1613)
Change-Id: Ibad7c23988539cc369ecc39dea2ea6985470bee1
BUG: 1898784
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/221193
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/ctx.c | 4 +
libglusterfs/src/dict.c | 166 ++++++++++++++++++++++++++-
libglusterfs/src/globals.c | 2 -
libglusterfs/src/glusterfs/dict.h | 5 +
libglusterfs/src/glusterfs/globals.h | 2 +
libglusterfs/src/libglusterfs.sym | 1 +
xlators/mgmt/glusterd/src/glusterd-handler.c | 39 ++++---
xlators/mgmt/glusterd/src/glusterd-sm.c | 6 +-
xlators/mgmt/glusterd/src/glusterd-sm.h | 1 +
xlators/mgmt/glusterd/src/glusterd-utils.c | 148 ++++++++++++++----------
xlators/mgmt/glusterd/src/glusterd-utils.h | 2 +-
xlators/mgmt/glusterd/src/glusterd.h | 8 +-
12 files changed, 301 insertions(+), 83 deletions(-)
diff --git a/libglusterfs/src/ctx.c b/libglusterfs/src/ctx.c
index 4a001c2..ae1a77a 100644
--- a/libglusterfs/src/ctx.c
+++ b/libglusterfs/src/ctx.c
@@ -14,6 +14,7 @@
#include "glusterfs/glusterfs.h"
#include "timer-wheel.h"
+glusterfs_ctx_t *global_ctx = NULL;
glusterfs_ctx_t *
glusterfs_ctx_new()
{
@@ -51,6 +52,9 @@ glusterfs_ctx_new()
GF_ATOMIC_INIT(ctx->stats.max_dict_pairs, 0);
GF_ATOMIC_INIT(ctx->stats.total_pairs_used, 0);
GF_ATOMIC_INIT(ctx->stats.total_dicts_used, 0);
+
+ if (!global_ctx)
+ global_ctx = ctx;
out:
return ctx;
}
diff --git a/libglusterfs/src/dict.c b/libglusterfs/src/dict.c
index d8cdda4..e5f619c 100644
--- a/libglusterfs/src/dict.c
+++ b/libglusterfs/src/dict.c
@@ -56,7 +56,13 @@ struct dict_cmp {
static data_t *
get_new_data()
{
- data_t *data = mem_get(THIS->ctx->dict_data_pool);
+ data_t *data = NULL;
+
+ if (global_ctx) {
+ data = mem_get(global_ctx->dict_data_pool);
+ } else {
+ data = mem_get(THIS->ctx->dict_data_pool);
+ }
if (!data)
return NULL;
@@ -3503,3 +3509,161 @@ unlock:
UNLOCK(&dict->lock);
return 0;
}
+
+/* Populate a specific dictionary on the basis of the passed key array at
+ the time of unserializing the buffer
+*/
+int32_t
+dict_unserialize_specific_keys(char *orig_buf, int32_t size, dict_t **fill,
+ char **suffix_key_arr, dict_t **specific_dict,
+ int totkeycount)
+{
+ char *buf = orig_buf;
+ int ret = -1;
+ int32_t count = 0;
+ int i = 0;
+ int j = 0;
+
+ data_t *value = NULL;
+ char *key = NULL;
+ int32_t keylen = 0;
+ int32_t vallen = 0;
+ int32_t hostord = 0;
+ xlator_t *this = NULL;
+ int32_t keylenarr[totkeycount];
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ if (!buf) {
+ gf_msg_callingfn("dict", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
+ "buf is null!");
+ goto out;
+ }
+
+ if (size == 0) {
+ gf_msg_callingfn("dict", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
+ "size is 0!");
+ goto out;
+ }
+
+ if (!fill) {
+ gf_msg_callingfn("dict", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
+ "fill is null!");
+ goto out;
+ }
+
+ if (!*fill) {
+ gf_msg_callingfn("dict", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
+ "*fill is null!");
+ goto out;
+ }
+
+ if ((buf + DICT_HDR_LEN) > (orig_buf + size)) {
+ gf_msg_callingfn("dict", GF_LOG_ERROR, 0, LG_MSG_UNDERSIZED_BUF,
+ "undersized buffer "
+ "passed. available (%lu) < required (%lu)",
+ (long)(orig_buf + size), (long)(buf + DICT_HDR_LEN));
+ goto out;
+ }
+
+ memcpy(&hostord, buf, sizeof(hostord));
+ count = ntoh32(hostord);
+ buf += DICT_HDR_LEN;
+
+ if (count < 0) {
+ gf_smsg("dict", GF_LOG_ERROR, 0, LG_MSG_COUNT_LESS_THAN_ZERO,
+ "count=%d", count, NULL);
+ goto out;
+ }
+
+ /* Compute specific key length and save in array */
+ for (i = 0; i < totkeycount; i++) {
+ keylenarr[i] = strlen(suffix_key_arr[i]);
+ }
+
+ for (i = 0; i < count; i++) {
+ if ((buf + DICT_DATA_HDR_KEY_LEN) > (orig_buf + size)) {
+ gf_msg_callingfn("dict", GF_LOG_ERROR, 0, LG_MSG_UNDERSIZED_BUF,
+ "undersized "
+ "buffer passed. available (%lu) < "
+ "required (%lu)",
+ (long)(orig_buf + size),
+ (long)(buf + DICT_DATA_HDR_KEY_LEN));
+ goto out;
+ }
+ memcpy(&hostord, buf, sizeof(hostord));
+ keylen = ntoh32(hostord);
+ buf += DICT_DATA_HDR_KEY_LEN;
+
+ if ((buf + DICT_DATA_HDR_VAL_LEN) > (orig_buf + size)) {
+ gf_msg_callingfn("dict", GF_LOG_ERROR, 0, LG_MSG_UNDERSIZED_BUF,
+ "undersized "
+ "buffer passed. available (%lu) < "
+ "required (%lu)",
+ (long)(orig_buf + size),
+ (long)(buf + DICT_DATA_HDR_VAL_LEN));
+ goto out;
+ }
+ memcpy(&hostord, buf, sizeof(hostord));
+ vallen = ntoh32(hostord);
+ buf += DICT_DATA_HDR_VAL_LEN;
+
+ if ((keylen < 0) || (vallen < 0)) {
+ gf_msg_callingfn("dict", GF_LOG_ERROR, 0, LG_MSG_UNDERSIZED_BUF,
+ "undersized length passed "
+ "key:%d val:%d",
+ keylen, vallen);
+ goto out;
+ }
+ if ((buf + keylen) > (orig_buf + size)) {
+ gf_msg_callingfn("dict", GF_LOG_ERROR, 0, LG_MSG_UNDERSIZED_BUF,
+ "undersized buffer passed. "
+ "available (%lu) < required (%lu)",
+ (long)(orig_buf + size), (long)(buf + keylen));
+ goto out;
+ }
+ key = buf;
+ buf += keylen + 1; /* for '\0' */
+
+ if ((buf + vallen) > (orig_buf + size)) {
+ gf_msg_callingfn("dict", GF_LOG_ERROR, 0, LG_MSG_UNDERSIZED_BUF,
+ "undersized buffer passed. "
+ "available (%lu) < required (%lu)",
+ (long)(orig_buf + size), (long)(buf + vallen));
+ goto out;
+ }
+ value = get_new_data();
+
+ if (!value) {
+ ret = -1;
+ goto out;
+ }
+ value->len = vallen;
+ value->data = gf_memdup(buf, vallen);
+ value->data_type = GF_DATA_TYPE_STR_OLD;
+ value->is_static = _gf_false;
+ buf += vallen;
+
+ ret = dict_addn(*fill, key, keylen, value);
+ if (ret < 0) {
+ data_destroy(value);
+ goto out;
+ }
+ for (j = 0; j < totkeycount; j++) {
+ if (keylen > keylenarr[j]) {
+ if (!strcmp(key + keylen - keylenarr[j], suffix_key_arr[j])) {
+ ret = dict_addn(*specific_dict, key, keylen, value);
+ break;
+ }
+ }
+ }
+
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
diff --git a/libglusterfs/src/globals.c b/libglusterfs/src/globals.c
index e433ee8..30c15b6 100644
--- a/libglusterfs/src/globals.c
+++ b/libglusterfs/src/globals.c
@@ -96,7 +96,6 @@ const char *gf_upcall_list[GF_UPCALL_FLAGS_MAXVALUE] = {
/* This global ctx is a bad hack to prevent some of the libgfapi crashes.
* This should be removed once the patch on resource pool is accepted
*/
-glusterfs_ctx_t *global_ctx = NULL;
pthread_mutex_t global_ctx_mutex = PTHREAD_MUTEX_INITIALIZER;
xlator_t global_xlator;
static int gf_global_mem_acct_enable = 1;
@@ -236,7 +235,6 @@ __glusterfs_this_location()
if (*this_location == NULL) {
thread_xlator = &global_xlator;
}
-
return this_location;
}
diff --git a/libglusterfs/src/glusterfs/dict.h b/libglusterfs/src/glusterfs/dict.h
index 8239c7a..6e469c7 100644
--- a/libglusterfs/src/glusterfs/dict.h
+++ b/libglusterfs/src/glusterfs/dict.h
@@ -423,4 +423,9 @@ dict_has_key_from_array(dict_t *dict, char **strings, gf_boolean_t *result);
int
dict_serialized_length_lk(dict_t *this);
+
+int32_t
+dict_unserialize_specific_keys(char *orig_buf, int32_t size, dict_t **fill,
+ char **specific_key_arr, dict_t **specific_dict,
+ int totkeycount);
#endif
diff --git a/libglusterfs/src/glusterfs/globals.h b/libglusterfs/src/glusterfs/globals.h
index cc145cd..33fb023 100644
--- a/libglusterfs/src/glusterfs/globals.h
+++ b/libglusterfs/src/glusterfs/globals.h
@@ -199,4 +199,6 @@ int
gf_global_mem_acct_enable_get(void);
int
gf_global_mem_acct_enable_set(int val);
+
+extern glusterfs_ctx_t *global_ctx;
#endif /* !_GLOBALS_H */
diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym
index d060292..bc770e2 100644
--- a/libglusterfs/src/libglusterfs.sym
+++ b/libglusterfs/src/libglusterfs.sym
@@ -436,6 +436,7 @@ dict_clear_flag
dict_check_flag
dict_unref
dict_unserialize
+dict_unserialize_specific_keys
drop_token
eh_destroy
eh_dump
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index b8799ab..908361c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -86,6 +86,9 @@ glusterd_big_locked_handler(rpcsvc_request_t *req, rpcsvc_actor actor_fn)
return ret;
}
+static char *specific_key_suffix[] = {".quota-cksum", ".ckusm", ".version",
+ ".quota-version", ".name"};
+
static int
glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
int port, gd1_mgmt_friend_req *friend_req)
@@ -97,6 +100,8 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
char rhost[UNIX_PATH_MAX + 1] = {0};
uuid_t friend_uuid = {0};
dict_t *dict = NULL;
+ dict_t *peer_ver = NULL;
+ int totcount = sizeof(specific_key_suffix) / sizeof(specific_key_suffix[0]);
gf_uuid_parse(uuid_utoa(uuid), friend_uuid);
if (!port)
@@ -104,8 +109,19 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
ret = glusterd_remote_hostname_get(req, rhost, sizeof(rhost));
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
+ dict = dict_new();
+ peer_ver = dict_new();
+
RCU_READ_LOCK;
+ if (!ctx || !dict || !peer_ver) {
+ gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Unable to allocate memory");
+ ret = -1;
+ goto out;
+ }
+
peerinfo = glusterd_peerinfo_find(uuid, rhost);
if (peerinfo == NULL) {
@@ -130,28 +146,14 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
event->peername = gf_strdup(peerinfo->hostname);
gf_uuid_copy(event->peerid, peerinfo->uuid);
- ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
-
- if (!ctx) {
- gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
- "Unable to allocate memory");
- ret = -1;
- goto out;
- }
-
gf_uuid_copy(ctx->uuid, uuid);
if (hostname)
ctx->hostname = gf_strdup(hostname);
ctx->req = req;
- dict = dict_new();
- if (!dict) {
- ret = -1;
- goto out;
- }
-
- ret = dict_unserialize(friend_req->vols.vols_val, friend_req->vols.vols_len,
- &dict);
+ ret = dict_unserialize_specific_keys(
+ friend_req->vols.vols_val, friend_req->vols.vols_len, &dict,
+ specific_key_suffix, &peer_ver, totcount);
if (ret)
goto out;
@@ -159,6 +161,7 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
dict->extra_stdfree = friend_req->vols.vols_val;
ctx->vols = dict;
+ ctx->peer_ver = peer_ver;
event->ctx = ctx;
ret = glusterd_friend_sm_inject_event(event);
@@ -188,6 +191,8 @@ out:
} else {
free(friend_req->vols.vols_val);
}
+ if (peer_ver)
+ dict_unref(peer_ver);
if (event)
GF_FREE(event->peername);
GF_FREE(event);
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
index 044da3d..d10a792 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
@@ -106,6 +106,8 @@ glusterd_destroy_friend_req_ctx(glusterd_friend_req_ctx_t *ctx)
if (ctx->vols)
dict_unref(ctx->vols);
+ if (ctx->peer_ver)
+ dict_unref(ctx->peer_ver);
GF_FREE(ctx->hostname);
GF_FREE(ctx);
}
@@ -936,8 +938,8 @@ glusterd_ac_handle_friend_add_req(glusterd_friend_sm_event_t *event, void *ctx)
// Build comparison logic here.
pthread_mutex_lock(&conf->import_volumes);
{
- ret = glusterd_compare_friend_data(ev_ctx->vols, &status,
- event->peername);
+ ret = glusterd_compare_friend_data(ev_ctx->vols, ev_ctx->peer_ver,
+ &status, event->peername);
if (ret) {
pthread_mutex_unlock(&conf->import_volumes);
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.h b/xlators/mgmt/glusterd/src/glusterd-sm.h
index ce008ac..efdf68e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.h
@@ -174,6 +174,7 @@ typedef struct glusterd_friend_req_ctx_ {
rpcsvc_request_t *req;
int port;
dict_t *vols;
+ dict_t *peer_ver; // Dictionary to save peer ver data
} glusterd_friend_req_ctx_t;
typedef struct glusterd_friend_update_ctx_ {
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index f7030fb..cf32bd9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -3709,12 +3709,14 @@ out:
return ret;
}
-int32_t
-glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
- int32_t *status, char *hostname)
+static int32_t
+glusterd_compare_friend_volume(dict_t *peer_data,
+ glusterd_friend_synctask_args_t *arg,
+ int32_t count, int32_t *status, char *hostname)
{
int32_t ret = -1;
char key[64] = "";
+ char key_prefix[32];
int keylen;
glusterd_volinfo_t *volinfo = NULL;
char *volname = NULL;
@@ -3726,15 +3728,20 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
xlator_t *this = NULL;
GF_ASSERT(peer_data);
+ GF_ASSERT(arg);
GF_ASSERT(status);
this = THIS;
GF_ASSERT(this);
- keylen = snprintf(key, sizeof(key), "volume%d.name", count);
- ret = dict_get_strn(peer_data, key, keylen, &volname);
- if (ret)
+ snprintf(key_prefix, sizeof(key_prefix), "volume%d", count);
+ keylen = snprintf(key, sizeof(key), "%s.name", key_prefix);
+ ret = dict_get_strn(arg->peer_ver_data, key, keylen, &volname);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s is NULL in peer_ver_data", key, NULL);
goto out;
+ }
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
@@ -3750,10 +3757,13 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
goto out;
}
- keylen = snprintf(key, sizeof(key), "volume%d.version", count);
- ret = dict_get_int32n(peer_data, key, keylen, &version);
- if (ret)
+ keylen = snprintf(key, sizeof(key), "%s.version", key_prefix);
+ ret = dict_get_int32n(arg->peer_ver_data, key, keylen, &version);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s is NULL in peer_ver_data", key, NULL);
goto out;
+ }
if (version > volinfo->version) {
// Mismatch detected
@@ -3772,10 +3782,13 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
// Now, versions are same, compare cksums.
//
- snprintf(key, sizeof(key), "volume%d.ckusm", count);
- ret = dict_get_uint32(peer_data, key, &cksum);
- if (ret)
+ snprintf(key, sizeof(key), "%s.ckusm", key_prefix);
+ ret = dict_get_uint32(arg->peer_ver_data, key, &cksum);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s is NULL in peer_ver_data", key, NULL);
goto out;
+ }
if (cksum != volinfo->cksum) {
ret = 0;
@@ -3790,8 +3803,8 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
if (!dict_get_sizen(volinfo->dict, VKEY_FEATURES_QUOTA))
goto skip_quota;
- snprintf(key, sizeof(key), "volume%d.quota-version", count);
- ret = dict_get_uint32(peer_data, key, &quota_version);
+ snprintf(key, sizeof(key), "%s.quota-version", key_prefix);
+ ret = dict_get_uint32(arg->peer_ver_data, key, &quota_version);
if (ret) {
gf_msg_debug(this->name, 0,
"quota-version key absent for"
@@ -3809,6 +3822,7 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
"%d on peer %s",
volinfo->volname, volinfo->quota_conf_version, quota_version,
hostname);
+ GF_ATOMIC_INIT(volinfo->volpeerupdate, 1);
*status = GLUSTERD_VOL_COMP_UPDATE_REQ;
goto out;
} else if (quota_version < volinfo->quota_conf_version) {
@@ -3819,8 +3833,8 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count,
// Now, versions are same, compare cksums.
//
- snprintf(key, sizeof(key), "volume%d.quota-cksum", count);
- ret = dict_get_uint32(peer_data, key, &quota_cksum);
+ snprintf(key, sizeof(key), "%s.quota-cksum", key_prefix);
+ ret = dict_get_uint32(arg->peer_ver_data, key, &quota_cksum);
if (ret) {
gf_msg_debug(this->name, 0,
"quota checksum absent for "
@@ -3846,13 +3860,12 @@ skip_quota:
*status = GLUSTERD_VOL_COMP_SCS;
out:
- keylen = snprintf(key, sizeof(key), "volume%d.update", count);
-
if (*status == GLUSTERD_VOL_COMP_UPDATE_REQ) {
- ret = dict_set_int32n(peer_data, key, keylen, 1);
- } else {
- ret = dict_set_int32n(peer_data, key, keylen, 0);
+ /*Set the status to ensure volume is updated on the peer
+ */
+ arg->status_arr[(count / 64)] ^= 1UL << (count % 64);
}
+
if (*status == GLUSTERD_VOL_COMP_RJT) {
gf_event(EVENT_COMPARE_FRIEND_VOLUME_FAILED, "volume=%s",
volinfo->volname);
@@ -4935,8 +4948,9 @@ out:
return ret;
}
-int32_t
-glusterd_import_friend_volume(dict_t *peer_data, int count)
+static int32_t
+glusterd_import_friend_volume(dict_t *peer_data, int count,
+ glusterd_friend_synctask_args_t *arg)
{
int32_t ret = -1;
glusterd_conf_t *priv = NULL;
@@ -4954,10 +4968,27 @@ glusterd_import_friend_volume(dict_t *peer_data, int count)
priv = this->private;
GF_ASSERT(priv);
- ret = snprintf(key, sizeof(key), "volume%d.update", count);
- ret = dict_get_int32n(peer_data, key, ret, &update);
- if (ret || !update) {
+ if (arg) {
+ /*Check if the volume options are updated on the other peers
+ */
+ update = (1UL & (arg->status_arr[(count / 64)] >> (count % 64)));
+ } else {
+ ret = snprintf(key, sizeof(key), "volume%d.update", count);
+ ret = dict_get_int32n(peer_data, key, ret, &update);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=%s", key, NULL);
+ goto out;
+ }
+ }
+
+ if (!update) {
/* if update is 0 that means the volume is not imported */
+ gf_log(this->name, GF_LOG_DEBUG,
+ "The volume%d does"
+ " not have any peer change",
+ count);
+ ret = 0;
goto out;
}
@@ -5045,6 +5076,8 @@ glusterd_import_friend_volumes_synctask(void *opaque)
glusterd_conf_t *conf = NULL;
dict_t *peer_data = NULL;
glusterd_friend_synctask_args_t *arg = NULL;
+ uint64_t bm = 0;
+ uint64_t mask = 0;
this = THIS;
GF_ASSERT(this);
@@ -5056,17 +5089,7 @@ glusterd_import_friend_volumes_synctask(void *opaque)
if (!arg)
goto out;
- peer_data = dict_new();
- if (!peer_data) {
- goto out;
- }
-
- ret = dict_unserialize(arg->dict_buf, arg->dictlen, &peer_data);
- if (ret) {
- errno = ENOMEM;
- goto out;
- }
-
+ peer_data = arg->peer_data;
ret = dict_get_int32n(peer_data, "count", SLEN("count"), &count);
if (ret)
goto out;
@@ -5083,11 +5106,18 @@ glusterd_import_friend_volumes_synctask(void *opaque)
conf->restart_bricks = _gf_true;
while (i <= count) {
- ret = glusterd_import_friend_volume(peer_data, i);
- if (ret) {
- break;
+ bm = arg->status_arr[i / 64];
+ while (bm != 0) {
+ /* mask will contain the lowest bit set from bm. */
+ mask = bm & (-bm);
+ bm ^= mask;
+ ret = glusterd_import_friend_volume(peer_data, i + ffsll(mask) - 2,
+ arg);
+ if (ret < 0) {
+ break;
+ }
}
- i++;
+ i += 64;
}
if (i > count) {
glusterd_svcs_manager(NULL);
@@ -5095,11 +5125,9 @@ glusterd_import_friend_volumes_synctask(void *opaque)
conf->restart_bricks = _gf_false;
synccond_broadcast(&conf->cond_restart_bricks);
out:
- if (peer_data)
- dict_unref(peer_data);
if (arg) {
- if (arg->dict_buf)
- GF_FREE(arg->dict_buf);
+ dict_unref(arg->peer_data);
+ dict_unref(arg->peer_ver_data);
GF_FREE(arg);
}
@@ -5121,7 +5149,7 @@ glusterd_import_friend_volumes(dict_t *peer_data)
goto out;
while (i <= count) {
- ret = glusterd_import_friend_volume(peer_data, i);
+ ret = glusterd_import_friend_volume(peer_data, i, NULL);
if (ret)
goto out;
i++;
@@ -5260,7 +5288,8 @@ out:
}
int32_t
-glusterd_compare_friend_data(dict_t *peer_data, int32_t *status, char *hostname)
+glusterd_compare_friend_data(dict_t *peer_data, dict_t *cmp, int32_t *status,
+ char *hostname)
{
int32_t ret = -1;
int32_t count = 0;
@@ -5289,8 +5318,19 @@ glusterd_compare_friend_data(dict_t *peer_data, int32_t *status, char *hostname)
if (ret)
goto out;
+ arg = GF_CALLOC(1, sizeof(*arg) + sizeof(uint64_t) * (count / 64),
+ gf_common_mt_char);
+ if (!arg) {
+ ret = -1;
+ gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Out Of Memory");
+ goto out;
+ }
+ arg->peer_data = dict_ref(peer_data);
+ arg->peer_ver_data = dict_ref(cmp);
while (i <= count) {
- ret = glusterd_compare_friend_volume(peer_data, i, status, hostname);
+ ret = glusterd_compare_friend_volume(peer_data, arg, i, status,
+ hostname);
if (ret)
goto out;
@@ -5310,21 +5350,13 @@ glusterd_compare_friend_data(dict_t *peer_data, int32_t *status, char *hostname)
* first brick to come up before attaching the subsequent bricks
* in case brick multiplexing is enabled
*/
- arg = GF_CALLOC(1, sizeof(*arg), gf_common_mt_char);
- ret = dict_allocate_and_serialize(peer_data, &arg->dict_buf,
- &arg->dictlen);
- if (ret < 0) {
- gf_log(this->name, GF_LOG_ERROR,
- "dict_serialize failed while handling "
- " import friend volume request");
- goto out;
- }
-
glusterd_launch_synctask(glusterd_import_friend_volumes_synctask, arg);
}
out:
if (ret && arg) {
+ dict_unref(arg->peer_data);
+ dict_unref(arg->peer_ver_data);
GF_FREE(arg);
}
gf_msg_debug(this->name, 0, "Returning with ret: %d, status: %d", ret,
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 5f5de82..02d85d2 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -231,7 +231,7 @@ glusterd_add_volumes_to_export_dict(dict_t *peer_data, char **buf,
u_int *length);
int32_t
-glusterd_compare_friend_data(dict_t *peer_data, int32_t *status,
+glusterd_compare_friend_data(dict_t *peer_data, dict_t *cmp, int32_t *status,
char *hostname);
int
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index f739b5d..efe4d0e 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -234,8 +234,12 @@ typedef struct glusterd_add_dict_args {
} glusterd_add_dict_args_t;
typedef struct glusterd_friend_synctask_args {
- char *dict_buf;
- u_int dictlen;
+ dict_t *peer_data;
+ dict_t *peer_ver_data; // Dictionary to save peer version data
+ /* This status_arr[1] is not the real size; the real size of the array
+ is dynamically allocated
+ */
+ uint64_t status_arr[1];
} glusterd_friend_synctask_args_t;
typedef enum gf_brick_status {
--
1.8.3.1
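For reference, a caller-side sketch of the new unserialize helper, modelled on glusterd_handle_friend_req above; the wrapper name is hypothetical, while the suffix list and the dict_unserialize_specific_keys() signature come from the patch:

#include "glusterfs/dict.h"

static char *suffix_keys[] = {".quota-cksum", ".ckusm", ".version",
                              ".quota-version", ".name"};

static int
unpack_peer_volumes(char *buf, int32_t len, dict_t **all, dict_t **ver_only)
{
    int totcount = sizeof(suffix_keys) / sizeof(suffix_keys[0]);

    *all = dict_new();      /* receives every key, as dict_unserialize does */
    *ver_only = dict_new(); /* receives only keys ending in a suffix */
    if (!*all || !*ver_only)
        return -1;

    /* One pass over the wire buffer: every pair lands in *all, and a
     * pair whose key ends in one of the suffixes also lands in
     * *ver_only, so version checks never walk the full dictionary. */
    return dict_unserialize_specific_keys(buf, len, all, suffix_keys,
                                          ver_only, totcount);
}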

View File

@ -0,0 +1,39 @@
From 868d346cc35c222d19b95bd9c367674c9ea859df Mon Sep 17 00:00:00 2001
From: Vinayakswami Hariharmath <vharihar@redhat.com>
Date: Tue, 15 Dec 2020 16:23:49 +0530
Subject: [PATCH 511/511] features/shard: Missing format specifier
The PRIu64 format specifier explicitly needs a percent sign (%) as a
prefix, and that was missing in the downstream commit below
https://code.engineering.redhat.com/gerrit/#/c/221061/
BUG: 1752739
Change-Id: I354de58796f350eb1aa42fcdf8092ca2e69ccbb6
Signed-off-by: Vinayakswami Hariharmath <vharihar@redhat.com>
Change-Id: I4598893e3fcca3a2b3e6e8ef9b64b3e5e98923e6
Reviewed-on: https://code.engineering.redhat.com/gerrit/221217
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
---
xlators/features/shard/src/shard.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c
index a967f35..099b062 100644
--- a/xlators/features/shard/src/shard.c
+++ b/xlators/features/shard/src/shard.c
@@ -1855,7 +1855,7 @@ int shard_truncate_last_shard(call_frame_t *frame, xlator_t *this,
*/
if (!inode) {
gf_msg_debug(this->name, 0,
- "Last shard to be truncated absent in backend: " PRIu64
+ "Last shard to be truncated absent in backend:%" PRIu64
" of gfid: %s. Directly proceeding to update file size",
local->first_block, uuid_utoa(local->loc.inode->gfid));
shard_update_file_size(frame, this, NULL, &local->loc,
--
1.8.3.1
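The fix is easiest to see standalone: the <inttypes.h> macros expand to the conversion letters only, so the percent sign must always be written explicitly:

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
    uint64_t first_block = 42;

    /* PRIu64 expands to something like "lu" or "llu". With the '%'
     * present this prints 42; without it the format would print a
     * literal "lu" and leave the argument unconsumed. */
    printf("Last shard to be truncated absent in backend:%" PRIu64 "\n",
           first_block);
    return 0;
}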

View File

@ -0,0 +1,105 @@
From c963653a89c3f6466af9a3e8f19246a7907f7f8c Mon Sep 17 00:00:00 2001
From: nik-redhat <nladha@redhat.com>
Date: Thu, 30 Jul 2020 13:04:52 +0530
Subject: [PATCH 512/517] glusterd: shared storage mount fails in ipv6
environment
Issue:
In an ipv6 environment, mounting the glusterd_shared_storage
volume fails because the mount does not recognise the ipv6 environment.
Fix:
In an ipv6 environment, the address-family is passed to the hook
script on creating shared-storage; depending on the address-family, the
--xlator-option=transport.address-family=inet6 option is added to the
mount command, and the mounting succeeds.
>Fixes: #1406
>
>Change-Id: Ib1888c34d85e6c01618b0ba214cbe1f57576908d
>Signed-off-by: nik-redhat <nladha@redhat.com>
Upstream patch: https://review.gluster.org/c/glusterfs/+/24797
BUG: 1856574
Change-Id: Ib1888c34d85e6c01618b0ba214cbe1f57576908d
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/221844
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Srijan Sivakumar <ssivakum@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
.../set/post/S32gluster_enable_shared_storage.sh | 11 +++++++++--
xlators/mgmt/glusterd/src/glusterd-hooks.c | 19 +++++++++++++++++++
2 files changed, 28 insertions(+), 2 deletions(-)
diff --git a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
index 3bae37c..9597503 100755
--- a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
+++ b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
@@ -104,8 +104,15 @@ function check_volume_status()
echo $status
}
-mount_cmd="mount -t glusterfs $local_node_hostname:/gluster_shared_storage \
- /run/gluster/shared_storage"
+key=`echo $5 | cut -d '=' -f 1`
+val=`echo $5 | cut -d '=' -f 2`
+if [ "$key" == "transport.address-family" ]; then
+ mount_cmd="mount -t glusterfs -o xlator-option=transport.address-family=inet6 \
+ $local_node_hostname:/gluster_shared_storage /var/run/gluster/shared_storage"
+else
+ mount_cmd="mount -t glusterfs $local_node_hostname:/gluster_shared_storage \
+ /var/run/gluster/shared_storage"
+fi
if [ "$option" == "enable" ]; then
retry=0;
diff --git a/xlators/mgmt/glusterd/src/glusterd-hooks.c b/xlators/mgmt/glusterd/src/glusterd-hooks.c
index 216cdf7..4f0d775 100644
--- a/xlators/mgmt/glusterd/src/glusterd-hooks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-hooks.c
@@ -200,11 +200,16 @@ glusterd_hooks_set_volume_args(dict_t *dict, runner_t *runner)
int i = 0;
int count = 0;
int ret = -1;
+ int flag = 0;
char query[1024] = {
0,
};
char *key = NULL;
char *value = NULL;
+ char *inet_family = NULL;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
ret = dict_get_int32(dict, "count", &count);
if (ret)
@@ -228,9 +233,23 @@ glusterd_hooks_set_volume_args(dict_t *dict, runner_t *runner)
continue;
runner_argprintf(runner, "%s=%s", key, value);
+ if ((strncmp(key, "cluster.enable-shared-storage",
+ SLEN("cluster.enable-shared-storage")) == 0 ||
+ strncmp(key, "enable-shared-storage",
+ SLEN("enable-shared-storage")) == 0) &&
+ strncmp(value, "enable", SLEN("enable")) == 0)
+ flag = 1;
}
glusterd_hooks_add_custom_args(dict, runner);
+ if (flag == 1) {
+ ret = dict_get_str_sizen(this->options, "transport.address-family",
+ &inet_family);
+ if (!ret) {
+ runner_argprintf(runner, "transport.address-family=%s",
+ inet_family);
+ }
+ }
ret = 0;
out:
--
1.8.3.1
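A condensed sketch of the glusterd side of the fix, assuming the glusterfs headers from this tree; the function name is hypothetical and only the dict/runner calls mirror the hunk above:

#include "glusterfs/dict.h"
#include "glusterfs/run.h"
#include "glusterfs/xlator.h"

/* Append the address-family to the hook script's argv only when
 * shared storage is being enabled. */
static void
maybe_pass_address_family(xlator_t *this, runner_t *runner,
                          gf_boolean_t enabling)
{
    char *inet_family = NULL;

    if (!enabling)
        return;

    if (dict_get_str_sizen(this->options, "transport.address-family",
                           &inet_family) == 0)
        runner_argprintf(runner, "transport.address-family=%s", inet_family);
}

The hook script then inspects its fifth argument; on seeing transport.address-family=inet6 it adds -o xlator-option=transport.address-family=inet6 to the mount command.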

View File

@ -0,0 +1,191 @@
From 708c17a8a69b2657f384affaedfcf4ba0a123893 Mon Sep 17 00:00:00 2001
From: karthik-us <ksubrahm@redhat.com>
Date: Wed, 23 Dec 2020 14:45:07 +0530
Subject: [PATCH 513/517] afr: mark pending xattrs as a part of metadata heal
...if pending xattrs are zero for all children.
Problem:
If there are no pending xattrs and a metadata heal needs to be
performed, it is possible that we end up with xattrs inadvertently
deleted from all bricks, as explained in the BZ.
Fix:
After picking one among the sources as the good copy, mark pending xattrs on
all sources to blame the sinks. Now even if this metadata heal fails midway,
a subsequent heal will still choose one of the valid sources that it
picked previously.
Upstream patch details:
> Fixes: #1067
> Change-Id: If1b050b70b0ad911e162c04db4d89b263e2b8d7b
> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Upstream patch: https://review.gluster.org/#/c/glusterfs/+/21922/
BUG: 1640148
Change-Id: If1b050b70b0ad911e162c04db4d89b263e2b8d7b
Signed-off-by: karthik-us <ksubrahm@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/222073
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
---
tests/bugs/replicate/mdata-heal-no-xattrs.t | 59 ++++++++++++++++++++++
xlators/cluster/afr/src/afr-self-heal-metadata.c | 62 +++++++++++++++++++++++-
2 files changed, 120 insertions(+), 1 deletion(-)
create mode 100644 tests/bugs/replicate/mdata-heal-no-xattrs.t
diff --git a/tests/bugs/replicate/mdata-heal-no-xattrs.t b/tests/bugs/replicate/mdata-heal-no-xattrs.t
new file mode 100644
index 0000000..d3b0c50
--- /dev/null
+++ b/tests/bugs/replicate/mdata-heal-no-xattrs.t
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+echo "Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+
+# Change permission on brick-0: simulates the case where there is metadata
+# mismatch but no pending xattrs. This brick will become the source for heal.
+TEST chmod +x $B0/$V0"0"/FILE
+
+# Add gfid to xattrop
+xattrop_b0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_b0`
+gfid_str_FILE=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/FILE))
+TEST ln $xattrop_b0/$base_entry_b0 $xattrop_b0/$gfid_str_FILE
+EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Brick-0 should contain xattrs blaming other 2 bricks.
+# The values will be zero because heal is over.
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/FILE
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}0/FILE
+
+# Brick-1 and Brick-2 must not contain any afr xattrs.
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-1 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}2/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-1 $B0/${V0}2/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}2/FILE
+
+# check permission bits.
+EXPECT '755' stat -c %a $B0/${V0}0/FILE
+EXPECT '755' stat -c %a $B0/${V0}1/FILE
+EXPECT '755' stat -c %a $B0/${V0}2/FILE
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
diff --git a/xlators/cluster/afr/src/afr-self-heal-metadata.c b/xlators/cluster/afr/src/afr-self-heal-metadata.c
index f4e31b6..03f43ba 100644
--- a/xlators/cluster/afr/src/afr-self-heal-metadata.c
+++ b/xlators/cluster/afr/src/afr-self-heal-metadata.c
@@ -190,6 +190,59 @@ out:
return ret;
}
+static int
+__afr_selfheal_metadata_mark_pending_xattrs(call_frame_t *frame, xlator_t *this,
+ inode_t *inode,
+ struct afr_reply *replies,
+ unsigned char *sources)
+{
+ int ret = 0;
+ int i = 0;
+ int m_idx = 0;
+ afr_private_t *priv = NULL;
+ int raw[AFR_NUM_CHANGE_LOGS] = {0};
+ dict_t *xattr = NULL;
+
+ priv = this->private;
+ m_idx = afr_index_for_transaction_type(AFR_METADATA_TRANSACTION);
+ raw[m_idx] = 1;
+
+ xattr = dict_new();
+ if (!xattr)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (sources[i])
+ continue;
+ ret = dict_set_static_bin(xattr, priv->pending_key[i], raw,
+ sizeof(int) * AFR_NUM_CHANGE_LOGS);
+ if (ret) {
+ ret = -1;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (!sources[i])
+ continue;
+ ret = afr_selfheal_post_op(frame, this, inode, i, xattr, NULL);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_INFO, -ret, AFR_MSG_SELF_HEAL_INFO,
+ "Failed to set pending metadata xattr on child %d for %s", i,
+ uuid_utoa(inode->gfid));
+ goto out;
+ }
+ }
+
+ afr_replies_wipe(replies, priv->child_count);
+ ret = afr_selfheal_unlocked_discover(frame, inode, inode->gfid, replies);
+
+out:
+ if (xattr)
+ dict_unref(xattr);
+ return ret;
+}
+
/*
* Look for mismatching uid/gid or mode or user xattrs even if
* AFR xattrs don't say so, and pick one arbitrarily as winner. */
@@ -210,6 +263,7 @@ __afr_selfheal_metadata_finalize_source(call_frame_t *frame, xlator_t *this,
};
int source = -1;
int sources_count = 0;
+ int ret = 0;
priv = this->private;
@@ -300,7 +354,13 @@ __afr_selfheal_metadata_finalize_source(call_frame_t *frame, xlator_t *this,
healed_sinks[i] = 1;
}
}
-
+ if ((sources_count == priv->child_count) && (source > -1) &&
+ (AFR_COUNT(healed_sinks, priv->child_count) != 0)) {
+ ret = __afr_selfheal_metadata_mark_pending_xattrs(frame, this, inode,
+ replies, sources);
+ if (ret < 0)
+ return ret;
+ }
out:
afr_mark_active_sinks(this, sources, locked_on, healed_sinks);
return source;
--
1.8.3.1
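The 24-hex-digit expectations in the test decode as follows: each trusted.afr.<vol>-client-N xattr holds three network-order uint32 counters, assumed here to be ordered data/metadata/entry. A standalone sketch of a metadata-only blame:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    /* Assumed counter order: data, metadata, entry. A metadata blame
     * sets the middle counter to 1 in network byte order. */
    uint32_t pending[3] = {0, htonl(1), 0};
    const unsigned char *p = (const unsigned char *)pending;
    size_t i;

    for (i = 0; i < sizeof(pending); i++)
        printf("%02x", p[i]);
    printf("\n"); /* 000000000000000100000000; all zero means healed */
    return 0;
}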

View File

@ -0,0 +1,308 @@
From 4c47d6dd7c5ddcaa2a1e159427c0f6713fd33907 Mon Sep 17 00:00:00 2001
From: karthik-us <ksubrahm@redhat.com>
Date: Wed, 23 Dec 2020 14:57:51 +0530
Subject: [PATCH 514/517] afr: event gen changes
The general idea of the changes is to prevent resetting event generation
to zero in the inode ctx, since event gen is something that should
follow 'causal order'.
Change #1:
For a read txn, in inode refresh cbk, if event_generation is
found zero, we are failing the read fop. This is not needed
because change in event gen is only a marker for the next inode refresh to
happen and should not be taken into account by the current read txn.
Change #2:
The event gen being zero above can happen if there is a racing lookup,
which resets even get (in afr_lookup_done) if there are non zero afr
xattrs. The resetting is done only to trigger an inode refresh and a
possible client side heal on the next lookup. That can be acheived by
setting the need_refresh flag in the inode ctx. So replaced all
occurences of resetting even gen to zero with a call to
afr_inode_need_refresh_set().
Change #3:
In both the lookup and discover paths, we do an inode refresh that is
not required, since all three essentially do the same thing: update the
inode ctx with the good/bad copies from the brick replies. Inode refresh
also triggers background heals, but it is okay to do that when we call
refresh during the read and write txns and not in the lookup path.
The .t tests that relied on inode refresh in the lookup path to trigger
heals are now changed to do a read txn so that the inode refresh and the
heal happen.
Upstream patch details:
> Change-Id: Iebf39a9be6ffd7ffd6e4046c96b0fa78ade6c5ec
> Fixes: #1179
> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
> Reported-by: Erik Jacobson <erik.jacobson at hpe.com>
Upstream patch: https://review.gluster.org/#/c/glusterfs/+/24316/
BUG: 1640148
Change-Id: Iebf39a9be6ffd7ffd6e4046c96b0fa78ade6c5ec
Signed-off-by: karthik-us <ksubrahm@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/222074
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
---
...fid-mismatch-resolution-with-fav-child-policy.t | 8 +-
xlators/cluster/afr/src/afr-common.c | 92 +++++-----------------
xlators/cluster/afr/src/afr-dir-write.c | 6 +-
xlators/cluster/afr/src/afr.h | 5 +-
4 files changed, 29 insertions(+), 82 deletions(-)
diff --git a/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t b/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t
index f4aa351..12af0c8 100644
--- a/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t
+++ b/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t
@@ -168,8 +168,8 @@ TEST [ "$gfid_1" != "$gfid_2" ]
#We know that second brick has the bigger size file
BIGGER_FILE_MD5=$(md5sum $B0/${V0}1/f3 | cut -d\ -f1)
-TEST ls $M0/f3
-TEST cat $M0/f3
+TEST ls $M0 #Trigger entry heal via readdir inode refresh
+TEST cat $M0/f3 #Trigger data heal via readv inode refresh
EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
#gfid split-brain should be resolved
@@ -215,8 +215,8 @@ TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
-TEST ls $M0/f4
-TEST cat $M0/f4
+TEST ls $M0 #Trigger entry heal via readdir inode refresh
+TEST cat $M0/f4 #Trigger data heal via readv inode refresh
EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
#gfid split-brain should be resolved
diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
index fca2cd5..90b4f14 100644
--- a/xlators/cluster/afr/src/afr-common.c
+++ b/xlators/cluster/afr/src/afr-common.c
@@ -284,7 +284,7 @@ __afr_set_in_flight_sb_status(xlator_t *this, afr_local_t *local,
metadatamap |= (1 << index);
}
if (metadatamap_old != metadatamap) {
- event = 0;
+ __afr_inode_need_refresh_set(inode, this);
}
break;
@@ -297,7 +297,7 @@ __afr_set_in_flight_sb_status(xlator_t *this, afr_local_t *local,
datamap |= (1 << index);
}
if (datamap_old != datamap)
- event = 0;
+ __afr_inode_need_refresh_set(inode, this);
break;
default:
@@ -461,34 +461,6 @@ out:
}
int
-__afr_inode_event_gen_reset_small(inode_t *inode, xlator_t *this)
-{
- int ret = -1;
- uint16_t datamap = 0;
- uint16_t metadatamap = 0;
- uint32_t event = 0;
- uint64_t val = 0;
- afr_inode_ctx_t *ctx = NULL;
-
- ret = __afr_inode_ctx_get(this, inode, &ctx);
- if (ret)
- return ret;
-
- val = ctx->read_subvol;
-
- metadatamap = (val & 0x000000000000ffff) >> 0;
- datamap = (val & 0x00000000ffff0000) >> 16;
- event = 0;
-
- val = ((uint64_t)metadatamap) | (((uint64_t)datamap) << 16) |
- (((uint64_t)event) << 32);
-
- ctx->read_subvol = val;
-
- return ret;
-}
-
-int
__afr_inode_read_subvol_get(inode_t *inode, xlator_t *this, unsigned char *data,
unsigned char *metadata, int *event_p)
{
@@ -559,22 +531,6 @@ out:
}
int
-__afr_inode_event_gen_reset(inode_t *inode, xlator_t *this)
-{
- afr_private_t *priv = NULL;
- int ret = -1;
-
- priv = this->private;
-
- if (priv->child_count <= 16)
- ret = __afr_inode_event_gen_reset_small(inode, this);
- else
- ret = -1;
-
- return ret;
-}
-
-int
afr_inode_read_subvol_get(inode_t *inode, xlator_t *this, unsigned char *data,
unsigned char *metadata, int *event_p)
{
@@ -723,30 +679,22 @@ out:
return need_refresh;
}
-static int
-afr_inode_need_refresh_set(inode_t *inode, xlator_t *this)
+int
+__afr_inode_need_refresh_set(inode_t *inode, xlator_t *this)
{
int ret = -1;
afr_inode_ctx_t *ctx = NULL;
- GF_VALIDATE_OR_GOTO(this->name, inode, out);
-
- LOCK(&inode->lock);
- {
- ret = __afr_inode_ctx_get(this, inode, &ctx);
- if (ret)
- goto unlock;
-
+ ret = __afr_inode_ctx_get(this, inode, &ctx);
+ if (ret == 0) {
ctx->need_refresh = _gf_true;
}
-unlock:
- UNLOCK(&inode->lock);
-out:
+
return ret;
}
int
-afr_inode_event_gen_reset(inode_t *inode, xlator_t *this)
+afr_inode_need_refresh_set(inode_t *inode, xlator_t *this)
{
int ret = -1;
@@ -754,7 +702,7 @@ afr_inode_event_gen_reset(inode_t *inode, xlator_t *this)
LOCK(&inode->lock);
{
- ret = __afr_inode_event_gen_reset(inode, this);
+ ret = __afr_inode_need_refresh_set(inode, this);
}
UNLOCK(&inode->lock);
out:
@@ -1191,7 +1139,7 @@ afr_txn_refresh_done(call_frame_t *frame, xlator_t *this, int err)
ret = afr_inode_get_readable(frame, inode, this, local->readable,
&event_generation, local->transaction.type);
- if (ret == -EIO || (local->is_read_txn && !event_generation)) {
+ if (ret == -EIO) {
/* No readable subvolume even after refresh ==> splitbrain.*/
if (!priv->fav_child_policy) {
err = EIO;
@@ -2413,7 +2361,7 @@ afr_lookup_done(call_frame_t *frame, xlator_t *this)
if (read_subvol == -1)
goto cant_interpret;
if (ret) {
- afr_inode_event_gen_reset(local->inode, this);
+ afr_inode_need_refresh_set(local->inode, this);
dict_del_sizen(local->replies[read_subvol].xdata, GF_CONTENT_KEY);
}
} else {
@@ -2971,6 +2919,7 @@ afr_discover_unwind(call_frame_t *frame, xlator_t *this)
afr_private_t *priv = NULL;
afr_local_t *local = NULL;
int read_subvol = -1;
+ int ret = 0;
unsigned char *data_readable = NULL;
unsigned char *success_replies = NULL;
@@ -2992,7 +2941,10 @@ afr_discover_unwind(call_frame_t *frame, xlator_t *this)
if (!afr_has_quorum(success_replies, this, frame))
goto unwind;
- afr_replies_interpret(frame, this, local->inode, NULL);
+ ret = afr_replies_interpret(frame, this, local->inode, NULL);
+ if (ret) {
+ afr_inode_need_refresh_set(local->inode, this);
+ }
read_subvol = afr_read_subvol_decide(local->inode, this, NULL,
data_readable);
@@ -3248,11 +3200,7 @@ afr_discover(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
afr_read_subvol_get(loc->inode, this, NULL, NULL, &event,
AFR_DATA_TRANSACTION, NULL);
- if (afr_is_inode_refresh_reqd(loc->inode, this, event,
- local->event_generation))
- afr_inode_refresh(frame, this, loc->inode, NULL, afr_discover_do);
- else
- afr_discover_do(frame, this, 0);
+ afr_discover_do(frame, this, 0);
return 0;
out:
@@ -3393,11 +3341,7 @@ afr_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
afr_read_subvol_get(loc->parent, this, NULL, NULL, &event,
AFR_DATA_TRANSACTION, NULL);
- if (afr_is_inode_refresh_reqd(loc->inode, this, event,
- local->event_generation))
- afr_inode_refresh(frame, this, loc->parent, NULL, afr_lookup_do);
- else
- afr_lookup_do(frame, this, 0);
+ afr_lookup_do(frame, this, 0);
return 0;
out:
diff --git a/xlators/cluster/afr/src/afr-dir-write.c b/xlators/cluster/afr/src/afr-dir-write.c
index 416c19d..d419bfc 100644
--- a/xlators/cluster/afr/src/afr-dir-write.c
+++ b/xlators/cluster/afr/src/afr-dir-write.c
@@ -123,11 +123,11 @@ __afr_dir_write_finalize(call_frame_t *frame, xlator_t *this)
continue;
if (local->replies[i].op_ret < 0) {
if (local->inode)
- afr_inode_event_gen_reset(local->inode, this);
+ afr_inode_need_refresh_set(local->inode, this);
if (local->parent)
- afr_inode_event_gen_reset(local->parent, this);
+ afr_inode_need_refresh_set(local->parent, this);
if (local->parent2)
- afr_inode_event_gen_reset(local->parent2, this);
+ afr_inode_need_refresh_set(local->parent2, this);
continue;
}
diff --git a/xlators/cluster/afr/src/afr.h b/xlators/cluster/afr/src/afr.h
index ed5096e..3a2b26d 100644
--- a/xlators/cluster/afr/src/afr.h
+++ b/xlators/cluster/afr/src/afr.h
@@ -948,7 +948,10 @@ afr_inode_read_subvol_set(inode_t *inode, xlator_t *this,
int event_generation);
int
-afr_inode_event_gen_reset(inode_t *inode, xlator_t *this);
+__afr_inode_need_refresh_set(inode_t *inode, xlator_t *this);
+
+int
+afr_inode_need_refresh_set(inode_t *inode, xlator_t *this);
int
afr_read_subvol_select_by_policy(inode_t *inode, xlator_t *this,
--
1.8.3.1
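The helper deleted above was the one place the inode-ctx packing was spelled out; a standalone sketch of that layout (valid for child_count <= 16) clarifies what the patch stops zeroing:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    /* ctx->read_subvol packs: metadatamap in bits 0-15, datamap in
     * bits 16-31, event generation in bits 32-63. */
    uint64_t val = (uint64_t)0x0007               /* metadata readable */
                 | ((uint64_t)0x0007 << 16)       /* data readable */
                 | ((uint64_t)42 << 32);          /* event generation */

    uint16_t metadatamap = val & 0x000000000000ffffULL;
    uint16_t datamap = (val & 0x00000000ffff0000ULL) >> 16;
    uint32_t event = val >> 32;

    printf("metadatamap=%#x datamap=%#x event=%u\n", metadatamap, datamap,
           event);
    /* The old reset helper rebuilt val with event = 0; the patch keeps
     * the event intact and sets the separate need_refresh flag. */
    return 0;
}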

File diff suppressed because it is too large

View File

@ -0,0 +1,338 @@
From 8d24d891aade910b0bb86b27c25a8d2382e19ba0 Mon Sep 17 00:00:00 2001
From: karthik-us <ksubrahm@redhat.com>
Date: Tue, 15 Dec 2020 15:04:19 +0530
Subject: [PATCH 516/517] afr: return -EIO for gfid split-brains.
Problem:
entry-self-heal-anon-dir-off.t was failing occasionally because
afr_gfid_split_brain_source() returned -1 instead of -EIO for
split-brains, causing the code to proceed to afr_lookup_done(), which
in turn succeeded the lookup if there was a parallel client-side heal
going on.
Fix:
Return -EIO instead of -1 so that the lookup fails.
Also, afr_selfheal_name() was using the same dict to get and set values.
This could be problematic if the caller passed local->xdata_req, since
setting a response in a request dict can lead to bugs. So it was changed
to use separate request and response dicts.
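A condensed sketch of the split, following afr_lookup_selfheal_wrap in the hunks below; local, frame and pargfid are assumed to be in scope as in that function:

/* Request keys ("heal-op", "child-name") are read from req; response
 * keys ("gfid-heal-msg") are written only to rsp, never back into
 * local->xdata_req. */
if (!local->xattr_rsp)
    local->xattr_rsp = dict_new();

ret = afr_selfheal_name(frame->this, pargfid, local->loc.name,
                        &local->cont.lookup.gfid_req,
                        local->xattr_req,   /* req: input only */
                        local->xattr_rsp);  /* rsp: output only */
if (ret == -EIO)
    /* Unresolvable gfid split-brain: fail the lookup but still hand
     * back xattr_rsp so glfsheal can print gfid-heal-msg. */
    AFR_STACK_UNWIND(lookup, frame, -1, EIO, NULL, NULL,
                     local->xattr_rsp, NULL);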
Upstream patch details:
> Fixes: #1739
> Credits Pranith Karampuri <pranith.karampuri@phonepe.com>
> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
>Change-Id: I5cb4c547fb25e6bfc8bec1740f7eb64e1a5ad443
Upstream patch: https://github.com/gluster/glusterfs/pull/1819/
BUG: 1640148
Signed-off-by: karthik-us <ksubrahm@redhat.com>
Change-Id: I5cb4c547fb25e6bfc8bec1740f7eb64e1a5ad443
Reviewed-on: https://code.engineering.redhat.com/gerrit/221209
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
---
xlators/cluster/afr/src/afr-common.c | 12 ++++++++----
xlators/cluster/afr/src/afr-self-heal-common.c | 27 +++++++++++++-------------
xlators/cluster/afr/src/afr-self-heal-entry.c | 8 ++++----
xlators/cluster/afr/src/afr-self-heal-name.c | 23 +++++++++++-----------
xlators/cluster/afr/src/afr-self-heal.h | 5 +++--
xlators/cluster/afr/src/afr-self-heald.c | 2 +-
6 files changed, 42 insertions(+), 35 deletions(-)
diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
index 6f2da11..416012c 100644
--- a/xlators/cluster/afr/src/afr-common.c
+++ b/xlators/cluster/afr/src/afr-common.c
@@ -2366,7 +2366,7 @@ afr_lookup_done(call_frame_t *frame, xlator_t *this)
/* If we were called from glfsheal and there is still a gfid
* mismatch, succeed the lookup and let glfsheal print the
* response via gfid-heal-msg.*/
- if (!dict_get_str_sizen(local->xattr_req, "gfid-heal-msg",
+ if (!dict_get_str_sizen(local->xattr_rsp, "gfid-heal-msg",
&gfid_heal_msg))
goto cant_interpret;
@@ -2421,7 +2421,7 @@ afr_lookup_done(call_frame_t *frame, xlator_t *this)
goto error;
}
- ret = dict_get_str_sizen(local->xattr_req, "gfid-heal-msg", &gfid_heal_msg);
+ ret = dict_get_str_sizen(local->xattr_rsp, "gfid-heal-msg", &gfid_heal_msg);
if (!ret) {
ret = dict_set_str_sizen(local->replies[read_subvol].xdata,
"gfid-heal-msg", gfid_heal_msg);
@@ -2768,9 +2768,12 @@ afr_lookup_selfheal_wrap(void *opaque)
local = frame->local;
this = frame->this;
loc_pargfid(&local->loc, pargfid);
+ if (!local->xattr_rsp)
+ local->xattr_rsp = dict_new();
ret = afr_selfheal_name(frame->this, pargfid, local->loc.name,
- &local->cont.lookup.gfid_req, local->xattr_req);
+ &local->cont.lookup.gfid_req, local->xattr_req,
+ local->xattr_rsp);
if (ret == -EIO)
goto unwind;
@@ -2786,7 +2789,8 @@ afr_lookup_selfheal_wrap(void *opaque)
return 0;
unwind:
- AFR_STACK_UNWIND(lookup, frame, -1, EIO, NULL, NULL, NULL, NULL);
+ AFR_STACK_UNWIND(lookup, frame, -1, EIO, NULL, NULL, local->xattr_rsp,
+ NULL);
return 0;
}
diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
index 0a8a7fd..0954d2c 100644
--- a/xlators/cluster/afr/src/afr-self-heal-common.c
+++ b/xlators/cluster/afr/src/afr-self-heal-common.c
@@ -245,7 +245,8 @@ int
afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
inode_t *inode, uuid_t pargfid, const char *bname,
int src_idx, int child_idx,
- unsigned char *locked_on, int *src, dict_t *xdata)
+ unsigned char *locked_on, int *src, dict_t *req,
+ dict_t *rsp)
{
afr_private_t *priv = NULL;
char g1[64] = {
@@ -266,8 +267,8 @@ afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
"All the bricks should be up to resolve the gfid split "
"barin");
- if (xdata) {
- ret = dict_set_sizen_str_sizen(xdata, "gfid-heal-msg",
+ if (rsp) {
+ ret = dict_set_sizen_str_sizen(rsp, "gfid-heal-msg",
SALL_BRICKS_UP_TO_RESOLVE);
if (ret)
gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_DICT_SET_FAILED,
@@ -277,8 +278,8 @@ afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
goto out;
}
- if (xdata) {
- ret = dict_get_int32_sizen(xdata, "heal-op", &heal_op);
+ if (req) {
+ ret = dict_get_int32_sizen(req, "heal-op", &heal_op);
if (ret)
goto fav_child;
} else {
@@ -292,8 +293,8 @@ afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
if (*src == -1) {
gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
SNO_BIGGER_FILE);
- if (xdata) {
- ret = dict_set_sizen_str_sizen(xdata, "gfid-heal-msg",
+ if (rsp) {
+ ret = dict_set_sizen_str_sizen(rsp, "gfid-heal-msg",
SNO_BIGGER_FILE);
if (ret)
gf_msg(this->name, GF_LOG_ERROR, 0,
@@ -310,8 +311,8 @@ afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
if (*src == -1) {
gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
SNO_DIFF_IN_MTIME);
- if (xdata) {
- ret = dict_set_sizen_str_sizen(xdata, "gfid-heal-msg",
+ if (rsp) {
+ ret = dict_set_sizen_str_sizen(rsp, "gfid-heal-msg",
SNO_DIFF_IN_MTIME);
if (ret)
gf_msg(this->name, GF_LOG_ERROR, 0,
@@ -323,7 +324,7 @@ afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
break;
case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK:
- ret = dict_get_str_sizen(xdata, "child-name", &src_brick);
+ ret = dict_get_str_sizen(req, "child-name", &src_brick);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
"Error getting the source "
@@ -335,8 +336,8 @@ afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
if (*src == -1) {
gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
SERROR_GETTING_SRC_BRICK);
- if (xdata) {
- ret = dict_set_sizen_str_sizen(xdata, "gfid-heal-msg",
+ if (rsp) {
+ ret = dict_set_sizen_str_sizen(rsp, "gfid-heal-msg",
SERROR_GETTING_SRC_BRICK);
if (ret)
gf_msg(this->name, GF_LOG_ERROR, 0,
@@ -400,7 +401,7 @@ out:
uuid_utoa_r(replies[child_idx].poststat.ia_gfid, g1), src_idx,
priv->children[src_idx]->name, src_idx,
uuid_utoa_r(replies[src_idx].poststat.ia_gfid, g2));
- return -1;
+ return -EIO;
}
return 0;
}
diff --git a/xlators/cluster/afr/src/afr-self-heal-entry.c b/xlators/cluster/afr/src/afr-self-heal-entry.c
index 20b07dd..a17dd93 100644
--- a/xlators/cluster/afr/src/afr-self-heal-entry.c
+++ b/xlators/cluster/afr/src/afr-self-heal-entry.c
@@ -399,7 +399,7 @@ afr_selfheal_detect_gfid_and_type_mismatch(xlator_t *this,
(ia_type == replies[i].poststat.ia_type)) {
ret = afr_gfid_split_brain_source(this, replies, inode, pargfid,
bname, src_idx, i, locked_on, src,
- NULL);
+ NULL, NULL);
if (ret)
gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
"Skipping conservative merge on the "
@@ -474,7 +474,7 @@ __afr_selfheal_merge_dirent(call_frame_t *frame, xlator_t *this, fd_t *fd,
return ret;
/* In case of type mismatch / unable to resolve gfid mismatch on the
- * entry, return -1.*/
+ * entry, return -EIO.*/
ret = afr_selfheal_detect_gfid_and_type_mismatch(
this, replies, inode, fd->inode->gfid, name, source, locked_on, &src);
@@ -905,7 +905,7 @@ afr_selfheal_entry_do_subvol(call_frame_t *frame, xlator_t *this, fd_t *fd,
break;
}
- if (ret == -1) {
+ if (ret == -EIO) {
/* gfid or type mismatch. */
mismatch = _gf_true;
ret = 0;
@@ -1072,7 +1072,7 @@ afr_selfheal_entry_do(call_frame_t *frame, xlator_t *this, fd_t *fd, int source,
else
ret = afr_selfheal_entry_do_subvol(frame, this, fd, i);
- if (ret == -1) {
+ if (ret == -EIO) {
/* gfid or type mismatch. */
mismatch = _gf_true;
ret = 0;
diff --git a/xlators/cluster/afr/src/afr-self-heal-name.c b/xlators/cluster/afr/src/afr-self-heal-name.c
index 51e3d8c..9ec2066 100644
--- a/xlators/cluster/afr/src/afr-self-heal-name.c
+++ b/xlators/cluster/afr/src/afr-self-heal-name.c
@@ -217,7 +217,8 @@ afr_selfheal_name_gfid_mismatch_check(xlator_t *this, struct afr_reply *replies,
int source, unsigned char *sources,
int *gfid_idx, uuid_t pargfid,
const char *bname, inode_t *inode,
- unsigned char *locked_on, dict_t *xdata)
+ unsigned char *locked_on, dict_t *req,
+ dict_t *rsp)
{
int i = 0;
int gfid_idx_iter = -1;
@@ -245,11 +246,11 @@ afr_selfheal_name_gfid_mismatch_check(xlator_t *this, struct afr_reply *replies,
if (sources[i] || source == -1) {
if ((sources[gfid_idx_iter] || source == -1) &&
gf_uuid_compare(gfid, gfid1)) {
- ret = afr_gfid_split_brain_source(this, replies, inode, pargfid,
- bname, gfid_idx_iter, i,
- locked_on, gfid_idx, xdata);
+ ret = afr_gfid_split_brain_source(
+ this, replies, inode, pargfid, bname, gfid_idx_iter, i,
+ locked_on, gfid_idx, req, rsp);
if (!ret && *gfid_idx >= 0) {
- ret = dict_set_sizen_str_sizen(xdata, "gfid-heal-msg",
+ ret = dict_set_sizen_str_sizen(rsp, "gfid-heal-msg",
"GFID split-brain resolved");
if (ret)
gf_msg(this->name, GF_LOG_ERROR, 0,
@@ -303,7 +304,7 @@ __afr_selfheal_name_do(call_frame_t *frame, xlator_t *this, inode_t *parent,
unsigned char *sources, unsigned char *sinks,
unsigned char *healed_sinks, int source,
unsigned char *locked_on, struct afr_reply *replies,
- void *gfid_req, dict_t *xdata)
+ void *gfid_req, dict_t *req, dict_t *rsp)
{
int gfid_idx = -1;
int ret = -1;
@@ -333,7 +334,7 @@ __afr_selfheal_name_do(call_frame_t *frame, xlator_t *this, inode_t *parent,
ret = afr_selfheal_name_gfid_mismatch_check(this, replies, source, sources,
&gfid_idx, pargfid, bname,
- inode, locked_on, xdata);
+ inode, locked_on, req, rsp);
if (ret)
return ret;
@@ -450,7 +451,7 @@ out:
int
afr_selfheal_name_do(call_frame_t *frame, xlator_t *this, inode_t *parent,
uuid_t pargfid, const char *bname, void *gfid_req,
- dict_t *xdata)
+ dict_t *req, dict_t *rsp)
{
afr_private_t *priv = NULL;
unsigned char *sources = NULL;
@@ -505,7 +506,7 @@ afr_selfheal_name_do(call_frame_t *frame, xlator_t *this, inode_t *parent,
ret = __afr_selfheal_name_do(frame, this, parent, pargfid, bname, inode,
sources, sinks, healed_sinks, source,
- locked_on, replies, gfid_req, xdata);
+ locked_on, replies, gfid_req, req, rsp);
}
unlock:
afr_selfheal_unentrylk(frame, this, parent, this->name, bname, locked_on,
@@ -578,7 +579,7 @@ afr_selfheal_name_unlocked_inspect(call_frame_t *frame, xlator_t *this,
int
afr_selfheal_name(xlator_t *this, uuid_t pargfid, const char *bname,
- void *gfid_req, dict_t *xdata)
+ void *gfid_req, dict_t *req, dict_t *rsp)
{
inode_t *parent = NULL;
call_frame_t *frame = NULL;
@@ -600,7 +601,7 @@ afr_selfheal_name(xlator_t *this, uuid_t pargfid, const char *bname,
if (need_heal) {
ret = afr_selfheal_name_do(frame, this, parent, pargfid, bname,
- gfid_req, xdata);
+ gfid_req, req, rsp);
if (ret)
goto out;
}
diff --git a/xlators/cluster/afr/src/afr-self-heal.h b/xlators/cluster/afr/src/afr-self-heal.h
index c8dc384..6b0bf69 100644
--- a/xlators/cluster/afr/src/afr-self-heal.h
+++ b/xlators/cluster/afr/src/afr-self-heal.h
@@ -127,7 +127,7 @@ afr_throttled_selfheal(call_frame_t *frame, xlator_t *this);
int
afr_selfheal_name(xlator_t *this, uuid_t gfid, const char *name, void *gfid_req,
- dict_t *xdata);
+ dict_t *req, dict_t *rsp);
int
afr_selfheal_data(call_frame_t *frame, xlator_t *this, fd_t *fd);
@@ -357,7 +357,8 @@ int
afr_gfid_split_brain_source(xlator_t *this, struct afr_reply *replies,
inode_t *inode, uuid_t pargfid, const char *bname,
int src_idx, int child_idx,
- unsigned char *locked_on, int *src, dict_t *xdata);
+ unsigned char *locked_on, int *src, dict_t *req,
+ dict_t *rsp);
int
afr_mark_source_sinks_if_file_empty(xlator_t *this, unsigned char *sources,
unsigned char *sinks,
diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c
index 939a135..18aed93 100644
--- a/xlators/cluster/afr/src/afr-self-heald.c
+++ b/xlators/cluster/afr/src/afr-self-heald.c
@@ -295,7 +295,7 @@ afr_shd_selfheal_name(struct subvol_healer *healer, int child, uuid_t parent,
{
int ret = -1;
- ret = afr_selfheal_name(THIS, parent, bname, NULL, NULL);
+ ret = afr_selfheal_name(THIS, parent, bname, NULL, NULL, NULL);
return ret;
}
--
1.8.3.1

View File

@ -0,0 +1,388 @@
From da75c2857fd8b173d47fb7fc3b925ffd14105f64 Mon Sep 17 00:00:00 2001
From: "Kaleb S. KEITHLEY" <kkeithle@rhel7x.kkeithle.usersys.redhat.com>
Date: Wed, 23 Dec 2020 07:39:13 -0500
Subject: [PATCH 517/517] gfapi: 'glfs_h_creat_open' - new API to create handle
and open fd
Right now we have two separate APIs:
- 'glfs_h_creat_handle' to create a handle, and
- 'glfs_h_open' to create a glfd to return to the application.
Having two separate routines can result in access errors while
trying to create and write into a read-only file. Since an fd is
opened even during file/directory creation, introduce a new API
that makes these two operations atomic, i.e. one that creates
both the handle and the fd and passes them to the application.
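A minimal usage sketch of the new call (not part of this patch; the
helper name create_readonly_and_write is hypothetical, error handling
is trimmed, and a connected glfs_t from glfs_new()/glfs_init() is
assumed):

#include <fcntl.h>
#include <glusterfs/api/glfs.h>
#include <glusterfs/api/glfs-handles.h>

static int
create_readonly_and_write(glfs_t *fs, struct glfs_object *parent)
{
    struct stat sb = {0};
    glfs_fd_t *fd = NULL;
    struct glfs_object *obj = NULL;

    /* Handle and fd are created in one atomic call, so the fd is
     * usable even though the new file's mode is read-only. */
    obj = glfs_h_creat_open(fs, parent, "/ro-file", O_RDONLY, 0444,
                            &sb, &fd);
    if (!obj || !fd)
        return -1;

    /* With a separate create followed by glfs_h_open(), this write
     * could fail with EACCES on a 0444 file. */
    if (glfs_write(fd, "payload", 7, 0) < 0)
        return -1;

    glfs_close(fd);
    glfs_h_close(obj);
    return 0;
}

The glfs_h_creat_open.c test added below exercises exactly this
pattern against a real volume.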
This is a backport of the below mainline patch -
- https://review.gluster.org/#/c/glusterfs/+/23448/
- bz#1753569
> Signed-off-by: Soumya Koduri <skoduri@redhat.com>
> Change-Id: Ibf513fcfcdad175f4d7eb6fa7a61b8feec6d33b5
> release-6: commit 5a2af2fd06356f6fc79d591c352caffd4c511c9e
> master: commit 41a0f2aa755ec7162facd30209f2fa3f40308766
BUG: 1910119
Change-Id: Ib397dbe82a6928d8f24251809d30febddd007bfc
Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/222083
Reviewed-by: Soumya Koduri <skoduri@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
api/src/gfapi.aliases | 1 +
api/src/gfapi.map | 5 ++
api/src/glfs-handleops.c | 135 ++++++++++++++++++++++++++++++++++
api/src/glfs-handles.h | 5 ++
tests/basic/gfapi/glfs_h_creat_open.c | 118 +++++++++++++++++++++++++++++
tests/basic/gfapi/glfs_h_creat_open.t | 27 +++++++
6 files changed, 291 insertions(+)
create mode 100644 tests/basic/gfapi/glfs_h_creat_open.c
create mode 100755 tests/basic/gfapi/glfs_h_creat_open.t
diff --git a/api/src/gfapi.aliases b/api/src/gfapi.aliases
index 692ae13..3d3415c 100644
--- a/api/src/gfapi.aliases
+++ b/api/src/gfapi.aliases
@@ -197,3 +197,4 @@ _pub_glfs_fsetattr _glfs_fsetattr$GFAPI_6.0
_pub_glfs_setattr _glfs_setattr$GFAPI_6.0
_pub_glfs_set_statedump_path _glfs_set_statedump_path@GFAPI_6.4
+_pub_glfs_h_creat_open _glfs_h_creat_open@GFAPI_6.6
diff --git a/api/src/gfapi.map b/api/src/gfapi.map
index df65837..614f3f6 100644
--- a/api/src/gfapi.map
+++ b/api/src/gfapi.map
@@ -276,3 +276,8 @@ GFAPI_6.4 {
global:
glfs_set_statedump_path;
} GFAPI_PRIVATE_6.1;
+
+GFAPI_6.6 {
+ global:
+ glfs_h_creat_open;
+} GFAPI_6.4;
diff --git a/api/src/glfs-handleops.c b/api/src/glfs-handleops.c
index d4e1545..7b8ff14 100644
--- a/api/src/glfs-handleops.c
+++ b/api/src/glfs-handleops.c
@@ -843,6 +843,141 @@ invalid_fs:
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_creat, 3.4.2);
struct glfs_object *
+pub_glfs_h_creat_open(struct glfs *fs, struct glfs_object *parent,
+ const char *path, int flags, mode_t mode,
+ struct stat *stat, struct glfs_fd **out_fd)
+{
+ int ret = -1;
+ struct glfs_fd *glfd = NULL;
+ xlator_t *subvol = NULL;
+ inode_t *inode = NULL;
+ loc_t loc = {
+ 0,
+ };
+ struct iatt iatt = {
+ 0,
+ };
+ uuid_t gfid;
+ dict_t *xattr_req = NULL;
+ struct glfs_object *object = NULL;
+ dict_t *fop_attr = NULL;
+
+ /* validate in args */
+ if ((fs == NULL) || (parent == NULL) || (path == NULL) ||
+ (out_fd == NULL)) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ DECLARE_OLD_THIS;
+ __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
+
+ /* get the active volume */
+ subvol = glfs_active_subvol(fs);
+ if (!subvol) {
+ ret = -1;
+ goto out;
+ }
+
+ /* get/refresh the in arg objects inode in correlation to the xlator */
+ inode = glfs_resolve_inode(fs, subvol, parent);
+ if (!inode) {
+ ret = -1;
+ goto out;
+ }
+
+ xattr_req = dict_new();
+ if (!xattr_req) {
+ ret = -1;
+ errno = ENOMEM;
+ goto out;
+ }
+
+ gf_uuid_generate(gfid);
+ ret = dict_set_gfuuid(xattr_req, "gfid-req", gfid, true);
+ if (ret) {
+ ret = -1;
+ errno = ENOMEM;
+ goto out;
+ }
+
+ GLFS_LOC_FILL_PINODE(inode, loc, ret, errno, out, path);
+
+ glfd = glfs_fd_new(fs);
+ if (!glfd) {
+ ret = -1;
+ errno = ENOMEM;
+ goto out;
+ }
+
+ glfd->fd = fd_create(loc.inode, getpid());
+ if (!glfd->fd) {
+ ret = -1;
+ errno = ENOMEM;
+ goto out;
+ }
+ glfd->fd->flags = flags;
+
+ ret = get_fop_attr_thrd_key(&fop_attr);
+ if (ret)
+ gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
+
+ /* fop/op */
+ ret = syncop_create(subvol, &loc, flags, mode, glfd->fd, &iatt, xattr_req,
+ NULL);
+ DECODE_SYNCOP_ERR(ret);
+
+ /* populate out args */
+ if (ret == 0) {
+ glfd->fd->flags = flags;
+
+ ret = glfs_loc_link(&loc, &iatt);
+ if (ret != 0) {
+ goto out;
+ }
+
+ if (stat)
+ glfs_iatt_to_stat(fs, &iatt, stat);
+
+ ret = glfs_create_object(&loc, &object);
+ }
+
+out:
+ if (ret && object != NULL) {
+ /* Release the held reference */
+ glfs_h_close(object);
+ object = NULL;
+ }
+
+ loc_wipe(&loc);
+
+ if (inode)
+ inode_unref(inode);
+
+ if (fop_attr)
+ dict_unref(fop_attr);
+
+ if (xattr_req)
+ dict_unref(xattr_req);
+
+ if (ret && glfd) {
+ GF_REF_PUT(glfd);
+ } else if (glfd) {
+ glfd_set_state_bind(glfd);
+ *out_fd = glfd;
+ }
+
+ glfs_subvol_done(fs, subvol);
+
+ __GLFS_EXIT_FS;
+
+invalid_fs:
+ return object;
+}
+
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_creat_open, 6.6);
+
+struct glfs_object *
pub_glfs_h_mkdir(struct glfs *fs, struct glfs_object *parent, const char *path,
mode_t mode, struct stat *stat)
{
diff --git a/api/src/glfs-handles.h b/api/src/glfs-handles.h
index f7e6a06..4d039b9 100644
--- a/api/src/glfs-handles.h
+++ b/api/src/glfs-handles.h
@@ -250,6 +250,11 @@ int
glfs_h_access(glfs_t *fs, glfs_object_t *object, int mask) __THROW
GFAPI_PUBLIC(glfs_h_access, 3.6.0);
+struct glfs_object *
+glfs_h_creat_open(struct glfs *fs, struct glfs_object *parent, const char *path,
+ int flags, mode_t mode, struct stat *stat,
+ struct glfs_fd **out_fd) __THROW
+ GFAPI_PUBLIC(glfs_h_creat_open, 6.6);
/*
SYNOPSIS
diff --git a/tests/basic/gfapi/glfs_h_creat_open.c b/tests/basic/gfapi/glfs_h_creat_open.c
new file mode 100644
index 0000000..7672561
--- /dev/null
+++ b/tests/basic/gfapi/glfs_h_creat_open.c
@@ -0,0 +1,118 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define LOG_ERR(func, ret) \
+ do { \
+ if (ret != 0) { \
+ fprintf(stderr, "%s : returned error ret(%d), errno(%d)\n", func, \
+ ret, errno); \
+ exit(1); \
+ } else { \
+ fprintf(stderr, "%s : returned %d\n", func, ret); \
+ } \
+ } while (0)
+#define LOG_IF_NO_ERR(func, ret) \
+ do { \
+ if (ret == 0) { \
+ fprintf(stderr, "%s : hasn't returned error %d\n", func, ret); \
+ exit(1); \
+ } else { \
+ fprintf(stderr, "%s : returned %d\n", func, ret); \
+ } \
+ } while (0)
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs = NULL;
+ int ret = 0;
+ struct glfs_object *root = NULL, *leaf = NULL;
+ glfs_fd_t *fd = NULL;
+ char *filename = "/ro-file";
+ struct stat sb = {
+ 0,
+ };
+ char *logfile = NULL;
+ char *volname = NULL;
+ char *hostname = NULL;
+ char buf[32] = "abcdefghijklmnopqrstuvwxyz012345";
+
+ fprintf(stderr, "Starting glfs_h_creat_open\n");
+
+ if (argc != 4) {
+ fprintf(stderr, "Invalid argument\n");
+ exit(1);
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ logfile = argv[3];
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ LOG_ERR("glfs_set_volfile_server", ret);
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ LOG_ERR("glfs_set_logging", ret);
+
+ ret = glfs_init(fs);
+ LOG_ERR("glfs_init", ret);
+
+ sleep(2);
+ root = glfs_h_lookupat(fs, NULL, "/", &sb, 0);
+ if (!root) {
+ ret = -1;
+ LOG_ERR("glfs_h_lookupat root", ret);
+ }
+ leaf = glfs_h_lookupat(fs, root, filename, &sb, 0);
+ if (!leaf) {
+ ret = -1;
+ LOG_IF_NO_ERR("glfs_h_lookupat leaf", ret);
+ }
+
+ leaf = glfs_h_creat_open(fs, root, filename, O_RDONLY, 00444, &sb, &fd);
+ if (!leaf || !fd) {
+ ret = -1;
+ LOG_ERR("glfs_h_creat leaf", ret);
+ }
+ fprintf(stderr, "glfs_h_create_open leaf - %p\n", leaf);
+
+ ret = glfs_write(fd, buf, 32, 0);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_write: error writing to file %s, %s\n", filename,
+ strerror(errno));
+ goto out;
+ }
+
+ ret = glfs_h_getattrs(fs, leaf, &sb);
+ LOG_ERR("glfs_h_getattrs", ret);
+
+ if (sb.st_size != 32) {
+ fprintf(stderr, "glfs_write: post size mismatch\n");
+ goto out;
+ }
+
+ fprintf(stderr, "Successfully opened and written to a read-only file \n");
+out:
+ if (fd)
+ glfs_close(fd);
+
+ ret = glfs_fini(fs);
+ LOG_ERR("glfs_fini", ret);
+
+ fprintf(stderr, "End of libgfapi_fini\n");
+
+ exit(0);
+}
diff --git a/tests/basic/gfapi/glfs_h_creat_open.t b/tests/basic/gfapi/glfs_h_creat_open.t
new file mode 100755
index 0000000..f24ae73
--- /dev/null
+++ b/tests/basic/gfapi/glfs_h_creat_open.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/glfs_h_creat_open.c -lgfapi
+
+TEST ./$(dirname $0)/glfs_h_creat_open $H0 $V0 $logdir/glfs.log
+
+cleanup_tester $(dirname $0)/glfs_h_creat_open
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
--
1.8.3.1

View File

@ -0,0 +1,41 @@
From 818025e467ea98b32a855c92ba6aef6e172e029f Mon Sep 17 00:00:00 2001
From: Nikhil Ladha <nladha@redhat.com>
Date: Fri, 8 Jan 2021 13:12:46 +0530
Subject: [PATCH 518/526] glusterd: Fix for shared storage in ipv6 env
Issue:
Mounting the shared storage volume was failing in an IPv6 environment
when the hostnames were FQDNs. The brick name for the volume was being
cut off; as a result, volume creation was failing.
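As an illustration (hypothetical peer-status line): for
"Hostname: fd00::10", the old `cut -d ':' -f 2` returns only " fd00"
because the value itself contains colons, while the new
`cut -d ' ' -f 2` returns the full "fd00::10". Splitting on the space
after the "Hostname:" key is safe for any value.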
>Change-Id: Ib38993724c709b35b603f9ac666630c50c932c3e
>Fixes: #1406
>Signed-off-by: nik-redhat <nladha@redhat.com>
Upstream patch: https://github.com/gluster/glusterfs/pull/1972
BUG: 1856574
Change-Id: Ib38993724c709b35b603f9ac666630c50c932c3e
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/223248
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
---
extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
index 9597503..e9261af 100755
--- a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
+++ b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
@@ -46,7 +46,7 @@ do
key=`echo $line | cut -d ':' -f 1`
if [ "$key" == "Hostname" ]; then
- hostname=`echo $line | cut -d ':' -f 2 | xargs`
+ hostname=`echo $line | cut -d ' ' -f 2 | xargs`
fi
if [ "$key" == "State" ]; then
--
1.8.3.1

View File

@ -0,0 +1,58 @@
From 6ed227367b6eb7d6d7afde3859ad0a711a3adf36 Mon Sep 17 00:00:00 2001
From: Leela Venkaiah G <gleelavenkaiah@gmail.com>
Date: Wed, 13 Jan 2021 16:02:25 +0530
Subject: [PATCH 519/526] glusterfs-events: Fix incorrect attribute access
(#2002)
Issue: When GlusterCmdException is raised, the current code tries to
access the `message` attribute, which doesn't exist, resulting in a
malformed error string on failed operations.
Code Change: Replace `message` with `args[0]`
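(Background: BaseException.message was deprecated in Python 2.6 and
removed in Python 3; the arguments an exception was raised with are
available only as the e.args tuple, so the tuple wrapped by
GlusterCmdException is reached via e.args[0].)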
>Fixes: #2001
>Change-Id: I65c9f0ee79310937a384025b8d454acda154e4bb
>Signed-off-by: Leela Venkaiah G <lgangava@redhat.com>
Upstream patch: https://github.com/gluster/glusterfs/pull/2002
BUG: 1600459
Change-Id: I65c9f0ee79310937a384025b8d454acda154e4bb
Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/223584
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
events/src/peer_eventsapi.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/events/src/peer_eventsapi.py b/events/src/peer_eventsapi.py
index 26b77a0..c388da4 100644
--- a/events/src/peer_eventsapi.py
+++ b/events/src/peer_eventsapi.py
@@ -174,9 +174,9 @@ def sync_to_peers(args):
sync_file_to_peers(WEBHOOKS_FILE_TO_SYNC)
except GlusterCmdException as e:
# Print stdout if stderr is empty
- errmsg = e.message[2] if e.message[2] else e.message[1]
+ errmsg = e.args[0][2] if e.args[0][2] else e.args[0][1]
handle_output_error("Failed to sync Webhooks file: [Error: {0}]"
- "{1}".format(e.message[0], errmsg),
+ "{1}".format(e.args[0][0], errmsg),
errcode=ERROR_WEBHOOK_SYNC_FAILED,
json_output=args.json)
@@ -185,9 +185,9 @@ def sync_to_peers(args):
sync_file_to_peers(CUSTOM_CONFIG_FILE_TO_SYNC)
except GlusterCmdException as e:
# Print stdout if stderr is empty
- errmsg = e.message[2] if e.message[2] else e.message[1]
+ errmsg = e.args[0][2] if e.args[0][2] else e.args[0][1]
handle_output_error("Failed to sync Config file: [Error: {0}]"
- "{1}".format(e.message[0], errmsg),
+ "{1}".format(e.args[0][0], errmsg),
errcode=ERROR_CONFIG_SYNC_FAILED,
json_output=args.json)
--
1.8.3.1

View File

@ -0,0 +1,70 @@
From a3fd2c9d85bbd23131c985599d9c9d74f66f32d2 Mon Sep 17 00:00:00 2001
From: Pranith Kumar K <pkarampu@redhat.com>
Date: Thu, 10 Oct 2019 10:50:59 +0530
Subject: [PATCH 520/526] performance/open-behind: seek fop should
open_and_resume
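Like the other fd-based fops in this xlator, seek must not be wound
while the real open may still be pending: the fop is wrapped in a call
stub and handed to open_and_resume(), which triggers the delayed open
if needed and resumes the seek once the backend fd exists.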
Upstream patch:
> Upstream-patch-link: https://review.gluster.org/#/c/glusterfs/+/23530
> fixes: bz#1760187
> Change-Id: I4c6ad13194d4fc5c7705e35bf9a27fce504b51f9
> Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
BUG: 1830713
Change-Id: I4c6ad13194d4fc5c7705e35bf9a27fce504b51f9
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/224484
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/performance/open-behind/src/open-behind.c | 27 +++++++++++++++++++++++
1 file changed, 27 insertions(+)
diff --git a/xlators/performance/open-behind/src/open-behind.c b/xlators/performance/open-behind/src/open-behind.c
index 268c717..3ee3c40 100644
--- a/xlators/performance/open-behind/src/open-behind.c
+++ b/xlators/performance/open-behind/src/open-behind.c
@@ -711,6 +711,32 @@ err:
}
int
+ob_seek(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ gf_seek_what_t what, dict_t *xdata)
+{
+ call_stub_t *stub = NULL;
+ fd_t *wind_fd = NULL;
+
+ wind_fd = ob_get_wind_fd(this, fd, NULL);
+
+ stub = fop_seek_stub(frame, default_seek_resume, wind_fd, offset, what,
+ xdata);
+
+ fd_unref(wind_fd);
+
+ if (!stub)
+ goto err;
+
+ open_and_resume(this, wind_fd, stub);
+
+ return 0;
+err:
+ STACK_UNWIND_STRICT(fstat, frame, -1, ENOMEM, 0, 0);
+
+ return 0;
+}
+
+int
ob_flush(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
{
call_stub_t *stub = NULL;
@@ -1276,6 +1302,7 @@ struct xlator_fops fops = {
.flush = ob_flush,
.fsync = ob_fsync,
.fstat = ob_fstat,
+ .seek = ob_seek,
.ftruncate = ob_ftruncate,
.fsetxattr = ob_fsetxattr,
.setxattr = ob_setxattr,
--
1.8.3.1

View File

@ -0,0 +1,121 @@
From 211d0f7dbb4991b2191925973222ebc79f010e84 Mon Sep 17 00:00:00 2001
From: Xavi Hernandez <xhernandez@redhat.com>
Date: Sun, 8 Mar 2020 18:36:45 +0100
Subject: [PATCH 521/526] open-behind: fix missing fd reference
Open-behind was not keeping any reference on fds pending to be
opened. This made it possible for a concurrent close and an entry
fop (unlink, rename, ...) to destroy the fd while it was still
being used.
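With this patch the pending open owns exactly one fd reference:
fd_ref() is taken when the open is queued in ob_open_behind(), and it
is released either in ob_wake_cbk() once the background open
completes, or in ob_fd_free() when the frame is still present (i.e.
the background open never ran).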
Upstream patch:
> Upstream patch link: https://review.gluster.org/c/glusterfs/+/24204
> Change-Id: Ie9e992902cf2cd7be4af1f8b4e57af9bd6afd8e9
> Fixes: bz#1810934
> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
Change-Id: Ie9e992902cf2cd7be4af1f8b4e57af9bd6afd8e9
BUG: 1830713
Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/224485
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/performance/open-behind/src/open-behind.c | 27 ++++++++++++++---------
1 file changed, 16 insertions(+), 11 deletions(-)
diff --git a/xlators/performance/open-behind/src/open-behind.c b/xlators/performance/open-behind/src/open-behind.c
index 3ee3c40..dd2f2fd 100644
--- a/xlators/performance/open-behind/src/open-behind.c
+++ b/xlators/performance/open-behind/src/open-behind.c
@@ -206,8 +206,13 @@ ob_fd_free(ob_fd_t *ob_fd)
if (ob_fd->xdata)
dict_unref(ob_fd->xdata);
- if (ob_fd->open_frame)
+ if (ob_fd->open_frame) {
+        /* If we still have a frame it means that background open has never
+ * been triggered. We need to release the pending reference. */
+ fd_unref(ob_fd->fd);
+
STACK_DESTROY(ob_fd->open_frame->root);
+ }
GF_FREE(ob_fd);
}
@@ -297,6 +302,7 @@ ob_wake_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
call_resume(stub);
}
+ /* The background open is completed. We can release the 'fd' reference. */
fd_unref(fd);
STACK_DESTROY(frame->root);
@@ -331,7 +337,9 @@ ob_fd_wake(xlator_t *this, fd_t *fd, ob_fd_t *ob_fd)
}
if (frame) {
- frame->local = fd_ref(fd);
+ /* We don't need to take a reference here. We already have a reference
+ * while the open is pending. */
+ frame->local = fd;
STACK_WIND(frame, ob_wake_cbk, FIRST_CHILD(this),
FIRST_CHILD(this)->fops->open, &ob_fd->loc, ob_fd->flags, fd,
@@ -345,15 +353,12 @@ void
ob_inode_wake(xlator_t *this, struct list_head *ob_fds)
{
ob_fd_t *ob_fd = NULL, *tmp = NULL;
- fd_t *fd = NULL;
if (!list_empty(ob_fds)) {
list_for_each_entry_safe(ob_fd, tmp, ob_fds, ob_fds_on_inode)
{
ob_fd_wake(this, ob_fd->fd, ob_fd);
- fd = ob_fd->fd;
ob_fd_free(ob_fd);
- fd_unref(fd);
}
}
}
@@ -365,7 +370,7 @@ ob_fd_copy(ob_fd_t *src, ob_fd_t *dst)
if (!src || !dst)
goto out;
- dst->fd = __fd_ref(src->fd);
+ dst->fd = src->fd;
dst->loc.inode = inode_ref(src->loc.inode);
gf_uuid_copy(dst->loc.gfid, src->loc.gfid);
dst->flags = src->flags;
@@ -509,7 +514,6 @@ ob_open_behind(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
ob_fd->ob_inode = ob_inode;
- /* don't do fd_ref, it'll cause leaks */
ob_fd->fd = fd;
ob_fd->open_frame = copy_frame(frame);
@@ -539,15 +543,16 @@ ob_open_behind(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
}
UNLOCK(&fd->inode->lock);
- if (!open_in_progress && !unlinked) {
- fd_ref(fd);
+ /* We take a reference while the background open is pending or being
+ * processed. If we finally wind the request in the foreground, then
+ * ob_fd_free() will take care of this additional reference. */
+ fd_ref(fd);
+ if (!open_in_progress && !unlinked) {
STACK_UNWIND_STRICT(open, frame, 0, 0, fd, xdata);
if (!conf->lazy_open)
ob_fd_wake(this, fd, NULL);
-
- fd_unref(fd);
} else {
ob_fd_free(ob_fd);
STACK_WIND(frame, default_open_cbk, FIRST_CHILD(this),
--
1.8.3.1

View File

@ -0,0 +1,746 @@
From 46e2bbd52d4427c1348fa38dcb5d2b5f125555f1 Mon Sep 17 00:00:00 2001
From: Amar Tumballi <amarts@redhat.com>
Date: Thu, 30 May 2019 15:25:01 +0530
Subject: [PATCH 522/526] lcov: improve line coverage
upcall: remove extra variable assignment and use just one
initialization.
open-behind: reduce the overall number of lines in functions
not frequently called
selinux: reduce some lines in init failure cases
Upstream patch:
> Upstream-patch-link: https://review.gluster.org/#/c/glusterfs/+/22789
> updates: bz#1693692
> Change-Id: I7c1de94f2ec76a5bfe1f48a9632879b18e5fbb95
> Signed-off-by: Amar Tumballi <amarts@redhat.com>
BUG: 1830713
Change-Id: I7c1de94f2ec76a5bfe1f48a9632879b18e5fbb95
Signed-off-by: Amar Tumballi <amarts@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/224486
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/features/selinux/src/selinux.c | 6 +-
xlators/features/upcall/src/upcall.c | 108 +++++++---------------
xlators/performance/open-behind/src/open-behind.c | 58 ++++--------
3 files changed, 55 insertions(+), 117 deletions(-)
diff --git a/xlators/features/selinux/src/selinux.c b/xlators/features/selinux/src/selinux.c
index 58b4c5d..e8e16cd 100644
--- a/xlators/features/selinux/src/selinux.c
+++ b/xlators/features/selinux/src/selinux.c
@@ -234,7 +234,6 @@ init(xlator_t *this)
priv = GF_CALLOC(1, sizeof(*priv), gf_selinux_mt_selinux_priv_t);
if (!priv) {
gf_log(this->name, GF_LOG_ERROR, "out of memory");
- ret = ENOMEM;
goto out;
}
@@ -242,7 +241,6 @@ init(xlator_t *this)
this->local_pool = mem_pool_new(selinux_priv_t, 64);
if (!this->local_pool) {
- ret = -1;
gf_msg(this->name, GF_LOG_ERROR, ENOMEM, SL_MSG_ENOMEM,
"Failed to create local_t's memory pool");
goto out;
@@ -252,9 +250,7 @@ init(xlator_t *this)
ret = 0;
out:
if (ret) {
- if (priv) {
- GF_FREE(priv);
- }
+ GF_FREE(priv);
mem_pool_destroy(this->local_pool);
}
return ret;
diff --git a/xlators/features/upcall/src/upcall.c b/xlators/features/upcall/src/upcall.c
index 2583c50..0795f58 100644
--- a/xlators/features/upcall/src/upcall.c
+++ b/xlators/features/upcall/src/upcall.c
@@ -57,14 +57,13 @@ static int32_t
up_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
fd_t *fd, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -111,14 +110,13 @@ up_writev(call_frame_t *frame, xlator_t *this, fd_t *fd, struct iovec *vector,
int count, off_t off, uint32_t flags, struct iobref *iobref,
dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -167,14 +165,13 @@ static int32_t
up_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
off_t offset, uint32_t flags, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -220,14 +217,13 @@ static int32_t
up_lk(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t cmd,
struct gf_flock *flock, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -274,14 +270,13 @@ static int32_t
up_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset,
dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -343,14 +338,13 @@ static int32_t
up_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc, struct iatt *stbuf,
int32_t valid, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -410,14 +404,13 @@ static int32_t
up_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, newloc, NULL, oldloc->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -472,14 +465,13 @@ static int32_t
up_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflag,
dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, loc, NULL, loc->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -531,14 +523,13 @@ static int32_t
up_link(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, newloc, NULL, oldloc->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -592,14 +583,13 @@ static int32_t
up_rmdir(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, loc, NULL, loc->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -653,14 +643,13 @@ static int32_t
up_mkdir(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
mode_t umask, dict_t *params)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, loc, NULL, loc->parent, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -717,15 +706,13 @@ static int32_t
up_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
mode_t mode, mode_t umask, fd_t *fd, dict_t *params)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, loc, NULL, loc->parent, NULL);
-
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -774,14 +761,13 @@ out:
static int32_t
up_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -826,14 +812,13 @@ out:
static int32_t
up_stat(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -852,14 +837,13 @@ err:
static int32_t
up_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -879,14 +863,13 @@ static int32_t
up_ftruncate(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -932,14 +915,13 @@ static int32_t
up_access(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t mask,
dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -986,14 +968,13 @@ static int32_t
up_readlink(call_frame_t *frame, xlator_t *this, loc_t *loc, size_t size,
dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1047,14 +1028,13 @@ static int32_t
up_mknod(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
dev_t rdev, mode_t umask, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, loc, NULL, loc->parent, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1110,14 +1090,13 @@ static int32_t
up_symlink(call_frame_t *frame, xlator_t *this, const char *linkpath,
loc_t *loc, mode_t umask, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, loc, NULL, loc->parent, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1164,14 +1143,13 @@ static int32_t
up_opendir(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd,
dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1216,14 +1194,13 @@ out:
static int32_t
up_statfs(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1270,14 +1247,13 @@ static int32_t
up_readdir(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
off_t off, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1334,14 +1310,13 @@ static int32_t
up_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
off_t off, dict_t *dict)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1361,14 +1336,13 @@ static int32_t
up_fsetattr(call_frame_t *frame, xlator_t *this, fd_t *fd, struct iatt *stbuf,
int32_t valid, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1415,14 +1389,13 @@ static int32_t
up_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t mode,
off_t offset, size_t len, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1470,14 +1443,13 @@ static int32_t
up_discard(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
size_t len, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1524,14 +1496,13 @@ static int
up_zerofill(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
off_t len, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1577,14 +1548,13 @@ static int32_t
up_seek(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
gf_seek_what_t what, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1652,14 +1622,13 @@ static int32_t
up_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
int32_t flags, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, loc, NULL, loc->inode, dict);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1727,14 +1696,13 @@ static int32_t
up_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *dict,
int32_t flags, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, fd, fd->inode, dict);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1800,7 +1768,7 @@ static int32_t
up_fremovexattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
dict_t *xattr = NULL;
@@ -1808,13 +1776,11 @@ up_fremovexattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
xattr = dict_for_key_value(name, "", 1, _gf_true);
if (!xattr) {
- op_errno = ENOMEM;
goto err;
}
local = upcall_local_init(frame, this, NULL, fd, fd->inode, xattr);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1885,7 +1851,7 @@ static int32_t
up_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
const char *name, dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
dict_t *xattr = NULL;
@@ -1893,13 +1859,11 @@ up_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
xattr = dict_for_key_value(name, "", 1, _gf_true);
if (!xattr) {
- op_errno = ENOMEM;
goto err;
}
local = upcall_local_init(frame, this, loc, NULL, loc->inode, xattr);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -1950,14 +1914,13 @@ static int32_t
up_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, fd->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
@@ -2000,14 +1963,13 @@ static int32_t
up_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, const char *name,
dict_t *xdata)
{
- int32_t op_errno = -1;
+ int32_t op_errno = ENOMEM;
upcall_local_t *local = NULL;
EXIT_IF_UPCALL_OFF(this, out);
local = upcall_local_init(frame, this, NULL, NULL, loc->inode, NULL);
if (!local) {
- op_errno = ENOMEM;
goto err;
}
diff --git a/xlators/performance/open-behind/src/open-behind.c b/xlators/performance/open-behind/src/open-behind.c
index dd2f2fd..cbe89ec 100644
--- a/xlators/performance/open-behind/src/open-behind.c
+++ b/xlators/performance/open-behind/src/open-behind.c
@@ -581,7 +581,7 @@ ob_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags, fd_t *fd,
{
fd_t *old_fd = NULL;
int ret = -1;
- int op_errno = 0;
+ int op_errno = ENOMEM;
call_stub_t *stub = NULL;
old_fd = fd_lookup(fd->inode, 0);
@@ -589,7 +589,6 @@ ob_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags, fd_t *fd,
/* open-behind only when this is the first FD */
stub = fop_open_stub(frame, default_open_resume, loc, flags, fd, xdata);
if (!stub) {
- op_errno = ENOMEM;
fd_unref(old_fd);
goto err;
}
@@ -603,7 +602,6 @@ ob_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags, fd_t *fd,
ret = ob_open_behind(frame, this, loc, flags, fd, xdata);
if (ret) {
- op_errno = ENOMEM;
goto err;
}
@@ -900,18 +898,12 @@ int
ob_finodelk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
int cmd, struct gf_flock *flock, dict_t *xdata)
{
- call_stub_t *stub = NULL;
-
- stub = fop_finodelk_stub(frame, default_finodelk_resume, volume, fd, cmd,
- flock, xdata);
- if (!stub)
- goto err;
-
- open_and_resume(this, fd, stub);
-
- return 0;
-err:
- STACK_UNWIND_STRICT(finodelk, frame, -1, ENOMEM, 0);
+ call_stub_t *stub = fop_finodelk_stub(frame, default_finodelk_resume,
+ volume, fd, cmd, flock, xdata);
+ if (stub)
+ open_and_resume(this, fd, stub);
+ else
+ STACK_UNWIND_STRICT(finodelk, frame, -1, ENOMEM, 0);
return 0;
}
@@ -921,18 +913,12 @@ ob_fentrylk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
const char *basename, entrylk_cmd cmd, entrylk_type type,
dict_t *xdata)
{
- call_stub_t *stub = NULL;
-
- stub = fop_fentrylk_stub(frame, default_fentrylk_resume, volume, fd,
- basename, cmd, type, xdata);
- if (!stub)
- goto err;
-
- open_and_resume(this, fd, stub);
-
- return 0;
-err:
- STACK_UNWIND_STRICT(fentrylk, frame, -1, ENOMEM, 0);
+ call_stub_t *stub = fop_fentrylk_stub(
+ frame, default_fentrylk_resume, volume, fd, basename, cmd, type, xdata);
+ if (stub)
+ open_and_resume(this, fd, stub);
+ else
+ STACK_UNWIND_STRICT(fentrylk, frame, -1, ENOMEM, 0);
return 0;
}
@@ -941,18 +927,12 @@ int
ob_fxattrop(call_frame_t *frame, xlator_t *this, fd_t *fd,
gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
{
- call_stub_t *stub = NULL;
-
- stub = fop_fxattrop_stub(frame, default_fxattrop_resume, fd, optype, xattr,
- xdata);
- if (!stub)
- goto err;
-
- open_and_resume(this, fd, stub);
-
- return 0;
-err:
- STACK_UNWIND_STRICT(fxattrop, frame, -1, ENOMEM, 0, 0);
+ call_stub_t *stub = fop_fxattrop_stub(frame, default_fxattrop_resume, fd,
+ optype, xattr, xdata);
+ if (stub)
+ open_and_resume(this, fd, stub);
+ else
+ STACK_UNWIND_STRICT(fxattrop, frame, -1, ENOMEM, 0, 0);
return 0;
}
--
1.8.3.1

File diff suppressed because it is too large

View File

@ -0,0 +1,70 @@
From 36dddf59a02d91d3db5b124be626ab6bc235ed5a Mon Sep 17 00:00:00 2001
From: Xavi Hernandez <xhernandez@redhat.com>
Date: Wed, 19 Aug 2020 23:27:38 +0200
Subject: [PATCH 524/526] open-behind: fix call_frame leak
When an open was delayed, a copy of the frame was created because the
current frame was used to unwind the "fake" open. When the open was
actually sent, the frame was correctly destroyed. However, if the file
was closed before the open needed to be sent, the frame was not destroyed.
This patch correctly destroys the frame in all cases.
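The fix centralizes the cleanup in a new ob_open_destroy() helper that
destroys the copied frame (STACK_DESTROY), the stub and the pending fd
reference together; it is called both from ob_open_dispatch() when the
fd was already closed and from ob_fdclose().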
Upstream patch:
> Upstream-patch-link: https://review.gluster.org/#/c/glusterfs/+/24892
> Change-Id: I8c00fc7f15545c240e8151305d9e4cf06d653926
> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
> Fixes: #1440
BUG: 1830713
Change-Id: I8c00fc7f15545c240e8151305d9e4cf06d653926
Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/224488
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/performance/open-behind/src/open-behind.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/xlators/performance/open-behind/src/open-behind.c b/xlators/performance/open-behind/src/open-behind.c
index e43fe73..1ab635e 100644
--- a/xlators/performance/open-behind/src/open-behind.c
+++ b/xlators/performance/open-behind/src/open-behind.c
@@ -333,6 +333,14 @@ ob_stub_dispatch(xlator_t *xl, ob_inode_t *ob_inode, fd_t *fd,
return 0;
}
+static void
+ob_open_destroy(call_stub_t *stub, fd_t *fd)
+{
+ STACK_DESTROY(stub->frame->root);
+ call_stub_destroy(stub);
+ fd_unref(fd);
+}
+
static int32_t
ob_open_dispatch(xlator_t *xl, ob_inode_t *ob_inode, fd_t *fd,
call_stub_t *stub)
@@ -355,8 +363,7 @@ ob_open_dispatch(xlator_t *xl, ob_inode_t *ob_inode, fd_t *fd,
if (stub != NULL) {
if (closed) {
- call_stub_destroy(stub);
- fd_unref(fd);
+ ob_open_destroy(stub, fd);
} else {
call_resume(stub);
}
@@ -776,8 +783,7 @@ ob_fdclose(xlator_t *this, fd_t *fd)
UNLOCK(&fd->inode->lock);
if (stub != NULL) {
- call_stub_destroy(stub);
- fd_unref(fd);
+ ob_open_destroy(stub, fd);
}
ob_resume_pending(&list);
--
1.8.3.1

View File

@ -0,0 +1,109 @@
From 41aae052b5e3afe64d3e0668643726bab0e77265 Mon Sep 17 00:00:00 2001
From: Xavi Hernandez <xhernandez@redhat.com>
Date: Fri, 4 Sep 2020 14:49:50 +0200
Subject: [PATCH 525/526] open-behind: implement create fop
Open-behind didn't implement the create fop. Because of this, created
files were not accounted for in the number of open fds, which could
cause future opens to be delayed when they shouldn't be.
This patch implements the create fop. It also fixes a problem when
destroying the stack: when frame->local was not NULL, STACK_DESTROY()
tried to mem_put() it, which is not correct.
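As the implementation below notes, create requests are never delayed:
ob_create() either forwards them synchronously or queues a retry stub
behind a pending first open. The new fd still enters open-behind's
open_count accounting, which is why the failure path decrements
open_count by hand - ob_fdclose() will never run for an fd whose
create failed.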
Upstream patch:
> Upstream-patch-link: https://review.gluster.org/#/c/glusterfs/+/24953
> Fixes: #1440
> Change-Id: Ic982bad07d4af30b915d7eb1fbcef7a847a45869
> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
BUG: 1830713
Change-Id: Ic982bad07d4af30b915d7eb1fbcef7a847a45869
Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/224489
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/performance/open-behind/src/open-behind.c | 52 +++++++++++++++++++++++
1 file changed, 52 insertions(+)
diff --git a/xlators/performance/open-behind/src/open-behind.c b/xlators/performance/open-behind/src/open-behind.c
index 1ab635e..600c3b6 100644
--- a/xlators/performance/open-behind/src/open-behind.c
+++ b/xlators/performance/open-behind/src/open-behind.c
@@ -336,6 +336,7 @@ ob_stub_dispatch(xlator_t *xl, ob_inode_t *ob_inode, fd_t *fd,
static void
ob_open_destroy(call_stub_t *stub, fd_t *fd)
{
+ stub->frame->local = NULL;
STACK_DESTROY(stub->frame->root);
call_stub_destroy(stub);
fd_unref(fd);
@@ -516,6 +517,56 @@ ob_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags, fd_t *fd,
}
static int32_t
+ob_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+ mode_t mode, mode_t umask, fd_t *fd, dict_t *xdata)
+{
+ ob_inode_t *ob_inode;
+ call_stub_t *stub;
+ fd_t *first_fd;
+ ob_state_t state;
+
+ /* Create requests are never delayed. We always send them synchronously. */
+ state = ob_open_and_resume_fd(this, fd, 1, true, true, &ob_inode,
+ &first_fd);
+ if (state == OB_STATE_READY) {
+ /* There's no pending open, but there are other file descriptors opened
+ * so we simply forward the request synchronously. */
+ return default_create(frame, this, loc, flags, mode, umask, fd, xdata);
+ }
+
+ if (state == OB_STATE_OPEN_TRIGGERED) {
+ /* The first open is in progress (either because it was already issued
+ * or because this request triggered it). We try to create a new stub
+ * to retry the operation once the initial open completes. */
+ stub = fop_create_stub(frame, ob_create, loc, flags, mode, umask, fd,
+ xdata);
+ if (stub != NULL) {
+ return ob_stub_dispatch(this, ob_inode, first_fd, stub);
+ }
+
+ state = -ENOMEM;
+ }
+
+ /* Since we forced a synchronous request, OB_STATE_FIRST_OPEN will never
+ * be returned by ob_open_and_resume_fd(). If we are here it can only be
+ * because there has been a problem. */
+
+ /* In case of failure we need to decrement the number of open files because
+ * ob_fdclose() won't be called. */
+
+ LOCK(&fd->inode->lock);
+ {
+ ob_inode->open_count--;
+ }
+ UNLOCK(&fd->inode->lock);
+
+ gf_smsg(this->name, GF_LOG_ERROR, -state, OPEN_BEHIND_MSG_FAILED, "fop=%s",
+ "create", "path=%s", loc->path, NULL);
+
+ return default_create_failure_cbk(frame, -state);
+}
+
+static int32_t
ob_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
off_t offset, uint32_t flags, dict_t *xdata)
{
@@ -946,6 +997,7 @@ fini(xlator_t *this)
struct xlator_fops fops = {
.open = ob_open,
+ .create = ob_create,
.readv = ob_readv,
.writev = ob_writev,
.flush = ob_flush,
--
1.8.3.1

View File

@ -0,0 +1,44 @@
From baeca3c9b70548463ceea0ae27e6f98cf06e96b7 Mon Sep 17 00:00:00 2001
From: srijan-sivakumar <ssivakum@redhat.com>
Date: Tue, 28 Jul 2020 22:27:34 +0530
Subject: [PATCH 526/526] Quota quota_fsck.py, converting byte string to string
Issue: The quota_fsck.py script throws a TypeError
because the data is read as bytes and string operations
are then applied to it. In Python 3, strings are unicode,
hence the TypeError.
Code Changes:
Decoding the bytes value into utf-8 format.
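For example, an xattr pair read from the brick in binary mode arrives
as something like b'trusted.glusterfs.quota.size=0x...'; under
Python 3, calling xattr.split("=") on that bytes object raises
"TypeError: a bytes-like object is required, not 'str'". Decoding to
UTF-8 first restores plain-str semantics for the rest of the parsing.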
>Change-Id: Ia1ff52a821d664a371c8166692ff506ae39f6e40
>Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
>Fixes: #1401
Upstream patch: https://review.gluster.org/c/glusterfs/+/24785
BUG: 1719171
Change-Id: Ia1ff52a821d664a371c8166692ff506ae39f6e40
Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/224780
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Kshithij Iyer <kiyer@redhat.com>
Reviewed-by: Rinku Kothiya <rkothiya@redhat.com>
---
extras/quota/quota_fsck.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/extras/quota/quota_fsck.py b/extras/quota/quota_fsck.py
index 174f2a2..ea8d638 100755
--- a/extras/quota/quota_fsck.py
+++ b/extras/quota/quota_fsck.py
@@ -157,6 +157,7 @@ def get_quota_xattr_brick(dpath):
xattr_dict['parents'] = {}
for xattr in pairs:
+ xattr = xattr.decode("utf-8")
xattr_key = xattr.split("=")[0]
if re.search("# file:", xattr_key):
# skip the file comment
--
1.8.3.1

View File

@ -0,0 +1,200 @@
From 4152c77defac24ace3b1b6b9cc81a4f614254e4f Mon Sep 17 00:00:00 2001
From: srijan-sivakumar <ssivakum@redhat.com>
Date: Sat, 18 Jul 2020 05:59:09 +0530
Subject: [PATCH 527/532] Events: Socket creation after getaddrinfo and IPv4
and IPv6 packet capture
Issue: Currently, the socket creation is done
before the getaddrinfo function is invoked. This
can cause a mismatch between the protocol and
address families of the created socket and the
result of the getaddrinfo API. Also, the
glustereventsd UDP server by default only captures
IPv4 packets, so IPv6 packets are not captured at all.
Code Changes:
1. Modified the socket creation in such a way that
the parameters taken in are dependent upon the
result of the getaddrinfo function.
2. Created subclasses in glustereventsd.py that set
the address family explicitly, for both AF_INET and AF_INET6.
3. Modified the addresses in eventsapiconf.py.in.
Reasoning behind the approach:
1. If we are using the getaddrinfo function, then
socket creation should happen only after we have
checked that valid addresses were returned. Hence
socket creation should come after the call to
getaddrinfo.
2. The listening server which pushes the events
to the webhook has to listen for both IPv4 and
IPv6 messages, as we cannot be sure which address
family is picked in _gf_event.
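The resulting loop in events.c follows the canonical getaddrinfo
usage: iterate over the returned addrinfo list, attempt socket() with
each entry's family/socktype/protocol, keep the first descriptor that
is created successfully, and fail only if every entry fails.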
>Fixes: #1377
>Change-Id: I568dcd1a977c8832f0fef981e1f81cac7043c760
>Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
Upstream patch: https://review.gluster.org/c/glusterfs/+/24722
BUG: 1814744
Change-Id: I568dcd1a977c8832f0fef981e1f81cac7043c760
Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/225567
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
---
events/src/eventsapiconf.py.in | 2 ++
events/src/glustereventsd.py | 37 ++++++++++++++++++++++++++++++-------
libglusterfs/src/events.c | 27 +++++++++++++++++++--------
3 files changed, 51 insertions(+), 15 deletions(-)
diff --git a/events/src/eventsapiconf.py.in b/events/src/eventsapiconf.py.in
index 76b5954..700093b 100644
--- a/events/src/eventsapiconf.py.in
+++ b/events/src/eventsapiconf.py.in
@@ -28,6 +28,8 @@ def get_glusterd_workdir():
return glusterd_workdir
SERVER_ADDRESS = "0.0.0.0"
+SERVER_ADDRESSv4 = "0.0.0.0"
+SERVER_ADDRESSv6 = "::1"
DEFAULT_CONFIG_FILE = "@SYSCONF_DIR@/glusterfs/eventsconfig.json"
CUSTOM_CONFIG_FILE_TO_SYNC = "/events/config.json"
CUSTOM_CONFIG_FILE = get_glusterd_workdir() + CUSTOM_CONFIG_FILE_TO_SYNC
diff --git a/events/src/glustereventsd.py b/events/src/glustereventsd.py
index c4c7b65..341a3b6 100644
--- a/events/src/glustereventsd.py
+++ b/events/src/glustereventsd.py
@@ -13,6 +13,7 @@
from __future__ import print_function
import sys
import signal
+import threading
try:
import socketserver
except ImportError:
@@ -23,10 +24,17 @@ from argparse import ArgumentParser, RawDescriptionHelpFormatter
from eventtypes import all_events
import handlers
import utils
-from eventsapiconf import SERVER_ADDRESS, PID_FILE
+from eventsapiconf import SERVER_ADDRESSv4, SERVER_ADDRESSv6, PID_FILE
from eventsapiconf import AUTO_BOOL_ATTRIBUTES, AUTO_INT_ATTRIBUTES
from utils import logger, PidFile, PidFileLockFailed, boolify
+# Subclass so that specifically IPv4 packets are captured
+class UDPServerv4(socketserver.ThreadingUDPServer):
+ address_family = socket.AF_INET
+
+# Subclass so that specifically IPv6 packets are captured
+class UDPServerv6(socketserver.ThreadingUDPServer):
+ address_family = socket.AF_INET6
class GlusterEventsRequestHandler(socketserver.BaseRequestHandler):
@@ -89,6 +97,10 @@ def signal_handler_sigusr2(sig, frame):
utils.restart_webhook_pool()
+def UDP_server_thread(sock):
+ sock.serve_forever()
+
+
def init_event_server():
utils.setup_logger()
utils.load_all()
@@ -99,15 +111,26 @@ def init_event_server():
sys.stderr.write("Unable to get Port details from Config\n")
sys.exit(1)
- # Start the Eventing Server, UDP Server
+ # Creating the Eventing Server, UDP Server for IPv4 packets
+ try:
+ serverv4 = UDPServerv4((SERVER_ADDRESSv4, port),
+ GlusterEventsRequestHandler)
+ except socket.error as e:
+ sys.stderr.write("Failed to start Eventsd for IPv4: {0}\n".format(e))
+ sys.exit(1)
+ # Creating the Eventing Server, UDP Server for IPv6 packets
try:
- server = socketserver.ThreadingUDPServer(
- (SERVER_ADDRESS, port),
- GlusterEventsRequestHandler)
+ serverv6 = UDPServerv6((SERVER_ADDRESSv6, port),
+ GlusterEventsRequestHandler)
except socket.error as e:
- sys.stderr.write("Failed to start Eventsd: {0}\n".format(e))
+ sys.stderr.write("Failed to start Eventsd for IPv6: {0}\n".format(e))
sys.exit(1)
- server.serve_forever()
+ server_thread1 = threading.Thread(target=UDP_server_thread,
+ args=(serverv4,))
+ server_thread2 = threading.Thread(target=UDP_server_thread,
+ args=(serverv6,))
+ server_thread1.start()
+ server_thread2.start()
def get_args():
diff --git a/libglusterfs/src/events.c b/libglusterfs/src/events.c
index 6d1e383..4d720ca 100644
--- a/libglusterfs/src/events.c
+++ b/libglusterfs/src/events.c
@@ -40,6 +40,7 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
char *host = NULL;
struct addrinfo hints;
struct addrinfo *result = NULL;
+ struct addrinfo *iter_result_ptr = NULL;
xlator_t *this = THIS;
char *volfile_server_transport = NULL;
@@ -51,13 +52,6 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
goto out;
}
- /* Initialize UDP socket */
- sock = socket(AF_INET, SOCK_DGRAM, 0);
- if (sock < 0) {
- ret = EVENT_ERROR_SOCKET;
- goto out;
- }
-
if (ctx) {
volfile_server_transport = ctx->cmd_args.volfile_server_transport;
}
@@ -66,7 +60,6 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
}
/* host = NULL returns localhost */
- host = NULL;
if (ctx && ctx->cmd_args.volfile_server &&
(strcmp(volfile_server_transport, "unix"))) {
/* If it is client code then volfile_server is set
@@ -84,6 +77,24 @@ _gf_event(eventtypes_t event, const char *fmt, ...)
goto out;
}
+ // iterate over the result and break when socket creation is success.
+ for (iter_result_ptr = result; iter_result_ptr != NULL;
+ iter_result_ptr = iter_result_ptr->ai_next) {
+ sock = socket(iter_result_ptr->ai_family, iter_result_ptr->ai_socktype,
+ iter_result_ptr->ai_protocol);
+ if (sock != -1) {
+ break;
+ }
+ }
+ /*
+ * If none of the addrinfo structures lead to a successful socket
+ * creation, socket creation has failed.
+ */
+ if (sock < 0) {
+ ret = EVENT_ERROR_SOCKET;
+ goto out;
+ }
+
va_start(arguments, fmt);
ret = gf_vasprintf(&msg, fmt, arguments);
va_end(arguments);
--
1.8.3.1
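The events.c hunk above replaces a hard-coded AF_INET socket with the standard getaddrinfo() iteration pattern, while glustereventsd.py binds one ThreadingUDPServer per address family, since a single UDP socket cannot portably serve both. A minimal standalone sketch of the client-side pattern — illustrative names, not the gluster code — looks like:

    #include <string.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netdb.h>

    /* Try every address family returned for the host (IPv4 and/or IPv6)
     * and keep the first socket that can be created. */
    static int
    open_udp_socket(const char *host, const char *port)
    {
        struct addrinfo hints, *result = NULL, *iter = NULL;
        int sock = -1;

        memset(&hints, 0, sizeof(hints));
        hints.ai_family = AF_UNSPEC;    /* allow IPv4 or IPv6 */
        hints.ai_socktype = SOCK_DGRAM; /* UDP */

        if (getaddrinfo(host, port, &hints, &result) != 0)
            return -1;

        for (iter = result; iter != NULL; iter = iter->ai_next) {
            sock = socket(iter->ai_family, iter->ai_socktype,
                          iter->ai_protocol);
            if (sock != -1)
                break; /* first usable family wins */
        }

        freeaddrinfo(result);
        return sock;
    }

Unlike this sketch, the patched _gf_event() keeps the resolved addrinfo result around for the subsequent send; the iteration logic is the same.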


@ -0,0 +1,134 @@
From 3fc74ce6c282f0f43fdcfeda47b71a1b19945b6d Mon Sep 17 00:00:00 2001
From: srijan-sivakumar <ssivakum@redhat.com>
Date: Wed, 3 Feb 2021 10:11:04 +0530
Subject: [PATCH 528/532] Extras: Removing xattr_analysis script
The xattr_analysis.py script is rarely used for
debugging, and since it has some dependencies, it is
being removed from the release.
If need be, it will be shared directly with the customer.
Label: DOWNSTREAM ONLY
BUG: 1719171
Change-Id: I4bb0df3ebfa7e43e13858b4b6e3efbb02ea79d5f
Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/226301
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/quota/Makefile.am | 4 +--
extras/quota/xattr_analysis.py | 73 ------------------------------------------
glusterfs.spec.in | 1 -
3 files changed, 2 insertions(+), 76 deletions(-)
delete mode 100755 extras/quota/xattr_analysis.py
diff --git a/extras/quota/Makefile.am b/extras/quota/Makefile.am
index cdb6be1..e4d9322 100644
--- a/extras/quota/Makefile.am
+++ b/extras/quota/Makefile.am
@@ -2,7 +2,7 @@ scriptsdir = $(datadir)/glusterfs/scripts
scripts_SCRIPTS = log_accounting.sh
if WITH_SERVER
-scripts_SCRIPTS += xattr_analysis.py quota_fsck.py
+scripts_SCRIPTS += quota_fsck.py
endif
-EXTRA_DIST = log_accounting.sh xattr_analysis.py quota_fsck.py
+EXTRA_DIST = log_accounting.sh quota_fsck.py
diff --git a/extras/quota/xattr_analysis.py b/extras/quota/xattr_analysis.py
deleted file mode 100755
index 7bd7d96..0000000
--- a/extras/quota/xattr_analysis.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/python3
-# Below script has two purposes
-# 1. Display xattr of entire FS tree in a human readable form
-# 2. Display all the directory where contri and size mismatch.
-# (If there are any directory with contri and size mismatch that are not dirty
-# then that highlights a propagation issue)
-# The script takes only one input LOG _FILE generated from the command,
-# find <brick_path> | xargs getfattr -d -m. -e hex > log_gluster_xattr
-
-from __future__ import print_function
-import re
-import subprocess
-import sys
-from hurry.filesize import size
-
-if len(sys.argv) < 2:
- sys.exit('Usage: %s log_gluster_xattr \n'
- 'to generate log_gluster_xattr use: \n'
- 'find <brick_path> | xargs getfattr -d -m. -e hex > log_gluster_xattr'
- % sys.argv[0])
-LOG_FILE=sys.argv[1]
-
-def get_quota_xattr_brick():
- out = subprocess.check_output (["/usr/bin/cat", LOG_FILE])
- pairs = out.splitlines()
-
- xdict = {}
- mismatch_size = [('====contri_size===', '====size====')]
- for xattr in pairs:
- k = xattr.split("=")[0]
- if re.search("# file:", k):
- print(xdict)
- filename=k
- print("=====" + filename + "=======")
- xdict = {}
- elif k is "":
- pass
- else:
- print(xattr)
- v = xattr.split("=")[1]
- if re.search("contri", k):
- if len(v) == 34:
- # for files size is obtained in iatt, file count should be 1, dir count=0
- xdict['contri_file_count'] = int(v[18:34], 16)
- xdict['contri_dir_count'] = 0
- else:
- xdict['contri_size'] = size(int(v[2:18], 16))
- xdict['contri_file_count'] = int(v[18:34], 16)
- xdict['contri_dir_count'] = int(v[34:], 16)
- elif re.search("size", k):
- xdict['size'] = size(int(v[2:18], 16))
- xdict['file_count'] = int(v[18:34], 16)
- xdict['dir_count'] = int(v[34:], 16)
- elif re.search("dirty", k):
- if v == '0x3000':
- xdict['dirty'] = False
- elif v == '0x3100':
- xdict['dirty'] = True
- elif re.search("limit_objects", k):
- xdict['limit_objects'] = int(v[2:18], 16)
- elif re.search("limit_set", k):
- xdict['limit_set'] = size(int(v[2:18], 16))
-
- if 'size' in xdict and 'contri_size' in xdict and xdict['size'] != xdict['contri_size']:
- mismatch_size.append((xdict['contri_size'], xdict['size'], filename))
-
- for values in mismatch_size:
- print(values)
-
-
-if __name__ == '__main__':
- get_quota_xattr_brick()
-
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 30d7162..2be7677 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1380,7 +1380,6 @@ exit 0
%if ( 0%{!?_without_server:1} )
%files server
%doc extras/clear_xattrs.sh
-%{_datadir}/glusterfs/scripts/xattr_analysis.py*
%{_datadir}/glusterfs/scripts/quota_fsck.py*
# sysconf
%config(noreplace) %{_sysconfdir}/glusterfs
--
1.8.3.1


@ -0,0 +1,75 @@
From 1c7e96e73273b7891ea6ef0d768c2bf7ff5de7b0 Mon Sep 17 00:00:00 2001
From: Shwetha K Acharya <sacharya@redhat.com>
Date: Thu, 4 Feb 2021 16:29:39 +0530
Subject: [PATCH 529/532] geo-rep: prompt should work for ignore_deletes
The Python CLI is intelligent enough to parse both "-" and "_" alike:
Example:
# gluster volume geo-replication primary 127.0.0.1::secondary config sync_jobs 4
geo-replication config updated successfully
# gluster volume geo-replication primary 127.0.0.1::secondary config sync-jobs 5
geo-replication config updated successfully
# gluster volume geo-replication primary 127.0.0.1::secondary config | grep sync_jobs
sync_jobs:5
Thus the prompt which appears after "ignore-deletes true" should
work for both ignore-deletes and ignore_deletes.
Label: DOWNSTREAM ONLY
BUG: 1224906
Change-Id: I89f854200a604d07d3ac6c374fe6d445ce9f22ca
Signed-off-by: Shwetha K Acharya <sacharya@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/226599
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
cli/src/cli-cmd-parser.c | 5 +++--
tests/00-geo-rep/bug-1708603.t | 12 ++++++++++--
2 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index 34f17c9..dda8979 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -3107,8 +3107,9 @@ cli_cmd_gsync_set_parse(struct cli_state *state, const char **words,
if (!ret)
ret = dict_set_int32(dict, "type", type);
if (!ret && type == GF_GSYNC_OPTION_TYPE_CONFIG) {
- if (!strcmp((char *)words[wordcount - 2], "ignore-deletes") &&
- !strcmp((char *)words[wordcount - 1], "true")) {
+ if ((((!strcmp((char *)words[wordcount - 2], "ignore_deletes")) ||
+ (!strcmp((char *)words[wordcount - 2], "ignore-deletes")))) &&
+ ((!strcmp((char *)words[wordcount - 1], "true")))) {
question =
"There exists ~15 seconds delay for the option to take"
" effect from stime of the corresponding brick. Please"
diff --git a/tests/00-geo-rep/bug-1708603.t b/tests/00-geo-rep/bug-1708603.t
index 26913f1..edafb48 100644
--- a/tests/00-geo-rep/bug-1708603.t
+++ b/tests/00-geo-rep/bug-1708603.t
@@ -44,11 +44,19 @@ TEST glusterfs -s $H0 --volfile-id $GSV0 $M1
#Create geo-rep session
TEST create_georep_session $master $slave
-echo n | $GEOREP_CLI $master $slave config ignore-deletes true >/dev/null 2>&1
-EXPECT "false" echo $($GEOREP_CLI $master $slave config ignore-deletes)
+echo n | $GEOREP_CLI $master $slave config ignore_deletes true >/dev/null 2>&1
+EXPECT "false" echo $($GEOREP_CLI $master $slave config ignore_deletes)
+
+echo y | $GEOREP_CLI $master $slave config ignore_deletes true
+EXPECT "true" echo $($GEOREP_CLI $master $slave config ignore_deletes)
+
+$GEOREP_CLI $master $slave config ignore_deletes false
echo y | $GEOREP_CLI $master $slave config ignore-deletes true
EXPECT "true" echo $($GEOREP_CLI $master $slave config ignore-deletes)
+echo n | $GEOREP_CLI $master $slave config ignore-deletes true >/dev/null 2>&1
+EXPECT "true" echo $($GEOREP_CLI $master $slave config ignore-deletes)
+
#Stop Geo-rep
TEST $GEOREP_CLI $master $slave stop
--
1.8.3.1
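An alternative to enumerating both spellings in cli_cmd_gsync_set_parse() would be to canonicalize '-' to '_' once before comparing. A sketch with a hypothetical helper (not part of the gluster code base):

    #include <string.h>

    /* Hypothetical helper: compare an option word against its canonical
     * (underscore) spelling, treating '-' and '_' alike. */
    static int
    option_matches(const char *word, const char *canonical)
    {
        char buf[64];
        size_t i;

        for (i = 0; word[i] != '\0' && i < sizeof(buf) - 1; i++)
            buf[i] = (word[i] == '-') ? '_' : word[i];
        buf[i] = '\0';

        return strcmp(buf, canonical) == 0;
    }

With such a helper, the nested strcmp() pair in the hunk above would collapse to a single option_matches(words[wordcount - 2], "ignore_deletes") call.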


@ -0,0 +1,41 @@
From 5a7348a266587704dae4f1ddda16b7c95f547251 Mon Sep 17 00:00:00 2001
From: Rinku Kothiya <rkothiya@redhat.com>
Date: Sun, 7 Feb 2021 13:40:24 +0000
Subject: [PATCH 530/532] gfapi: avoid crash while logging message.
Break the message parameter into two separate parameters
to avoid a crash.
Upstream:
> Reviewed-on: https://github.com/gluster/glusterfs/pull/2139
> fixes: #2138
> Change-Id: Idd5f3631488c1d892748f83e6847fb6fd2d0802a
> Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
BUG: 1691320
Change-Id: Ifd6a96982ffd4e5334f8be2297de2ad826f3145b
Signed-off-by: Rinku Kothiya <rkothiya@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/226851
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
api/src/glfs-fops.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/api/src/glfs-fops.c b/api/src/glfs-fops.c
index 051541f..6dc3b66 100644
--- a/api/src/glfs-fops.c
+++ b/api/src/glfs-fops.c
@@ -1529,7 +1529,7 @@ glfs_pwritev_common(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
ret = -1;
errno = EINVAL;
gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ARG,
- "size >= %llu is not allowed", GF_UNIT_GB, NULL);
+ "Data size too large", "size=%llu", GF_UNIT_GB, NULL);
goto out;
}
--
1.8.3.1
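The underlying bug class is a printf-style mismatch between conversion specifiers and variadic arguments: as the fixed call shows, gf_smsg() takes the message and the key=value fields as separate arguments, so leaving a "%llu" inside the message string shifts every following argument. A reduced illustration with plain printf (not the gf_smsg API):

    #include <stdio.h>

    #define ONE_GB (1024ULL * 1024 * 1024)

    int
    main(void)
    {
        /* OK: one conversion specifier, one matching argument. */
        printf("size=%llu\n", ONE_GB);

        /* Undefined behavior: "%s" has no argument to consume, so
         * printf reads garbage and may crash -- the same class of
         * mismatch the patch removes by splitting the message into a
         * fixed string plus a separate "size=%llu" field.
         *
         * printf("size >= %llu is not allowed %s\n", ONE_GB);
         */
        return 0;
    }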


@ -0,0 +1,69 @@
From 058a853a1438b2a62586c545f71150ade3de23b7 Mon Sep 17 00:00:00 2001
From: schaffung <ssivakum@redhat.com>
Date: Wed, 10 Feb 2021 13:43:48 +0530
Subject: [PATCH 531/532] Glustereventsd Default port change (#2091)
Issue: The default port of glustereventsd is currently 24009,
which is preventing glustereventsd from binding to the UDP port
due to SELinux policies.
Fix: Change the default port to something in the ephemeral range.
>Fixes: #2080
>Change-Id: Ibdc87f83f82f69660dca95d6d14b226e10d8bd33
>Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
Upstream Patch : https://github.com/gluster/glusterfs/pull/2091
BUG: 1814744
Change-Id: Ibdc87f83f82f69660dca95d6d14b226e10d8bd33
Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/227249
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
events/src/eventsconfig.json | 2 +-
extras/firewalld/glusterfs.xml | 2 +-
libglusterfs/src/events.c | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/events/src/eventsconfig.json b/events/src/eventsconfig.json
index 89e5b9c..14d8f84 100644
--- a/events/src/eventsconfig.json
+++ b/events/src/eventsconfig.json
@@ -1,5 +1,5 @@
{
"log-level": "INFO",
- "port": 24009,
+ "port": 55555,
"disable-events-log": false
}
diff --git a/extras/firewalld/glusterfs.xml b/extras/firewalld/glusterfs.xml
index 7e17644..dc74b2e 100644
--- a/extras/firewalld/glusterfs.xml
+++ b/extras/firewalld/glusterfs.xml
@@ -4,7 +4,7 @@
<description>Default ports for gluster-distributed storage</description>
<port protocol="tcp" port="24007"/> <!--For glusterd -->
<port protocol="tcp" port="24008"/> <!--For glusterd RDMA port management -->
-<port protocol="tcp" port="24009"/> <!--For glustereventsd -->
+<port protocol="tcp" port="55555"/> <!--For glustereventsd -->
<port protocol="tcp" port="38465"/> <!--Gluster NFS service -->
<port protocol="tcp" port="38466"/> <!--Gluster NFS service -->
<port protocol="tcp" port="38467"/> <!--Gluster NFS service -->
diff --git a/libglusterfs/src/events.c b/libglusterfs/src/events.c
index 4d720ca..3659606 100644
--- a/libglusterfs/src/events.c
+++ b/libglusterfs/src/events.c
@@ -26,7 +26,7 @@
#include "glusterfs/events.h"
#define EVENT_HOST "127.0.0.1"
-#define EVENT_PORT 24009
+#define EVENT_PORT 55555
int
_gf_event(eventtypes_t event, const char *fmt, ...)
--
1.8.3.1


@ -0,0 +1,79 @@
From 2dad17fdbaab2ab2cda6a05dec9dcd2d37ea32ff Mon Sep 17 00:00:00 2001
From: Nikhil Ladha <nladha@redhat.com>
Date: Wed, 10 Feb 2021 15:07:32 +0530
Subject: [PATCH 532/532] glusterd: fix for starting brick on new port
The errno set by the runner code was not correct when bind() failed
on an already occupied port in __socket_server_bind().
Fix:
Updated the code to return the correct errno from
__socket_server_bind() if bind() fails with EADDRINUSE, and to use
the errno returned by runner_run() to retry allocating a new port
for the brick process.
>Fixes: #1101
>Change-Id: If124337f41344a04f050754e402490529ef4ecdc
>Signed-off-by: nik-redhat nladha@redhat.com
Upstream patch: https://github.com/gluster/glusterfs/pull/2090
BUG: 1865796
Change-Id: If124337f41344a04f050754e402490529ef4ecdc
Signed-off-by: nik-redhat <nladha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/227261
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Xavi Hernandez Juan <xhernandez@redhat.com>
---
rpc/rpc-transport/socket/src/socket.c | 3 +++
xlators/mgmt/glusterd/src/glusterd-utils.c | 6 ++----
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c
index 1ee7320..96ed9f1 100644
--- a/rpc/rpc-transport/socket/src/socket.c
+++ b/rpc/rpc-transport/socket/src/socket.c
@@ -973,8 +973,11 @@ __socket_server_bind(rpc_transport_t *this)
this->myinfo.identifier, strerror(errno));
if (errno == EADDRINUSE) {
gf_log(this->name, GF_LOG_ERROR, "Port is already in use");
+ ret = -EADDRINUSE;
+ goto out;
}
}
+
if (AF_UNIX != SA(&this->myinfo.sockaddr)->sa_family) {
if (getsockname(priv->sock, SA(&this->myinfo.sockaddr),
&this->myinfo.sockaddr_len) != 0) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index cf32bd9..bc188a2 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -2151,6 +2151,7 @@ glusterd_volume_start_glusterfs(glusterd_volinfo_t *volinfo,
ret = -1;
goto out;
}
+
/* Build the exp_path, before starting the glusterfsd even in
valgrind mode. Otherwise all the glusterfsd processes start
writing the valgrind log to the same file.
@@ -2289,13 +2290,10 @@ retry:
if (wait) {
synclock_unlock(&priv->big_lock);
- errno = 0;
ret = runner_run(&runner);
- if (errno != 0)
- ret = errno;
synclock_lock(&priv->big_lock);
- if (ret == EADDRINUSE) {
+ if (ret == -EADDRINUSE) {
/* retry after getting a new port */
gf_msg(this->name, GF_LOG_WARNING, -ret,
GD_MSG_SRC_BRICK_PORT_UNAVAIL,
--
1.8.3.1
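The retry depends on bind() failures being propagated as negative errno values rather than a bare -1. A reduced sketch of the convention the patch introduces (generic sockets code, not the gluster transport):

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <sys/socket.h>

    /* Return 0 on success, -errno on failure, so the caller can
     * distinguish "port taken, retry" from other errors. */
    static int
    try_bind(int sock, uint16_t port)
    {
        struct sockaddr_in addr;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(port);

        if (bind(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0)
            return -errno; /* e.g. -EADDRINUSE when the port is taken */
        return 0;
    }

glusterd then retries only on -EADDRINUSE, exactly as the retry: loop in the hunk above does once runner_run() hands the value back.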


@ -0,0 +1,250 @@
From 854ab79dbef449c39adf66e3faebb4681359fce4 Mon Sep 17 00:00:00 2001
From: mohit84 <moagrawa@redhat.com>
Date: Thu, 18 Feb 2021 09:40:44 +0530
Subject: [PATCH 533/538] glusterd: Rebalance cli is not showing correct status
after reboot (#2172)
The rebalance CLI does not show the correct status after a reboot.
The CLI status is not correct because the defrag object is not
valid at the time an rpc connection is created to fetch the status.
The defrag object is not valid because, while glusterd starts,
glusterd_restart_rebalance can be called almost at the same time by
two different synctasks; glusterd gets a disconnect on the rpc
object and cleans up the defrag object.
Solution: To keep the defrag object alive, take a reference count
before creating the defrag rpc object.
>Fixes: #1339
>Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
>Change-Id: Ia284015d79beaa3d703ebabb92f26870a5aaafba
Upstream Patch : https://github.com/gluster/glusterfs/pull/2172
BUG: 1832306
Change-Id: Ia284015d79beaa3d703ebabb92f26870a5aaafba
Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/228249
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-rebalance.c | 35 ++++++++++-----
xlators/mgmt/glusterd/src/glusterd-syncop.c | 1 +
xlators/mgmt/glusterd/src/glusterd-utils.c | 59 +++++++++++++++++++++++++-
xlators/mgmt/glusterd/src/glusterd-utils.h | 5 +++
xlators/mgmt/glusterd/src/glusterd.h | 1 +
5 files changed, 90 insertions(+), 11 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index b419a89..fcd5318 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -86,6 +86,7 @@ __glusterd_defrag_notify(struct rpc_clnt *rpc, void *mydata,
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
int pid = -1;
+ int refcnt = 0;
this = THIS;
if (!this)
@@ -125,11 +126,12 @@ __glusterd_defrag_notify(struct rpc_clnt *rpc, void *mydata,
}
case RPC_CLNT_DISCONNECT: {
- if (!defrag->connected)
- return 0;
-
LOCK(&defrag->lock);
{
+ if (!defrag->connected) {
+ UNLOCK(&defrag->lock);
+ return 0;
+ }
defrag->connected = 0;
}
UNLOCK(&defrag->lock);
@@ -146,11 +148,11 @@ __glusterd_defrag_notify(struct rpc_clnt *rpc, void *mydata,
glusterd_defrag_rpc_put(defrag);
if (defrag->cbk_fn)
defrag->cbk_fn(volinfo, volinfo->rebal.defrag_status);
-
- GF_FREE(defrag);
+ refcnt = glusterd_defrag_unref(defrag);
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_REBALANCE_DISCONNECTED,
- "Rebalance process for volume %s has disconnected.",
- volinfo->volname);
+ "Rebalance process for volume %s has disconnected"
+ " and defrag refcnt is %d.",
+ volinfo->volname, refcnt);
break;
}
case RPC_CLNT_DESTROY:
@@ -309,7 +311,11 @@ glusterd_handle_defrag_start(glusterd_volinfo_t *volinfo, char *op_errstr,
gf_msg_debug("glusterd", 0, "rebalance command failed");
goto out;
}
-
+ /* Take a reference before the sleep so that the defrag object is
+ not cleaned up while glusterd_restart_rebalance is called for
+ other bricks by a synctask at the time of a glusterd restart.
+ */
+ glusterd_defrag_ref(defrag);
sleep(5);
ret = glusterd_rebalance_rpc_create(volinfo);
@@ -372,6 +378,7 @@ glusterd_rebalance_rpc_create(glusterd_volinfo_t *volinfo)
GF_ASSERT(this);
priv = this->private;
GF_ASSERT(priv);
+ struct rpc_clnt *rpc = NULL;
// rebalance process is not started
if (!defrag)
@@ -396,13 +403,21 @@ glusterd_rebalance_rpc_create(glusterd_volinfo_t *volinfo)
}
glusterd_volinfo_ref(volinfo);
- ret = glusterd_rpc_create(&defrag->rpc, options, glusterd_defrag_notify,
- volinfo, _gf_true);
+ ret = glusterd_rpc_create(&rpc, options, glusterd_defrag_notify, volinfo,
+ _gf_false);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_RPC_CREATE_FAIL,
"Glusterd RPC creation failed");
goto out;
}
+ LOCK(&defrag->lock);
+ {
+ if (!defrag->rpc)
+ defrag->rpc = rpc;
+ else
+ rpc_clnt_unref(rpc);
+ }
+ UNLOCK(&defrag->lock);
ret = 0;
out:
if (options)
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index df78fef..05c9e11 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1732,6 +1732,7 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
if (!rpc) {
if (pending_node->type == GD_NODE_REBALANCE && pending_node->node) {
volinfo = pending_node->node;
+ glusterd_defrag_ref(volinfo->rebal.defrag);
ret = glusterd_rebalance_rpc_create(volinfo);
if (ret) {
ret = 0;
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index bc188a2..9fb8eab 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -93,6 +93,44 @@
#define NLMV4_VERSION 4
#define NLMV1_VERSION 1
+int
+glusterd_defrag_ref(glusterd_defrag_info_t *defrag)
+{
+ int refcnt = 0;
+
+ if (!defrag)
+ goto out;
+
+ LOCK(&defrag->lock);
+ {
+ refcnt = ++defrag->refcnt;
+ }
+ UNLOCK(&defrag->lock);
+
+out:
+ return refcnt;
+}
+
+int
+glusterd_defrag_unref(glusterd_defrag_info_t *defrag)
+{
+ int refcnt = -1;
+
+ if (!defrag)
+ goto out;
+
+ LOCK(&defrag->lock);
+ {
+ refcnt = --defrag->refcnt;
+ if (refcnt <= 0)
+ GF_FREE(defrag);
+ }
+ UNLOCK(&defrag->lock);
+
+out:
+ return refcnt;
+}
+
gf_boolean_t
is_brick_mx_enabled(void)
{
@@ -9370,6 +9408,7 @@ glusterd_volume_defrag_restart(glusterd_volinfo_t *volinfo, char *op_errstr,
char pidfile[PATH_MAX] = "";
int ret = -1;
pid_t pid = 0;
+ int refcnt = 0;
this = THIS;
GF_ASSERT(this);
@@ -9410,7 +9449,25 @@ glusterd_volume_defrag_restart(glusterd_volinfo_t *volinfo, char *op_errstr,
volinfo->volname);
goto out;
}
- ret = glusterd_rebalance_rpc_create(volinfo);
+ refcnt = glusterd_defrag_ref(volinfo->rebal.defrag);
+ /* If the refcnt value is 1, the defrag object was either populated
+ by glusterd_rebalance_defrag_init or the previous rpc creation
+ failed. If it is not 1, the defrag object was populated when the
+ rebalance daemon was started. We need to create an rpc object
+ only if a previous rpc connection was not established
+ successfully while restarting the rebalance daemon via
+ glusterd_handle_defrag_start; otherwise the rebalance cli would
+ not show the correct status right after a node reboot, because
+ the defrag object has been destroyed while handling the rpc
+ disconnect.
+ */
+ if (refcnt == 1) {
+ ret = glusterd_rebalance_rpc_create(volinfo);
+ } else {
+ ret = 0;
+ glusterd_defrag_unref(volinfo->rebal.defrag);
+ }
break;
}
case GF_DEFRAG_STATUS_NOT_STARTED:
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 02d85d2..4541471 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -886,4 +886,9 @@ int32_t
glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type,
int32_t sub_count);
+int
+glusterd_defrag_ref(glusterd_defrag_info_t *defrag);
+
+int
+glusterd_defrag_unref(glusterd_defrag_info_t *defrag);
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index efe4d0e..9de3f28 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -321,6 +321,7 @@ struct glusterd_defrag_info_ {
uint64_t total_data;
uint64_t num_files_lookedup;
uint64_t total_failures;
+ int refcnt;
gf_lock_t lock;
int cmd;
pthread_t th;
--
1.8.3.1
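Two patterns in this patch are worth calling out. The reference taken before sleep(5) keeps the object alive across a window in which another synctask may drop it, and the locked section added to glusterd_rebalance_rpc_create() is the install-or-discard idiom: build the new object outside the lock, publish it only if nobody else won the race, and destroy the loser outside the lock. A generic sketch (pthreads instead of gluster's LOCK(), illustrative types):

    #include <pthread.h>
    #include <stddef.h>

    typedef struct {
        pthread_mutex_t lock;
        void *rpc; /* shared slot, possibly set by a racing thread */
    } owner_t;

    static void
    install_or_discard(owner_t *o, void *fresh, void (*destroy)(void *))
    {
        void *loser = fresh;

        pthread_mutex_lock(&o->lock);
        if (o->rpc == NULL) {
            o->rpc = fresh; /* we won: publish our object */
            loser = NULL;
        }
        pthread_mutex_unlock(&o->lock);

        if (loser != NULL)
            destroy(loser); /* we lost: drop our copy outside the lock */
    }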


@ -0,0 +1,47 @@
From b3647eb5415b2e3d9e1a11ad6c4689e520f17b39 Mon Sep 17 00:00:00 2001
From: mohit84 <moagrawa@redhat.com>
Date: Mon, 22 Feb 2021 10:09:34 +0530
Subject: [PATCH 534/538] glusterd: Resolve use after free bug (#2181)
The commit 61ae58e67567ea4de8f8efc6b70a9b1f8e0f1bea
introduced a coverity bug: the object is used after it
has been cleaned up.
Fix: free the memory after coming out of the critical section.
>Fixes: #2180
>Change-Id: Iee2050c4883a0dd44b8523bb822b664462ab6041
>Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Upstream Patch : https://github.com/gluster/glusterfs/pull/2181
BUG: 1832306
Change-Id: Iee2050c4883a0dd44b8523bb822b664462ab6041
Signed-off-by: srijan-sivakumar <ssivakum@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/228578
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-utils.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 9fb8eab..6d40be5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -122,11 +122,10 @@ glusterd_defrag_unref(glusterd_defrag_info_t *defrag)
LOCK(&defrag->lock);
{
refcnt = --defrag->refcnt;
- if (refcnt <= 0)
- GF_FREE(defrag);
}
UNLOCK(&defrag->lock);
-
+ if (refcnt <= 0)
+ GF_FREE(defrag);
out:
return refcnt;
}
--
1.8.3.1
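The fixed ordering is easy to state as a rule: when the lock lives inside the refcounted object, the lock must be released before the object can be freed, otherwise the UNLOCK() itself is the use-after-free. A minimal sketch of the corrected shape (generic pthreads, not the glusterd types):

    #include <pthread.h>
    #include <stdlib.h>

    typedef struct {
        pthread_mutex_t lock;
        int refcnt;
    } obj_t;

    static int
    obj_unref(obj_t *o)
    {
        int refcnt;

        pthread_mutex_lock(&o->lock);
        refcnt = --o->refcnt;
        pthread_mutex_unlock(&o->lock); /* release before any free() */

        if (refcnt <= 0)
            free(o); /* safe: o->lock is no longer touched */

        return refcnt;
    }

Because the count can drop to zero only once, no other thread can still be inside the critical section when free() runs.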


@ -0,0 +1,270 @@
From 775d500cd136bd8c940faaeffde1217c25a87e3d Mon Sep 17 00:00:00 2001
From: Yaniv Kaul <ykaul@redhat.com>
Date: Sun, 2 Jun 2019 21:14:18 +0300
Subject: [PATCH 535/538] (multiple files) use dict_allocate_and_serialize()
where applicable.
This function does length, allocation and serialization for you.
Upstream patch:
> Upstream-patch-link: https://review.gluster.org/#/c/glusterfs/+/22800
> Change-Id: I142a259952a2fe83dd719442afaefe4a43a8e55e
> updates: bz#1193929
> Signed-off-by: Yaniv Kaul <ykaul@redhat.com>
Change-Id: I142a259952a2fe83dd719442afaefe4a43a8e55e
BUG: 1911292
Signed-off-by: Yaniv Kaul <ykaul@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/228611
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/cluster/afr/src/afr-inode-read.c | 34 +++++---------------------
xlators/cluster/ec/src/ec-combine.c | 16 +++---------
xlators/features/locks/src/posix.c | 23 +++--------------
xlators/protocol/client/src/client-handshake.c | 14 +++--------
xlators/protocol/server/src/server-handshake.c | 24 +++++++-----------
xlators/protocol/server/src/server-helpers.c | 27 +++-----------------
6 files changed, 28 insertions(+), 110 deletions(-)
diff --git a/xlators/cluster/afr/src/afr-inode-read.c b/xlators/cluster/afr/src/afr-inode-read.c
index 523a5b4..cf305af 100644
--- a/xlators/cluster/afr/src/afr-inode-read.c
+++ b/xlators/cluster/afr/src/afr-inode-read.c
@@ -948,24 +948,13 @@ unlock:
goto unwind;
}
- len = dict_serialized_length(local->dict);
- if (len <= 0) {
- goto unwind;
- }
-
- lockinfo_buf = GF_CALLOC(1, len, gf_common_mt_char);
- if (!lockinfo_buf) {
+ op_ret = dict_allocate_and_serialize(
+ local->dict, (char **)&lockinfo_buf, (unsigned int *)&len);
+ if (op_ret != 0) {
local->op_ret = -1;
- local->op_errno = ENOMEM;
goto unwind;
}
- op_ret = dict_serialize(local->dict, lockinfo_buf);
- if (op_ret < 0) {
- local->op_ret = -1;
- local->op_errno = -op_ret;
- }
-
op_ret = dict_set_dynptr(newdict, GF_XATTR_LOCKINFO_KEY,
(void *)lockinfo_buf, len);
if (op_ret < 0) {
@@ -1064,24 +1053,13 @@ unlock:
goto unwind;
}
- len = dict_serialized_length(local->dict);
- if (len <= 0) {
- goto unwind;
- }
-
- lockinfo_buf = GF_CALLOC(1, len, gf_common_mt_char);
- if (!lockinfo_buf) {
+ op_ret = dict_allocate_and_serialize(
+ local->dict, (char **)&lockinfo_buf, (unsigned int *)&len);
+ if (op_ret != 0) {
local->op_ret = -1;
- local->op_errno = ENOMEM;
goto unwind;
}
- op_ret = dict_serialize(local->dict, lockinfo_buf);
- if (op_ret < 0) {
- local->op_ret = -1;
- local->op_errno = -op_ret;
- }
-
op_ret = dict_set_dynptr(newdict, GF_XATTR_LOCKINFO_KEY,
(void *)lockinfo_buf, len);
if (op_ret < 0) {
diff --git a/xlators/cluster/ec/src/ec-combine.c b/xlators/cluster/ec/src/ec-combine.c
index 99e5534..9d712b3 100644
--- a/xlators/cluster/ec/src/ec-combine.c
+++ b/xlators/cluster/ec/src/ec-combine.c
@@ -486,22 +486,12 @@ ec_dict_data_merge(ec_cbk_data_t *cbk, int32_t which, char *key)
tmp = NULL;
- len = dict_serialized_length(lockinfo);
- if (len < 0) {
- err = len;
-
- goto out;
- }
- ptr = GF_MALLOC(len, gf_common_mt_char);
- if (ptr == NULL) {
- err = -ENOMEM;
-
- goto out;
- }
- err = dict_serialize(lockinfo, ptr);
+ err = dict_allocate_and_serialize(lockinfo, (char **)&ptr,
+ (unsigned int *)&len);
if (err != 0) {
goto out;
}
+
dict = (which == EC_COMBINE_XDATA) ? cbk->xdata : cbk->dict;
err = dict_set_dynptr(dict, key, ptr, len);
if (err != 0) {
diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c
index 5ae0125..cdd1ff7 100644
--- a/xlators/features/locks/src/posix.c
+++ b/xlators/features/locks/src/posix.c
@@ -1547,8 +1547,9 @@ pl_fgetxattr_handle_lockinfo(xlator_t *this, fd_t *fd, dict_t *dict,
goto out;
}
- len = dict_serialized_length(tmp);
- if (len < 0) {
+ op_ret = dict_allocate_and_serialize(tmp, (char **)&buf,
+ (unsigned int *)&len);
+ if (op_ret != 0) {
*op_errno = -op_ret;
op_ret = -1;
gf_log(this->name, GF_LOG_WARNING,
@@ -1558,24 +1559,6 @@ pl_fgetxattr_handle_lockinfo(xlator_t *this, fd_t *fd, dict_t *dict,
goto out;
}
- buf = GF_CALLOC(1, len, gf_common_mt_char);
- if (buf == NULL) {
- op_ret = -1;
- *op_errno = ENOMEM;
- goto out;
- }
-
- op_ret = dict_serialize(tmp, buf);
- if (op_ret < 0) {
- *op_errno = -op_ret;
- op_ret = -1;
- gf_log(this->name, GF_LOG_WARNING,
- "dict_serialize failed (%s) while handling lockinfo "
- "for fd (ptr: %p inode-gfid:%s)",
- strerror(*op_errno), fd, uuid_utoa(fd->inode->gfid));
- goto out;
- }
-
op_ret = dict_set_dynptr(dict, GF_XATTR_LOCKINFO_KEY, buf, len);
if (op_ret < 0) {
*op_errno = -op_ret;
diff --git a/xlators/protocol/client/src/client-handshake.c b/xlators/protocol/client/src/client-handshake.c
index 0002361..6b20d92 100644
--- a/xlators/protocol/client/src/client-handshake.c
+++ b/xlators/protocol/client/src/client-handshake.c
@@ -1286,18 +1286,10 @@ client_setvolume(xlator_t *this, struct rpc_clnt *rpc)
"Failed to set client opversion in handshake message");
}
- ret = dict_serialized_length(options);
- if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, PC_MSG_DICT_ERROR,
- "failed to get serialized length of dict");
+ ret = dict_allocate_and_serialize(options, (char **)&req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret != 0) {
ret = -1;
- goto fail;
- }
- req.dict.dict_len = ret;
- req.dict.dict_val = GF_CALLOC(1, req.dict.dict_len,
- gf_client_mt_clnt_req_buf_t);
- ret = dict_serialize(options, req.dict.dict_val);
- if (ret < 0) {
gf_msg(this->name, GF_LOG_ERROR, 0, PC_MSG_DICT_SERIALIZE_FAIL,
"failed to serialize "
"dictionary");
diff --git a/xlators/protocol/server/src/server-handshake.c b/xlators/protocol/server/src/server-handshake.c
index eeca73c..54dc030 100644
--- a/xlators/protocol/server/src/server-handshake.c
+++ b/xlators/protocol/server/src/server-handshake.c
@@ -676,22 +676,16 @@ fail:
GF_ASSERT(rsp);
rsp->op_ret = 0;
- ret = dict_serialized_length(reply);
- if (ret > 0) {
- rsp->dict.dict_len = ret;
- rsp->dict.dict_val = GF_CALLOC(1, rsp->dict.dict_len,
- gf_server_mt_rsp_buf_t);
- if (rsp->dict.dict_val) {
- ret = dict_serialize(reply, rsp->dict.dict_val);
- if (ret < 0) {
- gf_msg_debug("server-handshake", 0,
- "failed "
- "to serialize reply dict");
- op_ret = -1;
- op_errno = -ret;
- }
- }
+
+ ret = dict_allocate_and_serialize(reply, (char **)&rsp->dict.dict_val,
+ &rsp->dict.dict_len);
+ if (ret != 0) {
+ ret = -1;
+ gf_msg_debug("server-handshake", 0, "failed to serialize reply dict");
+ op_ret = -1;
+ op_errno = -ret;
}
+
rsp->op_ret = op_ret;
rsp->op_errno = gf_errno_to_error(op_errno);
diff --git a/xlators/protocol/server/src/server-helpers.c b/xlators/protocol/server/src/server-helpers.c
index e74a24d..33959b5 100644
--- a/xlators/protocol/server/src/server-helpers.c
+++ b/xlators/protocol/server/src/server-helpers.c
@@ -902,7 +902,6 @@ serialize_rsp_direntp(gf_dirent_t *entries, gfs3_readdirp_rsp *rsp)
gfs3_dirplist *trav = NULL;
gfs3_dirplist *prev = NULL;
int ret = -1;
- int temp = 0;
GF_VALIDATE_OR_GOTO("server", entries, out);
GF_VALIDATE_OR_GOTO("server", rsp, out);
@@ -923,28 +922,10 @@ serialize_rsp_direntp(gf_dirent_t *entries, gfs3_readdirp_rsp *rsp)
/* if 'dict' is present, pack it */
if (entry->dict) {
- temp = dict_serialized_length(entry->dict);
-
- if (temp < 0) {
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, PS_MSG_INVALID_ENTRY,
- "failed to get "
- "serialized length of reply dict");
- errno = EINVAL;
- trav->dict.dict_len = 0;
- goto out;
- }
- trav->dict.dict_len = temp;
-
- trav->dict.dict_val = GF_CALLOC(1, trav->dict.dict_len,
- gf_server_mt_rsp_buf_t);
- if (!trav->dict.dict_val) {
- errno = ENOMEM;
- trav->dict.dict_len = 0;
- goto out;
- }
-
- ret = dict_serialize(entry->dict, trav->dict.dict_val);
- if (ret < 0) {
+ ret = dict_allocate_and_serialize(entry->dict,
+ (char **)&trav->dict.dict_val,
+ &trav->dict.dict_len);
+ if (ret != 0) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, PS_MSG_DICT_SERIALIZE_FAIL,
"failed to serialize reply dict");
errno = -ret;
--
1.8.3.1
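The value of the refactor is that every call site loses two hand-rolled failure paths (the length check and the allocation) plus a separate serialization step. A generic sketch of what such a combined helper does — illustrative stand-ins, not the libglusterfs implementation:

    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for a serializable container (NOT the gluster dict_t). */
    typedef struct {
        char *payload;
        unsigned int size;
    } blob_t;

    /* One call, one error path: was length + GF_CALLOC + serialize. */
    static int
    blob_allocate_and_serialize(const blob_t *b, char **buf,
                                unsigned int *len)
    {
        char *out;

        if (b == NULL || b->size == 0)
            return -1; /* was: dict_serialized_length() <= 0 */

        out = calloc(1, b->size); /* was: GF_CALLOC() + ENOMEM branch */
        if (out == NULL)
            return -1;

        memcpy(out, b->payload, b->size); /* was: dict_serialize() < 0 */
        *buf = out;
        *len = b->size;
        return 0;
    }

On success the caller owns *buf and typically hands it off, as the hunks above do via dict_set_dynptr().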


@ -0,0 +1,102 @@
From 32281b4b5cf79d0ef6f0c65775bb81093e1ba479 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawa@redhat.com>
Date: Wed, 24 Feb 2021 18:44:12 +0530
Subject: [PATCH 536/538] dht: Ongoing IO is failed during volume shrink
operation (#2188)
In commit (c878174) we introduced a check
to avoid a stale layout issue. To avoid the stale layout
issue, dht sets a key along with the layout while winding
a create fop, and posix validates the parent layout based
on the key value. If the layout does not match, it throws
an error. During a volume shrink the layout is changed by
the rebalance daemon, and if the layout does not match,
dht is not able to wind the create fop successfully.
Solution: To avoid the issue, populate the key only when
dht winds the fop for the first time. After getting an
error, dht takes a lock for the second attempt and then
reattempts to wind the fop.
> Fixes: #2187
> Change-Id: Ie018386e7823a11eea415496bb226ca032453a55
> Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
> (Cherry pick from commit da6ce622b722f7d12619c5860293faf03f7cd00c
> Reviewed on upstream link https://github.com/gluster/glusterfs/pull/2188
Bug: 1924044
Change-Id: I7670dbe2d562b83db0af3753f994653ffdd49591
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/228941
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/cluster/dht/src/dht-common.c | 41 ++++++++++++++++++++++++++----------
1 file changed, 30 insertions(+), 11 deletions(-)
diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c
index fe1d0ee..7425c1a 100644
--- a/xlators/cluster/dht/src/dht-common.c
+++ b/xlators/cluster/dht/src/dht-common.c
@@ -8526,15 +8526,32 @@ dht_create_wind_to_avail_subvol(call_frame_t *frame, xlator_t *this,
{
dht_local_t *local = NULL;
xlator_t *avail_subvol = NULL;
+ int lk_count = 0;
local = frame->local;
if (!dht_is_subvol_filled(this, subvol)) {
- gf_msg_debug(this->name, 0, "creating %s on %s", loc->path,
- subvol->name);
-
- dht_set_parent_layout_in_dict(loc, this, local);
-
+ lk_count = local->lock[0].layout.parent_layout.lk_count;
+ gf_msg_debug(this->name, 0, "creating %s on %s with lock_count %d",
+ loc->path, subvol->name, lk_count);
+ /* The function dht_set_parent_layout_in_dict sets the layout
+ in the dictionary, and posix_create validates the layout before
+ creating a file. If the parent layout does not match the on-disk
+ layout, the posix xlator throws an error; but when the volume is
+ shrunk the layout has been changed by the rebalance daemon, so we
+ need to call this function only when the fop is wound without
+ taking any lock; otherwise we would not be able to populate the
+ layout on disk when the layout has changed.
+ */
+ if (!lk_count) {
+ dht_set_parent_layout_in_dict(loc, this, local);
+ } else {
+ /* Delete a key to avoid layout validate if it was set by
+ previous STACK_WIND attempt when a lock was not taken
+ by dht_create
+ */
+ (void)dict_del_sizen(local->params, GF_PREOP_PARENT_KEY);
+ }
STACK_WIND_COOKIE(frame, dht_create_cbk, subvol, subvol,
subvol->fops->create, loc, flags, mode, umask, fd,
params);
@@ -8554,12 +8571,14 @@ dht_create_wind_to_avail_subvol(call_frame_t *frame, xlator_t *this,
goto out;
}
-
- gf_msg_debug(this->name, 0, "creating %s on %s", loc->path,
- subvol->name);
-
- dht_set_parent_layout_in_dict(loc, this, local);
-
+ lk_count = local->lock[0].layout.parent_layout.lk_count;
+ gf_msg_debug(this->name, 0, "creating %s on %s with lk_count %d",
+ loc->path, subvol->name, lk_count);
+ if (!lk_count) {
+ dht_set_parent_layout_in_dict(loc, this, local);
+ } else {
+ (void)dict_del_sizen(local->params, GF_PREOP_PARENT_KEY);
+ }
STACK_WIND_COOKIE(frame, dht_create_cbk, subvol, subvol,
subvol->fops->create, loc, flags, mode, umask, fd,
params);
--
1.8.3.1
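Reduced to control flow, the fix is an optimistic-first, validated-retry scheme: stamp the expected parent layout on the lockless attempt so the brick can reject a stale view, and drop the stamp on the locked retry, when the layout can no longer change. A schematic sketch (illustrative stub names, not the dht implementation):

    /* Stubs standing in for the dht/dict plumbing. */
    struct create_params;
    void stamp_parent_layout(struct create_params *p); /* expect-check */
    void drop_parent_layout(struct create_params *p);  /* remove stamp */
    void wind_create(struct create_params *p);

    static void
    create_on_subvol(struct create_params *p, int lk_count)
    {
        if (lk_count == 0) {
            /* Lockless first attempt: attach the layout we believe in,
             * so the brick fails the create if it changed under us. */
            stamp_parent_layout(p);
        } else {
            /* Locked retry: the layout is pinned now; remove the stale
             * stamp left by the first attempt. */
            drop_parent_layout(p);
        }
        wind_create(p);
    }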


@ -0,0 +1,387 @@
From 7b7ec67680415c22773ebb2a5daacf298b6b1e06 Mon Sep 17 00:00:00 2001
From: Xavi Hernandez <xhernandez@redhat.com>
Date: Sat, 13 Feb 2021 18:37:32 +0100
Subject: [PATCH 537/538] cluster/afr: Fix race in lockinfo (f)getxattr
A shared dictionary was updated outside the lock after having updated
the number of remaining answers. This means that one thread may be
processing the last answer and unwinding the request before another
thread completes updating the dict.
Thread 1                              Thread 2

LOCK()
call_cnt-- (=1)
UNLOCK()
                                      LOCK()
                                      call_cnt-- (=0)
                                      UNLOCK()
                                      update_dict(dict)
                                      if (call_cnt == 0) {
                                          STACK_UNWIND(dict);
                                      }
update_dict(dict)
if (call_cnt == 0) {
    STACK_UNWIND(dict);
}
The updates from thread 1 are lost.
This patch also reduces the work done inside the locked region and
reduces code duplication.
Upstream-patch:
> Upstream-patch-link: https://github.com/gluster/glusterfs/pull/2162
> Fixes: #2161
> Change-Id: Idc0d34ab19ea6031de0641f7b05c624d90fac8fa
> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
BUG: 1911292
Change-Id: Idc0d34ab19ea6031de0641f7b05c624d90fac8fa
Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/228924
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/cluster/afr/src/afr-inode-read.c | 254 ++++++++++++++-----------------
1 file changed, 112 insertions(+), 142 deletions(-)
diff --git a/xlators/cluster/afr/src/afr-inode-read.c b/xlators/cluster/afr/src/afr-inode-read.c
index cf305af..98e195a 100644
--- a/xlators/cluster/afr/src/afr-inode-read.c
+++ b/xlators/cluster/afr/src/afr-inode-read.c
@@ -15,6 +15,8 @@
#include <stdlib.h>
#include <signal.h>
+#include <urcu/uatomic.h>
+
#include <glusterfs/glusterfs.h>
#include "afr.h"
#include <glusterfs/dict.h>
@@ -868,188 +870,121 @@ afr_getxattr_quota_size_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
return 0;
}
-int32_t
-afr_getxattr_lockinfo_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *dict,
- dict_t *xdata)
+static int32_t
+afr_update_local_dicts(call_frame_t *frame, dict_t *dict, dict_t *xdata)
{
- int call_cnt = 0, len = 0;
- char *lockinfo_buf = NULL;
- dict_t *lockinfo = NULL, *newdict = NULL;
- afr_local_t *local = NULL;
+ afr_local_t *local;
+ dict_t *local_dict;
+ dict_t *local_xdata;
+ int32_t ret;
- LOCK(&frame->lock);
- {
- local = frame->local;
+ local = frame->local;
+ local_dict = NULL;
+ local_xdata = NULL;
- call_cnt = --local->call_count;
+ ret = -ENOMEM;
- if ((op_ret < 0) || (!dict && !xdata)) {
- goto unlock;
- }
-
- if (xdata) {
- if (!local->xdata_rsp) {
- local->xdata_rsp = dict_new();
- if (!local->xdata_rsp) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- goto unlock;
- }
- }
+ if ((dict != NULL) && (local->dict == NULL)) {
+ local_dict = dict_new();
+ if (local_dict == NULL) {
+ goto done;
}
+ }
- if (!dict) {
- goto unlock;
+ if ((xdata != NULL) && (local->xdata_rsp == NULL)) {
+ local_xdata = dict_new();
+ if (local_xdata == NULL) {
+ goto done;
}
+ }
- op_ret = dict_get_ptr_and_len(dict, GF_XATTR_LOCKINFO_KEY,
- (void **)&lockinfo_buf, &len);
+ if ((local_dict != NULL) || (local_xdata != NULL)) {
+ /* TODO: Maybe it would be better to preallocate both dicts before
+ * sending the requests. This way we don't need to use a LOCK()
+ * here. */
+ LOCK(&frame->lock);
- if (!lockinfo_buf) {
- goto unlock;
+ if ((local_dict != NULL) && (local->dict == NULL)) {
+ local->dict = local_dict;
+ local_dict = NULL;
}
- if (!local->dict) {
- local->dict = dict_new();
- if (!local->dict) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- goto unlock;
- }
+ if ((local_xdata != NULL) && (local->xdata_rsp == NULL)) {
+ local->xdata_rsp = local_xdata;
+ local_xdata = NULL;
}
- }
-unlock:
- UNLOCK(&frame->lock);
- if (lockinfo_buf != NULL) {
- lockinfo = dict_new();
- if (lockinfo == NULL) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- } else {
- op_ret = dict_unserialize(lockinfo_buf, len, &lockinfo);
-
- if (lockinfo && local->dict) {
- dict_copy(lockinfo, local->dict);
- }
- }
- }
-
- if (xdata && local->xdata_rsp) {
- dict_copy(xdata, local->xdata_rsp);
+ UNLOCK(&frame->lock);
}
- if (!call_cnt) {
- newdict = dict_new();
- if (!newdict) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- goto unwind;
+ if (dict != NULL) {
+ if (dict_copy(dict, local->dict) < 0) {
+ goto done;
}
+ }
- op_ret = dict_allocate_and_serialize(
- local->dict, (char **)&lockinfo_buf, (unsigned int *)&len);
- if (op_ret != 0) {
- local->op_ret = -1;
- goto unwind;
+ if (xdata != NULL) {
+ if (dict_copy(xdata, local->xdata_rsp) < 0) {
+ goto done;
}
+ }
- op_ret = dict_set_dynptr(newdict, GF_XATTR_LOCKINFO_KEY,
- (void *)lockinfo_buf, len);
- if (op_ret < 0) {
- local->op_ret = -1;
- local->op_errno = -op_ret;
- goto unwind;
- }
+ ret = 0;
- unwind:
- AFR_STACK_UNWIND(getxattr, frame, op_ret, op_errno, newdict,
- local->xdata_rsp);
+done:
+ if (local_dict != NULL) {
+ dict_unref(local_dict);
}
- dict_unref(lockinfo);
+ if (local_xdata != NULL) {
+ dict_unref(local_xdata);
+ }
- return 0;
+ return ret;
}
-int32_t
-afr_fgetxattr_lockinfo_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *dict,
- dict_t *xdata)
+static void
+afr_getxattr_lockinfo_cbk_common(call_frame_t *frame, int32_t op_ret,
+ int32_t op_errno, dict_t *dict, dict_t *xdata,
+ bool is_fgetxattr)
{
- int call_cnt = 0, len = 0;
+ int len = 0;
char *lockinfo_buf = NULL;
dict_t *lockinfo = NULL, *newdict = NULL;
afr_local_t *local = NULL;
- LOCK(&frame->lock);
- {
- local = frame->local;
-
- call_cnt = --local->call_count;
-
- if ((op_ret < 0) || (!dict && !xdata)) {
- goto unlock;
- }
-
- if (xdata) {
- if (!local->xdata_rsp) {
- local->xdata_rsp = dict_new();
- if (!local->xdata_rsp) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- goto unlock;
- }
- }
- }
-
- if (!dict) {
- goto unlock;
- }
+ local = frame->local;
+ if ((op_ret >= 0) && (dict != NULL)) {
op_ret = dict_get_ptr_and_len(dict, GF_XATTR_LOCKINFO_KEY,
(void **)&lockinfo_buf, &len);
-
- if (!lockinfo_buf) {
- goto unlock;
- }
-
- if (!local->dict) {
- local->dict = dict_new();
- if (!local->dict) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- goto unlock;
+ if (lockinfo_buf != NULL) {
+ lockinfo = dict_new();
+ if (lockinfo == NULL) {
+ op_ret = -1;
+ } else {
+ op_ret = dict_unserialize(lockinfo_buf, len, &lockinfo);
}
}
}
-unlock:
- UNLOCK(&frame->lock);
- if (lockinfo_buf != NULL) {
- lockinfo = dict_new();
- if (lockinfo == NULL) {
- local->op_ret = -1;
- local->op_errno = ENOMEM;
- } else {
- op_ret = dict_unserialize(lockinfo_buf, len, &lockinfo);
-
- if (lockinfo && local->dict) {
- dict_copy(lockinfo, local->dict);
- }
+ if ((op_ret >= 0) && ((lockinfo != NULL) || (xdata != NULL))) {
+ op_ret = afr_update_local_dicts(frame, lockinfo, xdata);
+ if (lockinfo != NULL) {
+ dict_unref(lockinfo);
}
}
- if (xdata && local->xdata_rsp) {
- dict_copy(xdata, local->xdata_rsp);
+ if (op_ret < 0) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
}
- if (!call_cnt) {
+ if (uatomic_sub_return(&local->call_count, 1) == 0) {
newdict = dict_new();
if (!newdict) {
local->op_ret = -1;
- local->op_errno = ENOMEM;
+ local->op_errno = op_errno = ENOMEM;
goto unwind;
}
@@ -1057,23 +992,58 @@ unlock:
local->dict, (char **)&lockinfo_buf, (unsigned int *)&len);
if (op_ret != 0) {
local->op_ret = -1;
+ local->op_errno = op_errno = ENOMEM;
goto unwind;
}
op_ret = dict_set_dynptr(newdict, GF_XATTR_LOCKINFO_KEY,
(void *)lockinfo_buf, len);
if (op_ret < 0) {
- local->op_ret = -1;
- local->op_errno = -op_ret;
+ GF_FREE(lockinfo_buf);
+ local->op_ret = op_ret = -1;
+ local->op_errno = op_errno = -op_ret;
goto unwind;
}
unwind:
- AFR_STACK_UNWIND(fgetxattr, frame, op_ret, op_errno, newdict,
- local->xdata_rsp);
+ /* TODO: These unwinds use op_ret and op_errno instead of local->op_ret
+ * and local->op_errno. This doesn't seem right because any
+ * failure during processing of each answer could be silently
+ * ignored. This is kept because it was the old behavior and because
+ * local->op_ret is initialized as -1 and local->op_errno is
+ * initialized as EUCLEAN, which makes these values useless. */
+ if (is_fgetxattr) {
+ AFR_STACK_UNWIND(fgetxattr, frame, op_ret, op_errno, newdict,
+ local->xdata_rsp);
+ } else {
+ AFR_STACK_UNWIND(getxattr, frame, op_ret, op_errno, newdict,
+ local->xdata_rsp);
+ }
+
+ if (newdict != NULL) {
+ dict_unref(newdict);
+ }
}
+}
+
+static int32_t
+afr_getxattr_lockinfo_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
+{
+ afr_getxattr_lockinfo_cbk_common(frame, op_ret, op_errno, dict, xdata,
+ false);
- dict_unref(lockinfo);
+ return 0;
+}
+
+static int32_t
+afr_fgetxattr_lockinfo_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
+{
+ afr_getxattr_lockinfo_cbk_common(frame, op_ret, op_errno, dict, xdata,
+ true);
return 0;
}
--
1.8.3.1
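The fan-in rule the patch enforces generalizes well: publish all shared-state updates before decrementing the outstanding-answer counter, and let only the thread that takes it to zero unwind. The patch uses liburcu's uatomic_sub_return(); the same shape with C11 atomics, as a hedged sketch:

    #include <stdatomic.h>

    typedef struct {
        atomic_int pending; /* answers still outstanding */
        /* ... shared result dicts ... */
    } request_t;

    void merge_answer(request_t *r); /* illustrative stub */
    void unwind(request_t *r);       /* illustrative stub */

    static void
    on_answer(request_t *r)
    {
        /* 1. Merge this answer's contribution first. */
        merge_answer(r);

        /* 2. Only then drop the count. The acq_rel ordering makes the
         *    thread that reaches zero see every merge performed before
         *    the corresponding decrements. */
        if (atomic_fetch_sub_explicit(&r->pending, 1,
                                      memory_order_acq_rel) == 1)
            unwind(r);
    }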


@ -0,0 +1,46 @@
From 31cd7627ff329a39691239322df3bc88e962ad02 Mon Sep 17 00:00:00 2001
From: Xavi Hernandez <xhernandez@redhat.com>
Date: Mon, 1 Mar 2021 05:19:39 +0100
Subject: [PATCH 538/538] afr: fix coverity issue introduced by 90cefde
Fixes coverity issues 1447029 and 1447028.
Backport of:
> Upstream-patch-link: https://github.com/gluster/glusterfs/pull/2201
> Updates: #2161
> Change-Id: I6a564231d6aeb76de20675b7ced5d45eed8c377f
> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
BUG: 1911292
Change-Id: I6a564231d6aeb76de20675b7ced5d45eed8c377f
Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/229200
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/cluster/afr/src/afr-inode-read.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/xlators/cluster/afr/src/afr-inode-read.c b/xlators/cluster/afr/src/afr-inode-read.c
index 98e195a..d874172 100644
--- a/xlators/cluster/afr/src/afr-inode-read.c
+++ b/xlators/cluster/afr/src/afr-inode-read.c
@@ -918,13 +918,13 @@ afr_update_local_dicts(call_frame_t *frame, dict_t *dict, dict_t *xdata)
}
if (dict != NULL) {
- if (dict_copy(dict, local->dict) < 0) {
+ if (dict_copy(dict, local->dict) == NULL) {
goto done;
}
}
if (xdata != NULL) {
- if (dict_copy(xdata, local->xdata_rsp) < 0) {
+ if (dict_copy(xdata, local->xdata_rsp) == NULL) {
goto done;
}
}
--
1.8.3.1


@ -0,0 +1,62 @@
From 88523814fe296c9cc9f7619e06210830f59c5edf Mon Sep 17 00:00:00 2001
From: Xavi Hernandez <xhernandez@redhat.com>
Date: Fri, 12 Mar 2021 10:32:09 +0100
Subject: [PATCH 539/539] extras: disable lookup-optimize in virt and block
groups
lookup-optimize doesn't provide any benefit for virtualized
environments and gluster-block workloads, but it's known to cause
corruption in some cases when sharding is also enabled and the volume
is expanded or shrunk.
For this reason, we disable lookup-optimize by default on those
environments.
Backport of:
> Upstream-patch-link: https://github.com/gluster/glusterfs/pull/2254
> Fixes: #2253
> Change-Id: I25861aa50b335556a995a9c33318dd3afb41bf71
> Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
BUG: 1939372
Change-Id: I25861aa50b335556a995a9c33318dd3afb41bf71
Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/231173
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/group-distributed-virt | 1 +
extras/group-gluster-block | 1 +
extras/group-virt.example | 1 +
3 files changed, 3 insertions(+)
diff --git a/extras/group-distributed-virt b/extras/group-distributed-virt
index a960b76..6da3de0 100644
--- a/extras/group-distributed-virt
+++ b/extras/group-distributed-virt
@@ -8,3 +8,4 @@ user.cifs=off
client.event-threads=4
server.event-threads=4
performance.client-io-threads=on
+cluster.lookup-optimize=off
diff --git a/extras/group-gluster-block b/extras/group-gluster-block
index 1e39801..b8d3e8d 100644
--- a/extras/group-gluster-block
+++ b/extras/group-gluster-block
@@ -25,3 +25,4 @@ features.shard-block-size=64MB
user.cifs=off
server.allow-insecure=on
cluster.choose-local=off
+cluster.lookup-optimize=off
diff --git a/extras/group-virt.example b/extras/group-virt.example
index 3a441eb..155f5f5 100644
--- a/extras/group-virt.example
+++ b/extras/group-virt.example
@@ -21,3 +21,4 @@ server.tcp-user-timeout=20
server.keepalive-time=10
server.keepalive-interval=2
server.keepalive-count=5
+cluster.lookup-optimize=off
--
1.8.3.1


@ -0,0 +1,37 @@
From 6895b6c67e9c29af3f966b4d9ee5cb40da763d24 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawa@redhat.com>
Date: Wed, 14 Apr 2021 12:38:45 +0530
Subject: [PATCH 540/540] extras: Disable write-behind for group samba.
When write-behind is enabled with Samba, it can be a
source of data corruption. The translator, while
processing a write call, immediately returns success but continues
writing the data to the server in the background. This can cause data
corruption when two clients relying on Samba to provide data consistency
are operating on the same file.
> fixes: https://github.com/gluster/glusterfs/issues/2329
Change-Id: I5265056ff315a5f3cd97ea11b18db0831b1b901d
Solution: Disable write-behind for samba group
BUG: 1948547
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/235876
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
extras/group-samba | 1 +
1 file changed, 1 insertion(+)
diff --git a/extras/group-samba b/extras/group-samba
index eeee6e0..9611a1f 100644
--- a/extras/group-samba
+++ b/extras/group-samba
@@ -9,3 +9,4 @@ performance.nl-cache=on
performance.nl-cache-timeout=600
performance.readdir-ahead=on
performance.parallel-readdir=on
+performance.write-behind=off
--
1.8.3.1


@ -0,0 +1,545 @@
From 23ab7175e64ab4d75fbcb6874008843cc78b65b8 Mon Sep 17 00:00:00 2001
From: Ashish Pandey <aspandey@redhat.com>
Date: Fri, 16 Apr 2021 18:48:56 +0530
Subject: [PATCH 541/542] glusterd-volgen: Add functionality to accept any
custom xlator
Add a new function which allows users to insert any custom xlator.
This provides a way to add arbitrary processing into file operations.
Users can deploy a plugin (an xlator shared object) and integrate it into glusterfsd.
To enable a custom xlator, do the following:
1. Put the xlator object (.so file) into "XLATOR_DIR/user/".
2. Set the option user.xlator.<xlator> to an existing xlator name to specify its position in the graph.
3. Restart the gluster volume.
Options for a custom xlator can be set via "user.xlator.<xlator>.<optkey>".
Backport of :
>https://github.com/gluster/glusterfs/commit/ea86b664f3b1f54901ce1b7d7fba7d80456f2089
>Fixes: https://github.com/gluster/glusterfs/issues/1943
>Change-Id: Ife3ae1514ea474f5dae2897223012f9d04b64674
>Signed-off-by:Ryo Furuhashi <ryo.furuhashi.nh@hitachi.com>
>Co-authored-by: Yaniv Kaul <ykaul@redhat.com>
>Co-authored-by: Xavi Hernandez <xhernandez@users.noreply.github.com>
Change-Id: Ic8f28bfcfde67213eb1092b0ebf4822c874d37bb
BUG: 1927235
Signed-off-by: Ashish Pandey <aspandey@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/236830
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
Reviewed-by: Xavi Hernandez Juan <xhernandez@redhat.com>
---
cli/src/cli-rpc-ops.c | 148 ++++++++++++++++++++------
cli/src/cli.h | 2 -
tests/basic/user-xlator.t | 65 ++++++++++++
tests/env.rc.in | 3 +
xlators/mgmt/glusterd/src/glusterd-volgen.c | 155 ++++++++++++++++++++++++++++
5 files changed, 342 insertions(+), 31 deletions(-)
create mode 100755 tests/basic/user-xlator.t
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 4e91265..51b5447 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -2269,49 +2269,131 @@ out:
return ret;
}
-char *
-is_server_debug_xlator(void *myframe)
+/*
+ * returns
+ * 1 : is server debug xlator
+ * 0 : is not server debug xlator
+ * <0 : error
+ */
+static int
+is_server_debug_xlator(char *key, char *value)
+{
+ if (!key || !value)
+ return -1;
+
+ if (strcmp("debug.trace", key) == 0 ||
+ strcmp("debug.error-gen", key) == 0) {
+ if (strcmp("client", value) == 0)
+ return 0;
+ else
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * returns
+ * 1 : is user xlator
+ * 0 : is not user xlator
+ * <0 : error
+ */
+static int
+is_server_user_xlator(char *key, char *value)
+{
+ int ret = 0;
+
+ if (!key || !value)
+ return -1;
+
+ ret = fnmatch("user.xlator.*", key, 0);
+ if (ret < 0) {
+ ret = -1;
+ goto out;
+ } else if (ret == FNM_NOMATCH) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = fnmatch("user.xlator.*.*", key, 0);
+ if (ret < 0) {
+ ret = -1;
+ goto out;
+ } else if (ret != FNM_NOMATCH) { // this is user xlator's option key
+ ret = 0;
+ goto out;
+ }
+
+ ret = 1;
+
+out:
+ return ret;
+}
+
+static int
+added_server_xlator(void *myframe, char **added_xlator)
{
call_frame_t *frame = NULL;
cli_local_t *local = NULL;
char **words = NULL;
char *key = NULL;
char *value = NULL;
- char *debug_xlator = NULL;
+ int ret = 0;
frame = myframe;
local = frame->local;
words = (char **)local->words;
while (*words != NULL) {
- if (strstr(*words, "trace") == NULL &&
- strstr(*words, "error-gen") == NULL) {
- words++;
- continue;
- }
-
key = *words;
words++;
value = *words;
- if (value == NULL)
+
+ if (!value) {
break;
- if (strstr(value, "client")) {
- words++;
- continue;
- } else {
- if (!(strstr(value, "posix") || strstr(value, "acl") ||
- strstr(value, "locks") || strstr(value, "io-threads") ||
- strstr(value, "marker") || strstr(value, "index"))) {
- words++;
- continue;
- } else {
- debug_xlator = gf_strdup(key);
- break;
+ }
+
+ ret = is_server_debug_xlator(key, value);
+ if (ret < 0) {
+ gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
+ "failed to check that debug xlator was added");
+ ret = -1;
+ goto out;
+ }
+
+ if (ret) {
+ *added_xlator = gf_strdup(key);
+ if (!*added_xlator) {
+ gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
+ "Out of memory");
+ ret = -1;
+ goto out;
+ }
+ break;
+ }
+
+ ret = is_server_user_xlator(key, value);
+ if (ret < 0) {
+ gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
+ "failed to check that user xlator was added");
+ ret = -1;
+ goto out;
+ }
+
+ if (ret) {
+ *added_xlator = gf_strdup(key);
+ if (!*added_xlator) {
+ gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
+ "Out of memory");
+ ret = -1;
+ goto out;
}
+ break;
}
}
- return debug_xlator;
+out:
+ return ret;
}
int
@@ -2327,7 +2409,7 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
char msg[1024] = {
0,
};
- char *debug_xlator = NULL;
+ char *added_xlator = NULL;
char tmp_str[512] = {
0,
};
@@ -2365,18 +2447,26 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
* The process has to be restarted. So this is a check from the
* volume set option such that if debug xlators such as trace/errorgen
* are provided in the set command, warn the user.
+ * volume set option such that if user custom xlators or debug
+ * xlators such as trace/errorgen are provided in the set command,
+ * warn the user.
*/
- debug_xlator = is_server_debug_xlator(myframe);
+ ret = added_server_xlator(myframe, &added_xlator);
+ if (ret < 0) {
+ gf_log("cli", GF_LOG_ERROR,
+ "failed to check that server graph has been changed");
+ goto out;
+ }
if (dict_get_str(dict, "help-str", &help_str) && !msg[0])
snprintf(msg, sizeof(msg), "Set volume %s",
(rsp.op_ret) ? "unsuccessful" : "successful");
- if (rsp.op_ret == 0 && debug_xlator) {
+ if (rsp.op_ret == 0 && added_xlator) {
snprintf(tmp_str, sizeof(tmp_str),
"\n%s translator has been "
"added to the server volume file. Please restart the"
" volume for enabling the translator",
- debug_xlator);
+ added_xlator);
}
if ((global_state->mode & GLUSTER_MODE_XML) && (help_str == NULL)) {
@@ -2394,7 +2484,7 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
cli_err("volume set: failed");
} else {
if (help_str == NULL) {
- if (debug_xlator == NULL)
+ if (added_xlator == NULL)
cli_out("volume set: success");
else
cli_out("volume set: success%s", tmp_str);
@@ -2408,7 +2498,7 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
out:
if (dict)
dict_unref(dict);
- GF_FREE(debug_xlator);
+ GF_FREE(added_xlator);
cli_cmd_broadcast_response(ret);
gf_free_xdr_cli_rsp(rsp);
return ret;
diff --git a/cli/src/cli.h b/cli/src/cli.h
index 7b4f446..b5b69ea 100644
--- a/cli/src/cli.h
+++ b/cli/src/cli.h
@@ -502,8 +502,6 @@ cli_xml_output_snapshot(int cmd_type, dict_t *dict, int op_ret, int op_errno,
int
cli_xml_snapshot_status_single_snap(cli_local_t *local, dict_t *dict,
char *key);
-char *
-is_server_debug_xlator(void *myframe);
int32_t
cli_cmd_snapshot_parse(const char **words, int wordcount, dict_t **options,
diff --git a/tests/basic/user-xlator.t b/tests/basic/user-xlator.t
new file mode 100755
index 0000000..a711f9f
--- /dev/null
+++ b/tests/basic/user-xlator.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#### patchy.dev.d-backends-patchy1.vol
+brick=${B0//\//-}
+SERVER_VOLFILE="/var/lib/glusterd/vols/${V0}/${V0}.${H0}.${brick:1}-${V0}1.vol"
+
+cleanup;
+
+TEST mkdir -p $B0/single-brick
+TEST mkdir -p ${GLUSTER_XLATOR_DIR}/user
+
+## deploy dummy user xlator
+TEST cp ${GLUSTER_XLATOR_DIR}/playground/template.so ${GLUSTER_XLATOR_DIR}/user/hoge.so
+
+TEST glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+TEST $CLI volume set $V0 user.xlator.hoge posix
+TEST grep -q 'user/hoge' ${SERVER_VOLFILE}
+
+TEST $CLI volume set $V0 user.xlator.hoge.opt1 10
+TEST grep -q '"option opt1 10"' ${SERVER_VOLFILE}
+TEST $CLI volume set $V0 user.xlator.hoge.opt2 hogehoge
+TEST grep -q '"option opt2 hogehoge"' ${SERVER_VOLFILE}
+TEST $CLI volume set $V0 user.xlator.hoge.opt3 true
+TEST grep -q '"option opt3 true"' ${SERVER_VOLFILE}
+
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}4
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}5
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}6
+
+TEST $CLI volume set $V0 user.xlator.hoge trash
+TEST grep -q 'user/hoge' ${SERVER_VOLFILE}
+
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}4
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}5
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}6
+
+TEST ! $CLI volume set $V0 user.xlator.hoge unknown
+TEST grep -q 'user/hoge' ${SERVER_VOLFILE} # When the CLI fails, the volfile is not modified.
+
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}4
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}5
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}6
+
+#### teardown
+
+TEST rm -f ${GLUSTER_XLATOR_DIR}/user/hoge.so
+cleanup;
diff --git a/tests/env.rc.in b/tests/env.rc.in
index c7472a7..1f0ca88 100644
--- a/tests/env.rc.in
+++ b/tests/env.rc.in
@@ -40,3 +40,6 @@ export GLUSTER_LIBEXECDIR
RUN_NFS_TESTS=@BUILD_GNFS@
export RUN_NFS_TESTS
+
+GLUSTER_XLATOR_DIR=@libdir@/glusterfs/@PACKAGE_VERSION@/xlator
+export GLUSTER_XLATOR_DIR
\ No newline at end of file
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index 1920284..a242b5c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -45,6 +45,11 @@ struct gd_validate_reconf_opts {
extern struct volopt_map_entry glusterd_volopt_map[];
+struct check_and_add_user_xlator_t {
+ volgen_graph_t *graph;
+ char *volname;
+};
+
#define RPC_SET_OPT(XL, CLI_OPT, XLATOR_OPT, ERROR_CMD) \
do { \
char *_value = NULL; \
@@ -2822,6 +2827,145 @@ out:
return ret;
}
+static gf_boolean_t
+check_user_xlator_position(dict_t *dict, char *key, data_t *value,
+ void *prev_xlname)
+{
+ if (strncmp(key, "user.xlator.", SLEN("user.xlator.")) != 0) {
+ return false;
+ }
+
+ if (fnmatch("user.xlator.*.*", key, 0) == 0) {
+ return false;
+ }
+
+ char *value_str = data_to_str(value);
+ if (!value_str) {
+ return false;
+ }
+
+ if (strcmp(value_str, prev_xlname) == 0) {
+ gf_log("glusterd", GF_LOG_INFO,
+ "found insert position of user-xlator(%s)", key);
+ return true;
+ }
+
+ return false;
+}
+
+static int
+set_user_xlator_option(dict_t *set_dict, char *key, data_t *value, void *data)
+{
+ xlator_t *xl = data;
+ char *optname = strrchr(key, '.') + 1;
+
+ gf_log("glusterd", GF_LOG_DEBUG, "set user xlator option %s = %s", key,
+ value->data);
+
+ return xlator_set_option(xl, optname, strlen(optname), data_to_str(value));
+}
+
+static int
+insert_user_xlator_to_graph(dict_t *set_dict, char *key, data_t *value,
+ void *action_data)
+{
+ int ret = -1;
+
+ struct check_and_add_user_xlator_t *data = action_data;
+
+ char *xlator_name = strrchr(key, '.') + 1; // user.xlator.<xlator_name>
+ char *xlator_option_matcher = NULL;
+ char *type = NULL;
+ xlator_t *xl = NULL;
+
+ // convert optkey to xlator type
+ if (gf_asprintf(&type, "user/%s", xlator_name) < 0) {
+ gf_log("glusterd", GF_LOG_ERROR, "failed to generate user-xlator type");
+ goto out;
+ }
+
+ gf_log("glusterd", GF_LOG_INFO, "add user xlator=%s to graph", type);
+
+ xl = volgen_graph_add(data->graph, type, data->volname);
+ if (!xl) {
+ goto out;
+ }
+
+ ret = gf_asprintf(&xlator_option_matcher, "user.xlator.%s.*", xlator_name);
+ if (ret < 0) {
+ gf_log("glusterd", GF_LOG_ERROR,
+ "failed to generate user-xlator option matcher");
+ goto out;
+ }
+
+ dict_foreach_fnmatch(set_dict, xlator_option_matcher,
+ set_user_xlator_option, xl);
+
+out:
+ if (type)
+ GF_FREE(type);
+ if (xlator_option_matcher)
+ GF_FREE(xlator_option_matcher);
+
+ return ret;
+}
+
+static int
+validate_user_xlator_position(dict_t *this, char *key, data_t *value,
+ void *unused)
+{
+ int ret = -1;
+ int i = 0;
+
+ if (!value)
+ goto out;
+
+ if (fnmatch("user.xlator.*.*", key, 0) == 0) {
+ ret = 0;
+ goto out;
+ }
+
+ char *value_str = data_to_str(value);
+ if (!value_str)
+ goto out;
+
+ int num_xlators = sizeof(server_graph_table) /
+ sizeof(server_graph_table[0]);
+ for (i = 0; i < num_xlators; i++) {
+ if (server_graph_table[i].dbg_key &&
+ strcmp(value_str, server_graph_table[i].dbg_key) == 0) {
+ ret = 0;
+ goto out;
+ }
+ }
+
+out:
+ if (ret == -1)
+ gf_log("glusterd", GF_LOG_ERROR, "invalid user xlator position %s = %s",
+ key, value->data);
+
+ return ret;
+}
+
+static int
+check_and_add_user_xl(volgen_graph_t *graph, dict_t *set_dict, char *volname,
+ char *prev_xlname)
+{
+ if (!prev_xlname)
+ goto out;
+
+ struct check_and_add_user_xlator_t data = {.graph = graph,
+ .volname = volname};
+
+ if (dict_foreach_match(set_dict, check_user_xlator_position, prev_xlname,
+ insert_user_xlator_to_graph, &data) < 0) {
+ return -1;
+ }
+
+out:
+ return 0;
+}
+
static int
server_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
dict_t *set_dict, void *param)
@@ -2831,6 +2975,12 @@ server_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
char *loglevel = NULL;
int i = 0;
+ if (dict_foreach_fnmatch(set_dict, "user.xlator.*",
+ validate_user_xlator_position, NULL) < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
i = sizeof(server_graph_table) / sizeof(server_graph_table[0]) - 1;
while (i >= 0) {
@@ -2848,6 +2998,11 @@ server_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
if (ret)
goto out;
+ ret = check_and_add_user_xl(graph, set_dict, volinfo->volname,
+ server_graph_table[i].dbg_key);
+ if (ret)
+ goto out;
+
i--;
}
--
1.8.3.1
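
The volgen half of the patch above hangs together around one idiom: dict_foreach_match() walks the options dict with a match callback (check_user_xlator_position) and an action callback (insert_user_xlator_to_graph), so a user xlator named by a user.xlator.<name> key is inserted right after the brick xlator whose dbg_key its value names. A minimal, self-contained C sketch of that idiom follows; kv_t, foreach_match and the two toy callbacks are illustrative stand-ins for this document only, not the real libglusterfs API.

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for dict_t entries and the libglusterfs
 * dict_foreach_match() walk; not the real glusterfs types. */
typedef struct { const char *key; const char *value; } kv_t;

typedef int (*match_fn)(const kv_t *kv, void *match_data);
typedef int (*action_fn)(const kv_t *kv, void *action_data);

/* Walk every pair; run the action only where the matcher says yes. */
static int
foreach_match(kv_t *kvs, int n, match_fn match, void *mdata,
              action_fn action, void *adata)
{
    for (int i = 0; i < n; i++) {
        if (match(&kvs[i], mdata) && action(&kvs[i], adata) < 0)
            return -1;
    }
    return 0;
}

/* Matcher: a "user.xlator.<name>" key (no trailing option part) whose
 * value names the xlator just emitted into the graph. The real code
 * uses fnmatch("user.xlator.*.*") to skip option keys; strchr is a
 * simplification. */
static int
is_user_xlator_here(const kv_t *kv, void *prev_xlname)
{
    if (strncmp(kv->key, "user.xlator.", strlen("user.xlator.")) != 0)
        return 0;
    if (strchr(kv->key + strlen("user.xlator."), '.') != NULL)
        return 0; /* "user.xlator.<name>.<opt>" is an option, not a position */
    return strcmp(kv->value, (const char *)prev_xlname) == 0;
}

/* Action: in glusterd this is where volgen_graph_add("user/<name>")
 * and the per-xlator option pass would happen. */
static int
insert_user_xlator(const kv_t *kv, void *unused)
{
    (void)unused;
    printf("insert user/%s after its configured neighbour\n",
           strrchr(kv->key, '.') + 1);
    return 0;
}

int
main(void)
{
    kv_t opts[] = {
        {"user.xlator.hoge", "posix"},
        {"user.xlator.hoge.opt1", "10"},
    };
    /* Called once per server_graph_table entry, with that entry's
     * dbg_key as the "previous xlator" name. */
    return foreach_match(opts, 2, is_user_xlator_here, (void *)"posix",
                         insert_user_xlator, NULL);
}

Each server_graph_table entry's dbg_key plays the role of the "posix" argument here, which is why patch 543 below fills in the dbg_key values that were previously NULL.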

View File

@ -0,0 +1,64 @@
From f3db0c99faf813e0f2e9ffcf599416555a59df1f Mon Sep 17 00:00:00 2001
From: Ashish Pandey <aspandey@redhat.com>
Date: Tue, 9 Feb 2021 16:43:35 +0530
Subject: [PATCH 542/542] xlators/mgmt: Fixing coverity issue 1445996
Backport of https://github.com/gluster/glusterfs/pull/2148/commits/9785e96e0bdf6e60896570fdf5e4a6976a6f60ba
Fixing "Null pointer dereferences"
BUG: 1927235
Change-Id: Idbc014e1302d2450f97bccd028681198c0d97424
Signed-off-by: Ashish Pandey <aspandey@redhat.com>
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/237433
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-volgen.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index a242b5c..71aed08 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -2916,21 +2916,23 @@ validate_user_xlator_position(dict_t *this, char *key, data_t *value,
{
int ret = -1;
int i = 0;
+ char *value_str = NULL;
if (!value)
goto out;
+ value_str = data_to_str(value);
+ if (!value_str)
+ goto out;
+
if (fnmatch("user.xlator.*.*", key, 0) == 0) {
ret = 0;
goto out;
}
- char *value_str = data_to_str(value);
- if (!value_str)
- goto out;
-
int num_xlators = sizeof(server_graph_table) /
sizeof(server_graph_table[0]);
+
for (i = 0; i < num_xlators; i++) {
if (server_graph_table[i].dbg_key &&
strcmp(value_str, server_graph_table[i].dbg_key) == 0) {
@@ -2942,7 +2944,7 @@ validate_user_xlator_position(dict_t *this, char *key, data_t *value,
out:
if (ret == -1)
gf_log("glusterd", GF_LOG_ERROR, "invalid user xlator position %s = %s",
- key, value->data);
+ key, value_str);
return ret;
}
--
1.8.3.1
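
The defect shape fixed above is a classic one: the out: label logged value->data even though one path reaches it with value still NULL. The sketch below (data_t here is a toy stand-in, not the libglusterfs type, and fprintf stands in for gf_log) shows the corrected ordering: initialize the derived string to NULL, extract it before any goto out, and log only the derived local, guarded against NULL.

#include <stdio.h>

/* Toy stand-in for the libglusterfs data_t; illustration only. */
typedef struct { char *data; } data_t;

static char *
data_to_str(data_t *value)
{
    return value ? value->data : NULL;
}

static int
validate(const char *key, data_t *value)
{
    int ret = -1;
    char *value_str = NULL; /* derived up front, safe to use at out: */

    if (!value)
        goto out;

    value_str = data_to_str(value);
    if (!value_str)
        goto out;

    ret = 0; /* ... the real position checks would go here ... */

out:
    if (ret == -1)
        /* Logging value->data here would crash when value == NULL;
         * value_str is NULL at worst, and we guard even that. */
        fprintf(stderr, "invalid user xlator position %s = %s\n",
                key, value_str ? value_str : "(null)");
    return ret;
}

int
main(void)
{
    data_t d = { "posix" };
    validate("user.xlator.hoge", &d);   /* ok */
    validate("user.xlator.hoge", NULL); /* logs without dereferencing */
    return 0;
}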

View File

@ -0,0 +1,162 @@
From 71fc5b7949e00c4448f5ec1291e756b201a70082 Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Thu, 29 Apr 2021 18:34:57 +0530
Subject: [PATCH 543/543] glusterd: handle custom xlator failure cases
Problem-1:
custom xlator insertion was failing for those xlators in the brick graph
whose dbg_key was NULL in the server_graph_table. Looking at the git log,
the dbg_key was added in commit d1397dbd7d6cdbd2d81d5d36d608b6175d449db4
for inserting debug xlators.
Fix: Define the dbg_key for all brick xlators below server.
Problem-2:
In the commit-op phase, glusterd_op_set_volume() updates the volinfo
dict with the key-value pairs and then proceeds to create the volfiles.
If any of the steps fail, the volinfo dict retains those key-values,
until glusterd is restarted or `gluster vol reset $VOLNAME` is issued.
Fix:
Make a copy of the volinfo dict and if there are any failures in
proceeding with the set volume logic, restore the dict to its original
state.
Backport of:
> Upstream-patch-link: https://github.com/gluster/glusterfs/pull/2371
> Change-Id: I9010dab33d0139b8e6d603308e331b6d220a4849
> Updates: #2370
> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Change-Id: I9010dab33d0139b8e6d603308e331b6d220a4849
BUG: 1953901
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/239889
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
tests/basic/user-xlator.t | 16 ++++++++++++++--
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 16 ++++++++++++++++
xlators/mgmt/glusterd/src/glusterd-volgen.c | 14 +++++++-------
3 files changed, 37 insertions(+), 9 deletions(-)
diff --git a/tests/basic/user-xlator.t b/tests/basic/user-xlator.t
index a711f9f..ed2d831 100755
--- a/tests/basic/user-xlator.t
+++ b/tests/basic/user-xlator.t
@@ -35,8 +35,18 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}4
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}5
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}6
-TEST $CLI volume set $V0 user.xlator.hoge trash
-TEST grep -q 'user/hoge' ${SERVER_VOLFILE}
+# Test that the insertion at all positions between server and posix is successful.
+# It is not guaranteed that the brick process will start/work in all positions though.
+TESTS_EXPECTED_IN_LOOP=34
+declare -a brick_side_xlators=("decompounder" "io-stats" "quota" "index" "barrier"
+ "marker" "selinux" "io-threads" "upcall" "leases"
+ "read-only" "worm" "locks" "access-control"
+ "bitrot-stub" "changelog" "trash")
+for xlator in "${brick_side_xlators[@]}"
+ do
+ TEST_IN_LOOP $CLI volume set $V0 user.xlator.hoge $xlator
+ TEST_IN_LOOP grep -q 'user/hoge' ${SERVER_VOLFILE}
+ done
TEST $CLI volume stop $V0
TEST $CLI volume start $V0
@@ -49,6 +59,8 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}6
TEST ! $CLI volume set $V0 user.xlator.hoge unknown
TEST grep -q 'user/hoge' ${SERVER_VOLFILE} # When the CLI fails, the volfile is not modified.
+# User xlator insert failures must not prevent setting other volume options.
+TEST $CLI volume set $V0 storage.reserve 10%
TEST $CLI volume stop $V0
TEST $CLI volume start $V0
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 1e84f5f..893af29 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -2911,6 +2911,7 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
uint32_t new_op_version = 0;
gf_boolean_t quorum_action = _gf_false;
glusterd_svc_t *svc = NULL;
+ dict_t *volinfo_dict_orig = NULL;
this = THIS;
GF_ASSERT(this);
@@ -2918,6 +2919,10 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
priv = this->private;
GF_ASSERT(priv);
+ volinfo_dict_orig = dict_new();
+ if (!volinfo_dict_orig)
+ goto out;
+
ret = dict_get_int32n(dict, "count", SLEN("count"), &dict_count);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
@@ -2949,6 +2954,11 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
goto out;
}
+ if (dict_copy(volinfo->dict, volinfo_dict_orig) == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
/* TODO: Remove this once v3.3 compatibility is not required */
check_op_version = dict_get_str_boolean(dict, "check-op-version",
_gf_false);
@@ -3171,6 +3181,12 @@ out:
gf_msg_debug(this->name, 0, "returning %d", ret);
if (quorum_action)
glusterd_do_quorum_action();
+ if (ret < 0 && count > 1) {
+ if (dict_reset(volinfo->dict) == 0)
+ dict_copy(volinfo_dict_orig, volinfo->dict);
+ }
+ if (volinfo_dict_orig)
+ dict_unref(volinfo_dict_orig);
return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index 71aed08..aa85bdb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -2706,24 +2706,24 @@ out:
static volgen_brick_xlator_t server_graph_table[] = {
{brick_graph_add_server, NULL},
{brick_graph_add_decompounder, "decompounder"},
- {brick_graph_add_io_stats, "NULL"},
+ {brick_graph_add_io_stats, "io-stats"},
{brick_graph_add_sdfs, "sdfs"},
{brick_graph_add_namespace, "namespace"},
- {brick_graph_add_cdc, NULL},
+ {brick_graph_add_cdc, "cdc"},
{brick_graph_add_quota, "quota"},
{brick_graph_add_index, "index"},
- {brick_graph_add_barrier, NULL},
+ {brick_graph_add_barrier, "barrier"},
{brick_graph_add_marker, "marker"},
{brick_graph_add_selinux, "selinux"},
{brick_graph_add_fdl, "fdl"},
{brick_graph_add_iot, "io-threads"},
{brick_graph_add_upcall, "upcall"},
{brick_graph_add_leases, "leases"},
- {brick_graph_add_pump, NULL},
- {brick_graph_add_ro, NULL},
- {brick_graph_add_worm, NULL},
+ {brick_graph_add_pump, "pump"},
+ {brick_graph_add_ro, "read-only"},
+ {brick_graph_add_worm, "worm"},
{brick_graph_add_locks, "locks"},
- {brick_graph_add_acl, "acl"},
+ {brick_graph_add_acl, "access-control"},
{brick_graph_add_bitrot_stub, "bitrot-stub"},
{brick_graph_add_changelog, "changelog"},
#if USE_GFDB /* changetimerecorder depends on gfdb */
--
1.8.3.1
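
Problem-2's fix is a copy-on-entry, restore-on-failure pattern around the volinfo dict: snapshot it before applying the key-value pairs, and roll the mutation back if volfile generation fails, so a rejected option does not linger until a glusterd restart or a volume reset. A minimal sketch under assumed toy types (strmap_t stands in for dict_t, and generate_volfiles is a placeholder for the real commit-op steps that use dict_new/dict_copy/dict_reset):

#include <stdio.h>
#include <string.h>

/* Toy stand-in for dict_t: a single settable option. */
typedef struct { char key[64]; char value[64]; } strmap_t;

static void map_copy(strmap_t *dst, const strmap_t *src) { *dst = *src; }

/* Pretend volfile generation; fails for one option to show the rollback. */
static int
generate_volfiles(const strmap_t *m)
{
    return strcmp(m->value, "unknown") == 0 ? -1 : 0;
}

/* Mirrors the shape of glusterd_op_set_volume(): snapshot the dict,
 * mutate it, and roll the mutation back if the later steps fail. */
static int
set_volume_option(strmap_t *volinfo, const char *key, const char *value)
{
    strmap_t orig;
    map_copy(&orig, volinfo);           /* snapshot before mutating */

    snprintf(volinfo->key, sizeof(volinfo->key), "%s", key);
    snprintf(volinfo->value, sizeof(volinfo->value), "%s", value);

    int ret = generate_volfiles(volinfo);
    if (ret < 0)
        map_copy(volinfo, &orig);       /* restore on failure */
    return ret;
}

int
main(void)
{
    strmap_t volinfo = { "user.xlator.hoge", "posix" };

    if (set_volume_option(&volinfo, "user.xlator.hoge", "unknown") < 0)
        printf("set failed, dict restored: %s = %s\n",
               volinfo.key, volinfo.value); /* posix, not unknown */
    return 0;
}

This is the same behaviour the updated user-xlator.t exercises: a failed user.xlator.hoge=unknown set leaves the volfile untouched and does not block a later storage.reserve set.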

View File

@ -0,0 +1,47 @@
From 840f437d232fbafac9f4448b0f8d0e9976ea1e1d Mon Sep 17 00:00:00 2001
From: Tamar Shacked <tshacked@redhat.com>
Date: Mon, 23 Aug 2021 20:46:13 +0300
Subject: [PATCH 544/544] RHGS-3.5.4: rebuild to ship with RHEL-8.5
Label: DOWNSTREAM ONLY
BUG: 1996984
Signed-off-by: Tamar Shacked <tshacked@redhat.com>
Change-Id: Idafc64b8ee5da165c87428b8a5166cf319ef7660
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/267350
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfs.spec.in | 2 ++
rfc.sh | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 2be7677..4511979 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -1982,6 +1982,8 @@ fi
%endif
%changelog
+* Tue Aug 24 2021 Tamar Shacked <tshacked@redhat.com>
+- build RHGS client for RHEL-8.5 (#1996984)
* Mon May 11 2020 Sunny Kumar <sunkumar@redhat.com>
- added requires policycoreutils-python-utils on rhel8 for geo-replication
diff --git a/rfc.sh b/rfc.sh
index c0559b9..b1153be 100755
--- a/rfc.sh
+++ b/rfc.sh
@@ -315,7 +315,7 @@ main()
if [ -z "${reference}" ]; then
$drier git push $ORIGIN HEAD:refs/for/$branch/rfc;
else
- $drier git push $ORIGIN HEAD:refs/for/$branch/ref-${reference};
+ $drier git push $ORIGIN HEAD:refs/for/$branch;
fi
}
--
1.8.3.1

View File

@ -237,7 +237,7 @@ Release: 0.1%{?prereltag:.%{prereltag}}%{?dist}
%else
Name: glusterfs
Version: 6.0
Release: 49.1%{?dist}
Release: 56.4%{?dist}
ExcludeArch: i686
%endif
License: GPLv2 or LGPLv3+
@ -795,7 +795,70 @@ Patch0477: 0477-glusterd-snapshot-Snapshot-prevalidation-failure-not.patch
Patch0478: 0478-DHT-Fixing-rebalance-failure-on-issuing-stop-command.patch
Patch0479: 0479-ganesha-ha-revised-regex-exprs-for-status.patch
Patch0480: 0480-DHT-Rebalance-Ensure-Rebalance-reports-status-only-o.patch
Patch0481: 0481-RHGS-3.5.3-rebuild-to-ship-with-RHEL.patch
Patch0481: 0481-Update-rfc.sh-to-rhgs-3.5.4.patch
Patch0482: 0482-logger-Always-print-errors-in-english.patch
Patch0483: 0483-afr-more-quorum-checks-in-lookup-and-new-entry-marki.patch
Patch0484: 0484-glusterd-rebalance-status-displays-stats-as-0-after-.patch
Patch0485: 0485-cli-rpc-conditional-init-of-global-quota-rpc-1578.patch
Patch0486: 0486-glusterd-brick-sock-file-deleted-log-error-1560.patch
Patch0487: 0487-Events-Log-file-not-re-opened-after-logrotate.patch
Patch0488: 0488-glusterd-afr-enable-granular-entry-heal-by-default.patch
Patch0489: 0489-glusterd-fix-bug-in-enabling-granular-entry-heal.patch
Patch0490: 0490-Segmentation-fault-occurs-during-truncate.patch
Patch0491: 0491-glusterd-mount-directory-getting-truncated-on-mounti.patch
Patch0492: 0492-afr-lookup-Pass-xattr_req-in-while-doing-a-selfheal-.patch
Patch0493: 0493-geo-rep-Note-section-is-required-for-ignore_deletes.patch
Patch0494: 0494-glusterd-start-the-brick-on-a-different-port.patch
Patch0495: 0495-geo-rep-descriptive-message-when-worker-crashes-due-.patch
Patch0496: 0496-posix-Use-MALLOC-instead-of-alloca-to-allocate-memor.patch
Patch0497: 0497-socket-Use-AES128-cipher-in-SSL-if-AES-is-supported-.patch
Patch0498: 0498-geo-rep-Fix-corner-case-in-rename-on-mkdir-during-hy.patch
Patch0499: 0499-gfapi-give-appropriate-error-when-size-exceeds.patch
Patch0500: 0500-features-shard-Convert-shard-block-indices-to-uint64.patch
Patch0501: 0501-Cli-Removing-old-syntax-of-tier-cmds-from-help-menu.patch
Patch0502: 0502-dht-fixing-a-permission-update-issue.patch
Patch0503: 0503-gfapi-Suspend-synctasks-instead-of-blocking-them.patch
Patch0504: 0504-io-stats-Configure-ios_sample_buf_size-based-on-samp.patch
Patch0505: 0505-trash-Create-inode_table-only-while-feature-is-enabl.patch
Patch0506: 0506-posix-Attach-a-posix_spawn_disk_thread-with-glusterf.patch
Patch0507: 0507-inode-make-critical-section-smaller.patch
Patch0508: 0508-fuse-fetch-arbitrary-number-of-groups-from-proc-pid-.patch
Patch0509: 0509-core-configure-optimum-inode-table-hash_size-for-shd.patch
Patch0510: 0510-glusterd-brick_mux-Optimize-friend-handshake-code-to.patch
Patch0511: 0511-features-shard-Missing-format-specifier.patch
Patch0512: 0512-glusterd-shared-storage-mount-fails-in-ipv6-environm.patch
Patch0513: 0513-afr-mark-pending-xattrs-as-a-part-of-metadata-heal.patch
Patch0514: 0514-afr-event-gen-changes.patch
Patch0515: 0515-cluster-afr-Heal-directory-rename-without-rmdir-mkdi.patch
Patch0516: 0516-afr-return-EIO-for-gfid-split-brains.patch
Patch0517: 0517-gfapi-glfs_h_creat_open-new-API-to-create-handle-and.patch
Patch0518: 0518-glusterd-Fix-for-shared-storage-in-ipv6-env.patch
Patch0519: 0519-glusterfs-events-Fix-incorrect-attribute-access-2002.patch
Patch0520: 0520-performance-open-behind-seek-fop-should-open_and_res.patch
Patch0521: 0521-open-behind-fix-missing-fd-reference.patch
Patch0522: 0522-lcov-improve-line-coverage.patch
Patch0523: 0523-open-behind-rewrite-of-internal-logic.patch
Patch0524: 0524-open-behind-fix-call_frame-leak.patch
Patch0525: 0525-open-behind-implement-create-fop.patch
Patch0526: 0526-Quota-quota_fsck.py-converting-byte-string-to-string.patch
Patch0527: 0527-Events-Socket-creation-after-getaddrinfo-and-IPv4-an.patch
Patch0528: 0528-Extras-Removing-xattr_analysis-script.patch
Patch0529: 0529-geo-rep-prompt-should-work-for-ignore_deletes.patch
Patch0530: 0530-gfapi-avoid-crash-while-logging-message.patch
Patch0531: 0531-Glustereventsd-Default-port-change-2091.patch
Patch0532: 0532-glusterd-fix-for-starting-brick-on-new-port.patch
Patch0533: 0533-glusterd-Rebalance-cli-is-not-showing-correct-status.patch
Patch0534: 0534-glusterd-Resolve-use-after-free-bug-2181.patch
Patch0535: 0535-multiple-files-use-dict_allocate_and_serialize-where.patch
Patch0536: 0536-dht-Ongoing-IO-is-failed-during-volume-shrink-operat.patch
Patch0537: 0537-cluster-afr-Fix-race-in-lockinfo-f-getxattr.patch
Patch0538: 0538-afr-fix-coverity-issue-introduced-by-90cefde.patch
Patch0539: 0539-extras-disable-lookup-optimize-in-virt-and-block-gro.patch
Patch0540: 0540-extras-Disable-write-behind-for-group-samba.patch
Patch0541: 0541-glusterd-volgen-Add-functionality-to-accept-any-cust.patch
Patch0542: 0542-xlaotrs-mgmt-Fixing-coverity-issue-1445996.patch
Patch0543: 0543-glusterd-handle-custom-xlator-failure-cases.patch
Patch0544: 0544-RHGS-3.5.4-rebuild-to-ship-with-RHEL-8.5.patch
%description
GlusterFS is a distributed file-system capable of scaling to several
@ -1936,7 +1999,6 @@ exit 0
%if ( 0%{!?_without_server:1} )
%files server
%doc extras/clear_xattrs.sh
%{_datadir}/glusterfs/scripts/xattr_analysis.py*
%{_datadir}/glusterfs/scripts/quota_fsck.py*
# sysconf
%config(noreplace) %{_sysconfdir}/glusterfs
@ -2539,8 +2601,42 @@ fi
%endif
%changelog
* Fri Feb 19 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-49.1
- fixes bugs bz#1930561
* Mon Aug 30 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.4
- Add gating.yaml, fixes bugs bz#1996984
* Tue Aug 24 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.3
- fixes bugs bz#1996984
* Thu May 06 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.2
- fixes bugs bz#1953901
* Thu Apr 22 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.1
- fixes bugs bz#1927235
* Wed Apr 14 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56
- fixes bugs bz#1948547
* Fri Mar 19 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-55
- fixes bugs bz#1939372
* Wed Mar 03 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-54
- fixes bugs bz#1832306 bz#1911292 bz#1924044
* Thu Feb 11 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-53
- fixes bugs bz#1224906 bz#1691320 bz#1719171 bz#1814744 bz#1865796
* Thu Jan 28 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-52
- fixes bugs bz#1600459 bz#1719171 bz#1830713 bz#1856574
* Mon Dec 28 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-51
- fixes bugs bz#1640148 bz#1856574 bz#1910119
* Tue Dec 15 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-50
- fixes bugs bz#1224906 bz#1412494 bz#1612973 bz#1663821 bz#1691320
bz#1726673 bz#1749304 bz#1752739 bz#1779238 bz#1813866 bz#1814744 bz#1821599
bz#1832306 bz#1835229 bz#1842449 bz#1865796 bz#1878077 bz#1882923 bz#1885966
bz#1890506 bz#1896425 bz#1898776 bz#1898777 bz#1898778 bz#1898781 bz#1898784
bz#1903468
* Wed Nov 25 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-49
- fixes bugs bz#1286171