autobuild v3.12.2-7

Resolves: bz#958062 bz#1186664 bz#1226874 bz#1446046 bz#1529451
Resolves: bz#1550315 bz#1557365 bz#1559884 bz#1561733
Signed-off-by: Milind Changire <mchangir@redhat.com>
This commit is contained in:
Milind Changire 2018-04-04 06:07:49 -04:00
parent 772c9f37aa
commit 155a159af9
12 changed files with 4641 additions and 1 deletions

View File

@ -0,0 +1,235 @@
From 355e366ff59dfc2ecd4fdf1e5653664b9ac0c45f Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawa@redhat.com>
Date: Wed, 14 Mar 2018 09:37:52 +0530
Subject: [PATCH 202/212] glusterd: TLS verification fails while using
intermediate CA
Problem: TLS verification fails while using intermediate CA
if mgmt SSL is enabled.
Solution: There are two main issues causing TLS verification to fail
1) not calling the SSL API to set cert_depth
2) The current code does not allow setting the certificate depth
while MGMT SSL is enabled.
After applying this patch, to set the certificate depth the user
needs to set the option transport.socket.ssl-cert-depth <depth>
in /var/lib/glusterd/secure-access instead of setting it in
/etc/glusterfs/glusterd.vol. At the time secure_mgmt is set in ctx
we will check the value of cert-depth and save the value of cert-depth
in ctx. If the user does not provide any value for cert-depth,
the default value of 1 is used.
> BUG: 1555154
> Change-Id: I89e9a9e1026e37efb5c20f9ec62b1989ef644f35
> Reviewed on https://review.gluster.org/#/c/19708/
> (cherry pick from commit cf06dd544004701ef43fa81c5b7a95353d5c1d65)
BUG: 1446046
Change-Id: I94000bc8741ceb5659ec9f376eac447ae84792ad
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/133849
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
api/src/glfs-mgmt.c | 1 +
cli/src/cli.c | 1 +
glusterfsd/src/glusterfsd-mgmt.c | 2 ++
glusterfsd/src/glusterfsd.c | 1 +
heal/src/glfs-heal.c | 1 +
libglusterfs/src/glusterfs.h | 6 ++++
libglusterfs/src/graph.c | 42 +++++++++++++++++++++++++++-
rpc/rpc-transport/socket/src/socket.c | 12 +++++---
xlators/mgmt/glusterd/src/glusterd-handler.c | 3 ++
9 files changed, 64 insertions(+), 5 deletions(-)
diff --git a/api/src/glfs-mgmt.c b/api/src/glfs-mgmt.c
index 32b9dbd..b70dc35 100644
--- a/api/src/glfs-mgmt.c
+++ b/api/src/glfs-mgmt.c
@@ -996,6 +996,7 @@ glfs_mgmt_init (struct glfs *fs)
if (sys_access (SECURE_ACCESS_FILE, F_OK) == 0) {
ctx->secure_mgmt = 1;
+ ctx->ssl_cert_depth = glusterfs_read_secure_access_file ();
}
rpc = rpc_clnt_new (options, THIS, THIS->name, 8);
diff --git a/cli/src/cli.c b/cli/src/cli.c
index 52c1b67..b64d4ef 100644
--- a/cli/src/cli.c
+++ b/cli/src/cli.c
@@ -432,6 +432,7 @@ parse_cmdline (int argc, char *argv[], struct cli_state *state)
/* Do this first so that an option can override. */
if (sys_access (SECURE_ACCESS_FILE, F_OK) == 0) {
state->ctx->secure_mgmt = 1;
+ state->ctx->ssl_cert_depth = glusterfs_read_secure_access_file ();
}
if (state->argc > GEO_REP_CMD_CONFIG_INDEX &&
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 69d93f5..ef53d09 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -2467,6 +2467,8 @@ glusterfs_mgmt_init (glusterfs_ctx_t *ctx)
goto out;
}
+
+ ctx->ssl_cert_depth = glusterfs_read_secure_access_file ();
}
rpc = rpc_clnt_new (options, THIS, THIS->name, 8);
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index 38b863c..3ae89a6 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -1917,6 +1917,7 @@ parse_cmdline (int argc, char *argv[], glusterfs_ctx_t *ctx)
/* Do this before argp_parse so it can be overridden. */
if (sys_access (SECURE_ACCESS_FILE, F_OK) == 0) {
cmd_args->secure_mgmt = 1;
+ ctx->ssl_cert_depth = glusterfs_read_secure_access_file ();
}
argp_parse (&argp, argc, argv, ARGP_IN_ORDER, NULL, cmd_args);
diff --git a/heal/src/glfs-heal.c b/heal/src/glfs-heal.c
index 532b6f9..153cd29 100644
--- a/heal/src/glfs-heal.c
+++ b/heal/src/glfs-heal.c
@@ -1617,6 +1617,7 @@ main (int argc, char **argv)
if (sys_access(SECURE_ACCESS_FILE, F_OK) == 0) {
fs->ctx->secure_mgmt = 1;
+ fs->ctx->ssl_cert_depth = glusterfs_read_secure_access_file ();
}
ret = glfs_set_volfile_server (fs, "unix", DEFAULT_GLUSTERD_SOCKFILE, 0);
diff --git a/libglusterfs/src/glusterfs.h b/libglusterfs/src/glusterfs.h
index 5abfafa..5d5f5c8 100644
--- a/libglusterfs/src/glusterfs.h
+++ b/libglusterfs/src/glusterfs.h
@@ -536,6 +536,11 @@ struct _glusterfs_ctx {
*/
int secure_mgmt;
+ /* The option is use to set cert_depth while management connection
+ use SSL
+ */
+ int ssl_cert_depth;
+
/*
* Should *our* server/inbound connections use SSL? This is only true
* if we're glusterd and secure_mgmt is set, or if we're glusterfsd
@@ -638,4 +643,5 @@ int glusterfs_graph_parent_up (glusterfs_graph_t *graph);
void
gf_free_mig_locks (lock_migration_info_t *locks);
+int glusterfs_read_secure_access_file (void);
#endif /* _GLUSTERFS_H */
diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c
index 738cd96..cdd7123 100644
--- a/libglusterfs/src/graph.c
+++ b/libglusterfs/src/graph.c
@@ -16,7 +16,7 @@
#include "defaults.h"
#include <unistd.h>
#include "syscall.h"
-
+#include <regex.h>
#include "libglusterfs-messages.h"
#if 0
@@ -68,7 +68,47 @@ _gf_dump_details (int argc, char **argv)
}
#endif
+int
+glusterfs_read_secure_access_file (void)
+{
+ FILE *fp = NULL;
+ char line[100] = {0,};
+ int cert_depth = 1; /* Default SSL CERT DEPTH */
+ regex_t regcmpl;
+ char *key = {"^option transport.socket.ssl-cert-depth"};
+ char keyval[50] = {0,};
+ int start = 0, end = 0, copy_len = 0;
+ regmatch_t result[1] = {{0} };
+
+ fp = fopen (SECURE_ACCESS_FILE, "r");
+ if (!fp)
+ goto out;
+ /* Check if any line matches with key */
+ while (fgets(line, sizeof(line), fp) != NULL) {
+ if (regcomp (&regcmpl, key, REG_EXTENDED)) {
+ goto out;
+ }
+ if (!regexec (&regcmpl, line, 1, result, 0)) {
+ start = result[0].rm_so;
+ end = result[0].rm_eo;
+ copy_len = end - start;
+ strcpy (keyval, line+copy_len);
+ if (keyval[0]) {
+ cert_depth = atoi(keyval);
+ if (cert_depth == 0)
+ cert_depth = 1; /* Default SSL CERT DEPTH */
+ break;
+ }
+ }
+ regfree(&regcmpl);
+ }
+
+out:
+ if (fp)
+ fclose (fp);
+ return cert_depth;
+}
int
glusterfs_xlator_link (xlator_t *pxl, xlator_t *cxl)
diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c
index 590d465..157b5b7 100644
--- a/rpc/rpc-transport/socket/src/socket.c
+++ b/rpc/rpc-transport/socket/src/socket.c
@@ -4324,7 +4324,13 @@ socket_init (rpc_transport_t *this)
"using %s polling thread",
priv->own_thread ? "private" : "system");
- if (!dict_get_int32 (this->options, SSL_CERT_DEPTH_OPT, &cert_depth)) {
+ if (!priv->mgmt_ssl) {
+ if (!dict_get_int32 (this->options, SSL_CERT_DEPTH_OPT, &cert_depth)) {
+ gf_log (this->name, GF_LOG_INFO,
+ "using certificate depth %d", cert_depth);
+ }
+ } else {
+ cert_depth = this->ctx->ssl_cert_depth;
gf_log (this->name, GF_LOG_INFO,
"using certificate depth %d", cert_depth);
}
@@ -4463,9 +4469,7 @@ socket_init (rpc_transport_t *this)
goto err;
}
-#if (OPENSSL_VERSION_NUMBER < 0x00905100L)
- SSL_CTX_set_verify_depth(ctx,cert_depth);
-#endif
+ SSL_CTX_set_verify_depth(priv->ssl_ctx, cert_depth);
if (crl_path) {
#ifdef X509_V_FLAG_CRL_CHECK_ALL
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 16a3773..ddab159 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -3544,6 +3544,9 @@ glusterd_friend_rpc_create (xlator_t *this, glusterd_peerinfo_t *peerinfo,
"failed to set ssl-enabled in dict");
goto out;
}
+
+ this->ctx->ssl_cert_depth = glusterfs_read_secure_access_file ();
+
}
ret = glusterd_rpc_create (&peerinfo->rpc, options,
--
1.8.3.1

View File

@ -0,0 +1,107 @@
From 45481e3e7ca074eb405b0db5521d4ca08bb20641 Mon Sep 17 00:00:00 2001
From: karthik-us <ksubrahm@redhat.com>
Date: Fri, 9 Mar 2018 14:45:07 +0530
Subject: [PATCH 203/212] mgmt/glusterd: Adding validation for setting
quorum-count
In a replicated volume it was allowing to set the quorum-count value
between the range [1 - 2147483647]. This patch adds validation for
allowing only maximum of replica_count number of quorum-count value
to be set on a volume.
Upstream patch: https://review.gluster.org/#/c/19104/
> Change-Id: I13952f3c6cf498c9f2b91161503fc0fba9d94898
> BUG: 1529515
> Signed-off-by: karthik-us <ksubrahm@redhat.com>
Change-Id: Ie4a74184ae640703524f371f4a0de6d70a6e9abb
BUG: 1186664
Signed-off-by: karthik-us <ksubrahm@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/132255
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
xlators/cluster/afr/src/afr.c | 2 +-
xlators/mgmt/glusterd/src/glusterd-volume-set.c | 45 ++++++++++++++++++++++---
2 files changed, 41 insertions(+), 6 deletions(-)
diff --git a/xlators/cluster/afr/src/afr.c b/xlators/cluster/afr/src/afr.c
index dec6e60..0122b7f 100644
--- a/xlators/cluster/afr/src/afr.c
+++ b/xlators/cluster/afr/src/afr.c
@@ -959,7 +959,7 @@ struct volume_options options[] = {
.max = INT_MAX,
.default_value = 0,
.description = "If quorum-type is \"fixed\" only allow writes if "
- "this many bricks or present. Other quorum types "
+ "this many bricks are present. Other quorum types "
"will OVERWRITE this value.",
},
{ .key = {"quorum-reads"},
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 8d3407d..d01e282 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -847,6 +847,40 @@ out:
}
static int
+validate_quorum_count (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
+ char *value, char **op_errstr)
+{
+ int ret = 0;
+ xlator_t *this = NULL;
+ int q_count = 0;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = gf_string2int (value, &q_count);
+ if (ret) {
+ gf_asprintf (op_errstr, "%s is not an integer. %s expects a "
+ "valid integer value.", value, key);
+ goto out;
+ }
+
+ if (q_count < 1 || q_count > volinfo->replica_count) {
+ gf_asprintf (op_errstr, "%d in %s %d is out of range [1 - %d]",
+ q_count, key, q_count, volinfo->replica_count);
+ ret = -1;
+ }
+
+out:
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY, "%s",
+ *op_errstr);
+ }
+ gf_msg_debug (this->name, 0, "Returning %d", ret);
+
+ return ret;
+}
+
+static int
validate_subvols_per_directory (glusterd_volinfo_t *volinfo, dict_t *dict,
char *key, char *value, char **op_errstr)
{
@@ -1456,11 +1490,12 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.op_version = 1,
.flags = OPT_FLAG_CLIENT_OPT
},
- { .key = "cluster.quorum-count",
- .voltype = "cluster/replicate",
- .option = "quorum-count",
- .op_version = 1,
- .flags = OPT_FLAG_CLIENT_OPT
+ { .key = "cluster.quorum-count",
+ .voltype = "cluster/replicate",
+ .option = "quorum-count",
+ .op_version = 1,
+ .validate_fn = validate_quorum_count,
+ .flags = OPT_FLAG_CLIENT_OPT
},
{ .key = "cluster.choose-local",
.voltype = "cluster/replicate",
--
1.8.3.1

View File

@ -0,0 +1,73 @@
From 9b001e38b21d433580d55e68225f2cd5af058dbf Mon Sep 17 00:00:00 2001
From: Gaurav Yadav <gyadav@redhat.com>
Date: Thu, 1 Mar 2018 14:44:34 +0530
Subject: [PATCH 204/212] glusterd : memory leak in mgmt_v3 lock functionality
In order to take care of a stale lock issue, a timer was introduced
in mgmt_v3 lock. This timer was not freeing its memory, due to
which this leak got introduced.
With this fix, memory cleanup in locking is now handled properly.
>upstream patch: https://review.gluster.org/#/c/19651/
> https://review.gluster.org/#/c/19723/
>Change-Id: I2e1ce3ebba3520f7660321f3d97554080e4e22f4
>BUG: 1550339
>Signed-off-by: Gaurav Yadav <gyadav@redhat.com>
Change-Id: I2e1ce3ebba3520f7660321f3d97554080e4e22f4
BUG: 1529451
Signed-off-by: Gaurav Yadav <gyadav@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/134218
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-locks.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index bd73b37..a19d688 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -719,6 +719,7 @@ gd_mgmt_v3_unlock_timer_cbk (void *data)
int32_t ret = -1;
glusterfs_ctx_t *mgmt_lock_timer_ctx = NULL;
xlator_t *mgmt_lock_timer_xl = NULL;
+ gf_timer_t *timer = NULL;
this = THIS;
GF_VALIDATE_OR_GOTO ("glusterd", this, out);
@@ -766,9 +767,10 @@ out:
GF_VALIDATE_OR_GOTO (this->name, mgmt_lock_timer_ctx,
ret_function);
+ timer = mgmt_lock_timer->timer;
+ GF_FREE (timer->data);
gf_timer_call_cancel (mgmt_lock_timer_ctx,
mgmt_lock_timer->timer);
- GF_FREE(key);
dict_del (conf->mgmt_v3_lock_timer, bt_key);
mgmt_lock_timer->timer = NULL;
}
@@ -791,6 +793,7 @@ glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type)
xlator_t *this = NULL;
glusterfs_ctx_t *mgmt_lock_timer_ctx = NULL;
xlator_t *mgmt_lock_timer_xl = NULL;
+ gf_timer_t *timer = NULL;
this = THIS;
GF_ASSERT (this);
@@ -893,6 +896,9 @@ glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type)
mgmt_lock_timer_ctx = mgmt_lock_timer_xl->ctx;
GF_VALIDATE_OR_GOTO (this->name, mgmt_lock_timer_ctx, out);
ret = 0;
+
+ timer = mgmt_lock_timer->timer;
+ GF_FREE (timer->data);
gf_timer_call_cancel (mgmt_lock_timer_ctx,
mgmt_lock_timer->timer);
dict_del (priv->mgmt_v3_lock_timer, key_dup);
--
1.8.3.1

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,123 @@
From ed84bfec039d2f3d63902dfe3bade2fe6eb6c31e Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Tue, 3 Apr 2018 21:28:37 +0530
Subject: [PATCH 206/212] glusterd: honour localtime-logging for all the
daemons
>upstream patch : https://review.gluster.org/#/c/19814/
>Change-Id: I97a70d29365b0a454241ac5f5cae56d93eefd73a
>Fixes: bz#1563334
>Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Change-Id: I97a70d29365b0a454241ac5f5cae56d93eefd73a
BUG: 958062
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/134447
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-rebalance.c | 6 ++++++
xlators/mgmt/glusterd/src/glusterd-snapd-svc.c | 6 ++++++
xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c | 6 ++++++
xlators/mgmt/glusterd/src/glusterd-tierd-svc.c | 6 ++++++
4 files changed, 24 insertions(+)
diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index 76191c4..848e689 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -196,6 +196,7 @@ glusterd_handle_defrag_start (glusterd_volinfo_t *volinfo, char *op_errstr,
char volname[PATH_MAX] = {0,};
char valgrind_logfile[PATH_MAX] = {0,};
char *volfileserver = NULL;
+ char *localtime_logging = NULL;
this = THIS;
GF_VALIDATE_OR_GOTO ("glusterd", this, out);
@@ -312,6 +313,11 @@ glusterd_handle_defrag_start (glusterd_volinfo_t *volinfo, char *op_errstr,
runner_argprintf (&runner, logfile);
if (volinfo->memory_accounting)
runner_add_arg (&runner, "--mem-accounting");
+ if (dict_get_str (priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY,
+ &localtime_logging) == 0) {
+ if (strcmp (localtime_logging, "enable") == 0)
+ runner_add_arg (&runner, "--localtime-logging");
+ }
ret = runner_run_nowait (&runner);
if (ret) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
index 5621852..bd1c02e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
@@ -226,6 +226,7 @@ glusterd_snapdsvc_start (glusterd_svc_t *svc, int flags)
char snapd_id[PATH_MAX] = {0,};
glusterd_volinfo_t *volinfo = NULL;
glusterd_snapdsvc_t *snapd = NULL;
+ char *localtime_logging = NULL;
this = THIS;
GF_ASSERT(this);
@@ -298,6 +299,11 @@ glusterd_snapdsvc_start (glusterd_svc_t *svc, int flags)
"-l", svc->proc.logfile,
"--brick-name", snapd_id,
"-S", svc->conn.sockpath, NULL);
+ if (dict_get_str (priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY,
+ &localtime_logging) == 0) {
+ if (strcmp (localtime_logging, "enable") == 0)
+ runner_add_arg (&runner, "--localtime-logging");
+ }
snapd_port = pmap_assign_port (THIS, volinfo->snapd.port, snapd_id);
if (!snapd_port) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
index f229865..ba948b4 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
@@ -150,6 +150,7 @@ glusterd_svc_start (glusterd_svc_t *svc, int flags, dict_t *cmdline)
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
char valgrind_logfile[PATH_MAX] = {0};
+ char *localtime_logging = NULL;
this = THIS;
GF_ASSERT (this);
@@ -190,6 +191,11 @@ glusterd_svc_start (glusterd_svc_t *svc, int flags, dict_t *cmdline)
"-S", svc->conn.sockpath,
NULL);
+ if (dict_get_str (priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY,
+ &localtime_logging) == 0) {
+ if (strcmp (localtime_logging, "enable") == 0)
+ runner_add_arg (&runner, "--localtime-logging");
+ }
if (cmdline)
dict_foreach (cmdline, svc_add_args, (void *) &runner);
diff --git a/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c b/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c
index a2876ae..2c556fc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c
@@ -259,6 +259,7 @@ glusterd_tierdsvc_start (glusterd_svc_t *svc, int flags)
glusterd_volinfo_t *volinfo = NULL;
glusterd_tierdsvc_t *tierd = NULL;
int cmd = GF_DEFRAG_CMD_START_TIER;
+ char *localtime_logging = NULL;
this = THIS;
GF_VALIDATE_OR_GOTO (THIS->name, this, out);
@@ -353,6 +354,11 @@ glusterd_tierdsvc_start (glusterd_svc_t *svc, int flags)
volinfo->rebal.commit_hash);
if (volinfo->memory_accounting)
runner_add_arg (&runner, "--mem-accounting");
+ if (dict_get_str (priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY,
+ &localtime_logging) == 0) {
+ if (strcmp (localtime_logging, "enable") == 0)
+ runner_add_arg (&runner, "--localtime-logging");
+ }
snprintf (msg, sizeof (msg),
"Starting the tierd service for volume %s", volinfo->volname);
--
1.8.3.1

View File

@ -0,0 +1,113 @@
From 47678bde5c2f8e674289d2b0893865ab3fa43940 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Sun, 1 Apr 2018 10:10:41 +0530
Subject: [PATCH 207/212] glusterd: fix txn_opinfo memory leak
For transactions where there's no volname involved (e.g. gluster v
status), the originator node initiates with the staging phase, which
means that in op-sm no unlock event is triggered, resulting in a
txn_opinfo dictionary leak.
Credits : cynthia.zhou@nokia-sbell.com
> upstream patch : https://review.gluster.org/#/c/19801/
>Change-Id: I92fffbc2e8e1b010f489060f461be78aa2b86615
>Fixes: bz#1550339
>Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Change-Id: I92fffbc2e8e1b010f489060f461be78aa2b86615
BUG: 1529451
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/134448
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-handler.c | 1 +
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 32 ++++++++++++++++++++--------
xlators/mgmt/glusterd/src/glusterd-op-sm.h | 1 +
3 files changed, 25 insertions(+), 9 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index ddab159..dbf80a1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -1073,6 +1073,7 @@ __glusterd_handle_stage_op (rpcsvc_request_t *req)
glusterd_txn_opinfo_init (&txn_op_info, &state, &op_req.op,
req_ctx->dict, req);
+ txn_op_info.skip_locking = _gf_true;
ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index a02a0b3..72d349b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -5919,14 +5919,15 @@ glusterd_op_init_commit_rsp_dict (glusterd_op_t op)
static int
glusterd_op_ac_commit_op (glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
- glusterd_req_ctx_t *req_ctx = NULL;
- int32_t status = 0;
- char *op_errstr = NULL;
- dict_t *dict = NULL;
- dict_t *rsp_dict = NULL;
- xlator_t *this = NULL;
- uuid_t *txn_id = NULL;
+ int ret = 0;
+ glusterd_req_ctx_t *req_ctx = NULL;
+ int32_t status = 0;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+ xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+ glusterd_op_info_t txn_op_info = {{0},};
this = THIS;
GF_ASSERT (this);
@@ -5965,6 +5966,15 @@ glusterd_op_ac_commit_op (glusterd_op_sm_event_t *event, void *ctx)
ret = -1;
goto out;
}
+ ret = glusterd_get_txn_opinfo (&event->txn_id, &txn_op_info);
+ if (ret) {
+ gf_msg_callingfn (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_TRANS_OPINFO_GET_FAIL,
+ "Unable to get transaction opinfo "
+ "for transaction ID : %s",
+ uuid_utoa (event->txn_id));
+ goto out;
+ }
ret = dict_set_bin (rsp_dict, "transaction_id",
txn_id, sizeof(*txn_id));
@@ -5985,7 +5995,11 @@ out:
if (rsp_dict)
dict_unref (rsp_dict);
-
+ /* for no volname transactions, the txn_opinfo needs to be cleaned up
+ * as there's no unlock event triggered
+ */
+ if (txn_op_info.skip_locking)
+ ret = glusterd_clear_txn_opinfo (txn_id);
gf_msg_debug (this->name, 0, "Returning with %d", ret);
return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index 24b1944..f2aee9c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -101,6 +101,7 @@ struct glusterd_op_info_ {
char *op_errstr;
struct cds_list_head pending_bricks;
uint32_t txn_generation;
+ gf_boolean_t skip_locking;
};
typedef struct glusterd_op_info_ glusterd_op_info_t;
--
1.8.3.1

View File

@ -0,0 +1,75 @@
From 699f3c720d340b95177c521037c1cb8799930b9e Mon Sep 17 00:00:00 2001
From: N Balachandran <nbalacha@redhat.com>
Date: Fri, 16 Mar 2018 20:54:15 +0530
Subject: [PATCH 208/212] cluster/dht: enable lookup-optimize by default
Lookup-optimize has been shown to improve create
performance. The code has been in the project for several
years and is considered stable.
Enabling this by default in order to test this in the
upstream regression runs.
upstream master:https://review.gluster.org/#/c/19731/
> Change-Id: Iab792979ee34f0af4713931e0b5b399c23f65313
> updates: bz#1557435
> BUG: 1557435
> Signed-off-by: N Balachandran <nbalacha@redhat.com>
Change-Id: I076b4e4beb9db390f619f38bf4598589b95685c7
BUG: 1557365
Signed-off-by: N Balachandran <nbalacha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/134450
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
tests/features/unhashed-auto.t | 2 +-
xlators/cluster/dht/src/dht-rebalance.c | 4 +++-
xlators/cluster/dht/src/dht-shared.c | 2 +-
3 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/tests/features/unhashed-auto.t b/tests/features/unhashed-auto.t
index ddebd03..0a6bbfb 100755
--- a/tests/features/unhashed-auto.t
+++ b/tests/features/unhashed-auto.t
@@ -114,7 +114,7 @@ TEST [ x"$new_hash" = x"00000001" ]
# Unset the option and check that newly created directories get 1 in the
# disk layout
-TEST $CLI volume reset $V0 cluster.lookup-optimize
+TEST $CLI volume set $V0 cluster.lookup-optimize off
TEST mkdir $M0/dir1
new_hash=$(get_xattr_hash $B0/${V0}1/dir1)
TEST [ x"$new_hash" = x"00000001" ]
diff --git a/xlators/cluster/dht/src/dht-rebalance.c b/xlators/cluster/dht/src/dht-rebalance.c
index 9770359..9e31ff8 100644
--- a/xlators/cluster/dht/src/dht-rebalance.c
+++ b/xlators/cluster/dht/src/dht-rebalance.c
@@ -4422,7 +4422,9 @@ gf_defrag_start_crawl (void *data)
ret = syncop_setxattr (this, &loc, fix_layout, 0, NULL, NULL);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "fix layout on %s failed",
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set commit hash on %s. "
+ "Rebalance cannot proceed.",
loc.path);
defrag->total_failures++;
ret = -1;
diff --git a/xlators/cluster/dht/src/dht-shared.c b/xlators/cluster/dht/src/dht-shared.c
index 42daff0..2f0d8ce 100644
--- a/xlators/cluster/dht/src/dht-shared.c
+++ b/xlators/cluster/dht/src/dht-shared.c
@@ -943,7 +943,7 @@ struct volume_options options[] = {
},
{ .key = {"lookup-optimize"},
.type = GF_OPTION_TYPE_BOOL,
- .default_value = "off",
+ .default_value = "on",
.description = "This option if set to ON enables the optimization "
"of -ve lookups, by not doing a lookup on non-hashed subvolumes for "
"files, in case the hashed subvolume does not return any result. "
--
1.8.3.1

View File

@ -0,0 +1,97 @@
From b3c216d77ae7a054d4f5f28a93239afe00771cd8 Mon Sep 17 00:00:00 2001
From: N Balachandran <nbalacha@redhat.com>
Date: Thu, 29 Mar 2018 18:23:13 +0530
Subject: [PATCH 209/212] cluster/dht: Update layout in inode only on success
With lookup-optimize enabled, gf_defrag_settle_hash in rebalance
sometimes flips the on-disk layout on volume root post the
migration of all files in the directory.
This is sometimes seen when attempting to fix the layout of a
directory multiple times before calling gf_defrag_settle_hash.
dht_fix_layout_of_directory generates a new layout in memory but
updates it in the inode ctx before it is set on disk. The layout
may be different the second time around due to
dht_selfheal_layout_maximize_overlap. If the layout is then not
written to the disk, the inode now contains the wrong layout.
gf_defrag_settle_hash does not check the correctness of the layout
in the inode before updating the commit-hash and writing it to the
disk thus changing the layout of the directory.
upstream master:https://review.gluster.org/#/c/19797/
> Change-Id: Ie1407d92982518f2a0c40ec70ad370b34a87b4d4
> updates: bz#1557435
> Signed-off-by: N Balachandran <nbalacha@redhat.com>
Change-Id: I4222b7c985226ca175e0581c103bad62084339a2
BUG: 1557365
Signed-off-by: N Balachandran <nbalacha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/134451
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
xlators/cluster/dht/src/dht-common.c | 25 ++++++++++++++++++++++++-
xlators/cluster/dht/src/dht-selfheal.c | 3 ---
2 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/xlators/cluster/dht/src/dht-common.c b/xlators/cluster/dht/src/dht-common.c
index f1e6a92..6319a87 100644
--- a/xlators/cluster/dht/src/dht-common.c
+++ b/xlators/cluster/dht/src/dht-common.c
@@ -3545,6 +3545,28 @@ dht_common_setxattr_cbk (call_frame_t *frame, void *cookie,
+static int
+dht_fix_layout_setxattr_cbk (call_frame_t *frame, void *cookie,
+ xlator_t *this, int32_t op_ret, int32_t op_errno,
+ dict_t *xdata)
+{
+ dht_local_t *local = NULL;
+ dht_layout_t *layout = NULL;
+
+ if (op_ret == 0) {
+
+ /* update the layout in the inode ctx */
+ local = frame->local;
+ layout = local->selfheal.layout;
+
+ dht_layout_set (this, local->loc.inode, layout);
+ }
+
+ DHT_STACK_UNWIND (setxattr, frame, op_ret, op_errno, xdata);
+ return 0;
+}
+
+
int
dht_err_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
int op_ret, int op_errno, dict_t *xdata)
@@ -5531,7 +5553,8 @@ dht_setxattr (call_frame_t *frame, xlator_t *this,
DHT_MSG_FIX_LAYOUT_INFO,
"fixing the layout of %s", loc->path);
- ret = dht_fix_directory_layout (frame, dht_common_setxattr_cbk,
+ ret = dht_fix_directory_layout (frame,
+ dht_fix_layout_setxattr_cbk,
layout);
if (ret) {
op_errno = ENOTCONN;
diff --git a/xlators/cluster/dht/src/dht-selfheal.c b/xlators/cluster/dht/src/dht-selfheal.c
index 328251d..1707e08 100644
--- a/xlators/cluster/dht/src/dht-selfheal.c
+++ b/xlators/cluster/dht/src/dht-selfheal.c
@@ -2112,9 +2112,6 @@ dht_fix_layout_of_directory (call_frame_t *frame, loc_t *loc,
}
done:
if (new_layout) {
- /* Now that the new layout has all the proper layout, change the
- inode context */
- dht_layout_set (this, loc->inode, new_layout);
/* Make sure the extra 'ref' for existing layout is removed */
dht_layout_unref (this, local->layout);
--
1.8.3.1

View File

@ -0,0 +1,53 @@
From 53649d22deea97c1604f5688ecab303eb46104d9 Mon Sep 17 00:00:00 2001
From: Xavi Hernandez <xhernandez@redhat.com>
Date: Wed, 28 Mar 2018 11:34:49 +0200
Subject: [PATCH 210/212] cluster/ec: send list-node-uuids request to all
subvolumes
The xattr trusted.glusterfs.list-node-uuids was only sent to a single
subvolume. This was returning null uuids from the other subvolumes as
if they were down.
This fix forces that xattr to be requested from all subvolumes.
> Upstream patch: https://review.gluster.org/19784
Change-Id: If62eb39a6857258923ba625e153d4ad79018ea2f
BUG: 1561733
Signed-off-by: Xavi Hernandez <xhernandez@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/134066
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Ashish Pandey <aspandey@redhat.com>
---
tests/basic/ec/ec-rebalance.t | 1 +
xlators/cluster/ec/src/ec.c | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/tests/basic/ec/ec-rebalance.t b/tests/basic/ec/ec-rebalance.t
index b5c3072..6cda3a3 100644
--- a/tests/basic/ec/ec-rebalance.t
+++ b/tests/basic/ec/ec-rebalance.t
@@ -14,6 +14,7 @@ cleanup
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{0..2}
+TEST $CLI volume set $V0 lookup-optimize on
TEST $CLI volume start $V0
#Mount the volume
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index 956b45b..eb91c4a 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -881,7 +881,7 @@ ec_gf_getxattr (call_frame_t *frame, xlator_t *this, loc_t *loc,
if (name &&
((fnmatch (GF_XATTR_STIME_PATTERN, name, 0) == 0) ||
- (XATTR_IS_NODE_UUID(name)))) {
+ XATTR_IS_NODE_UUID(name) || XATTR_IS_NODE_UUID_LIST(name))) {
minimum = EC_MINIMUM_ALL;
}
--
1.8.3.1

View File

@ -0,0 +1,40 @@
From cc7483e65a0b165112446d4598fe4215a4e8109f Mon Sep 17 00:00:00 2001
From: Jiffin Tony Thottan <jthottan@redhat.com>
Date: Wed, 4 Apr 2018 09:29:43 +0530
Subject: [PATCH 211/212] common-ha/scripts : pass the list of servers properly
to stop_ganesha_all()
Label: BACKPORT FROM UPSTREAM 3.10
Upstream Reference :
>Change-Id: I6d92623cd9fb450d7a27f5acc61eca0b3cbc9b08
>BUG: 1563500
>Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
>Patch link : https://review.gluster.org/#/c/19816/
Change-Id: I6d92623cd9fb450d7a27f5acc61eca0b3cbc9b08
BUG: 1226874
Signed-off-by: Jiffin Tony Thottan <jthottan@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/134453
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
extras/ganesha/scripts/ganesha-ha.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index 623fb64..4459068 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -199,7 +199,7 @@ setup_cluster()
if [ $? -ne 0 ]; then
logger "pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} ${servers} failed"
#set up failed stop all ganesha process and clean up symlinks in cluster
- stop_ganesha_all ${servers}
+ stop_ganesha_all "${servers}"
exit 1;
fi
pcs cluster start --all
--
1.8.3.1

View File

@ -0,0 +1,94 @@
From a81170eca4848c6bd2d0fa9e8a2c9fc9803b868e Mon Sep 17 00:00:00 2001
From: Poornima G <pgurusid@redhat.com>
Date: Thu, 4 Jan 2018 19:39:05 +0530
Subject: [PATCH 212/212] readdir-ahead: Cleanup the xattr request code
upstream master: https://review.gluster.org/#/c/19172/
> Change-Id: Ia0c697583751290a455da3cd1894e0c5685d1bd8
> updates: #297
> Signed-off-by: Poornima G <pgurusid@redhat.com>
BUG: 1559884
Change-Id: Ia0c697583751290a455da3cd1894e0c5685d1bd8
Signed-off-by: Poornima G <pgurusid@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/134500
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
.../performance/readdir-ahead/src/readdir-ahead.c | 42 ++--------------------
1 file changed, 2 insertions(+), 40 deletions(-)
diff --git a/xlators/performance/readdir-ahead/src/readdir-ahead.c b/xlators/performance/readdir-ahead/src/readdir-ahead.c
index c2ceda4..0d3bdbd 100644
--- a/xlators/performance/readdir-ahead/src/readdir-ahead.c
+++ b/xlators/performance/readdir-ahead/src/readdir-ahead.c
@@ -474,31 +474,6 @@ err:
return -1;
}
-
-static int
-rda_unpack_mdc_loaded_keys_to_dict(char *payload, dict_t *dict)
-{
- int ret = -1;
- char *mdc_key = NULL;
-
- if (!payload || !dict) {
- goto out;
- }
-
- mdc_key = strtok(payload, " ");
- while (mdc_key != NULL) {
- ret = dict_set_int8 (dict, mdc_key, 0);
- if (ret) {
- goto out;
- }
- mdc_key = strtok(NULL, " ");
- }
-
-out:
- return ret;
-}
-
-
static int32_t
rda_opendir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno, fd_t *fd, dict_t *xdata)
@@ -528,9 +503,7 @@ static int32_t
rda_opendir(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd,
dict_t *xdata)
{
- int ret = -1;
int op_errno = 0;
- char *payload = NULL;
struct rda_local *local = NULL;
dict_t *xdata_from_req = NULL;
@@ -552,21 +525,10 @@ rda_opendir(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd,
* Retrieve list of keys set by md-cache xlator and store it
* in local to be consumed in rda_opendir_cbk
*/
- ret = dict_get_str (xdata, GF_MDC_LOADED_KEY_NAMES, &payload);
- if (ret)
- goto wind;
- ret = rda_unpack_mdc_loaded_keys_to_dict((char *) payload,
- xdata_from_req);
- if (ret)
- goto wind;
-
- dict_copy (xdata, xdata_from_req);
- dict_del (xdata_from_req, GF_MDC_LOADED_KEY_NAMES);
-
- local->xattrs = xdata_from_req;
+ local->xattrs = dict_ref (xdata);
frame->local = local;
}
-wind:
+
STACK_WIND(frame, rda_opendir_cbk, FIRST_CHILD(this),
FIRST_CHILD(this)->fops->opendir, loc, fd, xdata);
return 0;
--
1.8.3.1

View File

@ -192,7 +192,7 @@ Release: 0.1%{?prereltag:.%{prereltag}}%{?dist}
%else
Name: glusterfs
Version: 3.12.2
Release: 6%{?dist}
Release: 7%{?dist}
%endif
License: GPLv2 or LGPLv3+
Group: System Environment/Base
@ -466,6 +466,17 @@ Patch0198: 0198-cluster-ec-Change-default-read-policy-to-gfid-hash.patch
Patch0199: 0199-cluster-ec-avoid-delays-in-self-heal.patch
Patch0200: 0200-quick-read-Discard-cache-for-fallocate-zerofill-and-.patch
Patch0201: 0201-posix-After-set-storage.reserve-limit-df-does-not-sh.patch
Patch0202: 0202-glusterd-TLS-verification-fails-while-using-intermed.patch
Patch0203: 0203-mgmt-glusterd-Adding-validation-for-setting-quorum-c.patch
Patch0204: 0204-glusterd-memory-leak-in-mgmt_v3-lock-functionality.patch
Patch0205: 0205-cluster-dht-User-xattrs-are-not-healed-after-brick-s.patch
Patch0206: 0206-glusterd-honour-localtime-logging-for-all-the-daemon.patch
Patch0207: 0207-glusterd-fix-txn_opinfo-memory-leak.patch
Patch0208: 0208-cluster-dht-enable-lookup-optimize-by-default.patch
Patch0209: 0209-cluster-dht-Update-layout-in-inode-only-on-success.patch
Patch0210: 0210-cluster-ec-send-list-node-uuids-request-to-all-subvo.patch
Patch0211: 0211-common-ha-scripts-pass-the-list-of-servers-properly-.patch
Patch0212: 0212-readdir-ahead-Cleanup-the-xattr-request-code.patch
%description
GlusterFS is a distributed file-system capable of scaling to several
@ -2409,6 +2420,10 @@ fi
%endif
%changelog
* Wed Apr 04 2018 Milind Changire <mchangir@redhat.com> - 3.12.2-7
- fixes bugs bz#958062 bz#1186664 bz#1226874 bz#1446046 bz#1529451 bz#1550315
bz#1557365 bz#1559884 bz#1561733
* Mon Mar 26 2018 Milind Changire <mchangir@redhat.com> - 3.12.2-6
- fixes bugs bz#1491785 bz#1518710 bz#1523599 bz#1528733 bz#1550474
bz#1550982 bz#1551186 bz#1552360 bz#1552414 bz#1552425 bz#1554255 bz#1554905