autobuild v3.12.2-28

Resolves: bz#1626350 bz#1648210 bz#1649651 bz#1650138
Signed-off-by: Milind Changire <mchangir@redhat.com>
This commit is contained in:
Milind Changire 2018-11-20 11:04:33 -05:00
parent e62d29446d
commit b919423cd2
7 changed files with 761 additions and 1 deletions

View File

@ -0,0 +1,75 @@
From 15d1f5b80b1eeb9c8f7d85c72247ffc4ef704267 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee <amukherj@redhat.com>
Date: Fri, 9 Nov 2018 12:44:20 +0530
Subject: [PATCH 445/450] glusterd: don't call svcs_reconfigure for all volumes
during op-version bump up
In a configuration with a large number of volumes, having
svcs_reconfigure () called for every volume makes the cluster.op-version bump up
time out. Instead, call svcs_reconfigure () only once.
> Change-Id: Ic6a133d77113c992a4dbeaf7f5663b7ffcbb0ae9
> Fixes: bz#1648237
> Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
upstream patch: https://review.gluster.org/#/c/glusterfs/+/21608/
Change-Id: Ic6a133d77113c992a4dbeaf7f5663b7ffcbb0ae9
BUG: 1648210
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/156190
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 716d3f2..8d767cc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -2622,6 +2622,7 @@ glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict,
glusterd_volinfo_t *volinfo = NULL;
glusterd_svc_t *svc = NULL;
gf_boolean_t start_nfs_svc = _gf_false;
+ gf_boolean_t svcs_reconfigure = _gf_false;
conf = this->private;
ret = dict_get_str (dict, "key1", &key);
@@ -2717,15 +2718,16 @@ glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict,
}
if (GLUSTERD_STATUS_STARTED
== volinfo->status) {
- ret = glusterd_svcs_reconfigure ();
- if (ret) {
- gf_msg (this->name,
- GF_LOG_ERROR, 0,
- GD_MSG_SVC_RESTART_FAIL,
- "Unable to restart "
- "services");
- goto out;
- }
+ svcs_reconfigure = _gf_true;
+ }
+ }
+ if (svcs_reconfigure) {
+ ret = glusterd_svcs_reconfigure();
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SVC_RESTART_FAIL,
+ "Unable to restart services");
+ goto out;
}
}
if (start_nfs_svc) {
@@ -2758,7 +2760,6 @@ glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict,
ret = dict_set_str (dup_opt, key, value);
if (ret)
goto out;
-
ret = glusterd_get_next_global_opt_version_str (conf->opts,
&next_version);
if (ret)
--
1.8.3.1

View File

@ -0,0 +1,91 @@
From 1de7497540b8428187df5048a1b8e82c2feec604 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawa@redhat.com>
Date: Mon, 19 Nov 2018 13:00:57 +0530
Subject: [PATCH 446/450] core: Portmap entries showing stale brick entries
when bricks are down
Problem: pmap shows stale brick entries after a brick is brought down,
because __glusterd_brick_rpc_notify calls gf_is_service_running
before calling pmap_registry_remove to check on the brick instance.
Solution: 1) Change the condition in gf_is_pid_running to verify
process existence; use open instead of access to achieve
the same
2) Call search_brick_path_from_proc in __glusterd_brick_rpc_notify
along with gf_is_service_running
> Change-Id: Ia663ac61c01fdee6c12f47c0300cdf93f19b6a19
> fixes: bz#1646892
> Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
> (Cherry picked from commit bcf1e8b07491b48c5372924dbbbad5b8391c6d81)
> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/21568/)
BUG: 1649651
Change-Id: I06b0842d5e3ffc909304529311709064237ccc94
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/156326
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
libglusterfs/src/common-utils.c | 5 ++++-
xlators/mgmt/glusterd/src/glusterd-handler.c | 7 +++++--
xlators/mgmt/glusterd/src/glusterd-utils.h | 2 ++
3 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/libglusterfs/src/common-utils.c b/libglusterfs/src/common-utils.c
index 54ef875..dd6cdb3 100644
--- a/libglusterfs/src/common-utils.c
+++ b/libglusterfs/src/common-utils.c
@@ -3986,13 +3986,16 @@ gf_boolean_t
gf_is_pid_running (int pid)
{
char fname[32] = {0,};
+ int fd = -1;
snprintf(fname, sizeof(fname), "/proc/%d/cmdline", pid);
- if (sys_access (fname , R_OK) != 0) {
+ fd = sys_open(fname, O_RDONLY, 0);
+ if (fd < 0) {
return _gf_false;
}
+ sys_close(fd);
return _gf_true;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index bf37e70..a129afc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -6193,11 +6193,14 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
/* In case of an abrupt shutdown of a brick PMAP_SIGNOUT
* event is not received by glusterd which can lead to a
* stale port entry in glusterd, so forcibly clean up
- * the same if the process is not running
+ * the same if the process is not running sometime
+ * gf_is_service_running true so to ensure about brick instance
+ * call search_brick_path_from_proc
*/
GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo,
brickinfo, conf);
- if (!gf_is_service_running (pidfile, &pid)) {
+ if (!gf_is_service_running (pidfile, &pid) ||
+ !search_brick_path_from_proc(pid, brickinfo->path)) {
ret = pmap_registry_remove (
THIS, brickinfo->port,
brickinfo->path,
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index ffcc636..8e5320d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -897,4 +897,6 @@ glusterd_get_index_basepath (glusterd_brickinfo_t *brickinfo, char *buffer,
gf_boolean_t
glusterd_is_profile_on (glusterd_volinfo_t *volinfo);
+char *
+search_brick_path_from_proc(pid_t brick_pid, char *brickpath);
#endif
--
1.8.3.1

View File

@ -0,0 +1,55 @@
From afff5f5aaab363afebb8fd359af2b8403b992930 Mon Sep 17 00:00:00 2001
From: Ashish Pandey <aspandey@redhat.com>
Date: Thu, 6 Sep 2018 11:20:32 +0530
Subject: [PATCH 447/450] cluster/ec: Don't update trusted.ec.version if fop
succeeds
If a fop has succeeded on all the bricks and trying to release
the lock, there is no need to update the version for the
file/entry. All it will do is to increase the version from
x to x+1 on all the bricks.
If this update (x to x+1) fails on some brick, this will indicate
that the entry is unhealthy while in reality everything is fine
with the entry.
Avoiding this update also avoids sending one xattrop
at the end of the fops, which will decrease the chances
of entries being in an unhealthy state and also improve
performance.
upstream patch : https://review.gluster.org/#/c/glusterfs/+/21105
Change-Id: Id9fca6bd2991425db6ed7d1f36af27027accb636
BUG: 1626350
Signed-off-by: Ashish Pandey <aspandey@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/156342
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/cluster/ec/src/ec-common.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/xlators/cluster/ec/src/ec-common.c b/xlators/cluster/ec/src/ec-common.c
index 6d0eb62..a7a8234 100644
--- a/xlators/cluster/ec/src/ec-common.c
+++ b/xlators/cluster/ec/src/ec-common.c
@@ -2372,6 +2372,15 @@ ec_update_info(ec_lock_link_t *link)
if (ctx->dirty[1] != 0) {
dirty[1] = -1;
}
+ /*If everything is fine and we already
+ *have version xattr set on entry, there
+ *is no need to update version again*/
+ if (ctx->pre_version[0]) {
+ version[0] = 0;
+ }
+ if (ctx->pre_version[1]) {
+ version[1] = 0;
+ }
} else {
link->optimistic_changelog = _gf_false;
ec_set_dirty_flag (link, ctx, dirty);
--
1.8.3.1

View File

@ -0,0 +1,68 @@
From 597826a5fa4e307a23615a03031d2df0f739652f Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawa@redhat.com>
Date: Tue, 20 Nov 2018 15:55:31 +0530
Subject: [PATCH 448/450] core: Resolve memory leak at the time of graph init
Problem: Memory leak when graph init fails during volfile
exchange between brick and glusterd
Solution: Fix the error code path in glusterfs_graph_init
> Change-Id: If62bee61283fccb7fd60abc6ea217cfac12358fa
> fixes: bz#1651431
> Signed-off-by: Mohit Agrawal <moagrawal@redhat.com>
> (cherry picked from commit 751b14f2bfd40e08ad395ccd98c6eb0a41ac4e91)
> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/21658/)
Change-Id: I29fd290e71754214cc242eac0cc9461d18abec81
BUG: 1650138
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/156358
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfsd/src/glusterfsd.c | 11 +++++++----
xlators/mgmt/glusterd/src/glusterd.c | 4 ----
2 files changed, 7 insertions(+), 8 deletions(-)
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index 6b7adc4..262a0c1 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -2383,11 +2383,14 @@ out:
if (fp)
fclose (fp);
- if (ret && !ctx->active) {
- glusterfs_graph_destroy (graph);
+ if (ret) {
+ if (graph && (ctx && (ctx->active != graph)))
+ glusterfs_graph_destroy (graph);
/* there is some error in setting up the first graph itself */
- emancipate (ctx, ret);
- cleanup_and_exit (ret);
+ if (!ctx->active) {
+ emancipate (ctx, ret);
+ cleanup_and_exit (ret);
+ }
}
return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index 076019f..ca17526 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -1120,10 +1120,6 @@ glusterd_init_uds_listener (xlator_t *this)
strncpy (sockfile, sock_data->data, UNIX_PATH_MAX);
}
- options = dict_new ();
- if (!options)
- goto out;
-
ret = rpcsvc_transport_unix_options_build (&options, sockfile);
if (ret)
goto out;
--
1.8.3.1

View File

@ -0,0 +1,397 @@
From afcb244f1264af8b0df42b5c79905fd52f01b924 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 15 Nov 2018 13:18:36 +0530
Subject: [PATCH 449/450] glusterd/mux: Optimize brick disconnect handler code
Removed unnecessary iteration during brick disconnect
handler when multiplex is enabled.
>Change-Id: I62dd3337b7e7da085da5d76aaae206e0b0edff9f
>fixes: bz#1650115
>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
upstream patch : https://review.gluster.org/#/c/glusterfs/+/21651/
Change-Id: I62dd3337b7e7da085da5d76aaae206e0b0edff9f
BUG: 1649651
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/156327
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-handler.c | 74 ++++------------
xlators/mgmt/glusterd/src/glusterd-utils.c | 122 +++++++++++++--------------
xlators/mgmt/glusterd/src/glusterd-utils.h | 3 +-
xlators/mgmt/glusterd/src/glusterd.h | 21 +++--
4 files changed, 87 insertions(+), 133 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index a129afc..cab0dec 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -6046,37 +6046,6 @@ out:
static int gd_stale_rpc_disconnect_log;
-static int
-glusterd_mark_bricks_stopped_by_proc (glusterd_brick_proc_t *brick_proc) {
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_brickinfo_t *brickinfo_tmp = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- int ret = -1;
-
- cds_list_for_each_entry (brickinfo, &brick_proc->bricks, brick_list) {
- ret = glusterd_get_volinfo_from_brick (brickinfo->path,
- &volinfo);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL, "Failed to get volinfo"
- " from brick(%s)", brickinfo->path);
- goto out;
- }
- cds_list_for_each_entry (brickinfo_tmp, &volinfo->bricks,
- brick_list) {
- if (strcmp (brickinfo->path,
- brickinfo_tmp->path) == 0) {
- glusterd_set_brick_status (brickinfo_tmp,
- GF_BRICK_STOPPED);
- brickinfo_tmp->start_triggered = _gf_false;
- }
- }
- }
- return 0;
-out:
- return ret;
-}
-
int
__glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data)
@@ -6087,7 +6056,6 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_volinfo_t *volinfo = NULL;
xlator_t *this = NULL;
- int temp = 0;
int32_t pid = -1;
glusterd_brickinfo_t *brickinfo_tmp = NULL;
glusterd_brick_proc_t *brick_proc = NULL;
@@ -6218,33 +6186,21 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
}
}
- if (is_brick_mx_enabled()) {
- cds_list_for_each_entry (brick_proc, &conf->brick_procs,
- brick_proc_list) {
- cds_list_for_each_entry (brickinfo_tmp,
- &brick_proc->bricks,
- brick_list) {
- if (strcmp (brickinfo_tmp->path,
- brickinfo->path) == 0) {
- ret = glusterd_mark_bricks_stopped_by_proc
- (brick_proc);
- if (ret) {
- gf_msg(THIS->name,
- GF_LOG_ERROR, 0,
- GD_MSG_BRICK_STOP_FAIL,
- "Unable to stop "
- "bricks of process"
- " to which brick(%s)"
- " belongs",
- brickinfo->path);
- goto out;
- }
- temp = 1;
- break;
- }
- }
- if (temp == 1)
- break;
+ if (is_brick_mx_enabled() && glusterd_is_brick_started(brickinfo)) {
+ brick_proc = brickinfo->brick_proc;
+ if (!brick_proc)
+ break;
+ cds_list_for_each_entry(brickinfo_tmp, &brick_proc->bricks,
+ mux_bricks)
+ {
+ glusterd_set_brick_status(brickinfo_tmp, GF_BRICK_STOPPED);
+ brickinfo_tmp->start_triggered = _gf_false;
+ /* When bricks are stopped, ports also need to
+ * be cleaned up
+ */
+ pmap_registry_remove(
+ THIS, brickinfo_tmp->port, brickinfo_tmp->path,
+ GF_PMAP_PORT_BRICKSERVER, NULL, _gf_true);
}
} else {
glusterd_set_brick_status (brickinfo, GF_BRICK_STOPPED);
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 7179a68..ec7e27a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -1088,6 +1088,7 @@ glusterd_brickinfo_new (glusterd_brickinfo_t **brickinfo)
goto out;
CDS_INIT_LIST_HEAD (&new_brickinfo->brick_list);
+ CDS_INIT_LIST_HEAD (&new_brickinfo->mux_bricks);
pthread_mutex_init (&new_brickinfo->restart_mutex, NULL);
*brickinfo = new_brickinfo;
@@ -1978,6 +1979,7 @@ glusterd_volume_start_glusterfs (glusterd_volinfo_t *volinfo,
struct rpc_clnt *rpc = NULL;
rpc_clnt_connection_t *conn = NULL;
int pid = -1;
+ glusterd_brick_proc_t *brick_proc = NULL;
GF_ASSERT (volinfo);
GF_ASSERT (brickinfo);
@@ -2188,15 +2190,20 @@ retry:
goto out;
}
- ret = glusterd_brick_process_add_brick (brickinfo);
+ ret = glusterd_brickprocess_new(&brick_proc);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICKPROC_ADD_BRICK_FAILED, "Adding brick %s:%s "
- "to brick process failed.", brickinfo->hostname,
- brickinfo->path);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICKPROC_NEW_FAILED,
+ "Failed to create new brick process instance");
goto out;
}
+ brick_proc->port = brickinfo->port;
+ cds_list_add_tail(&brick_proc->brick_proc_list, &priv->brick_procs);
+ brickinfo->brick_proc = brick_proc;
+ cds_list_add_tail(&brickinfo->mux_bricks, &brick_proc->bricks);
+ brickinfo->brick_proc = brick_proc;
+ brick_proc->brick_count++;
+
connect:
ret = glusterd_brick_connect (volinfo, brickinfo, socketpath);
if (ret) {
@@ -2328,9 +2335,6 @@ glusterd_brick_process_remove_brick (glusterd_brickinfo_t *brickinfo)
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
glusterd_brick_proc_t *brick_proc = NULL;
- glusterd_brickinfo_t *brickinfoiter = NULL;
- glusterd_brick_proc_t *brick_proc_tmp = NULL;
- glusterd_brickinfo_t *tmp = NULL;
this = THIS;
GF_VALIDATE_OR_GOTO ("glusterd", this, out);
@@ -2339,48 +2343,44 @@ glusterd_brick_process_remove_brick (glusterd_brickinfo_t *brickinfo)
GF_VALIDATE_OR_GOTO (this->name, priv, out);
GF_VALIDATE_OR_GOTO (this->name, brickinfo, out);
- cds_list_for_each_entry_safe (brick_proc, brick_proc_tmp,
- &priv->brick_procs, brick_proc_list) {
- if (brickinfo->port != brick_proc->port) {
- continue;
- }
-
- GF_VALIDATE_OR_GOTO (this->name, (brick_proc->brick_count > 0), out);
+ brick_proc = brickinfo->brick_proc;
+ if (!brick_proc) {
+ if (brickinfo->status != GF_BRICK_STARTED) {
+ /* this function will be called from gluster_pmap_signout and
+ * glusterd_volume_stop_glusterfs. So it is possible to have
+ * brick_proc set as null.
+ */
+ ret = 0;
+ }
+ goto out;
+ }
- cds_list_for_each_entry_safe (brickinfoiter, tmp,
- &brick_proc->bricks, brick_list) {
- if (strcmp (brickinfoiter->path, brickinfo->path) == 0) {
- cds_list_del_init (&brickinfoiter->brick_list);
+ GF_VALIDATE_OR_GOTO(this->name, (brick_proc->brick_count > 0), out);
- GF_FREE (brickinfoiter->logfile);
- GF_FREE (brickinfoiter);
- brick_proc->brick_count--;
- break;
- }
- }
+ cds_list_del_init(&brickinfo->mux_bricks);
+ brick_proc->brick_count--;
- /* If all bricks have been removed, delete the brick process */
- if (brick_proc->brick_count == 0) {
- ret = glusterd_brickprocess_delete (brick_proc);
- if (ret)
- goto out;
- }
- break;
+ /* If all bricks have been removed, delete the brick process */
+ if (brick_proc->brick_count == 0) {
+ ret = glusterd_brickprocess_delete(brick_proc);
+ if (ret)
+ goto out;
}
+ brickinfo->brick_proc = NULL;
ret = 0;
out:
return ret;
}
int
-glusterd_brick_process_add_brick (glusterd_brickinfo_t *brickinfo)
+glusterd_brick_process_add_brick (glusterd_brickinfo_t *brickinfo,
+ glusterd_brickinfo_t *parent_brickinfo)
{
int ret = -1;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
glusterd_brick_proc_t *brick_proc = NULL;
- glusterd_brickinfo_t *brickinfo_dup = NULL;
this = THIS;
GF_VALIDATE_OR_GOTO ("glusterd", this, out);
@@ -2389,37 +2389,28 @@ glusterd_brick_process_add_brick (glusterd_brickinfo_t *brickinfo)
GF_VALIDATE_OR_GOTO (this->name, priv, out);
GF_VALIDATE_OR_GOTO (this->name, brickinfo, out);
- ret = glusterd_brickinfo_new (&brickinfo_dup);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_BRICK_NEW_INFO_FAIL,
- "Failed to create new brickinfo");
- goto out;
- }
-
- ret = glusterd_brickinfo_dup (brickinfo, brickinfo_dup);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_SET_INFO_FAIL, "Failed to dup brickinfo");
- goto out;
- }
-
- ret = glusterd_brick_proc_for_port (brickinfo->port, &brick_proc);
- if (ret) {
- ret = glusterd_brickprocess_new (&brick_proc);
+ if (!parent_brickinfo) {
+ ret = glusterd_brick_proc_for_port(brickinfo->port,
+ &brick_proc);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICKPROC_NEW_FAILED, "Failed to create "
- "new brick process instance");
- goto out;
+ ret = glusterd_brickprocess_new (&brick_proc);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_BRICKPROC_NEW_FAILED,
+ "Failed to create "
+ "new brick process instance");
+ goto out;
+ }
+ brick_proc->port = brickinfo->port;
+ cds_list_add_tail(&brick_proc->brick_proc_list,
+ &priv->brick_procs);
}
-
- brick_proc->port = brickinfo->port;
-
- cds_list_add_tail (&brick_proc->brick_proc_list, &priv->brick_procs);
+ } else {
+ ret = 0;
+ brick_proc = parent_brickinfo->brick_proc;
}
-
- cds_list_add_tail (&brickinfo_dup->brick_list, &brick_proc->bricks);
+ cds_list_add_tail(&brickinfo->mux_bricks, &brick_proc->bricks);
+ brickinfo->brick_proc = brick_proc;
brick_proc->brick_count++;
out:
return ret;
@@ -2538,6 +2529,7 @@ glusterd_volume_stop_glusterfs (glusterd_volinfo_t *volinfo,
brickinfo->status = GF_BRICK_STOPPED;
brickinfo->start_triggered = _gf_false;
+ brickinfo->brick_proc = NULL;
if (del_brick)
glusterd_delete_brick (volinfo, brickinfo);
out:
@@ -5704,7 +5696,8 @@ attach_brick (xlator_t *this,
goto out;
}
brickinfo->port = other_brick->port;
- ret = glusterd_brick_process_add_brick (brickinfo);
+ ret = glusterd_brick_process_add_brick(brickinfo
+ , other_brick);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_BRICKPROC_ADD_BRICK_FAILED,
@@ -6259,7 +6252,8 @@ glusterd_brick_start (glusterd_volinfo_t *volinfo,
(void) glusterd_brick_connect (volinfo, brickinfo,
socketpath);
- ret = glusterd_brick_process_add_brick (brickinfo);
+ ret = glusterd_brick_process_add_brick (brickinfo,
+ NULL);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_BRICKPROC_ADD_BRICK_FAILED,
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 8e5320d..69bb8c8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -179,7 +179,8 @@ int32_t
glusterd_resolve_brick (glusterd_brickinfo_t *brickinfo);
int
-glusterd_brick_process_add_brick (glusterd_brickinfo_t *brickinfo);
+glusterd_brick_process_add_brick (glusterd_brickinfo_t *brickinfo,
+ glusterd_brickinfo_t *parent_brickinfo);
int
glusterd_brick_process_remove_brick (glusterd_brickinfo_t *brickinfo);
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index edd41aa..3dfbf9c 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -211,6 +211,15 @@ typedef enum gf_brick_status {
GF_BRICK_STARTING
} gf_brick_status_t;
+struct glusterd_brick_proc {
+ int port;
+ uint32_t brick_count;
+ struct cds_list_head brick_proc_list;
+ struct cds_list_head bricks;
+};
+
+typedef struct glusterd_brick_proc glusterd_brick_proc_t;
+
struct glusterd_brickinfo {
char hostname[1024];
char path[PATH_MAX];
@@ -249,19 +258,13 @@ struct glusterd_brickinfo {
gf_boolean_t port_registered;
gf_boolean_t start_triggered;
pthread_mutex_t restart_mutex;
+ glusterd_brick_proc_t *brick_proc; /* Information regarding mux bricks */
+ struct cds_list_head mux_bricks;
+ /* List to store the bricks in brick_proc*/
};
typedef struct glusterd_brickinfo glusterd_brickinfo_t;
-struct glusterd_brick_proc {
- int port;
- uint32_t brick_count;
- struct cds_list_head brick_proc_list;
- struct cds_list_head bricks;
-};
-
-typedef struct glusterd_brick_proc glusterd_brick_proc_t;
-
struct gf_defrag_brickinfo_ {
char *name;
int files;
--
1.8.3.1

View File

@ -0,0 +1,65 @@
From 92b94a92d2ab3a0d392c0ba6c412bc20144de956 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawa@redhat.com>
Date: Tue, 20 Nov 2018 18:35:58 +0530
Subject: [PATCH 450/450] glusterd: fix Resource leak coverity issue
Problem: In commit bcf1e8b07491b48c5372924dbbbad5b8391c6d81 the code
missed freeing the path returned by function search_brick_path_from_proc
> This patch fixes CID:
> 1396668: Resource leak
> (Cherry pick from commit 818e60ac9269c49396480a151c049042af5b2929)
> (Reviewed on link https://review.gluster.org/#/c/glusterfs/+/21630/)
> Change-Id: I4888c071c1058023c7e138a8bcb94ec97305fadf
> fixes: bz#1646892
Change-Id: I9b0a9f4257b74e65c9f8c8686a6b124445f64d64
BUG: 1649651
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/156334
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-handler.c | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index cab0dec..7486f51 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -6060,6 +6060,7 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
glusterd_brickinfo_t *brickinfo_tmp = NULL;
glusterd_brick_proc_t *brick_proc = NULL;
char pidfile[PATH_MAX] = {0};
+ char *brickpath = NULL;
brickid = mydata;
if (!brickid)
@@ -6167,8 +6168,11 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
*/
GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo,
brickinfo, conf);
- if (!gf_is_service_running (pidfile, &pid) ||
- !search_brick_path_from_proc(pid, brickinfo->path)) {
+ gf_is_service_running(pidfile, &pid);
+ if (pid > 0)
+ brickpath = search_brick_path_from_proc(pid,
+ brickinfo->path);
+ if (!gf_is_service_running (pidfile, &pid) || !brickpath) {
ret = pmap_registry_remove (
THIS, brickinfo->port,
brickinfo->path,
@@ -6186,6 +6190,9 @@ __glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
}
}
+ if (brickpath)
+ GF_FREE (brickpath);
+
if (is_brick_mx_enabled() && glusterd_is_brick_started(brickinfo)) {
brick_proc = brickinfo->brick_proc;
if (!brick_proc)
--
1.8.3.1

View File

@ -192,7 +192,7 @@ Release: 0.1%{?prereltag:.%{prereltag}}%{?dist}
%else
Name: glusterfs
Version: 3.12.2
Release: 27%{?dist}
Release: 28%{?dist}
%endif
License: GPLv2 or LGPLv3+
Group: System Environment/Base
@ -709,6 +709,12 @@ Patch0441: 0441-features-shard-Make-lru-limit-of-inode-list-configur.patch
Patch0442: 0442-glusterd-Reset-op-version-for-features.shard-lru-lim.patch
Patch0443: 0443-features-shard-Hold-a-ref-on-base-inode-when-adding-.patch
Patch0444: 0444-features-shard-fix-formatting-warning.patch
Patch0445: 0445-glusterd-don-t-call-svcs_reconfigure-for-all-volumes.patch
Patch0446: 0446-core-Portmap-entries-showing-stale-brick-entries-whe.patch
Patch0447: 0447-cluster-ec-Don-t-update-trusted.ec.version-if-fop-su.patch
Patch0448: 0448-core-Resolve-memory-leak-at-the-time-of-graph-init.patch
Patch0449: 0449-glusterd-mux-Optimize-brick-disconnect-handler-code.patch
Patch0450: 0450-glusterd-fix-Resource-leak-coverity-issue.patch
%description
GlusterFS is a distributed file-system capable of scaling to several
@ -2657,6 +2663,9 @@ fi
%endif
%changelog
* Tue Nov 20 2018 Milind Changire <mchangir@redhat.com> - 3.12.2-28
- fixes bugs bz#1626350 bz#1648210 bz#1649651 bz#1650138
* Fri Nov 09 2018 Milind Changire <mchangir@redhat.com> - 3.12.2-27
- respin