From 0f566d005fd1a6581690a2887e46d8a1eb510d53 Mon Sep 17 00:00:00 2001
From: Milind Changire
Date: Tue, 4 Dec 2018 01:46:19 -0500
Subject: [PATCH] autobuild v3.12.2-30

Resolves: bz#1650138 bz#1653073

Signed-off-by: Milind Changire
---
 ...x-bricks-per-process-default-value-t.patch |  38 ++
 ...olve-memory-leak-path-in-server_init.patch | 384 ++++++++++++++++++
 glusterfs.spec                                |   7 +-
 3 files changed, 428 insertions(+), 1 deletion(-)
 create mode 100644 0452-glusterd-make-max-bricks-per-process-default-value-t.patch
 create mode 100644 0453-server-Resolve-memory-leak-path-in-server_init.patch

diff --git a/0452-glusterd-make-max-bricks-per-process-default-value-t.patch b/0452-glusterd-make-max-bricks-per-process-default-value-t.patch
new file mode 100644
index 0000000..840e24e
--- /dev/null
+++ b/0452-glusterd-make-max-bricks-per-process-default-value-t.patch
@@ -0,0 +1,38 @@
+From 30908dd034f9289f3f0120a17bb856a1da81422f Mon Sep 17 00:00:00 2001
+From: Atin Mukherjee
+Date: Wed, 21 Nov 2018 07:49:07 +0530
+Subject: [PATCH 452/453] glusterd: make max-bricks-per-process default value
+ to 250
+
+>Change-Id: Ia2c6a10e2b76a4aa8bd4ea97e5ce33bdc813942e
+>Fixes: bz#1652118
+>Signed-off-by: Atin Mukherjee
+
+upstream patch: https://review.gluster.org/#/c/glusterfs/+/21701/
+
+Change-Id: Ia2c6a10e2b76a4aa8bd4ea97e5ce33bdc813942e
+BUG: 1653073
+Signed-off-by: Sanju Rakonde
+Reviewed-on: https://code.engineering.redhat.com/gerrit/157424
+Reviewed-by: Atin Mukherjee
+Tested-by: RHGS Build Bot
+---
+ xlators/mgmt/glusterd/src/glusterd-volume-set.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+index 1175f1d..27d7b20 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
++++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+@@ -3534,7 +3534,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
+         },
+         { .key        = GLUSTERD_BRICKMUX_LIMIT_KEY,
+           .voltype    = "mgmt/glusterd",
+-          .value      = "0",
++          .value      = "250",
+           .op_version = GD_OP_VERSION_3_11_1,
+           .validate_fn = validate_mux_limit,
+           .type       = GLOBAL_DOC,
+-- 
+1.8.3.1
+
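For context: with brick multiplexing enabled, a single glusterfsd process hosts many bricks, and the option keyed by GLUSTERD_BRICKMUX_LIMIT_KEY (cluster.max-bricks-per-process) caps how many; the hunk above only changes the shipped default from 0 (unlimited) to 250. Below is a minimal sketch of the kind of bounds check a validator such as validate_mux_limit has to perform -- standard C only, an illustration under stated assumptions, not the actual glusterd code:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for a glusterd-style option validator:
     * accept 0 (unlimited) or a positive brick count, reject junk. */
    static int
    validate_brick_mux_limit(const char *value, long *limit_out)
    {
            char *end = NULL;
            long v;

            errno = 0;
            v = strtol(value, &end, 10);
            if (errno || end == value || *end != '\0' || v < 0 || v > INT_MAX)
                    return -1; /* not a sane brick count */

            *limit_out = v; /* 0 means "no limit" in this sketch */
            return 0;
    }

    int
    main(void)
    {
            long limit = 0;

            /* "250" mirrors the new default carried by this patch */
            if (validate_brick_mux_limit("250", &limit) == 0)
                    printf("max bricks per process: %ld\n", limit);
            return 0;
    }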
diff --git a/0453-server-Resolve-memory-leak-path-in-server_init.patch b/0453-server-Resolve-memory-leak-path-in-server_init.patch
new file mode 100644
index 0000000..ca11a26
--- /dev/null
+++ b/0453-server-Resolve-memory-leak-path-in-server_init.patch
@@ -0,0 +1,384 @@
+From 919597d141dd79b34a9c0ef9e52a63cc43320d6c Mon Sep 17 00:00:00 2001
+From: Mohit Agrawal
+Date: Mon, 3 Dec 2018 17:05:19 +0530
+Subject: [PATCH 453/453] server: Resolve memory leak path in server_init
+
+Problem: 1) server_init does not clean up allocated resources
+            when it fails before returning an error
+         2) dict leak at the time of destroying the graph
+
+Solution: 1) free resources when server_init fails
+          2) take a dict_ref on the graph xlators before destroying
+             the graph to avoid the leak
+
+> Change-Id: I9e31e156b9ed6bebe622745a8be0e470774e3d15
+> fixes: bz#1654917
+> Cherry pick from commit 46c15ea8fa98bb3d92580b192f03863c2e2a2d9c
+> Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/21750/
+
+Change-Id: I5ba1b37840bcaa4a7fa4c05822c84016a2d89ea2
+BUG: 1650138
+Signed-off-by: Mohit Agrawal
+Reviewed-on: https://code.engineering.redhat.com/gerrit/157445
+Tested-by: RHGS Build Bot
+Reviewed-by: Pranith Kumar Karampuri
+Reviewed-by: Raghavendra Gowdappa
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya
+---
+ glusterfsd/src/glusterfsd.c                |  4 +++
+ libglusterfs/src/xlator.c                  | 31 ++++++++++++++++++
+ libglusterfs/src/xlator.h                  |  3 ++
+ rpc/rpc-lib/src/rpc-transport.c            | 26 ++++++++++++----
+ rpc/rpc-lib/src/rpc-transport.h            |  3 ++
+ rpc/rpc-lib/src/rpcsvc.c                   | 40 ++++++++++++++++++++++++
+ rpc/rpc-lib/src/rpcsvc.h                   |  3 ++
+ xlators/mgmt/glusterd/src/glusterd-utils.c | 33 ++------------------
+ xlators/protocol/server/src/server.c       | 50 +++++++++++++++++++++++++++---
+ 9 files changed, 151 insertions(+), 42 deletions(-)
+
+diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
+index 2e43cdb..78f3719 100644
+--- a/glusterfsd/src/glusterfsd.c
++++ b/glusterfsd/src/glusterfsd.c
+@@ -2411,6 +2411,10 @@ out:
+         xl = graph->first;
+         if ((ctx && (ctx->active != graph)) &&
+             (xl && !strcmp(xl->type, "protocol/server"))) {
++                /* Take a dict ref on every graph xlator to avoid a dict
++                   leak when the graph is destroyed
++                */
++                gluster_graph_take_reference(graph->first);
+                 glusterfs_graph_fini(graph);
+                 glusterfs_graph_destroy(graph);
+         }
+diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c
+index 8aa8aa1..340d83d 100644
+--- a/libglusterfs/src/xlator.c
++++ b/libglusterfs/src/xlator.c
+@@ -1225,3 +1225,34 @@ glusterfs_delete_volfile_checksum (glusterfs_ctx_t *ctx,
+ 
+         return 0;
+ }
++
++/*
++  This function takes a dict ref for every xlator in the graph.
++  While comparing graph topologies we construct a graph and populate
++  key/value pairs in its dictionaries; once the comparison is done we
++  destroy the new graph. Graph construction does not take any
++  reference, so to avoid a dict leak from a ref-counter underflow
++  when the graph is destroyed, we need to call dict_ref here.
++
++*/
++
++void
++gluster_graph_take_reference(xlator_t *tree)
++{
++        xlator_t *trav = tree;
++        xlator_t *prev = tree;
++
++        if (!tree) {
++                gf_msg("parser", GF_LOG_ERROR, 0, LG_MSG_TREE_NOT_FOUND,
++                       "Translator tree not found");
++                return;
++        }
++
++        while (prev) {
++                trav = prev->next;
++                if (prev->options)
++                        dict_ref(prev->options);
++                prev = trav;
++        }
++        return;
++}
+diff --git a/libglusterfs/src/xlator.h b/libglusterfs/src/xlator.h
+index 1879641..f8f2630 100644
+--- a/libglusterfs/src/xlator.h
++++ b/libglusterfs/src/xlator.h
+@@ -1087,4 +1087,7 @@ glusterfs_delete_volfile_checksum (glusterfs_ctx_t *ctx,
+                                    const char *volfile_id);
+ int
+ xlator_memrec_free (xlator_t *xl);
++
++void
++gluster_graph_take_reference(xlator_t *tree);
+ #endif /* _XLATOR_H */
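The ref/unref imbalance that gluster_graph_take_reference repairs can be modelled in a few lines. Below is a self-contained sketch -- dict_t and xlator_t here are simplified stand-ins, not the libglusterfs types: the graph is built without taking a reference on each xlator's options dict, so the unref performed while destroying the graph would underflow unless one reference per xlator is taken first, which is exactly the walk the new function performs:

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the libglusterfs types. */
    typedef struct dict { int refcount; } dict_t;
    typedef struct xlator { dict_t *options; struct xlator *next; } xlator_t;

    static void dict_ref(dict_t *d)   { d->refcount++; }
    static void dict_unref(dict_t *d) { if (--d->refcount <= 0) free(d); }

    /* Mirrors the patch: walk the xlator list and take one ref per
     * options dict so a destroy-time unref cannot underflow. */
    static void
    graph_take_reference(xlator_t *tree)
    {
            xlator_t *trav;

            for (trav = tree; trav; trav = trav->next)
                    if (trav->options)
                            dict_ref(trav->options);
    }

    int
    main(void)
    {
            /* construction populated options but took no reference */
            xlator_t xl = { calloc(1, sizeof(dict_t)), NULL };

            graph_take_reference(&xl); /* refcount 0 -> 1 */
            dict_unref(xl.options);    /* destroy path: 1 -> 0, freed once */
            printf("ref and unref balanced, no underflow\n");
            return 0;
    }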
+diff --git a/rpc/rpc-lib/src/rpc-transport.c b/rpc/rpc-lib/src/rpc-transport.c
+index 94880f4..77abf96 100644
+--- a/rpc/rpc-lib/src/rpc-transport.c
++++ b/rpc/rpc-lib/src/rpc-transport.c
+@@ -160,6 +160,25 @@ out:
+         return msg;
+ }
+ 
++void
++rpc_transport_cleanup(rpc_transport_t *trans)
++{
++        if (!trans)
++                return;
++
++        if (trans->fini)
++                trans->fini(trans);
++
++        GF_FREE(trans->name);
++
++        if (trans->xl)
++                pthread_mutex_destroy(&trans->lock);
++
++        if (trans->dl_handle)
++                dlclose(trans->dl_handle);
++
++        GF_FREE(trans);
++}
+ 
+ 
+ rpc_transport_t *
+@@ -361,12 +380,7 @@ rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
+ 
+ fail:
+         if (trans) {
+-                GF_FREE (trans->name);
+-
+-                if (trans->dl_handle)
+-                        dlclose (trans->dl_handle);
+-
+-                GF_FREE (trans);
++                rpc_transport_cleanup(trans);
+         }
+ 
+         GF_FREE (name);
+diff --git a/rpc/rpc-lib/src/rpc-transport.h b/rpc/rpc-lib/src/rpc-transport.h
+index 33f474e..23246c5 100644
+--- a/rpc/rpc-lib/src/rpc-transport.h
++++ b/rpc/rpc-lib/src/rpc-transport.h
+@@ -316,4 +316,7 @@ rpc_transport_unix_options_build (dict_t **options, char *filepath,
+ 
+ int
+ rpc_transport_inet_options_build (dict_t **options, const char *hostname, int port);
++
++void
++rpc_transport_cleanup(rpc_transport_t *);
+ #endif /* __RPC_TRANSPORT_H__ */
+diff --git a/rpc/rpc-lib/src/rpcsvc.c b/rpc/rpc-lib/src/rpcsvc.c
+index 8d0c409..695e9fb 100644
+--- a/rpc/rpc-lib/src/rpcsvc.c
++++ b/rpc/rpc-lib/src/rpcsvc.c
+@@ -36,6 +36,7 @@
+ #include
+ #include
+ #include
++#include
+ 
+ #include "xdr-rpcclnt.h"
+ #include "glusterfs-acl.h"
+@@ -1677,6 +1678,7 @@ rpcsvc_create_listener (rpcsvc_t *svc, dict_t *options, char *name)
+ 
+         listener = rpcsvc_listener_alloc (svc, trans);
+         if (listener == NULL) {
++                ret = -1;
+                 goto out;
+         }
+ 
+@@ -1684,6 +1686,7 @@ rpcsvc_create_listener (rpcsvc_t *svc, dict_t *options, char *name)
+ out:
+         if (!listener && trans) {
+                 rpc_transport_disconnect (trans, _gf_true);
++                rpc_transport_cleanup(trans);
+         }
+ 
+         return ret;
+@@ -2285,6 +2288,43 @@ rpcsvc_get_throttle (rpcsvc_t *svc)
+ 
+         return svc->throttle;
+ }
+ 
++/* Function to clean up resources for svc
++ */
++int
++rpcsvc_destroy(rpcsvc_t *svc)
++{
++        struct rpcsvc_auth_list *auth = NULL;
++        struct rpcsvc_auth_list *tmp = NULL;
++        rpcsvc_listener_t *listener = NULL;
++        rpcsvc_listener_t *next = NULL;
++        int ret = 0;
++
++        if (!svc)
++                return ret;
++
++        list_for_each_entry_safe(listener, next, &svc->listeners, list)
++        {
++                rpcsvc_listener_destroy(listener);
++        }
++
++        list_for_each_entry_safe(auth, tmp, &svc->authschemes, authlist)
++        {
++                list_del_init(&auth->authlist);
++                GF_FREE(auth);
++        }
++
++        rpcsvc_program_unregister(svc, &gluster_dump_prog);
++        if (svc->rxpool) {
++                mem_pool_destroy(svc->rxpool);
++                svc->rxpool = NULL;
++        }
++
++        pthread_mutex_destroy(&svc->rpclock);
++        GF_FREE(svc);
++
++        return ret;
++}
++
+ /* The global RPC service initializer.
+  */
+ rpcsvc_t *
+diff --git a/rpc/rpc-lib/src/rpcsvc.h b/rpc/rpc-lib/src/rpcsvc.h
+index 34429b4..d3aafac 100644
+--- a/rpc/rpc-lib/src/rpcsvc.h
++++ b/rpc/rpc-lib/src/rpcsvc.h
+@@ -610,4 +610,7 @@ rpcsvc_auth_array (rpcsvc_t *svc, char *volname, int *autharr, int arrlen);
+ rpcsvc_vector_sizer
+ rpcsvc_get_program_vector_sizer (rpcsvc_t *svc, uint32_t prognum,
+                                  uint32_t progver, int procnum);
++
++extern int
++rpcsvc_destroy(rpcsvc_t *svc);
+ #endif
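Note the iteration pattern in rpcsvc_destroy above: it frees listeners and auth entries while walking their lists, which is exactly what the *_safe iterator variants exist for -- the cursor's successor is saved before the current node is released. A self-contained illustration of that discipline in plain C (not the glusterfs list macros):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int id; struct node *next; };

    int
    main(void)
    {
            struct node *head = NULL, *cur = NULL, *next = NULL;
            int i;

            /* build a three-node list: 1 -> 2 -> 3 */
            for (i = 3; i >= 1; i--) {
                    struct node *n = malloc(sizeof(*n));
                    n->id = i;
                    n->next = head;
                    head = n;
            }

            /* "safe" traversal: stash the successor before freeing the
             * node, the discipline list_for_each_entry_safe encodes */
            for (cur = head; cur; cur = next) {
                    next = cur->next; /* saved while cur is still valid */
                    printf("destroying node %d\n", cur->id);
                    free(cur);        /* cur is gone; next keeps us going */
            }
            return 0;
    }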
+diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
+index ec7e27a..b63c95a 100644
+--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
+@@ -9384,35 +9384,6 @@ glusterd_defrag_volume_status_update (glusterd_volinfo_t *volinfo,
+ 
+         return ret;
+ }
+-/*
+-  The function is required to take dict ref for every xlator at graph.
+-  At the time of compare graph topology create a graph and populate
+-  key values in the dictionary, after finished graph comparison we do destroy
+-  the new graph.At the time of construct graph we don't take any reference
+-  so to avoid leak due to ref counter underflow we need to call dict_ref here.
+-
+-*/
+-
+-void
+-glusterd_graph_take_reference (xlator_t *tree)
+-{ xlator_t *trav = tree;
+-        xlator_t *prev = tree;
+-
+-        if (!tree) {
+-                gf_msg ("parser", GF_LOG_ERROR, 0, LG_MSG_TREE_NOT_FOUND,
+-                        "Translator tree not found");
+-                return;
+-        }
+-
+-        while (prev) {
+-                trav = prev->next;
+-                if (prev->options)
+-                        dict_ref (prev->options);
+-                prev = trav;
+-        }
+-        return;
+-}
+-
+ 
+ 
+ int
+@@ -9461,14 +9432,14 @@ glusterd_check_topology_identical (const char *filename1,
+         if (grph1 == NULL)
+                 goto out;
+ 
+-        glusterd_graph_take_reference (grph1->first);
++        gluster_graph_take_reference (grph1->first);
+ 
+         /* create the graph for filename2 */
+         grph2 = glusterfs_graph_construct(fp2);
+         if (grph2 == NULL)
+                 goto out;
+ 
+-        glusterd_graph_take_reference (grph2->first);
++        gluster_graph_take_reference (grph2->first);
+ 
+         /* compare the graph topology */
+         *identical = is_graph_topology_equal(grph1, grph2);
+diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c
+index 65d712f..6f510ea 100644
+--- a/xlators/protocol/server/src/server.c
++++ b/xlators/protocol/server/src/server.c
+@@ -1144,12 +1144,54 @@ client_destroy_cbk (xlator_t *this, client_t *client)
+         return 0;
+ }
+ 
++void
++server_cleanup(xlator_t *this, server_conf_t *conf)
++{
++        if (!this || !conf)
++                return;
++
++        LOCK_DESTROY(&conf->itable_lock);
++        pthread_mutex_destroy(&conf->mutex);
++
++        if (this->ctx->event_pool) {
++                /* Free the event pool */
++                (void)event_pool_destroy(this->ctx->event_pool);
++        }
++
++        if (dict_get(this->options, "config-directory")) {
++                GF_FREE(conf->conf_dir);
++                conf->conf_dir = NULL;
++        }
++
++        if (conf->child_status) {
++                GF_FREE(conf->child_status);
++                conf->child_status = NULL;
++        }
++
++        if (this->ctx->statedump_path) {
++                GF_FREE(this->ctx->statedump_path);
++                this->ctx->statedump_path = NULL;
++        }
++
++        if (conf->auth_modules) {
++                gf_auth_fini(conf->auth_modules);
++                dict_unref(conf->auth_modules);
++        }
++
++        if (conf->rpc) {
++                (void)rpcsvc_destroy(conf->rpc);
++                conf->rpc = NULL;
++        }
++
++        GF_FREE(conf);
++        this->private = NULL;
++}
++
+ int
+ init (xlator_t *this)
+ {
+         int32_t ret = -1;
+         server_conf_t *conf = NULL;
+-        rpcsvc_listener_t *listener = NULL;
+         char *transport_type = NULL;
+         char *statedump_path = NULL;
+         int total_transport = 0;
+@@ -1226,6 +1268,7 @@ init (xlator_t *this)
+         ret = gf_auth_init (this, conf->auth_modules);
+         if (ret) {
+                 dict_unref (conf->auth_modules);
++                conf->auth_modules = NULL;
+                 goto out;
+         }
+ 
+@@ -1378,10 +1421,7 @@ out:
+                 if (this != NULL) {
+                         this->fini (this);
+                 }
+-
+-                if (listener != NULL) {
+-                        rpcsvc_listener_destroy (listener);
+-                }
++                server_cleanup(this, conf);
+         }
+ 
+         return ret;
+-- 
+1.8.3.1
+
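One idiom in server_cleanup above deserves a callout: each pointer is set to NULL immediately after its resource is released, so a second cleanup pass (say, the error path of init followed by fini) degrades to a no-op instead of a double free. A minimal stand-alone sketch of that idiom, using a hypothetical struct rather than the real server_conf_t:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical config object, standing in for server_conf_t. */
    struct conf { char *conf_dir; char *child_status; };

    static void
    conf_cleanup(struct conf *c)
    {
            if (!c)
                    return;

            free(c->conf_dir);
            c->conf_dir = NULL;      /* guards against a second free */

            free(c->child_status);
            c->child_status = NULL;
    }

    int
    main(void)
    {
            struct conf c = { malloc(16), malloc(16) };

            conf_cleanup(&c); /* releases both buffers */
            conf_cleanup(&c); /* no-op: pointers already NULL */
            printf("repeated cleanup is safe\n");
            return 0;
    }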
diff --git a/glusterfs.spec b/glusterfs.spec
index e63e060..886a2cb 100644
--- a/glusterfs.spec
+++ b/glusterfs.spec
@@ -192,7 +192,7 @@ Release: 0.1%{?prereltag:.%{prereltag}}%{?dist}
 %else
 Name: glusterfs
 Version: 3.12.2
-Release: 29%{?dist}
+Release: 30%{?dist}
 %endif
 License: GPLv2 or LGPLv3+
 Group: System Environment/Base
@@ -716,6 +716,8 @@ Patch0448: 0448-core-Resolve-memory-leak-at-the-time-of-graph-init.patch
 Patch0449: 0449-glusterd-mux-Optimize-brick-disconnect-handler-code.patch
 Patch0450: 0450-glusterd-fix-Resource-leak-coverity-issue.patch
 Patch0451: 0451-core-Resolve-memory-leak-at-the-time-of-graph-init.patch
+Patch0452: 0452-glusterd-make-max-bricks-per-process-default-value-t.patch
+Patch0453: 0453-server-Resolve-memory-leak-path-in-server_init.patch
 
 %description
 GlusterFS is a distributed file-system capable of scaling to several
@@ -2664,6 +2666,9 @@ fi
 %endif
 
 %changelog
+* Tue Dec 04 2018 Milind Changire - 3.12.2-30
+- fixes bugs bz#1650138 bz#1653073
+
 * Thu Nov 22 2018 Milind Changire - 3.12.2-29
 - fixes bugs bz#1650138