From 86e0efee73c6d47868a248c5663667ccc883066a Mon Sep 17 00:00:00 2001 From: Milind Changire Date: Tue, 9 Oct 2018 08:21:30 -0400 Subject: [PATCH] autobuild v3.12.2-22 Resolves: bz#1631329 bz#1631372 Signed-off-by: Milind Changire --- ...-handshake-prevent-a-buffer-overflow.patch | 39 + 0389-server-don-t-allow-in-basename.patch | 96 ++ ...rfsd-keeping-fd-open-in-index-xlator.patch | 888 ++++++++++++++++++ ...ATOMIC-to-update-blockers-counter-at.patch | 132 +++ ...ait-for-blockers-flag-for-stop-volum.patch | 46 + ..._name-in-server_call_xlator_mem_clea.patch | 40 + ...vent-taking-file-dump-on-server-side.patch | 43 + ...bitrary-file-creation-outside-entry-.patch | 68 ++ ...e-the-option-verify-volfile-checksum.patch | 458 +++++++++ ...dd-buffer-overflow-checks-in-pl_getx.patch | 59 ++ ...w-meta-lock-count-to-be-more-than-on.patch | 88 ++ ...all-fix-the-format-string-exceptions.patch | 643 +++++++++++++ glusterfs.spec | 17 +- 13 files changed, 2616 insertions(+), 1 deletion(-) create mode 100644 0388-glusterd-handshake-prevent-a-buffer-overflow.patch create mode 100644 0389-server-don-t-allow-in-basename.patch create mode 100644 0390-core-glusterfsd-keeping-fd-open-in-index-xlator.patch create mode 100644 0391-glusterd-Use-GF_ATOMIC-to-update-blockers-counter-at.patch create mode 100644 0392-glusterd-don-t-wait-for-blockers-flag-for-stop-volum.patch create mode 100644 0393-core-Pass-xlator_name-in-server_call_xlator_mem_clea.patch create mode 100644 0394-io-stats-prevent-taking-file-dump-on-server-side.patch create mode 100644 0395-index-prevent-arbitrary-file-creation-outside-entry-.patch create mode 100644 0396-protocol-remove-the-option-verify-volfile-checksum.patch create mode 100644 0397-features-locks-add-buffer-overflow-checks-in-pl_getx.patch create mode 100644 0398-lock-Do-not-allow-meta-lock-count-to-be-more-than-on.patch create mode 100644 0399-all-fix-the-format-string-exceptions.patch diff --git a/0388-glusterd-handshake-prevent-a-buffer-overflow.patch 
b/0388-glusterd-handshake-prevent-a-buffer-overflow.patch new file mode 100644 index 0000000..2531550 --- /dev/null +++ b/0388-glusterd-handshake-prevent-a-buffer-overflow.patch @@ -0,0 +1,39 @@ +From 7eeba48a6aa2bb04b40163849f211f068a8e6452 Mon Sep 17 00:00:00 2001 +From: Amar Tumballi +Date: Tue, 2 Oct 2018 13:27:20 +0530 +Subject: [PATCH 388/399] glusterd-handshake: prevent a buffer overflow + +as key size in xdr can be anything, it can be bigger than the +'NAME_MAX' allowed in the structure, which can allow for service denial +attacks. + +Fixes: CVE-2018-14653 +BUG: 1634668 +Change-Id: I207db66e0bd1959aad7ca40040cc66b9fc81e111 +Signed-off-by: Amar Tumballi +Reviewed-on: https://code.engineering.redhat.com/gerrit/151515 +Reviewed-by: Atin Mukherjee +--- + xlators/mgmt/glusterd/src/glusterd-handshake.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c +index 3d1dfb2..d5594d0 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c ++++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c +@@ -847,6 +847,12 @@ __server_getspec (rpcsvc_request_t *req) + + volume = args.key; + ++ if (strlen (volume) >= (NAME_MAX)) { ++ op_errno = EINVAL; ++ gf_msg (this->name, GF_LOG_ERROR, EINVAL, GD_MSG_NAME_TOO_LONG, ++ "volume name too long (%s)", volume); ++ goto fail; ++ } + /* Need to strip leading '/' from volnames. 
This was introduced to + * support nfs style mount parameters for native gluster mount + */ +-- +1.8.3.1 + diff --git a/0389-server-don-t-allow-in-basename.patch b/0389-server-don-t-allow-in-basename.patch new file mode 100644 index 0000000..7b5d4c7 --- /dev/null +++ b/0389-server-don-t-allow-in-basename.patch @@ -0,0 +1,96 @@ +From fe704e0f997444d74966aa7c5bfca484ce54f6a4 Mon Sep 17 00:00:00 2001 +From: Amar Tumballi +Date: Thu, 27 Sep 2018 09:55:19 +0530 +Subject: [PATCH 389/399] server: don't allow '/' in basename + +Server stack needs to have all the sort of validation, assuming +clients can be compromized. It is possible for a compromized +client to send basenames with paths with '/', and with that +create files without permission on server. By sanitizing the basename, +and not allowing anything other than actual directory as the parent +for any entry creation, we can mitigate the effects of clients +not able to exploit the server. + +Fixes: CVE-2018-14651 + +BUG: 1633013 +Change-Id: I98d042a9f8e300161fbc3ee5b6e8de755c9765f9 +Signed-off-by: Amar Tumballi +Reviewed-on: https://code.engineering.redhat.com/gerrit/151169 +Reviewed-by: Xavi Hernandez +Reviewed-by: Shyam Ranganathan +Reviewed-by: Atin Mukherjee +--- + xlators/protocol/server/src/server-resolve.c | 31 ++++++++++++++++++++-------- + xlators/storage/posix/src/posix-handle.h | 5 +++-- + 2 files changed, 25 insertions(+), 11 deletions(-) + +diff --git a/xlators/protocol/server/src/server-resolve.c b/xlators/protocol/server/src/server-resolve.c +index b3eda0e..25db43f 100644 +--- a/xlators/protocol/server/src/server-resolve.c ++++ b/xlators/protocol/server/src/server-resolve.c +@@ -307,22 +307,35 @@ resolve_entry_simple (call_frame_t *frame) + ret = 1; + goto out; + } +- +- /* expected @parent was found from the inode cache */ +- gf_uuid_copy (state->loc_now->pargfid, resolve->pargfid); +- state->loc_now->parent = inode_ref (parent); +- +- if (strstr (resolve->bname, "../")) { +- /* Resolving outside the 
parent's tree is not allowed */ ++ if (parent->ia_type != IA_IFDIR) { ++ /* Parent type should be 'directory', and nothing else */ + gf_msg (this->name, GF_LOG_ERROR, EPERM, + PS_MSG_GFID_RESOLVE_FAILED, +- "%s: path sent by client not allowed", +- resolve->bname); ++ "%s: parent type not directory (%d)", ++ uuid_utoa (parent->gfid), parent->ia_type); + resolve->op_ret = -1; + resolve->op_errno = EPERM; + ret = 1; + goto out; + } ++ ++ /* expected @parent was found from the inode cache */ ++ gf_uuid_copy (state->loc_now->pargfid, resolve->pargfid); ++ state->loc_now->parent = inode_ref (parent); ++ ++ if (strchr (resolve->bname, '/')) { ++ /* basename should be a string (without '/') in a directory, ++ it can't span multiple levels. This can also lead to ++ resolving outside the parent's tree, which is not allowed */ ++ gf_msg (this->name, GF_LOG_ERROR, EPERM, ++ PS_MSG_GFID_RESOLVE_FAILED, ++ "%s: basename sent by client not allowed", ++ resolve->bname); ++ resolve->op_ret = -1; ++ resolve->op_errno = EPERM; ++ ret = 1; ++ goto out; ++ } + state->loc_now->name = resolve->bname; + + inode = inode_grep (state->itable, parent, resolve->bname); +diff --git a/xlators/storage/posix/src/posix-handle.h b/xlators/storage/posix/src/posix-handle.h +index a0f82ec..45ca1d1 100644 +--- a/xlators/storage/posix/src/posix-handle.h ++++ b/xlators/storage/posix/src/posix-handle.h +@@ -223,9 +223,10 @@ + break; \ + } \ + \ +- if (strstr (loc->name, "../")) { \ ++ if (strchr (loc->name, '/')) { \ + gf_msg (this->name, GF_LOG_ERROR, 0, P_MSG_ENTRY_HANDLE_CREATE, \ +- "'../' in name not allowed: (%s)", loc->name); \ ++ "'/' in name not allowed: (%s)", loc->name); \ ++ op_ret = -1; \ + break; \ + } \ + if (LOC_HAS_ABSPATH (loc)) { \ +-- +1.8.3.1 + diff --git a/0390-core-glusterfsd-keeping-fd-open-in-index-xlator.patch b/0390-core-glusterfsd-keeping-fd-open-in-index-xlator.patch new file mode 100644 index 0000000..b4b6467 --- /dev/null +++ 
b/0390-core-glusterfsd-keeping-fd-open-in-index-xlator.patch @@ -0,0 +1,888 @@ +From 9b26837434977601f1e48477904486ea032f742a Mon Sep 17 00:00:00 2001 +From: Mohit Agrawal +Date: Mon, 8 Oct 2018 22:32:28 +0530 +Subject: [PATCH 390/399] core: glusterfsd keeping fd open in index xlator + +Problem: Current resource cleanup sequence is not + perfect while brick mux is enabled + +Solution: 1. Destroying xprt after cleanup all fd associated + with a client + 2. Before call fini for brick xlators ensure no stub + should be running on a brick + +> Change-Id: I86195785e428f57d3ef0da3e4061021fafacd435 +> fixes: bz#1631357 +> (cherry picked from commit 5bc4594dabc08fd4de1940c044946e33037f2ac7) +> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/21235/) + +Change-Id: I0676a2f8e42557c1107a877406e255b93a77a269 +BUG: 1631372 +Signed-off-by: Mohit Agrawal +Reviewed-on: https://code.engineering.redhat.com/gerrit/152170 +Tested-by: RHGS Build Bot +Reviewed-by: Raghavendra Gowdappa +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + libglusterfs/src/defaults-tmpl.c | 8 +- + libglusterfs/src/xlator.c | 2 + + libglusterfs/src/xlator.h | 7 + + xlators/features/index/src/index.c | 50 ++++++- + xlators/features/index/src/index.h | 2 + + xlators/performance/io-threads/src/io-threads.c | 45 ++++++- + xlators/performance/io-threads/src/io-threads.h | 2 +- + xlators/protocol/server/src/server-handshake.c | 23 +++- + xlators/protocol/server/src/server-helpers.c | 79 +++++++++-- + xlators/protocol/server/src/server-helpers.h | 2 +- + xlators/protocol/server/src/server.c | 171 ++++++++++++++++-------- + xlators/protocol/server/src/server.h | 9 +- + xlators/storage/posix/src/posix.c | 11 ++ + 13 files changed, 333 insertions(+), 78 deletions(-) + +diff --git a/libglusterfs/src/defaults-tmpl.c b/libglusterfs/src/defaults-tmpl.c +index 0ef14d5..3fdeabb 100644 +--- a/libglusterfs/src/defaults-tmpl.c ++++ b/libglusterfs/src/defaults-tmpl.c +@@ -119,6 +119,8 @@ default_release 
(xlator_t *this, fd_t *fd) + int + default_notify (xlator_t *this, int32_t event, void *data, ...) + { ++ xlator_t *victim = data; ++ + switch (event) { + case GF_EVENT_PARENT_UP: + case GF_EVENT_PARENT_DOWN: +@@ -126,7 +128,11 @@ default_notify (xlator_t *this, int32_t event, void *data, ...) + xlator_list_t *list = this->children; + + while (list) { +- xlator_notify (list->xlator, event, this); ++ if (victim && victim->cleanup_starting) { ++ xlator_notify(list->xlator, event, victim); ++ } else { ++ xlator_notify(list->xlator, event, this); ++ } + list = list->next; + } + } +diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c +index 1cf4a63..8aa8aa1 100644 +--- a/libglusterfs/src/xlator.c ++++ b/libglusterfs/src/xlator.c +@@ -489,6 +489,8 @@ xlator_init (xlator_t *xl) + xl->mem_acct_init (xl); + + xl->instance_name = NULL; ++ GF_ATOMIC_INIT(xl->xprtrefcnt, 0); ++ GF_ATOMIC_INIT(xl->fd_cnt, 0); + if (!xl->init) { + gf_msg (xl->name, GF_LOG_WARNING, 0, LG_MSG_INIT_FAILED, + "No init() found"); +diff --git a/libglusterfs/src/xlator.h b/libglusterfs/src/xlator.h +index 7434da8..1879641 100644 +--- a/libglusterfs/src/xlator.h ++++ b/libglusterfs/src/xlator.h +@@ -965,7 +965,14 @@ struct _xlator { + /* flag to avoid recall of xlator_mem_cleanup for xame xlator */ + uint32_t call_cleanup; + ++ /* Variable to save fd_count for detach brick */ ++ gf_atomic_t fd_cnt; + ++ /* Variable to save xprt associated for detach brick */ ++ gf_atomic_t xprtrefcnt; ++ ++ /* Flag to notify got CHILD_DOWN event for detach brick */ ++ uint32_t notify_down; + }; + + typedef struct { +diff --git a/xlators/features/index/src/index.c b/xlators/features/index/src/index.c +index f3b0270..bf3f4dd 100644 +--- a/xlators/features/index/src/index.c ++++ b/xlators/features/index/src/index.c +@@ -188,6 +188,7 @@ worker_enqueue (xlator_t *this, call_stub_t *stub) + pthread_mutex_lock (&priv->mutex); + { + __index_enqueue (&priv->callstubs, stub); ++ GF_ATOMIC_INC(priv->stub_cnt); + 
pthread_cond_signal (&priv->cond); + } + pthread_mutex_unlock (&priv->mutex); +@@ -223,11 +224,18 @@ index_worker (void *data) + } + if (!bye) + stub = __index_dequeue (&priv->callstubs); ++ if (bye) { ++ priv->curr_count--; ++ if (priv->curr_count == 0) ++ pthread_cond_broadcast(&priv->cond); ++ } + } + pthread_mutex_unlock (&priv->mutex); + +- if (stub) /* guard against spurious wakeups */ ++ if (stub) {/* guard against spurious wakeups */ + call_resume (stub); ++ GF_ATOMIC_DEC(priv->stub_cnt); ++ } + stub = NULL; + if (bye) + break; +@@ -2375,6 +2383,7 @@ init (xlator_t *this) + gf_uuid_generate (priv->internal_vgfid[i]); + + INIT_LIST_HEAD (&priv->callstubs); ++ GF_ATOMIC_INIT(priv->stub_cnt, 0); + + this->local_pool = mem_pool_new (index_local_t, 64); + if (!this->local_pool) { +@@ -2403,6 +2412,7 @@ init (xlator_t *this) + index_set_link_count (priv, count, XATTROP); + priv->down = _gf_false; + ++ priv->curr_count = 0; + ret = gf_thread_create (&priv->thread, &w_attr, index_worker, this, + "idxwrker"); + if (ret) { +@@ -2411,7 +2421,7 @@ init (xlator_t *this) + "Failed to create worker thread, aborting"); + goto out; + } +- ++ priv->curr_count++; + ret = 0; + out: + if (ret) { +@@ -2528,6 +2538,9 @@ notify (xlator_t *this, int event, void *data, ...) + { + int ret = 0; + index_priv_t *priv = NULL; ++ uint64_t stub_cnt = 0; ++ xlator_t *victim = data; ++ struct timespec sleep_till = {0,}; + + if (!this) + return 0; +@@ -2536,6 +2549,39 @@ notify (xlator_t *this, int event, void *data, ...) 
+ if (!priv) + return 0; + ++ if ((event == GF_EVENT_PARENT_DOWN) && victim->cleanup_starting) { ++ stub_cnt = GF_ATOMIC_GET(priv->stub_cnt); ++ clock_gettime(CLOCK_REALTIME, &sleep_till); ++ sleep_till.tv_sec += 1; ++ ++ /* Wait for draining stub from queue before notify PARENT_DOWN */ ++ pthread_mutex_lock(&priv->mutex); ++ { ++ while (stub_cnt) { ++ (void)pthread_cond_timedwait(&priv->cond, &priv->mutex, ++ &sleep_till); ++ stub_cnt = GF_ATOMIC_GET(priv->stub_cnt); ++ } ++ } ++ pthread_mutex_unlock(&priv->mutex); ++ gf_log(this->name, GF_LOG_INFO, ++ "Notify GF_EVENT_PARENT_DOWN for brick %s", victim->name); ++ } ++ ++ if ((event == GF_EVENT_CHILD_DOWN) && victim->cleanup_starting) { ++ pthread_mutex_lock(&priv->mutex); ++ { ++ priv->down = _gf_true; ++ pthread_cond_broadcast(&priv->cond); ++ while (priv->curr_count) ++ pthread_cond_wait(&priv->cond, &priv->mutex); ++ } ++ pthread_mutex_unlock(&priv->mutex); ++ ++ gf_log(this->name, GF_LOG_INFO, ++ "Notify GF_EVENT_CHILD_DOWN for brick %s", victim->name); ++ } ++ + ret = default_notify (this, event, data); + return ret; + } +diff --git a/xlators/features/index/src/index.h b/xlators/features/index/src/index.h +index ae9091d..d935294 100644 +--- a/xlators/features/index/src/index.h ++++ b/xlators/features/index/src/index.h +@@ -62,6 +62,8 @@ typedef struct index_priv { + int64_t pending_count; + pthread_t thread; + gf_boolean_t down; ++ gf_atomic_t stub_cnt; ++ int32_t curr_count; + } index_priv_t; + + typedef struct index_local { +diff --git a/xlators/performance/io-threads/src/io-threads.c b/xlators/performance/io-threads/src/io-threads.c +index 1e1816a..5c47072 100644 +--- a/xlators/performance/io-threads/src/io-threads.c ++++ b/xlators/performance/io-threads/src/io-threads.c +@@ -120,7 +120,7 @@ __iot_dequeue (iot_conf_t *conf, int *pri) + if (!stub) + return NULL; + +- conf->queue_size--; ++ GF_ATOMIC_DEC(conf->queue_size); + conf->queue_sizes[*pri]--; + + return stub; +@@ -153,7 +153,7 @@ __iot_enqueue 
(iot_conf_t *conf, call_stub_t *stub, int pri) + } + list_add_tail (&stub->list, &ctx->reqs); + +- conf->queue_size++; ++ GF_ATOMIC_INC(conf->queue_size); + conf->queue_sizes[pri]++; + } + +@@ -182,7 +182,7 @@ iot_worker (void *data) + conf->ac_iot_count[pri]--; + pri = -1; + } +- while (conf->queue_size == 0) { ++ while (GF_ATOMIC_GET(conf->queue_size) == 0) { + if (conf->down) { + bye = _gf_true;/*Avoid sleep*/ + break; +@@ -816,7 +816,7 @@ __iot_workers_scale (iot_conf_t *conf) + gf_msg_debug (conf->this->name, 0, + "scaled threads to %d (queue_size=%d/%d)", + conf->curr_count, +- conf->queue_size, scale); ++ GF_ATOMIC_GET(conf->queue_size), scale); + } else { + break; + } +@@ -1030,6 +1030,7 @@ init (xlator_t *this) + bool, out); + + conf->this = this; ++ GF_ATOMIC_INIT(conf->queue_size, 0); + + for (i = 0; i < IOT_PRI_MAX; i++) { + INIT_LIST_HEAD (&conf->clients[i]); +@@ -1073,9 +1074,41 @@ int + notify (xlator_t *this, int32_t event, void *data, ...) + { + iot_conf_t *conf = this->private; ++ xlator_t *victim = data; ++ uint64_t queue_size = 0; ++ struct timespec sleep_till = {0, }; ++ ++ if (GF_EVENT_PARENT_DOWN == event) { ++ if (victim->cleanup_starting) { ++ clock_gettime(CLOCK_REALTIME, &sleep_till); ++ sleep_till.tv_sec += 1; ++ /* Wait for draining stub from queue before notify PARENT_DOWN */ ++ queue_size = GF_ATOMIC_GET(conf->queue_size); ++ ++ pthread_mutex_lock(&conf->mutex); ++ { ++ while (queue_size) { ++ (void)pthread_cond_timedwait(&conf->cond, &conf->mutex, ++ &sleep_till); ++ queue_size = GF_ATOMIC_GET(conf->queue_size); ++ } ++ } ++ pthread_mutex_unlock(&conf->mutex); + +- if (GF_EVENT_PARENT_DOWN == event) +- iot_exit_threads (conf); ++ gf_log(this->name, GF_LOG_INFO, ++ "Notify GF_EVENT_PARENT_DOWN for brick %s", victim->name); ++ } else { ++ iot_exit_threads(conf); ++ } ++ } ++ ++ if (GF_EVENT_CHILD_DOWN == event) { ++ if (victim->cleanup_starting) { ++ iot_exit_threads(conf); ++ gf_log(this->name, GF_LOG_INFO, ++ "Notify 
GF_EVENT_CHILD_DOWN for brick %s", victim->name); ++ } ++ } + + default_notify (this, event, data); + +diff --git a/xlators/performance/io-threads/src/io-threads.h b/xlators/performance/io-threads/src/io-threads.h +index 9648f74..7a6973c 100644 +--- a/xlators/performance/io-threads/src/io-threads.h ++++ b/xlators/performance/io-threads/src/io-threads.h +@@ -75,7 +75,7 @@ struct iot_conf { + int32_t ac_iot_limit[IOT_PRI_MAX]; + int32_t ac_iot_count[IOT_PRI_MAX]; + int queue_sizes[IOT_PRI_MAX]; +- int queue_size; ++ gf_atomic_t queue_size; + pthread_attr_t w_attr; + gf_boolean_t least_priority; /*Enable/Disable least-priority */ + +diff --git a/xlators/protocol/server/src/server-handshake.c b/xlators/protocol/server/src/server-handshake.c +index 12f620c..75577fa 100644 +--- a/xlators/protocol/server/src/server-handshake.c ++++ b/xlators/protocol/server/src/server-handshake.c +@@ -576,6 +576,7 @@ server_setvolume (rpcsvc_request_t *req) + goto fail; + } + ++ pthread_mutex_lock(&conf->mutex); + list_for_each_entry (tmp, &conf->child_status->status_list, + status_list) { + if (strcmp (tmp->name, name) == 0) +@@ -583,7 +584,7 @@ server_setvolume (rpcsvc_request_t *req) + } + + if (!tmp->name) { +- gf_msg (this->name, GF_LOG_ERROR, 0, ++ gf_msg (this->name, GF_LOG_INFO, 0, + PS_MSG_CHILD_STATUS_FAILED, + "No xlator %s is found in " + "child status list", name); +@@ -594,7 +595,21 @@ server_setvolume (rpcsvc_request_t *req) + PS_MSG_DICT_GET_FAILED, + "Failed to set 'child_up' for xlator %s " + "in the reply dict", tmp->name); ++ if (!tmp->child_up) { ++ ret = dict_set_str(reply, "ERROR", ++ "Not received child_up for this xlator"); ++ if (ret < 0) ++ gf_msg_debug(this->name, 0, "failed to set error msg"); ++ ++ gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_CHILD_STATUS_FAILED, ++ "Not received child_up for this xlator %s", name); ++ op_ret = -1; ++ op_errno = EAGAIN; ++ pthread_mutex_unlock(&conf->mutex); ++ goto fail; ++ } + } ++ pthread_mutex_unlock(&conf->mutex); + + ret 
= dict_get_str (params, "process-uuid", &client_uid); + if (ret < 0) { +@@ -666,7 +681,7 @@ server_setvolume (rpcsvc_request_t *req) + if (serv_ctx->lk_version != 0 && + serv_ctx->lk_version != lk_version) { + (void) server_connection_cleanup (this, client, +- INTERNAL_LOCKS | POSIX_LOCKS); ++ INTERNAL_LOCKS | POSIX_LOCKS, NULL); + } + + pthread_mutex_lock (&conf->mutex); +@@ -812,9 +827,9 @@ server_setvolume (rpcsvc_request_t *req) + req->trans->clnt_options = dict_ref(params); + + gf_msg (this->name, GF_LOG_INFO, 0, PS_MSG_CLIENT_ACCEPTED, +- "accepted client from %s (version: %s)", ++ "accepted client from %s (version: %s) with subvol %s", + client->client_uid, +- (clnt_version) ? clnt_version : "old"); ++ (clnt_version) ? clnt_version : "old", name); + + gf_event (EVENT_CLIENT_CONNECT, "client_uid=%s;" + "client_identifier=%s;server_identifier=%s;" +diff --git a/xlators/protocol/server/src/server-helpers.c b/xlators/protocol/server/src/server-helpers.c +index c492ab1..99256bf 100644 +--- a/xlators/protocol/server/src/server-helpers.c ++++ b/xlators/protocol/server/src/server-helpers.c +@@ -242,16 +242,51 @@ server_connection_cleanup_flush_cbk (call_frame_t *frame, void *cookie, + int32_t ret = -1; + fd_t *fd = NULL; + client_t *client = NULL; ++ uint64_t fd_cnt = 0; ++ xlator_t *victim = NULL; ++ server_conf_t *conf = NULL; ++ xlator_t *serv_xl = NULL; ++ rpc_transport_t *xprt = NULL; ++ rpc_transport_t *xp_next = NULL; ++ int32_t detach = (long)cookie; ++ gf_boolean_t xprt_found = _gf_false; + + GF_VALIDATE_OR_GOTO ("server", this, out); + GF_VALIDATE_OR_GOTO ("server", frame, out); + + fd = frame->local; + client = frame->root->client; ++ serv_xl = frame->this; ++ conf = serv_xl->private; + + fd_unref (fd); + frame->local = NULL; + ++ if (client) ++ victim = client->bound_xl; ++ ++ if (victim) { ++ fd_cnt = GF_ATOMIC_DEC(victim->fd_cnt); ++ if (!fd_cnt && conf && detach) { ++ pthread_mutex_lock(&conf->mutex); ++ { ++ list_for_each_entry_safe(xprt, xp_next, 
&conf->xprt_list, list) ++ { ++ if (!xprt->xl_private) ++ continue; ++ if (xprt->xl_private == client) { ++ xprt_found = _gf_true; ++ break; ++ } ++ } ++ } ++ pthread_mutex_unlock(&conf->mutex); ++ if (xprt_found) { ++ rpc_transport_unref(xprt); ++ } ++ } ++ } ++ + gf_client_unref (client); + STACK_DESTROY (frame->root); + +@@ -262,7 +297,7 @@ out: + + + static int +-do_fd_cleanup (xlator_t *this, client_t* client, fdentry_t *fdentries, int fd_count) ++do_fd_cleanup (xlator_t *this, client_t *client, fdentry_t *fdentries, int fd_count, int32_t detach) + { + fd_t *fd = NULL; + int i = 0, ret = -1; +@@ -307,9 +342,10 @@ do_fd_cleanup (xlator_t *this, client_t* client, fdentry_t *fdentries, int fd_co + memset (&tmp_frame->root->lk_owner, 0, + sizeof (gf_lkowner_t)); + +- STACK_WIND (tmp_frame, +- server_connection_cleanup_flush_cbk, +- bound_xl, bound_xl->fops->flush, fd, NULL); ++ STACK_WIND_COOKIE (tmp_frame, ++ server_connection_cleanup_flush_cbk, ++ (void *)(long)detach, bound_xl, ++ bound_xl->fops->flush, fd, NULL); + } + } + +@@ -323,13 +359,18 @@ out: + + int + server_connection_cleanup (xlator_t *this, client_t *client, +- int32_t flags) ++ int32_t flags, gf_boolean_t *fd_exist) + { + server_ctx_t *serv_ctx = NULL; + fdentry_t *fdentries = NULL; + uint32_t fd_count = 0; + int cd_ret = 0; + int ret = 0; ++ xlator_t *bound_xl = NULL; ++ int i = 0; ++ fd_t *fd = NULL; ++ uint64_t fd_cnt = 0; ++ int32_t detach = 0; + + GF_VALIDATE_OR_GOTO (this->name, this, out); + GF_VALIDATE_OR_GOTO (this->name, client, out); +@@ -360,13 +401,35 @@ server_connection_cleanup (xlator_t *this, client_t *client, + } + + if (fdentries != NULL) { ++ /* Loop to configure fd_count on victim brick */ ++ bound_xl = client->bound_xl; ++ if (bound_xl) { ++ for (i = 0; i < fd_count; i++) { ++ fd = fdentries[i].fd; ++ if (!fd) ++ continue; ++ fd_cnt++; ++ } ++ if (fd_cnt) { ++ if (fd_exist) ++ (*fd_exist) = _gf_true; ++ GF_ATOMIC_ADD(bound_xl->fd_cnt, fd_cnt); ++ } ++ } ++ ++ /* If fd_exist 
is not NULL it means function is invoke ++ by server_rpc_notify at the time of getting DISCONNECT ++ notification ++ */ ++ if (fd_exist) ++ detach = 1; + gf_msg_debug (this->name, 0, "Performing cleanup on %d " + "fdentries", fd_count); +- ret = do_fd_cleanup (this, client, fdentries, fd_count); +- } +- else ++ ret = do_fd_cleanup (this, client, fdentries, fd_count, detach); ++ } else { + gf_msg (this->name, GF_LOG_INFO, 0, PS_MSG_FDENTRY_NULL, + "no fdentries to clean"); ++ } + + if (cd_ret || ret) + ret = -1; +diff --git a/xlators/protocol/server/src/server-helpers.h b/xlators/protocol/server/src/server-helpers.h +index 1f47bc9..84a0cf4 100644 +--- a/xlators/protocol/server/src/server-helpers.h ++++ b/xlators/protocol/server/src/server-helpers.h +@@ -42,7 +42,7 @@ get_frame_from_request (rpcsvc_request_t *req); + + int + server_connection_cleanup (xlator_t *this, struct _client *client, +- int32_t flags); ++ int32_t flags, gf_boolean_t *fd_exist); + + gf_boolean_t + server_cancel_grace_timer (xlator_t *this, struct _client *client); +diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c +index 69ad184..8ac0bd1 100644 +--- a/xlators/protocol/server/src/server.c ++++ b/xlators/protocol/server/src/server.c +@@ -79,7 +79,7 @@ grace_time_handler (void *data) + + if (detached) /* reconnection did not happen :-( */ + server_connection_cleanup (this, client, +- INTERNAL_LOCKS | POSIX_LOCKS); ++ INTERNAL_LOCKS | POSIX_LOCKS, NULL); + gf_client_unref (client); + } + out: +@@ -195,7 +195,7 @@ server_submit_reply (call_frame_t *frame, rpcsvc_request_t *req, void *arg, + "Reply submission failed"); + if (frame && client && !lk_heal) { + server_connection_cleanup (frame->this, client, +- INTERNAL_LOCKS | POSIX_LOCKS); ++ INTERNAL_LOCKS | POSIX_LOCKS, NULL); + } else { + gf_msg_callingfn ("", GF_LOG_ERROR, 0, + PS_MSG_REPLY_SUBMIT_FAILED, +@@ -466,6 +466,33 @@ out: + return error; + } + ++void ++server_call_xlator_mem_cleanup(xlator_t *this, 
char *victim_name) ++{ ++ pthread_t th_id = { 0, }; ++ int th_ret = -1; ++ server_cleanup_xprt_arg_t *arg = NULL; ++ ++ if (!victim_name) ++ return; ++ ++ gf_log(this->name, GF_LOG_INFO, "Create graph janitor thread for brick %s", ++ victim_name); ++ ++ arg = calloc(1, sizeof(*arg)); ++ arg->this = this; ++ arg->victim_name = gf_strdup(victim_name); ++ th_ret = gf_thread_create_detached(&th_id, server_graph_janitor_threads, ++ arg, "graphjanitor"); ++ if (th_ret) { ++ gf_log(this->name, GF_LOG_ERROR, ++ "graph janitor Thread" ++ " creation is failed for brick %s", ++ victim_name); ++ GF_FREE(arg->victim_name); ++ free(arg); ++ } ++} + + int + server_rpc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event, +@@ -480,14 +507,9 @@ server_rpc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event, + struct timespec grace_ts = {0, }; + char *auth_path = NULL; + int ret = -1; +- gf_boolean_t victim_found = _gf_false; + char *xlator_name = NULL; +- glusterfs_ctx_t *ctx = NULL; +- xlator_t *top = NULL; +- xlator_list_t **trav_p = NULL; +- xlator_t *travxl = NULL; + uint64_t xprtrefcount = 0; +- struct _child_status *tmp = NULL; ++ gf_boolean_t fd_exist = _gf_false; + + + if (!xl || !data) { +@@ -500,7 +522,6 @@ server_rpc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event, + this = xl; + trans = data; + conf = this->private; +- ctx = glusterfsd_ctx; + + switch (event) { + case RPCSVC_EVENT_ACCEPT: +@@ -538,7 +559,8 @@ server_rpc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event, + */ + pthread_mutex_lock (&conf->mutex); + client = trans->xl_private; +- list_del_init (&trans->list); ++ if (!client) ++ list_del_init (&trans->list); + pthread_mutex_unlock (&conf->mutex); + + if (!client) +@@ -563,7 +585,7 @@ server_rpc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event, + gf_client_put (client, &detached); + if (detached) { + server_connection_cleanup (this, client, +- INTERNAL_LOCKS | POSIX_LOCKS); ++ INTERNAL_LOCKS | POSIX_LOCKS, &fd_exist); + + gf_event 
(EVENT_CLIENT_DISCONNECT, + "client_uid=%s;" +@@ -638,56 +660,38 @@ server_rpc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event, + unref_transport: + /* rpc_transport_unref() causes a RPCSVC_EVENT_TRANSPORT_DESTROY + * to be called in blocking manner +- * So no code should ideally be after this unref ++ * So no code should ideally be after this unref, Call rpc_transport_unref ++ * only while no client exist or client is not detached or no fd associated ++ with client + */ +- rpc_transport_unref (trans); ++ if (!client || !detached || !fd_exist) ++ rpc_transport_unref (trans); + + break; + + case RPCSVC_EVENT_TRANSPORT_DESTROY: ++ pthread_mutex_lock(&conf->mutex); + client = trans->xl_private; ++ list_del_init(&trans->list); ++ pthread_mutex_unlock(&conf->mutex); + if (!client) + break; +- pthread_mutex_lock (&conf->mutex); +- list_for_each_entry (tmp, &conf->child_status->status_list, +- status_list) { +- if (tmp->name && client->bound_xl && +- client->bound_xl->cleanup_starting && +- !strcmp (tmp->name, client->bound_xl->name)) { +- xprtrefcount = GF_ATOMIC_GET (tmp->xprtrefcnt); +- if (xprtrefcount > 0) { +- xprtrefcount = GF_ATOMIC_DEC (tmp->xprtrefcnt); +- if (xprtrefcount == 0) +- xlator_name = gf_strdup(client->bound_xl->name); +- } +- break; ++ ++ if (client->bound_xl && client->bound_xl->cleanup_starting) { ++ xprtrefcount = GF_ATOMIC_GET (client->bound_xl->xprtrefcnt); ++ if (xprtrefcount > 0) { ++ xprtrefcount = GF_ATOMIC_DEC (client->bound_xl->xprtrefcnt); ++ if (xprtrefcount == 0) ++ xlator_name = gf_strdup(client->bound_xl->name); + } + } +- pthread_mutex_unlock (&conf->mutex); + + /* unref only for if (!client->lk_heal) */ + if (!conf->lk_heal) + gf_client_unref (client); + + if (xlator_name) { +- if (this->ctx->active) { +- top = this->ctx->active->first; +- LOCK (&ctx->volfile_lock); +- for (trav_p = &top->children; *trav_p; +- trav_p = &(*trav_p)->next) { +- travxl = (*trav_p)->xlator; +- if (!travxl->call_cleanup && +- strcmp (travxl->name, 
xlator_name) == 0) { +- victim_found = _gf_true; +- break; +- } +- } +- UNLOCK (&ctx->volfile_lock); +- if (victim_found) { +- xlator_mem_cleanup (travxl); +- glusterfs_autoscale_threads (ctx, -1); +- } +- } ++ server_call_xlator_mem_cleanup (this, xlator_name); + GF_FREE (xlator_name); + } + +@@ -701,6 +705,67 @@ out: + return 0; + } + ++void * ++server_graph_janitor_threads(void *data) ++{ ++ xlator_t *victim = NULL; ++ xlator_t *this = NULL; ++ server_conf_t *conf = NULL; ++ glusterfs_ctx_t *ctx = NULL; ++ char *victim_name = NULL; ++ server_cleanup_xprt_arg_t *arg = NULL; ++ gf_boolean_t victim_found = _gf_false; ++ xlator_list_t **trav_p = NULL; ++ xlator_t *top = NULL; ++ ++ GF_ASSERT(data); ++ ++ arg = data; ++ this = arg->this; ++ victim_name = arg->victim_name; ++ THIS = arg->this; ++ conf = this->private; ++ ++ ctx = THIS->ctx; ++ GF_VALIDATE_OR_GOTO(this->name, ctx, out); ++ ++ top = this->ctx->active->first; ++ LOCK(&ctx->volfile_lock); ++ for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) { ++ victim = (*trav_p)->xlator; ++ if (victim->cleanup_starting && ++ strcmp(victim->name, victim_name) == 0) { ++ victim_found = _gf_true; ++ break; ++ } ++ } ++ if (victim_found) ++ glusterfs_delete_volfile_checksum(ctx, victim->volfile_id); ++ UNLOCK(&ctx->volfile_lock); ++ if (!victim_found) { ++ gf_log(this->name, GF_LOG_ERROR, ++ "victim brick %s is not" ++ " found in graph", ++ victim_name); ++ goto out; ++ } ++ ++ default_notify(victim, GF_EVENT_PARENT_DOWN, victim); ++ if (victim->notify_down) { ++ gf_log(THIS->name, GF_LOG_INFO, ++ "Start call fini for brick" ++ " %s stack", ++ victim->name); ++ xlator_mem_cleanup(victim); ++ glusterfs_autoscale_threads(ctx, -1); ++ } ++ ++out: ++ GF_FREE(arg->victim_name); ++ free(arg); ++ return NULL; ++} ++ + int32_t + mem_acct_init (xlator_t *this) + { +@@ -1136,13 +1201,7 @@ init (xlator_t *this) + conf->child_status = GF_CALLOC (1, sizeof (struct _child_status), + gf_server_mt_child_status); + 
INIT_LIST_HEAD (&conf->child_status->status_list); +- GF_ATOMIC_INIT (conf->child_status->xprtrefcnt, 0); + +- /*ret = dict_get_str (this->options, "statedump-path", &statedump_path); +- if (!ret) { +- gf_path_strip_trailing_slashes (statedump_path); +- this->ctx->statedump_path = statedump_path; +- }*/ + GF_OPTION_INIT ("statedump-path", statedump_path, path, out); + if (statedump_path) { + gf_path_strip_trailing_slashes (statedump_path); +@@ -1589,6 +1648,11 @@ notify (xlator_t *this, int32_t event, void *data, ...) + + case GF_EVENT_CHILD_DOWN: + { ++ if (victim->cleanup_starting) { ++ victim->notify_down = 1; ++ gf_log(this->name, GF_LOG_INFO, ++ "Getting CHILD_DOWN event for brick %s", victim->name); ++ } + ret = server_process_child_event (this, event, data, + GF_CBK_CHILD_DOWN); + if (ret) { +@@ -1622,7 +1686,7 @@ notify (xlator_t *this, int32_t event, void *data, ...) + status_list) { + if (strcmp (tmp->name, victim->name) == 0) { + tmp->child_up = _gf_false; +- GF_ATOMIC_INIT (tmp->xprtrefcnt, totxprt); ++ GF_ATOMIC_INIT (victim->xprtrefcnt, totxprt); + break; + } + } +@@ -1668,8 +1732,7 @@ notify (xlator_t *this, int32_t event, void *data, ...) 
+ glusterfs_mgmt_pmap_signout (ctx, + victim->name); + if (!xprt_found && victim_found) { +- xlator_mem_cleanup (victim); +- glusterfs_autoscale_threads (ctx, -1); ++ server_call_xlator_mem_cleanup (this, victim); + } + } + break; +diff --git a/xlators/protocol/server/src/server.h b/xlators/protocol/server/src/server.h +index 691c75b..23775d4 100644 +--- a/xlators/protocol/server/src/server.h ++++ b/xlators/protocol/server/src/server.h +@@ -78,7 +78,6 @@ struct _child_status { + struct list_head status_list; + char *name; + gf_boolean_t child_up; +- gf_atomic_t xprtrefcnt; + }; + struct server_conf { + rpcsvc_t *rpc; +@@ -222,6 +221,10 @@ typedef struct _server_ctx { + uint32_t lk_version; + } server_ctx_t; + ++typedef struct server_cleanup_xprt_arg { ++ xlator_t *this; ++ char *victim_name; ++} server_cleanup_xprt_arg_t; + + int + server_submit_reply (call_frame_t *frame, rpcsvc_request_t *req, void *arg, +@@ -246,4 +249,8 @@ serialize_rsp_direntp (gf_dirent_t *entries, gfs3_readdirp_rsp *rsp); + + server_ctx_t* + server_ctx_get (client_t *client, xlator_t *xlator); ++ ++void * ++server_graph_janitor_threads(void *); ++ + #endif /* !_SERVER_H */ +diff --git a/xlators/storage/posix/src/posix.c b/xlators/storage/posix/src/posix.c +index 1d3f1ee..f79dbda 100644 +--- a/xlators/storage/posix/src/posix.c ++++ b/xlators/storage/posix/src/posix.c +@@ -7113,6 +7113,8 @@ notify (xlator_t *this, + void *data, + ...) 
+ { ++ xlator_t *victim = data; ++ + switch (event) + { + case GF_EVENT_PARENT_UP: +@@ -7121,6 +7123,15 @@ notify (xlator_t *this, + default_notify (this, GF_EVENT_CHILD_UP, data); + } + break; ++ case GF_EVENT_PARENT_DOWN: ++ { ++ if (!victim->cleanup_starting) ++ break; ++ gf_log(this->name, GF_LOG_INFO, "Sending CHILD_DOWN for brick %s", ++ victim->name); ++ default_notify(this->parents->xlator, GF_EVENT_CHILD_DOWN, data); ++ } ++ break; + default: + /* */ + break; +-- +1.8.3.1 + diff --git a/0391-glusterd-Use-GF_ATOMIC-to-update-blockers-counter-at.patch b/0391-glusterd-Use-GF_ATOMIC-to-update-blockers-counter-at.patch new file mode 100644 index 0000000..31a63b6 --- /dev/null +++ b/0391-glusterd-Use-GF_ATOMIC-to-update-blockers-counter-at.patch @@ -0,0 +1,132 @@ +From cb565207cb7a3e7a9182bbed57a231f12bcaae0e Mon Sep 17 00:00:00 2001 +From: Mohit Agrawal +Date: Thu, 20 Sep 2018 18:11:36 +0530 +Subject: [PATCH 391/399] glusterd: Use GF_ATOMIC to update 'blockers' counter + at glusterd_conf + +Problem: Currently in glusterd code uses sync_lock/sync_unlock to + update blockers counter which could add delays to the overall + transaction phase escpecially when there's a batch of volume + stop operations processed by glusterd in brick multiplexing mode. + +Solution: Use GF_ATOMIC to update blocker counter to ensure unnecessary + context switching can be avoided. 
+ +> Change-Id: Ie13177dfee2af66687ae7cf5c67405c152853990 +> Fixes: bz#1631128 +> (Cherry picked from commit 4f6ae853ffa9d06446407f389aaef61ac0b3b424) +> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/21221/) + +Change-Id: I3023bce5ba50bc04e078c56ba6fa62a5b791d205 +BUG: 1631329 +Signed-off-by: Mohit Agrawal +Reviewed-on: https://code.engineering.redhat.com/gerrit/150641 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/mgmt/glusterd/src/glusterd-op-sm.c | 4 +++- + xlators/mgmt/glusterd/src/glusterd-utils.c | 15 +++++---------- + xlators/mgmt/glusterd/src/glusterd.c | 2 +- + xlators/mgmt/glusterd/src/glusterd.h | 2 +- + 4 files changed, 10 insertions(+), 13 deletions(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +index 6dfd819..df5b5c2 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c ++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +@@ -6259,9 +6259,11 @@ glusterd_op_stage_validate (glusterd_op_t op, dict_t *dict, char **op_errstr, + static void + glusterd_wait_for_blockers (glusterd_conf_t *priv) + { +- while (priv->blockers) { ++ uint64_t blockers = GF_ATOMIC_GET(priv->blockers); ++ while (blockers) { + synclock_unlock (&priv->big_lock); + sleep (1); ++ blockers = GF_ATOMIC_GET(priv->blockers); + synclock_lock (&priv->big_lock); + } + } +diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c +index 2a176be..04fae63 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-utils.c ++++ b/xlators/mgmt/glusterd/src/glusterd-utils.c +@@ -5452,10 +5452,7 @@ my_callback (struct rpc_req *req, struct iovec *iov, int count, void *v_frame) + call_frame_t *frame = v_frame; + glusterd_conf_t *conf = frame->this->private; + +- synclock_lock (&conf->big_lock); +- --(conf->blockers); +- synclock_unlock (&conf->big_lock); +- ++ GF_ATOMIC_DEC(conf->blockers); + STACK_DESTROY (frame->root); + return 0; + } +@@ 
-5546,9 +5543,7 @@ attach_brick_callback (struct rpc_req *req, struct iovec *iov, int count, + } + } + out: +- synclock_lock (&conf->big_lock); +- --(conf->blockers); +- synclock_unlock (&conf->big_lock); ++ GF_ATOMIC_DEC(conf->blockers); + STACK_DESTROY (frame->root); + return 0; + } +@@ -5633,7 +5628,7 @@ send_attach_req (xlator_t *this, struct rpc_clnt *rpc, char *path, + cbkfn = attach_brick_callback; + } + /* Send the msg */ +- ++(conf->blockers); ++ GF_ATOMIC_INC(conf->blockers); + ret = rpc_clnt_submit (rpc, &gd_brick_prog, op, + cbkfn, &iov, 1, NULL, 0, iobref, + frame, NULL, 0, NULL, 0, NULL); +@@ -6380,7 +6375,7 @@ glusterd_restart_bricks (void *opaque) + } + conf->restart_bricks = _gf_true; + +- ++(conf->blockers); ++ GF_ATOMIC_INC(conf->blockers); + ret = glusterd_get_quorum_cluster_counts (this, &active_count, + &quorum_count); + if (ret) +@@ -6497,7 +6492,7 @@ glusterd_restart_bricks (void *opaque) + ret = 0; + + out: +- --(conf->blockers); ++ GF_ATOMIC_DEC(conf->blockers); + conf->restart_done = _gf_true; + conf->restart_bricks = _gf_false; + +diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c +index 78a37eb..076019f 100644 +--- a/xlators/mgmt/glusterd/src/glusterd.c ++++ b/xlators/mgmt/glusterd/src/glusterd.c +@@ -1979,7 +1979,7 @@ init (xlator_t *this) + } + } + +- conf->blockers = 0; ++ GF_ATOMIC_INIT(conf->blockers, 0); + /* If the peer count is less than 2 then this would be the best time to + * spawn process/bricks that may need (re)starting since last time + * (this) glusterd was up. 
*/ +diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h +index d4f4f7e..8c70d48 100644 +--- a/xlators/mgmt/glusterd/src/glusterd.h ++++ b/xlators/mgmt/glusterd/src/glusterd.h +@@ -198,7 +198,7 @@ typedef struct { + int ping_timeout; + uint32_t generation; + int32_t workers; +- uint32_t blockers; ++ gf_atomic_t blockers; + uint32_t mgmt_v3_lock_timeout; + gf_boolean_t restart_bricks; + } glusterd_conf_t; +-- +1.8.3.1 + diff --git a/0392-glusterd-don-t-wait-for-blockers-flag-for-stop-volum.patch b/0392-glusterd-don-t-wait-for-blockers-flag-for-stop-volum.patch new file mode 100644 index 0000000..716affd --- /dev/null +++ b/0392-glusterd-don-t-wait-for-blockers-flag-for-stop-volum.patch @@ -0,0 +1,46 @@ +From c5bde98eb28ed3ae6707b7eca3c95f4e5e386c23 Mon Sep 17 00:00:00 2001 +From: Mohit Agrawal +Date: Wed, 26 Sep 2018 15:23:28 +0530 +Subject: [PATCH 392/399] glusterd: don't wait for blockers flag for stop + volume + +With commit 4f6ae8 even though the overall transaction time for gluster +volume stop can be reduced, but based on testing it can't be guaranteed +that the transaction can finish in 3 minutes before the unlock timer +gets kicked in. The ground problem to this is the command serialization +which atomic field 'blockers' does for volume stop operation. + +This patch removes that dependency for volume stop not to wait for +blockers. 
+ +> Change-Id: Ifaf120115dc7ed21cf21e65e25c4ec7c61ab8258 +> Fixes: bz#1631128 +> Signed-off-by: Atin Mukherjee +> (Cherry picked from commit f72f18b3bf7f9535827a059a5090476b3141723f) +> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/21242/) + +Change-Id: Ia3cb4d812ea1c633b7a501a03e0bf25a20b45a8e +BUG: 1631329 +Signed-off-by: Mohit Agrawal +Reviewed-on: https://code.engineering.redhat.com/gerrit/151065 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/mgmt/glusterd/src/glusterd-op-sm.c | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +index df5b5c2..716d3f2 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c ++++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c +@@ -6286,7 +6286,6 @@ glusterd_op_commit_perform (glusterd_op_t op, dict_t *dict, char **op_errstr, + break; + + case GD_OP_STOP_VOLUME: +- glusterd_wait_for_blockers (this->private); + ret = glusterd_op_stop_volume (dict); + break; + +-- +1.8.3.1 + diff --git a/0393-core-Pass-xlator_name-in-server_call_xlator_mem_clea.patch b/0393-core-Pass-xlator_name-in-server_call_xlator_mem_clea.patch new file mode 100644 index 0000000..87bf7b0 --- /dev/null +++ b/0393-core-Pass-xlator_name-in-server_call_xlator_mem_clea.patch @@ -0,0 +1,40 @@ +From 2b97774c20920745251665893f7dcf32dece8df7 Mon Sep 17 00:00:00 2001 +From: Mohit Agrawal +Date: Tue, 9 Oct 2018 11:12:52 +0530 +Subject: [PATCH 393/399] core: Pass xlator_name in + server_call_xlator_mem_cleanup + +Problem: In the commit of patch 9b26837434977601f1e48477904486ea032f742a + xlator_name missed at the time of calling function + server_call_xlator_mem_cleanup + +Solution: Correct the function argument at the time of calling + function + +> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/21235/) +BUG: 1631372 +Change-Id: I80e735fb2cea4c715f7d3210c5781862aea10a92 +Signed-off-by: Mohit 
Agrawal +Reviewed-on: https://code.engineering.redhat.com/gerrit/152213 +Tested-by: RHGS Build Bot +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/protocol/server/src/server.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c +index 8ac0bd1..11ee7ba 100644 +--- a/xlators/protocol/server/src/server.c ++++ b/xlators/protocol/server/src/server.c +@@ -1732,7 +1732,7 @@ notify (xlator_t *this, int32_t event, void *data, ...) + glusterfs_mgmt_pmap_signout (ctx, + victim->name); + if (!xprt_found && victim_found) { +- server_call_xlator_mem_cleanup (this, victim); ++ server_call_xlator_mem_cleanup (this, victim->name); + } + } + break; +-- +1.8.3.1 + diff --git a/0394-io-stats-prevent-taking-file-dump-on-server-side.patch b/0394-io-stats-prevent-taking-file-dump-on-server-side.patch new file mode 100644 index 0000000..30bc844 --- /dev/null +++ b/0394-io-stats-prevent-taking-file-dump-on-server-side.patch @@ -0,0 +1,43 @@ +From c93ade55ce9794952b372b91aff7bc380b49fd52 Mon Sep 17 00:00:00 2001 +From: Amar Tumballi +Date: Sun, 7 Oct 2018 11:45:35 +0530 +Subject: [PATCH 394/399] io-stats: prevent taking file dump on server side + +By allowing clients taking dump in a file on brick process, we are +allowing compromised clients to create io-stats dumps on server, +which can exhaust all the available inodes. 
+ +Fixes: CVE-2018-14659 + +BUG: 1636305 +Change-Id: I64fc530363b78f849011eb3c91355566ee0c485b +Signed-off-by: Amar Tumballi +Reviewed-on: https://code.engineering.redhat.com/gerrit/152024 +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/debug/io-stats/src/io-stats.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/xlators/debug/io-stats/src/io-stats.c b/xlators/debug/io-stats/src/io-stats.c +index 16a11df..0f71334 100644 +--- a/xlators/debug/io-stats/src/io-stats.c ++++ b/xlators/debug/io-stats/src/io-stats.c +@@ -3022,6 +3022,15 @@ conditional_dump (dict_t *dict, char *key, data_t *value, void *data) + stub = data; + this = stub->this; + ++ /* Don't do this on 'brick-side', only do this on client side */ ++ /* Addresses CVE-2018-14659 */ ++ if (this->ctx->process_mode != GF_CLIENT_PROCESS) { ++ gf_log(this->name, GF_LOG_DEBUG, ++ "taking io-stats dump using setxattr not permitted on brick." ++ " Use 'gluster profile' instead"); ++ return -1; ++ } ++ + /* Create a file name that is appended with the io-stats instance + name as well. This helps when there is more than a single io-stats + instance in the graph, or the client and server processes are running +-- +1.8.3.1 + diff --git a/0395-index-prevent-arbitrary-file-creation-outside-entry-.patch b/0395-index-prevent-arbitrary-file-creation-outside-entry-.patch new file mode 100644 index 0000000..91b359c --- /dev/null +++ b/0395-index-prevent-arbitrary-file-creation-outside-entry-.patch @@ -0,0 +1,68 @@ +From d5c5cbe82ef0f7bf8686e71cf08b92e7baf62f55 Mon Sep 17 00:00:00 2001 +From: Ravishankar N +Date: Sat, 6 Oct 2018 00:50:53 +0530 +Subject: [PATCH 395/399] index: prevent arbitrary file creation outside + entry-changes folder + +Problem: +A compromised client can set arbitrary values for the GF_XATTROP_ENTRY_IN_KEY +and GF_XATTROP_ENTRY_OUT_KEY during xattrop fop. These values are +consumed by index as a filename to be created/deleted according to the key. 
+Thus it is possible to create/delete random files even outside the gluster +volume boundary. + +Fix: +Index expects the filename to be a basename, i.e. it must not contain any +pathname components like "/" or "../". Enforce this. + +Fixes: CVE-2018-14654 +BUG: 1634671 +Change-Id: Ib01c35414c36e3101af9e99a1ea17535ef8bd3b3 +Signed-off-by: Ravishankar N +Reviewed-on: https://code.engineering.redhat.com/gerrit/151985 +Reviewed-by: Amar Tumballi +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/features/index/src/index.c | 19 +++++++++++++++++++ + 1 file changed, 19 insertions(+) + +diff --git a/xlators/features/index/src/index.c b/xlators/features/index/src/index.c +index bf3f4dd..89cdbda 100644 +--- a/xlators/features/index/src/index.c ++++ b/xlators/features/index/src/index.c +@@ -852,6 +852,15 @@ index_entry_create (xlator_t *this, inode_t *inode, char *filename) + ctx->state[ENTRY_CHANGES] = IN; + } + ++ if (strchr (filename, '/')) { ++ gf_msg (this->name, GF_LOG_ERROR, EINVAL, ++ INDEX_MSG_INDEX_ADD_FAILED, ++ "Got invalid entry (%s) for pargfid path (%s)", ++ filename, pgfid_path); ++ op_errno = EINVAL; ++ goto out; ++ } ++ + op_errno = 0; + + snprintf (entry_path, sizeof(entry_path), "%s/%s", pgfid_path, +@@ -886,6 +895,16 @@ index_entry_delete (xlator_t *this, uuid_t pgfid, char *filename) + + make_gfid_path (priv->index_basepath, ENTRY_CHANGES_SUBDIR, pgfid, + pgfid_path, sizeof (pgfid_path)); ++ ++ if (strchr (filename, '/')) { ++ gf_msg (this->name, GF_LOG_ERROR, EINVAL, ++ INDEX_MSG_INDEX_DEL_FAILED, ++ "Got invalid entry (%s) for pargfid path (%s)", ++ filename, pgfid_path); ++ op_errno = EINVAL; ++ goto out; ++ } ++ + snprintf (entry_path, sizeof(entry_path), "%s/%s", pgfid_path, + filename); + +-- +1.8.3.1 + diff --git a/0396-protocol-remove-the-option-verify-volfile-checksum.patch b/0396-protocol-remove-the-option-verify-volfile-checksum.patch new file mode 100644 index 0000000..e62a250 --- /dev/null +++ 
b/0396-protocol-remove-the-option-verify-volfile-checksum.patch @@ -0,0 +1,458 @@ +From 00c78b9eb52d8a631cdaef883cd507bd0889639a Mon Sep 17 00:00:00 2001 +From: Amar Tumballi +Date: Fri, 28 Sep 2018 12:06:09 +0530 +Subject: [PATCH 396/399] protocol: remove the option 'verify-volfile-checksum' + +'getspec' operation is not used between 'client' and 'server' ever since +we have off-loaded volfile management to glusterd, ie, at least 7 years. + +No reason to keep the dead code! The removed option had no meaning, +as glusterd didn't provide a way to set (or unset) this option. So, +no regression should be observed from any of the existing glusterfs +deployment, supported or unsupported. + +Fixes: CVE-2018-14653 + +BUG: 1634668 +Change-Id: I8b3a4d302b3c222e065b484cfe449b9c116393f8 +Signed-off-by: Amar Tumballi +Reviewed-on: https://code.engineering.redhat.com/gerrit/151322 +Reviewed-by: Pranith Kumar Karampuri +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/protocol/client/src/client-handshake.c | 83 +------- + xlators/protocol/server/src/server-handshake.c | 276 +------------------------ + xlators/protocol/server/src/server.c | 3 - + 3 files changed, 5 insertions(+), 357 deletions(-) + +diff --git a/xlators/protocol/client/src/client-handshake.c b/xlators/protocol/client/src/client-handshake.c +index aee6b3a..7b36178 100644 +--- a/xlators/protocol/client/src/client-handshake.c ++++ b/xlators/protocol/client/src/client-handshake.c +@@ -37,91 +37,10 @@ typedef struct client_fd_lk_local { + clnt_fd_ctx_t *fdctx; + }clnt_fd_lk_local_t; + +-int +-client3_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count, +- void *myframe) +-{ +- gf_getspec_rsp rsp = {0,}; +- call_frame_t *frame = NULL; +- int ret = 0; +- +- frame = myframe; +- +- if (!frame || !frame->this) { +- gf_msg (THIS->name, GF_LOG_ERROR, EINVAL, PC_MSG_INVALID_ENTRY, +- "frame not found with the request, returning EINVAL"); +- rsp.op_ret = -1; +- rsp.op_errno = EINVAL; +- goto out; +- } 
+- if (-1 == req->rpc_status) { +- gf_msg (frame->this->name, GF_LOG_WARNING, ENOTCONN, +- PC_MSG_RPC_STATUS_ERROR, "received RPC status error, " +- "returning ENOTCONN"); +- rsp.op_ret = -1; +- rsp.op_errno = ENOTCONN; +- goto out; +- } +- +- ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp); +- if (ret < 0) { +- gf_msg (frame->this->name, GF_LOG_ERROR, EINVAL, +- PC_MSG_XDR_DECODING_FAILED, +- "XDR decoding failed, returning EINVAL"); +- rsp.op_ret = -1; +- rsp.op_errno = EINVAL; +- goto out; +- } +- +- if (-1 == rsp.op_ret) { +- gf_msg (frame->this->name, GF_LOG_WARNING, 0, +- PC_MSG_VOL_FILE_NOT_FOUND, "failed to get the 'volume " +- "file' from server"); +- goto out; +- } +- +-out: +- CLIENT_STACK_UNWIND (getspec, frame, rsp.op_ret, rsp.op_errno, +- rsp.spec); +- +- /* Don't use 'GF_FREE', this is allocated by libc */ +- free (rsp.spec); +- free (rsp.xdata.xdata_val); +- +- return 0; +-} +- + int32_t client3_getspec (call_frame_t *frame, xlator_t *this, void *data) + { +- clnt_conf_t *conf = NULL; +- clnt_args_t *args = NULL; +- gf_getspec_req req = {0,}; +- int op_errno = ESTALE; +- int ret = 0; +- +- if (!frame || !this || !data) +- goto unwind; +- +- args = data; +- conf = this->private; +- req.flags = args->flags; +- req.key = (char *)args->name; +- +- ret = client_submit_request (this, &req, frame, conf->handshake, +- GF_HNDSK_GETSPEC, client3_getspec_cbk, +- NULL, NULL, 0, NULL, 0, NULL, +- (xdrproc_t)xdr_gf_getspec_req); +- +- if (ret) { +- gf_msg (this->name, GF_LOG_WARNING, 0, PC_MSG_SEND_REQ_FAIL, +- "failed to send the request"); +- } +- +- return 0; +-unwind: +- CLIENT_STACK_UNWIND (getspec, frame, -1, op_errno, NULL); ++ CLIENT_STACK_UNWIND (getspec, frame, -1, ENOSYS, NULL); + return 0; +- + } + + int +diff --git a/xlators/protocol/server/src/server-handshake.c b/xlators/protocol/server/src/server-handshake.c +index 75577fa..217678a 100644 +--- a/xlators/protocol/server/src/server-handshake.c ++++ 
b/xlators/protocol/server/src/server-handshake.c +@@ -38,204 +38,13 @@ gf_compare_client_version (rpcsvc_request_t *req, int fop_prognum, + } + + int +-_volfile_update_checksum (xlator_t *this, char *key, uint32_t checksum) +-{ +- server_conf_t *conf = NULL; +- struct _volfile_ctx *temp_volfile = NULL; +- +- conf = this->private; +- temp_volfile = conf->volfile; +- +- while (temp_volfile) { +- if ((NULL == key) && (NULL == temp_volfile->key)) +- break; +- if ((NULL == key) || (NULL == temp_volfile->key)) { +- temp_volfile = temp_volfile->next; +- continue; +- } +- if (strcmp (temp_volfile->key, key) == 0) +- break; +- temp_volfile = temp_volfile->next; +- } +- +- if (!temp_volfile) { +- temp_volfile = GF_CALLOC (1, sizeof (struct _volfile_ctx), +- gf_server_mt_volfile_ctx_t); +- if (!temp_volfile) +- goto out; +- temp_volfile->next = conf->volfile; +- temp_volfile->key = (key)? gf_strdup (key): NULL; +- temp_volfile->checksum = checksum; +- +- conf->volfile = temp_volfile; +- goto out; +- } +- +- if (temp_volfile->checksum != checksum) { +- gf_msg (this->name, GF_LOG_INFO, 0, PS_MSG_REMOUNT_CLIENT_REQD, +- "the volume file was modified between a prior access " +- "and now. This may lead to inconsistency between " +- "clients, you are advised to remount client"); +- temp_volfile->checksum = checksum; +- } +- +-out: +- return 0; +-} +- +- +-static size_t +-getspec_build_volfile_path (xlator_t *this, const char *key, char *path, +- size_t path_len) +-{ +- char *filename = NULL; +- server_conf_t *conf = NULL; +- int ret = -1; +- int free_filename = 0; +- char data_key[256] = {0,}; +- +- conf = this->private; +- +- /* Inform users that this option is changed now */ +- ret = dict_get_str (this->options, "client-volume-filename", +- &filename); +- if (ret == 0) { +- gf_msg (this->name, GF_LOG_WARNING, 0, PS_MSG_DEFAULTING_FILE, +- "option 'client-volume-filename' is changed to " +- "'volume-filename.' 
which now takes 'key' as an " +- "option to choose/fetch different files from server. " +- "Refer documentation or contact developers for more " +- "info. Currently defaulting to given file '%s'", +- filename); +- } +- +- if (key && !filename) { +- sprintf (data_key, "volume-filename.%s", key); +- ret = dict_get_str (this->options, data_key, &filename); +- if (ret < 0) { +- /* Make sure that key doesn't contain "../" in path */ +- if ((gf_strstr (key, "/", "..")) == -1) { +- gf_msg (this->name, GF_LOG_ERROR, EINVAL, +- PS_MSG_INVALID_ENTRY, "%s: invalid " +- "key", key); +- goto out; +- } +- } +- } +- +- if (!filename) { +- ret = dict_get_str (this->options, +- "volume-filename.default", &filename); +- if (ret < 0) { +- gf_msg_debug (this->name, 0, "no default volume " +- "filename given, defaulting to %s", +- DEFAULT_VOLUME_FILE_PATH); +- } +- } +- +- if (!filename && key) { +- ret = gf_asprintf (&filename, "%s/%s.vol", conf->conf_dir, key); +- if (-1 == ret) +- goto out; +- +- free_filename = 1; +- } +- if (!filename) +- filename = DEFAULT_VOLUME_FILE_PATH; +- +- ret = -1; +- +- if ((filename) && (path_len > strlen (filename))) { +- strcpy (path, filename); +- ret = strlen (filename); +- } +- +-out: +- if (free_filename) +- GF_FREE (filename); +- +- return ret; +-} +- +-int +-_validate_volfile_checksum (xlator_t *this, char *key, +- uint32_t checksum) +-{ +- char filename[PATH_MAX] = {0,}; +- server_conf_t *conf = NULL; +- struct _volfile_ctx *temp_volfile = NULL; +- int ret = 0; +- int fd = 0; +- uint32_t local_checksum = 0; +- +- conf = this->private; +- temp_volfile = conf->volfile; +- +- if (!checksum) +- goto out; +- +- if (!temp_volfile) { +- ret = getspec_build_volfile_path (this, key, filename, +- sizeof (filename)); +- if (ret <= 0) +- goto out; +- fd = open (filename, O_RDONLY); +- if (-1 == fd) { +- ret = 0; +- gf_msg (this->name, GF_LOG_INFO, errno, +- PS_MSG_VOL_FILE_OPEN_FAILED, +- "failed to open volume file (%s) : %s", +- filename, strerror 
(errno)); +- goto out; +- } +- get_checksum_for_file (fd, &local_checksum); +- _volfile_update_checksum (this, key, local_checksum); +- sys_close (fd); +- } +- +- temp_volfile = conf->volfile; +- while (temp_volfile) { +- if ((NULL == key) && (NULL == temp_volfile->key)) +- break; +- if ((NULL == key) || (NULL == temp_volfile->key)) { +- temp_volfile = temp_volfile->next; +- continue; +- } +- if (strcmp (temp_volfile->key, key) == 0) +- break; +- temp_volfile = temp_volfile->next; +- } +- +- if (!temp_volfile) +- goto out; +- +- if ((temp_volfile->checksum) && +- (checksum != temp_volfile->checksum)) +- ret = -1; +- +-out: +- return ret; +-} +- +- +-int + server_getspec (rpcsvc_request_t *req) + { +- int32_t ret = -1; ++ int ret = 0; + int32_t op_errno = ENOENT; +- int32_t spec_fd = -1; +- size_t file_len = 0; +- char filename[PATH_MAX] = {0,}; +- struct stat stbuf = {0,}; +- uint32_t checksum = 0; +- char *key = NULL; +- server_conf_t *conf = NULL; +- xlator_t *this = NULL; + gf_getspec_req args = {0,}; + gf_getspec_rsp rsp = {0,}; + +- this = req->svc->xl; +- conf = this->private; + ret = xdr_to_generic (req->msg[0], &args, + (xdrproc_t)xdr_gf_getspec_req); + if (ret < 0) { +@@ -245,58 +54,11 @@ server_getspec (rpcsvc_request_t *req) + goto fail; + } + +- ret = getspec_build_volfile_path (this, args.key, +- filename, sizeof (filename)); +- if (ret > 0) { +- /* to allocate the proper buffer to hold the file data */ +- ret = sys_stat (filename, &stbuf); +- if (ret < 0){ +- gf_msg (this->name, GF_LOG_ERROR, errno, +- PS_MSG_STAT_ERROR, "Unable to stat %s (%s)", +- filename, strerror (errno)); +- op_errno = errno; +- goto fail; +- } +- +- spec_fd = open (filename, O_RDONLY); +- if (spec_fd < 0) { +- gf_msg (this->name, GF_LOG_ERROR, errno, +- PS_MSG_FILE_OP_FAILED, "Unable to open %s " +- "(%s)", filename, strerror (errno)); +- op_errno = errno; +- goto fail; +- } +- ret = file_len = stbuf.st_size; +- +- if (conf->verify_volfile) { +- get_checksum_for_file (spec_fd, 
&checksum); +- _volfile_update_checksum (this, key, checksum); +- } +- } else { +- op_errno = ENOENT; +- } +- +- if (file_len) { +- rsp.spec = GF_CALLOC (file_len, sizeof (char), +- gf_server_mt_rsp_buf_t); +- if (!rsp.spec) { +- ret = -1; +- op_errno = ENOMEM; +- goto fail; +- } +- ret = sys_read (spec_fd, rsp.spec, file_len); +- } +- +- /* convert to XDR */ +- op_errno = errno; ++ op_errno = ENOSYS; + fail: +- if (!rsp.spec) +- rsp.spec = ""; ++ rsp.spec = ""; + rsp.op_errno = gf_errno_to_error (op_errno); +- rsp.op_ret = ret; +- +- if (spec_fd != -1) +- sys_close (spec_fd); ++ rsp.op_ret = -1; + + server_submit_reply (NULL, req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gf_getspec_rsp); +@@ -459,9 +221,7 @@ server_setvolume (rpcsvc_request_t *req) + char *clnt_version = NULL; + xlator_t *xl = NULL; + char *msg = NULL; +- char *volfile_key = NULL; + xlator_t *this = NULL; +- uint32_t checksum = 0; + int32_t ret = -1; + int32_t op_ret = -1; + int32_t op_errno = EINVAL; +@@ -756,34 +516,6 @@ server_setvolume (rpcsvc_request_t *req) + goto fail; + } + +- if (conf->verify_volfile) { +- ret = dict_get_uint32 (params, "volfile-checksum", &checksum); +- if (ret == 0) { +- ret = dict_get_str (params, "volfile-key", +- &volfile_key); +- if (ret) +- gf_msg_debug (this->name, 0, "failed to get " +- "'volfile-key'"); +- +- ret = _validate_volfile_checksum (this, volfile_key, +- checksum); +- if (-1 == ret) { +- ret = dict_set_str (reply, "ERROR", +- "volume-file checksum " +- "varies from earlier " +- "access"); +- if (ret < 0) +- gf_msg_debug (this->name, 0, "failed " +- "to set error msg"); +- +- op_ret = -1; +- op_errno = ESTALE; +- goto fail; +- } +- } +- } +- +- + peerinfo = &req->trans->peerinfo; + if (peerinfo) { + ret = dict_set_static_ptr (params, "peer-info", peerinfo); +diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c +index 11ee7ba..d0e815e 100644 +--- a/xlators/protocol/server/src/server.c ++++ 
b/xlators/protocol/server/src/server.c +@@ -1797,9 +1797,6 @@ struct volume_options options[] = { + .description = "Specifies the limit on the number of inodes " + "in the lru list of the inode cache." + }, +- { .key = {"verify-volfile-checksum"}, +- .type = GF_OPTION_TYPE_BOOL +- }, + { .key = {"trace"}, + .type = GF_OPTION_TYPE_BOOL + }, +-- +1.8.3.1 + diff --git a/0397-features-locks-add-buffer-overflow-checks-in-pl_getx.patch b/0397-features-locks-add-buffer-overflow-checks-in-pl_getx.patch new file mode 100644 index 0000000..6ce6947 --- /dev/null +++ b/0397-features-locks-add-buffer-overflow-checks-in-pl_getx.patch @@ -0,0 +1,59 @@ +From b29b4b4ec846861c975bfa580386d25d48eaa087 Mon Sep 17 00:00:00 2001 +From: Ravishankar N +Date: Mon, 8 Oct 2018 11:04:14 +0530 +Subject: [PATCH 397/399] features/locks: add buffer overflow checks in + pl_getxattr + +Problem: +A compromised client can send a variable length buffer value for the +GF_XATTR_CLRLK_CMD virtual xattr. If the length is greater than the +size of the "key" used to send the response back, locks xlator can +segfault when it tries to do a dict_set because of the buffer overflow +in strncpy of pl_getxattr(). + +Fix: +Perform size checks while forming the 'key'. + +Note: +This fix is already there in the master branch upstream. + +Also, it looks like the size PATH_MAX used for 'key' array is not really +needed since the maximum length seems to be +"strlen(glusterfs.clrlk.tentry.kblocked) + NAME_MAX" where NAME_MAX is +used for the basename value in the clear-locks CLI: + +'gluster volume clear-locks VOLNAME path kind {blocked | granted | all} {inode range | entry basename | posix range}' + +But that can be done some other day. 
+ +Fixes: CVE-2018-14652 +BUG: 1634669 +Change-Id: I101693e91f9ea2bd26cef6c0b7d82527fefcb3e2 +Signed-off-by: Ravishankar N +Reviewed-on: https://code.engineering.redhat.com/gerrit/152038 +Reviewed-by: Amar Tumballi +Reviewed-by: Krutika Dhananjay +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/features/locks/src/posix.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c +index 63bcf31..63f914c 100644 +--- a/xlators/features/locks/src/posix.c ++++ b/xlators/features/locks/src/posix.c +@@ -1120,7 +1120,10 @@ pl_getxattr (call_frame_t *frame, xlator_t *this, loc_t *loc, + goto out; + } + +- strncpy (key, name, strlen (name)); ++ if (snprintf(key, sizeof(key), "%s", name) >= sizeof(key)) { ++ op_ret = -1; ++ goto out; ++ } + if (dict_set_dynstr (dict, key, lk_summary)) { + op_ret = -1; + op_errno = ENOMEM; +-- +1.8.3.1 + diff --git a/0398-lock-Do-not-allow-meta-lock-count-to-be-more-than-on.patch b/0398-lock-Do-not-allow-meta-lock-count-to-be-more-than-on.patch new file mode 100644 index 0000000..b7adbb7 --- /dev/null +++ b/0398-lock-Do-not-allow-meta-lock-count-to-be-more-than-on.patch @@ -0,0 +1,88 @@ +From 2334f5b162e81d81673b59555baaf0a26189e603 Mon Sep 17 00:00:00 2001 +From: Susant Palai +Date: Mon, 8 Oct 2018 11:38:09 +0530 +Subject: [PATCH 398/399] lock: Do not allow meta-lock count to be more than + one + +In the current scheme of glusterfs where lock migration is +experimental, (ideally) the rebalance process which is migrating +the file should request for a metalock. Hence, the metalock count +should not be more than one for an inode. In future, if there is a +need for meta-lock from other clients, this patch can be reverted. + +Since pl_metalk is called as part of setxattr operation, any client +process(non-rebalance) residing outside trusted network can exhaust +memory of the server node by issuing setxattr repetitively on the +metalock key. 
The current patch makes sure that more than +one metalock cannot be granted on an inode. + +Fixes: CVE-2018-14660 + +Change-Id: I5a1dde0b24b0aedcfb29cc89dffc04ccc5a88bcb +BUG: 1636308 +Signed-off-by: Susant Palai +Reviewed-on: https://code.engineering.redhat.com/gerrit/152041 +Reviewed-by: Amar Tumballi +Tested-by: Amar Tumballi +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + xlators/features/locks/src/posix.c | 36 +++++++++++++++++++++++++++++++++++- + 1 file changed, 35 insertions(+), 1 deletion(-) + +diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c +index 63f914c..c58e6ba 100644 +--- a/xlators/features/locks/src/posix.c ++++ b/xlators/features/locks/src/posix.c +@@ -2938,6 +2938,40 @@ pl_metalk (call_frame_t *frame, xlator_t *this, inode_t *inode) + goto out; + } + ++ /* Non rebalance process trying to do metalock */ ++ if (frame->root->pid != GF_CLIENT_PID_DEFRAG) { ++ ret = -1; ++ goto out; ++ } ++ ++ ++ /* Note: In the current scheme of glusterfs where lock migration is ++ * experimental, (ideally) the rebalance process which is migrating ++ * the file should request for a metalock. Hence, the metalock count ++ * should not be more than one for an inode. In future, if there is a ++ * need for meta-lock from other clients, the following block can be ++ * removed. ++ * ++ * Since pl_metalk is called as part of setxattr operation, any client ++ * process(non-rebalance) residing outside trusted network can exhaust ++ * memory of the server node by issuing setxattr repetitively on the ++ * metalock key. 
The following code makes sure that more than ++ * one metalock cannot be granted on an inode*/ ++ pthread_mutex_lock (&pl_inode->mutex); ++ { ++ if (pl_metalock_is_active(pl_inode)) { ++ gf_msg (this->name, GF_LOG_WARNING, EINVAL, 0, ++ "More than one meta-lock can not be granted on " ++ "the inode"); ++ ret = -1; ++ } ++ } ++ pthread_mutex_unlock (&pl_inode->mutex); ++ ++ if (ret == -1) { ++ goto out; ++ } ++ + if (frame->root->client) { + ctx = pl_ctx_get (frame->root->client, this); + if (!ctx) { +@@ -3118,7 +3152,7 @@ pl_setxattr (call_frame_t *frame, xlator_t *this, + loc_t *loc, dict_t *dict, int flags, dict_t *xdata) + { + int op_ret = 0; +- int op_errno = 0; ++ int op_errno = EINVAL; + dict_t *xdata_rsp = NULL; + + PL_LOCAL_GET_REQUESTS (frame, this, xdata, NULL, loc, NULL); +-- +1.8.3.1 + diff --git a/0399-all-fix-the-format-string-exceptions.patch b/0399-all-fix-the-format-string-exceptions.patch new file mode 100644 index 0000000..96cc6ea --- /dev/null +++ b/0399-all-fix-the-format-string-exceptions.patch @@ -0,0 +1,643 @@ +From bff03720f92bfcde848f46dca6a2cfad7adaf42e Mon Sep 17 00:00:00 2001 +From: Amar Tumballi +Date: Tue, 9 Oct 2018 12:32:41 +0530 +Subject: [PATCH 399/399] all: fix the format string exceptions + +Currently, there are possibilities in few places, where a user-controlled +(like filename, program parameter etc) string can be passed as 'fmt' for +printf(), which can lead to segfault, if the user's string contains '%s', +'%d' in it. + +While fixing it, makes sense to make the explicit check for such issues +across the codebase, by making the format call properly. 
+ +Fixes: CVE-2018-14661 + +BUG: 1637084 +Change-Id: I63d6b65c61106f77c55f0922dc08a5b8fe421f23 +Signed-off-by: Amar Tumballi +Reviewed-on: https://code.engineering.redhat.com/gerrit/152221 +Reviewed-by: Xavi Hernandez +Reviewed-by: Sunil Kumar Heggodu Gopala Acharya +--- + cli/src/cli-cmd-volume.c | 2 +- + libglusterfs/src/client_t.c | 2 +- + libglusterfs/src/fd.c | 4 ++-- + libglusterfs/src/inode.c | 2 +- + libglusterfs/src/iobuf.c | 8 ++++---- + libglusterfs/src/latency.c | 2 +- + libglusterfs/src/logging.h | 3 ++- + libglusterfs/src/mem-pool.h | 3 ++- + libglusterfs/src/run.h | 3 ++- + libglusterfs/src/statedump.h | 6 ++++-- + xlators/cluster/afr/src/afr-common.c | 2 +- + xlators/cluster/ec/src/ec.c | 2 +- + xlators/debug/trace/src/trace.c | 2 +- + xlators/features/barrier/src/barrier.c | 4 ++-- + xlators/features/gfid-access/src/gfid-access.c | 2 +- + xlators/features/locks/src/posix.c | 10 +++++----- + xlators/features/shard/src/shard.c | 2 +- + xlators/mgmt/glusterd/src/glusterd-rebalance.c | 2 +- + xlators/mgmt/glusterd/src/glusterd-statedump.c | 2 +- + xlators/mount/fuse/src/fuse-bridge.c | 2 +- + xlators/performance/io-cache/src/io-cache.c | 8 ++++---- + xlators/performance/io-threads/src/io-threads.c | 2 +- + xlators/performance/md-cache/src/md-cache.c | 2 +- + xlators/performance/nl-cache/src/nl-cache-helper.c | 2 +- + xlators/performance/nl-cache/src/nl-cache.c | 2 +- + xlators/performance/open-behind/src/open-behind.c | 4 ++-- + xlators/performance/quick-read/src/quick-read.c | 4 ++-- + xlators/performance/read-ahead/src/read-ahead.c | 6 +++--- + xlators/performance/write-behind/src/write-behind.c | 6 +++--- + xlators/protocol/client/src/client.c | 2 +- + xlators/protocol/server/src/server.c | 2 +- + xlators/storage/posix/src/posix.c | 2 +- + 32 files changed, 56 insertions(+), 51 deletions(-) + +diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c +index 2639afa..a1f0840 100644 +--- a/cli/src/cli-cmd-volume.c ++++ 
b/cli/src/cli-cmd-volume.c +@@ -2846,7 +2846,7 @@ cli_launch_glfs_heal (int heal_op, dict_t *options) + runner_add_args (&runner, "source-brick", NULL); + runner_argprintf (&runner, "%s:%s", hostname, path); + if (dict_get_str (options, "file", &filename) == 0) +- runner_argprintf (&runner, filename); ++ runner_argprintf (&runner, "%s", filename); + break; + case GF_SHD_OP_SPLIT_BRAIN_FILES: + runner_add_args (&runner, "split-brain-info", NULL); +diff --git a/libglusterfs/src/client_t.c b/libglusterfs/src/client_t.c +index 17e3026..dce27c1 100644 +--- a/libglusterfs/src/client_t.c ++++ b/libglusterfs/src/client_t.c +@@ -650,7 +650,7 @@ clienttable_dump (clienttable_t *clienttable, char *prefix) + clienttable->cliententries[i].next_free) { + gf_proc_dump_build_key(key, prefix, + "cliententry[%d]", i); +- gf_proc_dump_add_section(key); ++ gf_proc_dump_add_section("%s", key); + cliententry_dump(&clienttable->cliententries[i], + key); + } +diff --git a/libglusterfs/src/fd.c b/libglusterfs/src/fd.c +index 2dc52ba..27c8e13 100644 +--- a/libglusterfs/src/fd.c ++++ b/libglusterfs/src/fd.c +@@ -1056,7 +1056,7 @@ fd_dump (fd_t *fd, char *prefix) + + if (fd->inode) { + gf_proc_dump_build_key (key, "inode", NULL); +- gf_proc_dump_add_section(key); ++ gf_proc_dump_add_section("%s", key); + inode_dump (fd->inode, key); + } + +@@ -1104,7 +1104,7 @@ fdtable_dump (fdtable_t *fdtable, char *prefix) + if (GF_FDENTRY_ALLOCATED == + fdtable->fdentries[i].next_free) { + gf_proc_dump_build_key(key, prefix, "fdentry[%d]", i); +- gf_proc_dump_add_section(key); ++ gf_proc_dump_add_section("%s", key); + fdentry_dump(&fdtable->fdentries[i], key); + } + } +diff --git a/libglusterfs/src/inode.c b/libglusterfs/src/inode.c +index cf264d8..1bc05a4 100644 +--- a/libglusterfs/src/inode.c ++++ b/libglusterfs/src/inode.c +@@ -31,7 +31,7 @@ + list_for_each_entry (inode, head, list) { \ + gf_proc_dump_build_key(key_buf, key_prefix, \ + "%s.%d",list_type, i++); \ +- gf_proc_dump_add_section(key_buf); \ 
++ gf_proc_dump_add_section("%s", key_buf); \ + inode_dump(inode, key); \ + } \ + } +diff --git a/libglusterfs/src/iobuf.c b/libglusterfs/src/iobuf.c +index 76584fc..f6b8558 100644 +--- a/libglusterfs/src/iobuf.c ++++ b/libglusterfs/src/iobuf.c +@@ -1174,7 +1174,7 @@ iobuf_arena_info_dump (struct iobuf_arena *iobuf_arena, const char *key_prefix) + gf_proc_dump_write(key, "%"PRIu64, iobuf_arena->page_size); + list_for_each_entry (trav, &iobuf_arena->active.list, list) { + gf_proc_dump_build_key(key, key_prefix,"active_iobuf.%d", i++); +- gf_proc_dump_add_section(key); ++ gf_proc_dump_add_section("%s", key); + iobuf_info_dump(trav, key); + } + +@@ -1215,21 +1215,21 @@ iobuf_stats_dump (struct iobuf_pool *iobuf_pool) + list_for_each_entry (trav, &iobuf_pool->arenas[j], list) { + snprintf(msg, sizeof(msg), + "arena.%d", i); +- gf_proc_dump_add_section(msg); ++ gf_proc_dump_add_section("%s", msg); + iobuf_arena_info_dump(trav,msg); + i++; + } + list_for_each_entry (trav, &iobuf_pool->purge[j], list) { + snprintf(msg, sizeof(msg), + "purge.%d", i); +- gf_proc_dump_add_section(msg); ++ gf_proc_dump_add_section("%s", msg); + iobuf_arena_info_dump(trav,msg); + i++; + } + list_for_each_entry (trav, &iobuf_pool->filled[j], list) { + snprintf(msg, sizeof(msg), + "filled.%d", i); +- gf_proc_dump_add_section(msg); ++ gf_proc_dump_add_section("%s", msg); + iobuf_arena_info_dump(trav,msg); + i++; + } +diff --git a/libglusterfs/src/latency.c b/libglusterfs/src/latency.c +index 1d75f5b..a890454 100644 +--- a/libglusterfs/src/latency.c ++++ b/libglusterfs/src/latency.c +@@ -169,7 +169,7 @@ gf_proc_dump_latency_info (xlator_t *xl) + int i; + + snprintf (key_prefix, GF_DUMP_MAX_BUF_LEN, "%s.latency", xl->name); +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + + for (i = 0; i < GF_FOP_MAXVALUE; i++) { + gf_proc_dump_build_key (key, key_prefix, "%s", +diff --git a/libglusterfs/src/logging.h b/libglusterfs/src/logging.h +index fd9a36d..4ed2a82 
100644 +--- a/libglusterfs/src/logging.h ++++ b/libglusterfs/src/logging.h +@@ -172,7 +172,8 @@ int _gf_log_callingfn (const char *domain, const char *file, + const char *fmt, ...) + __attribute__ ((__format__ (__printf__, 6, 7))); + +-int _gf_log_eh (const char *function, const char *fmt, ...); ++int _gf_log_eh (const char *function, const char *fmt, ...) ++ __attribute__ ((__format__ (__printf__, 2, 3))); + + + +diff --git a/libglusterfs/src/mem-pool.h b/libglusterfs/src/mem-pool.h +index 1272ad4..dfe1f9a 100644 +--- a/libglusterfs/src/mem-pool.h ++++ b/libglusterfs/src/mem-pool.h +@@ -86,7 +86,8 @@ int + gf_vasprintf (char **string_ptr, const char *format, va_list arg); + + int +-gf_asprintf (char **string_ptr, const char *format, ...); ++gf_asprintf (char **string_ptr, const char *format, ...) ++ __attribute__ ((__format__ (__printf__, 2, 3))); + + void + __gf_free (void *ptr); +diff --git a/libglusterfs/src/run.h b/libglusterfs/src/run.h +index 1dc4bf9..e47ce11 100644 +--- a/libglusterfs/src/run.h ++++ b/libglusterfs/src/run.h +@@ -76,7 +76,8 @@ void runner_add_args (runner_t *runner, ...); + * @param runner pointer to runner_t instance + * @param format printf style format specifier + */ +-void runner_argprintf (runner_t *runner, const char *format, ...); ++void runner_argprintf (runner_t *runner, const char *format, ...) ++ __attribute__ ((__format__ (__printf__, 2, 3))); + + /** + * log a message about the command to be run. +diff --git a/libglusterfs/src/statedump.h b/libglusterfs/src/statedump.h +index 0a7a97e..ee97cdb 100644 +--- a/libglusterfs/src/statedump.h ++++ b/libglusterfs/src/statedump.h +@@ -73,9 +73,11 @@ void gf_proc_dump_cleanup(void); + + void gf_proc_dump_info(int signum, glusterfs_ctx_t *ctx); + +-int gf_proc_dump_add_section(char *key,...); ++int gf_proc_dump_add_section(char *key, ...) 
++ __attribute__ ((__format__ (__printf__, 1, 2))); + +-int gf_proc_dump_write(char *key, char *value,...); ++int gf_proc_dump_write(char *key, char *value, ...) ++ __attribute__ ((__format__ (__printf__, 2, 3))); + + void inode_table_dump(inode_table_t *itable, char *prefix); + +diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c +index bded6a2..e8107c9 100644 +--- a/xlators/cluster/afr/src/afr-common.c ++++ b/xlators/cluster/afr/src/afr-common.c +@@ -4536,7 +4536,7 @@ afr_priv_dump (xlator_t *this) + + GF_ASSERT (priv); + snprintf(key_prefix, GF_DUMP_MAX_BUF_LEN, "%s.%s", this->type, this->name); +- gf_proc_dump_add_section(key_prefix); ++ gf_proc_dump_add_section("%s", key_prefix); + gf_proc_dump_write("child_count", "%u", priv->child_count); + for (i = 0; i < priv->child_count; i++) { + sprintf (key, "child_up[%d]", i); +diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c +index 9a23a45..9cb9580 100644 +--- a/xlators/cluster/ec/src/ec.c ++++ b/xlators/cluster/ec/src/ec.c +@@ -1316,7 +1316,7 @@ int32_t ec_dump_private(xlator_t *this) + GF_ASSERT(ec); + + snprintf(key_prefix, GF_DUMP_MAX_BUF_LEN, "%s.%s", this->type, this->name); +- gf_proc_dump_add_section(key_prefix); ++ gf_proc_dump_add_section("%s", key_prefix); + gf_proc_dump_write("nodes", "%u", ec->nodes); + gf_proc_dump_write("redundancy", "%u", ec->redundancy); + gf_proc_dump_write("fragment_size", "%u", ec->fragment_size); +diff --git a/xlators/debug/trace/src/trace.c b/xlators/debug/trace/src/trace.c +index 451ef9a..34ac4ca 100644 +--- a/xlators/debug/trace/src/trace.c ++++ b/xlators/debug/trace/src/trace.c +@@ -3059,7 +3059,7 @@ trace_dump_history (xlator_t *this) + if (conf && conf->log_history == _gf_true) { + gf_proc_dump_build_key (key_prefix, "xlator.debug.trace", + "history"); +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + eh_dump (this->history, NULL, dump_history_trace); + } + ret = 0; +diff 
--git a/xlators/features/barrier/src/barrier.c b/xlators/features/barrier/src/barrier.c +index ce3a255..8e964e8 100644 +--- a/xlators/features/barrier/src/barrier.c ++++ b/xlators/features/barrier/src/barrier.c +@@ -713,7 +713,7 @@ __barrier_dump_queue (barrier_priv_t *priv) + + list_for_each_entry (stub, &priv->queue, list) { + snprintf (key, sizeof (key), "stub.%d", i++); +- gf_proc_dump_add_section (key); ++ gf_proc_dump_add_section ("%s", key); + barrier_dump_stub(stub, key); + } + +@@ -735,7 +735,7 @@ barrier_dump_priv (xlator_t *this) + return 0; + + gf_proc_dump_build_key (key, "xlator.features.barrier", "priv"); +- gf_proc_dump_add_section (key); ++ gf_proc_dump_add_section ("%s", key); + + LOCK (&priv->lock); + { +diff --git a/xlators/features/gfid-access/src/gfid-access.c b/xlators/features/gfid-access/src/gfid-access.c +index 7d75b09..aa8aac1 100644 +--- a/xlators/features/gfid-access/src/gfid-access.c ++++ b/xlators/features/gfid-access/src/gfid-access.c +@@ -1382,7 +1382,7 @@ ga_dump_inodectx (xlator_t *this, inode_t *inode) + if (ret == 0) { + tmp_inode = (void*) value; + gf_proc_dump_build_key (key_prefix, this->name, "inode"); +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + gf_proc_dump_write ("real-gfid", "%s", + uuid_utoa (tmp_inode->gfid)); + } +diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c +index c58e6ba..b434a08 100644 +--- a/xlators/features/locks/src/posix.c ++++ b/xlators/features/locks/src/posix.c +@@ -3292,7 +3292,7 @@ __dump_entrylks (pl_inode_t *pl_inode) + blocked, granted); + } + +- gf_proc_dump_write(key, tmp); ++ gf_proc_dump_write(key, "%s", tmp); + + count++; + } +@@ -3313,7 +3313,7 @@ __dump_entrylks (pl_inode_t *pl_inode) + lkowner_utoa (&lock->owner), lock->client, + lock->connection_id, blocked); + +- gf_proc_dump_write(key, tmp); ++ gf_proc_dump_write(key, "%s", tmp); + + count++; + } +@@ -3364,7 +3364,7 @@ __dump_inodelks (pl_inode_t *pl_inode) 
+ &lock->granted_time.tv_sec, + &lock->blkd_time.tv_sec, + _gf_true); +- gf_proc_dump_write(key, tmp); ++ gf_proc_dump_write(key, "%s", tmp); + + count++; + } +@@ -3380,7 +3380,7 @@ __dump_inodelks (pl_inode_t *pl_inode) + lock->client, lock->connection_id, + 0, &lock->blkd_time.tv_sec, + _gf_false); +- gf_proc_dump_write(key, tmp); ++ gf_proc_dump_write(key, "%s", tmp); + + count++; + } +@@ -3421,7 +3421,7 @@ __dump_posixlks (pl_inode_t *pl_inode) + &lock->owner, lock->client, NULL, + &lock->granted_time.tv_sec, &lock->blkd_time.tv_sec, + (lock->blocked)? _gf_false: _gf_true); +- gf_proc_dump_write(key, tmp); ++ gf_proc_dump_write(key, "%s", tmp); + + count++; + } +diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c +index d67cdf4..f5fb181 100644 +--- a/xlators/features/shard/src/shard.c ++++ b/xlators/features/shard/src/shard.c +@@ -5557,7 +5557,7 @@ shard_priv_dump (xlator_t *this) + + snprintf (key_prefix, GF_DUMP_MAX_BUF_LEN, "%s.%s", this->type, + this->name); +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + gf_proc_dump_write ("shard-block-size", "%s", + gf_uint64_2human_readable (priv->block_size)); + gf_proc_dump_write ("inode-count", "%d", priv->inode_count); +diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c +index 848e689..5ab828c 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c ++++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c +@@ -310,7 +310,7 @@ glusterd_handle_defrag_start (glusterd_volinfo_t *volinfo, char *op_errstr, + runner_add_arg (&runner, "--pid-file"); + runner_argprintf (&runner, "%s",pidfile); + runner_add_arg (&runner, "-l"); +- runner_argprintf (&runner, logfile); ++ runner_argprintf (&runner, "%s", logfile); + if (volinfo->memory_accounting) + runner_add_arg (&runner, "--mem-accounting"); + if (dict_get_str (priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY, +diff --git 
a/xlators/mgmt/glusterd/src/glusterd-statedump.c b/xlators/mgmt/glusterd/src/glusterd-statedump.c +index d0a9705..8c2c4b5 100644 +--- a/xlators/mgmt/glusterd/src/glusterd-statedump.c ++++ b/xlators/mgmt/glusterd/src/glusterd-statedump.c +@@ -197,7 +197,7 @@ glusterd_dump_priv (xlator_t *this) + return 0; + + gf_proc_dump_build_key (key, "xlator.glusterd", "priv"); +- gf_proc_dump_add_section (key); ++ gf_proc_dump_add_section ("%s", key); + + pthread_mutex_lock (&priv->mutex); + { +diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c +index 85cee73..1c4f4e4 100644 +--- a/xlators/mount/fuse/src/fuse-bridge.c ++++ b/xlators/mount/fuse/src/fuse-bridge.c +@@ -5189,7 +5189,7 @@ fuse_history_dump (xlator_t *this) + + gf_proc_dump_build_key (key_prefix, "xlator.mount.fuse", + "history"); +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + eh_dump (this->history, NULL, dump_history_fuse); + + ret = 0; +diff --git a/xlators/performance/io-cache/src/io-cache.c b/xlators/performance/io-cache/src/io-cache.c +index 8963942..700d8c2 100644 +--- a/xlators/performance/io-cache/src/io-cache.c ++++ b/xlators/performance/io-cache/src/io-cache.c +@@ -2008,7 +2008,7 @@ ioc_inode_dump (xlator_t *this, inode_t *inode) + if (gf_uuid_is_null (ioc_inode->inode->gfid)) + goto unlock; + +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + section_added = _gf_true; + + __inode_path (ioc_inode->inode, NULL, &path); +@@ -2031,7 +2031,7 @@ unlock: + out: + if (ret && ioc_inode) { + if (section_added == _gf_false) +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + gf_proc_dump_write ("Unable to print the status of ioc_inode", + "(Lock acquisition failed) %s", + uuid_utoa (inode->gfid)); +@@ -2053,7 +2053,7 @@ ioc_priv_dump (xlator_t *this) + priv = this->private; + + gf_proc_dump_build_key (key_prefix, "io-cache", "priv"); +- 
gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + add_section = _gf_true; + + ret = pthread_mutex_trylock (&priv->table_lock); +@@ -2074,7 +2074,7 @@ out: + if (!add_section) { + gf_proc_dump_build_key (key_prefix, "xlator." + "performance.io-cache", "priv"); +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + } + gf_proc_dump_write ("Unable to dump the state of private " + "structure of io-cache xlator", "(Lock " +diff --git a/xlators/performance/io-threads/src/io-threads.c b/xlators/performance/io-threads/src/io-threads.c +index 5c47072..41d48ab 100644 +--- a/xlators/performance/io-threads/src/io-threads.c ++++ b/xlators/performance/io-threads/src/io-threads.c +@@ -911,7 +911,7 @@ iot_priv_dump (xlator_t *this) + snprintf (key_prefix, GF_DUMP_MAX_BUF_LEN, "%s.%s", this->type, + this->name); + +- gf_proc_dump_add_section(key_prefix); ++ gf_proc_dump_add_section("%s", key_prefix); + + gf_proc_dump_write("maximum_threads_count", "%d", conf->max_count); + gf_proc_dump_write("current_threads_count", "%d", conf->curr_count); +diff --git a/xlators/performance/md-cache/src/md-cache.c b/xlators/performance/md-cache/src/md-cache.c +index 9d2eea6..e7452e9 100644 +--- a/xlators/performance/md-cache/src/md-cache.c ++++ b/xlators/performance/md-cache/src/md-cache.c +@@ -3087,7 +3087,7 @@ mdc_priv_dump (xlator_t *this) + conf = this->private; + + snprintf(key_prefix, GF_DUMP_MAX_BUF_LEN, "%s.%s", this->type, this->name); +- gf_proc_dump_add_section(key_prefix); ++ gf_proc_dump_add_section("%s", key_prefix); + + gf_proc_dump_write("stat_hit_count", "%"PRId64, + conf->mdc_counter.stat_hit.cnt); +diff --git a/xlators/performance/nl-cache/src/nl-cache-helper.c b/xlators/performance/nl-cache/src/nl-cache-helper.c +index 0b6c884..583d67b 100644 +--- a/xlators/performance/nl-cache/src/nl-cache-helper.c ++++ b/xlators/performance/nl-cache/src/nl-cache-helper.c +@@ -1177,7 +1177,7 @@ nlc_dump_inodectx 
(xlator_t *this, inode_t *inode) + gf_proc_dump_build_key (key_prefix, + "xlator.performance.nl-cache", + "nlc_inode"); +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + + __inode_path (inode, NULL, &path); + if (path != NULL) { +diff --git a/xlators/performance/nl-cache/src/nl-cache.c b/xlators/performance/nl-cache/src/nl-cache.c +index 7dad8d9..6365d82 100644 +--- a/xlators/performance/nl-cache/src/nl-cache.c ++++ b/xlators/performance/nl-cache/src/nl-cache.c +@@ -615,7 +615,7 @@ nlc_priv_dump (xlator_t *this) + conf = this->private; + + snprintf(key_prefix, GF_DUMP_MAX_BUF_LEN, "%s.%s", this->type, this->name); +- gf_proc_dump_add_section(key_prefix); ++ gf_proc_dump_add_section("%s", key_prefix); + + gf_proc_dump_write("negative_lookup_hit_count", "%"PRId64, + conf->nlc_counter.nlc_hit.cnt); +diff --git a/xlators/performance/open-behind/src/open-behind.c b/xlators/performance/open-behind/src/open-behind.c +index 3be35bc..2d77f2d 100644 +--- a/xlators/performance/open-behind/src/open-behind.c ++++ b/xlators/performance/open-behind/src/open-behind.c +@@ -897,7 +897,7 @@ ob_priv_dump (xlator_t *this) + gf_proc_dump_build_key (key_prefix, "xlator.performance.open-behind", + "priv"); + +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + + gf_proc_dump_write ("use_anonymous_fd", "%d", conf->use_anonymous_fd); + +@@ -926,7 +926,7 @@ ob_fdctx_dump (xlator_t *this, fd_t *fd) + + gf_proc_dump_build_key (key_prefix, "xlator.performance.open-behind", + "file"); +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + + gf_proc_dump_write ("fd", "%p", fd); + +diff --git a/xlators/performance/quick-read/src/quick-read.c b/xlators/performance/quick-read/src/quick-read.c +index 61232c1..ca228b8 100644 +--- a/xlators/performance/quick-read/src/quick-read.c ++++ b/xlators/performance/quick-read/src/quick-read.c +@@ -748,7 +748,7 @@ qr_inodectx_dump (xlator_t 
*this, inode_t *inode) + + gf_proc_dump_build_key (key_prefix, "xlator.performance.quick-read", + "inodectx"); +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + + gf_proc_dump_write ("entire-file-cached", "%s", qr_inode->data ? "yes" : "no"); + +@@ -794,7 +794,7 @@ qr_priv_dump (xlator_t *this) + gf_proc_dump_build_key (key_prefix, "xlator.performance.quick-read", + "priv"); + +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + + gf_proc_dump_write ("max_file_size", "%d", conf->max_file_size); + gf_proc_dump_write ("cache_timeout", "%d", conf->cache_timeout); +diff --git a/xlators/performance/read-ahead/src/read-ahead.c b/xlators/performance/read-ahead/src/read-ahead.c +index 242b579..74ddf49 100644 +--- a/xlators/performance/read-ahead/src/read-ahead.c ++++ b/xlators/performance/read-ahead/src/read-ahead.c +@@ -823,7 +823,7 @@ ra_fdctx_dump (xlator_t *this, fd_t *fd) + "xlator.performance.read-ahead", + "file"); + +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + + ret = __inode_path (fd->inode, NULL, &path); + if (path != NULL) { +@@ -1068,7 +1068,7 @@ ra_priv_dump (xlator_t *this) + gf_proc_dump_build_key (key_prefix, "xlator.performance.read-ahead", + "priv"); + +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + add_section = _gf_true; + + ret = pthread_mutex_trylock (&conf->conf_lock); +@@ -1086,7 +1086,7 @@ ra_priv_dump (xlator_t *this) + out: + if (ret && conf) { + if (add_section == _gf_false) +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + + gf_proc_dump_write ("Unable to dump priv", + "(Lock acquisition failed) %s", this->name); +diff --git a/xlators/performance/write-behind/src/write-behind.c b/xlators/performance/write-behind/src/write-behind.c +index 478985a..ef02e18 100644 +--- a/xlators/performance/write-behind/src/write-behind.c ++++ 
b/xlators/performance/write-behind/src/write-behind.c +@@ -2763,7 +2763,7 @@ wb_priv_dump (xlator_t *this) + gf_proc_dump_build_key (key_prefix, "xlator.performance.write-behind", + "priv"); + +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + + gf_proc_dump_write ("aggregate_size", "%d", conf->aggregate_size); + gf_proc_dump_write ("window_size", "%d", conf->window_size); +@@ -2787,7 +2787,7 @@ __wb_dump_requests (struct list_head *head, char *prefix) + gf_proc_dump_build_key (key_prefix, key, "%s", + (char *)gf_fop_list[req->fop]); + +- gf_proc_dump_add_section(key_prefix); ++ gf_proc_dump_add_section("%s", key_prefix); + + gf_proc_dump_write ("unique", "%"PRIu64, req->unique); + +@@ -2859,7 +2859,7 @@ wb_inode_dump (xlator_t *this, inode_t *inode) + gf_proc_dump_build_key (key_prefix, "xlator.performance.write-behind", + "wb_inode"); + +- gf_proc_dump_add_section (key_prefix); ++ gf_proc_dump_add_section ("%s", key_prefix); + + __inode_path (inode, NULL, &path); + if (path != NULL) { +diff --git a/xlators/protocol/client/src/client.c b/xlators/protocol/client/src/client.c +index 26b0907..674f1aa 100644 +--- a/xlators/protocol/client/src/client.c ++++ b/xlators/protocol/client/src/client.c +@@ -2855,7 +2855,7 @@ client_priv_dump (xlator_t *this) + gf_proc_dump_build_key(key_prefix, "xlator.protocol.client", + "%s.priv", this->name); + +- gf_proc_dump_add_section(key_prefix); ++ gf_proc_dump_add_section("%s", key_prefix); + + list_for_each_entry(tmp, &conf->saved_fds, sfd_pos) { + sprintf (key, "fd.%d.remote_fd", i); +diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c +index d0e815e..3a429bc 100644 +--- a/xlators/protocol/server/src/server.c ++++ b/xlators/protocol/server/src/server.c +@@ -320,7 +320,7 @@ server_priv (xlator_t *this) + return 0; + + gf_proc_dump_build_key (key, "xlator.protocol.server", "priv"); +- gf_proc_dump_add_section (key); ++ gf_proc_dump_add_section ("%s", key); 
+ + ret = pthread_mutex_trylock (&conf->mutex); + if (ret != 0) +diff --git a/xlators/storage/posix/src/posix.c b/xlators/storage/posix/src/posix.c +index f79dbda..7bfe780 100644 +--- a/xlators/storage/posix/src/posix.c ++++ b/xlators/storage/posix/src/posix.c +@@ -6960,7 +6960,7 @@ posix_priv (xlator_t *this) + + (void) snprintf(key_prefix, GF_DUMP_MAX_BUF_LEN, "%s.%s", + this->type, this->name); +- gf_proc_dump_add_section(key_prefix); ++ gf_proc_dump_add_section("%s", key_prefix); + + if (!this) + return 0; +-- +1.8.3.1 + diff --git a/glusterfs.spec b/glusterfs.spec index ca60b6a..13d9d29 100644 --- a/glusterfs.spec +++ b/glusterfs.spec @@ -192,7 +192,7 @@ Release: 0.1%{?prereltag:.%{prereltag}}%{?dist} %else Name: glusterfs Version: 3.12.2 -Release: 21%{?dist} +Release: 22%{?dist} %endif License: GPLv2 or LGPLv3+ Group: System Environment/Base @@ -652,6 +652,18 @@ Patch0384: 0384-features-uss-Use-xxh64-to-generate-gfid-instead-of-m.patch Patch0385: 0385-afr-fix-incorrect-reporting-of-directory-split-brain.patch Patch0386: 0386-glusterd-make-sure-that-brickinfo-uuid-is-not-null.patch Patch0387: 0387-georep-Fix-config-set-of-monitor-status.patch +Patch0388: 0388-glusterd-handshake-prevent-a-buffer-overflow.patch +Patch0389: 0389-server-don-t-allow-in-basename.patch +Patch0390: 0390-core-glusterfsd-keeping-fd-open-in-index-xlator.patch +Patch0391: 0391-glusterd-Use-GF_ATOMIC-to-update-blockers-counter-at.patch +Patch0392: 0392-glusterd-don-t-wait-for-blockers-flag-for-stop-volum.patch +Patch0393: 0393-core-Pass-xlator_name-in-server_call_xlator_mem_clea.patch +Patch0394: 0394-io-stats-prevent-taking-file-dump-on-server-side.patch +Patch0395: 0395-index-prevent-arbitrary-file-creation-outside-entry-.patch +Patch0396: 0396-protocol-remove-the-option-verify-volfile-checksum.patch +Patch0397: 0397-features-locks-add-buffer-overflow-checks-in-pl_getx.patch +Patch0398: 0398-lock-Do-not-allow-meta-lock-count-to-be-more-than-on.patch +Patch0399: 
0399-all-fix-the-format-string-exceptions.patch %description GlusterFS is a distributed file-system capable of scaling to several @@ -2600,6 +2612,9 @@ fi %endif %changelog +* Tue Oct 09 2018 Milind Changire - 3.12.2-22 +- fixes bugs bz#1631329 bz#1631372 + * Wed Oct 03 2018 Milind Changire - 3.12.2-21 - fixes bugs bz#1623749 bz#1630997