autobuild v3.12.2-12

Resolves: bz#1558989 bz#1580344 bz#1581057 bz#1581219
Signed-off-by: Milind Changire <mchangir@redhat.com>
Milind Changire 2018-05-24 06:11:18 -04:00
parent 11f2b2fe34
commit 22237a34a8
9 changed files with 1211 additions and 1 deletion

0275-afr-fix-bug-1363721.t-failure.patch

@@ -0,0 +1,171 @@
From aed804b0f10c93ade88e109dd89a5b593ff1b1e5 Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Fri, 18 May 2018 15:38:29 +0530
Subject: [PATCH 275/282] afr: fix bug-1363721.t failure
Backport of https://review.gluster.org/#/c/20036/
Problem:
In the .t, when the only good brick was brought down, writes on the fd were
still succeeding on the bad bricks. The in-flight split-brain check was
marking the write as a failure, but since the write succeeded on all the
bad bricks, afr_txn_nothing_failed() evaluated to true and we were
unwinding writev with success to DHT and then catching the failure in
post-op in the background.
Fix:
Don't wind the FOP phase if the write_subvol (which is populated with the
readable subvols obtained in the pre-op cbk) does not contain at least one
good brick that was up when the transaction started.
Note: This fix is not related to brick multiplexing. I ran the .t
10 times with this fix and brick-mux enabled without any failures.
Change-Id: I915c9c366aa32cd342b1565827ca2d83cb02ae85
BUG: 1581057
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/139440
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
---
tests/bugs/replicate/bug-1363721.t | 12 +++++++---
xlators/cluster/afr/src/afr-common.c | 14 ++++++++++++
xlators/cluster/afr/src/afr-transaction.c | 38 +++++++++++++++++++++++++++++++
xlators/cluster/afr/src/afr.h | 3 +++
4 files changed, 64 insertions(+), 3 deletions(-)
diff --git a/tests/bugs/replicate/bug-1363721.t b/tests/bugs/replicate/bug-1363721.t
index ec39889..0ed34d8 100644
--- a/tests/bugs/replicate/bug-1363721.t
+++ b/tests/bugs/replicate/bug-1363721.t
@@ -18,6 +18,10 @@ function size_increased {
fi
}
+function has_write_failed {
+ local pid=$1
+ if [ -d /proc/$pid ]; then echo "N"; else echo "Y"; fi
+}
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
@@ -27,7 +31,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
TEST $CLI volume set $V0 cluster.metadata-self-heal off
TEST $CLI volume set $V0 cluster.entry-self-heal off
TEST $CLI volume start $V0
-TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0 --direct-io-mode=enable
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --direct-io-mode=enable $M0
cd $M0
@@ -67,8 +71,10 @@ sleep 3
# Now kill the second brick
kill_brick $V0 $H0 $B0/${V0}2
-# At this point the write should have been failed. But make sure that the second
-# brick is never an accused.
+# At this point the write should have been failed.
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "Y" has_write_failed $dd_pid
+
+# Also make sure that the second brick is never an accused.
md5sum_2=$(md5sum $B0/${V0}2/file1 | awk '{print $1}')
diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
index 6025a60..a85549b 100644
--- a/xlators/cluster/afr/src/afr-common.c
+++ b/xlators/cluster/afr/src/afr-common.c
@@ -6270,6 +6270,20 @@ out:
return ret;
}
+uint64_t
+afr_write_subvol_get (call_frame_t *frame, xlator_t *this)
+{
+ afr_local_t *local = NULL;
+ uint64_t write_subvol = 0;
+
+ local = frame->local;
+ LOCK(&local->inode->lock);
+ write_subvol = local->inode_ctx->write_subvol;
+ UNLOCK (&local->inode->lock);
+
+ return write_subvol;
+}
+
int
afr_write_subvol_set (call_frame_t *frame, xlator_t *this)
{
diff --git a/xlators/cluster/afr/src/afr-transaction.c b/xlators/cluster/afr/src/afr-transaction.c
index 0506a78..ff07319 100644
--- a/xlators/cluster/afr/src/afr-transaction.c
+++ b/xlators/cluster/afr/src/afr-transaction.c
@@ -167,6 +167,34 @@ afr_changelog_has_quorum (afr_local_t *local, xlator_t *this)
return _gf_false;
}
+
+gf_boolean_t
+afr_is_write_subvol_valid (call_frame_t *frame, xlator_t *this)
+{
+ int i = 0;
+ afr_local_t *local = NULL;
+ afr_private_t *priv = NULL;
+ uint64_t write_subvol = 0;
+ unsigned char *writable = NULL;
+ uint16_t datamap = 0;
+
+ local = frame->local;
+ priv = this->private;
+ writable = alloca0 (priv->child_count);
+
+ write_subvol = afr_write_subvol_get (frame, this);
+ datamap = (write_subvol & 0x00000000ffff0000) >> 16;
+ for (i = 0; i < priv->child_count; i++) {
+ if (datamap & (1 << i))
+ writable[i] = 1;
+
+ if (writable[i] && !local->transaction.failed_subvols[i])
+ return _gf_true;
+ }
+
+ return _gf_false;
+}
+
int
afr_transaction_fop (call_frame_t *frame, xlator_t *this)
{
@@ -189,6 +217,16 @@ afr_transaction_fop (call_frame_t *frame, xlator_t *this)
afr_transaction_resume (frame, this);
return 0;
}
+
+ /* Fail if at least one writeable brick isn't up.*/
+ if (local->transaction.type == AFR_DATA_TRANSACTION &&
+ !afr_is_write_subvol_valid (frame, this)) {
+ local->op_ret = -1;
+ local->op_errno = EIO;
+ afr_transaction_resume (frame, this);
+ return 0;
+ }
+
local->call_count = call_count;
for (i = 0; i < priv->child_count; i++) {
if (local->transaction.pre_op[i] && !failed_subvols[i]) {
diff --git a/xlators/cluster/afr/src/afr.h b/xlators/cluster/afr/src/afr.h
index 6be59dc..35928a9 100644
--- a/xlators/cluster/afr/src/afr.h
+++ b/xlators/cluster/afr/src/afr.h
@@ -1199,6 +1199,9 @@ afr_serialize_xattrs_with_delimiter (call_frame_t *frame, xlator_t *this,
int
__afr_inode_ctx_get (xlator_t *this, inode_t *inode, afr_inode_ctx_t **ctx);
+uint64_t
+afr_write_subvol_get (call_frame_t *frame, xlator_t *this);
+
int
afr_write_subvol_set (call_frame_t *frame, xlator_t *this);
--
1.8.3.1
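
The heart of the fix is the datamap test in afr_is_write_subvol_valid(): bits 16..31 of the inode-ctx write_subvol value record which bricks were readable when pre-op ran, and the transaction now fails unless at least one of those bricks is still healthy. Below is a minimal, compilable sketch of just that check; the helper name and sample values are illustrative, only the bit layout comes from the patch above.

/* Sketch of the datamap check from afr_is_write_subvol_valid().
 * Bits 16..31 of write_subvol carry the data-readable map, one
 * bit per brick; helper name and values are illustrative. */
#include <stdint.h>
#include <stdio.h>

static int
has_good_writable_brick (uint64_t write_subvol,
                         const unsigned char *failed_subvols,
                         int child_count)
{
        uint16_t datamap = (write_subvol & 0x00000000ffff0000ULL) >> 16;
        int      i;

        for (i = 0; i < child_count; i++) {
                /* usable = readable in pre-op AND not failed in this txn */
                if ((datamap & (1 << i)) && !failed_subvols[i])
                        return 1;
        }
        return 0;
}

int
main (void)
{
        /* replica 3: bricks 0 and 2 were readable at pre-op,
         * but brick 0 failed mid-transaction */
        uint64_t      write_subvol = (uint64_t)0x5 << 16;
        unsigned char failed[3]    = {1, 0, 0};

        /* prints 1: brick 2 is still a good writable brick */
        printf ("%d\n", has_good_writable_brick (write_subvol, failed, 3));
        return 0;
}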

0276-tests-check-volume-status-for-shd-being-up.patch

@@ -0,0 +1,45 @@
From d2a15381b5cd414aa1dcf585a495200a29390f7e Mon Sep 17 00:00:00 2001
From: karthik-us <ksubrahm@redhat.com>
Date: Tue, 22 May 2018 17:11:11 +0530
Subject: [PATCH 276/282] tests: check volume status for shd being up
so that glusterd is also aware that shd is up and running.
While not reproducible locally, on the Jenkins slaves, 'gluster vol heal patchy'
fails with "Self-heal daemon is not running. Check self-heal daemon log file.",
even though the afr_child_up_status_in_shd() checks before that passed. In the
shd log also, I see the shd being up and connected to at least one brick before
the heal is launched.
Upstream Patch: https://review.gluster.org/#/c/19185/
> Change-Id: Id3801fa4ab56a70b1f0bd6a7e240f69bea74a5fc
> BUG: 1515163
> Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Change-Id: Iec44500dc52b0991cc45befaa4b40351ab1ad870
BUG: 1581219
Signed-off-by: karthik-us <ksubrahm@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/139486
Reviewed-by: Ravishankar Narayanankutty <ravishankar@redhat.com>
Tested-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
---
tests/bugs/replicate/bug-1292379.t | 1 +
1 file changed, 1 insertion(+)
diff --git a/tests/bugs/replicate/bug-1292379.t b/tests/bugs/replicate/bug-1292379.t
index f086502..be1bf69 100644
--- a/tests/bugs/replicate/bug-1292379.t
+++ b/tests/bugs/replicate/bug-1292379.t
@@ -39,6 +39,7 @@ TEST $CLI volume start $V0 force
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
TEST fd_write $wfd "pqrs"
TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
--
1.8.3.1

0277-Revert-rpcsvc-Turn-off-ownthreads-for-Glusterfs-prog.patch

@@ -0,0 +1,35 @@
From 25f242ceae72453de48e5e7ac4a8622d395271ec Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Mon, 21 May 2018 12:14:06 +0530
Subject: [PATCH 277/282] Revert "rpcsvc: Turn off ownthreads for Glusterfs
program"
This reverts commit 0336d2fa2e8ee0b80c5d5891883c2551f6dbe4db.
> Reviewed-on: https://code.engineering.redhat.com/gerrit/139002
Change-Id: I023c56be4686f69c41c613954a19cd7314df592e
BUG: 1558989
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/139305
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Raghavendra Gowdappa <rgowdapp@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/protocol/server/src/server-rpc-fops.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/xlators/protocol/server/src/server-rpc-fops.c b/xlators/protocol/server/src/server-rpc-fops.c
index ba6cc1f..91d5c03 100644
--- a/xlators/protocol/server/src/server-rpc-fops.c
+++ b/xlators/protocol/server/src/server-rpc-fops.c
@@ -6160,5 +6160,5 @@ struct rpcsvc_program glusterfs3_3_fop_prog = {
.progver = GLUSTER_FOP_VERSION,
.numactors = GLUSTER_FOP_PROCCNT,
.actors = glusterfs3_3_fop_actors,
- .ownthread = _gf_false,
+ .ownthread = _gf_true,
};
--
1.8.3.1

0278-Revert-rpcsvc-correct-event-thread-scaling.patch

@@ -0,0 +1,42 @@
From 471455f0fce403570b367ea5677ed9a9e198a1c7 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Mon, 21 May 2018 12:15:59 +0530
Subject: [PATCH 278/282] Revert "rpcsvc: correct event-thread scaling"
This reverts commit 96146ccae552c648f33a19783fad824cf8101790.
> Reviewed-on: https://code.engineering.redhat.com/gerrit/132509
Change-Id: I5b44bcce0dd36620b580acf1b0aedbc7b79934f5
BUG: 1558989
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/139306
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Raghavendra Gowdappa <rgowdapp@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/protocol/server/src/server.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c
index d40457c..79f68e8 100644
--- a/xlators/protocol/server/src/server.c
+++ b/xlators/protocol/server/src/server.c
@@ -1042,11 +1042,10 @@ do_rpc:
}
/*
- * Update:
- * We don't need to reset auto_thread_count since it has been derived
- * out of the total bricks attached. We can reconfigure event threads
- * but not auto threads.
+ * Let the event subsystem know that we're auto-scaling, with an
+ * initial count of one.
*/
+ ((struct event_pool *)(this->ctx->event_pool))->auto_thread_count = 1;
GF_OPTION_RECONF ("event-threads", new_nthread, options, int32, out);
ret = server_check_event_threads (this, conf, new_nthread);
--
1.8.3.1

0279-Revert-rpc-make-actor-search-parallel.patch

@@ -0,0 +1,265 @@
From 435e0a9cfd3df40498ca835ed573c994da2fc66b Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Mon, 21 May 2018 12:16:42 +0530
Subject: [PATCH 279/282] Revert "rpc: make actor search parallel"
This reverts commit 72dc3a3eff84c5e17bbc3bfddec9daf50338464f.
> Reviewed-on: https://code.engineering.redhat.com/gerrit/131909
Change-Id: I0ce4f1c6c14f89031a31d5bb7ffb66ea16348cb9
BUG: 1558989
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/139307
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Raghavendra Gowdappa <rgowdapp@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
rpc/rpc-lib/src/rpcsvc-common.h | 2 +-
rpc/rpc-lib/src/rpcsvc.c | 54 ++++++++++++++++++++---------------------
2 files changed, 28 insertions(+), 28 deletions(-)
diff --git a/rpc/rpc-lib/src/rpcsvc-common.h b/rpc/rpc-lib/src/rpcsvc-common.h
index ab715d3..dd95803 100644
--- a/rpc/rpc-lib/src/rpcsvc-common.h
+++ b/rpc/rpc-lib/src/rpcsvc-common.h
@@ -42,7 +42,7 @@ typedef struct rpcsvc_state {
* other options.
*/
- pthread_rwlock_t rpclock;
+ pthread_mutex_t rpclock;
unsigned int memfactor;
diff --git a/rpc/rpc-lib/src/rpcsvc.c b/rpc/rpc-lib/src/rpcsvc.c
index fbd1071..31b5eb5 100644
--- a/rpc/rpc-lib/src/rpcsvc.c
+++ b/rpc/rpc-lib/src/rpcsvc.c
@@ -88,11 +88,11 @@ rpcsvc_listener_destroy (rpcsvc_listener_t *listener)
goto listener_free;
}
- pthread_rwlock_wrlock (&svc->rpclock);
+ pthread_mutex_lock (&svc->rpclock);
{
list_del_init (&listener->list);
}
- pthread_rwlock_unlock (&svc->rpclock);
+ pthread_mutex_unlock (&svc->rpclock);
listener_free:
GF_FREE (listener);
@@ -110,7 +110,7 @@ rpcsvc_get_program_vector_sizer (rpcsvc_t *svc, uint32_t prognum,
if (!svc)
return NULL;
- pthread_rwlock_rdlock (&svc->rpclock);
+ pthread_mutex_lock (&svc->rpclock);
{
/* Find the matching RPC program from registered list */
list_for_each_entry (program, &svc->programs, program) {
@@ -121,7 +121,7 @@ rpcsvc_get_program_vector_sizer (rpcsvc_t *svc, uint32_t prognum,
}
}
}
- pthread_rwlock_unlock (&svc->rpclock);
+ pthread_mutex_unlock (&svc->rpclock);
if (found) {
/* Make sure the requested procnum is supported by RPC prog */
@@ -237,7 +237,7 @@ rpcsvc_program_actor (rpcsvc_request_t *req)
svc = req->svc;
peername = req->trans->peerinfo.identifier;
- pthread_rwlock_rdlock (&svc->rpclock);
+ pthread_mutex_lock (&svc->rpclock);
{
list_for_each_entry (program, &svc->programs, program) {
if (program->prognum == req->prognum) {
@@ -251,7 +251,7 @@ rpcsvc_program_actor (rpcsvc_request_t *req)
}
}
}
- pthread_rwlock_unlock (&svc->rpclock);
+ pthread_mutex_unlock (&svc->rpclock);
if (!found) {
if (err != PROG_MISMATCH) {
@@ -735,7 +735,7 @@ rpcsvc_handle_disconnect (rpcsvc_t *svc, rpc_transport_t *trans)
event = (trans->listener == NULL) ? RPCSVC_EVENT_LISTENER_DEAD
: RPCSVC_EVENT_DISCONNECT;
- pthread_rwlock_rdlock (&svc->rpclock);
+ pthread_mutex_lock (&svc->rpclock);
{
if (!svc->notify_count)
goto unlock;
@@ -755,7 +755,7 @@ rpcsvc_handle_disconnect (rpcsvc_t *svc, rpc_transport_t *trans)
wrapper_count = i;
}
unlock:
- pthread_rwlock_unlock (&svc->rpclock);
+ pthread_mutex_unlock (&svc->rpclock);
if (wrappers) {
for (i = 0; i < wrapper_count; i++) {
@@ -1495,7 +1495,7 @@ rpcsvc_get_listener (rpcsvc_t *svc, uint16_t port, rpc_transport_t *trans)
goto out;
}
- pthread_rwlock_rdlock (&svc->rpclock);
+ pthread_mutex_lock (&svc->rpclock);
{
list_for_each_entry (listener, &svc->listeners, list) {
if (trans != NULL) {
@@ -1521,7 +1521,7 @@ rpcsvc_get_listener (rpcsvc_t *svc, uint16_t port, rpc_transport_t *trans)
}
}
}
- pthread_rwlock_unlock (&svc->rpclock);
+ pthread_mutex_unlock (&svc->rpclock);
if (!found) {
listener = NULL;
@@ -1566,7 +1566,7 @@ rpcsvc_program_unregister (rpcsvc_t *svc, rpcsvc_program_t *program)
" program failed");
goto out;
}
- pthread_rwlock_rdlock (&svc->rpclock);
+ pthread_mutex_lock (&svc->rpclock);
{
list_for_each_entry (prog, &svc->programs, program) {
if ((prog->prognum == program->prognum)
@@ -1575,7 +1575,7 @@ rpcsvc_program_unregister (rpcsvc_t *svc, rpcsvc_program_t *program)
}
}
}
- pthread_rwlock_unlock (&svc->rpclock);
+ pthread_mutex_unlock (&svc->rpclock);
if (prog == NULL) {
ret = -1;
@@ -1592,11 +1592,11 @@ rpcsvc_program_unregister (rpcsvc_t *svc, rpcsvc_program_t *program)
goto out;
}
- pthread_rwlock_wrlock (&svc->rpclock);
+ pthread_mutex_lock (&svc->rpclock);
{
list_del_init (&prog->program);
}
- pthread_rwlock_unlock (&svc->rpclock);
+ pthread_mutex_unlock (&svc->rpclock);
ret = 0;
out:
@@ -1655,11 +1655,11 @@ rpcsvc_listener_alloc (rpcsvc_t *svc, rpc_transport_t *trans)
INIT_LIST_HEAD (&listener->list);
- pthread_rwlock_wrlock (&svc->rpclock);
+ pthread_mutex_lock (&svc->rpclock);
{
list_add_tail (&listener->list, &svc->listeners);
}
- pthread_rwlock_unlock (&svc->rpclock);
+ pthread_mutex_unlock (&svc->rpclock);
out:
return listener;
}
@@ -1813,7 +1813,7 @@ rpcsvc_unregister_notify (rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata)
goto out;
}
- pthread_rwlock_wrlock (&svc->rpclock);
+ pthread_mutex_lock (&svc->rpclock);
{
list_for_each_entry_safe (wrapper, tmp, &svc->notify, list) {
if ((wrapper->notify == notify)
@@ -1824,7 +1824,7 @@ rpcsvc_unregister_notify (rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata)
}
}
}
- pthread_rwlock_unlock (&svc->rpclock);
+ pthread_mutex_unlock (&svc->rpclock);
out:
return ret;
@@ -1844,12 +1844,12 @@ rpcsvc_register_notify (rpcsvc_t *svc, rpcsvc_notify_t notify, void *mydata)
wrapper->data = mydata;
wrapper->notify = notify;
- pthread_rwlock_wrlock (&svc->rpclock);
+ pthread_mutex_lock (&svc->rpclock);
{
list_add_tail (&wrapper->list, &svc->notify);
svc->notify_count++;
}
- pthread_rwlock_unlock (&svc->rpclock);
+ pthread_mutex_unlock (&svc->rpclock);
ret = 0;
out:
@@ -1987,7 +1987,7 @@ rpcsvc_program_register (rpcsvc_t *svc, rpcsvc_program_t *program)
goto out;
}
- pthread_rwlock_rdlock (&svc->rpclock);
+ pthread_mutex_lock (&svc->rpclock);
{
list_for_each_entry (newprog, &svc->programs, program) {
if ((newprog->prognum == program->prognum)
@@ -1997,7 +1997,7 @@ rpcsvc_program_register (rpcsvc_t *svc, rpcsvc_program_t *program)
}
}
}
- pthread_rwlock_unlock (&svc->rpclock);
+ pthread_mutex_unlock (&svc->rpclock);
if (already_registered) {
ret = 0;
@@ -2031,11 +2031,11 @@ rpcsvc_program_register (rpcsvc_t *svc, rpcsvc_program_t *program)
}
}
- pthread_rwlock_wrlock (&svc->rpclock);
+ pthread_mutex_lock (&svc->rpclock);
{
list_add_tail (&newprog->program, &svc->programs);
}
- pthread_rwlock_unlock (&svc->rpclock);
+ pthread_mutex_unlock (&svc->rpclock);
ret = 0;
gf_log (GF_RPCSVC, GF_LOG_DEBUG, "New program registered: %s, Num: %d,"
@@ -2077,7 +2077,7 @@ build_prog_details (rpcsvc_request_t *req, gf_dump_rsp *rsp)
if (!req || !req->trans || !req->svc)
goto out;
- pthread_rwlock_rdlock (&req->svc->rpclock);
+ pthread_mutex_lock (&req->svc->rpclock);
{
list_for_each_entry (program, &req->svc->programs, program) {
prog = GF_CALLOC (1, sizeof (*prog), 0);
@@ -2098,7 +2098,7 @@ build_prog_details (rpcsvc_request_t *req, gf_dump_rsp *rsp)
ret = 0;
}
unlock:
- pthread_rwlock_unlock (&req->svc->rpclock);
+ pthread_mutex_unlock (&req->svc->rpclock);
out:
return ret;
}
@@ -2456,7 +2456,7 @@ rpcsvc_init (xlator_t *xl, glusterfs_ctx_t *ctx, dict_t *options,
if (!svc)
return NULL;
- pthread_rwlock_init (&svc->rpclock, NULL);
+ pthread_mutex_init (&svc->rpclock, NULL);
INIT_LIST_HEAD (&svc->authschemes);
INIT_LIST_HEAD (&svc->notify);
INIT_LIST_HEAD (&svc->listeners);
--
1.8.3.1
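
Patch 0279 above reverts the change that made the actor search parallel by swapping the single rpcsvc mutex for a pthread rwlock. For reference, a minimal sketch of the pattern being reverted — shared read locks on the hot lookup path, an exclusive write lock for registration — using illustrative names rather than the real rpcsvc symbols.

/* Reader/writer locking pattern the reverted patch had introduced.
 * Names are illustrative (hypothetical), not rpcsvc symbols. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t rpclock = PTHREAD_RWLOCK_INITIALIZER;
static int nprograms;   /* stand-in for the registered-program list */

static int
lookup_program (void)
{
        int n;

        pthread_rwlock_rdlock (&rpclock);   /* many readers in parallel */
        n = nprograms;                      /* walk/search the list here */
        pthread_rwlock_unlock (&rpclock);
        return n;
}

static void
register_program (void)
{
        pthread_rwlock_wrlock (&rpclock);   /* exclusive for mutation */
        nprograms++;                        /* list_add_tail(...) here */
        pthread_rwlock_unlock (&rpclock);
}

int
main (void)
{
        register_program ();
        printf ("programs registered: %d\n", lookup_program ());
        return 0;
}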

0280-Revert-rpcsvc-scale-rpcsvc_request_handler-threads.patch

@@ -0,0 +1,361 @@
From 1183db5268c3957975cdc6ad882113bfff7a8f32 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Mon, 21 May 2018 12:17:06 +0530
Subject: [PATCH 280/282] Revert "rpcsvc: scale rpcsvc_request_handler threads"
This reverts commit 8503ed9b94777d47352f19ebfa844e151352b87f.
> Reviewed-on: https://code.engineering.redhat.com/gerrit/131596
Change-Id: I537a40d29b027a7e06babb775b39bb111f0d0e3f
BUG: 1558989
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/139308
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Raghavendra Gowdappa <rgowdapp@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfsd/src/Makefile.am | 1 -
glusterfsd/src/glusterfsd-mgmt.c | 16 +----
glusterfsd/src/glusterfsd.h | 2 +-
libglusterfs/src/event-poll.c | 7 --
rpc/rpc-lib/src/rpcsvc.c | 129 ++++-------------------------------
rpc/rpc-lib/src/rpcsvc.h | 8 ---
xlators/protocol/server/src/server.c | 11 +--
7 files changed, 20 insertions(+), 154 deletions(-)
diff --git a/glusterfsd/src/Makefile.am b/glusterfsd/src/Makefile.am
index 8ab585c..0196204 100644
--- a/glusterfsd/src/Makefile.am
+++ b/glusterfsd/src/Makefile.am
@@ -22,7 +22,6 @@ AM_CPPFLAGS = $(GF_CPPFLAGS) \
-I$(top_srcdir)/rpc/xdr/src \
-I$(top_builddir)/rpc/xdr/src \
-I$(top_srcdir)/xlators/nfs/server/src \
- -I$(top_srcdir)/xlators/protocol/server/src \
-I$(top_srcdir)/api/src
AM_CFLAGS = -Wall $(GF_CFLAGS)
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 3b9671c..665b62c 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -33,7 +33,6 @@
#include "syncop.h"
#include "xlator.h"
#include "syscall.h"
-#include "server.h"
static gf_boolean_t is_mgmt_rpc_reconnect = _gf_false;
int need_emancipate = 0;
@@ -186,15 +185,12 @@ glusterfs_terminate_response_send (rpcsvc_request_t *req, int op_ret)
}
void
-glusterfs_autoscale_threads (glusterfs_ctx_t *ctx, int incr, xlator_t *this)
+glusterfs_autoscale_threads (glusterfs_ctx_t *ctx, int incr)
{
struct event_pool *pool = ctx->event_pool;
- server_conf_t *conf = this->private;
- int thread_count = pool->eventthreadcount;
pool->auto_thread_count += incr;
- (void) event_reconfigure_threads (pool, thread_count+incr);
- rpcsvc_ownthread_reconf (conf->rpc, pool->eventthreadcount);
+ (void) event_reconfigure_threads (pool, pool->eventthreadcount+incr);
}
static int
@@ -964,7 +960,6 @@ glusterfs_handle_attach (rpcsvc_request_t *req)
xlator_t *nextchild = NULL;
glusterfs_graph_t *newgraph = NULL;
glusterfs_ctx_t *ctx = NULL;
- xlator_t *protocol_server = NULL;
GF_ASSERT (req);
this = THIS;
@@ -1002,12 +997,7 @@ glusterfs_handle_attach (rpcsvc_request_t *req)
nextchild->name);
goto out;
}
- /* we need a protocol/server xlator as
- * nextchild
- */
- protocol_server = this->ctx->active->first;
- glusterfs_autoscale_threads (this->ctx, 1,
- protocol_server);
+ glusterfs_autoscale_threads (this->ctx, 1);
}
} else {
gf_log (this->name, GF_LOG_WARNING,
diff --git a/glusterfsd/src/glusterfsd.h b/glusterfsd/src/glusterfsd.h
index a72acc8..f66947b 100644
--- a/glusterfsd/src/glusterfsd.h
+++ b/glusterfsd/src/glusterfsd.h
@@ -124,7 +124,7 @@ int glusterfs_volume_top_read_perf (uint32_t blk_size, uint32_t blk_count,
char *brick_path, double *throughput,
double *time);
void
-glusterfs_autoscale_threads (glusterfs_ctx_t *ctx, int incr, xlator_t *this);
+glusterfs_autoscale_threads (glusterfs_ctx_t *ctx, int incr);
void
xlator_mem_cleanup (xlator_t *this);
diff --git a/libglusterfs/src/event-poll.c b/libglusterfs/src/event-poll.c
index b1aca82..3bffc47 100644
--- a/libglusterfs/src/event-poll.c
+++ b/libglusterfs/src/event-poll.c
@@ -173,13 +173,6 @@ event_pool_new_poll (int count, int eventthreadcount)
"thread count (%d) ignored", eventthreadcount);
}
- /* although, eventhreadcount for poll implementaiton is always
- * going to be 1, eventthreadcount needs to be set to 1 so that
- * rpcsvc_request_handler() thread scaling works flawlessly in
- * both epoll and poll models
- */
- event_pool->eventthreadcount = 1;
-
return event_pool;
}
diff --git a/rpc/rpc-lib/src/rpcsvc.c b/rpc/rpc-lib/src/rpcsvc.c
index 31b5eb5..68e27ab 100644
--- a/rpc/rpc-lib/src/rpcsvc.c
+++ b/rpc/rpc-lib/src/rpcsvc.c
@@ -1877,105 +1877,39 @@ rpcsvc_request_handler (void *arg)
goto unlock;
}
- while (list_empty (&program->request_queue) &&
- (program->threadcount <=
- program->eventthreadcount)) {
+ while (list_empty (&program->request_queue))
pthread_cond_wait (&program->queue_cond,
&program->queue_lock);
- }
- if (program->threadcount > program->eventthreadcount) {
- done = 1;
- program->threadcount--;
-
- gf_log (GF_RPCSVC, GF_LOG_INFO,
- "program '%s' thread terminated; "
- "total count:%d",
- program->progname,
- program->threadcount);
- } else if (!list_empty (&program->request_queue)) {
- req = list_entry (program->request_queue.next,
- typeof (*req), request_list);
-
- list_del_init (&req->request_list);
- }
+ req = list_entry (program->request_queue.next,
+ typeof (*req), request_list);
+
+ list_del_init (&req->request_list);
}
unlock:
pthread_mutex_unlock (&program->queue_lock);
- if (req) {
- THIS = req->svc->xl;
- actor = rpcsvc_program_actor (req);
- ret = actor->actor (req);
-
- if (ret != 0) {
- rpcsvc_check_and_reply_error (ret, NULL, req);
- }
- req = NULL;
- }
-
if (done)
break;
- }
- return NULL;
-}
+ THIS = req->svc->xl;
-int
-rpcsvc_spawn_threads (rpcsvc_t *svc, rpcsvc_program_t *program)
-{
- int ret = 0, delta = 0, creates = 0;
+ actor = rpcsvc_program_actor (req);
- if (!program || !svc)
- goto out;
-
- pthread_mutex_lock (&program->queue_lock);
- {
- delta = program->eventthreadcount - program->threadcount;
-
- if (delta >= 0) {
- while (delta--) {
- ret = gf_thread_create (&program->thread, NULL,
- rpcsvc_request_handler,
- program, "rpcrqhnd");
- if (!ret) {
- program->threadcount++;
- creates++;
- }
- }
+ ret = actor->actor (req);
- if (creates) {
- gf_log (GF_RPCSVC, GF_LOG_INFO,
- "spawned %d threads for program '%s'; "
- "total count:%d",
- creates,
- program->progname,
- program->threadcount);
- }
- } else {
- gf_log (GF_RPCSVC, GF_LOG_INFO,
- "terminating %d threads for program '%s'",
- -delta, program->progname);
-
- /* this signal is to just wake up the threads so they
- * test for the change in eventthreadcount and kill
- * themselves until the program thread count becomes
- * equal to the event thread count
- */
- pthread_cond_broadcast (&program->queue_cond);
+ if (ret != 0) {
+ rpcsvc_check_and_reply_error (ret, NULL, req);
}
}
- pthread_mutex_unlock (&program->queue_lock);
-out:
- return creates;
+ return NULL;
}
int
rpcsvc_program_register (rpcsvc_t *svc, rpcsvc_program_t *program)
{
int ret = -1;
- int creates = -1;
rpcsvc_program_t *newprog = NULL;
char already_registered = 0;
@@ -2023,12 +1957,9 @@ rpcsvc_program_register (rpcsvc_t *svc, rpcsvc_program_t *program)
newprog->ownthread = _gf_false;
if (newprog->ownthread) {
- newprog->eventthreadcount = 1;
- creates = rpcsvc_spawn_threads (svc, newprog);
-
- if (creates < 1) {
- goto out;
- }
+ gf_thread_create (&newprog->thread, NULL,
+ rpcsvc_request_handler,
+ newprog, "rpcsvcrh");
}
pthread_mutex_lock (&svc->rpclock);
@@ -2885,38 +2816,6 @@ out:
return ret;
}
-/* During reconfigure, Make sure to call this function after event-threads are
- * reconfigured as programs' threadcount will be made equal to event threads.
- */
-
-int
-rpcsvc_ownthread_reconf (rpcsvc_t *svc, int new_eventthreadcount)
-{
- int ret = -1;
- rpcsvc_program_t *program = NULL;
-
- if (!svc) {
- ret = 0;
- goto out;
- }
-
- pthread_rwlock_wrlock (&svc->rpclock);
- {
- list_for_each_entry (program, &svc->programs, program) {
- if (program->ownthread) {
- program->eventthreadcount =
- new_eventthreadcount;
- rpcsvc_spawn_threads (svc, program);
- }
- }
- }
- pthread_rwlock_unlock (&svc->rpclock);
-
- ret = 0;
-out:
- return ret;
-}
-
rpcsvc_actor_t gluster_dump_actors[GF_DUMP_MAXVALUE] = {
[GF_DUMP_NULL] = {"NULL", GF_DUMP_NULL, NULL, NULL, 0, DRC_NA},
diff --git a/rpc/rpc-lib/src/rpcsvc.h b/rpc/rpc-lib/src/rpcsvc.h
index 4ae2350..73507b6 100644
--- a/rpc/rpc-lib/src/rpcsvc.h
+++ b/rpc/rpc-lib/src/rpcsvc.h
@@ -412,12 +412,6 @@ struct rpcsvc_program {
pthread_mutex_t queue_lock;
pthread_cond_t queue_cond;
pthread_t thread;
- int threadcount;
- /* eventthreadcount is just a readonly copy of the actual value
- * owned by the event sub-system
- * It is used to control the scaling of rpcsvc_request_handler threads
- */
- int eventthreadcount;
};
typedef struct rpcsvc_cbk_program {
@@ -629,6 +623,4 @@ rpcsvc_auth_array (rpcsvc_t *svc, char *volname, int *autharr, int arrlen);
rpcsvc_vector_sizer
rpcsvc_get_program_vector_sizer (rpcsvc_t *svc, uint32_t prognum,
uint32_t progver, int procnum);
-extern int
-rpcsvc_ownthread_reconf (rpcsvc_t *svc, int new_eventthreadcount);
#endif
diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c
index 79f68e8..bc87a80 100644
--- a/xlators/protocol/server/src/server.c
+++ b/xlators/protocol/server/src/server.c
@@ -685,7 +685,7 @@ unref_transport:
UNLOCK (&ctx->volfile_lock);
if (victim_found) {
xlator_mem_cleanup (travxl);
- glusterfs_autoscale_threads (ctx, -1, this);
+ glusterfs_autoscale_threads (ctx, -1);
}
}
GF_FREE (xlator_name);
@@ -1054,12 +1054,6 @@ do_rpc:
ret = server_init_grace_timer (this, options, conf);
- /* rpcsvc thread reconfigure should be after events thread
- * reconfigure
- */
- new_nthread =
- ((struct event_pool *)(this->ctx->event_pool))->eventthreadcount;
- ret = rpcsvc_ownthread_reconf (rpc_conf, new_nthread);
out:
THIS = oldTHIS;
gf_msg_debug ("", 0, "returning %d", ret);
@@ -1672,10 +1666,9 @@ notify (xlator_t *this, int32_t event, void *data, ...)
glusterfs_mgmt_pmap_signout (ctx,
victim->name);
-
if (!xprt_found && victim_found) {
xlator_mem_cleanup (victim);
- glusterfs_autoscale_threads (ctx, -1, this);
+ glusterfs_autoscale_threads (ctx, -1);
}
}
break;
--
1.8.3.1

0281-Revert-program-GF-DUMP-Shield-ping-processing-from-t.patch

@@ -0,0 +1,223 @@
From 9fc6e38ec39cfeb58d1b0fa0c2878c608d49f8e8 Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Mon, 21 May 2018 19:04:37 +0530
Subject: [PATCH 281/282] Revert "program/GF-DUMP: Shield ping processing from
traffic to Glusterfs"
This reverts commit 2e72b24707f1886833db0b09e48b3f48b8d68d37.
> Reviewed-on: https://review.gluster.org/17105
Change-Id: Iad5dc50d24f7ab2c8261d1d44afd04b67c24c96d
BUG: 1558989
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/139309
Reviewed-by: Raghavendra Gowdappa <rgowdapp@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
rpc/rpc-lib/src/rpcsvc.c | 89 +--------------------------
rpc/rpc-lib/src/rpcsvc.h | 17 +----
xlators/protocol/server/src/server-rpc-fops.c | 1 -
3 files changed, 3 insertions(+), 104 deletions(-)
diff --git a/rpc/rpc-lib/src/rpcsvc.c b/rpc/rpc-lib/src/rpcsvc.c
index 68e27ab..9938b8f 100644
--- a/rpc/rpc-lib/src/rpcsvc.c
+++ b/rpc/rpc-lib/src/rpcsvc.c
@@ -303,7 +303,6 @@ rpcsvc_program_actor (rpcsvc_request_t *req)
goto err;
}
- req->ownthread = program->ownthread;
req->synctask = program->synctask;
err = SUCCESS;
@@ -426,7 +425,6 @@ rpcsvc_request_init (rpcsvc_t *svc, rpc_transport_t *trans,
req->trans_private = msg->private;
INIT_LIST_HEAD (&req->txlist);
- INIT_LIST_HEAD (&req->request_list);
req->payloadsize = 0;
/* By this time, the data bytes for the auth scheme would have already
@@ -577,7 +575,7 @@ rpcsvc_handle_rpc_call (rpcsvc_t *svc, rpc_transport_t *trans,
rpcsvc_request_t *req = NULL;
int ret = -1;
uint16_t port = 0;
- gf_boolean_t is_unix = _gf_false, empty = _gf_false;
+ gf_boolean_t is_unix = _gf_false;
gf_boolean_t unprivileged = _gf_false;
drc_cached_op_t *reply = NULL;
rpcsvc_drc_globals_t *drc = NULL;
@@ -693,20 +691,6 @@ rpcsvc_handle_rpc_call (rpcsvc_t *svc, rpc_transport_t *trans,
(synctask_fn_t) actor_fn,
rpcsvc_check_and_reply_error, NULL,
req);
- } else if (req->ownthread) {
- pthread_mutex_lock (&req->prog->queue_lock);
- {
- empty = list_empty (&req->prog->request_queue);
-
- list_add_tail (&req->request_list,
- &req->prog->request_queue);
-
- if (empty)
- pthread_cond_signal (&req->prog->queue_cond);
- }
- pthread_mutex_unlock (&req->prog->queue_lock);
-
- ret = 0;
} else {
ret = actor_fn (req);
}
@@ -1586,12 +1570,6 @@ rpcsvc_program_unregister (rpcsvc_t *svc, rpcsvc_program_t *program)
" Ver: %d, Port: %d", prog->progname, prog->prognum,
prog->progver, prog->progport);
- if (prog->ownthread) {
- prog->alive = _gf_false;
- ret = 0;
- goto out;
- }
-
pthread_mutex_lock (&svc->rpclock);
{
list_del_init (&prog->program);
@@ -1856,56 +1834,6 @@ out:
return ret;
}
-void *
-rpcsvc_request_handler (void *arg)
-{
- rpcsvc_program_t *program = arg;
- rpcsvc_request_t *req = NULL;
- rpcsvc_actor_t *actor = NULL;
- gf_boolean_t done = _gf_false;
- int ret = 0;
-
- if (!program)
- return NULL;
-
- while (1) {
- pthread_mutex_lock (&program->queue_lock);
- {
- if (!program->alive
- && list_empty (&program->request_queue)) {
- done = 1;
- goto unlock;
- }
-
- while (list_empty (&program->request_queue))
- pthread_cond_wait (&program->queue_cond,
- &program->queue_lock);
-
- req = list_entry (program->request_queue.next,
- typeof (*req), request_list);
-
- list_del_init (&req->request_list);
- }
- unlock:
- pthread_mutex_unlock (&program->queue_lock);
-
- if (done)
- break;
-
- THIS = req->svc->xl;
-
- actor = rpcsvc_program_actor (req);
-
- ret = actor->actor (req);
-
- if (ret != 0) {
- rpcsvc_check_and_reply_error (ret, NULL, req);
- }
- }
-
- return NULL;
-}
-
int
rpcsvc_program_register (rpcsvc_t *svc, rpcsvc_program_t *program)
{
@@ -1946,21 +1874,6 @@ rpcsvc_program_register (rpcsvc_t *svc, rpcsvc_program_t *program)
memcpy (newprog, program, sizeof (*program));
INIT_LIST_HEAD (&newprog->program);
- INIT_LIST_HEAD (&newprog->request_queue);
- pthread_mutex_init (&newprog->queue_lock, NULL);
- pthread_cond_init (&newprog->queue_cond, NULL);
-
- newprog->alive = _gf_true;
-
- /* make sure synctask gets priority over ownthread */
- if (newprog->synctask)
- newprog->ownthread = _gf_false;
-
- if (newprog->ownthread) {
- gf_thread_create (&newprog->thread, NULL,
- rpcsvc_request_handler,
- newprog, "rpcsvcrh");
- }
pthread_mutex_lock (&svc->rpclock);
{
diff --git a/rpc/rpc-lib/src/rpcsvc.h b/rpc/rpc-lib/src/rpcsvc.h
index 73507b6..34429b4 100644
--- a/rpc/rpc-lib/src/rpcsvc.h
+++ b/rpc/rpc-lib/src/rpcsvc.h
@@ -233,9 +233,7 @@ struct rpcsvc_request {
*/
rpcsvc_auth_data_t verf;
- /* Execute this request's actor function in ownthread of program?*/
- gf_boolean_t ownthread;
-
+ /* Execute this request's actor function as a synctask?*/
gf_boolean_t synctask;
/* Container for a RPC program wanting to store a temp
* request-specific item.
@@ -247,9 +245,6 @@ struct rpcsvc_request {
/* pointer to cached reply for use in DRC */
drc_cached_op_t *reply;
-
- /* request queue in rpcsvc */
- struct list_head request_list;
};
#define rpcsvc_request_program(req) ((rpcsvc_program_t *)((req)->prog))
@@ -400,18 +395,10 @@ struct rpcsvc_program {
*/
int min_auth;
- /* Execute actor function in program's own thread? This will reduce */
- /* the workload on poller threads */
- gf_boolean_t ownthread;
- gf_boolean_t alive;
-
+ /* Execute actor function as a synctask? */
gf_boolean_t synctask;
/* list member to link to list of registered services with rpcsvc */
struct list_head program;
- struct list_head request_queue;
- pthread_mutex_t queue_lock;
- pthread_cond_t queue_cond;
- pthread_t thread;
};
typedef struct rpcsvc_cbk_program {
diff --git a/xlators/protocol/server/src/server-rpc-fops.c b/xlators/protocol/server/src/server-rpc-fops.c
index 91d5c03..0bf41d8 100644
--- a/xlators/protocol/server/src/server-rpc-fops.c
+++ b/xlators/protocol/server/src/server-rpc-fops.c
@@ -6160,5 +6160,4 @@ struct rpcsvc_program glusterfs3_3_fop_prog = {
.progver = GLUSTER_FOP_VERSION,
.numactors = GLUSTER_FOP_PROCCNT,
.actors = glusterfs3_3_fop_actors,
- .ownthread = _gf_true,
};
--
1.8.3.1
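
Patches 0277 and 0281 together retire the "ownthread" path, where each RPC program ran its actors on its own thread fed by a request queue instead of on the epoll poller threads. A rough, self-contained sketch of that producer/consumer mechanism follows; the names are illustrative, not the real rpcsvc types.

/* Illustrative sketch of the ownthread mechanism being removed:
 * the poller enqueues each request and signals a condvar; the
 * program's own worker dequeues and runs the actor off-poller. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
        struct req *next;
        void (*actor) (struct req *r);
};

struct program {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        struct req     *head, *tail;
        int             alive;
};

/* poller side: queue the request and wake the worker */
static void
enqueue (struct program *p, struct req *r)
{
        pthread_mutex_lock (&p->lock);
        r->next = NULL;
        if (p->tail)
                p->tail->next = r;
        else
                p->head = r;
        p->tail = r;
        pthread_cond_signal (&p->cond);
        pthread_mutex_unlock (&p->lock);
}

/* the program's own thread: drain the queue, run actors unlocked */
static void *
request_handler (void *arg)
{
        struct program *p = arg;

        for (;;) {
                struct req *r;

                pthread_mutex_lock (&p->lock);
                while (p->alive && !p->head)
                        pthread_cond_wait (&p->cond, &p->lock);
                if (!p->alive && !p->head) {
                        pthread_mutex_unlock (&p->lock);
                        return NULL;
                }
                r = p->head;
                p->head = r->next;
                if (!p->head)
                        p->tail = NULL;
                pthread_mutex_unlock (&p->lock);

                r->actor (r);         /* FOP work happens off the poller */
                free (r);
        }
}

static void
demo_actor (struct req *r)
{
        (void)r;
        printf ("actor ran on worker thread\n");
}

int
main (void)
{
        struct program p = { PTHREAD_MUTEX_INITIALIZER,
                             PTHREAD_COND_INITIALIZER, NULL, NULL, 1 };
        pthread_t tid;
        struct req *r = calloc (1, sizeof (*r));

        r->actor = demo_actor;
        pthread_create (&tid, NULL, request_handler, &p);
        enqueue (&p, r);

        pthread_mutex_lock (&p.lock);     /* shut the worker down */
        p.alive = 0;
        pthread_cond_signal (&p.cond);
        pthread_mutex_unlock (&p.lock);
        pthread_join (tid, NULL);
        return 0;
}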

0282-cluster-dht-Remove-EIO-from-dht_inode_missing.patch

@@ -0,0 +1,57 @@
From 5d649e42f6566f6f2f3425298cfee48d170ab4ea Mon Sep 17 00:00:00 2001
From: N Balachandran <nbalacha@redhat.com>
Date: Wed, 16 May 2018 18:15:02 +0530
Subject: [PATCH 282/282] cluster/dht: Remove EIO from dht_inode_missing
Removed EIO from the list of errnos that triggered
a migrate check task.
upstream patch: https://review.gluster.org/#/c/20028/
> Change-Id: I7f89c7a16056421588f1af2377cebe6affddcb47
> fixes: bz#1578823
> Signed-off-by: N Balachandran <nbalacha@redhat.com>
Change-Id: I1962f0db916af71d2903814dc01f0c0809e57ce7
BUG: 1580344
Signed-off-by: N Balachandran <nbalacha@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/139188
Tested-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
---
xlators/cluster/dht/src/dht-common.h | 4 +---
xlators/cluster/dht/src/dht-inode-read.c | 2 +-
2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/xlators/cluster/dht/src/dht-common.h b/xlators/cluster/dht/src/dht-common.h
index 10b7c7e..a70342f 100644
--- a/xlators/cluster/dht/src/dht-common.h
+++ b/xlators/cluster/dht/src/dht-common.h
@@ -795,9 +795,7 @@ typedef struct dht_fd_ctx {
} \
} while (0)
-#define dht_inode_missing(op_errno) (op_errno == ENOENT || op_errno == ESTALE \
- || op_errno == EIO) \
-/*Bad fix. Please revert the commit after fixing the bug 1329505*/
+#define dht_inode_missing(op_errno) (op_errno == ENOENT || op_errno == ESTALE)
#define check_is_dir(i,s,x) (IA_ISDIR(s->ia_type))
diff --git a/xlators/cluster/dht/src/dht-inode-read.c b/xlators/cluster/dht/src/dht-inode-read.c
index d1895eb..7e4aef4 100644
--- a/xlators/cluster/dht/src/dht-inode-read.c
+++ b/xlators/cluster/dht/src/dht-inode-read.c
@@ -78,7 +78,7 @@ dht_open2 (xlator_t *this, xlator_t *subvol, call_frame_t *frame, int ret)
goto out;
local = frame->local;
- op_errno = ENOENT;
+ op_errno = local->op_errno;
if (we_are_not_migrating (ret)) {
/* This DHT layer is not migrating the file */
--
1.8.3.1
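
For context, callers such as dht_open2() use dht_inode_missing() to decide whether an error plausibly means "the file migrated to another subvolume" (worth a migrate-check and retry) or is a real error to unwind. A hedged sketch of that decision — illustrative, not the actual dht callback code:

#include <errno.h>
#include <stdio.h>

/* the macro as it stands after this patch */
#define dht_inode_missing(op_errno) \
        (op_errno == ENOENT || op_errno == ESTALE)

/* illustrative sketch, not the real dht callback */
static int
should_check_migration (int op_ret, int op_errno)
{
        /* only ENOENT/ESTALE hint that the file may have moved to
         * another subvolume; EIO is now unwound to the application */
        return (op_ret == -1 && dht_inode_missing (op_errno));
}

int
main (void)
{
        printf ("ENOENT -> %d\n", should_check_migration (-1, ENOENT)); /* 1 */
        printf ("EIO    -> %d\n", should_check_migration (-1, EIO));    /* 0 */
        return 0;
}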

glusterfs.spec

@@ -192,7 +192,7 @@ Release: 0.1%{?prereltag:.%{prereltag}}%{?dist}
%else
Name: glusterfs
Version: 3.12.2
-Release: 11%{?dist}
+Release: 12%{?dist}
%endif
License: GPLv2 or LGPLv3+
Group: System Environment/Base
@@ -539,6 +539,14 @@ Patch0271: 0271-cli-Fix-for-gluster-volume-info-xml.patch
Patch0272: 0272-readdir-ahead-Fix-an-issue-with-parallel-readdir-and.patch
Patch0273: 0273-rpcsvc-Turn-off-ownthreads-for-Glusterfs-program.patch
Patch0274: 0274-client-protocol-fix-the-log-level-for-removexattr_cb.patch
+Patch0275: 0275-afr-fix-bug-1363721.t-failure.patch
+Patch0276: 0276-tests-check-volume-status-for-shd-being-up.patch
+Patch0277: 0277-Revert-rpcsvc-Turn-off-ownthreads-for-Glusterfs-prog.patch
+Patch0278: 0278-Revert-rpcsvc-correct-event-thread-scaling.patch
+Patch0279: 0279-Revert-rpc-make-actor-search-parallel.patch
+Patch0280: 0280-Revert-rpcsvc-scale-rpcsvc_request_handler-threads.patch
+Patch0281: 0281-Revert-program-GF-DUMP-Shield-ping-processing-from-t.patch
+Patch0282: 0282-cluster-dht-Remove-EIO-from-dht_inode_missing.patch

%description
GlusterFS is a distributed file-system capable of scaling to several
@@ -2485,6 +2493,9 @@ fi
%endif

%changelog
+* Thu May 24 2018 Milind Changire <mchangir@redhat.com> - 3.12.2-12
+- fixes bugs bz#1558989 bz#1580344 bz#1581057 bz#1581219
+
* Thu May 17 2018 Milind Changire <mchangir@redhat.com> - 3.12.2-11
- fixes bugs bz#1558989 bz#1575555 bz#1578647