From f6d967cd70ff41a0f93c54d50128c468e9d5dea9 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC <rkavunga@redhat.com>
Date: Thu, 11 Jul 2019 12:49:21 +0530
Subject: [PATCH 244/255] Revert "ec/shd: Cleanup self heal daemon resources
 during ec fini"

This reverts commit edc238e40060773f5f5fd59fcdad8ae27d65749f.

BUG: 1471742
Change-Id: If6cb5941b964f005454a21a67938b354ef1a2037
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/175953
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 libglusterfs/src/syncop-utils.c          |  2 -
 xlators/cluster/afr/src/afr-self-heald.c |  5 ---
 xlators/cluster/ec/src/ec-heald.c        | 77 +++++---------------------------
 xlators/cluster/ec/src/ec-heald.h        |  3 --
 xlators/cluster/ec/src/ec-messages.h     |  3 +-
 xlators/cluster/ec/src/ec.c              | 47 -------------------
 6 files changed, 13 insertions(+), 124 deletions(-)

diff --git a/libglusterfs/src/syncop-utils.c b/libglusterfs/src/syncop-utils.c
index 4167db4..b842142 100644
--- a/libglusterfs/src/syncop-utils.c
+++ b/libglusterfs/src/syncop-utils.c
@@ -354,8 +354,6 @@ syncop_mt_dir_scan(call_frame_t *frame, xlator_t *subvol, loc_t *loc, int pid,
 
     if (frame) {
         this = frame->this;
-    } else {
-        this = THIS;
     }
 
     /*For this functionality to be implemented in general, we need
diff --git a/xlators/cluster/afr/src/afr-self-heald.c b/xlators/cluster/afr/src/afr-self-heald.c
index 522fe5d..8bc4720 100644
--- a/xlators/cluster/afr/src/afr-self-heald.c
+++ b/xlators/cluster/afr/src/afr-self-heald.c
@@ -524,11 +524,6 @@ afr_shd_full_heal(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
     afr_private_t *priv = NULL;
 
     priv = this->private;
-
-    if (this->cleanup_starting) {
-        return -ENOTCONN;
-    }
-
     if (!priv->shd.enabled)
         return -EBUSY;
 
diff --git a/xlators/cluster/ec/src/ec-heald.c b/xlators/cluster/ec/src/ec-heald.c
index edf5e11..cba111a 100644
--- a/xlators/cluster/ec/src/ec-heald.c
+++ b/xlators/cluster/ec/src/ec-heald.c
@@ -71,11 +71,6 @@ disabled_loop:
             break;
     }
 
-    if (ec->shutdown) {
-        healer->running = _gf_false;
-        return -1;
-    }
-
     ret = healer->rerun;
     healer->rerun = 0;
 
@@ -246,11 +241,9 @@ ec_shd_index_sweep(struct subvol_healer *healer)
         goto out;
     }
 
-    _mask_cancellation();
     ret = syncop_mt_dir_scan(NULL, subvol, &loc, GF_CLIENT_PID_SELF_HEALD,
                              healer, ec_shd_index_heal, xdata,
                              ec->shd.max_threads, ec->shd.wait_qlength);
-    _unmask_cancellation();
 out:
     if (xdata)
         dict_unref(xdata);
@@ -270,11 +263,6 @@ ec_shd_full_heal(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
     int ret = 0;
 
     ec = this->private;
-
-    if (this->cleanup_starting) {
-        return -ENOTCONN;
-    }
-
     if (ec->xl_up_count <= ec->fragments) {
         return -ENOTCONN;
     }
@@ -317,15 +305,11 @@ ec_shd_full_sweep(struct subvol_healer *healer, inode_t *inode)
 {
     ec_t *ec = NULL;
     loc_t loc = {0};
-    int ret = -1;
 
     ec = healer->this->private;
     loc.inode = inode;
-    _mask_cancellation();
-    ret = syncop_ftw(ec->xl_list[healer->subvol], &loc,
-                     GF_CLIENT_PID_SELF_HEALD, healer, ec_shd_full_heal);
-    _unmask_cancellation();
-    return ret;
+    return syncop_ftw(ec->xl_list[healer->subvol], &loc,
+                      GF_CLIENT_PID_SELF_HEALD, healer, ec_shd_full_heal);
 }
 
 void *
@@ -333,16 +317,13 @@ ec_shd_index_healer(void *data)
 {
     struct subvol_healer *healer = NULL;
     xlator_t *this = NULL;
-    int run = 0;
 
     healer = data;
    THIS = this = healer->this;
     ec_t *ec = this->private;
 
     for (;;) {
-        run = ec_shd_healer_wait(healer);
-        if (run == -1)
-            break;
+        ec_shd_healer_wait(healer);
 
         if (ec->xl_up_count > ec->fragments) {
             gf_msg_debug(this->name, 0, "starting index sweep on subvol %s",
@@ -371,12 +352,16 @@ ec_shd_full_healer(void *data)
 
     rootloc.inode = this->itable->root;
     for (;;) {
-        run = ec_shd_healer_wait(healer);
-        if (run < 0) {
-            break;
-        } else if (run == 0) {
-            continue;
+        pthread_mutex_lock(&healer->mutex);
+        {
+            run = __ec_shd_healer_wait(healer);
+            if (!run)
+                healer->running = _gf_false;
         }
+        pthread_mutex_unlock(&healer->mutex);
+
+        if (!run)
+            break;
 
         if (ec->xl_up_count > ec->fragments) {
             gf_msg(this->name, GF_LOG_INFO, 0, EC_MSG_FULL_SWEEP_START,
@@ -577,41 +562,3 @@ out:
     dict_del(output, this->name);
     return ret;
 }
-
-void
-ec_destroy_healer_object(xlator_t *this, struct subvol_healer *healer)
-{
-    if (!healer)
-        return;
-
-    pthread_cond_destroy(&healer->cond);
-    pthread_mutex_destroy(&healer->mutex);
-}
-
-void
-ec_selfheal_daemon_fini(xlator_t *this)
-{
-    struct subvol_healer *healer = NULL;
-    ec_self_heald_t *shd = NULL;
-    ec_t *priv = NULL;
-    int i = 0;
-
-    priv = this->private;
-    if (!priv)
-        return;
-
-    shd = &priv->shd;
-    if (!shd->iamshd)
-        return;
-
-    for (i = 0; i < priv->nodes; i++) {
-        healer = &shd->index_healers[i];
-        ec_destroy_healer_object(this, healer);
-
-        healer = &shd->full_healers[i];
-        ec_destroy_healer_object(this, healer);
-    }
-
-    GF_FREE(shd->index_healers);
-    GF_FREE(shd->full_healers);
-}
diff --git a/xlators/cluster/ec/src/ec-heald.h b/xlators/cluster/ec/src/ec-heald.h
index 8184cf4..2eda2a7 100644
--- a/xlators/cluster/ec/src/ec-heald.h
+++ b/xlators/cluster/ec/src/ec-heald.h
@@ -24,7 +24,4 @@ ec_selfheal_daemon_init(xlator_t *this);
 void
 ec_shd_index_healer_wake(ec_t *ec);
 
-void
-ec_selfheal_daemon_fini(xlator_t *this);
-
 #endif /* __EC_HEALD_H__ */
diff --git a/xlators/cluster/ec/src/ec-messages.h b/xlators/cluster/ec/src/ec-messages.h
index ce299bb..7c28808 100644
--- a/xlators/cluster/ec/src/ec-messages.h
+++ b/xlators/cluster/ec/src/ec-messages.h
@@ -55,7 +55,6 @@ GLFS_MSGID(EC, EC_MSG_INVALID_CONFIG, EC_MSG_HEAL_FAIL,
            EC_MSG_CONFIG_XATTR_INVALID, EC_MSG_EXTENSION, EC_MSG_EXTENSION_NONE,
            EC_MSG_EXTENSION_UNKNOWN, EC_MSG_EXTENSION_UNSUPPORTED,
            EC_MSG_EXTENSION_FAILED, EC_MSG_NO_GF, EC_MSG_MATRIX_FAILED,
-           EC_MSG_DYN_CREATE_FAILED, EC_MSG_DYN_CODEGEN_FAILED,
-           EC_MSG_THREAD_CLEANUP_FAILED);
+           EC_MSG_DYN_CREATE_FAILED, EC_MSG_DYN_CODEGEN_FAILED);
 
 #endif /* !_EC_MESSAGES_H_ */
diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
index 264582a..3c8013e 100644
--- a/xlators/cluster/ec/src/ec.c
+++ b/xlators/cluster/ec/src/ec.c
@@ -429,51 +429,6 @@ ec_disable_delays(ec_t *ec)
 }
 
 void
-ec_cleanup_healer_object(ec_t *ec)
-{
-    struct subvol_healer *healer = NULL;
-    ec_self_heald_t *shd = NULL;
-    void *res = NULL;
-    int i = 0;
-    gf_boolean_t is_join = _gf_false;
-
-    shd = &ec->shd;
-    if (!shd->iamshd)
-        return;
-
-    for (i = 0; i < ec->nodes; i++) {
-        healer = &shd->index_healers[i];
-        pthread_mutex_lock(&healer->mutex);
-        {
-            healer->rerun = 1;
-            if (healer->running) {
-                pthread_cond_signal(&healer->cond);
-                is_join = _gf_true;
-            }
-        }
-        pthread_mutex_unlock(&healer->mutex);
-        if (is_join) {
-            pthread_join(healer->thread, &res);
-            is_join = _gf_false;
-        }
-
-        healer = &shd->full_healers[i];
-        pthread_mutex_lock(&healer->mutex);
-        {
-            healer->rerun = 1;
-            if (healer->running) {
-                pthread_cond_signal(&healer->cond);
-                is_join = _gf_true;
-            }
-        }
-        pthread_mutex_unlock(&healer->mutex);
-        if (is_join) {
-            pthread_join(healer->thread, &res);
-            is_join = _gf_false;
-        }
-    }
-}
-void
 ec_pending_fops_completed(ec_t *ec)
 {
     if (ec->shutdown) {
@@ -589,7 +544,6 @@ ec_notify(xlator_t *this, int32_t event, void *data, void *data2)
         /* If there aren't pending fops running after we have waken up
          * them, we immediately propagate the notification. */
         propagate = ec_disable_delays(ec);
-        ec_cleanup_healer_object(ec);
         goto unlock;
     }
 
@@ -805,7 +759,6 @@ failed:
 void
 fini(xlator_t *this)
 {
-    ec_selfheal_daemon_fini(this);
     __ec_destroy_private(this);
 }
 
-- 
1.8.3.1