autobuild v3.12.2-23

Resolves: bz#1631372 bz#1636902 bz#1637084
Signed-off-by: Sunil Kumar Acharya <sheggodu@redhat.com>
Sunil Kumar Acharya 2018-10-16 12:04:08 -04:00
parent 86e0efee73
commit 40397910b3
6 changed files with 891 additions and 1 deletion

0400-all-fix-the-format-warnings-due-to-strict-check.patch

@@ -0,0 +1,427 @@
From c86df7778fd1d93a74892f36fa6fec14d3d20707 Mon Sep 17 00:00:00 2001
From: Amar Tumballi <amarts@redhat.com>
Date: Wed, 10 Oct 2018 10:42:53 +0530
Subject: [PATCH 400/404] all: fix the format warnings due to strict check
The fix for the CVE listed below added a strict format check,
which revealed multiple issues with format strings. This patch
fixes all the warnings introduced by that check.
Updates: CVE-2018-14661
BUG: 1637084
Change-Id: Ic1702b264fa4c8ad23d3836fcd1d6dc2ca8bc4b1
Signed-off-by: Amar Tumballi <amarts@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/152352
Reviewed-by: Xavi Hernandez <xhernandez@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/fd.c | 2 +-
libglusterfs/src/inode.c | 2 +-
libglusterfs/src/iobuf.c | 8 ++++----
libglusterfs/src/stack.c | 6 +++---
libglusterfs/src/statedump.c | 11 ++++++-----
rpc/rpc-lib/src/rpc-drc.c | 4 ++--
xlators/cluster/dht/src/dht-shared.c | 2 +-
xlators/cluster/stripe/src/stripe.c | 4 ++--
xlators/debug/trace/src/trace.c | 2 +-
xlators/features/quota/src/quota.c | 2 +-
xlators/mount/fuse/src/fuse-bridge.c | 6 +++---
xlators/performance/io-cache/src/io-cache.c | 4 ++--
xlators/performance/nl-cache/src/nl-cache-helper.c | 6 +++---
xlators/performance/open-behind/src/open-behind.c | 2 +-
xlators/performance/quick-read/src/quick-read.c | 4 ++--
xlators/performance/read-ahead/src/read-ahead.c | 6 ++----
xlators/performance/write-behind/src/write-behind.c | 6 +++---
xlators/protocol/client/src/client.c | 2 +-
xlators/storage/posix/src/posix.c | 10 +++++-----
19 files changed, 44 insertions(+), 45 deletions(-)
diff --git a/libglusterfs/src/fd.c b/libglusterfs/src/fd.c
index 27c8e13..ed80bd3 100644
--- a/libglusterfs/src/fd.c
+++ b/libglusterfs/src/fd.c
@@ -1050,7 +1050,7 @@ fd_dump (fd_t *fd, char *prefix)
return;
memset(key, 0, sizeof(key));
- gf_proc_dump_write("pid", "%llu", fd->pid);
+ gf_proc_dump_write("pid", "%" PRIu64, fd->pid);
gf_proc_dump_write("refcount", "%d", fd->refcount);
gf_proc_dump_write("flags", "%d", fd->flags);
diff --git a/libglusterfs/src/inode.c b/libglusterfs/src/inode.c
index 1bc05a4..29d3c8f 100644
--- a/libglusterfs/src/inode.c
+++ b/libglusterfs/src/inode.c
@@ -2415,7 +2415,7 @@ inode_table_dump (inode_table_t *itable, char *prefix)
}
gf_proc_dump_build_key(key, prefix, "hashsize");
- gf_proc_dump_write(key, "%d", itable->hashsize);
+ gf_proc_dump_write(key, "%"GF_PRI_SIZET, itable->hashsize);
gf_proc_dump_build_key(key, prefix, "name");
gf_proc_dump_write(key, "%s", itable->name);
diff --git a/libglusterfs/src/iobuf.c b/libglusterfs/src/iobuf.c
index f6b8558..a22dbd3 100644
--- a/libglusterfs/src/iobuf.c
+++ b/libglusterfs/src/iobuf.c
@@ -1169,7 +1169,7 @@ iobuf_arena_info_dump (struct iobuf_arena *iobuf_arena, const char *key_prefix)
gf_proc_dump_build_key(key, key_prefix, "alloc_cnt");
gf_proc_dump_write(key, "%"PRIu64, iobuf_arena->alloc_cnt);
gf_proc_dump_build_key(key, key_prefix, "max_active");
- gf_proc_dump_write(key, "%"PRIu64, iobuf_arena->max_active);
+ gf_proc_dump_write(key, "%d", iobuf_arena->max_active);
gf_proc_dump_build_key(key, key_prefix, "page_size");
gf_proc_dump_write(key, "%"PRIu64, iobuf_arena->page_size);
list_for_each_entry (trav, &iobuf_arena->active.list, list) {
@@ -1202,9 +1202,9 @@ iobuf_stats_dump (struct iobuf_pool *iobuf_pool)
}
gf_proc_dump_add_section("iobuf.global");
gf_proc_dump_write("iobuf_pool","%p", iobuf_pool);
- gf_proc_dump_write("iobuf_pool.default_page_size", "%d",
- iobuf_pool->default_page_size);
- gf_proc_dump_write("iobuf_pool.arena_size", "%d",
+ gf_proc_dump_write("iobuf_pool.default_page_size", "%" GF_PRI_SIZET,
+ iobuf_pool->default_page_size);
+ gf_proc_dump_write("iobuf_pool.arena_size", "%" GF_PRI_SIZET,
iobuf_pool->arena_size);
gf_proc_dump_write("iobuf_pool.arena_cnt", "%d",
iobuf_pool->arena_cnt);
diff --git a/libglusterfs/src/stack.c b/libglusterfs/src/stack.c
index d64ac8a..6fbd2bb 100644
--- a/libglusterfs/src/stack.c
+++ b/libglusterfs/src/stack.c
@@ -144,7 +144,7 @@ gf_proc_dump_call_frame (call_frame_t *call_frame, const char *key_buf,...)
out:
if (ret) {
gf_proc_dump_write("Unable to dump the frame information",
- "(Lock acquisition failed) %p", my_frame);
+ "(Lock acquisition failed)");
return;
}
}
@@ -183,7 +183,7 @@ gf_proc_dump_call_stack (call_stack_t *call_stack, const char *key_buf,...)
gf_proc_dump_write("uid", "%d", call_stack->uid);
gf_proc_dump_write("gid", "%d", call_stack->gid);
gf_proc_dump_write("pid", "%d", call_stack->pid);
- gf_proc_dump_write("unique", "%Ld", call_stack->unique);
+ gf_proc_dump_write("unique", "%" PRIu64, call_stack->unique);
gf_proc_dump_write("lk-owner", "%s", lkowner_utoa (&call_stack->lk_owner));
if (call_stack->type == GF_OP_TYPE_FOP)
@@ -222,7 +222,7 @@ gf_proc_dump_pending_frames (call_pool_t *call_pool)
gf_proc_dump_add_section("global.callpool");
section_added = _gf_true;
gf_proc_dump_write("callpool_address","%p", call_pool);
- gf_proc_dump_write("callpool.cnt","%d", call_pool->cnt);
+ gf_proc_dump_write("callpool.cnt", "%" PRId64, call_pool->cnt);
list_for_each_entry (trav, &call_pool->all_frames, all_frames) {
diff --git a/libglusterfs/src/statedump.c b/libglusterfs/src/statedump.c
index e9ecef5..a123adb 100644
--- a/libglusterfs/src/statedump.c
+++ b/libglusterfs/src/statedump.c
@@ -240,10 +240,11 @@ gf_proc_dump_xlator_mem_info (xlator_t *xl)
gf_proc_dump_add_section ("%s.%s - usage-type %s memusage",
xl->type, xl->name,
xl->mem_acct->rec[i].typestr);
- gf_proc_dump_write ("size", "%u", xl->mem_acct->rec[i].size);
+ gf_proc_dump_write ("size", "%" GF_PRI_SIZET,
+ xl->mem_acct->rec[i].size);
gf_proc_dump_write ("num_allocs", "%u",
xl->mem_acct->rec[i].num_allocs);
- gf_proc_dump_write ("max_size", "%u",
+ gf_proc_dump_write ("max_size", "%" GF_PRI_SIZET,
xl->mem_acct->rec[i].max_size);
gf_proc_dump_write ("max_num_allocs", "%u",
xl->mem_acct->rec[i].max_num_allocs);
@@ -275,9 +276,9 @@ gf_proc_dump_xlator_mem_info_only_in_use (xlator_t *xl)
gf_proc_dump_add_section ("%s.%s - usage-type %d", xl->type,
xl->name,i);
- gf_proc_dump_write ("size", "%u",
+ gf_proc_dump_write ("size", "%" GF_PRI_SIZET,
xl->mem_acct->rec[i].size);
- gf_proc_dump_write ("max_size", "%u",
+ gf_proc_dump_write ("max_size", "%" GF_PRI_SIZET,
xl->mem_acct->rec[i].max_size);
gf_proc_dump_write ("num_allocs", "%u",
xl->mem_acct->rec[i].num_allocs);
@@ -475,7 +476,7 @@ gf_proc_dump_dict_info (glusterfs_ctx_t *ctx)
total_dicts = GF_ATOMIC_GET (ctx->stats.total_dicts_used);
total_pairs = GF_ATOMIC_GET (ctx->stats.total_pairs_used);
- gf_proc_dump_write ("max-pairs-per-dict", "%u",
+ gf_proc_dump_write ("max-pairs-per-dict", "%" PRIu64,
GF_ATOMIC_GET (ctx->stats.max_dict_pairs));
gf_proc_dump_write ("total-pairs-used", "%lu", total_pairs);
gf_proc_dump_write ("total-dicts-used", "%lu", total_dicts);
diff --git a/rpc/rpc-lib/src/rpc-drc.c b/rpc/rpc-lib/src/rpc-drc.c
index fb7d2f1..f597432 100644
--- a/rpc/rpc-lib/src/rpc-drc.c
+++ b/rpc/rpc-lib/src/rpc-drc.c
@@ -565,10 +565,10 @@ rpcsvc_drc_priv (rpcsvc_drc_globals_t *drc)
gf_proc_dump_write (key, "%d", drc->lru_factor);
gf_proc_dump_build_key (key, "drc", "duplicate_request_count");
- gf_proc_dump_write (key, "%d", drc->cache_hits);
+ gf_proc_dump_write (key, "%" PRIu64, drc->cache_hits);
gf_proc_dump_build_key (key, "drc", "in_transit_duplicate_requests");
- gf_proc_dump_write (key, "%d", drc->intransit_hits);
+ gf_proc_dump_write (key, "%" PRIu64, drc->intransit_hits);
list_for_each_entry (client, &drc->clients_head, client_list) {
gf_proc_dump_build_key (key, "client", "%d.ip-address", i);
diff --git a/xlators/cluster/dht/src/dht-shared.c b/xlators/cluster/dht/src/dht-shared.c
index 2f0d8ce..4aef8ff 100644
--- a/xlators/cluster/dht/src/dht-shared.c
+++ b/xlators/cluster/dht/src/dht-shared.c
@@ -179,7 +179,7 @@ dht_priv_dump (xlator_t *this)
conf->du_stats[i].avail_inodes);
snprintf (key, sizeof (key), "du_stats[%d].log", i);
- gf_proc_dump_write (key, "%lu",
+ gf_proc_dump_write (key, "%" PRIu32,
conf->du_stats[i].log);
}
}
diff --git a/xlators/cluster/stripe/src/stripe.c b/xlators/cluster/stripe/src/stripe.c
index 67006ab..6b32f7f 100644
--- a/xlators/cluster/stripe/src/stripe.c
+++ b/xlators/cluster/stripe/src/stripe.c
@@ -5688,12 +5688,12 @@ stripe_priv_dump (xlator_t *this)
options = priv->pattern;
while (options != NULL) {
gf_proc_dump_write ("path_pattern", "%s", priv->pattern->path_pattern);
- gf_proc_dump_write ("options_block_size", "%ul", options->block_size);
+ gf_proc_dump_write ("options_block_size", "%" PRIu64, options->block_size);
options = options->next;
}
- gf_proc_dump_write ("block_size", "%ul", priv->block_size);
+ gf_proc_dump_write ("block_size", "%" PRIu64, priv->block_size);
gf_proc_dump_write ("nodes-down", "%d", priv->nodes_down);
gf_proc_dump_write ("first-child_down", "%d", priv->first_child_down);
gf_proc_dump_write ("xattr_supported", "%d", priv->xattr_supported);
diff --git a/xlators/debug/trace/src/trace.c b/xlators/debug/trace/src/trace.c
index 34ac4ca..602e130 100644
--- a/xlators/debug/trace/src/trace.c
+++ b/xlators/debug/trace/src/trace.c
@@ -68,7 +68,7 @@ dump_history_trace (circular_buffer_t *cb, void *data)
".%"GF_PRI_SUSECONDS, cb->tv.tv_usec);
gf_proc_dump_write ("TIME", "%s", timestr);
- gf_proc_dump_write ("FOP", "%s\n", cb->data);
+ gf_proc_dump_write ("FOP", "%s\n", (char *)cb->data);
return 0;
}
diff --git a/xlators/features/quota/src/quota.c b/xlators/features/quota/src/quota.c
index 3d68ffa..71068d3 100644
--- a/xlators/features/quota/src/quota.c
+++ b/xlators/features/quota/src/quota.c
@@ -5224,7 +5224,7 @@ quota_priv_dump (xlator_t *this)
if (!priv)
goto out;
- gf_proc_dump_add_section ("xlators.features.quota.priv", this->name);
+ gf_proc_dump_add_section ("xlators.features.quota.priv");
ret = TRY_LOCK (&priv->lock);
if (ret)
diff --git a/xlators/mount/fuse/src/fuse-bridge.c b/xlators/mount/fuse/src/fuse-bridge.c
index 1c4f4e4..fbb4c53 100644
--- a/xlators/mount/fuse/src/fuse-bridge.c
+++ b/xlators/mount/fuse/src/fuse-bridge.c
@@ -5148,11 +5148,11 @@ fuse_priv_dump (xlator_t *this)
private->proto_minor);
gf_proc_dump_write("volfile", "%s",
private->volfile?private->volfile:"None");
- gf_proc_dump_write("volfile_size", "%d",
+ gf_proc_dump_write("volfile_size", "%" GF_PRI_SIZET,
private->volfile_size);
gf_proc_dump_write("mount_point", "%s",
private->mount_point);
- gf_proc_dump_write("iobuf", "%u",
+ gf_proc_dump_write("iobuf", "%p",
private->iobuf);
gf_proc_dump_write("fuse_thread_started", "%d",
(int)private->fuse_thread_started);
@@ -5208,7 +5208,7 @@ dump_history_fuse (circular_buffer_t *cb, void *data)
".%"GF_PRI_SUSECONDS, cb->tv.tv_usec);
gf_proc_dump_write ("TIME", "%s", timestr);
- gf_proc_dump_write ("message", "%s\n", cb->data);
+ gf_proc_dump_write ("message", "%s\n", (char *)cb->data);
return 0;
}
diff --git a/xlators/performance/io-cache/src/io-cache.c b/xlators/performance/io-cache/src/io-cache.c
index 700d8c2..de44ad2 100644
--- a/xlators/performance/io-cache/src/io-cache.c
+++ b/xlators/performance/io-cache/src/io-cache.c
@@ -2065,8 +2065,8 @@ ioc_priv_dump (xlator_t *this)
gf_proc_dump_write ("cache_used", "%ld", priv->cache_used);
gf_proc_dump_write ("inode_count", "%u", priv->inode_count);
gf_proc_dump_write ("cache_timeout", "%u", priv->cache_timeout);
- gf_proc_dump_write ("min-file-size", "%u", priv->min_file_size);
- gf_proc_dump_write ("max-file-size", "%u", priv->max_file_size);
+ gf_proc_dump_write ("min-file-size", "%" PRIu64, priv->min_file_size);
+ gf_proc_dump_write ("max-file-size", "%" PRIu64, priv->max_file_size);
}
pthread_mutex_unlock (&priv->table_lock);
out:
diff --git a/xlators/performance/nl-cache/src/nl-cache-helper.c b/xlators/performance/nl-cache/src/nl-cache-helper.c
index 583d67b..b6f1a17 100644
--- a/xlators/performance/nl-cache/src/nl-cache-helper.c
+++ b/xlators/performance/nl-cache/src/nl-cache-helper.c
@@ -1192,14 +1192,14 @@ nlc_dump_inodectx (xlator_t *this, inode_t *inode)
gf_proc_dump_write ("state", "%"PRIu64, nlc_ctx->state);
gf_proc_dump_write ("timer", "%p", nlc_ctx->timer);
- gf_proc_dump_write ("cache-time", "%lld", nlc_ctx->cache_time);
+ gf_proc_dump_write ("cache-time", "%" GF_PRI_TIME, nlc_ctx->cache_time);
gf_proc_dump_write ("cache-size", "%zu", nlc_ctx->cache_size);
gf_proc_dump_write ("refd-inodes", "%"PRIu64, nlc_ctx->refd_inodes);
if (IS_PE_VALID (nlc_ctx->state))
list_for_each_entry_safe (pe, tmp, &nlc_ctx->pe, list) {
- gf_proc_dump_write ("pe", "%p, %s", pe,
- pe->inode, pe->name);
+ gf_proc_dump_write ("pe", "%p, %p, %s", (void *)pe,
+ (void *)pe->inode, pe->name);
}
if (IS_NE_VALID (nlc_ctx->state))
diff --git a/xlators/performance/open-behind/src/open-behind.c b/xlators/performance/open-behind/src/open-behind.c
index 2d77f2d..3245c8f 100644
--- a/xlators/performance/open-behind/src/open-behind.c
+++ b/xlators/performance/open-behind/src/open-behind.c
@@ -933,7 +933,7 @@ ob_fdctx_dump (xlator_t *this, fd_t *fd)
gf_proc_dump_write ("open_frame", "%p", ob_fd->open_frame);
if (ob_fd->open_frame)
- gf_proc_dump_write ("open_frame.root.unique", "%p",
+ gf_proc_dump_write ("open_frame.root.unique", "%" PRIu64,
ob_fd->open_frame->root->unique);
gf_proc_dump_write ("loc.path", "%s", ob_fd->loc.path);
diff --git a/xlators/performance/quick-read/src/quick-read.c b/xlators/performance/quick-read/src/quick-read.c
index ca228b8..e377ac3 100644
--- a/xlators/performance/quick-read/src/quick-read.c
+++ b/xlators/performance/quick-read/src/quick-read.c
@@ -796,7 +796,7 @@ qr_priv_dump (xlator_t *this)
gf_proc_dump_add_section ("%s", key_prefix);
- gf_proc_dump_write ("max_file_size", "%d", conf->max_file_size);
+ gf_proc_dump_write ("max_file_size", "%" PRIu64, conf->max_file_size);
gf_proc_dump_write ("cache_timeout", "%d", conf->cache_timeout);
if (!table) {
@@ -811,7 +811,7 @@ qr_priv_dump (xlator_t *this)
}
gf_proc_dump_write ("total_files_cached", "%d", file_count);
- gf_proc_dump_write ("total_cache_used", "%d", total_size);
+ gf_proc_dump_write ("total_cache_used", "%" PRIu64, total_size);
out:
return 0;
diff --git a/xlators/performance/read-ahead/src/read-ahead.c b/xlators/performance/read-ahead/src/read-ahead.c
index 74ddf49..e02ca9f 100644
--- a/xlators/performance/read-ahead/src/read-ahead.c
+++ b/xlators/performance/read-ahead/src/read-ahead.c
@@ -808,7 +808,6 @@ ra_fdctx_dump (xlator_t *this, fd_t *fd)
int32_t ret = 0, i = 0;
uint64_t tmp_file = 0;
char *path = NULL;
- char key[GF_DUMP_MAX_BUF_LEN] = {0, };
char key_prefix[GF_DUMP_MAX_BUF_LEN] = {0, };
fd_ctx_get (fd, this, &tmp_file);
@@ -849,8 +848,7 @@ ra_fdctx_dump (xlator_t *this, fd_t *fd)
for (page = file->pages.next; page != &file->pages;
page = page->next) {
- sprintf (key, "page[%d]", i);
- gf_proc_dump_write (key, "%p", page[i++]);
+ gf_proc_dump_write ("page", "%d: %p", i++, (void *)page);
ra_page_dump (page);
}
@@ -1075,7 +1073,7 @@ ra_priv_dump (xlator_t *this)
if (ret)
goto out;
{
- gf_proc_dump_write ("page_size", "%d", conf->page_size);
+ gf_proc_dump_write ("page_size", "%" PRIu64, conf->page_size);
gf_proc_dump_write ("page_count", "%d", conf->page_count);
gf_proc_dump_write ("force_atime_update", "%d",
conf->force_atime_update);
diff --git a/xlators/performance/write-behind/src/write-behind.c b/xlators/performance/write-behind/src/write-behind.c
index ef02e18..d655843 100644
--- a/xlators/performance/write-behind/src/write-behind.c
+++ b/xlators/performance/write-behind/src/write-behind.c
@@ -2765,8 +2765,8 @@ wb_priv_dump (xlator_t *this)
gf_proc_dump_add_section ("%s", key_prefix);
- gf_proc_dump_write ("aggregate_size", "%d", conf->aggregate_size);
- gf_proc_dump_write ("window_size", "%d", conf->window_size);
+ gf_proc_dump_write ("aggregate_size", "%" PRIu64, conf->aggregate_size);
+ gf_proc_dump_write ("window_size", "%" PRIu64, conf->window_size);
gf_proc_dump_write ("flush_behind", "%d", conf->flush_behind);
gf_proc_dump_write ("trickling_writes", "%d", conf->trickling_writes);
@@ -2798,7 +2798,7 @@ __wb_dump_requests (struct list_head *head, char *prefix)
else
gf_proc_dump_write ("wound", "no");
- gf_proc_dump_write ("generation-number", "%d", req->gen);
+ gf_proc_dump_write ("generation-number", "%" PRIu64, req->gen);
gf_proc_dump_write ("req->op_ret", "%d", req->op_ret);
gf_proc_dump_write ("req->op_errno", "%d", req->op_errno);
diff --git a/xlators/protocol/client/src/client.c b/xlators/protocol/client/src/client.c
index 674f1aa..1e69977 100644
--- a/xlators/protocol/client/src/client.c
+++ b/xlators/protocol/client/src/client.c
@@ -2859,7 +2859,7 @@ client_priv_dump (xlator_t *this)
list_for_each_entry(tmp, &conf->saved_fds, sfd_pos) {
sprintf (key, "fd.%d.remote_fd", i);
- gf_proc_dump_write(key, "%d", tmp->remote_fd);
+ gf_proc_dump_write(key, "%" PRId64, tmp->remote_fd);
client_fd_lk_ctx_dump (this, tmp->lk_ctx, i);
i++;
}
diff --git a/xlators/storage/posix/src/posix.c b/xlators/storage/posix/src/posix.c
index 7bfe780..e46fe99 100644
--- a/xlators/storage/posix/src/posix.c
+++ b/xlators/storage/posix/src/posix.c
@@ -6970,11 +6970,11 @@ posix_priv (xlator_t *this)
if (!priv)
return 0;
- gf_proc_dump_write("base_path","%s", priv->base_path);
- gf_proc_dump_write("base_path_length","%d", priv->base_path_length);
- gf_proc_dump_write("max_read","%d", priv->read_value);
- gf_proc_dump_write("max_write","%d", priv->write_value);
- gf_proc_dump_write("nr_files","%ld", priv->nr_files);
+ gf_proc_dump_write("base_path", "%s", priv->base_path);
+ gf_proc_dump_write("base_path_length", "%d", priv->base_path_length);
+ gf_proc_dump_write("max_read", "%" PRId64, priv->read_value);
+ gf_proc_dump_write("max_write", "%" PRId64, priv->write_value);
+ gf_proc_dump_write("nr_files", "%ld", priv->nr_files);
return 0;
}
--
1.8.3.1
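
The whole patch applies one mechanical idiom: conversions whose width does not match the argument type ("%d" for a size_t, "%llu" for a uint64_t, "%Ld" for a 64-bit counter) are replaced with the <inttypes.h> width macros, which the strict format check introduced by the CVE fix can then verify. A minimal sketch of the idiom, with a plain vprintf() wrapper standing in for gf_proc_dump_write() (the wrapper, names, and values here are illustrative, not GlusterFS code):

#include <inttypes.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for gf_proc_dump_write(key, fmt, ...). */
static void dump_write(const char *key, const char *fmt, ...)
{
    va_list ap;

    printf("%s=", key);
    va_start(ap, fmt);
    vprintf(fmt, ap);
    va_end(ap);
    putchar('\n');
}

int main(void)
{
    uint64_t pid = 12345;      /* stands in for fd->pid */
    size_t hashsize = 14057;   /* stands in for itable->hashsize */

    /* Before: "%llu"/"%d" match these types only on some ABIs,
     * and the strict format check rejects them:
     *     dump_write("pid", "%llu", pid);
     */

    /* After: the <inttypes.h> macros concatenate into a portable
     * format. (GF_PRI_SIZET is assumed to be the project's
     * spelling of the standard "zu".) */
    dump_write("pid", "%" PRIu64, pid);
    dump_write("hashsize", "%zu", hashsize);
    return 0;
}

String-literal concatenation is what makes this work: "%" PRIu64 collapses into a single format string at compile time, so the checker sees an ordinary printf format.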

0401-client_t.c-fix-the-format-error.patch

@@ -0,0 +1,41 @@
From 2b764dcf58da83cdc9138bbe2f9a503400ce66c6 Mon Sep 17 00:00:00 2001
From: Amar Tumballi <amarts@redhat.com>
Date: Fri, 12 Oct 2018 10:42:35 +0530
Subject: [PATCH 401/404] client_t.c: fix the format error
Updates: CVE-2018-14661
BUG: 1637084
Change-Id: Ieee5b41d24993a00fbe237a613d5db9dd20eee95
Signed-off-by: Amar Tumballi <amarts@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/152630
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
libglusterfs/src/client_t.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/libglusterfs/src/client_t.c b/libglusterfs/src/client_t.c
index dce27c1..a9ae62c 100644
--- a/libglusterfs/src/client_t.c
+++ b/libglusterfs/src/client_t.c
@@ -603,7 +603,7 @@ client_dump (client_t *client, char *prefix)
return;
memset(key, 0, sizeof key);
- gf_proc_dump_write("refcount", GF_PRI_ATOMIC,
+ gf_proc_dump_write("refcount", "%"GF_PRI_ATOMIC,
GF_ATOMIC_GET (client->count));
}
@@ -807,7 +807,7 @@ gf_client_dump_fdtables (xlator_t *this)
}
gf_proc_dump_build_key (key, "conn", "%d.ref",
count);
- gf_proc_dump_write (key, GF_PRI_ATOMIC,
+ gf_proc_dump_write (key, "%"GF_PRI_ATOMIC,
GF_ATOMIC_GET (client->count));
if (client->bound_xl) {
gf_proc_dump_build_key (key, "conn",
--
1.8.3.1
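
This patch restores a missing '%': the PRI*-style macros (and, by the same assumption, GF_PRI_ATOMIC) expand only to the length-modifier and conversion characters, never to the '%' itself, so passing the bare macro as the format leaves the argument unconsumed. A standalone illustration using the standard PRIu64:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t refcount = 3;

    /* Buggy form: without the leading '%', the macro contributes
     * only its length/conversion characters, so the format has no
     * conversion at all and the argument is silently dropped:
     *
     *     printf("refcount: " PRIu64 "\n", refcount);  // "refcount: lu"
     */

    /* Fixed form: literal concatenation puts the '%' back. */
    printf("refcount: %" PRIu64 "\n", refcount);  /* "refcount: 3" */
    return 0;
}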

0402-core-glusterfsd-keeping-fd-open-in-index-xlator.patch

@@ -0,0 +1,263 @@
From fad234b5a62df48b7abc726549f2abb6b0af7c04 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawa@redhat.com>
Date: Tue, 16 Oct 2018 07:50:47 +0530
Subject: [PATCH 402/404] core: glusterfsd keeping fd open in index xlator
Problem: While processing GF_EVENT_PARENT_DOWN, the brick
xlator forwards the event to the next xlator only after
ensuring that no stub is in progress. The io-threads xlator,
however, decremented its counter when a stub was dequeued,
before the stub was actually processed, so the event could
be forwarded too early.
Solution: Introduce a new counter (stub_cnt) and decrement
it only after the stub has been processed completely in the
io-threads xlator.
Additionally, to avoid a brick crash when xlator_mem_cleanup
is called, move only the brick xlator whose name matches the
detached brick in the graph.
Note: Thanks to Pranith for sharing a simple reproducer.
> fixes bz#1637934
> Change-Id: I1a694a001f7a5417e8771e3adf92c518969b6baa
> (Cherry pick from commit 7bf95631b52bd05b06122180f8bd4aa62c70b1a9)
> (Reviewed on upstream link https://review.gluster.org/#/c/glusterfs/+/21379/)
Change-Id: I54b8ebb19819f9bbcbdd1448474ab084c0fd2eb6
BUG: 1631372
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/152908
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
glusterfsd/src/glusterfsd-mgmt.c | 15 +----
tests/bugs/glusterd/brick-mux-fd-cleanup.t | 78 +++++++++++++++++++++++++
xlators/performance/io-threads/src/io-threads.c | 23 ++++----
xlators/performance/io-threads/src/io-threads.h | 3 +-
4 files changed, 94 insertions(+), 25 deletions(-)
create mode 100644 tests/bugs/glusterd/brick-mux-fd-cleanup.t
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index cbd436a..e3fceeb 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -270,6 +270,7 @@ xlator_mem_cleanup (xlator_t *this) {
top = glusterfsd_ctx->active->first;
LOCK (&ctx->volfile_lock);
/* TODO here we have leak for xlator node in a graph */
+ /* Need to move only top xlator from a graph */
for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
victim = (*trav_p)->xlator;
if (victim->call_cleanup && !strcmp (victim->name, this->name)) {
@@ -277,20 +278,6 @@ xlator_mem_cleanup (xlator_t *this) {
break;
}
}
- /* TODO Sometime brick xlator is not moved from graph so followed below
- approach to move brick xlator from a graph, will move specific brick
- xlator from graph only while inode table and mem_acct are cleaned up
- */
- trav_p = &top->children;
- while (*trav_p) {
- victim = (*trav_p)->xlator;
- if (victim->call_cleanup && !victim->itable && !victim->mem_acct) {
- (*trav_p) = (*trav_p)->next;
- } else {
- trav_p = &(*trav_p)->next;
- }
- }
- UNLOCK (&ctx->volfile_lock);
}
}
diff --git a/tests/bugs/glusterd/brick-mux-fd-cleanup.t b/tests/bugs/glusterd/brick-mux-fd-cleanup.t
new file mode 100644
index 0000000..de11c17
--- /dev/null
+++ b/tests/bugs/glusterd/brick-mux-fd-cleanup.t
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This .t tests that the fds from the client are closed on the brick when
+#'gluster volume stop' is executed in a brick-mux setup.
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+function keep_fd_open {
+#This function has to be run as a background job because opening the fd in
+#the foreground and running commands leads to flush calls on these fds,
+#which makes it very difficult to create the race where fds are left
+#open even after the brick dies.
+ exec 5>$M1/a
+ exec 6>$M1/b
+ while [ -f $M0/a ]; do sleep 1; done
+}
+
+function count_open_files {
+ local brick_pid="$1"
+ local pattern="$2"
+ ls -l /proc/$brick_pid/fd | grep -i "$pattern" | wc -l
+}
+
+TEST $CLI volume set all cluster.brick-multiplex on
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume create $V1 replica 2 $H0:$B0/${V1}{2,3}
+#Have same configuration on both bricks so that they are multiplexed
+#Delay flush fop for a second
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume heal $V1 disable
+TEST $CLI volume set $V0 delay-gen posix
+TEST $CLI volume set $V0 delay-gen.enable flush
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 delay-gen.delay-duration 1000000
+TEST $CLI volume set $V1 delay-gen posix
+TEST $CLI volume set $V1 delay-gen.enable flush
+TEST $CLI volume set $V1 delay-gen.delay-percentage 100
+TEST $CLI volume set $V1 delay-gen.delay-duration 1000000
+
+TEST $CLI volume start $V0
+TEST $CLI volume start $V1
+
+TEST $GFS -s $H0 --volfile-id=$V0 --direct-io-mode=enable $M0
+TEST $GFS -s $H0 --volfile-id=$V1 --direct-io-mode=enable $M1
+
+TEST touch $M0/a
+keep_fd_open &
+TEST $CLI volume profile $V1 start
+brick_pid=$(get_brick_pid $V1 $H0 $B0/${V1}2)
+TEST count_open_files $brick_pid "$B0/${V1}2/a"
+TEST count_open_files $brick_pid "$B0/${V1}2/b"
+TEST count_open_files $brick_pid "$B0/${V1}3/a"
+TEST count_open_files $brick_pid "$B0/${V1}3/b"
+
+#If any other flush fops are introduced into the system other than the one at
+#cleanup it interferes with the race, so test for it
+EXPECT "^0$" echo "$($CLI volume profile $V1 info incremental | grep -i flush | wc -l)"
+#Stop the volume
+TEST $CLI volume stop $V1
+
+#Wait for the resources of volume V1 to be cleaned up
+EXPECT_WITHIN $GRAPH_SWITCH_TIMEOUT "^0$" count_open_files $brick_pid "$B0/${V1}2/a"
+EXPECT_WITHIN $GRAPH_SWITCH_TIMEOUT "^0$" count_open_files $brick_pid "$B0/${V1}2/b"
+EXPECT_WITHIN $GRAPH_SWITCH_TIMEOUT "^0$" count_open_files $brick_pid "$B0/${V1}3/a"
+EXPECT_WITHIN $GRAPH_SWITCH_TIMEOUT "^0$" count_open_files $brick_pid "$B0/${V1}3/b"
+
+TEST rm -f $M0/a #Exit keep_fd_open()
+wait
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+cleanup
diff --git a/xlators/performance/io-threads/src/io-threads.c b/xlators/performance/io-threads/src/io-threads.c
index 41d48ab..2944c7d 100644
--- a/xlators/performance/io-threads/src/io-threads.c
+++ b/xlators/performance/io-threads/src/io-threads.c
@@ -120,7 +120,7 @@ __iot_dequeue (iot_conf_t *conf, int *pri)
if (!stub)
return NULL;
- GF_ATOMIC_DEC(conf->queue_size);
+ conf->queue_size--;
conf->queue_sizes[*pri]--;
return stub;
@@ -153,7 +153,8 @@ __iot_enqueue (iot_conf_t *conf, call_stub_t *stub, int pri)
}
list_add_tail (&stub->list, &ctx->reqs);
- GF_ATOMIC_INC(conf->queue_size);
+ conf->queue_size++;
+ GF_ATOMIC_INC(conf->stub_cnt);
conf->queue_sizes[pri]++;
}
@@ -182,7 +183,7 @@ iot_worker (void *data)
conf->ac_iot_count[pri]--;
pri = -1;
}
- while (GF_ATOMIC_GET(conf->queue_size) == 0) {
+ while (conf->queue_size == 0) {
if (conf->down) {
bye = _gf_true;/*Avoid sleep*/
break;
@@ -220,8 +221,10 @@ iot_worker (void *data)
}
pthread_mutex_unlock (&conf->mutex);
- if (stub) /* guard against spurious wakeups */
+ if (stub) { /* guard against spurious wakeups */
call_resume (stub);
+ GF_ATOMIC_DEC(conf->stub_cnt);
+ }
stub = NULL;
if (bye)
@@ -816,7 +819,7 @@ __iot_workers_scale (iot_conf_t *conf)
gf_msg_debug (conf->this->name, 0,
"scaled threads to %d (queue_size=%d/%d)",
conf->curr_count,
- GF_ATOMIC_GET(conf->queue_size), scale);
+ conf->queue_size, scale);
} else {
break;
}
@@ -1030,7 +1033,7 @@ init (xlator_t *this)
bool, out);
conf->this = this;
- GF_ATOMIC_INIT(conf->queue_size, 0);
+ GF_ATOMIC_INIT(conf->stub_cnt, 0);
for (i = 0; i < IOT_PRI_MAX; i++) {
INIT_LIST_HEAD (&conf->clients[i]);
@@ -1075,7 +1078,7 @@ notify (xlator_t *this, int32_t event, void *data, ...)
{
iot_conf_t *conf = this->private;
xlator_t *victim = data;
- uint64_t queue_size = 0;
+ uint64_t stub_cnt = 0;
struct timespec sleep_till = {0, };
if (GF_EVENT_PARENT_DOWN == event) {
@@ -1083,14 +1086,14 @@ notify (xlator_t *this, int32_t event, void *data, ...)
clock_gettime(CLOCK_REALTIME, &sleep_till);
sleep_till.tv_sec += 1;
/* Wait for draining stub from queue before notify PARENT_DOWN */
- queue_size = GF_ATOMIC_GET(conf->queue_size);
+ stub_cnt = GF_ATOMIC_GET(conf->stub_cnt);
pthread_mutex_lock(&conf->mutex);
{
- while (queue_size) {
+ while (stub_cnt) {
(void)pthread_cond_timedwait(&conf->cond, &conf->mutex,
&sleep_till);
- queue_size = GF_ATOMIC_GET(conf->queue_size);
+ stub_cnt = GF_ATOMIC_GET(conf->stub_cnt);
}
}
pthread_mutex_unlock(&conf->mutex);
diff --git a/xlators/performance/io-threads/src/io-threads.h b/xlators/performance/io-threads/src/io-threads.h
index 7a6973c..57a136e 100644
--- a/xlators/performance/io-threads/src/io-threads.h
+++ b/xlators/performance/io-threads/src/io-threads.h
@@ -75,7 +75,8 @@ struct iot_conf {
int32_t ac_iot_limit[IOT_PRI_MAX];
int32_t ac_iot_count[IOT_PRI_MAX];
int queue_sizes[IOT_PRI_MAX];
- gf_atomic_t queue_size;
+ int32_t queue_size;
+ gf_atomic_t stub_cnt;
pthread_attr_t w_attr;
gf_boolean_t least_priority; /*Enable/Disable least-priority */
--
1.8.3.1
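
The io-threads side of this patch is a drain-before-shutdown pattern: account for each stub in an atomic counter at enqueue time, decrement only after call_resume() has completed, and have the PARENT_DOWN path wait until the counter reaches zero. A condensed sketch with C11 atomics and pthreads standing in for GF_ATOMIC_* and the iot_conf_t internals (all names are illustrative, and the broadcast is an assumption; the patch itself reuses an existing condition variable):

#include <pthread.h>
#include <stdatomic.h>
#include <time.h>

typedef struct {
    atomic_long stub_cnt;      /* stand-in for GF_ATOMIC_* counter */
    pthread_mutex_t mutex;
    pthread_cond_t cond;
} conf_t;

/* __iot_enqueue: account for the stub as soon as it is queued. */
static void enqueue_stub(conf_t *conf)
{
    atomic_fetch_add(&conf->stub_cnt, 1);
}

/* iot_worker: drop the count only after the stub has fully run. */
static void run_stub(conf_t *conf, void (*resume)(void))
{
    resume();                                 /* call_resume(stub) */
    if (atomic_fetch_sub(&conf->stub_cnt, 1) == 1)
        pthread_cond_broadcast(&conf->cond);  /* wake the drainer */
}

/* notify(GF_EVENT_PARENT_DOWN): drain before forwarding the event.
 * The fixed one-second deadline mirrors the patch; once it passes,
 * the loop degrades to polling the counter. */
static void wait_for_drain(conf_t *conf)
{
    struct timespec till;

    clock_gettime(CLOCK_REALTIME, &till);
    till.tv_sec += 1;
    pthread_mutex_lock(&conf->mutex);
    while (atomic_load(&conf->stub_cnt) != 0)
        (void)pthread_cond_timedwait(&conf->cond, &conf->mutex, &till);
    pthread_mutex_unlock(&conf->mutex);
}

static void noop(void) {}

int main(void)
{
    conf_t conf = { .stub_cnt = 0,
                    .mutex = PTHREAD_MUTEX_INITIALIZER,
                    .cond  = PTHREAD_COND_INITIALIZER };

    enqueue_stub(&conf);
    run_stub(&conf, noop);
    wait_for_drain(&conf);    /* returns immediately: count is 0 */
    return 0;
}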

0403-afr-prevent-winding-inodelks-twice-for-arbiter-volum.patch

@@ -0,0 +1,98 @@
From 03e4bab925b20832492c9954d3ecb6c10fe56548 Mon Sep 17 00:00:00 2001
From: Ravishankar N <ravishankar@redhat.com>
Date: Wed, 10 Oct 2018 17:57:33 +0530
Subject: [PATCH 403/404] afr: prevent winding inodelks twice for arbiter
volumes
Backport of https://review.gluster.org/#/c/glusterfs/+/21380/
Problem:
In an arbiter volume, if there is a pending data heal of a file only on
the arbiter brick, self-heal takes the inodelk twice due to a code bug
but unlocks it only once, leaving behind a stale lock on the brick. This
causes the next write to the file to hang.
Fix:
Fix the code bug so that the lock is taken only once. The bug was
introduced in master with commit eb472d82a083883335bc494b87ea175ac43471ff.
Thanks to Pranith Kumar K <pkarampu@redhat.com> for finding the RCA.
Change-Id: I15ad969e10a6a3c4bd255e2948b6be6dcddc61e1
BUG: 1636902
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/152552
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
.../bug-1637802-arbiter-stale-data-heal-lock.t | 44 ++++++++++++++++++++++
xlators/cluster/afr/src/afr-self-heal-data.c | 2 +-
2 files changed, 45 insertions(+), 1 deletion(-)
create mode 100644 tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t
diff --git a/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t b/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t
new file mode 100644
index 0000000..91ed39b
--- /dev/null
+++ b/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+# Test to check that data self-heal does not leave any stale lock.
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Create base entry in indices/xattrop
+echo "Data" > $M0/FILE
+
+# Kill arbiter brick and write to FILE.
+TEST kill_brick $V0 $H0 $B0/${V0}2
+echo "arbiter down" >> $M0/FILE
+EXPECT 2 get_pending_heal_count $V0
+
+# Bring it back up and let heal complete.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# write to the FILE must succeed.
+echo "this must succeed" >> $M0/FILE
+TEST [ $? -eq 0 ]
+cleanup;
diff --git a/xlators/cluster/afr/src/afr-self-heal-data.c b/xlators/cluster/afr/src/afr-self-heal-data.c
index d3deb8f..2ac6e47 100644
--- a/xlators/cluster/afr/src/afr-self-heal-data.c
+++ b/xlators/cluster/afr/src/afr-self-heal-data.c
@@ -765,7 +765,7 @@ restore_time:
afr_selfheal_restore_time (frame, this, fd->inode, source,
healed_sinks, locked_replies);
- if (!is_arbiter_the_only_sink || !empty_file) {
+ if (!is_arbiter_the_only_sink && !empty_file) {
ret = afr_selfheal_inodelk (frame, this, fd->inode, this->name,
0, 0, data_lock);
if (ret < priv->child_count) {
--
1.8.3.1
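
The fix is a single operator, and a truth table shows why it matters: the inodelk may be re-acquired only when neither flag is set, but '!a || !b' is true in three of the four combinations, so the lock was taken a second time in cases where it was already held. A tiny standalone check (the flag names are borrowed from the patch; the rest is illustrative):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    for (int a = 0; a <= 1; a++)          /* is_arbiter_the_only_sink */
        for (int e = 0; e <= 1; e++) {    /* empty_file */
            bool buggy = !a || !e;        /* re-locks in 3 of 4 cases */
            bool fixed = !a && !e;        /* re-locks only when both are 0 */
            printf("sink-only=%d empty=%d  ||:%d  &&:%d\n",
                   a, e, buggy, fixed);
        }
    return 0;
}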

0404-core-Resolve-some-warnings-to-release-a-build.patch

@@ -0,0 +1,53 @@
From 117b04bf6379a85d21f77a1d961241e95ad67a44 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal <moagrawa@redhat.com>
Date: Tue, 16 Oct 2018 19:55:10 +0530
Subject: [PATCH 404/404] core: Resolve some warnings to release a build
Change-Id: I365073fbda9f19ef919f8d869f84a7018eb66d72
BUG: 1631372
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/152991
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Amar Tumballi <amarts@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-quota.c | 2 +-
xlators/protocol/server/src/server.c | 2 --
2 files changed, 1 insertion(+), 3 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-quota.c b/xlators/mgmt/glusterd/src/glusterd-quota.c
index 6d3918b..55bbac7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-quota.c
+++ b/xlators/mgmt/glusterd/src/glusterd-quota.c
@@ -2101,7 +2101,7 @@ glusterd_op_stage_quota (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (errno == ERANGE || hard_limit < 0)
gf_asprintf (op_errstr, "Hard-limit "
"value out of range (0 - %"PRId64
- "): %s", hard_limit_str);
+ "): %s", hard_limit, hard_limit_str);
else
gf_msg (this->name, GF_LOG_ERROR, errno,
GD_MSG_CONVERSION_FAILED,
diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c
index 3a429bc..65d712f 100644
--- a/xlators/protocol/server/src/server.c
+++ b/xlators/protocol/server/src/server.c
@@ -710,7 +710,6 @@ server_graph_janitor_threads(void *data)
{
xlator_t *victim = NULL;
xlator_t *this = NULL;
- server_conf_t *conf = NULL;
glusterfs_ctx_t *ctx = NULL;
char *victim_name = NULL;
server_cleanup_xprt_arg_t *arg = NULL;
@@ -724,7 +723,6 @@ server_graph_janitor_threads(void *data)
this = arg->this;
victim_name = arg->victim_name;
THIS = arg->this;
- conf = this->private;
ctx = THIS->ctx;
GF_VALIDATE_OR_GOTO(this->name, ctx, out);
--
1.8.3.1
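
The glusterd-quota.c half of this patch fixes an argument-count mismatch: the format string consumes two values (a %PRId64 and a %s) but the original call supplied only hard_limit_str, which is undefined behavior and exactly what the strict format check flags. In miniature (the values are made up):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    int64_t hard_limit = -1;                 /* parsed (out-of-range) value */
    const char *hard_limit_str = "99999999999999999999";

    /* Buggy call: two conversions, one argument -- undefined behavior:
     *
     *     printf("Hard-limit value out of range (0 - %" PRId64 "): %s\n",
     *            hard_limit_str);
     */

    /* Fixed call supplies both arguments, as the patch does. */
    printf("Hard-limit value out of range (0 - %" PRId64 "): %s\n",
           hard_limit, hard_limit_str);
    return 0;
}

The server.c half is simpler still: it deletes a variable that was assigned but never read.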

glusterfs.spec

@@ -192,7 +192,7 @@ Release: 0.1%{?prereltag:.%{prereltag}}%{?dist}
%else
Name: glusterfs
Version: 3.12.2
Release: 22%{?dist}
Release: 23%{?dist}
%endif
License: GPLv2 or LGPLv3+
Group: System Environment/Base
@@ -664,6 +664,11 @@ Patch0396: 0396-protocol-remove-the-option-verify-volfile-checksum.patch
Patch0397: 0397-features-locks-add-buffer-overflow-checks-in-pl_getx.patch
Patch0398: 0398-lock-Do-not-allow-meta-lock-count-to-be-more-than-on.patch
Patch0399: 0399-all-fix-the-format-string-exceptions.patch
Patch0400: 0400-all-fix-the-format-warnings-due-to-strict-check.patch
Patch0401: 0401-client_t.c-fix-the-format-error.patch
Patch0402: 0402-core-glusterfsd-keeping-fd-open-in-index-xlator.patch
Patch0403: 0403-afr-prevent-winding-inodelks-twice-for-arbiter-volum.patch
Patch0404: 0404-core-Resolve-some-warnings-to-release-a-build.patch
%description
GlusterFS is a distributed file-system capable of scaling to several
@@ -2612,6 +2612,9 @@ fi
%endif
%changelog
* Tue Oct 16 2018 Sunil Kumar Acharya <sheggodu@redhat.com> - 3.12.2-23
- fixes bugs bz#1631372 bz#1636902
* Tue Oct 09 2018 Milind Changire <mchangir@redhat.com> - 3.12.2-22
- fixes bugs bz#1631329 bz#1631372