From 1a8b001a121ada4d3d338b52b312896f1790f2bb Mon Sep 17 00:00:00 2001
From: Vinayak hariharmath <65405035+VHariharmath-rh@users.noreply.github.com>
Date: Mon, 11 Jan 2021 12:34:55 +0530
Subject: [PATCH 570/584] features/shard: avoid repetitive calls to
 gf_uuid_unparse()

The issue is that shard_make_block_abspath() calls gf_uuid_unparse()
every time a shard path is constructed. The gfid can be unparsed once,
saved, and passed along while constructing the path, avoiding the
repeated calls to gf_uuid_unparse().

Backport of:
> Upstream-patch: https://github.com/gluster/glusterfs/pull/1689
> Fixes: #1423
> Change-Id: Ia26fbd5f09e812bbad9e5715242f14143c013c9c
> Signed-off-by: Vinayakswami Hariharmath <vharihar@redhat.com>

BUG: 1925425
Change-Id: Ia26fbd5f09e812bbad9e5715242f14143c013c9c
Signed-off-by: Vinayakswami Hariharmath <vharihar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/244964
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 tests/bugs/shard/issue-1425.t      |   9 ++-
 xlators/features/shard/src/shard.c | 119 ++++++++++++++++++-------------------
 2 files changed, 65 insertions(+), 63 deletions(-)

diff --git a/tests/bugs/shard/issue-1425.t b/tests/bugs/shard/issue-1425.t
index bbe82c0..8b77705 100644
--- a/tests/bugs/shard/issue-1425.t
+++ b/tests/bugs/shard/issue-1425.t
@@ -21,7 +21,13 @@ TEST $CLI volume profile $V0 start
 
 TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
 
+$CLI volume profile $V0 info clear
+
 TEST fallocate -l 20M $M0/foo
+
+# There should be 1+4 shards and we expect 4 lookups less than on the build without this patch
+EXPECT "5" echo `$CLI volume profile $V0 info incremental | grep -w LOOKUP | awk '{print $8}'`
+
 gfid_new=$(get_gfid_string $M0/foo)
 
 # Check for the base shard
@@ -31,9 +37,6 @@ TEST stat $B0/${V0}0/foo
 # There should be 4 associated shards
 EXPECT_WITHIN $FILE_COUNT_TIME 4 get_file_count $B0/${V0}0/.shard/$gfid_new
 
-# There should be 1+4 shards and we expect 4 lookups less than on the build without this patch
-EXPECT "21" echo `$CLI volume profile $V0 info incremental | grep -w LOOKUP | awk '{print $8}'`
-
 # Delete the base shard and check shards get cleaned up
 TEST unlink $M0/foo
 
diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c
index a6ad1b8..d1d7d7a 100644
--- a/xlators/features/shard/src/shard.c
+++ b/xlators/features/shard/src/shard.c
@@ -16,6 +16,8 @@
 #include <glusterfs/defaults.h>
 #include <glusterfs/statedump.h>
 
+#define SHARD_PATH_MAX (sizeof(GF_SHARD_DIR) + GF_UUID_BUF_SIZE + 16)
+
 static gf_boolean_t
 __is_shard_dir(uuid_t gfid)
 {
@@ -49,15 +51,19 @@ shard_make_block_bname(int block_num, uuid_t gfid, char *buf, size_t len)
     snprintf(buf, len, "%s.%d", gfid_str, block_num);
 }
 
-void
-shard_make_block_abspath(int block_num, uuid_t gfid, char *filepath, size_t len)
+static int
+shard_make_base_path(char *path, uuid_t gfid)
 {
-    char gfid_str[GF_UUID_BUF_SIZE] = {
-        0,
-    };
+    strcpy(path, "/" GF_SHARD_DIR "/");
+    uuid_utoa_r(gfid, path + sizeof(GF_SHARD_DIR) + 1);
+    return (sizeof(GF_SHARD_DIR) + GF_UUID_BUF_SIZE);
+}
 
-    gf_uuid_unparse(gfid, gfid_str);
-    snprintf(filepath, len, "/%s/%s.%d", GF_SHARD_DIR, gfid_str, block_num);
+static inline void
+shard_append_index(char *path, int path_size, int prefix_len,
+                   int shard_idx_iter)
+{
+    snprintf(path + prefix_len, path_size - prefix_len, ".%d", shard_idx_iter);
 }
 
 int
@@ -1004,9 +1010,8 @@ shard_common_resolve_shards(call_frame_t *frame, xlator_t *this,
 {
     int i = -1;
     uint32_t shard_idx_iter = 0;
-    char path[PATH_MAX] = {
-        0,
-    };
+    int prefix_len = 0;
+    char path[SHARD_PATH_MAX];
     uuid_t gfid = {
         0,
     };
@@ -1055,6 +1060,9 @@ shard_common_resolve_shards(call_frame_t *frame, xlator_t *this,
     else
         gf_uuid_copy(gfid, local->base_gfid);
 
+    /* Build base shard path before appending index of the shard */
+    prefix_len = shard_make_base_path(path, gfid);
+
     while (shard_idx_iter <= resolve_count) {
         i++;
         if (shard_idx_iter == 0) {
@@ -1062,16 +1070,13 @@ shard_common_resolve_shards(call_frame_t *frame, xlator_t *this,
             shard_idx_iter++;
             continue;
         }
-
-        shard_make_block_abspath(shard_idx_iter, gfid, path, sizeof(path));
-
+        shard_append_index(path, SHARD_PATH_MAX, prefix_len, shard_idx_iter);
         inode = NULL;
         inode = inode_resolve(this->itable, path);
         if (inode) {
             gf_msg_debug(this->name, 0,
-                         "Shard %d already "
-                         "present. gfid=%s. Saving inode for future.",
-                         shard_idx_iter, uuid_utoa(inode->gfid));
+                         "Shard %s already present. Saving inode for future.",
+                         path);
             local->inode_list[i] = inode;
             /* Let the ref on the inodes that are already present
              * in inode table still be held so that they don't get
@@ -2153,9 +2158,8 @@ shard_truncate_htol(call_frame_t *frame, xlator_t *this, inode_t *inode)
     int call_count = 0;
     uint32_t cur_block = 0;
     uint32_t last_block = 0;
-    char path[PATH_MAX] = {
-        0,
-    };
+    int prefix_len = 0;
+    char path[SHARD_PATH_MAX];
     char *bname = NULL;
     loc_t loc = {
         0,
     };
@@ -2216,6 +2220,10 @@ shard_truncate_htol(call_frame_t *frame, xlator_t *this, inode_t *inode)
         return 0;
     }
 
+    /* Build base shard path before appending index of the shard */
+    prefix_len = shard_make_base_path(path, inode->gfid);
+    bname = path + sizeof(GF_SHARD_DIR) + 1;
+
     SHARD_SET_ROOT_FS_ID(frame, local);
     while (cur_block <= last_block) {
         if (!local->inode_list[i]) {
@@ -2229,15 +2237,12 @@ shard_truncate_htol(call_frame_t *frame, xlator_t *this, inode_t *inode)
             goto next;
         }
 
-        shard_make_block_abspath(cur_block, inode->gfid, path, sizeof(path));
-        bname = strrchr(path, '/') + 1;
+        shard_append_index(path, SHARD_PATH_MAX, prefix_len, cur_block);
         loc.parent = inode_ref(priv->dot_shard_inode);
         ret = inode_path(loc.parent, bname, (char **)&(loc.path));
         if (ret < 0) {
             gf_msg(this->name, GF_LOG_ERROR, 0, SHARD_MSG_INODE_PATH_FAILED,
-                   "Inode path failed"
-                   " on %s. Base file gfid = %s",
-                   bname, uuid_utoa(inode->gfid));
+                   "Inode path failed on %s.", bname);
             local->op_ret = -1;
             local->op_errno = ENOMEM;
             loc_wipe(&loc);
@@ -2465,13 +2470,8 @@ shard_common_lookup_shards(call_frame_t *frame, xlator_t *this, inode_t *inode,
     int call_count = 0;
     int32_t shard_idx_iter = 0;
     int lookup_count = 0;
-    char path[PATH_MAX] = {
-        0,
-    };
+    char path[SHARD_PATH_MAX];
     char *bname = NULL;
-    uuid_t gfid = {
-        0,
-    };
     loc_t loc = {
         0,
     };
@@ -2489,10 +2489,16 @@ shard_common_lookup_shards(call_frame_t *frame, xlator_t *this, inode_t *inode,
     if (local->lookup_shards_barriered)
         local->barrier.waitfor = local->call_count;
 
+    /* Build base shard path before appending index of the shard */
+    strcpy(path, "/" GF_SHARD_DIR "/");
+
     if (inode)
-        gf_uuid_copy(gfid, inode->gfid);
+        uuid_utoa_r(inode->gfid, path + sizeof(GF_SHARD_DIR) + 1);
     else
-        gf_uuid_copy(gfid, local->base_gfid);
+        uuid_utoa_r(local->base_gfid, path + sizeof(GF_SHARD_DIR) + 1);
+
+    int prefix_len = sizeof(GF_SHARD_DIR) + GF_UUID_BUF_SIZE;
+    bname = path + sizeof(GF_SHARD_DIR) + 1;
 
     while (shard_idx_iter <= lookup_count) {
         if (local->inode_list[i]) {
@@ -2508,18 +2514,14 @@ shard_common_lookup_shards(call_frame_t *frame, xlator_t *this, inode_t *inode,
             goto next;
         }
 
-        shard_make_block_abspath(shard_idx_iter, gfid, path, sizeof(path));
-
-        bname = strrchr(path, '/') + 1;
+        shard_append_index(path, SHARD_PATH_MAX, prefix_len, shard_idx_iter);
         loc.inode = inode_new(this->itable);
         loc.parent = inode_ref(priv->dot_shard_inode);
         gf_uuid_copy(loc.pargfid, priv->dot_shard_gfid);
         ret = inode_path(loc.parent, bname, (char **)&(loc.path));
         if (ret < 0 || !(loc.inode)) {
             gf_msg(this->name, GF_LOG_ERROR, 0, SHARD_MSG_INODE_PATH_FAILED,
-                   "Inode path failed"
-                   " on %s, base file gfid = %s",
-                   bname, uuid_utoa(gfid));
+                   "Inode path failed on %s", bname);
             local->op_ret = -1;
             local->op_errno = ENOMEM;
             loc_wipe(&loc);
@@ -3168,12 +3170,7 @@ shard_unlink_shards_do(call_frame_t *frame, xlator_t *this, inode_t *inode)
     uint32_t cur_block = 0;
     uint32_t cur_block_idx = 0; /*this is idx into inode_list[] array */
     char *bname = NULL;
-    char path[PATH_MAX] = {
-        0,
-    };
-    uuid_t gfid = {
-        0,
-    };
+    char path[SHARD_PATH_MAX];
     loc_t loc = {
         0,
     };
@@ -3184,10 +3181,16 @@ shard_unlink_shards_do(call_frame_t *frame, xlator_t *this, inode_t *inode)
     priv = this->private;
     local = frame->local;
 
+    /* Build base shard path before appending index of the shard */
+    strcpy(path, "/" GF_SHARD_DIR "/");
+
     if (inode)
-        gf_uuid_copy(gfid, inode->gfid);
+        uuid_utoa_r(inode->gfid, path + sizeof(GF_SHARD_DIR) + 1);
     else
-        gf_uuid_copy(gfid, local->base_gfid);
+        uuid_utoa_r(local->base_gfid, path + sizeof(GF_SHARD_DIR) + 1);
+
+    int prefix_len = sizeof(GF_SHARD_DIR) + GF_UUID_BUF_SIZE;
+    bname = path + sizeof(GF_SHARD_DIR) + 1;
 
     for (i = 0; i < local->num_blocks; i++) {
         if (!local->inode_list[i])
@@ -3203,7 +3206,7 @@ shard_unlink_shards_do(call_frame_t *frame, xlator_t *this, inode_t *inode)
         gf_msg_debug(this->name, 0,
                      "All shards that need to be "
                      "unlinked are non-existent: %s",
-                     uuid_utoa(gfid));
+                     path);
         return 0;
     }
 
@@ -3221,15 +3224,12 @@ shard_unlink_shards_do(call_frame_t *frame, xlator_t *this, inode_t *inode)
             goto next;
         }
 
-        shard_make_block_abspath(cur_block, gfid, path, sizeof(path));
-        bname = strrchr(path, '/') + 1;
+        shard_append_index(path, SHARD_PATH_MAX, prefix_len, cur_block);
         loc.parent = inode_ref(priv->dot_shard_inode);
         ret = inode_path(loc.parent, bname, (char **)&(loc.path));
         if (ret < 0) {
             gf_msg(this->name, GF_LOG_ERROR, 0, SHARD_MSG_INODE_PATH_FAILED,
-                   "Inode path failed"
-                   " on %s, base file gfid = %s",
-                   bname, uuid_utoa(gfid));
+                   "Inode path failed on %s", bname);
             local->op_ret = -1;
             local->op_errno = ENOMEM;
             loc_wipe(&loc);
@@ -4971,9 +4971,8 @@ shard_common_resume_mknod(call_frame_t *frame, xlator_t *this,
     int last_block = 0;
     int ret = 0;
     int call_count = 0;
-    char path[PATH_MAX] = {
-        0,
-    };
+    int prefix_len = 0;
+    char path[SHARD_PATH_MAX];
     mode_t mode = 0;
     char *bname = NULL;
     shard_priv_t *priv = NULL;
@@ -4996,6 +4995,10 @@ shard_common_resume_mknod(call_frame_t *frame, xlator_t *this,
     call_count = local->call_count = local->create_count;
     local->post_mknod_handler = post_mknod_handler;
 
+    /* Build base shard path before appending index of the shard */
+    prefix_len = shard_make_base_path(path, fd->inode->gfid);
+    bname = path + sizeof(GF_SHARD_DIR) + 1;
+
     SHARD_SET_ROOT_FS_ID(frame, local);
 
     ret = shard_inode_ctx_get_all(fd->inode, this, &ctx_tmp);
@@ -5022,10 +5025,7 @@ shard_common_resume_mknod(call_frame_t *frame, xlator_t *this,
                                    -1, ENOMEM, NULL, NULL, NULL, NULL, NULL);
             goto next;
         }
-
-        shard_make_block_abspath(shard_idx_iter, fd->inode->gfid, path,
-                                 sizeof(path));
-
+        shard_append_index(path, SHARD_PATH_MAX, prefix_len, shard_idx_iter);
         xattr_req = shard_create_gfid_dict(local->xattr_req);
         if (!xattr_req) {
             local->op_ret = -1;
@@ -5036,7 +5036,6 @@ shard_common_resume_mknod(call_frame_t *frame, xlator_t *this,
             goto next;
         }
 
-        bname = strrchr(path, '/') + 1;
         loc.inode = inode_new(this->itable);
         loc.parent = inode_ref(priv->dot_shard_inode);
         ret = inode_path(loc.parent, bname, (char **)&(loc.path));
-- 
1.8.3.1
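The change above boils down to building the "/.shard/<gfid>" prefix once and
then rewriting only the ".<index>" suffix for each shard, instead of unparsing
the gfid for every path. Below is a minimal, self-contained sketch of that
pattern; SHARD_DIR, UUID_STR_LEN, make_base_path(), append_index() and the
sample gfid string are illustrative stand-ins rather than glusterfs
identifiers (the patch itself uses GF_SHARD_DIR, GF_UUID_BUF_SIZE,
shard_make_base_path() and shard_append_index()).

/* Sketch only: the prefix is built once, the loop touches just the suffix. */
#include <stdio.h>

#define SHARD_DIR ".shard"                     /* stand-in for GF_SHARD_DIR */
#define UUID_STR_LEN 37                        /* 36 chars + terminating NUL */
#define SHARD_PATH_LEN (sizeof(SHARD_DIR) + UUID_STR_LEN + 16)

/* Build "/.shard/<gfid>" once and return the length of that prefix. */
static int
make_base_path(char *path, const char *gfid_str)
{
    return snprintf(path, SHARD_PATH_LEN, "/%s/%s", SHARD_DIR, gfid_str);
}

/* Overwrite only the ".<index>" suffix that follows the fixed prefix. */
static void
append_index(char *path, size_t size, int prefix_len, int index)
{
    snprintf(path + prefix_len, size - prefix_len, ".%d", index);
}

int
main(void)
{
    char path[SHARD_PATH_LEN];
    const char *gfid = "7f2d9a31-5c4e-4b8a-9d2f-0c1b3a6e8f90"; /* sample only */
    int prefix_len = make_base_path(path, gfid);

    for (int i = 1; i <= 4; i++) {             /* one path per shard index */
        append_index(path, sizeof(path), prefix_len, i);
        printf("%s\n", path);
    }
    return 0;
}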