From 2cf22e54c8424949607c4a20df84887b838b2702 Mon Sep 17 00:00:00 2001
From: Krutika Dhananjay <kdhananj@redhat.com>
Date: Fri, 15 May 2020 11:29:36 +0530
Subject: [PATCH 380/382] features/shard: Aggregate size, block-count in iatt
 before unwinding setxattr

Backport of:
> Upstream patch - https://review.gluster.org/c/glusterfs/+/24471
> Fixes: #1243
> Change-Id: I4da0eceb4235b91546df79270bcc0af8cd64e9ea
> Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>

Posix translator returns pre and postbufs in the dict in {F}SETXATTR fops.
These iatts are further cached at layers like md-cache.
Shard translator, in its current state, simply returns these values without
updating the aggregated file size and block-count.

This patch fixes this problem.

Change-Id: I4da0eceb4235b91546df79270bcc0af8cd64e9ea
BUG: 1823423
Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/201135
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Xavi Hernandez Juan <xhernandez@redhat.com>
---
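Note: the crux of the change is that shard must overwrite ia_size and
ia_blocks in the prestat/poststat iatts that posix returns in xdata on
(F)SETXATTR before unwinding, so that layers above (e.g. md-cache) cache
the aggregated size of the whole file rather than that of the base file
alone. The fragment below is only an illustrative, self-contained sketch
of that idea using simplified stand-in types; the real implementation is
shard_common_set_xattr_cbk() in the diff that follows.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the structures involved; the real code works on
 * struct iatt from libglusterfs and on the shard_local_t that carries the
 * size looked up from the base file inode. */
struct stat_like {
  uint64_t ia_size;   /* file size in bytes */
  uint64_t ia_blocks; /* 512-byte blocks, as with st_blocks */
};

struct shard_aggregate {
  uint64_t size;   /* aggregated size across base file + shards */
  uint64_t blocks; /* aggregated block count */
};

/* Mirrors what the patch does in its (f)setxattr callback: keep every other
 * field posix returned, but override size and block count with the values
 * aggregated by shard so that cached attributes are correct. */
static void shard_fixup_stat(struct stat_like *buf,
                             const struct shard_aggregate *agg) {
  buf->ia_size = agg->size;
  buf->ia_blocks = agg->blocks;
}

int main(void) {
  /* posix sees only the 4 MiB base file of an 8 MiB sharded file... */
  struct stat_like poststat = {4194304, 8192};
  /* ...while shard knows the aggregated size spans two shards. */
  struct shard_aggregate agg = {8388608, 16384};

  shard_fixup_stat(&poststat, &agg);
  printf("size=%llu blocks=%llu\n", (unsigned long long)poststat.ia_size,
         (unsigned long long)poststat.ia_blocks);
  return 0;
}
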
 tests/bugs/shard/issue-1243.t      |  31 ++++++
 xlators/features/shard/src/shard.c | 218 +++++++++++++++++++++++++++++++++----
 2 files changed, 225 insertions(+), 24 deletions(-)
 create mode 100644 tests/bugs/shard/issue-1243.t

diff --git a/tests/bugs/shard/issue-1243.t b/tests/bugs/shard/issue-1243.t
new file mode 100644
index 0000000..b0c092c
--- /dev/null
+++ b/tests/bugs/shard/issue-1243.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.strict-o-direct on
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST $CLI volume set $V0 md-cache-timeout 10
+
+# Write data into a file such that its size crosses shard-block-size
+TEST dd if=/dev/zero of=$M0/foo bs=1048576 count=8 oflag=direct
+
+# Execute a setxattr on the file.
+TEST setfattr -n trusted.libvirt -v some-value $M0/foo
+
+# Size of the file should be the aggregated size, not the shard-block-size
+EXPECT '8388608' stat -c %s $M0/foo
+
+cleanup
diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c
index ee38ed2..6ae4c41 100644
--- a/xlators/features/shard/src/shard.c
+++ b/xlators/features/shard/src/shard.c
@@ -5929,36 +5929,206 @@ out:
   return 0;
 }
 
-int32_t shard_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
-                        dict_t *dict, int32_t flags, dict_t *xdata) {
-  int op_errno = EINVAL;
+int32_t shard_common_set_xattr_cbk(call_frame_t *frame, void *cookie,
+                                   xlator_t *this, int32_t op_ret,
+                                   int32_t op_errno, dict_t *xdata) {
+  int ret = -1;
+  struct iatt *prebuf = NULL;
+  struct iatt *postbuf = NULL;
+  struct iatt *stbuf = NULL;
+  data_t *data = NULL;
+  shard_local_t *local = NULL;
 
-  if (frame->root->pid != GF_CLIENT_PID_GSYNCD) {
-    GF_IF_INTERNAL_XATTR_GOTO(SHARD_XATTR_PREFIX "*", dict, op_errno, out);
-  }
+  local = frame->local;
 
-  STACK_WIND_TAIL(frame, FIRST_CHILD(this), FIRST_CHILD(this)->fops->fsetxattr,
-                  fd, dict, flags, xdata);
-  return 0;
-out:
-  shard_common_failure_unwind(GF_FOP_FSETXATTR, frame, -1, op_errno);
-  return 0;
+  if (op_ret < 0) {
+    local->op_ret = op_ret;
+    local->op_errno = op_errno;
+    goto err;
+  }
+
+  if (!xdata)
+    goto unwind;
+
+  data = dict_get(xdata, GF_PRESTAT);
+  if (data) {
+    stbuf = data_to_iatt(data, GF_PRESTAT);
+    prebuf = GF_MALLOC(sizeof(struct iatt), gf_common_mt_char);
+    if (prebuf == NULL) {
+      local->op_ret = -1;
+      local->op_errno = ENOMEM;
+      goto err;
+    }
+    *prebuf = *stbuf;
+    prebuf->ia_size = local->prebuf.ia_size;
+    prebuf->ia_blocks = local->prebuf.ia_blocks;
+    ret = dict_set_iatt(xdata, GF_PRESTAT, prebuf, false);
+    if (ret < 0) {
+      local->op_ret = -1;
+      local->op_errno = ENOMEM;
+      goto err;
+    }
+    prebuf = NULL;
+  }
+
+  data = dict_get(xdata, GF_POSTSTAT);
+  if (data) {
+    stbuf = data_to_iatt(data, GF_POSTSTAT);
+    postbuf = GF_MALLOC(sizeof(struct iatt), gf_common_mt_char);
+    if (postbuf == NULL) {
+      local->op_ret = -1;
+      local->op_errno = ENOMEM;
+      goto err;
+    }
+    *postbuf = *stbuf;
+    postbuf->ia_size = local->prebuf.ia_size;
+    postbuf->ia_blocks = local->prebuf.ia_blocks;
+    ret = dict_set_iatt(xdata, GF_POSTSTAT, postbuf, false);
+    if (ret < 0) {
+      local->op_ret = -1;
+      local->op_errno = ENOMEM;
+      goto err;
+    }
+    postbuf = NULL;
+  }
+
+unwind:
+  if (local->fd)
+    SHARD_STACK_UNWIND(fsetxattr, frame, local->op_ret, local->op_errno,
+                       xdata);
+  else
+    SHARD_STACK_UNWIND(setxattr, frame, local->op_ret, local->op_errno,
+                       xdata);
+  return 0;
+
+err:
+  GF_FREE(prebuf);
+  GF_FREE(postbuf);
+  shard_common_failure_unwind(local->fop, frame, local->op_ret,
+                              local->op_errno);
+  return 0;
 }
 
-int32_t shard_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
-                       dict_t *dict, int32_t flags, dict_t *xdata) {
-  int op_errno = EINVAL;
+int32_t shard_post_lookup_set_xattr_handler(call_frame_t *frame,
+                                            xlator_t *this) {
+  shard_local_t *local = NULL;
 
-  if (frame->root->pid != GF_CLIENT_PID_GSYNCD) {
-    GF_IF_INTERNAL_XATTR_GOTO(SHARD_XATTR_PREFIX "*", dict, op_errno, out);
-  }
+  local = frame->local;
 
-  STACK_WIND_TAIL(frame, FIRST_CHILD(this), FIRST_CHILD(this)->fops->setxattr,
-                  loc, dict, flags, xdata);
-  return 0;
-out:
-  shard_common_failure_unwind(GF_FOP_SETXATTR, frame, -1, op_errno);
-  return 0;
+  if (local->op_ret < 0) {
+    shard_common_failure_unwind(local->fop, frame, local->op_ret,
+                                local->op_errno);
+    return 0;
+  }
+
+  if (local->fd)
+    STACK_WIND(frame, shard_common_set_xattr_cbk, FIRST_CHILD(this),
+               FIRST_CHILD(this)->fops->fsetxattr, local->fd,
+               local->xattr_req, local->flags, local->xattr_rsp);
+  else
+    STACK_WIND(frame, shard_common_set_xattr_cbk, FIRST_CHILD(this),
+               FIRST_CHILD(this)->fops->setxattr, &local->loc,
+               local->xattr_req, local->flags, local->xattr_rsp);
+  return 0;
+}
+
+int32_t shard_common_set_xattr(call_frame_t *frame, xlator_t *this,
+                               glusterfs_fop_t fop, loc_t *loc, fd_t *fd,
+                               dict_t *dict, int32_t flags, dict_t *xdata) {
+  int ret = -1;
+  int op_errno = ENOMEM;
+  uint64_t block_size = 0;
+  shard_local_t *local = NULL;
+  inode_t *inode = loc ? loc->inode : fd->inode;
+
+  if ((IA_ISDIR(inode->ia_type)) || (IA_ISLNK(inode->ia_type))) {
+    if (loc)
+      STACK_WIND_TAIL(frame, FIRST_CHILD(this),
+                      FIRST_CHILD(this)->fops->setxattr, loc, dict, flags,
+                      xdata);
+    else
+      STACK_WIND_TAIL(frame, FIRST_CHILD(this),
+                      FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags,
+                      xdata);
+    return 0;
+  }
+
+  /* Sharded or not, if shard's special xattrs are attempted to be set,
+   * fail the fop with EPERM (except if the client is gsyncd.
+   */
+  if (frame->root->pid != GF_CLIENT_PID_GSYNCD) {
+    GF_IF_INTERNAL_XATTR_GOTO(SHARD_XATTR_PREFIX "*", dict, op_errno, err);
+  }
+
+  ret = shard_inode_ctx_get_block_size(inode, this, &block_size);
+  if (ret) {
+    gf_msg(this->name, GF_LOG_ERROR, 0, SHARD_MSG_INODE_CTX_GET_FAILED,
+           "Failed to get block size from inode ctx of %s",
+           uuid_utoa(inode->gfid));
+    goto err;
+  }
+
+  if (!block_size || frame->root->pid == GF_CLIENT_PID_GSYNCD) {
+    if (loc)
+      STACK_WIND_TAIL(frame, FIRST_CHILD(this),
+                      FIRST_CHILD(this)->fops->setxattr, loc, dict, flags,
+                      xdata);
+    else
+      STACK_WIND_TAIL(frame, FIRST_CHILD(this),
+                      FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags,
+                      xdata);
+    return 0;
+  }
+
+  local = mem_get0(this->local_pool);
+  if (!local)
+    goto err;
+
+  frame->local = local;
+  local->fop = fop;
+  if (loc) {
+    if (loc_copy(&local->loc, loc) != 0)
+      goto err;
+  }
+
+  if (fd) {
+    local->fd = fd_ref(fd);
+    local->loc.inode = inode_ref(fd->inode);
+    gf_uuid_copy(local->loc.gfid, fd->inode->gfid);
+  }
+  local->flags = flags;
+  /* Reusing local->xattr_req and local->xattr_rsp to store the setxattr dict
+   * and the xdata dict
+   */
+  if (dict)
+    local->xattr_req = dict_ref(dict);
+  if (xdata)
+    local->xattr_rsp = dict_ref(xdata);
+
+  /* To-Do: Switch from LOOKUP which is path-based, to FSTAT if the fop is
+   * on an fd. This comes under a generic class of bugs in shard tracked by
+   * bz #1782428.
+   */
+  shard_lookup_base_file(frame, this, &local->loc,
+                         shard_post_lookup_set_xattr_handler);
+  return 0;
+err:
+  shard_common_failure_unwind(fop, frame, -1, op_errno);
+  return 0;
+}
+
+int32_t shard_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+                        dict_t *dict, int32_t flags, dict_t *xdata) {
+  shard_common_set_xattr(frame, this, GF_FOP_FSETXATTR, NULL, fd, dict, flags,
+                         xdata);
+  return 0;
+}
+
+int32_t shard_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+                       dict_t *dict, int32_t flags, dict_t *xdata) {
+  shard_common_set_xattr(frame, this, GF_FOP_SETXATTR, loc, NULL, dict, flags,
+                         xdata);
+  return 0;
 }
 
 int shard_post_setattr_handler(call_frame_t *frame, xlator_t *this) {
-- 
1.8.3.1