diff --git a/.gitignore b/.gitignore index a241ccb..24421d5 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -SOURCES/LVM2.2.03.08.tgz +SOURCES/LVM2.2.03.09.tgz diff --git a/.lvm2.metadata b/.lvm2.metadata index 6126a43..e2642d8 100644 --- a/.lvm2.metadata +++ b/.lvm2.metadata @@ -1 +1 @@ -ed5cd9f81b22ad5d76b927711c977463879d8afd SOURCES/LVM2.2.03.08.tgz +15a90d5039a2a1e9f67611a2a6c2faa72e8996aa SOURCES/LVM2.2.03.09.tgz diff --git a/SOURCES/0001-Merge-master-up-to-commit-53803821de16.patch b/SOURCES/0001-Merge-master-up-to-commit-53803821de16.patch new file mode 100644 index 0000000..1dbef5b --- /dev/null +++ b/SOURCES/0001-Merge-master-up-to-commit-53803821de16.patch @@ -0,0 +1,3317 @@ + base/data-struct/list.h | 4 +- + daemons/lvmlockd/lvmlockd-client.h | 1 + + daemons/lvmlockd/lvmlockd-core.c | 370 +++++++-------- + daemons/lvmlockd/lvmlockd-dlm.c | 8 +- + daemons/lvmlockd/lvmlockd-internal.h | 7 +- + lib/cache/lvmcache.c | 551 ++++++++++------------ + lib/cache/lvmcache.h | 12 +- + lib/commands/toolcontext.c | 3 +- + lib/device/dev-io.c | 3 + + lib/format_text/archiver.c | 2 +- + lib/format_text/format-text.c | 28 +- + lib/format_text/text_label.c | 8 +- + lib/label/hints.c | 6 + + lib/label/label.c | 2 +- + lib/label/label.h | 2 +- + lib/locking/lvmlockd.c | 14 +- + lib/metadata/metadata-exported.h | 3 - + lib/metadata/metadata.c | 68 +-- + lib/metadata/metadata.h | 8 +- + man/lvmlockd.8_main | 14 +- + test/shell/duplicate-vgnames.sh | 660 +++++++++++++++++++++++++++ + test/shell/duplicate-vgrename.sh | 319 +++++++++++++ + test/shell/integrity-dmeventd.sh | 6 +- + test/shell/integrity-large.sh | 6 +- + test/shell/integrity-misc.sh | 6 +- + test/shell/integrity.sh | 6 +- + test/shell/process-each-duplicate-vgnames.sh | 55 --- + test/shell/thin-foreign-repair.sh | 4 +- + tools/command.c | 12 +- + tools/lvconvert.c | 3 +- + tools/pvck.c | 8 +- + tools/pvscan.c | 2 +- + tools/toollib.c | 2 - + tools/vgchange.c | 9 +- + tools/vgimportclone.c | 4 +- + tools/vgmerge.c | 4 +- + tools/vgrename.c | 2 +- + tools/vgsplit.c | 2 +- + 39 files changed, 1571 insertions(+), 654 deletions(-) + create mode 100644 test/shell/duplicate-vgnames.sh + create mode 100644 test/shell/duplicate-vgrename.sh + delete mode 100644 test/shell/process-each-duplicate-vgnames.sh + +diff --git a/base/data-struct/list.h b/base/data-struct/list.h +index 54cb1c1..e0a6256 100644 +--- a/base/data-struct/list.h ++++ b/base/data-struct/list.h +@@ -1,7 +1,7 @@ + #ifndef BASE_DATA_STRUCT_LIST_H + #define BASE_DATA_STRUCT_LIST_H + +-#include /* offsetof */ ++#include "base/memory/container_of.h" + + //---------------------------------------------------------------- + +@@ -100,7 +100,7 @@ struct dm_list *dm_list_next(const struct dm_list *head, const struct dm_list *e + * contained in a structure of type t, return the containing structure. 
+ */ + #define dm_list_struct_base(v, t, head) \ +- ((t *)((const char *)(v) - offsetof(t, head))) ++ container_of(v, t, head) + + /* + * Given the address v of an instance of 'struct dm_list list' contained in +diff --git a/daemons/lvmlockd/lvmlockd-client.h b/daemons/lvmlockd/lvmlockd-client.h +index 16d1613..62ffb73 100644 +--- a/daemons/lvmlockd/lvmlockd-client.h ++++ b/daemons/lvmlockd/lvmlockd-client.h +@@ -14,6 +14,7 @@ + #include "libdaemon/client/daemon-client.h" + + #define LVMLOCKD_SOCKET DEFAULT_RUN_DIR "/lvmlockd.socket" ++#define LVMLOCKD_ADOPT_FILE DEFAULT_RUN_DIR "/lvmlockd.adopt" + + /* Wrappers to open/close connection */ + +diff --git a/daemons/lvmlockd/lvmlockd-core.c b/daemons/lvmlockd/lvmlockd-core.c +index 39275fb..84272c4 100644 +--- a/daemons/lvmlockd/lvmlockd-core.c ++++ b/daemons/lvmlockd/lvmlockd-core.c +@@ -38,6 +38,8 @@ + #define EXTERN + #include "lvmlockd-internal.h" + ++static int str_to_mode(const char *str); ++ + /* + * Basic operation of lvmlockd + * +@@ -142,6 +144,8 @@ static const char *lvmlockd_protocol = "lvmlockd"; + static const int lvmlockd_protocol_version = 1; + static int daemon_quit; + static int adopt_opt; ++static uint32_t adopt_update_count; ++static const char *adopt_file; + + /* + * We use a separate socket for dumping daemon info. +@@ -812,6 +816,144 @@ int version_from_args(char *args, unsigned int *major, unsigned int *minor, unsi + } + + /* ++ * Write new info when a command exits if that command has acquired a new LV ++ * lock. If the command has released an LV lock we don't bother updating the ++ * info. When adopting, we eliminate any LV lock adoptions if there is no dm ++ * device for that LV. If lvmlockd is terminated after acquiring but before ++ * writing this file, those LV locks would not be adopted on restart. 
++ */ ++ ++#define ADOPT_VERSION_MAJOR 1 ++#define ADOPT_VERSION_MINOR 0 ++ ++static void write_adopt_file(void) ++{ ++ struct lockspace *ls; ++ struct resource *r; ++ struct lock *lk; ++ time_t t; ++ FILE *fp; ++ ++ if (!(fp = fopen(adopt_file, "w"))) ++ return; ++ ++ adopt_update_count++; ++ ++ t = time(NULL); ++ fprintf(fp, "lvmlockd adopt_version %u.%u pid %d updates %u %s", ++ ADOPT_VERSION_MAJOR, ADOPT_VERSION_MINOR, getpid(), adopt_update_count, ctime(&t)); ++ ++ pthread_mutex_lock(&lockspaces_mutex); ++ list_for_each_entry(ls, &lockspaces, list) { ++ if (ls->lm_type == LD_LM_DLM && !strcmp(ls->name, gl_lsname_dlm)) ++ continue; ++ fprintf(fp, "VG: %38s %s %s %s\n", ++ ls->vg_uuid, ls->vg_name, lm_str(ls->lm_type), ls->vg_args); ++ list_for_each_entry(r, &ls->resources, list) { ++ if (r->type != LD_RT_LV) ++ continue; ++ if ((r->mode != LD_LK_EX) && (r->mode != LD_LK_SH)) ++ continue; ++ list_for_each_entry(lk, &r->locks, list) { ++ fprintf(fp, "LV: %38s %s %s %s %u\n", ++ ls->vg_uuid, r->name, r->lv_args, mode_str(r->mode), r->version); ++ } ++ } ++ } ++ pthread_mutex_unlock(&lockspaces_mutex); ++ ++ fflush(fp); ++ fclose(fp); ++} ++ ++static int read_adopt_file(struct list_head *vg_lockd) ++{ ++ char adopt_line[512]; ++ char vg_uuid[72]; ++ char lm_type_str[16]; ++ char mode[8]; ++ struct lockspace *ls, *ls2; ++ struct resource *r; ++ FILE *fp; ++ ++ if (MAX_ARGS != 64 || MAX_NAME != 64) ++ return -1; ++ ++ if (!(fp = fopen(adopt_file, "r"))) ++ return 0; ++ ++ while (fgets(adopt_line, sizeof(adopt_line), fp)) { ++ if (adopt_line[0] == '#') ++ continue; ++ else if (!strncmp(adopt_line, "lvmlockd", 8)) { ++ unsigned int v_major = 0, v_minor = 0; ++ sscanf(adopt_line, "lvmlockd adopt_version %u.%u", &v_major, &v_minor); ++ if (v_major != ADOPT_VERSION_MAJOR) ++ goto fail; ++ ++ } else if (!strncmp(adopt_line, "VG:", 3)) { ++ if (!(ls = alloc_lockspace())) ++ goto fail; ++ ++ memset(vg_uuid, 0, sizeof(vg_uuid)); ++ ++ if (sscanf(adopt_line, "VG: %63s %64s %16s %64s", ++ vg_uuid, ls->vg_name, lm_type_str, ls->vg_args) != 4) { ++ goto fail; ++ } ++ ++ memcpy(ls->vg_uuid, vg_uuid, 64); ++ ++ if ((ls->lm_type = str_to_lm(lm_type_str)) < 0) ++ goto fail; ++ ++ list_add(&ls->list, vg_lockd); ++ ++ } else if (!strncmp(adopt_line, "LV:", 3)) { ++ if (!(r = alloc_resource())) ++ goto fail; ++ ++ r->type = LD_RT_LV; ++ ++ memset(vg_uuid, 0, sizeof(vg_uuid)); ++ ++ if (sscanf(adopt_line, "LV: %64s %64s %s %8s %u", ++ vg_uuid, r->name, r->lv_args, mode, &r->version) != 5) { ++ goto fail; ++ } ++ ++ if ((r->adopt_mode = str_to_mode(mode)) == LD_LK_IV) ++ goto fail; ++ ++ if (ls && !memcmp(ls->vg_uuid, vg_uuid, 64)) { ++ list_add(&r->list, &ls->resources); ++ r = NULL; ++ } else { ++ list_for_each_entry(ls2, vg_lockd, list) { ++ if (memcmp(ls2->vg_uuid, vg_uuid, 64)) ++ continue; ++ list_add(&r->list, &ls2->resources); ++ r = NULL; ++ break; ++ } ++ } ++ ++ if (r) { ++ log_error("No lockspace found for resource %s vg_uuid %s", r->name, vg_uuid); ++ goto fail; ++ } ++ } ++ } ++ ++ fclose(fp); ++ return 0; ++ ++fail: ++ fclose(fp); ++ return -1; ++} ++ ++/* + * These are few enough that arrays of function pointers can + * be avoided. 
+ */ +@@ -4689,6 +4831,7 @@ static void *client_thread_main(void *arg_in) + struct client *cl; + struct action *act; + struct action *act_un; ++ uint32_t lock_acquire_count = 0, lock_acquire_written = 0; + int rv; + + while (1) { +@@ -4720,6 +4863,9 @@ static void *client_thread_main(void *arg_in) + rv = -1; + } + ++ if (act->flags & LD_AF_LV_LOCK) ++ lock_acquire_count++; ++ + /* + * The client failed after we acquired an LV lock for + * it, but before getting this reply saying it's done. +@@ -4741,6 +4887,11 @@ static void *client_thread_main(void *arg_in) + continue; + } + ++ if (adopt_opt && (lock_acquire_count > lock_acquire_written)) { ++ lock_acquire_written = lock_acquire_count; ++ write_adopt_file(); ++ } ++ + /* + * Queue incoming actions for lockspace threads + */ +@@ -4814,6 +4965,8 @@ static void *client_thread_main(void *arg_in) + pthread_mutex_unlock(&client_mutex); + } + out: ++ if (adopt_opt && lock_acquire_written) ++ unlink(adopt_file); + return NULL; + } + +@@ -4846,180 +4999,6 @@ static void close_client_thread(void) + log_error("pthread_join client_thread error %d", perrno); + } + +-/* +- * Get a list of all VGs with a lockd type (sanlock|dlm). +- * We'll match this list against a list of existing lockspaces that are +- * found in the lock manager. +- * +- * For each of these VGs, also create a struct resource on ls->resources to +- * represent each LV in the VG that uses a lock. For each of these LVs +- * that are active, we'll attempt to adopt a lock. +- */ +- +-static int get_lockd_vgs(struct list_head *vg_lockd) +-{ +- /* FIXME: get VGs some other way */ +- return -1; +-#if 0 +- struct list_head update_vgs; +- daemon_reply reply; +- struct dm_config_node *cn; +- struct dm_config_node *metadata; +- struct dm_config_node *md_cn; +- struct dm_config_node *lv_cn; +- struct lockspace *ls, *safe; +- struct resource *r; +- const char *vg_name; +- const char *vg_uuid; +- const char *lv_uuid; +- const char *lock_type; +- const char *lock_args; +- char find_str_path[PATH_MAX]; +- int rv = 0; +- +- INIT_LIST_HEAD(&update_vgs); +- +- reply = send_lvmetad("vg_list", "token = %s", "skip", NULL); +- +- if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK")) { +- log_error("vg_list from lvmetad failed %d", reply.error); +- rv = -EINVAL; +- goto destroy; +- } +- +- if (!(cn = dm_config_find_node(reply.cft->root, "volume_groups"))) { +- log_error("get_lockd_vgs no vgs"); +- rv = -EINVAL; +- goto destroy; +- } +- +- /* create an update_vgs list of all vg uuids */ +- +- for (cn = cn->child; cn; cn = cn->sib) { +- vg_uuid = cn->key; +- +- if (!(ls = alloc_lockspace())) { +- rv = -ENOMEM; +- break; +- } +- +- strncpy(ls->vg_uuid, vg_uuid, 64); +- list_add_tail(&ls->list, &update_vgs); +- log_debug("get_lockd_vgs %s", vg_uuid); +- } +- destroy: +- daemon_reply_destroy(reply); +- +- if (rv < 0) +- goto out; +- +- /* get vg_name and lock_type for each vg uuid entry in update_vgs */ +- +- list_for_each_entry(ls, &update_vgs, list) { +- reply = send_lvmetad("vg_lookup", +- "token = %s", "skip", +- "uuid = %s", ls->vg_uuid, +- NULL); +- +- if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK")) { +- log_error("vg_lookup from lvmetad failed %d", reply.error); +- rv = -EINVAL; +- goto next; +- } +- +- vg_name = daemon_reply_str(reply, "name", NULL); +- if (!vg_name) { +- log_error("get_lockd_vgs %s no name", ls->vg_uuid); +- rv = -EINVAL; +- goto next; +- } +- +- strncpy(ls->vg_name, vg_name, MAX_NAME); +- +- metadata = dm_config_find_node(reply.cft->root, 
"metadata"); +- if (!metadata) { +- log_error("get_lockd_vgs %s name %s no metadata", +- ls->vg_uuid, ls->vg_name); +- rv = -EINVAL; +- goto next; +- } +- +- lock_type = dm_config_find_str(metadata, "metadata/lock_type", NULL); +- ls->lm_type = str_to_lm(lock_type); +- +- if ((ls->lm_type != LD_LM_SANLOCK) && (ls->lm_type != LD_LM_DLM)) { +- log_debug("get_lockd_vgs %s not lockd type", ls->vg_name); +- continue; +- } +- +- lock_args = dm_config_find_str(metadata, "metadata/lock_args", NULL); +- if (lock_args) +- strncpy(ls->vg_args, lock_args, MAX_ARGS); +- +- log_debug("get_lockd_vgs %s lock_type %s lock_args %s", +- ls->vg_name, lock_type, lock_args ?: "none"); +- +- /* +- * Make a record (struct resource) of each lv that uses a lock. +- * For any lv that uses a lock, we'll check if the lv is active +- * and if so try to adopt a lock for it. +- */ +- +- for (md_cn = metadata->child; md_cn; md_cn = md_cn->sib) { +- if (strcmp(md_cn->key, "logical_volumes")) +- continue; +- +- for (lv_cn = md_cn->child; lv_cn; lv_cn = lv_cn->sib) { +- snprintf(find_str_path, PATH_MAX, "%s/lock_args", lv_cn->key); +- lock_args = dm_config_find_str(lv_cn, find_str_path, NULL); +- if (!lock_args) +- continue; +- +- snprintf(find_str_path, PATH_MAX, "%s/id", lv_cn->key); +- lv_uuid = dm_config_find_str(lv_cn, find_str_path, NULL); +- +- if (!lv_uuid) { +- log_error("get_lock_vgs no lv id for name %s", lv_cn->key); +- continue; +- } +- +- if (!(r = alloc_resource())) { +- rv = -ENOMEM; +- goto next; +- } +- +- r->use_vb = 0; +- r->type = LD_RT_LV; +- strncpy(r->name, lv_uuid, MAX_NAME); +- if (lock_args) +- strncpy(r->lv_args, lock_args, MAX_ARGS); +- list_add_tail(&r->list, &ls->resources); +- log_debug("get_lockd_vgs %s lv %s %s (name %s)", +- ls->vg_name, r->name, lock_args ? lock_args : "", lv_cn->key); +- } +- } +- next: +- daemon_reply_destroy(reply); +- +- if (rv < 0) +- break; +- } +-out: +- /* Return lockd VG's on the vg_lockd list. */ +- +- list_for_each_entry_safe(ls, safe, &update_vgs, list) { +- list_del(&ls->list); +- +- if ((ls->lm_type == LD_LM_SANLOCK) || (ls->lm_type == LD_LM_DLM)) +- list_add_tail(&ls->list, vg_lockd); +- else +- free(ls); +- } +- +- return rv; +-#endif +-} +- + static char _dm_uuid[DM_UUID_LEN]; + + static char *get_dm_uuid(char *dm_name) +@@ -5236,9 +5215,9 @@ static void adopt_locks(void) + INIT_LIST_HEAD(&to_unlock); + + /* +- * Get list of lockspaces from lock managers. +- * Get list of VGs from lvmetad with a lockd type. +- * Get list of active lockd type LVs from /dev. ++ * Get list of lockspaces from currently running lock managers. ++ * Get list of shared VGs from file written by prior lvmlockd. ++ * Get list of active LVs (in the shared VGs) from the file. + */ + + if (lm_support_dlm() && lm_is_running_dlm()) { +@@ -5262,12 +5241,17 @@ static void adopt_locks(void) + * Adds a struct lockspace to vg_lockd for each lockd VG. + * Adds a struct resource to ls->resources for each LV. + */ +- rv = get_lockd_vgs(&vg_lockd); ++ rv = read_adopt_file(&vg_lockd); + if (rv < 0) { +- log_error("adopt_locks get_lockd_vgs failed"); ++ log_error("adopt_locks read_adopt_file failed"); + goto fail; + } + ++ if (list_empty(&vg_lockd)) { ++ log_debug("No lockspaces in adopt file"); ++ return; ++ } ++ + /* + * For each resource on each lockspace, check if the + * corresponding LV is active. 
If so, leave the +@@ -5506,7 +5490,7 @@ static void adopt_locks(void) + goto fail; + act->op = LD_OP_LOCK; + act->rt = LD_RT_LV; +- act->mode = LD_LK_EX; ++ act->mode = r->adopt_mode; + act->flags = (LD_AF_ADOPT | LD_AF_PERSISTENT); + act->client_id = INTERNAL_CLIENT_ID; + act->lm_type = ls->lm_type; +@@ -5604,8 +5588,9 @@ static void adopt_locks(void) + * Adopt failed because the orphan has a different mode + * than initially requested. Repeat the lock-adopt operation + * with the other mode. N.B. this logic depends on first +- * trying sh then ex for GL/VG locks, and ex then sh for +- * LV locks. ++ * trying sh then ex for GL/VG locks; for LV locks the mode ++ * from the adopt file is tried first, the alternate ++ * (if the mode in adopt file was wrong somehow.) + */ + + if ((act->rt != LD_RT_LV) && (act->mode == LD_LK_SH)) { +@@ -5613,9 +5598,12 @@ static void adopt_locks(void) + act->mode = LD_LK_EX; + rv = add_lock_action(act); + +- } else if ((act->rt == LD_RT_LV) && (act->mode == LD_LK_EX)) { +- /* LV locks: attempt to adopt sh after ex failed. */ +- act->mode = LD_LK_SH; ++ } else if (act->rt == LD_RT_LV) { ++ /* LV locks: attempt to adopt the other mode. */ ++ if (act->mode == LD_LK_EX) ++ act->mode = LD_LK_SH; ++ else if (act->mode == LD_LK_SH) ++ act->mode = LD_LK_EX; + rv = add_lock_action(act); + + } else { +@@ -5750,10 +5738,13 @@ static void adopt_locks(void) + if (count_start_fail || count_adopt_fail) + goto fail; + ++ unlink(adopt_file); ++ write_adopt_file(); + log_debug("adopt_locks done"); + return; + + fail: ++ unlink(adopt_file); + log_error("adopt_locks failed, reset host"); + } + +@@ -6028,6 +6019,8 @@ static void usage(char *prog, FILE *file) + fprintf(file, " Set path to the pid file. [%s]\n", LVMLOCKD_PIDFILE); + fprintf(file, " --socket-path | -s \n"); + fprintf(file, " Set path to the socket to listen on. [%s]\n", LVMLOCKD_SOCKET); ++ fprintf(file, " --adopt-file \n"); ++ fprintf(file, " Set path to the adopt file. [%s]\n", LVMLOCKD_ADOPT_FILE); + fprintf(file, " --syslog-priority | -S err|warning|debug\n"); + fprintf(file, " Write log messages from this level up to syslog. 
[%s]\n", _syslog_num_to_name(LOG_SYSLOG_PRIO)); + fprintf(file, " --gl-type | -g \n"); +@@ -6063,6 +6056,7 @@ int main(int argc, char *argv[]) + {"daemon-debug", no_argument, 0, 'D' }, + {"pid-file", required_argument, 0, 'p' }, + {"socket-path", required_argument, 0, 's' }, ++ {"adopt-file", required_argument, 0, 128 }, + {"gl-type", required_argument, 0, 'g' }, + {"host-id", required_argument, 0, 'i' }, + {"host-id-file", required_argument, 0, 'F' }, +@@ -6085,6 +6079,9 @@ int main(int argc, char *argv[]) + switch (c) { + case '0': + break; ++ case 128: ++ adopt_file = strdup(optarg); ++ break; + case 'h': + usage(argv[0], stdout); + exit(EXIT_SUCCESS); +@@ -6146,6 +6143,9 @@ int main(int argc, char *argv[]) + if (!ds.socket_path) + ds.socket_path = LVMLOCKD_SOCKET; + ++ if (!adopt_file) ++ adopt_file = LVMLOCKD_ADOPT_FILE; ++ + /* runs daemon_main/main_loop */ + daemon_start(ds); + +diff --git a/daemons/lvmlockd/lvmlockd-dlm.c b/daemons/lvmlockd/lvmlockd-dlm.c +index 75e6dee..7915cc0 100644 +--- a/daemons/lvmlockd/lvmlockd-dlm.c ++++ b/daemons/lvmlockd/lvmlockd-dlm.c +@@ -398,12 +398,18 @@ static int lm_adopt_dlm(struct lockspace *ls, struct resource *r, int ld_mode, + (void *)1, (void *)1, (void *)1, + NULL, NULL); + +- if (rv == -1 && errno == -EAGAIN) { ++ if (rv == -1 && (errno == EAGAIN)) { + log_debug("S %s R %s adopt_dlm adopt mode %d try other mode", + ls->name, r->name, ld_mode); + rv = -EUCLEAN; + goto fail; + } ++ if (rv == -1 && (errno == ENOENT)) { ++ log_debug("S %s R %s adopt_dlm adopt mode %d no lock", ++ ls->name, r->name, ld_mode); ++ rv = -ENOENT; ++ goto fail; ++ } + if (rv < 0) { + log_debug("S %s R %s adopt_dlm mode %d flags %x error %d errno %d", + ls->name, r->name, mode, flags, rv, errno); +diff --git a/daemons/lvmlockd/lvmlockd-internal.h b/daemons/lvmlockd/lvmlockd-internal.h +index 85e8caf..191c449 100644 +--- a/daemons/lvmlockd/lvmlockd-internal.h ++++ b/daemons/lvmlockd/lvmlockd-internal.h +@@ -11,6 +11,8 @@ + #ifndef _LVM_LVMLOCKD_INTERNAL_H + #define _LVM_LVMLOCKD_INTERNAL_H + ++#include "base/memory/container_of.h" ++ + #define MAX_NAME 64 + #define MAX_ARGS 64 + +@@ -145,6 +147,7 @@ struct resource { + char name[MAX_NAME+1]; /* vg name or lv name */ + int8_t type; /* resource type LD_RT_ */ + int8_t mode; ++ int8_t adopt_mode; + unsigned int sh_count; /* number of sh locks on locks list */ + uint32_t version; + uint32_t last_client_id; /* last client_id to lock or unlock resource */ +@@ -216,10 +219,6 @@ struct val_blk { + /* lm_unlock flags */ + #define LMUF_FREE_VG 0x00000001 + +-#define container_of(ptr, type, member) ({ \ +- const typeof( ((type *)0)->member ) *__mptr = (ptr); \ +- (type *)( (char *)__mptr - offsetof(type,member) );}) +- + static inline void INIT_LIST_HEAD(struct list_head *list) + { + list->next = list; +diff --git a/lib/cache/lvmcache.c b/lib/cache/lvmcache.c +index 2c8c614..6cb5ff0 100644 +--- a/lib/cache/lvmcache.c ++++ b/lib/cache/lvmcache.c +@@ -49,7 +49,7 @@ struct lvmcache_info { + + /* One per VG */ + struct lvmcache_vginfo { +- struct dm_list list; /* Join these vginfos together */ ++ struct dm_list list; /* _vginfos */ + struct dm_list infos; /* List head for lvmcache_infos */ + struct dm_list outdated_infos; /* vg_read moves info from infos to outdated_infos */ + struct dm_list pvsummaries; /* pv_list taken directly from vgsummary */ +@@ -58,7 +58,6 @@ struct lvmcache_vginfo { + uint32_t status; + char vgid[ID_LEN + 1]; + char _padding[7]; +- struct lvmcache_vginfo *next; /* Another VG with same name? 
*/ + char *creation_host; + char *system_id; + char *lock_type; +@@ -66,8 +65,16 @@ struct lvmcache_vginfo { + size_t mda_size; + int seqno; + bool scan_summary_mismatch; /* vgsummary from devs had mismatching seqno or checksum */ ++ bool has_duplicate_local_vgname; /* this local vg and another local vg have same name */ ++ bool has_duplicate_foreign_vgname; /* this foreign vg and another foreign vg have same name */ + }; + ++/* ++ * Each VG found during scan gets a vginfo struct. ++ * Each vginfo is in _vginfos and _vgid_hash, and ++ * _vgname_hash (unless disabled due to duplicate vgnames). ++ */ ++ + static struct dm_hash_table *_pvid_hash = NULL; + static struct dm_hash_table *_vgid_hash = NULL; + static struct dm_hash_table *_vgname_hash = NULL; +@@ -262,16 +269,6 @@ void lvmcache_get_mdas(struct cmd_context *cmd, + } + } + +-static void _vginfo_attach_info(struct lvmcache_vginfo *vginfo, +- struct lvmcache_info *info) +-{ +- if (!vginfo) +- return; +- +- info->vginfo = vginfo; +- dm_list_add(&vginfo->infos, &info->list); +-} +- + static void _vginfo_detach_info(struct lvmcache_info *info) + { + if (!dm_list_empty(&info->list)) { +@@ -282,57 +279,80 @@ static void _vginfo_detach_info(struct lvmcache_info *info) + info->vginfo = NULL; + } + +-/* If vgid supplied, require a match. */ +-struct lvmcache_vginfo *lvmcache_vginfo_from_vgname(const char *vgname, const char *vgid) ++static struct lvmcache_vginfo *_search_vginfos_list(const char *vgname, const char *vgid) + { + struct lvmcache_vginfo *vginfo; + +- if (!vgname) +- return lvmcache_vginfo_from_vgid(vgid); +- +- if (!_vgname_hash) { +- log_debug_cache(INTERNAL_ERROR "Internal lvmcache is no yet initialized."); +- return NULL; +- } +- +- if (!(vginfo = dm_hash_lookup(_vgname_hash, vgname))) { +- log_debug_cache("lvmcache has no info for vgname \"%s\"%s" FMTVGID ".", +- vgname, (vgid) ? " with VGID " : "", (vgid) ? : ""); +- return NULL; +- } +- +- if (vgid) +- do +- if (!strncmp(vgid, vginfo->vgid, ID_LEN)) ++ if (vgid) { ++ dm_list_iterate_items(vginfo, &_vginfos) { ++ if (!strcmp(vgid, vginfo->vgid)) + return vginfo; +- while ((vginfo = vginfo->next)); +- +- if (!vginfo) +- log_debug_cache("lvmcache has not found vgname \"%s\"%s" FMTVGID ".", +- vgname, (vgid) ? " with VGID " : "", (vgid) ? 
: ""); +- +- return vginfo; ++ } ++ } else { ++ dm_list_iterate_items(vginfo, &_vginfos) { ++ if (!strcmp(vgname, vginfo->vgname)) ++ return vginfo; ++ } ++ } ++ return NULL; + } + +-struct lvmcache_vginfo *lvmcache_vginfo_from_vgid(const char *vgid) ++static struct lvmcache_vginfo *_vginfo_lookup(const char *vgname, const char *vgid) + { + struct lvmcache_vginfo *vginfo; + char id[ID_LEN + 1] __attribute__((aligned(8))); + +- if (!_vgid_hash || !vgid) { +- log_debug_cache(INTERNAL_ERROR "Internal cache cannot lookup vgid."); +- return NULL; ++ if (vgid) { ++ /* vgid not necessarily NULL-terminated */ ++ (void) dm_strncpy(id, vgid, sizeof(id)); ++ ++ if ((vginfo = dm_hash_lookup(_vgid_hash, id))) { ++ if (vgname && strcmp(vginfo->vgname, vgname)) { ++ /* should never happen */ ++ log_error(INTERNAL_ERROR "vginfo_lookup vgid %s has two names %s %s", ++ id, vginfo->vgname, vgname); ++ return NULL; ++ } ++ return vginfo; ++ } else { ++ /* lookup by vgid that doesn't exist */ ++ return NULL; ++ } + } + +- /* vgid not necessarily NULL-terminated */ +- (void) dm_strncpy(id, vgid, sizeof(id)); ++ if (vgname && !_found_duplicate_vgnames) { ++ if ((vginfo = dm_hash_lookup(_vgname_hash, vgname))) { ++ if (vginfo->has_duplicate_local_vgname) { ++ /* should never happen, found_duplicate_vgnames should be set */ ++ log_error(INTERNAL_ERROR "vginfo_lookup %s %s has_duplicate_local_vgname", vgname, vgid); ++ return NULL; ++ } ++ return vginfo; ++ } ++ } + +- if (!(vginfo = dm_hash_lookup(_vgid_hash, id))) { +- log_debug_cache("lvmcache has no info for vgid \"%s\"", id); +- return NULL; ++ if (vgname && _found_duplicate_vgnames) { ++ if ((vginfo = _search_vginfos_list(vgname, vgid))) { ++ if (vginfo->has_duplicate_local_vgname) { ++ log_debug("vginfo_lookup %s %s has_duplicate_local_vgname return none", vgname, vgid); ++ return NULL; ++ } ++ return vginfo; ++ } + } + +- return vginfo; ++ /* lookup by vgname that doesn't exist */ ++ return NULL; ++} ++ ++struct lvmcache_vginfo *lvmcache_vginfo_from_vgname(const char *vgname, const char *vgid) ++{ ++ return _vginfo_lookup(vgname, vgid); ++} ++ ++struct lvmcache_vginfo *lvmcache_vginfo_from_vgid(const char *vgid) ++{ ++ return _vginfo_lookup(NULL, vgid); + } + + const char *lvmcache_vgname_from_vgid(struct dm_pool *mem, const char *vgid) +@@ -353,17 +373,43 @@ const char *lvmcache_vgid_from_vgname(struct cmd_context *cmd, const char *vgnam + { + struct lvmcache_vginfo *vginfo; + +- if (!(vginfo = dm_hash_lookup(_vgname_hash, vgname))) +- return_NULL; ++ if (_found_duplicate_vgnames) { ++ if (!(vginfo = _search_vginfos_list(vgname, NULL))) ++ return_NULL; ++ } else { ++ if (!(vginfo = dm_hash_lookup(_vgname_hash, vgname))) ++ return_NULL; ++ } + +- if (!vginfo->next) +- return dm_pool_strdup(cmd->mem, vginfo->vgid); ++ if (vginfo->has_duplicate_local_vgname) { ++ /* ++ * return NULL if there is a local VG with the same name since ++ * we don't know which to use. ++ */ ++ return NULL; ++ } + +- /* +- * There are multiple VGs with this name to choose from. +- * Return an error because we don't know which VG is intended. 
+- */ +- return NULL; ++ if (vginfo->has_duplicate_foreign_vgname) ++ return NULL; ++ ++ return dm_pool_strdup(cmd->mem, vginfo->vgid); ++} ++ ++bool lvmcache_has_duplicate_local_vgname(const char *vgid, const char *vgname) ++{ ++ struct lvmcache_vginfo *vginfo; ++ ++ if (_found_duplicate_vgnames) { ++ if (!(vginfo = _search_vginfos_list(vgname, vgid))) ++ return false; ++ } else { ++ if (!(vginfo = dm_hash_lookup(_vgname_hash, vgname))) ++ return false; ++ } ++ ++ if (vginfo->has_duplicate_local_vgname) ++ return true; ++ return false; + } + + /* +@@ -986,15 +1032,6 @@ int lvmcache_label_scan(struct cmd_context *cmd) + + log_debug_cache("Finding VG info"); + +- /* FIXME: can this happen? */ +- if (!cmd->filter) { +- log_error("label scan is missing filter"); +- goto out; +- } +- +- if (!refresh_filters(cmd)) +- log_error("Scan failed to refresh device filter."); +- + /* + * Duplicates found during this label scan are added to _initial_duplicates. + */ +@@ -1057,7 +1094,6 @@ int lvmcache_label_scan(struct cmd_context *cmd) + + r = 1; + +- out: + dm_list_iterate_items(vginfo, &_vginfos) { + if (is_orphan_vg(vginfo->vgname)) + continue; +@@ -1148,49 +1184,20 @@ int lvmcache_pvid_in_unused_duplicates(const char *pvid) + return 0; + } + +-static int _free_vginfo(struct lvmcache_vginfo *vginfo) ++static void _free_vginfo(struct lvmcache_vginfo *vginfo) + { +- struct lvmcache_vginfo *primary_vginfo, *vginfo2; +- int r = 1; +- +- vginfo2 = primary_vginfo = lvmcache_vginfo_from_vgname(vginfo->vgname, NULL); +- +- if (vginfo == primary_vginfo) { +- dm_hash_remove(_vgname_hash, vginfo->vgname); +- if (vginfo->next && !dm_hash_insert(_vgname_hash, vginfo->vgname, +- vginfo->next)) { +- log_error("_vgname_hash re-insertion for %s failed", +- vginfo->vgname); +- r = 0; +- } +- } else +- while (vginfo2) { +- if (vginfo2->next == vginfo) { +- vginfo2->next = vginfo->next; +- break; +- } +- vginfo2 = vginfo2->next; +- } +- +- free(vginfo->system_id); + free(vginfo->vgname); ++ free(vginfo->system_id); + free(vginfo->creation_host); +- +- if (*vginfo->vgid && _vgid_hash && +- lvmcache_vginfo_from_vgid(vginfo->vgid) == vginfo) +- dm_hash_remove(_vgid_hash, vginfo->vgid); +- +- dm_list_del(&vginfo->list); +- ++ if (vginfo->lock_type) ++ free(vginfo->lock_type); + free(vginfo); +- +- return r; + } + + /* +- * vginfo must be info->vginfo unless info is NULL ++ * Remove vginfo from standard lists/hashes. + */ +-static int _drop_vginfo(struct lvmcache_info *info, struct lvmcache_vginfo *vginfo) ++static void _drop_vginfo(struct lvmcache_info *info, struct lvmcache_vginfo *vginfo) + { + if (info) + _vginfo_detach_info(info); +@@ -1198,12 +1205,16 @@ static int _drop_vginfo(struct lvmcache_info *info, struct lvmcache_vginfo *vgin + /* vginfo still referenced? 
*/ + if (!vginfo || is_orphan_vg(vginfo->vgname) || + !dm_list_empty(&vginfo->infos)) +- return 1; ++ return; + +- if (!_free_vginfo(vginfo)) +- return_0; ++ if (dm_hash_lookup(_vgname_hash, vginfo->vgname) == vginfo) ++ dm_hash_remove(_vgname_hash, vginfo->vgname); + +- return 1; ++ dm_hash_remove(_vgid_hash, vginfo->vgid); ++ ++ dm_list_del(&vginfo->list); /* _vginfos list */ ++ ++ _free_vginfo(vginfo); + } + + void lvmcache_del(struct lvmcache_info *info) +@@ -1261,180 +1272,150 @@ static int _lvmcache_update_vgid(struct lvmcache_info *info, + return 1; + } + +-static int _insert_vginfo(struct lvmcache_vginfo *new_vginfo, const char *vgid, +- uint32_t vgstatus, const char *creation_host, +- struct lvmcache_vginfo *primary_vginfo) ++static int _lvmcache_update_vgname(struct cmd_context *cmd, ++ struct lvmcache_info *info, ++ const char *vgname, const char *vgid, ++ const char *system_id, ++ const struct format_type *fmt) + { +- struct lvmcache_vginfo *last_vginfo = primary_vginfo; +- char uuid_primary[64] __attribute__((aligned(8))); +- char uuid_new[64] __attribute__((aligned(8))); +- int use_new = 0; +- +- /* Pre-existing VG takes precedence. Unexported VG takes precedence. */ +- if (primary_vginfo) { +- if (!id_write_format((const struct id *)vgid, uuid_new, sizeof(uuid_new))) +- return_0; ++ char vgid_str[64] __attribute__((aligned(8))); ++ char other_str[64] __attribute__((aligned(8))); ++ struct lvmcache_vginfo *vginfo; ++ struct lvmcache_vginfo *other; ++ int vginfo_is_allowed; ++ int other_is_allowed; + +- if (!id_write_format((const struct id *)&primary_vginfo->vgid, uuid_primary, +- sizeof(uuid_primary))) +- return_0; ++ if (!vgname || (info && info->vginfo && !strcmp(info->vginfo->vgname, vgname))) ++ return 1; + +- _found_duplicate_vgnames = 1; ++ if (!id_write_format((const struct id *)vgid, vgid_str, sizeof(vgid_str))) ++ stack; + +- /* +- * vginfo is kept for each VG with the same name. +- * They are saved with the vginfo->next list. +- * These checks just decide the ordering of +- * that list. +- * +- * FIXME: it should no longer matter what order +- * the vginfo's are kept in, so we can probably +- * remove these comparisons and reordering entirely. +- * +- * If Primary not exported, new exported => keep +- * Else Primary exported, new not exported => change +- * Else Primary has hostname for this machine => keep +- * Else Primary has no hostname, new has one => change +- * Else New has hostname for this machine => change +- * Else Keep primary. 
+- */ +- if (!(primary_vginfo->status & EXPORTED_VG) && +- (vgstatus & EXPORTED_VG)) +- log_verbose("Cache: Duplicate VG name %s: " +- "Existing %s takes precedence over " +- "exported %s", new_vginfo->vgname, +- uuid_primary, uuid_new); +- else if ((primary_vginfo->status & EXPORTED_VG) && +- !(vgstatus & EXPORTED_VG)) { +- log_verbose("Cache: Duplicate VG name %s: " +- "%s takes precedence over exported %s", +- new_vginfo->vgname, uuid_new, +- uuid_primary); +- use_new = 1; +- } else if (primary_vginfo->creation_host && +- !strcmp(primary_vginfo->creation_host, +- primary_vginfo->fmt->cmd->hostname)) +- log_verbose("Cache: Duplicate VG name %s: " +- "Existing %s (created here) takes precedence " +- "over %s", new_vginfo->vgname, uuid_primary, +- uuid_new); +- else if (!primary_vginfo->creation_host && creation_host) { +- log_verbose("Cache: Duplicate VG name %s: " +- "%s (with creation_host) takes precedence over %s", +- new_vginfo->vgname, uuid_new, +- uuid_primary); +- use_new = 1; +- } else if (creation_host && +- !strcmp(creation_host, +- primary_vginfo->fmt->cmd->hostname)) { +- log_verbose("Cache: Duplicate VG name %s: " +- "%s (created here) takes precedence over %s", +- new_vginfo->vgname, uuid_new, +- uuid_primary); +- use_new = 1; +- } else { +- log_verbose("Cache: Duplicate VG name %s: " +- "Prefer existing %s vs new %s", +- new_vginfo->vgname, uuid_primary, uuid_new); ++ /* ++ * Add vginfo for orphan VG ++ */ ++ if (!info) { ++ if (!(vginfo = zalloc(sizeof(*vginfo)))) { ++ log_error("lvmcache adding vg list alloc failed %s", vgname); ++ return 0; + } +- +- if (!use_new) { +- while (last_vginfo->next) +- last_vginfo = last_vginfo->next; +- last_vginfo->next = new_vginfo; +- return 1; ++ if (!(vginfo->vgname = strdup(vgname))) { ++ free(vginfo); ++ log_error("lvmcache adding vg name alloc failed %s", vgname); ++ return 0; + } ++ dm_list_init(&vginfo->infos); ++ dm_list_init(&vginfo->outdated_infos); ++ dm_list_init(&vginfo->pvsummaries); ++ vginfo->fmt = fmt; + +- dm_hash_remove(_vgname_hash, primary_vginfo->vgname); +- } +- +- if (!dm_hash_insert(_vgname_hash, new_vginfo->vgname, new_vginfo)) { +- log_error("cache_update: vg hash insertion failed: %s", +- new_vginfo->vgname); +- return 0; +- } +- +- if (primary_vginfo) +- new_vginfo->next = primary_vginfo; +- +- return 1; +-} ++ if (!dm_hash_insert(_vgname_hash, vgname, vginfo)) { ++ free(vginfo->vgname); ++ free(vginfo); ++ return_0; ++ } + +-static int _lvmcache_update_vgname(struct lvmcache_info *info, +- const char *vgname, const char *vgid, +- uint32_t vgstatus, const char *creation_host, +- const struct format_type *fmt) +-{ +- struct lvmcache_vginfo *vginfo, *primary_vginfo; +- char mdabuf[32]; ++ if (!_lvmcache_update_vgid(NULL, vginfo, vgid)) { ++ free(vginfo->vgname); ++ free(vginfo); ++ return_0; ++ } + +- if (!vgname || (info && info->vginfo && !strcmp(info->vginfo->vgname, vgname))) ++ /* Ensure orphans appear last on list_iterate */ ++ dm_list_add(&_vginfos, &vginfo->list); + return 1; ++ } + +- /* Remove existing vginfo entry */ +- if (info) +- _drop_vginfo(info, info->vginfo); ++ _drop_vginfo(info, info->vginfo); + +- if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid))) { ++ if (!(vginfo = lvmcache_vginfo_from_vgid(vgid))) { + /* + * Create a vginfo struct for this VG and put the vginfo + * into the hash table. 
+ */ + ++ log_debug_cache("lvmcache adding vginfo for %s %s", vgname, vgid_str); ++ + if (!(vginfo = zalloc(sizeof(*vginfo)))) { +- log_error("lvmcache_update_vgname: list alloc failed"); ++ log_error("lvmcache adding vg list alloc failed %s", vgname); + return 0; + } + if (!(vginfo->vgname = strdup(vgname))) { + free(vginfo); +- log_error("cache vgname alloc failed for %s", vgname); ++ log_error("lvmcache adding vg name alloc failed %s", vgname); + return 0; + } + dm_list_init(&vginfo->infos); + dm_list_init(&vginfo->outdated_infos); + dm_list_init(&vginfo->pvsummaries); + +- /* +- * A different VG (different uuid) can exist with the same name. +- * In this case, the two VGs will have separate vginfo structs, +- * but the second will be linked onto the existing vginfo->next, +- * not in the hash. +- */ +- primary_vginfo = lvmcache_vginfo_from_vgname(vgname, NULL); ++ if ((other = dm_hash_lookup(_vgname_hash, vgname))) { ++ log_debug_cache("lvmcache adding vginfo found duplicate VG name %s", vgname); + +- if (!_insert_vginfo(vginfo, vgid, vgstatus, creation_host, primary_vginfo)) { +- free(vginfo->vgname); +- free(vginfo); +- return 0; ++ /* ++ * A different VG (different uuid) can exist with the ++ * same name. In this case, the two VGs will have ++ * separate vginfo structs, but one will be in the ++ * vgname_hash. If both vginfos are local/accessible, ++ * then _found_duplicate_vgnames is set which will ++ * disable any further use of the vgname_hash. ++ */ ++ ++ if (!memcmp(other->vgid, vgid, ID_LEN)) { ++ /* shouldn't happen since we looked up by vgid above */ ++ log_error(INTERNAL_ERROR "lvmcache_update_vgname %s %s %s %s", ++ vgname, vgid_str, other->vgname, other->vgid); ++ free(vginfo->vgname); ++ free(vginfo); ++ return 0; ++ } ++ ++ vginfo_is_allowed = is_system_id_allowed(cmd, system_id); ++ other_is_allowed = is_system_id_allowed(cmd, other->system_id); ++ ++ if (vginfo_is_allowed && other_is_allowed) { ++ if (!id_write_format((const struct id *)other->vgid, other_str, sizeof(other_str))) ++ stack; ++ ++ vginfo->has_duplicate_local_vgname = 1; ++ other->has_duplicate_local_vgname = 1; ++ _found_duplicate_vgnames = 1; ++ ++ log_warn("WARNING: VG name %s is used by VGs %s and %s.", ++ vgname, vgid_str, other_str); ++ log_warn("Fix duplicate VG names with vgrename uuid, a device filter, or system IDs."); ++ } ++ ++ if (!vginfo_is_allowed && !other_is_allowed) { ++ vginfo->has_duplicate_foreign_vgname = 1; ++ other->has_duplicate_foreign_vgname = 1; ++ } ++ ++ if (!other_is_allowed && vginfo_is_allowed) { ++ /* the accessible vginfo must be in vgnames_hash */ ++ dm_hash_remove(_vgname_hash, vgname); ++ if (!dm_hash_insert(_vgname_hash, vgname, vginfo)) { ++ log_error("lvmcache adding vginfo to name hash failed %s", vgname); ++ return 0; ++ } ++ } ++ } else { ++ if (!dm_hash_insert(_vgname_hash, vgname, vginfo)) { ++ log_error("lvmcache adding vg to name hash failed %s", vgname); ++ free(vginfo->vgname); ++ free(vginfo); ++ return 0; ++ } + } + +- /* Ensure orphans appear last on list_iterate */ +- if (is_orphan_vg(vgname)) +- dm_list_add(&_vginfos, &vginfo->list); +- else +- dm_list_add_h(&_vginfos, &vginfo->list); ++ dm_list_add_h(&_vginfos, &vginfo->list); + } + +- if (info) +- _vginfo_attach_info(vginfo, info); +- else if (!_lvmcache_update_vgid(NULL, vginfo, vgid)) /* Orphans */ +- return_0; +- +- /* FIXME Check consistency of list! 
*/ + vginfo->fmt = fmt; ++ info->vginfo = vginfo; ++ dm_list_add(&vginfo->infos, &info->list); + +- if (info) { +- if (info->mdas.n) +- sprintf(mdabuf, " with %u mda(s)", dm_list_size(&info->mdas)); +- else +- mdabuf[0] = '\0'; +- log_debug_cache("lvmcache %s: now in VG %s%s%s%s%s.", +- dev_name(info->dev), +- vgname, vginfo->vgid[0] ? " (" : "", +- vginfo->vgid[0] ? vginfo->vgid : "", +- vginfo->vgid[0] ? ")" : "", mdabuf); +- } else +- log_debug_cache("lvmcache: Initialised VG %s.", vgname); ++ log_debug_cache("lvmcache %s: now in VG %s %s", dev_name(info->dev), vgname, vgid_str); + + return 1; + } +@@ -1511,9 +1492,9 @@ out: + return 1; + } + +-int lvmcache_add_orphan_vginfo(const char *vgname, struct format_type *fmt) ++int lvmcache_add_orphan_vginfo(struct cmd_context *cmd, const char *vgname, struct format_type *fmt) + { +- return _lvmcache_update_vgname(NULL, vgname, vgname, 0, "", fmt); ++ return _lvmcache_update_vgname(cmd, NULL, vgname, vgname, "", fmt); + } + + static void _lvmcache_update_pvsummaries(struct lvmcache_vginfo *vginfo, struct lvmcache_vgsummary *vgsummary) +@@ -1532,7 +1513,7 @@ static void _lvmcache_update_pvsummaries(struct lvmcache_vginfo *vginfo, struct + * Returning 0 causes the caller to remove the info struct for this + * device from lvmcache, which will make it look like a missing device. + */ +-int lvmcache_update_vgname_and_id(struct lvmcache_info *info, struct lvmcache_vgsummary *vgsummary) ++int lvmcache_update_vgname_and_id(struct cmd_context *cmd, struct lvmcache_info *info, struct lvmcache_vgsummary *vgsummary) + { + const char *vgname = vgsummary->vgname; + const char *vgid = (char *)&vgsummary->vgid; +@@ -1545,6 +1526,7 @@ int lvmcache_update_vgname_and_id(struct lvmcache_info *info, struct lvmcache_vg + vgid = vgname; + } + ++ /* FIXME: remove this, it shouldn't be needed */ + /* If PV without mdas is already in a real VG, don't make it orphan */ + if (is_orphan_vg(vgname) && info->vginfo && + mdas_empty_or_ignored(&info->mdas) && +@@ -1556,7 +1538,7 @@ int lvmcache_update_vgname_and_id(struct lvmcache_info *info, struct lvmcache_vg + * and attaches the info struct for the dev to the vginfo. + * Puts the vginfo into the vgname hash table. + */ +- if (!_lvmcache_update_vgname(info, vgname, vgid, vgsummary->vgstatus, vgsummary->creation_host, info->fmt)) { ++ if (!_lvmcache_update_vgname(cmd, info, vgname, vgid, vgsummary->system_id, info->fmt)) { + /* shouldn't happen, internal error */ + log_error("Failed to update VG %s info in lvmcache.", vgname); + return 0; +@@ -1735,7 +1717,7 @@ int lvmcache_update_vg_from_write(struct volume_group *vg) + (void) dm_strncpy(pvid_s, (char *) &pvl->pv->id, sizeof(pvid_s)); + /* FIXME Could pvl->pv->dev->pvid ever be different? */ + if ((info = lvmcache_info_from_pvid(pvid_s, pvl->pv->dev, 0)) && +- !lvmcache_update_vgname_and_id(info, &vgsummary)) ++ !lvmcache_update_vgname_and_id(vg->cmd, info, &vgsummary)) + return_0; + } + +@@ -1819,7 +1801,7 @@ int lvmcache_update_vg_from_read(struct volume_group *vg, unsigned precommitted) + * info's for PVs without metadata were not connected to the + * vginfo by label_scan, so do it here. 
+ */ +- if (!lvmcache_update_vgname_and_id(info, &vgsummary)) { ++ if (!lvmcache_update_vgname_and_id(vg->cmd, info, &vgsummary)) { + log_debug_cache("lvmcache_update_vg %s failed to update info for %s", + vg->name, dev_name(info->dev)); + } +@@ -1927,7 +1909,7 @@ static struct lvmcache_info * _create_info(struct labeller *labeller, struct dev + return info; + } + +-struct lvmcache_info *lvmcache_add(struct labeller *labeller, ++struct lvmcache_info *lvmcache_add(struct cmd_context *cmd, struct labeller *labeller, + const char *pvid, struct device *dev, uint64_t label_sector, + const char *vgname, const char *vgid, uint32_t vgstatus, + int *is_duplicate) +@@ -2042,7 +2024,7 @@ update_vginfo: + if (vgid) + strncpy((char *)&vgsummary.vgid, vgid, sizeof(vgsummary.vgid)); + +- if (!lvmcache_update_vgname_and_id(info, &vgsummary)) { ++ if (!lvmcache_update_vgname_and_id(cmd, info, &vgsummary)) { + if (created) { + dm_hash_remove(_pvid_hash, pvid_s); + strcpy(info->dev->pvid, ""); +@@ -2055,7 +2037,7 @@ update_vginfo: + return info; + } + +-static void _lvmcache_destroy_entry(struct lvmcache_info *info) ++static void _lvmcache_destroy_info(struct lvmcache_info *info) + { + _vginfo_detach_info(info); + info->dev->pvid[0] = 0; +@@ -2063,20 +2045,11 @@ static void _lvmcache_destroy_entry(struct lvmcache_info *info) + free(info); + } + +-static void _lvmcache_destroy_vgnamelist(struct lvmcache_vginfo *vginfo) +-{ +- struct lvmcache_vginfo *next; +- +- do { +- next = vginfo->next; +- if (!_free_vginfo(vginfo)) +- stack; +- } while ((vginfo = next)); +-} +- + void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset) + { +- log_debug_cache("Dropping VG info"); ++ struct lvmcache_vginfo *vginfo, *vginfo2; ++ ++ log_debug_cache("Destroy lvmcache content"); + + if (_vgid_hash) { + dm_hash_destroy(_vgid_hash); +@@ -2084,20 +2057,24 @@ void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset) + } + + if (_pvid_hash) { +- dm_hash_iter(_pvid_hash, (dm_hash_iterate_fn) _lvmcache_destroy_entry); ++ dm_hash_iter(_pvid_hash, (dm_hash_iterate_fn) _lvmcache_destroy_info); + dm_hash_destroy(_pvid_hash); + _pvid_hash = NULL; + } + + if (_vgname_hash) { +- dm_hash_iter(_vgname_hash, +- (dm_hash_iterate_fn) _lvmcache_destroy_vgnamelist); + dm_hash_destroy(_vgname_hash); + _vgname_hash = NULL; + } + ++ dm_list_iterate_items_safe(vginfo, vginfo2, &_vginfos) { ++ dm_list_del(&vginfo->list); ++ _free_vginfo(vginfo); ++ } ++ + if (!dm_list_empty(&_vginfos)) +- log_error(INTERNAL_ERROR "_vginfos list should be empty"); ++ log_error(INTERNAL_ERROR "vginfos list should be empty"); ++ + dm_list_init(&_vginfos); + + /* +@@ -2109,6 +2086,8 @@ void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset) + * We want the same preferred devices to be chosen each time, so save + * the unpreferred devs here so that _choose_preferred_devs can use + * this to make the same choice each time. ++ * ++ * FIXME: I don't think is is needed any more. 
+ */ + _destroy_device_list(&_prev_unused_duplicate_devs); + dm_list_splice(&_prev_unused_duplicate_devs, &_unused_duplicates); +@@ -2122,7 +2101,7 @@ void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset) + stack; + + dm_list_iterate_items(fmt, &cmd->formats) { +- if (!lvmcache_add_orphan_vginfo(fmt->orphan_vg_name, fmt)) ++ if (!lvmcache_add_orphan_vginfo(cmd, fmt->orphan_vg_name, fmt)) + stack; + } + } +@@ -2567,36 +2546,6 @@ int lvmcache_vginfo_has_pvid(struct lvmcache_vginfo *vginfo, char *pvid) + return 0; + } + +-struct metadata_area *lvmcache_get_mda(struct cmd_context *cmd, +- const char *vgname, +- struct device *dev, +- int use_mda_num) +-{ +- struct lvmcache_vginfo *vginfo; +- struct lvmcache_info *info; +- struct metadata_area *mda; +- +- if (!use_mda_num) +- use_mda_num = 1; +- +- if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, NULL))) +- return NULL; +- +- dm_list_iterate_items(info, &vginfo->infos) { +- if (info->dev != dev) +- continue; +- +- dm_list_iterate_items(mda, &info->mdas) { +- if ((use_mda_num == 1) && (mda->status & MDA_PRIMARY)) +- return mda; +- if ((use_mda_num == 2) && !(mda->status & MDA_PRIMARY)) +- return mda; +- } +- return NULL; +- } +- return NULL; +-} +- + /* + * This is used by the metadata repair command to check if + * the metadata on a dev needs repair because it's old. +diff --git a/lib/cache/lvmcache.h b/lib/cache/lvmcache.h +index 0c8c789..6cef4d1 100644 +--- a/lib/cache/lvmcache.h ++++ b/lib/cache/lvmcache.h +@@ -71,16 +71,16 @@ int lvmcache_label_rescan_vg(struct cmd_context *cmd, const char *vgname, const + int lvmcache_label_rescan_vg_rw(struct cmd_context *cmd, const char *vgname, const char *vgid); + + /* Add/delete a device */ +-struct lvmcache_info *lvmcache_add(struct labeller *labeller, const char *pvid, ++struct lvmcache_info *lvmcache_add(struct cmd_context *cmd, struct labeller *labeller, const char *pvid, + struct device *dev, uint64_t label_sector, + const char *vgname, const char *vgid, + uint32_t vgstatus, int *is_duplicate); +-int lvmcache_add_orphan_vginfo(const char *vgname, struct format_type *fmt); ++int lvmcache_add_orphan_vginfo(struct cmd_context *cmd, const char *vgname, struct format_type *fmt); + void lvmcache_del(struct lvmcache_info *info); + void lvmcache_del_dev(struct device *dev); + + /* Update things */ +-int lvmcache_update_vgname_and_id(struct lvmcache_info *info, ++int lvmcache_update_vgname_and_id(struct cmd_context *cmd, struct lvmcache_info *info, + struct lvmcache_vgsummary *vgsummary); + int lvmcache_update_vg_from_read(struct volume_group *vg, unsigned precommitted); + int lvmcache_update_vg_from_write(struct volume_group *vg); +@@ -161,11 +161,6 @@ struct device *lvmcache_device(struct lvmcache_info *info); + unsigned lvmcache_mda_count(struct lvmcache_info *info); + uint64_t lvmcache_smallest_mda_size(struct lvmcache_info *info); + +-struct metadata_area *lvmcache_get_mda(struct cmd_context *cmd, +- const char *vgname, +- struct device *dev, +- int use_mda_num); +- + bool lvmcache_has_duplicate_devs(void); + void lvmcache_del_dev_from_duplicates(struct device *dev); + bool lvmcache_dev_is_unused_duplicate(struct device *dev); +@@ -174,6 +169,7 @@ int lvmcache_get_unused_duplicates(struct cmd_context *cmd, struct dm_list *head + int vg_has_duplicate_pvs(struct volume_group *vg); + + int lvmcache_found_duplicate_vgnames(void); ++bool lvmcache_has_duplicate_local_vgname(const char *vgid, const char *vgname); + + int lvmcache_contains_lock_type_sanlock(struct cmd_context *cmd); 
+ +diff --git a/lib/commands/toolcontext.c b/lib/commands/toolcontext.c +index 88d5b3e..63b6811 100644 +--- a/lib/commands/toolcontext.c ++++ b/lib/commands/toolcontext.c +@@ -1276,7 +1276,7 @@ int init_lvmcache_orphans(struct cmd_context *cmd) + struct format_type *fmt; + + dm_list_iterate_items(fmt, &cmd->formats) +- if (!lvmcache_add_orphan_vginfo(fmt->orphan_vg_name, fmt)) ++ if (!lvmcache_add_orphan_vginfo(cmd, fmt->orphan_vg_name, fmt)) + return_0; + + return 1; +@@ -1598,6 +1598,7 @@ struct cmd_context *create_toolcontext(unsigned is_clvmd, + dm_list_init(&cmd->formats); + dm_list_init(&cmd->segtypes); + dm_list_init(&cmd->tags); ++ dm_list_init(&cmd->hints); + dm_list_init(&cmd->config_files); + label_init(); + +diff --git a/lib/device/dev-io.c b/lib/device/dev-io.c +index 735441f..33b9345 100644 +--- a/lib/device/dev-io.c ++++ b/lib/device/dev-io.c +@@ -86,6 +86,9 @@ static int _dev_get_size_dev(struct device *dev, uint64_t *size) + int fd = dev->bcache_fd; + int do_close = 0; + ++ if (dm_list_empty(&dev->aliases)) ++ return 0; ++ + if (dev->size_seqno == _dev_size_seqno) { + log_very_verbose("%s: using cached size %" PRIu64 " sectors", + name, dev->size); +diff --git a/lib/format_text/archiver.c b/lib/format_text/archiver.c +index 3a741da..733e62b 100644 +--- a/lib/format_text/archiver.c ++++ b/lib/format_text/archiver.c +@@ -315,7 +315,7 @@ struct volume_group *backup_read_vg(struct cmd_context *cmd, + } + + dm_list_iterate_items(mda, &tf->metadata_areas_in_use) { +- if (!(vg = mda->ops->vg_read(tf, vg_name, mda, NULL, NULL))) ++ if (!(vg = mda->ops->vg_read(cmd, tf, vg_name, mda, NULL, NULL))) + stack; + break; + } +diff --git a/lib/format_text/format-text.c b/lib/format_text/format-text.c +index 268bd64..e448712 100644 +--- a/lib/format_text/format-text.c ++++ b/lib/format_text/format-text.c +@@ -290,7 +290,8 @@ static int _raw_write_mda_header(const struct format_type *fmt, + * in the label scanning path. 
+ */ + +-static struct raw_locn *_read_metadata_location_vg(struct device_area *dev_area, ++static struct raw_locn *_read_metadata_location_vg(struct cmd_context *cmd, ++ struct device_area *dev_area, + struct mda_header *mdah, int primary_mda, + const char *vgname, + int *precommitted) +@@ -369,7 +370,7 @@ static struct raw_locn *_read_metadata_location_vg(struct device_area *dev_area, + vgnamebuf, vgname); + + if ((info = lvmcache_info_from_pvid(dev_area->dev->pvid, dev_area->dev, 0)) && +- !lvmcache_update_vgname_and_id(info, &vgsummary_orphan)) ++ !lvmcache_update_vgname_and_id(cmd, info, &vgsummary_orphan)) + stack; + + return NULL; +@@ -447,7 +448,8 @@ static uint64_t _next_rlocn_offset(struct volume_group *vg, struct raw_locn *rlo + return new_start; + } + +-static struct volume_group *_vg_read_raw_area(struct format_instance *fid, ++static struct volume_group *_vg_read_raw_area(struct cmd_context *cmd, ++ struct format_instance *fid, + const char *vgname, + struct device_area *area, + struct cached_vg_fmtdata **vg_fmtdata, +@@ -468,7 +470,7 @@ static struct volume_group *_vg_read_raw_area(struct format_instance *fid, + goto out; + } + +- if (!(rlocn = _read_metadata_location_vg(area, mdah, primary_mda, vgname, &precommitted))) { ++ if (!(rlocn = _read_metadata_location_vg(cmd, area, mdah, primary_mda, vgname, &precommitted))) { + log_debug_metadata("VG %s not found on %s", vgname, dev_name(area->dev)); + goto out; + } +@@ -503,7 +505,8 @@ static struct volume_group *_vg_read_raw_area(struct format_instance *fid, + return vg; + } + +-static struct volume_group *_vg_read_raw(struct format_instance *fid, ++static struct volume_group *_vg_read_raw(struct cmd_context *cmd, ++ struct format_instance *fid, + const char *vgname, + struct metadata_area *mda, + struct cached_vg_fmtdata **vg_fmtdata, +@@ -512,12 +515,13 @@ static struct volume_group *_vg_read_raw(struct format_instance *fid, + struct mda_context *mdac = (struct mda_context *) mda->metadata_locn; + struct volume_group *vg; + +- vg = _vg_read_raw_area(fid, vgname, &mdac->area, vg_fmtdata, use_previous_vg, 0, mda_is_primary(mda)); ++ vg = _vg_read_raw_area(cmd, fid, vgname, &mdac->area, vg_fmtdata, use_previous_vg, 0, mda_is_primary(mda)); + + return vg; + } + +-static struct volume_group *_vg_read_precommit_raw(struct format_instance *fid, ++static struct volume_group *_vg_read_precommit_raw(struct cmd_context *cmd, ++ struct format_instance *fid, + const char *vgname, + struct metadata_area *mda, + struct cached_vg_fmtdata **vg_fmtdata, +@@ -526,7 +530,7 @@ static struct volume_group *_vg_read_precommit_raw(struct format_instance *fid, + struct mda_context *mdac = (struct mda_context *) mda->metadata_locn; + struct volume_group *vg; + +- vg = _vg_read_raw_area(fid, vgname, &mdac->area, vg_fmtdata, use_previous_vg, 1, mda_is_primary(mda)); ++ vg = _vg_read_raw_area(cmd, fid, vgname, &mdac->area, vg_fmtdata, use_previous_vg, 1, mda_is_primary(mda)); + + return vg; + } +@@ -1321,7 +1325,7 @@ static struct volume_group *_vg_read_file_name(struct format_instance *fid, + return vg; + } + +-static struct volume_group *_vg_read_file(struct format_instance *fid, ++static struct volume_group *_vg_read_file(struct cmd_context *cmd, struct format_instance *fid, + const char *vgname, + struct metadata_area *mda, + struct cached_vg_fmtdata **vg_fmtdata, +@@ -1332,7 +1336,7 @@ static struct volume_group *_vg_read_file(struct format_instance *fid, + return _vg_read_file_name(fid, vgname, tc->path_live); + } + +-static struct volume_group 
*_vg_read_precommit_file(struct format_instance *fid, ++static struct volume_group *_vg_read_precommit_file(struct cmd_context *cmd, struct format_instance *fid, + const char *vgname, + struct metadata_area *mda, + struct cached_vg_fmtdata **vg_fmtdata, +@@ -1713,7 +1717,7 @@ static int _set_ext_flags(struct physical_volume *pv, struct lvmcache_info *info + } + + /* Only for orphans - FIXME That's not true any more */ +-static int _text_pv_write(const struct format_type *fmt, struct physical_volume *pv) ++static int _text_pv_write(struct cmd_context *cmd, const struct format_type *fmt, struct physical_volume *pv) + { + struct format_instance *fid = pv->fid; + const char *pvid = (const char *) (*pv->old_id.uuid ? &pv->old_id : &pv->id); +@@ -1725,7 +1729,7 @@ static int _text_pv_write(const struct format_type *fmt, struct physical_volume + unsigned mda_index; + + /* Add a new cache entry with PV info or update existing one. */ +- if (!(info = lvmcache_add(fmt->labeller, (const char *) &pv->id, ++ if (!(info = lvmcache_add(cmd, fmt->labeller, (const char *) &pv->id, + pv->dev, pv->label_sector, pv->vg_name, + is_orphan_vg(pv->vg_name) ? pv->vg_name : pv->vg ? (const char *) &pv->vg->id : NULL, 0, NULL))) + return_0; +diff --git a/lib/format_text/text_label.c b/lib/format_text/text_label.c +index 9241eca..1674126 100644 +--- a/lib/format_text/text_label.c ++++ b/lib/format_text/text_label.c +@@ -370,7 +370,7 @@ static int _read_mda_header_and_metadata(const struct format_type *fmt, + * the metadata is at for those PVs. + */ + +-static int _text_read(struct labeller *labeller, struct device *dev, void *label_buf, ++static int _text_read(struct cmd_context *cmd, struct labeller *labeller, struct device *dev, void *label_buf, + uint64_t label_sector, int *is_duplicate) + { + struct lvmcache_vgsummary vgsummary; +@@ -410,7 +410,7 @@ static int _text_read(struct labeller *labeller, struct device *dev, void *label + * + * Other reasons for lvmcache_add to return NULL are internal errors. + */ +- if (!(info = lvmcache_add(labeller, (char *)pvhdr->pv_uuid, dev, label_sector, ++ if (!(info = lvmcache_add(cmd, labeller, (char *)pvhdr->pv_uuid, dev, label_sector, + FMT_TEXT_ORPHAN_VG_NAME, + FMT_TEXT_ORPHAN_VG_NAME, 0, is_duplicate))) + return_0; +@@ -503,7 +503,7 @@ static int _text_read(struct labeller *labeller, struct device *dev, void *label + rv1 = _read_mda_header_and_metadata(fmt, mda1, &vgsummary, &bad_fields); + + if (rv1 && !vgsummary.zero_offset && !vgsummary.mda_ignored) { +- if (!lvmcache_update_vgname_and_id(info, &vgsummary)) { ++ if (!lvmcache_update_vgname_and_id(cmd, info, &vgsummary)) { + /* I believe this is only an internal error. */ + + dm_list_del(&mda1->list); +@@ -554,7 +554,7 @@ static int _text_read(struct labeller *labeller, struct device *dev, void *label + rv2 = _read_mda_header_and_metadata(fmt, mda2, &vgsummary, &bad_fields); + + if (rv2 && !vgsummary.zero_offset && !vgsummary.mda_ignored) { +- if (!lvmcache_update_vgname_and_id(info, &vgsummary)) { ++ if (!lvmcache_update_vgname_and_id(cmd, info, &vgsummary)) { + dm_list_del(&mda2->list); + + /* Are there other cases besides mismatch and internal error? 
*/ +diff --git a/lib/label/hints.c b/lib/label/hints.c +index 48fb661..9546f48 100644 +--- a/lib/label/hints.c ++++ b/lib/label/hints.c +@@ -351,6 +351,7 @@ static void _unlock_hints(struct cmd_context *cmd) + + void hints_exit(struct cmd_context *cmd) + { ++ free_hints(&cmd->hints); + if (_hints_fd == -1) + return; + return _unlock_hints(cmd); +@@ -419,6 +420,9 @@ static int _dev_in_hint_hash(struct cmd_context *cmd, struct device *dev) + { + uint64_t devsize = 0; + ++ if (dm_list_empty(&dev->aliases)) ++ return 0; ++ + if (!cmd->filter->passes_filter(cmd, cmd->filter, dev, "regex")) + return 0; + +@@ -1318,6 +1322,7 @@ int get_hints(struct cmd_context *cmd, struct dm_list *hints_out, int *newhints, + */ + if (!_read_hint_file(cmd, &hints_list, &needs_refresh)) { + log_debug("get_hints: read fail"); ++ free_hints(&hints_list); + _unlock_hints(cmd); + return 0; + } +@@ -1330,6 +1335,7 @@ int get_hints(struct cmd_context *cmd, struct dm_list *hints_out, int *newhints, + */ + if (needs_refresh) { + log_debug("get_hints: needs refresh"); ++ free_hints(&hints_list); + + if (!_lock_hints(cmd, LOCK_EX, NONBLOCK)) + return 0; +diff --git a/lib/label/label.c b/lib/label/label.c +index 0458313..4d37cef 100644 +--- a/lib/label/label.c ++++ b/lib/label/label.c +@@ -431,7 +431,7 @@ static int _process_block(struct cmd_context *cmd, struct dev_filter *f, + * info/vginfo structs. That lvmcache info is used later when the + * command wants to read the VG to do something to it. + */ +- ret = labeller->ops->read(labeller, dev, label_buf, sector, &is_duplicate); ++ ret = labeller->ops->read(cmd, labeller, dev, label_buf, sector, &is_duplicate); + + if (!ret) { + if (is_duplicate) { +diff --git a/lib/label/label.h b/lib/label/label.h +index 4108906..9a4b630 100644 +--- a/lib/label/label.h ++++ b/lib/label/label.h +@@ -64,7 +64,7 @@ struct label_ops { + /* + * Read a label from a volume. 
+ */ +- int (*read) (struct labeller * l, struct device * dev, ++ int (*read) (struct cmd_context *cmd, struct labeller * l, struct device * dev, + void *label_buf, uint64_t label_sector, int *is_duplicate); + + /* +diff --git a/lib/locking/lvmlockd.c b/lib/locking/lvmlockd.c +index e378fe6..dca7954 100644 +--- a/lib/locking/lvmlockd.c ++++ b/lib/locking/lvmlockd.c +@@ -635,7 +635,6 @@ static int _init_vg_sanlock(struct cmd_context *cmd, struct volume_group *vg, in + const char *vg_lock_args = NULL; + const char *opts = NULL; + struct pv_list *pvl; +- struct device *sector_dev; + uint32_t sector_size = 0; + unsigned int physical_block_size, logical_block_size; + int num_mb = 0; +@@ -656,16 +655,11 @@ static int _init_vg_sanlock(struct cmd_context *cmd, struct volume_group *vg, in + dm_list_iterate_items(pvl, &vg->pvs) { + if (!dev_get_direct_block_sizes(pvl->pv->dev, &physical_block_size, &logical_block_size)) + continue; +- +- if (!sector_size) { +- sector_size = logical_block_size; +- sector_dev = pvl->pv->dev; +- } else if (sector_size != logical_block_size) { +- log_error("Inconsistent logical block sizes for %s and %s.", +- dev_name(pvl->pv->dev), dev_name(sector_dev)); +- return 0; +- } ++ if ((physical_block_size == 4096) || (logical_block_size == 4096)) ++ sector_size = 4096; + } ++ if (!sector_size) ++ sector_size = 512; + + log_debug("Using sector size %u for sanlock LV", sector_size); + +diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h +index 52bc776..083f74a 100644 +--- a/lib/metadata/metadata-exported.h ++++ b/lib/metadata/metadata-exported.h +@@ -744,9 +744,6 @@ struct volume_group *vg_read_for_update(struct cmd_context *cmd, const char *vg_ + const char *vgid, uint32_t read_flags, uint32_t lockd_state); + struct volume_group *vg_read_orphans(struct cmd_context *cmd, const char *orphan_vgname); + +-/* this is historical and being removed, don't use */ +-uint32_t vg_read_error(struct volume_group *vg_handle); +- + /* pe_start and pe_end relate to any existing data so that new metadata + * areas can avoid overlap */ + struct physical_volume *pv_create(const struct cmd_context *cmd, +diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c +index 9c44388..4b8dce9 100644 +--- a/lib/metadata/metadata.c ++++ b/lib/metadata/metadata.c +@@ -3666,7 +3666,7 @@ int pv_write(struct cmd_context *cmd, + return 0; + } + +- if (!pv->fmt->ops->pv_write(pv->fmt, pv)) ++ if (!pv->fmt->ops->pv_write(cmd, pv->fmt, pv)) + return_0; + + pv->status &= ~UNLABELLED_PV; +@@ -4010,17 +4010,6 @@ static int _access_vg_exported(struct cmd_context *cmd, struct volume_group *vg) + return 0; + } + +-/* +- * Test the validity of a VG handle returned by vg_read() or vg_read_for_update(). 
+- */
+-uint32_t vg_read_error(struct volume_group *vg_handle)
+-{
+-	if (!vg_handle)
+-		return FAILED_ALLOCATION;
+-
+-	return SUCCESS;
+-}
+-
+ struct format_instance *alloc_fid(const struct format_type *fmt,
+ 				  const struct format_instance_ctx *fic)
+ {
+@@ -4751,18 +4740,6 @@ static struct volume_group *_vg_read(struct cmd_context *cmd,
+ 		lvmcache_label_rescan_vg(cmd, vgname, vgid);
+ 	}
+ 
+-	/* Now determine the correct vgname if none was supplied */
+-	if (!vgname && !(vgname = lvmcache_vgname_from_vgid(cmd->mem, vgid))) {
+-		log_debug_metadata("Cache did not find VG name from vgid %s", vgid);
+-		return NULL;
+-	}
+-
+-	/* Determine the correct vgid if none was supplied */
+-	if (!vgid && !(vgid = lvmcache_vgid_from_vgname(cmd, vgname))) {
+-		log_debug_metadata("Cache did not find VG vgid from name %s", vgname);
+-		return NULL;
+-	}
+-
+ 	/*
+ 	 * A "format instance" is an abstraction for a VG location,
+ 	 * i.e. where a VG's metadata exists on disk.
+@@ -4841,7 +4818,7 @@ static struct volume_group *_vg_read(struct cmd_context *cmd,
+ 			log_debug_metadata("Reading VG %s precommit metadata from %s %llu",
+ 				 vgname, dev_name(mda_dev), (unsigned long long)mda->header_start);
+ 
+-			vg = mda->ops->vg_read_precommit(fid, vgname, mda, &vg_fmtdata, &use_previous_vg);
++			vg = mda->ops->vg_read_precommit(cmd, fid, vgname, mda, &vg_fmtdata, &use_previous_vg);
+ 
+ 			if (!vg && !use_previous_vg) {
+ 				log_warn("WARNING: Reading VG %s precommit on %s failed.", vgname, dev_name(mda_dev));
+@@ -4852,7 +4829,7 @@ static struct volume_group *_vg_read(struct cmd_context *cmd,
+ 			log_debug_metadata("Reading VG %s metadata from %s %llu",
+ 				 vgname, dev_name(mda_dev), (unsigned long long)mda->header_start);
+ 
+-			vg = mda->ops->vg_read(fid, vgname, mda, &vg_fmtdata, &use_previous_vg);
++			vg = mda->ops->vg_read(cmd, fid, vgname, mda, &vg_fmtdata, &use_previous_vg);
+ 
+ 			if (!vg && !use_previous_vg) {
+ 				log_warn("WARNING: Reading VG %s on %s failed.", vgname, dev_name(mda_dev));
+@@ -4999,6 +4976,7 @@ struct volume_group *vg_read(struct cmd_context *cmd, const char *vg_name, const
+ 	int missing_pv_dev = 0;
+ 	int missing_pv_flag = 0;
+ 	uint32_t failure = 0;
++	int original_vgid_set = vgid ? 1 : 0;
+ 	int writing = (vg_read_flags & READ_FOR_UPDATE);
+ 	int activating = (vg_read_flags & READ_FOR_ACTIVATE);
+ 
+@@ -5033,7 +5011,45 @@ struct volume_group *vg_read(struct cmd_context *cmd, const char *vg_name, const
+ 		goto bad;
+ 	}
+ 
++	/* I believe this is unused, the name is always set. */
++	if (!vg_name && !(vg_name = lvmcache_vgname_from_vgid(cmd->mem, vgid))) {
++		unlock_vg(cmd, NULL, vg_name);
++		log_error("VG name not found for vgid %s", vgid);
++		failure |= FAILED_NOTFOUND;
++		goto_bad;
++	}
++
++	/*
++	 * If the command is processing all VGs, process_each will get a list of vgname+vgid
++	 * pairs, and then call vg_read() for each vgname+vgid. In this case we know
++	 * which VG to read even if there are duplicate names, and we don't fail.
++	 *
++	 * If the user has requested one VG by name, process_each passes only the vgname
++	 * to vg_read(), and we look up the vgid from lvmcache. lvmcache finds duplicate
++	 * vgnames, doesn't know which is intended, returns a NULL vgid, and we fail.
++	 */
++
++	if (!vgid)
++		vgid = lvmcache_vgid_from_vgname(cmd, vg_name);
++
++	if (!vgid) {
++		unlock_vg(cmd, NULL, vg_name);
++		/* Some callers don't care if the VG doesn't exist and don't want an error message. 
*/ ++ if (!(vg_read_flags & READ_OK_NOTFOUND)) ++ log_error("Volume group \"%s\" not found", vg_name); ++ failure |= FAILED_NOTFOUND; ++ goto_bad; ++ } ++ ++ /* ++ * vgchange -ay (no vgname arg) will activate multiple local VGs with the same ++ * name, but if the vgs have the same lv name, activating those lvs will fail. ++ */ ++ if (activating && original_vgid_set && lvmcache_has_duplicate_local_vgname(vgid, vg_name)) ++ log_warn("WARNING: activating multiple VGs with the same name is dangerous and may fail."); ++ + if (!(vg = _vg_read(cmd, vg_name, vgid, 0, writing))) { ++ unlock_vg(cmd, NULL, vg_name); + /* Some callers don't care if the VG doesn't exist and don't want an error message. */ + if (!(vg_read_flags & READ_OK_NOTFOUND)) + log_error("Volume group \"%s\" not found.", vg_name); +diff --git a/lib/metadata/metadata.h b/lib/metadata/metadata.h +index f199fc4..2c22450 100644 +--- a/lib/metadata/metadata.h ++++ b/lib/metadata/metadata.h +@@ -76,12 +76,14 @@ struct cached_vg_fmtdata; + /* Per-format per-metadata area operations */ + struct metadata_area_ops { + struct dm_list list; +- struct volume_group *(*vg_read) (struct format_instance * fi, ++ struct volume_group *(*vg_read) (struct cmd_context *cmd, ++ struct format_instance * fi, + const char *vg_name, + struct metadata_area * mda, + struct cached_vg_fmtdata **vg_fmtdata, + unsigned *use_previous_vg); +- struct volume_group *(*vg_read_precommit) (struct format_instance * fi, ++ struct volume_group *(*vg_read_precommit) (struct cmd_context *cmd, ++ struct format_instance * fi, + const char *vg_name, + struct metadata_area * mda, + struct cached_vg_fmtdata **vg_fmtdata, +@@ -326,7 +328,7 @@ struct format_handler { + * Write a PV structure to disk. Fails if the PV is in a VG ie + * pv->vg_name must be a valid orphan VG name + */ +- int (*pv_write) (const struct format_type * fmt, ++ int (*pv_write) (struct cmd_context *cmd, const struct format_type * fmt, + struct physical_volume * pv); + + /* +diff --git a/man/lvmlockd.8_main b/man/lvmlockd.8_main +index 8ed5400..c21f7a9 100644 +--- a/man/lvmlockd.8_main ++++ b/man/lvmlockd.8_main +@@ -58,6 +58,10 @@ For default settings, see lvmlockd -h. + .I path + Set path to the socket to listen on. + ++.B --adopt-file ++.I path ++ Set path to the adopt file. ++ + .B --syslog-priority | -S err|warning|debug + Write log messages from this level up to syslog. + +@@ -76,6 +80,8 @@ For default settings, see lvmlockd -h. + .I seconds + Override the default sanlock I/O timeout. + ++.B --adopt | -A 0|1 ++ Enable (1) or disable (0) lock adoption. + + .SH USAGE + +@@ -548,7 +554,13 @@ necessary locks. + .B lvmlockd failure + + If lvmlockd fails or is killed while holding locks, the locks are orphaned +-in the lock manager. ++in the lock manager. Orphaned locks must be cleared or adopted before the ++associated resources can be accessed normally. If lock adoption is ++enabled, lvmlockd keeps a record of locks in the adopt-file. A subsequent ++instance of lvmlockd will then adopt locks orphaned by the previous ++instance. Adoption must be enabled in both instances (--adopt|-A 1). ++Without adoption, the lock manager or host would require a reset to clear ++orphaned lock state. + + .B dlm/corosync failure + +diff --git a/test/shell/duplicate-vgnames.sh b/test/shell/duplicate-vgnames.sh +new file mode 100644 +index 0000000..0f98f9c +--- /dev/null ++++ b/test/shell/duplicate-vgnames.sh +@@ -0,0 +1,660 @@ ++#!/usr/bin/env bash ++ ++# Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. 
++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions ++# of the GNU General Public License v.2. ++ ++SKIP_WITH_LVMLOCKD=1 ++SKIP_WITH_LVMPOLLD=1 ++ ++. lib/inittest ++ ++aux prepare_devs 7 ++ ++# test setups: ++# # local vgs named foo # foreign vg named foo ++# a. 0 1 ++# b. 0 2 ++# c. 1 1 ++# d. 1 2 ++# e. 2 0 ++# f. 2 1 ++# g. 2 2 ++# h. 3 3 ++# ++# commands to run for each test setup: ++# ++# vgs ++# all cases show all local ++# ++# vgs --foreign ++# all cases show all local and foreign ++# ++# vgs foo ++# a. not found ++# b. not found ++# c. show 1 local ++# d. show 1 local ++# e-g. dup error ++# ++# vgs --foreign foo ++# a. show 1 foreign ++# b. dup error ++# c. show 1 local ++# d. show 1 local ++# e-g. dup error ++# ++# vgchange -ay ++# a. none ++# b. none ++# c. activate 1 local ++# d. activate 1 local ++# e-g. activate 2 local ++# (if both local vgs have lvs with same name the second will fail to activate) ++# ++# vgchange -ay foo ++# a. none ++# b. none ++# c. activate 1 local ++# d. activate 1 local ++# e-g. dup error ++# ++# lvcreate foo ++# a. none ++# b. none ++# c. create 1 local ++# d. create 1 local ++# e-g. dup error ++# ++# vgremove foo ++# a. none ++# b. none ++# c. remove 1 local ++# d. remove 1 local ++# e-g. dup error ++# (in a couple cases test that vgremove -S vg_uuid=N works for local vg when local dups exist) ++ ++ ++# a. 0 local, 1 foreign ++# setup ++vgcreate $vg1 "$dev1" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++vgchange -y --systemid "other" $vg1 ++ ++vgs -o+uuid |tee out ++not grep $vg1 out ++vgs --foreign -o+uuid |tee out ++grep $vg1 out ++grep $UUID1 out ++ ++not vgs -o+uuid $vg1 |tee out ++not grep $vg1 out ++vgs --foreign -o+uuid $vg1 |tee out ++grep $vg1 out ++ ++vgchange -ay ++lvs --foreign -o vguuid,active |tee out ++not grep active out ++vgchange -an ++ ++not vgchange -ay $vg1 ++lvs --foreign -o vguuid,active |tee out ++not grep active out ++vgchange -an ++ ++not lvcreate -l1 -an -n $lv2 $vg1 ++lvs --foreign -o vguuid,name |tee out ++grep $UUID1 out | not grep $lv2 ++ ++not vgremove $vg1 ++vgs --foreign -o+uuid |tee out ++grep $UUID1 out ++vgremove -y -S vg_uuid=$UUID1 ++vgs --foreign -o+uuid |tee out ++grep $UUID1 out ++ ++aux wipefs_a "$dev1" ++aux wipefs_a "$dev2" ++ ++# b. 0 local, 2 foreign ++# setup ++vgcreate $vg1 "$dev1" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++vgchange -y --systemid "other" $vg1 ++aux disable_dev "$dev1" ++vgcreate $vg1 "$dev2" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++vgchange -y --systemid "other2" $vg1 ++aux enable_dev "$dev1" ++ ++vgs -o+uuid |tee out ++not grep $vg1 out ++vgs --foreign -o+uuid |tee out ++grep $vg1 out ++grep $UUID1 out ++grep $UUID2 out ++ ++not vgs -o+uuid $vg1 |tee out ++not grep $vg1 out ++not vgs --foreign -o+uuid $vg1 |tee out ++not grep $vg1 out ++ ++vgchange -ay ++lvs --foreign -o vguuid,active |tee out ++not grep active out ++vgchange -an ++ ++not vgchange -ay $vg1 ++lvs --foreign -o vguuid,active |tee out ++not grep active out ++vgchange -an ++ ++not lvcreate -l1 -an -n $lv2 $vg1 ++lvs --foreign -o vguuid,name |tee out ++grep $UUID1 out | not grep $lv2 ++grep $UUID2 out | not grep $lv2 ++ ++not vgremove $vg1 ++vgs --foreign -o+uuid |tee out ++grep $UUID1 out ++ ++aux wipefs_a "$dev1" ++aux wipefs_a "$dev2" ++aux wipefs_a "$dev3" ++ ++# c. 
1 local, 1 foreign ++# setup ++vgcreate $vg1 "$dev1" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++aux disable_dev "$dev1" ++vgcreate $vg1 "$dev2" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++vgchange -y --systemid "other" $vg1 ++aux enable_dev "$dev1" ++ ++vgs -o+uuid |tee out ++cat out ++grep $vg1 out ++grep $UUID1 out ++not grep $UUID2 out ++vgs --foreign -o+uuid |tee out ++grep $vg1 out ++grep $UUID1 out ++grep $UUID2 out ++ ++vgs -o+uuid $vg1 |tee out ++grep $vg1 out ++grep $UUID1 out ++not grep $UUID2 out ++vgs --foreign -o+uuid $vg1 |tee out ++grep $vg1 out ++grep $UUID1 out ++not grep $UUID2 out ++ ++vgchange -ay ++lvs --foreign -o vguuid,active |tee out ++grep $UUID1 out | grep active ++grep $UUID2 out | not grep active ++vgchange -an ++ ++vgchange -ay $vg1 ++lvs --foreign -o vguuid,active |tee out ++grep $UUID1 out | grep active ++grep $UUID2 out | not grep active ++vgchange -an ++ ++lvcreate -l1 -an -n $lv2 $vg1 ++lvs --foreign -o vguuid,name |tee out ++grep $UUID1 out | grep $lv2 ++grep $UUID2 out | not grep $lv2 ++ ++vgremove -y $vg1 ++vgs -o+uuid |tee out ++not grep $UUID1 out ++vgs --foreign -o+uuid |tee out ++grep $UUID2 out ++ ++aux wipefs_a "$dev1" ++aux wipefs_a "$dev2" ++aux wipefs_a "$dev3" ++ ++# d. 1 local, 2 foreign ++# setup ++vgcreate $vg1 "$dev1" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++aux disable_dev "$dev1" ++vgcreate $vg1 "$dev2" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++vgchange -y --systemid "other" $vg1 ++aux disable_dev "$dev2" ++vgcreate $vg1 "$dev3" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID3=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++vgchange -y --systemid "other2" $vg1 ++aux enable_dev "$dev1" ++aux enable_dev "$dev2" ++ ++vgs -o+uuid |tee out ++grep $vg1 out ++grep $UUID1 out ++not grep $UUID2 out ++not grep $UUID3 out ++vgs --foreign -o+uuid |tee out ++grep $vg1 out ++grep $UUID1 out ++grep $UUID2 out ++grep $UUID3 out ++ ++vgs -o+uuid $vg1 |tee out ++grep $vg1 out ++grep $UUID1 out ++not grep $UUID2 out ++not grep $UUID3 out ++vgs --foreign -o+uuid $vg1 |tee out ++grep $vg1 out ++grep $UUID1 out ++not grep $UUID2 out ++not grep $UUID3 out ++ ++vgchange -ay ++lvs --foreign -o vguuid,active |tee out ++grep $UUID1 out | grep active ++grep $UUID2 out | not grep active ++grep $UUID3 out | not grep active ++vgchange -an ++ ++vgchange -ay $vg1 ++lvs --foreign -o vguuid,active |tee out ++grep $UUID1 out | grep active ++grep $UUID2 out | not grep active ++grep $UUID3 out | not grep active ++vgchange -an ++ ++lvcreate -l1 -an -n $lv2 $vg1 ++lvs --foreign -o vguuid,name |tee out ++grep $UUID1 out | grep $lv2 ++grep $UUID2 out | not grep $lv2 ++grep $UUID3 out | not grep $lv2 ++ ++vgremove -y $vg1 ++vgs -o+uuid |tee out ++not grep $UUID1 out ++vgs --foreign -o+uuid |tee out ++grep $UUID2 out ++grep $UUID3 out ++ ++aux wipefs_a "$dev1" ++aux wipefs_a "$dev2" ++aux wipefs_a "$dev3" ++aux wipefs_a "$dev4" ++ ++# e. 
2 local, 0 foreign
++# setup
++vgcreate $vg1 "$dev1"
++lvcreate -n $lv1 -l1 -an $vg1
++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
++aux disable_dev "$dev1"
++vgcreate $vg1 "$dev2"
++# diff lvname to prevent clash in vgchange -ay
++lvcreate -n ${lv1}_b -l1 -an $vg1
++UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
++aux enable_dev "$dev1"
++
++vgs -o+uuid |tee out
++grep $vg1 out
++grep $UUID1 out
++grep $UUID2 out
++vgs --foreign -o+uuid |tee out
++grep $vg1 out
++grep $UUID1 out
++grep $UUID2 out
++
++not vgs -o+uuid $vg1 |tee out
++not grep $vg1 out
++not vgs --foreign -o+uuid $vg1 |tee out
++not grep $vg1 out
++
++vgchange -ay
++lvs --foreign -o vguuid,active |tee out
++grep $UUID1 out | grep active
++grep $UUID2 out | grep active
++vgchange -an
++
++not vgchange -ay $vg1
++lvs --foreign -o vguuid,active |tee out
++grep $UUID1 out | not grep active
++grep $UUID2 out | not grep active
++vgchange -an
++
++not lvcreate -l1 -an -n $lv2 $vg1
++lvs --foreign -o vguuid,name |tee out
++grep $UUID1 out | not grep $lv2
++grep $UUID2 out | not grep $lv2
++
++not vgremove $vg1
++vgs -o+uuid |tee out
++grep $vg1 out
++grep $UUID1 out
++grep $UUID2 out
++vgremove -y -S vg_uuid=$UUID1
++vgs -o+uuid |tee out
++not grep $UUID1 out
++grep $UUID2 out
++vgremove -y -S vg_uuid=$UUID2
++vgs -o+uuid |tee out
++not grep $UUID1 out
++not grep $UUID2 out
++
++aux wipefs_a "$dev1"
++aux wipefs_a "$dev2"
++aux wipefs_a "$dev3"
++
++# f. 2 local, 1 foreign
++# setup
++vgcreate $vg1 "$dev1"
++lvcreate -n $lv1 -l1 -an $vg1
++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
++aux disable_dev "$dev1"
++vgcreate $vg1 "$dev2"
++# diff lvname to prevent clash in vgchange -ay
++lvcreate -n ${lv1}_b -l1 -an $vg1
++UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
++aux disable_dev "$dev2"
++vgcreate $vg1 "$dev3"
++lvcreate -n $lv1 -l1 -an $vg1
++UUID3=$(vgs --noheading -o vg_uuid $vg1 | xargs)
++vgchange -y --systemid "other" $vg1
++aux enable_dev "$dev1"
++aux enable_dev "$dev2"
++
++vgs -o+uuid |tee out
++grep $vg1 out
++grep $UUID1 out
++grep $UUID2 out
++not grep $UUID3 out
++vgs --foreign -o+uuid |tee out
++grep $vg1 out
++grep $UUID1 out
++grep $UUID2 out
++grep $UUID3 out
++
++not vgs -o+uuid $vg1 |tee out
++not grep $vg1 out
++not vgs --foreign -o+uuid $vg1 |tee out
++not grep $vg1 out
++
++vgchange -ay
++lvs --foreign -o vguuid,active |tee out
++grep $UUID1 out | grep active
++grep $UUID2 out | grep active
++grep $UUID3 out | not grep active
++vgchange -an
++
++not vgchange -ay $vg1
++lvs --foreign -o vguuid,active |tee out
++grep $UUID1 out | not grep active
++grep $UUID2 out | not grep active
++grep $UUID3 out | not grep active
++vgchange -an
++
++not lvcreate -l1 -an -n $lv2 $vg1
++lvs --foreign -o vguuid,name |tee out
++grep $UUID1 out | not grep $lv2
++grep $UUID2 out | not grep $lv2
++grep $UUID3 out | not grep $lv2
++
++not vgremove $vg1
++vgs --foreign -o+uuid |tee out
++grep $vg1 out
++grep $UUID1 out
++grep $UUID2 out
++grep $UUID3 out
++vgremove -y -S vg_uuid=$UUID1
++vgs --foreign -o+uuid |tee out
++not grep $UUID1 out
++grep $UUID2 out
++grep $UUID3 out
++vgremove -y -S vg_uuid=$UUID2
++vgs --foreign -o+uuid |tee out
++not grep $UUID1 out
++not grep $UUID2 out
++grep $UUID3 out
++
++aux wipefs_a "$dev1"
++aux wipefs_a "$dev2"
++aux wipefs_a "$dev3"
++aux wipefs_a "$dev4"
++
++# g. 
2 local, 2 foreign
++# setup
++vgcreate $vg1 "$dev1"
++lvcreate -n $lv1 -l1 -an $vg1
++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
++aux disable_dev "$dev1"
++vgcreate $vg1 "$dev2"
++# diff lvname to prevent clash in vgchange -ay
++lvcreate -n ${lv1}_b -l1 -an $vg1
++UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
++aux disable_dev "$dev2"
++vgcreate $vg1 "$dev3"
++lvcreate -n $lv1 -l1 -an $vg1
++UUID3=$(vgs --noheading -o vg_uuid $vg1 | xargs)
++vgchange -y --systemid "other" $vg1
++aux disable_dev "$dev3"
++vgcreate $vg1 "$dev4"
++lvcreate -n $lv1 -l1 -an $vg1
++UUID4=$(vgs --noheading -o vg_uuid $vg1 | xargs)
++vgchange -y --systemid "other2" $vg1
++aux enable_dev "$dev1"
++aux enable_dev "$dev2"
++aux enable_dev "$dev3"
++
++vgs -o+uuid |tee out
++grep $vg1 out
++grep $UUID1 out
++grep $UUID2 out
++not grep $UUID3 out
++not grep $UUID4 out
++vgs --foreign -o+uuid |tee out
++grep $vg1 out
++grep $UUID1 out
++grep $UUID2 out
++grep $UUID3 out
++grep $UUID4 out
++
++not vgs -o+uuid $vg1 |tee out
++not grep $vg1 out
++not vgs --foreign -o+uuid $vg1 |tee out
++not grep $vg1 out
++
++vgchange -ay
++lvs --foreign -o vguuid,active |tee out
++grep $UUID1 out | grep active
++grep $UUID2 out | grep active
++grep $UUID3 out | not grep active
++grep $UUID4 out | not grep active
++vgchange -an
++
++not vgchange -ay $vg1
++lvs --foreign -o vguuid,active |tee out
++grep $UUID1 out | not grep active
++grep $UUID2 out | not grep active
++grep $UUID3 out | not grep active
++grep $UUID4 out | not grep active
++vgchange -an
++
++not lvcreate -l1 -an -n $lv2 $vg1
++lvs --foreign -o vguuid,name |tee out
++grep $UUID1 out | not grep $lv2
++grep $UUID2 out | not grep $lv2
++grep $UUID3 out | not grep $lv2
++grep $UUID4 out | not grep $lv2
++
++not vgremove $vg1
++vgs --foreign -o+uuid |tee out
++grep $vg1 out
++grep $UUID1 out
++grep $UUID2 out
++grep $UUID3 out
++grep $UUID4 out
++
++aux wipefs_a "$dev1"
++aux wipefs_a "$dev2"
++aux wipefs_a "$dev3"
++aux wipefs_a "$dev4"
++aux wipefs_a "$dev5"
++
++# h. 
3 local, 3 foreign
++# setup
++vgcreate $vg1 "$dev1"
++lvcreate -n $lv1 -l1 -an $vg1
++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
++aux disable_dev "$dev1"
++vgcreate $vg1 "$dev2"
++# diff lvname to prevent clash in vgchange -ay
++lvcreate -n ${lv1}_b -l1 -an $vg1
++UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
++aux disable_dev "$dev2"
++vgcreate $vg1 "$dev3"
++# diff lvname to prevent clash in vgchange -ay
++lvcreate -n ${lv1}_bb -l1 -an $vg1
++UUID3=$(vgs --noheading -o vg_uuid $vg1 | xargs)
++aux disable_dev "$dev3"
++vgcreate $vg1 "$dev4"
++lvcreate -n $lv1 -l1 -an $vg1
++UUID4=$(vgs --noheading -o vg_uuid $vg1 | xargs)
++vgchange -y --systemid "other" $vg1
++aux disable_dev "$dev4"
++vgcreate $vg1 "$dev5"
++lvcreate -n $lv1 -l1 -an $vg1
++UUID5=$(vgs --noheading -o vg_uuid $vg1 | xargs)
++vgchange -y --systemid "other2" $vg1
++aux disable_dev "$dev5"
++vgcreate $vg1 "$dev6"
++lvcreate -n $lv1 -l1 -an $vg1
++UUID6=$(vgs --noheading -o vg_uuid $vg1 | xargs)
++vgchange -y --systemid "other3" $vg1
++aux enable_dev "$dev1"
++aux enable_dev "$dev2"
++aux enable_dev "$dev3"
++aux enable_dev "$dev4"
++aux enable_dev "$dev5"
++
++vgs -o+uuid |tee out
++grep $vg1 out
++grep $UUID1 out
++grep $UUID2 out
++grep $UUID3 out
++not grep $UUID4 out
++not grep $UUID5 out
++not grep $UUID6 out
++vgs --foreign -o+uuid |tee out
++grep $vg1 out
++grep $UUID1 out
++grep $UUID2 out
++grep $UUID3 out
++grep $UUID4 out
++grep $UUID5 out
++grep $UUID6 out
++
++not vgs -o+uuid $vg1 |tee out
++not grep $vg1 out
++not vgs --foreign -o+uuid $vg1 |tee out
++not grep $vg1 out
++
++vgchange -ay
++lvs --foreign -o vguuid,active |tee out
++grep $UUID1 out | grep active
++grep $UUID2 out | grep active
++grep $UUID3 out | grep active
++grep $UUID4 out | not grep active
++grep $UUID5 out | not grep active
++grep $UUID6 out | not grep active
++vgchange -an
++
++not vgchange -ay $vg1
++lvs --foreign -o vguuid,active |tee out
++grep $UUID1 out | not grep active
++grep $UUID2 out | not grep active
++grep $UUID3 out | not grep active
++grep $UUID4 out | not grep active
++grep $UUID5 out | not grep active
++grep $UUID6 out | not grep active
++vgchange -an
++
++not lvcreate -l1 -an -n $lv2 $vg1
++lvs --foreign -o vguuid,name |tee out
++grep $UUID1 out | not grep $lv2
++grep $UUID2 out | not grep $lv2
++grep $UUID3 out | not grep $lv2
++grep $UUID4 out | not grep $lv2
++grep $UUID5 out | not grep $lv2
++grep $UUID6 out | not grep $lv2
++
++not vgremove $vg1
++vgs --foreign -o+uuid |tee out
++grep $vg1 out
++grep $UUID1 out
++grep $UUID2 out
++grep $UUID3 out
++grep $UUID4 out
++grep $UUID5 out
++grep $UUID6 out
++
++aux wipefs_a "$dev1"
++aux wipefs_a "$dev2"
++aux wipefs_a "$dev3"
++aux wipefs_a "$dev4"
++aux wipefs_a "$dev5"
++aux wipefs_a "$dev6"
++
++# vgreduce test with 1 local and 1 foreign vg. 
++# setup ++vgcreate $vg1 "$dev1" "$dev7" ++lvcreate -n $lv1 -l1 -an $vg1 "$dev1" ++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++PV1UUID=$(pvs --noheading -o uuid "$dev1") ++PV7UUID=$(pvs --noheading -o uuid "$dev7") ++aux disable_dev "$dev1" ++aux disable_dev "$dev7" ++vgcreate $vg1 "$dev2" ++PV2UUID=$(pvs --noheading -o uuid "$dev2") ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++vgchange -y --systemid "other" $vg1 ++aux enable_dev "$dev1" ++aux enable_dev "$dev7" ++ ++vgs --foreign -o+uuid |tee out ++grep $vg1 out ++grep $UUID1 out ++grep $UUID2 out ++pvs --foreign -o+uuid |tee out ++grep $PV1UUID out ++grep $PV7UUID out ++grep $PV2UUID out ++ ++vgreduce $vg1 "$dev7" ++ ++pvs --foreign -o+uuid |tee out ++grep $PV1UUID out ++grep $PV7UUID out ++grep $PV2UUID out ++ ++grep $PV7UUID out >out2 ++not grep $vg1 out2 ++ ++vgremove -ff $vg1 ++ ++aux wipefs_a "$dev1" ++aux wipefs_a "$dev2" ++aux wipefs_a "$dev7" +diff --git a/test/shell/duplicate-vgrename.sh b/test/shell/duplicate-vgrename.sh +new file mode 100644 +index 0000000..8628220 +--- /dev/null ++++ b/test/shell/duplicate-vgrename.sh +@@ -0,0 +1,319 @@ ++#!/usr/bin/env bash ++ ++# Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions ++# of the GNU General Public License v.2. ++ ++SKIP_WITH_LVMLOCKD=1 ++SKIP_WITH_LVMPOLLD=1 ++ ++. lib/inittest ++ ++aux prepare_devs 4 ++ ++# a. 0 local, 1 foreign ++# setup ++vgcreate $vg1 "$dev1" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++vgchange -y --systemid "other" $vg1 ++ ++not vgrename $vg1 $vg2 ++vgs --foreign -o+uuid |tee out ++grep $UUID1 out ++not vgrename $UUID1 $vg2 ++vgs --foreign -o+uuid |tee out ++grep $UUID1 out ++ ++lvs --foreign ++ ++aux wipefs_a "$dev1" ++ ++# b. 0 local, 2 foreign ++# setup ++vgcreate $vg1 "$dev1" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++vgchange -y --systemid "other" $vg1 ++aux disable_dev "$dev1" ++vgcreate $vg1 "$dev2" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++vgchange -y --systemid "other2" $vg1 ++aux enable_dev "$dev1" ++ ++not vgrename $vg1 $vg2 ++vgs --foreign -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++not grep $vg2 out ++grep $UUID1 out ++grep $UUID2 out ++not vgrename $UUID1 $vg2 ++vgs --foreign -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++not grep $vg2 out ++grep $UUID1 out ++grep $UUID2 out ++ ++lvs --foreign ++ ++aux wipefs_a "$dev1" ++aux wipefs_a "$dev2" ++ ++# c. 1 local, 1 foreign ++# setup ++vgcreate $vg1 "$dev1" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++aux disable_dev "$dev1" ++vgcreate $vg1 "$dev2" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++vgchange -y --systemid "other" $vg1 ++aux enable_dev "$dev1" ++ ++vgrename $vg1 $vg2 ++vgs --foreign -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++grep $vg2 out ++grep $UUID1 out ++grep $UUID2 out ++not vgrename $vg2 $vg1 ++vgs --foreign -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++grep $vg2 out ++grep $UUID1 out ++grep $UUID2 out ++ ++lvs --foreign ++ ++aux wipefs_a "$dev1" ++aux wipefs_a "$dev2" ++ ++# d. 
1 local, 2 foreign ++# setup ++vgcreate $vg1 "$dev1" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++aux disable_dev "$dev1" ++vgcreate $vg1 "$dev2" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++vgchange -y --systemid "other" $vg1 ++aux disable_dev "$dev2" ++vgcreate $vg1 "$dev3" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID3=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++vgchange -y --systemid "other2" $vg1 ++aux enable_dev "$dev1" ++aux enable_dev "$dev2" ++ ++vgrename $vg1 $vg2 ++vgs --foreign -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++grep $vg2 out ++grep $UUID1 out ++grep $UUID2 out ++grep $UUID3 out ++not vgrename $vg2 $vg1 ++vgs --foreign -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++grep $vg2 out ++grep $UUID1 out ++grep $UUID2 out ++grep $UUID3 out ++ ++lvs --foreign ++ ++aux wipefs_a "$dev1" ++aux wipefs_a "$dev2" ++aux wipefs_a "$dev3" ++ ++# e. 2 local, 0 foreign ++# setup ++vgcreate $vg1 "$dev1" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++aux disable_dev "$dev1" ++vgcreate $vg1 "$dev2" ++lvcreate -n ${lv1}_b -l1 -an $vg1 ++UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++aux enable_dev "$dev1" ++ ++not vgrename $vg1 $vg2 ++vgs -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++not grep $vg2 out ++grep $UUID1 out ++grep $UUID2 out ++vgrename $UUID1 $vg2 ++vgs -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++grep $vg2 out ++grep $UUID1 out ++grep $UUID2 out ++not vgrename $UUID2 $vg2 ++vgs -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++grep $vg2 out ++grep $UUID1 out ++grep $UUID2 out ++ ++lvs --foreign ++ ++aux wipefs_a "$dev1" ++aux wipefs_a "$dev2" ++ ++# f. 2 local, 1 foreign ++# setup ++vgcreate $vg1 "$dev1" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++aux disable_dev "$dev1" ++vgcreate $vg1 "$dev2" ++lvcreate -n ${lv1}_b -l1 -an $vg1 ++UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++aux disable_dev "$dev2" ++vgcreate $vg1 "$dev3" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID3=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++vgchange -y --systemid "other" $vg1 ++aux enable_dev "$dev1" ++aux enable_dev "$dev2" ++lvs --foreign ++ ++not vgrename $vg1 $vg2 ++vgs --foreign -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++not grep $vg2 out ++grep $UUID1 out ++grep $UUID2 out ++grep $UUID3 out ++vgrename $UUID1 $vg2 ++vgs --foreign -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++grep $vg2 out ++grep $UUID1 out ++grep $UUID2 out ++grep $UUID3 out ++vgrename $vg1 $vg3 ++vgs --foreign -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++grep $vg2 out ++grep $vg3 out ++grep $UUID1 out ++grep $UUID2 out ++grep $UUID3 out ++not vgrename $vg2 $vg1 ++vgs --foreign -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++grep $vg2 out ++grep $vg3 out ++grep $UUID1 out ++grep $UUID2 out ++grep $UUID3 out ++not vgrename $vg2 $vg3 ++vgs --foreign -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++grep $vg2 out ++grep $vg3 out ++grep $UUID1 out ++grep $UUID2 out ++grep $UUID3 out ++ ++lvs --foreign ++ ++aux wipefs_a "$dev1" ++aux wipefs_a "$dev2" ++aux wipefs_a "$dev3" ++ ++# g. 
3 local, 0 foreign ++# setup ++vgcreate $vg1 "$dev1" ++lvcreate -n $lv1 -l1 -an $vg1 ++UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++aux disable_dev "$dev1" ++vgcreate $vg1 "$dev2" ++lvcreate -n ${lv1}_b -l1 -an $vg1 ++UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++aux disable_dev "$dev2" ++vgcreate $vg1 "$dev3" ++lvcreate -n ${lv1}_c -l1 -an $vg1 ++UUID3=$(vgs --noheading -o vg_uuid $vg1 | xargs) ++aux enable_dev "$dev1" ++aux enable_dev "$dev2" ++ ++not vgrename $vg1 $vg2 ++vgs -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++not grep $vg2 out ++grep $UUID1 out ++grep $UUID2 out ++grep $UUID3 out ++vgrename $UUID1 $vg2 ++vgs -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++grep $vg2 out ++grep $UUID1 out ++grep $UUID2 out ++grep $UUID3 out ++not vgrename $vg1 $vg2 ++vgs -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++grep $vg2 out ++grep $UUID1 out ++grep $UUID2 out ++grep $UUID3 out ++not vgrename $vg1 $vg3 ++vgs -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++grep $vg2 out ++not grep $vg3 out ++grep $UUID1 out ++grep $UUID2 out ++grep $UUID3 out ++not vgrename $UUID2 $vg2 ++vgs -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++grep $vg2 out ++not grep $vg3 out ++grep $UUID1 out ++grep $UUID2 out ++grep $UUID3 out ++vgrename $UUID2 $vg3 ++vgs -o+uuid |tee out ++lvs --foreign ++grep $vg1 out ++grep $vg2 out ++grep $vg3 out ++grep $UUID1 out ++grep $UUID2 out ++grep $UUID3 out ++ ++lvs --foreign ++ ++aux wipefs_a "$dev1" ++aux wipefs_a "$dev2" ++aux wipefs_a "$dev3" ++ +diff --git a/test/shell/integrity-dmeventd.sh b/test/shell/integrity-dmeventd.sh +index 58899ca..ed2436a 100644 +--- a/test/shell/integrity-dmeventd.sh ++++ b/test/shell/integrity-dmeventd.sh +@@ -22,9 +22,9 @@ mkdir -p $mnt + + aux prepare_devs 6 64 + +-for i in `seq 1 16384`; do echo -n "A" >> fileA; done +-for i in `seq 1 16384`; do echo -n "B" >> fileB; done +-for i in `seq 1 16384`; do echo -n "C" >> fileC; done ++printf "%0.sA" {1..16384} >> fileA ++printf "%0.sB" {1..16384} >> fileB ++printf "%0.sC" {1..16384} >> fileC + + # generate random data + dd if=/dev/urandom of=randA bs=512K count=2 +diff --git a/test/shell/integrity-large.sh b/test/shell/integrity-large.sh +index 0c36e4d..7a333c1 100644 +--- a/test/shell/integrity-large.sh ++++ b/test/shell/integrity-large.sh +@@ -25,9 +25,9 @@ mkdir -p $mnt + # raid1 LV needs to be extended to 512MB to test imeta being exended + aux prepare_devs 4 600 + +-for i in `seq 1 16384`; do echo -n "A" >> fileA; done +-for i in `seq 1 16384`; do echo -n "B" >> fileB; done +-for i in `seq 1 16384`; do echo -n "C" >> fileC; done ++printf "%0.sA" {1..16384} >> fileA ++printf "%0.sB" {1..16384} >> fileB ++printf "%0.sC" {1..16384} >> fileC + + # generate random data + dd if=/dev/urandom of=randA bs=512K count=2 +diff --git a/test/shell/integrity-misc.sh b/test/shell/integrity-misc.sh +index 73b0a67..a176f18 100644 +--- a/test/shell/integrity-misc.sh ++++ b/test/shell/integrity-misc.sh +@@ -22,9 +22,9 @@ mkdir -p $mnt + + aux prepare_devs 5 64 + +-for i in `seq 1 16384`; do echo -n "A" >> fileA; done +-for i in `seq 1 16384`; do echo -n "B" >> fileB; done +-for i in `seq 1 16384`; do echo -n "C" >> fileC; done ++printf "%0.sA" {1..16384} >> fileA ++printf "%0.sB" {1..16384} >> fileB ++printf "%0.sC" {1..16384} >> fileC + + # generate random data + dd if=/dev/urandom of=randA bs=512K count=2 +diff --git a/test/shell/integrity.sh b/test/shell/integrity.sh +index 7e4f2cb..6baccf0 100644 +--- a/test/shell/integrity.sh ++++ b/test/shell/integrity.sh +@@ -23,9 +23,9 @@ 
mkdir -p $mnt + + aux prepare_devs 5 64 + +-for i in `seq 1 16384`; do echo -n "A" >> fileA; done +-for i in `seq 1 16384`; do echo -n "B" >> fileB; done +-for i in `seq 1 16384`; do echo -n "C" >> fileC; done ++printf "%0.sA" {1..16384} >> fileA ++printf "%0.sB" {1..16384} >> fileB ++printf "%0.sC" {1..16384} >> fileC + + # generate random data + dd if=/dev/urandom of=randA bs=512K count=2 +diff --git a/test/shell/process-each-duplicate-vgnames.sh b/test/shell/process-each-duplicate-vgnames.sh +deleted file mode 100644 +index a59c3bd..0000000 +--- a/test/shell/process-each-duplicate-vgnames.sh ++++ /dev/null +@@ -1,55 +0,0 @@ +-#!/usr/bin/env bash +- +-# Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved. +-# +-# This copyrighted material is made available to anyone wishing to use, +-# modify, copy, or redistribute it subject to the terms and conditions +-# of the GNU General Public License v.2. +- +-test_description='Test vgs with duplicate vg names' +-SKIP_WITH_LVMLOCKD=1 +-SKIP_WITH_LVMPOLLD=1 +- +-. lib/inittest +- +-aux prepare_devs 2 +- +-pvcreate "$dev1" +-pvcreate "$dev2" +- +-aux disable_dev "$dev1" "$dev2" +- +-aux enable_dev "$dev1" +-vgcreate $vg1 "$dev1" +-UUID1=$(vgs --noheading -o vg_uuid $vg1) +-aux disable_dev "$dev1" +- +-aux enable_dev "$dev2" +-vgcreate $vg1 "$dev2" +-UUID2=$(vgs --noheading -o vg_uuid $vg1) +- +-aux enable_dev "$dev1" +-pvscan --cache "$dev1" +-pvs "$dev1" +-pvs "$dev2" +- +-vgs -o+vg_uuid | tee err +-grep $UUID1 err +-grep $UUID2 err +- +-# should we specify and test which should be displayed? +-# vgs --noheading -o vg_uuid $vg1 >err +-# grep $UUID1 err +- +-aux disable_dev "$dev2" +-vgs -o+vg_uuid | tee err +-grep $UUID1 err +-not grep $UUID2 err +-aux enable_dev "$dev2" +-pvscan --cache "$dev2" +- +-aux disable_dev "$dev1" +-vgs -o+vg_uuid | tee err +-grep $UUID2 err +-not grep $UUID1 err +-aux enable_dev "$dev1" +diff --git a/test/shell/thin-foreign-repair.sh b/test/shell/thin-foreign-repair.sh +index 147a9a0..8b4018e 100644 +--- a/test/shell/thin-foreign-repair.sh ++++ b/test/shell/thin-foreign-repair.sh +@@ -56,7 +56,9 @@ dmsetup create "$THIN" --table "0 40960 thin $DM_DEV_DIR/mapper/$POOL 0" + + mkfs.ext4 "$DM_DEV_DIR/mapper/$THIN" + +-dmsetup remove "$THIN" ++aux udev_wait ++ ++dmsetup remove "$THIN" || { sleep .5 ; dmsetup remove "$THIN" } + + lvchange -an $vg/pool + +diff --git a/tools/command.c b/tools/command.c +index 50791b1..511dda1 100644 +--- a/tools/command.c ++++ b/tools/command.c +@@ -2319,7 +2319,8 @@ static void _print_val_man(struct command_name *cname, int opt_enum, int val_enu + } + + if (strchr(str, '|')) { +- line = strdup(str); ++ if (!(line = strdup(str))) ++ return; + _split_line(line, &line_argc, line_argv, '|'); + for (i = 0; i < line_argc; i++) { + if (i) +@@ -3606,9 +3607,12 @@ int main(int argc, char *argv[]) + goto out_free; + } + +- if (optind < argc) +- cmdname = strdup(argv[optind++]); +- else { ++ if (optind < argc) { ++ if (!(cmdname = strdup(argv[optind++]))) { ++ log_error("Out of memory."); ++ goto out_free; ++ } ++ } else { + log_error("Missing command name."); + goto out_free; + } +diff --git a/tools/lvconvert.c b/tools/lvconvert.c +index e969b44..cf93538 100644 +--- a/tools/lvconvert.c ++++ b/tools/lvconvert.c +@@ -5589,7 +5589,8 @@ static struct logical_volume *_lv_writecache_create(struct cmd_context *cmd, + + memcpy(&seg->writecache_settings, settings, sizeof(struct writecache_settings)); + +- add_seg_to_segs_using_this_lv(lv_fast, seg); ++ if (!add_seg_to_segs_using_this_lv(lv_fast, seg)) ++ 
return_NULL;
+ 
+ 	return lv_wcorig;
+ }
+diff --git a/tools/pvck.c b/tools/pvck.c
+index 71bfc1b..a0f567e 100644
+--- a/tools/pvck.c
++++ b/tools/pvck.c
+@@ -3065,11 +3065,9 @@ int pvck(struct cmd_context *cmd, int argc, char **argv)
+ 
+ 	label_scan_setup_bcache();
+ 
+-	if (arg_is_set(cmd, dump_ARG)) {
++	if ((dump = arg_str_value(cmd, dump_ARG, NULL))) {
+ 		cmd->use_hints = 0;
+ 
+-		dump = arg_str_value(cmd, dump_ARG, NULL);
+-
+ 		if (!strcmp(dump, "metadata"))
+ 			ret = _dump_metadata(cmd, dump, &set, labelsector, dev, def, PRINT_CURRENT, 0);
+ 
+@@ -3096,11 +3094,9 @@ int pvck(struct cmd_context *cmd, int argc, char **argv)
+ 
+ 		return ECMD_PROCESSED;
+ 	}
+ 
+-	if (arg_is_set(cmd, repairtype_ARG)) {
++	if ((repair = arg_str_value(cmd, repairtype_ARG, NULL))) {
+ 		cmd->use_hints = 0;
+ 
+-		repair = arg_str_value(cmd, repairtype_ARG, NULL);
+-
+ 		if (!strcmp(repair, "label_header"))
+ 			ret = _repair_label_header(cmd, repair, &set, labelsector, dev);
+ 
+diff --git a/tools/pvscan.c b/tools/pvscan.c
+index 1bf543c..4d811da 100644
+--- a/tools/pvscan.c
++++ b/tools/pvscan.c
+@@ -582,7 +582,7 @@ static int _online_pvscan_single(struct metadata_area *mda, void *baton)
+ 
+ 	if (mda_is_ignored(mda))
+ 		return 1;
+-	vg = mda->ops->vg_read(b->fid, "", mda, NULL, NULL);
++	vg = mda->ops->vg_read(b->cmd, b->fid, "", mda, NULL, NULL);
+ 	if (!vg) {
+ 		/*
+ 		 * Many or most cases of bad metadata would be found in
+diff --git a/tools/toollib.c b/tools/toollib.c
+index 96d0d6d..89b6374 100644
+--- a/tools/toollib.c
++++ b/tools/toollib.c
+@@ -1853,8 +1853,6 @@ static int _resolve_duplicate_vgnames(struct cmd_context *cmd,
+ 			if (lvmcache_vg_is_foreign(cmd, vgnl->vg_name, vgnl->vgid)) {
+ 				if (!id_write_format((const struct id*)vgnl->vgid, uuid, sizeof(uuid)))
+ 					stack;
+-				log_warn("WARNING: Ignoring foreign VG with matching name %s UUID %s.",
+-					 vgnl->vg_name, uuid);
+ 				dm_list_del(&vgnl->list);
+ 			} else {
+ 				found++;
+diff --git a/tools/vgchange.c b/tools/vgchange.c
+index a10bf11..58c8ddc 100644
+--- a/tools/vgchange.c
++++ b/tools/vgchange.c
+@@ -991,8 +991,13 @@ static int _vgchange_locktype_single(struct cmd_context *cmd, const char *vg_nam
+ 	 * deactivate it.
+ 	 */
+ 	if (vg->lock_type && !strcmp(vg->lock_type, "sanlock") &&
+-	    (cmd->command->command_enum == vgchange_locktype_CMD))
+-		deactivate_lv(cmd, vg->sanlock_lv);
++	    (cmd->command->command_enum == vgchange_locktype_CMD)) {
++		if (!deactivate_lv(cmd, vg->sanlock_lv)) {
++			log_error("Failed to deactivate %s.",
++				  display_lvname(vg->sanlock_lv));
++			return ECMD_FAILED;
++		}
++	}
+ 
+ 	log_print_unless_silent("Volume group \"%s\" successfully changed", vg->name);
+ 
+diff --git a/tools/vgimportclone.c b/tools/vgimportclone.c
+index be01861..ee1c28f 100644
+--- a/tools/vgimportclone.c
++++ b/tools/vgimportclone.c
+@@ -315,6 +315,8 @@ retry_name:
+ 		goto_out;
+ 	log_debug("Using new VG name %s.", vp.new_vgname);
+ 
++	lvmcache_destroy(cmd, 1, 0);
++
+ 	/*
+ 	 * Create a device filter so that we are only working with the devices
+ 	 * in arg_import. 
With the original devs hidden (that arg_import were +@@ -325,7 +327,7 @@ retry_name: + init_internal_filtering(1); + dm_list_iterate_items(vd, &vp.arg_import) + internal_filter_allow(cmd->mem, vd->dev); +- lvmcache_destroy(cmd, 1, 0); ++ refresh_filters(cmd); + + log_debug("Changing VG %s to %s.", vp.old_vgname, vp.new_vgname); + +diff --git a/tools/vgmerge.c b/tools/vgmerge.c +index 903504c..895018a 100644 +--- a/tools/vgmerge.c ++++ b/tools/vgmerge.c +@@ -21,10 +21,8 @@ static struct volume_group *_vgmerge_vg_read(struct cmd_context *cmd, + struct volume_group *vg; + log_verbose("Checking for volume group \"%s\"", vg_name); + vg = vg_read_for_update(cmd, vg_name, NULL, 0, 0); +- if (vg_read_error(vg)) { +- release_vg(vg); ++ if (!vg) + return NULL; +- } + + if (vg_is_shared(vg)) { + log_error("vgmerge not allowed for lock_type %s", vg->lock_type); +diff --git a/tools/vgrename.c b/tools/vgrename.c +index 8b76d0b..f442f73 100644 +--- a/tools/vgrename.c ++++ b/tools/vgrename.c +@@ -183,7 +183,7 @@ int vgrename(struct cmd_context *cmd, int argc, char **argv) + vg_name_new = skip_dev_dir(cmd, argv[1], NULL); + + if (!validate_vg_rename_params(cmd, vg_name_old, vg_name_new)) +- return_0; ++ return_ECMD_FAILED; + + if (!(vp.vg_name_old = dm_pool_strdup(cmd->mem, vg_name_old))) + return_ECMD_FAILED; +diff --git a/tools/vgsplit.c b/tools/vgsplit.c +index 3dc19ec..1a422e6 100644 +--- a/tools/vgsplit.c ++++ b/tools/vgsplit.c +@@ -691,7 +691,7 @@ int vgsplit(struct cmd_context *cmd, int argc, char **argv) + + vg_to = vg_read_for_update(cmd, vg_name_to, NULL, 0, 0); + +- if (vg_read_error(vg_to)) { ++ if (!vg_to) { + log_error("Volume group \"%s\" became inconsistent: " + "please fix manually", vg_name_to); + goto bad; +-- +1.8.3.1 + diff --git a/SOURCES/0002-Merge-master-up-to-commit-be61bd6ff5c6.patch b/SOURCES/0002-Merge-master-up-to-commit-be61bd6ff5c6.patch new file mode 100644 index 0000000..0f1b14a --- /dev/null +++ b/SOURCES/0002-Merge-master-up-to-commit-be61bd6ff5c6.patch @@ -0,0 +1,210 @@ +From f540a18fd7f5f65599a6c85c0bd3ba84e54f1cc8 Mon Sep 17 00:00:00 2001 +From: Marian Csontos +Date: Thu, 28 May 2020 18:02:16 +0200 +Subject: [PATCH] Merge master up to commit be61bd6ff5c6 + +--- + VERSION | 2 +- + VERSION_DM | 2 +- + test/shell/cache-single-usage.sh | 13 +++++++++++++ + test/shell/integrity-dmeventd.sh | 8 ++++++++ + test/shell/integrity-large.sh | 8 ++++++++ + test/shell/integrity-misc.sh | 8 ++++++++ + test/shell/integrity.sh | 8 ++++++++ + test/shell/thin-foreign-repair.sh | 14 ++++++++++---- + tools/lvconvert.c | 15 +++++++++++++++ + 9 files changed, 72 insertions(+), 6 deletions(-) + +diff --git a/VERSION b/VERSION +index 00618e0..9ad7a70 100644 +--- a/VERSION ++++ b/VERSION +@@ -1 +1 @@ +-2.03.09(2)-RHEL8 (2020-04-21) ++2.03.09(2)-RHEL8 (2020-05-28) +diff --git a/VERSION_DM b/VERSION_DM +index b9ec43e..bcd97de 100644 +--- a/VERSION_DM ++++ b/VERSION_DM +@@ -1 +1 @@ +-1.02.171-RHEL8 (2020-04-21) ++1.02.171-RHEL8 (2020-05-28) +diff --git a/test/shell/cache-single-usage.sh b/test/shell/cache-single-usage.sh +index a885bf7..8936aa3 100644 +--- a/test/shell/cache-single-usage.sh ++++ b/test/shell/cache-single-usage.sh +@@ -127,4 +127,17 @@ umount "$mount_dir" + lvchange -an $vg/$lv1 + lvchange -an $vg/$lv2 + ++# misc tests ++ ++lvremove $vg ++ ++lvcreate -n $lv1 -l 2 -an $vg "$dev1" ++lvcreate -n $lv2 -l 2 -an $vg "$dev1" ++lvcreate -n $lv3 -l 2 -an $vg "$dev2" ++ ++lvconvert -y --type writecache --cachevol $lv3 $vg/$lv1 ++not lvconvert -y --type writecache --cachevol ${lv3}_cvol 
$vg/$lv2 ++not lvconvert -y --type cache --cachevol ${lv3}_cvol $vg/$lv2 ++not lvconvert -y --type cache --cachepool ${lv3}_cvol $vg/$lv2 ++ + vgremove -ff $vg +diff --git a/test/shell/integrity-dmeventd.sh b/test/shell/integrity-dmeventd.sh +index ed2436a..296f556 100644 +--- a/test/shell/integrity-dmeventd.sh ++++ b/test/shell/integrity-dmeventd.sh +@@ -109,6 +109,14 @@ _wait_recalc() { + sleep 1 + done + ++ # TODO: There is some strange bug, first leg of RAID with integrity ++ # enabled never gets in sync. I saw this in BB, but not when executing ++ # the commands manually ++ if test -z "$sync"; then ++ echo "TEST WARNING: Resync of dm-integrity device '$checklv' failed" ++ dmsetup status "$DM_DEV_DIR/mapper/${checklv/\//-}" ++ exit ++ fi + echo "timeout waiting for recalc" + return 1 + } +diff --git a/test/shell/integrity-large.sh b/test/shell/integrity-large.sh +index 7a333c1..5aba80e 100644 +--- a/test/shell/integrity-large.sh ++++ b/test/shell/integrity-large.sh +@@ -95,6 +95,14 @@ _wait_recalc() { + sleep 1 + done + ++ # TODO: There is some strange bug, first leg of RAID with integrity ++ # enabled never gets in sync. I saw this in BB, but not when executing ++ # the commands manually ++ if test -z "$sync"; then ++ echo "TEST WARNING: Resync of dm-integrity device '$checklv' failed" ++ dmsetup status "$DM_DEV_DIR/mapper/${checklv/\//-}" ++ exit ++ fi + echo "timeout waiting for recalc" + return 1 + } +diff --git a/test/shell/integrity-misc.sh b/test/shell/integrity-misc.sh +index a176f18..0d05689 100644 +--- a/test/shell/integrity-misc.sh ++++ b/test/shell/integrity-misc.sh +@@ -109,6 +109,14 @@ _wait_recalc() { + sleep 1 + done + ++ # TODO: There is some strange bug, first leg of RAID with integrity ++ # enabled never gets in sync. I saw this in BB, but not when executing ++ # the commands manually ++ if test -z "$sync"; then ++ echo "TEST WARNING: Resync of dm-integrity device '$checklv' failed" ++ dmsetup status "$DM_DEV_DIR/mapper/${checklv/\//-}" ++ exit ++ fi + echo "timeout waiting for recalc" + return 1 + } +diff --git a/test/shell/integrity.sh b/test/shell/integrity.sh +index 6baccf0..77e9430 100644 +--- a/test/shell/integrity.sh ++++ b/test/shell/integrity.sh +@@ -204,6 +204,14 @@ _wait_recalc() { + sleep 1 + done + ++ # TODO: There is some strange bug, first leg of RAID with integrity ++ # enabled never gets in sync. I saw this in BB, but not when executing ++ # the commands manually ++ if test -z "$sync"; then ++ echo "TEST WARNING: Resync of dm-integrity device '$checklv' failed" ++ dmsetup status "$DM_DEV_DIR/mapper/${checklv/\//-}" ++ exit ++ fi + echo "timeout waiting for recalc" + return 1 + } +diff --git a/test/shell/thin-foreign-repair.sh b/test/shell/thin-foreign-repair.sh +index 8b4018e..55e9f62 100644 +--- a/test/shell/thin-foreign-repair.sh ++++ b/test/shell/thin-foreign-repair.sh +@@ -17,9 +17,15 @@ SKIP_WITH_LVMPOLLD=1 + + . 
lib/inittest + ++clean_thin_() ++{ ++ aux udev_wait ++ dmsetup remove "$THIN" || { sleep .5 ; dmsetup remove "$THIN" ; } ++} ++ + cleanup_mounted_and_teardown() + { +- dmsetup remove $THIN || true ++ clean_thin_ || true + vgremove -ff $vg + aux teardown + } +@@ -56,9 +62,7 @@ dmsetup create "$THIN" --table "0 40960 thin $DM_DEV_DIR/mapper/$POOL 0" + + mkfs.ext4 "$DM_DEV_DIR/mapper/$THIN" + +-aux udev_wait +- +-dmsetup remove "$THIN" || { sleep .5 ; dmsetup remove "$THIN" } ++clean_thin_ + + lvchange -an $vg/pool + +@@ -72,3 +76,5 @@ lvchange -ay $vg/pool + dmsetup create "$THIN" --table "0 40960 thin $DM_DEV_DIR/mapper/$POOL 0" + + fsck -n "$DM_DEV_DIR/mapper/$THIN" ++ ++# exit calls cleanup_mounted_and_teardown +diff --git a/tools/lvconvert.c b/tools/lvconvert.c +index cf93538..8652252 100644 +--- a/tools/lvconvert.c ++++ b/tools/lvconvert.c +@@ -4264,6 +4264,11 @@ static int _lvconvert_cachevol_attach_single(struct cmd_context *cmd, + goto out; + } + ++ if (lv_is_cache_vol(cachevol_lv)) { ++ log_error("LV %s is already used as a cachevol.", display_lvname(cachevol_lv)); ++ goto out; ++ } ++ + /* Ensure the LV is not active elsewhere. */ + if (!lockd_lv(cmd, lv, "ex", 0)) + goto_out; +@@ -4347,6 +4352,11 @@ static int _lvconvert_cachepool_attach_single(struct cmd_context *cmd, + goto out; + } + ++ if (lv_is_cache_vol(cachepool_lv)) { ++ log_error("LV %s is already used as a cachevol.", display_lvname(cachepool_lv)); ++ goto out; ++ } ++ + if (cachepool_lv == lv) { + log_error("Use a different LV for cache pool LV and cache LV %s.", + display_lvname(cachepool_lv)); +@@ -5629,6 +5639,11 @@ static int _lvconvert_writecache_attach_single(struct cmd_context *cmd, + goto bad; + } + ++ if (lv_is_cache_vol(lv_fast)) { ++ log_error("LV %s is already used as a cachevol.", display_lvname(lv_fast)); ++ goto bad; ++ } ++ + /* + * To permit this we need to check the block size of the fs using lv + * (recently in libblkid) so that we can use a matching writecache +-- +1.8.3.1 + diff --git a/SOURCES/0003-Merge-master-up-to-commit-c1d136fea3d1.patch b/SOURCES/0003-Merge-master-up-to-commit-c1d136fea3d1.patch new file mode 100644 index 0000000..02fe5b1 --- /dev/null +++ b/SOURCES/0003-Merge-master-up-to-commit-c1d136fea3d1.patch @@ -0,0 +1,6235 @@ +From d8f301b9244d93695b344c33e9ff7a116b5f17b7 Mon Sep 17 00:00:00 2001 +From: Marian Csontos +Date: Sun, 9 Aug 2020 16:42:09 +0200 +Subject: [PATCH] Merge master up to commit c1d136fea3d1 + +(cherry picked from commit 4ef278fcb7c08721e973af7300fd1bff5d142398) +--- + WHATS_NEW | 11 +- + conf/example.conf.in | 6 +- + daemons/lvmdbusd/cmdhandler.py | 9 + + daemons/lvmdbusd/lv.py | 45 +- + daemons/lvmdbusd/manager.py | 2 +- + device_mapper/all.h | 4 + + device_mapper/libdm-deptree.c | 12 + + include/configure.h.in | 6 +- + lib/cache/lvmcache.c | 4 + + lib/config/config_settings.h | 5 +- + lib/config/defaults.h | 1 + + lib/device/bcache.c | 2 +- + lib/device/dev-cache.c | 1 + + lib/device/dev-md.c | 206 ++++++--- + lib/device/dev-type.c | 36 +- + lib/label/hints.c | 4 +- + lib/metadata/cache_manip.c | 4 + + lib/metadata/integrity_manip.c | 61 ++- + lib/metadata/lv.c | 3 + + lib/metadata/lv_manip.c | 102 +++-- + lib/metadata/merge.c | 3 +- + lib/metadata/metadata-exported.h | 7 +- + lib/metadata/metadata.c | 4 +- + lib/metadata/pool_manip.c | 6 +- + lib/metadata/raid_manip.c | 5 + + lib/metadata/snapshot_manip.c | 2 - + lib/metadata/writecache_manip.c | 365 ++++++++++++++-- + lib/report/report.c | 18 +- + lib/writecache/writecache.c | 49 +++ + man/lvconvert.8_pregen | 
58 +++ + man/lvcreate.8_pregen | 246 ++++++++++- + man/lvmcache.7_main | 85 +++- + man/lvs.8_end | 4 + + man/lvs.8_pregen | 4 + + man/vgck.8_pregen | 9 + + scripts/blkdeactivate.sh.in | 6 + + test/dbus/lvmdbustest.py | 30 ++ + test/lib/aux.sh | 1 + + test/shell/cachevol-cachedevice.sh | 222 ++++++++++ + test/shell/integrity-blocksize-2.sh | 128 ++++++ + test/shell/integrity-blocksize-3.sh | 285 ++++++++++++ + test/shell/integrity-blocksize.sh | 108 ++++- + test/shell/integrity-large.sh | 23 +- + test/shell/integrity-misc.sh | 27 +- + test/shell/integrity.sh | 46 +- + test/shell/lvconvert-m-raid1-degraded.sh | 6 +- + test/shell/lvcreate-signature-wiping.sh | 7 + + test/shell/lvcreate-thin.sh | 21 + + test/shell/writecache-blocksize.sh | 342 +++++++++++++++ + test/shell/writecache-large.sh | 153 +++++++ + test/shell/writecache-split.sh | 34 +- + test/shell/writecache.sh | 315 +++++++++----- + tools/args.h | 17 +- + tools/command-lines.in | 145 +++--- + tools/command.c | 3 + + tools/lvchange.c | 85 ++++ + tools/lvconvert.c | 726 +++++++++++++++++++------------ + tools/lvcreate.c | 153 ++++++- + tools/lvmcmdline.c | 8 + + tools/toollib.c | 164 +++++++ + tools/toollib.h | 3 + + tools/tools.h | 11 + + 62 files changed, 3761 insertions(+), 697 deletions(-) + create mode 100644 test/shell/cachevol-cachedevice.sh + create mode 100644 test/shell/integrity-blocksize-2.sh + create mode 100644 test/shell/integrity-blocksize-3.sh + create mode 100644 test/shell/writecache-blocksize.sh + create mode 100644 test/shell/writecache-large.sh + +diff --git a/WHATS_NEW b/WHATS_NEW +index c0267b7..ac99e97 100644 +--- a/WHATS_NEW ++++ b/WHATS_NEW +@@ -1,5 +1,14 @@ + Version 2.03.10 - +-================================= ++================================== ++ Add writecache and integrity support to lvmdbusd. ++ Generate unique cachevol name when default required from lvcreate. ++ Converting RAID1 volume to one with same number of legs now succeeds with a ++ warning. ++ Fix conversion to raid from striped lagging type. ++ Fix conversion to 'mirrored' mirror log with larger regionsize. ++ Zero pool metadata on allocation (disable with allocation/zero_metadata=0). ++ Failure in zeroing or wiping will fail command (bypass with -Zn, -Wn). ++ Fix running out of free buffers for async writing for larger writes. + Add integrity with raid capability. + Fix support for lvconvert --repair used by foreign apps (i.e. Docker). + +diff --git a/conf/example.conf.in b/conf/example.conf.in +index 88858fc..d5807e6 100644 +--- a/conf/example.conf.in ++++ b/conf/example.conf.in +@@ -489,7 +489,7 @@ allocation { + # This configuration option does not have a default value defined. + + # Configuration option allocation/thin_pool_metadata_require_separate_pvs. +- # Thin pool metdata and data will always use different PVs. ++ # Thin pool metadata and data will always use different PVs. + thin_pool_metadata_require_separate_pvs = 0 + + # Configuration option allocation/thin_pool_zero. +@@ -527,6 +527,10 @@ allocation { + # This configuration option has an automatic default value. + # thin_pool_chunk_size_policy = "generic" + ++ # Configuration option allocation/zero_metadata. ++ # Zero whole metadata area before use with thin or cache pool. ++ zero_metadata = 1 ++ + # Configuration option allocation/thin_pool_chunk_size. + # The minimal chunk size in KiB for thin pool volumes. 
+ # Larger chunk sizes may improve performance for plain thin volumes, +diff --git a/daemons/lvmdbusd/cmdhandler.py b/daemons/lvmdbusd/cmdhandler.py +index 7d2f4c4..1c15b78 100644 +--- a/daemons/lvmdbusd/cmdhandler.py ++++ b/daemons/lvmdbusd/cmdhandler.py +@@ -453,6 +453,15 @@ def lv_cache_lv(cache_pool_full_name, lv_full_name, cache_options): + return call(cmd) + + ++def lv_writecache_lv(cache_lv_full_name, lv_full_name, cache_options): ++ # lvconvert --type writecache --cachevol VG/CacheLV VG/OriginLV ++ cmd = ['lvconvert'] ++ cmd.extend(options_to_cli_args(cache_options)) ++ cmd.extend(['-y', '--type', 'writecache', '--cachevol', ++ cache_lv_full_name, lv_full_name]) ++ return call(cmd) ++ ++ + def lv_detach_cache(lv_full_name, detach_options, destroy_cache): + cmd = ['lvconvert'] + if destroy_cache: +diff --git a/daemons/lvmdbusd/lv.py b/daemons/lvmdbusd/lv.py +index fd46f34..edfdd0d 100644 +--- a/daemons/lvmdbusd/lv.py ++++ b/daemons/lvmdbusd/lv.py +@@ -388,7 +388,7 @@ class LvCommon(AutomatedProperties): + 'l': 'mirror log device', 'c': 'under conversion', + 'V': 'thin Volume', 't': 'thin pool', 'T': 'Thin pool data', + 'e': 'raid or pool metadata or pool metadata spare', +- 'd': 'vdo pool', 'D': 'vdo pool data', ++ 'd': 'vdo pool', 'D': 'vdo pool data', 'g': 'integrity', + '-': 'Unspecified'} + return self.attr_struct(0, type_map) + +@@ -743,6 +743,49 @@ class Lv(LvCommon): + cb, cbe, return_tuple=False) + cfg.worker_q.put(r) + ++ @staticmethod ++ def _writecache_lv(lv_uuid, lv_name, lv_object_path, cache_options): ++ # Make sure we have a dbus object representing it ++ dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name) ++ ++ # Make sure we have dbus object representing lv to cache ++ lv_to_cache = cfg.om.get_object_by_path(lv_object_path) ++ ++ if lv_to_cache: ++ fcn = lv_to_cache.lv_full_name() ++ rc, out, err = cmdhandler.lv_writecache_lv( ++ dbo.lv_full_name(), fcn, cache_options) ++ if rc == 0: ++ # When we cache an LV, the cache pool and the lv that is getting ++ # cached need to be removed from the object manager and ++ # re-created as their interfaces have changed! ++ mt_remove_dbus_objects((dbo, lv_to_cache)) ++ cfg.load() ++ ++ lv_converted = cfg.om.get_object_path_by_lvm_id(fcn) ++ else: ++ raise dbus.exceptions.DBusException( ++ LV_INTERFACE, ++ 'Exit code %s, stderr = %s' % (str(rc), err)) ++ else: ++ raise dbus.exceptions.DBusException( ++ LV_INTERFACE, 'LV to cache with object path %s not present!' 
% ++ lv_object_path) ++ return lv_converted ++ ++ @dbus.service.method( ++ dbus_interface=LV_INTERFACE, ++ in_signature='oia{sv}', ++ out_signature='(oo)', ++ async_callbacks=('cb', 'cbe')) ++ def WriteCacheLv(self, lv_object, tmo, cache_options, cb, cbe): ++ r = RequestEntry( ++ tmo, Lv._writecache_lv, ++ (self.Uuid, self.lvm_id, lv_object, ++ cache_options), cb, cbe) ++ cfg.worker_q.put(r) ++ ++ + # noinspection PyPep8Naming + @utils.dbus_property(VDO_POOL_INTERFACE, 'OperatingMode', 's') + @utils.dbus_property(VDO_POOL_INTERFACE, 'CompressionState', 's') +diff --git a/daemons/lvmdbusd/manager.py b/daemons/lvmdbusd/manager.py +index 2857e9a..573a396 100644 +--- a/daemons/lvmdbusd/manager.py ++++ b/daemons/lvmdbusd/manager.py +@@ -27,7 +27,7 @@ class Manager(AutomatedProperties): + + @property + def Version(self): +- return dbus.String('1.0.0') ++ return dbus.String('1.1.0') + + @staticmethod + def handle_execute(rc, out, err): +diff --git a/device_mapper/all.h b/device_mapper/all.h +index f00b6a5..c3c6219 100644 +--- a/device_mapper/all.h ++++ b/device_mapper/all.h +@@ -951,6 +951,8 @@ struct writecache_settings { + uint64_t autocommit_time; /* in milliseconds */ + uint32_t fua; + uint32_t nofua; ++ uint32_t cleaner; ++ uint32_t max_age; + + /* + * Allow an unrecognized key and its val to be passed to the kernel for +@@ -970,6 +972,8 @@ struct writecache_settings { + unsigned autocommit_time_set:1; + unsigned fua_set:1; + unsigned nofua_set:1; ++ unsigned cleaner_set:1; ++ unsigned max_age_set:1; + }; + + int dm_tree_node_add_writecache_target(struct dm_tree_node *node, +diff --git a/device_mapper/libdm-deptree.c b/device_mapper/libdm-deptree.c +index 9ba24cb..2722a2c 100644 +--- a/device_mapper/libdm-deptree.c ++++ b/device_mapper/libdm-deptree.c +@@ -2670,6 +2670,10 @@ static int _writecache_emit_segment_line(struct dm_task *dmt, + count += 1; + if (seg->writecache_settings.nofua_set) + count += 1; ++ if (seg->writecache_settings.cleaner_set && seg->writecache_settings.cleaner) ++ count += 1; ++ if (seg->writecache_settings.max_age_set) ++ count += 2; + if (seg->writecache_settings.new_key) + count += 2; + +@@ -2713,6 +2717,14 @@ static int _writecache_emit_segment_line(struct dm_task *dmt, + EMIT_PARAMS(pos, " nofua"); + } + ++ if (seg->writecache_settings.cleaner_set && seg->writecache_settings.cleaner) { ++ EMIT_PARAMS(pos, " cleaner"); ++ } ++ ++ if (seg->writecache_settings.max_age_set) { ++ EMIT_PARAMS(pos, " max_age %u", seg->writecache_settings.max_age); ++ } ++ + if (seg->writecache_settings.new_key) { + EMIT_PARAMS(pos, " %s %s", + seg->writecache_settings.new_key, +diff --git a/include/configure.h.in b/include/configure.h.in +index 57736cc..540cee7 100644 +--- a/include/configure.h.in ++++ b/include/configure.h.in +@@ -531,6 +531,9 @@ + /* Define to 1 if the system has the `__builtin_clzll' built-in function */ + #undef HAVE___BUILTIN_CLZLL + ++/* Define to 1 to include built-in support for integrity. */ ++#undef INTEGRITY_INTERNAL ++ + /* Internalization package */ + #undef INTL_PACKAGE + +@@ -678,9 +681,6 @@ + /* Define to 1 to include built-in support for writecache. */ + #undef WRITECACHE_INTERNAL + +-/* Define to 1 to include built-in support for integrity. 
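The dm-writecache table line carries a count of optional arguments followed by the arguments themselves, which is why the libdm-deptree hunk above bumps count by 1 for "cleaner" and by 2 for "max_age <n>" before emitting them. A rough standalone sketch of that pattern, using an illustrative settings struct rather than the libdevmapper types:

/* Illustrative sketch of counting and emitting optional writecache args. */
#include <stdint.h>
#include <stdio.h>

struct wc_settings {
	unsigned cleaner_set:1;
	unsigned max_age_set:1;
	uint32_t cleaner;
	uint32_t max_age;
};

static void emit_optional_args(char *buf, size_t len, const struct wc_settings *s)
{
	int count = 0, pos = 0;

	/* A bare keyword counts as one argument, a key plus value as two. */
	if (s->cleaner_set && s->cleaner)
		count += 1;
	if (s->max_age_set)
		count += 2;

	pos += snprintf(buf + pos, len - pos, "%d", count);
	if (s->cleaner_set && s->cleaner)
		pos += snprintf(buf + pos, len - pos, " cleaner");
	if (s->max_age_set)
		pos += snprintf(buf + pos, len - pos, " max_age %u", s->max_age);
}

int main(void)
{
	char line[128];
	struct wc_settings s = { .cleaner_set = 1, .cleaner = 1,
				 .max_age_set = 1, .max_age = 30000 };

	emit_optional_args(line, sizeof(line), &s);
	printf("optional args: %s\n", line);	/* prints "optional args: 3 cleaner max_age 30000" */
	return 0;
}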
*/ +-#undef INTEGRITY_INTERNAL +- + /* Define to get access to GNU/Linux extension */ + #undef _GNU_SOURCE + +diff --git a/lib/cache/lvmcache.c b/lib/cache/lvmcache.c +index 6cb5ff0..b1d05fb 100644 +--- a/lib/cache/lvmcache.c ++++ b/lib/cache/lvmcache.c +@@ -84,6 +84,7 @@ static DM_LIST_INIT(_unused_duplicates); + static DM_LIST_INIT(_prev_unused_duplicate_devs); + static int _vgs_locked = 0; + static int _found_duplicate_vgnames = 0; ++static int _outdated_warning = 0; + + int lvmcache_init(struct cmd_context *cmd) + { +@@ -1776,6 +1777,9 @@ int lvmcache_update_vg_from_read(struct volume_group *vg, unsigned precommitted) + log_warn("WARNING: outdated PV %s seqno %u has been removed in current VG %s seqno %u.", + dev_name(info->dev), info->summary_seqno, vg->name, vginfo->seqno); + ++ if (!_outdated_warning++) ++ log_warn("See vgck --updatemetadata to clear outdated metadata."); ++ + _drop_vginfo(info, vginfo); /* remove from vginfo->infos */ + dm_list_add(&vginfo->outdated_infos, &info->list); + } +diff --git a/lib/config/config_settings.h b/lib/config/config_settings.h +index dce9705..b38ca11 100644 +--- a/lib/config/config_settings.h ++++ b/lib/config/config_settings.h +@@ -626,7 +626,7 @@ cfg(allocation_cache_pool_max_chunks_CFG, "cache_pool_max_chunks", allocation_CF + "Using cache pool with more chunks may degrade cache performance.\n") + + cfg(allocation_thin_pool_metadata_require_separate_pvs_CFG, "thin_pool_metadata_require_separate_pvs", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_METADATA_REQUIRE_SEPARATE_PVS, vsn(2, 2, 89), NULL, 0, NULL, +- "Thin pool metdata and data will always use different PVs.\n") ++ "Thin pool metadata and data will always use different PVs.\n") + + cfg(allocation_thin_pool_zero_CFG, "thin_pool_zero", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_ZERO, vsn(2, 2, 99), NULL, 0, NULL, + "Thin pool data chunks are zeroed before they are first used.\n" +@@ -657,6 +657,9 @@ cfg(allocation_thin_pool_chunk_size_policy_CFG, "thin_pool_chunk_size_policy", a + " 512KiB.\n" + "#\n") + ++cfg(allocation_zero_metadata_CFG, "zero_metadata", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_ZERO_METADATA, vsn(2, 3, 10), NULL, 0, NULL, ++ "Zero whole metadata area before use with thin or cache pool.\n") ++ + cfg_runtime(allocation_thin_pool_chunk_size_CFG, "thin_pool_chunk_size", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_UNDEFINED, CFG_TYPE_INT, vsn(2, 2, 99), 0, NULL, + "The minimal chunk size in KiB for thin pool volumes.\n" + "Larger chunk sizes may improve performance for plain thin volumes,\n" +diff --git a/lib/config/defaults.h b/lib/config/defaults.h +index be4f5ff..708a575 100644 +--- a/lib/config/defaults.h ++++ b/lib/config/defaults.h +@@ -129,6 +129,7 @@ + #define DEFAULT_THIN_POOL_DISCARDS "passdown" + #define DEFAULT_THIN_POOL_ZERO 1 + #define DEFAULT_POOL_METADATA_SPARE 1 /* thin + cache */ ++#define DEFAULT_ZERO_METADATA 1 /* thin + cache */ + + #ifdef CACHE_CHECK_NEEDS_CHECK + # define DEFAULT_CACHE_CHECK_OPTION1 "-q" +diff --git a/lib/device/bcache.c b/lib/device/bcache.c +index a7d8055..7e7e185 100644 +--- a/lib/device/bcache.c ++++ b/lib/device/bcache.c +@@ -950,7 +950,7 @@ static struct block *_new_block(struct bcache *cache, int fd, block_address i, b + struct block *b; + + b = _alloc_block(cache); +- while (!b && !dm_list_empty(&cache->clean)) { ++ while (!b) { + b = _find_unused_clean_block(cache); + if (!b) { + if 
(can_wait) { +diff --git a/lib/device/dev-cache.c b/lib/device/dev-cache.c +index 6af559c..c3f7c49 100644 +--- a/lib/device/dev-cache.c ++++ b/lib/device/dev-cache.c +@@ -65,6 +65,7 @@ static int _insert(const char *path, const struct stat *info, + static void _dev_init(struct device *dev) + { + dev->fd = -1; ++ dev->bcache_fd = -1; + dev->read_ahead = -1; + + dev->ext.enabled = 0; +diff --git a/lib/device/dev-md.c b/lib/device/dev-md.c +index 9d0a363..23ce41a 100644 +--- a/lib/device/dev-md.c ++++ b/lib/device/dev-md.c +@@ -16,6 +16,7 @@ + #include "lib/misc/lib.h" + #include "lib/device/dev-type.h" + #include "lib/mm/xlate.h" ++#include "lib/misc/crc.h" + #ifdef UDEV_SYNC_SUPPORT + #include /* for MD detection using udev db records */ + #include "lib/device/dev-ext-udev-constants.h" +@@ -48,44 +49,89 @@ static int _dev_has_md_magic(struct device *dev, uint64_t sb_offset) + return 0; + } + +-/* +- * Calculate the position of the superblock. +- * It is always aligned to a 4K boundary and +- * depending on minor_version, it can be: +- * 0: At least 8K, but less than 12K, from end of device +- * 1: At start of device +- * 2: 4K from start of device. +- */ +-typedef enum { +- MD_MINOR_VERSION_MIN, +- MD_MINOR_V0 = MD_MINOR_VERSION_MIN, +- MD_MINOR_V1, +- MD_MINOR_V2, +- MD_MINOR_VERSION_MAX = MD_MINOR_V2 +-} md_minor_version_t; +- +-static uint64_t _v1_sb_offset(uint64_t size, md_minor_version_t minor_version) ++#define IMSM_SIGNATURE "Intel Raid ISM Cfg Sig. " ++#define IMSM_SIG_LEN (strlen(IMSM_SIGNATURE)) ++ ++static int _dev_has_imsm_magic(struct device *dev, uint64_t devsize_sectors) + { +- uint64_t sb_offset; ++ char imsm_signature[IMSM_SIG_LEN]; ++ uint64_t off = (devsize_sectors * 512) - 1024; + +- switch(minor_version) { +- case MD_MINOR_V0: +- sb_offset = (size - 8 * 2) & ~(4 * 2 - 1ULL); +- break; +- case MD_MINOR_V1: +- sb_offset = 0; +- break; +- case MD_MINOR_V2: +- sb_offset = 4 * 2; +- break; +- default: +- log_warn(INTERNAL_ERROR "WARNING: Unknown minor version %d.", +- minor_version); ++ if (!dev_read_bytes(dev, off, IMSM_SIG_LEN, imsm_signature)) ++ return_0; ++ ++ if (!memcmp(imsm_signature, IMSM_SIGNATURE, IMSM_SIG_LEN)) ++ return 1; ++ ++ return 0; ++} ++ ++#define DDF_MAGIC 0xDE11DE11 ++struct ddf_header { ++ uint32_t magic; ++ uint32_t crc; ++ char guid[24]; ++ char revision[8]; ++ char padding[472]; ++}; ++ ++static int _dev_has_ddf_magic(struct device *dev, uint64_t devsize_sectors, uint64_t *sb_offset) ++{ ++ struct ddf_header hdr; ++ uint32_t crc, our_crc; ++ uint64_t off; ++ uint64_t devsize_bytes = devsize_sectors * 512; ++ ++ if (devsize_bytes < 0x30000) + return 0; ++ ++ /* 512 bytes before the end of device (from libblkid) */ ++ off = ((devsize_bytes / 0x200) - 1) * 0x200; ++ ++ if (!dev_read_bytes(dev, off, 512, &hdr)) ++ return_0; ++ ++ if ((hdr.magic == cpu_to_be32(DDF_MAGIC)) || ++ (hdr.magic == cpu_to_le32(DDF_MAGIC))) { ++ crc = hdr.crc; ++ hdr.crc = 0xffffffff; ++ our_crc = calc_crc(0, (const uint8_t *)&hdr, 512); ++ ++ if ((cpu_to_be32(our_crc) == crc) || ++ (cpu_to_le32(our_crc) == crc)) { ++ *sb_offset = off; ++ return 1; ++ } else { ++ log_debug_devs("Found md ddf magic at %llu wrong crc %x disk %x %s", ++ (unsigned long long)off, our_crc, crc, dev_name(dev)); ++ return 0; ++ } ++ } ++ ++ /* 128KB before the end of device (from libblkid) */ ++ off = ((devsize_bytes / 0x200) - 257) * 0x200; ++ ++ if (!dev_read_bytes(dev, off, 512, &hdr)) ++ return_0; ++ ++ if ((hdr.magic == cpu_to_be32(DDF_MAGIC)) || ++ (hdr.magic == cpu_to_le32(DDF_MAGIC))) { ++ crc 
= hdr.crc; ++ hdr.crc = 0xffffffff; ++ our_crc = calc_crc(0, (const uint8_t *)&hdr, 512); ++ ++ if ((cpu_to_be32(our_crc) == crc) || ++ (cpu_to_le32(our_crc) == crc)) { ++ *sb_offset = off; ++ return 1; ++ } else { ++ log_debug_devs("Found md ddf magic at %llu wrong crc %x disk %x %s", ++ (unsigned long long)off, our_crc, crc, dev_name(dev)); ++ return 0; ++ } + } +- sb_offset <<= SECTOR_SHIFT; + +- return sb_offset; ++ return 0; + } + + /* +@@ -130,7 +176,6 @@ static int _udev_dev_is_md_component(struct device *dev) + */ + static int _native_dev_is_md_component(struct device *dev, uint64_t *offset_found, int full) + { +- md_minor_version_t minor; + uint64_t size, sb_offset; + int ret; + +@@ -146,9 +191,9 @@ static int _native_dev_is_md_component(struct device *dev, uint64_t *offset_foun + return 0; + + /* +- * Old md versions locate the magic number at the end of the device. +- * Those checks can't be satisfied with the initial bcache data, and +- * would require an extra read i/o at the end of every device. Issuing ++ * Some md versions locate the magic number at the end of the device. ++ * Those checks can't be satisfied with the initial scan data, and ++ * require an extra read i/o at the end of every device. Issuing + * an extra read to every device in every command, just to check for + * the old md format is a bad tradeoff. + * +@@ -159,42 +204,81 @@ static int _native_dev_is_md_component(struct device *dev, uint64_t *offset_foun + * and set it for commands that could possibly write to an md dev + * (pvcreate/vgcreate/vgextend). + */ +- if (!full) { +- sb_offset = 0; +- if (_dev_has_md_magic(dev, sb_offset)) { +- log_debug_devs("Found md magic number at offset 0 of %s.", dev_name(dev)); +- ret = 1; +- goto out; +- } + +- sb_offset = 8 << SECTOR_SHIFT; +- if (_dev_has_md_magic(dev, sb_offset)) { +- log_debug_devs("Found md magic number at offset %d of %s.", (int)sb_offset, dev_name(dev)); +- ret = 1; +- goto out; +- } ++ /* ++ * md superblock version 1.1 at offset 0 from start ++ */ ++ ++ if (_dev_has_md_magic(dev, 0)) { ++ log_debug_devs("Found md magic number at offset 0 of %s.", dev_name(dev)); ++ ret = 1; ++ goto out; ++ } + ++ /* ++ * md superblock version 1.2 at offset 4KB from start ++ */ ++ ++ if (_dev_has_md_magic(dev, 4096)) { ++ log_debug_devs("Found md magic number at offset 4096 of %s.", dev_name(dev)); ++ ret = 1; ++ goto out; ++ } ++ ++ if (!full) { + ret = 0; + goto out; + } + +- /* Check if it is an md component device. */ +- /* Version 0.90.0 */ ++ /* ++ * Handle superblocks at the end of the device. 
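The rest of this hunk, together with the helpers added above, probes a handful of fixed superblock locations near the end of the device. A standalone sketch of the offset arithmetic, assuming a device size given in 512-byte sectors and the usual 64KiB md 0.90 reservation (MD_NEW_SIZE_SECTORS is re-derived here and should be treated as an assumption):

/* Illustrative: candidate md superblock offsets checked near the end of a device.
 * Device size is in 512-byte sectors; printed offsets are byte offsets from the start. */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9
#define MD_RESERVED_SECTORS (65536 / 512)	/* assumed 64KiB md 0.90 reservation */
#define MD_NEW_SIZE_SECTORS(s) (((s) & ~(MD_RESERVED_SECTORS - 1ULL)) - MD_RESERVED_SECTORS)

int main(void)
{
	uint64_t size = 2097152;		/* example: a 1GiB device in sectors */
	uint64_t bytes = size << SECTOR_SHIFT;

	/* md 1.1 and 1.2: fixed offsets from the start */
	printf("v1.1:  %llu\n", 0ULL);
	printf("v1.2:  %llu\n", 4096ULL);

	/* md 0.90: 64KiB superblock area below the 64KiB-aligned end */
	printf("v0.90: %llu\n",
	       (unsigned long long)(MD_NEW_SIZE_SECTORS(size) << SECTOR_SHIFT));

	/* md 1.0: 8KiB from the end, rounded down to a 4KiB boundary */
	printf("v1.0:  %llu\n",
	       (unsigned long long)(((size - 8 * 2) & ~(4 * 2 - 1ULL)) << SECTOR_SHIFT));

	/* Intel IMSM container metadata: 1KiB from the end */
	printf("imsm:  %llu\n", (unsigned long long)(bytes - 1024));

	/* DDF anchor header: 512 bytes from the end, or 257 sectors (~128KiB) from the end */
	printf("ddf1:  %llu\n", (unsigned long long)(((bytes / 0x200) - 1) * 0x200));
	printf("ddf2:  %llu\n", (unsigned long long)(((bytes / 0x200) - 257) * 0x200));
	return 0;
}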
++ */ ++ ++ /* ++ * md superblock version 0 at 64KB from end of device ++ * (after end is aligned to 64KB) ++ */ ++ + sb_offset = MD_NEW_SIZE_SECTORS(size) << SECTOR_SHIFT; ++ + if (_dev_has_md_magic(dev, sb_offset)) { ++ log_debug_devs("Found md magic number at offset %llu of %s.", (unsigned long long)sb_offset, dev_name(dev)); + ret = 1; + goto out; + } + +- minor = MD_MINOR_VERSION_MIN; +- /* Version 1, try v1.0 -> v1.2 */ +- do { +- sb_offset = _v1_sb_offset(size, minor); +- if (_dev_has_md_magic(dev, sb_offset)) { +- ret = 1; +- goto out; +- } +- } while (++minor <= MD_MINOR_VERSION_MAX); ++ /* ++ * md superblock version 1.0 at 8KB from end of device ++ */ ++ ++ sb_offset = ((size - 8 * 2) & ~(4 * 2 - 1ULL)) << SECTOR_SHIFT; ++ ++ if (_dev_has_md_magic(dev, sb_offset)) { ++ log_debug_devs("Found md magic number at offset %llu of %s.", (unsigned long long)sb_offset, dev_name(dev)); ++ ret = 1; ++ goto out; ++ } ++ ++ /* ++ * md imsm superblock 1K from end of device ++ */ ++ ++ if (_dev_has_imsm_magic(dev, size)) { ++ log_debug_devs("Found md imsm magic number at offset %llu of %s.", (unsigned long long)sb_offset, dev_name(dev)); ++ sb_offset = 1024; ++ ret = 1; ++ goto out; ++ } ++ ++ /* ++ * md ddf superblock 512 bytes from end, or 128KB from end ++ */ ++ ++ if (_dev_has_ddf_magic(dev, size, &sb_offset)) { ++ log_debug_devs("Found md ddf magic number at offset %llu of %s.", (unsigned long long)sb_offset, dev_name(dev)); ++ ret = 1; ++ goto out; ++ } + + ret = 0; + out: +diff --git a/lib/device/dev-type.c b/lib/device/dev-type.c +index deb5d6a..896821d 100644 +--- a/lib/device/dev-type.c ++++ b/lib/device/dev-type.c +@@ -649,37 +649,23 @@ out: + #ifdef BLKID_WIPING_SUPPORT + int get_fs_block_size(struct device *dev, uint32_t *fs_block_size) + { +- blkid_probe probe = NULL; +- const char *block_size_str = NULL; +- uint64_t block_size_val; +- int r = 0; ++ char *block_size_str = NULL; + +- *fs_block_size = 0; +- +- if (!(probe = blkid_new_probe_from_filename(dev_name(dev)))) { +- log_error("Failed to create a new blkid probe for device %s.", dev_name(dev)); +- goto out; ++ if ((block_size_str = blkid_get_tag_value(NULL, "BLOCK_SIZE", dev_name(dev)))) { ++ *fs_block_size = (uint32_t)atoi(block_size_str); ++ free(block_size_str); ++ log_debug("Found blkid BLOCK_SIZE %u for fs on %s", *fs_block_size, dev_name(dev)); ++ return 1; ++ } else { ++ log_debug("No blkid BLOCK_SIZE for fs on %s", dev_name(dev)); ++ *fs_block_size = 0; ++ return 0; + } +- +- blkid_probe_enable_partitions(probe, 1); +- +- (void) blkid_probe_lookup_value(probe, "BLOCK_SIZE", &block_size_str, NULL); +- +- if (!block_size_str) +- goto out; +- +- block_size_val = strtoull(block_size_str, NULL, 10); +- +- *fs_block_size = (uint32_t)block_size_val; +- r = 1; +-out: +- if (probe) +- blkid_free_probe(probe); +- return r; + } + #else + int get_fs_block_size(struct device *dev, uint32_t *fs_block_size) + { ++ log_debug("Disabled blkid BLOCK_SIZE for fs."); + *fs_block_size = 0; + return 0; + } +diff --git a/lib/label/hints.c b/lib/label/hints.c +index 9546f48..efa02f7 100644 +--- a/lib/label/hints.c ++++ b/lib/label/hints.c +@@ -801,10 +801,8 @@ static int _read_hint_file(struct cmd_context *cmd, struct dm_list *hints, int * + if (fclose(fp)) + stack; + +- if (!ret) { +- free_hints(hints); ++ if (!ret) + return 0; +- } + + if (!found) + return 1; +diff --git a/lib/metadata/cache_manip.c b/lib/metadata/cache_manip.c +index 49b3850..a786e8b 100644 +--- a/lib/metadata/cache_manip.c ++++ b/lib/metadata/cache_manip.c +@@ -1094,6 
+1094,10 @@ int cache_vol_set_params(struct cmd_context *cmd, + if (!meta_size) { + meta_size = _cache_min_metadata_size(pool_lv->size, chunk_size); + ++ /* fix bad value from _cache_min_metadata_size */ ++ if (meta_size > (pool_lv->size / 2)) ++ meta_size = pool_lv->size / 2; ++ + if (meta_size < min_meta_size) + meta_size = min_meta_size; + +diff --git a/lib/metadata/integrity_manip.c b/lib/metadata/integrity_manip.c +index 7942be0..3322a21 100644 +--- a/lib/metadata/integrity_manip.c ++++ b/lib/metadata/integrity_manip.c +@@ -21,7 +21,6 @@ + #include "lib/metadata/segtype.h" + #include "lib/activate/activate.h" + #include "lib/config/defaults.h" +-#include "lib/activate/dev_manager.h" + + #define DEFAULT_TAG_SIZE 4 /* bytes */ + #define DEFAULT_MODE 'J' +@@ -29,6 +28,7 @@ + #define DEFAULT_BLOCK_SIZE 512 + + #define ONE_MB_IN_BYTES 1048576 ++#define ONE_GB_IN_BYTES 1073741824 + + int lv_is_integrity_origin(const struct logical_volume *lv) + { +@@ -46,10 +46,35 @@ int lv_is_integrity_origin(const struct logical_volume *lv) + /* + * Every 500M of data needs 4M of metadata. + * (From trial and error testing.) ++ * ++ * plus some initial space for journals. ++ * (again from trial and error testing.) + */ + static uint64_t _lv_size_bytes_to_integrity_meta_bytes(uint64_t lv_size_bytes) + { +- return ((lv_size_bytes / (500 * ONE_MB_IN_BYTES)) + 1) * (4 * ONE_MB_IN_BYTES); ++ uint64_t meta_bytes; ++ uint64_t initial_bytes; ++ ++ /* Every 500M of data needs 4M of metadata. */ ++ meta_bytes = ((lv_size_bytes / (500 * ONE_MB_IN_BYTES)) + 1) * (4 * ONE_MB_IN_BYTES); ++ ++ /* ++ * initial space used for journals ++ * lv_size <= 512M -> 4M ++ * lv_size <= 1G -> 8M ++ * lv_size <= 4G -> 32M ++ * lv_size > 4G -> 64M ++ */ ++ if (lv_size_bytes <= (512 * ONE_MB_IN_BYTES)) ++ initial_bytes = 4 * ONE_MB_IN_BYTES; ++ else if (lv_size_bytes <= ONE_GB_IN_BYTES) ++ initial_bytes = 8 * ONE_MB_IN_BYTES; ++ else if (lv_size_bytes <= (4ULL * ONE_GB_IN_BYTES)) ++ initial_bytes = 32 * ONE_MB_IN_BYTES; ++ else if (lv_size_bytes > (4ULL * ONE_GB_IN_BYTES)) ++ initial_bytes = 64 * ONE_MB_IN_BYTES; ++ ++ return meta_bytes + initial_bytes; + } + + /* +@@ -278,7 +303,7 @@ int lv_remove_integrity_from_raid(struct logical_volume *lv) + return 1; + } + +-static int _set_integrity_block_size(struct cmd_context *cmd, struct logical_volume *lv, ++static int _set_integrity_block_size(struct cmd_context *cmd, struct logical_volume *lv, int is_active, + struct integrity_settings *settings, + int lbs_4k, int lbs_512, int pbs_4k, int pbs_512) + { +@@ -375,7 +400,13 @@ static int _set_integrity_block_size(struct cmd_context *cmd, struct logical_vol + } + + if (!settings->block_size) { +- if (fs_block_size <= 4096) ++ if (is_active && lbs_512) { ++ /* increasing the lbs from 512 to 4k under an active LV could cause problems ++ for an application that expects a given io size/alignment is possible. 
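The sizing rule added above (4MiB of metadata for every started 500MiB of data, plus a journal allowance that steps up with LV size) can be checked with a small standalone sketch; the example sizes printed in main() are arbitrary:

/* Illustrative: integrity metadata size estimate mirroring the rule above. */
#include <stdint.h>
#include <stdio.h>

#define ONE_MB_IN_BYTES 1048576ULL
#define ONE_GB_IN_BYTES 1073741824ULL

static uint64_t integrity_meta_bytes(uint64_t lv_size_bytes)
{
	/* 4MiB of metadata for every (started) 500MiB of data. */
	uint64_t meta = ((lv_size_bytes / (500 * ONE_MB_IN_BYTES)) + 1) * (4 * ONE_MB_IN_BYTES);
	uint64_t initial;

	/* Initial journal allowance, stepped by LV size. */
	if (lv_size_bytes <= 512 * ONE_MB_IN_BYTES)
		initial = 4 * ONE_MB_IN_BYTES;
	else if (lv_size_bytes <= ONE_GB_IN_BYTES)
		initial = 8 * ONE_MB_IN_BYTES;
	else if (lv_size_bytes <= 4 * ONE_GB_IN_BYTES)
		initial = 32 * ONE_MB_IN_BYTES;
	else
		initial = 64 * ONE_MB_IN_BYTES;

	return meta + initial;
}

int main(void)
{
	uint64_t sizes[] = { 256 * ONE_MB_IN_BYTES, ONE_GB_IN_BYTES, 10 * ONE_GB_IN_BYTES };

	for (int i = 0; i < 3; i++)
		printf("data %llu MiB -> metadata %llu MiB\n",
		       (unsigned long long)(sizes[i] / ONE_MB_IN_BYTES),
		       (unsigned long long)(integrity_meta_bytes(sizes[i]) / ONE_MB_IN_BYTES));
	return 0;
}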
*/ ++ settings->block_size = 512; ++ if (fs_block_size > 512) ++ log_print("Limiting integrity block size to 512 because the LV is active."); ++ } else if (fs_block_size <= 4096) + settings->block_size = fs_block_size; + else + settings->block_size = 4096; /* dm-integrity max is 4096 */ +@@ -587,13 +618,33 @@ int lv_add_integrity_to_raid(struct logical_volume *lv, struct integrity_setting + } + } + ++ if (!is_active) { ++ /* checking block size of fs on the lv requires the lv to be active */ ++ if (!activate_lv(cmd, lv)) { ++ log_error("Failed to activate LV to check block size %s", display_lvname(lv)); ++ goto bad; ++ } ++ if (!sync_local_dev_names(cmd)) ++ stack; ++ } ++ + /* + * Set settings->block_size which will be copied to segment settings below. + * integrity block size chosen based on device logical block size and + * file system block size. + */ +- if (!_set_integrity_block_size(cmd, lv, settings, lbs_4k, lbs_512, pbs_4k, pbs_512)) ++ if (!_set_integrity_block_size(cmd, lv, is_active, settings, lbs_4k, lbs_512, pbs_4k, pbs_512)) { ++ if (!is_active && !deactivate_lv(cmd, lv)) ++ stack; + goto_bad; ++ } ++ ++ if (!is_active) { ++ if (!deactivate_lv(cmd, lv)) { ++ log_error("Failed to deactivate LV after checking block size %s", display_lvname(lv)); ++ goto bad; ++ } ++ } + + /* + * For each rimage, move its segments to a new rimage_iorig and give +diff --git a/lib/metadata/lv.c b/lib/metadata/lv.c +index 4ee58b4..fac47e5 100644 +--- a/lib/metadata/lv.c ++++ b/lib/metadata/lv.c +@@ -1412,6 +1412,9 @@ char *lv_attr_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_ + } else if (lvdm->seg_status.type == SEG_STATUS_THIN) { + if (lvdm->seg_status.thin->fail) + repstr[8] = 'F'; ++ } else if (lvdm->seg_status.type == SEG_STATUS_WRITECACHE) { ++ if (lvdm->seg_status.writecache->error) ++ repstr[8] = 'E'; + } else if (lvdm->seg_status.type == SEG_STATUS_UNKNOWN) + repstr[8] = 'X'; /* Unknown */ + +diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c +index 1311f70..f0ba3f0 100644 +--- a/lib/metadata/lv_manip.c ++++ b/lib/metadata/lv_manip.c +@@ -5066,6 +5066,7 @@ static int _lvresize_check(struct logical_volume *lv, + struct lvresize_params *lp) + { + struct volume_group *vg = lv->vg; ++ struct lv_segment *seg = first_seg(lv); + + if (lv_is_external_origin(lv)) { + /* +@@ -5089,6 +5090,12 @@ static int _lvresize_check(struct logical_volume *lv, + return 0; + } + ++ if (seg && (seg_is_raid4(seg) || seg_is_any_raid5(seg)) && seg->area_count < 3) { ++ log_error("Cannot resize %s LV %s. Convert to more stripes first.", ++ lvseg_name(seg), display_lvname(lv)); ++ return 0; ++ } ++ + if (lv_is_raid(lv) && + lp->resize == LV_REDUCE) { + unsigned attrs; +@@ -6568,7 +6575,20 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv, + } + } + +- if (lv_is_used_cache_pool(lv) || lv_is_cache_vol(lv)) { ++ if (lv_is_cache_vol(lv)) { ++ if ((cache_seg = get_only_segment_using_this_lv(lv))) { ++ /* When used with cache, lvremove on cachevol also removes the cache! */ ++ if (seg_is_cache(cache_seg)) { ++ if (!lv_cache_remove(cache_seg->lv)) ++ return_0; ++ } else if (seg_is_writecache(cache_seg)) { ++ log_error("Detach cachevol before removing."); ++ return 0; ++ } ++ } ++ } ++ ++ if (lv_is_used_cache_pool(lv)) { + /* Cache pool removal drops cache layer + * If the cache pool is not linked, we can simply remove it. 
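A condensed standalone sketch of the block-size choice made in _set_integrity_block_size() above: an already active LV with a 512-byte logical block size stays at 512 so the io geometry does not change underneath an application, otherwise the filesystem block size is used, capped at the 4096-byte dm-integrity maximum (function and parameter names here are illustrative, not the lvm2 API):

/* Illustrative: how an integrity block size is chosen. */
#include <stdio.h>

static unsigned pick_integrity_block_size(unsigned requested, int is_active,
					  int lbs_512, unsigned fs_block_size)
{
	if (requested)
		return requested;	/* explicit user setting wins */

	if (is_active && lbs_512)
		return 512;		/* do not grow the block size under an active LV */

	if (fs_block_size && fs_block_size <= 4096)
		return fs_block_size;

	return 4096;			/* dm-integrity maximum */
}

int main(void)
{
	printf("%u\n", pick_integrity_block_size(0, 1, 1, 4096));	/* 512 */
	printf("%u\n", pick_integrity_block_size(0, 0, 1, 1024));	/* 1024 */
	printf("%u\n", pick_integrity_block_size(0, 0, 0, 8192));	/* 4096 */
	return 0;
}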
*/ + if (!(cache_seg = get_only_segment_using_this_lv(lv))) +@@ -6832,7 +6852,7 @@ static int _lv_update_and_reload(struct logical_volume *lv, int origin_only) + } + + if (!(origin_only ? suspend_lv_origin(vg->cmd, lock_lv) : suspend_lv(vg->cmd, lock_lv))) { +- log_error("Failed to lock logical volume %s.", ++ log_error("Failed to suspend logical volume %s.", + display_lvname(lock_lv)); + vg_revert(vg); + } else if (!(r = vg_commit(vg))) +@@ -7556,20 +7576,22 @@ int wipe_lv(struct logical_volume *lv, struct wipe_params wp) + struct device *dev; + char name[PATH_MAX]; + uint64_t zero_sectors; ++ int zero_metadata = wp.is_metadata ? ++ find_config_tree_bool(lv->vg->cmd, allocation_zero_metadata_CFG, NULL) : 0; + +- if (!wp.do_zero && !wp.do_wipe_signatures) ++ if (!wp.do_zero && !wp.do_wipe_signatures && !wp.is_metadata) + /* nothing to do */ + return 1; + + if (!lv_is_active(lv)) { +- log_error("Volume \"%s/%s\" is not active locally (volume_list activation filter?).", +- lv->vg->name, lv->name); ++ log_error("Volume %s is not active locally (volume_list activation filter?).", ++ display_lvname(lv)); + return 0; + } + + /* Wait until devices are available */ + if (!sync_local_dev_names(lv->vg->cmd)) { +- log_error("Failed to sync local devices before wiping LV %s.", ++ log_error("Failed to sync local devices before wiping volume %s.", + display_lvname(lv)); + return 0; + } +@@ -7593,40 +7615,59 @@ int wipe_lv(struct logical_volume *lv, struct wipe_params wp) + } + + if (!label_scan_open_rw(dev)) { +- log_error("Failed to open %s/%s for wiping and zeroing.", lv->vg->name, lv->name); +- goto out; ++ log_error("Failed to open %s for wiping and zeroing.", display_lvname(lv)); ++ return 0; + } + + if (wp.do_wipe_signatures) { +- log_verbose("Wiping known signatures on logical volume \"%s/%s\"", +- lv->vg->name, lv->name); ++ log_verbose("Wiping known signatures on logical volume %s.", ++ display_lvname(lv)); + if (!wipe_known_signatures(lv->vg->cmd, dev, name, 0, + TYPE_DM_SNAPSHOT_COW, +- wp.yes, wp.force, NULL)) +- stack; ++ wp.yes, wp.force, NULL)) { ++ log_error("Filed to wipe signatures of logical volume %s.", ++ display_lvname(lv)); ++ return 0; ++ } + } + +- if (wp.do_zero) { +- zero_sectors = wp.zero_sectors ? : UINT64_C(4096) >> SECTOR_SHIFT; +- +- if (zero_sectors > lv->size) ++ if (wp.do_zero || wp.is_metadata) { ++ zero_metadata = !wp.is_metadata ? 
0 : ++ find_config_tree_bool(lv->vg->cmd, allocation_zero_metadata_CFG, NULL); ++ if (zero_metadata) { ++ log_debug("Metadata logical volume %s will be fully zeroed.", ++ display_lvname(lv)); + zero_sectors = lv->size; ++ } else { ++ if (wp.is_metadata) /* Verbosely notify metadata will not be fully zeroed */ ++ log_verbose("Metadata logical volume %s not fully zeroed and may contain stale data.", ++ display_lvname(lv)); ++ zero_sectors = UINT64_C(4096) >> SECTOR_SHIFT; ++ if (wp.zero_sectors > zero_sectors) ++ zero_sectors = wp.zero_sectors; + +- log_verbose("Initializing %s of logical volume \"%s/%s\" with value %d.", +- display_size(lv->vg->cmd, zero_sectors), +- lv->vg->name, lv->name, wp.zero_value); ++ if (zero_sectors > lv->size) ++ zero_sectors = lv->size; ++ } + +- if (!wp.zero_value) { +- if (!dev_write_zeros(dev, UINT64_C(0), (size_t) zero_sectors << SECTOR_SHIFT)) +- stack; +- } else { +- if (!dev_set_bytes(dev, UINT64_C(0), (size_t) zero_sectors << SECTOR_SHIFT, (uint8_t)wp.zero_value)) +- stack; ++ log_verbose("Initializing %s of logical volume %s with value %d.", ++ display_size(lv->vg->cmd, zero_sectors), ++ display_lvname(lv), wp.zero_value); ++ ++ if ((!wp.is_metadata && ++ wp.zero_value && !dev_set_bytes(dev, UINT64_C(0), ++ (size_t) zero_sectors << SECTOR_SHIFT, ++ (uint8_t)wp.zero_value)) || ++ !dev_write_zeros(dev, UINT64_C(0), (size_t) zero_sectors << SECTOR_SHIFT)) { ++ log_error("Failed to initialize %s of logical volume %s with value %d.", ++ display_size(lv->vg->cmd, zero_sectors), ++ display_lvname(lv), wp.zero_value); ++ return 0; + } + } + + label_scan_invalidate(dev); +-out: ++ + lv->status &= ~LV_NOSCAN; + + return 1; +@@ -7690,12 +7731,10 @@ int activate_and_wipe_lvlist(struct dm_list *lv_list, int commit) + } + + dm_list_iterate_items(lvl, lv_list) { +- log_verbose("Wiping metadata area %s.", display_lvname(lvl->lv)); + /* Wipe any know signatures */ +- if (!wipe_lv(lvl->lv, (struct wipe_params) { .do_wipe_signatures = 1, .do_zero = 1, .zero_sectors = 1 })) { +- log_error("Failed to wipe %s.", display_lvname(lvl->lv)); ++ if (!wipe_lv(lvl->lv, (struct wipe_params) { .do_zero = 1 /* TODO: is_metadata = 1 */ })) { + r = 0; +- goto out; ++ goto_out; + } + } + out: +@@ -8440,7 +8479,8 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg, + .do_zero = lp->zero, + .do_wipe_signatures = lp->wipe_signatures, + .yes = lp->yes, +- .force = lp->force ++ .force = lp->force, ++ .is_metadata = lp->is_metadata, + })) { + log_error("Aborting. Failed to wipe %s.", lp->snapshot + ? 
"snapshot exception store" : "start of new LV"); +diff --git a/lib/metadata/merge.c b/lib/metadata/merge.c +index ecd55ef..1d47449 100644 +--- a/lib/metadata/merge.c ++++ b/lib/metadata/merge.c +@@ -441,7 +441,8 @@ static void _check_lv_segment(struct logical_volume *lv, struct lv_segment *seg, + if (seg_is_mirror(seg)) { + if (!seg->region_size) + seg_error("region size is zero"); +- else if (seg->region_size > seg->lv->size) ++ /* Avoid regionsize check in case of 'mirrored' mirror log or larger than mlog regionsize will fail */ ++ else if (!strstr(seg->lv->name, "_mlog") && (seg->region_size > seg->lv->size)) + seg_error("region size is bigger then LV itself"); + else if (!is_power_of_2(seg->region_size)) + seg_error("region size is non power of 2"); +diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h +index 083f74a..06ea757 100644 +--- a/lib/metadata/metadata-exported.h ++++ b/lib/metadata/metadata-exported.h +@@ -89,8 +89,7 @@ + #define PARTIAL_LV UINT64_C(0x0000000001000000) /* LV - derived flag, not + written out in metadata*/ + +-//#define POSTORDER_FLAG UINT64_C(0x0000000002000000) /* Not real flags, reserved for +-//#define POSTORDER_OPEN_FLAG UINT64_C(0x0000000004000000) temporary use inside vg_read_internal. */ ++#define WRITECACHE_ORIGIN UINT64_C(0x0000000002000000) + #define INTEGRITY_METADATA UINT64_C(0x0000000004000000) /* LV - Internal use only */ + #define VIRTUAL_ORIGIN UINT64_C(0x0000000008000000) /* LV - internal use only */ + +@@ -804,6 +803,7 @@ struct wipe_params { + int do_wipe_signatures; /* should we wipe known signatures found on LV? */ + int yes; /* answer yes automatically to all questions */ + force_t force; /* force mode */ ++ int is_metadata; /* wipe volume is metadata LV */ + }; + + /* Zero out LV and/or wipe signatures */ +@@ -955,6 +955,8 @@ struct lvcreate_params { + int thin_chunk_size_calc_policy; + unsigned suppress_zero_warn : 1; + unsigned needs_lockd_init : 1; ++ unsigned ignore_type : 1; ++ unsigned is_metadata : 1; /* created LV will be used as metadata LV (and can be zeroed) */ + + const char *vg_name; /* only-used when VG is not yet opened (in /tools) */ + const char *lv_name; /* all */ +@@ -1097,6 +1099,7 @@ int lv_is_cow(const struct logical_volume *lv); + int lv_is_cache_origin(const struct logical_volume *lv); + int lv_is_writecache_origin(const struct logical_volume *lv); + int lv_is_writecache_cachevol(const struct logical_volume *lv); ++int writecache_settings_to_str_list(struct writecache_settings *settings, struct dm_list *result, struct dm_pool *mem); + + int lv_is_integrity_origin(const struct logical_volume *lv); + +diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c +index 4b8dce9..c0d4206 100644 +--- a/lib/metadata/metadata.c ++++ b/lib/metadata/metadata.c +@@ -4875,8 +4875,10 @@ static struct volume_group *_vg_read(struct cmd_context *cmd, + } + } + +- if (found_old_metadata) ++ if (found_old_metadata) { + log_warn("WARNING: Inconsistent metadata found for VG %s.", vgname); ++ log_warn("See vgck --updatemetadata to correct inconsistency."); ++ } + + vg = NULL; + +diff --git a/lib/metadata/pool_manip.c b/lib/metadata/pool_manip.c +index bed51f1..23b5b63 100644 +--- a/lib/metadata/pool_manip.c ++++ b/lib/metadata/pool_manip.c +@@ -545,8 +545,8 @@ int create_pool(struct logical_volume *pool_lv, + display_lvname(pool_lv)); + goto bad; + } +- /* Clear 4KB of pool metadata device. */ +- if (!(r = wipe_lv(pool_lv, (struct wipe_params) { .do_zero = 1 }))) { ++ /* Clear pool metadata device. 
*/ ++ if (!(r = wipe_lv(pool_lv, (struct wipe_params) { .is_metadata = 1 }))) { + log_error("Aborting. Failed to wipe pool metadata %s.", + display_lvname(pool_lv)); + } +@@ -627,6 +627,7 @@ struct logical_volume *alloc_pool_metadata(struct logical_volume *pool_lv, + .tags = DM_LIST_HEAD_INIT(lvc.tags), + .temporary = 1, + .zero = 1, ++ .is_metadata = 1, + }; + + if (!(lvc.segtype = get_segtype_from_string(pool_lv->vg->cmd, SEG_TYPE_NAME_STRIPED))) +@@ -663,6 +664,7 @@ static struct logical_volume *_alloc_pool_metadata_spare(struct volume_group *vg + .tags = DM_LIST_HEAD_INIT(lp.tags), + .temporary = 1, + .zero = 1, ++ .is_metadata = 1, + }; + + if (!(lp.segtype = get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_STRIPED))) +diff --git a/lib/metadata/raid_manip.c b/lib/metadata/raid_manip.c +index 3b3e1d3..1ff2a0c 100644 +--- a/lib/metadata/raid_manip.c ++++ b/lib/metadata/raid_manip.c +@@ -3229,6 +3229,11 @@ int lv_raid_change_image_count(struct logical_volume *lv, int yes, uint32_t new_ + const char *level = seg->area_count == 1 ? "raid1 with " : ""; + const char *resil = new_count < seg->area_count ? "reducing" : "enhancing"; + ++ if (new_count == seg->area_count) { ++ log_warn("Type %s LV %s already has %u images.", lvseg_name(seg), display_lvname(lv), new_count); ++ return 1; ++ } ++ + /* LV must be active to perform raid conversion operations */ + if (!lv_is_active(lv)) { + log_error("%s must be active to perform this operation.", +diff --git a/lib/metadata/snapshot_manip.c b/lib/metadata/snapshot_manip.c +index 3faea0e..0f48e62 100644 +--- a/lib/metadata/snapshot_manip.c ++++ b/lib/metadata/snapshot_manip.c +@@ -389,8 +389,6 @@ int validate_snapshot_origin(const struct logical_volume *origin_lv) + err = "raid subvolumes"; + } else if (lv_is_raid(origin_lv) && lv_raid_has_integrity((struct logical_volume *)origin_lv)) { + err = "raid with integrity"; +- } else if (lv_is_writecache(origin_lv)) { +- err = "writecache"; + } + + if (err) { +diff --git a/lib/metadata/writecache_manip.c b/lib/metadata/writecache_manip.c +index 31d069e..fade82e 100644 +--- a/lib/metadata/writecache_manip.c ++++ b/lib/metadata/writecache_manip.c +@@ -21,11 +21,21 @@ + #include "lib/metadata/segtype.h" + #include "lib/activate/activate.h" + #include "lib/config/defaults.h" ++#include "lib/datastruct/str_list.h" + + int lv_is_writecache_origin(const struct logical_volume *lv) + { + struct lv_segment *seg; + ++ /* ++ * This flag is needed when removing writecache from an origin ++ * in which case the lv connections have been destroyed and ++ * identifying a writecache origin by these connections doesn't ++ * work. ++ */ ++ if (lv->status & WRITECACHE_ORIGIN) ++ return 1; ++ + /* Make sure there's exactly one segment in segs_using_this_lv! 
*/ + if (dm_list_empty(&lv->segs_using_this_lv) || + (dm_list_size(&lv->segs_using_this_lv) > 1)) +@@ -48,46 +58,6 @@ int lv_is_writecache_cachevol(const struct logical_volume *lv) + return 0; + } + +-static int _lv_writecache_detach(struct cmd_context *cmd, struct logical_volume *lv, +- struct logical_volume *lv_fast) +-{ +- struct lv_segment *seg = first_seg(lv); +- struct logical_volume *origin; +- +- if (!seg_is_writecache(seg)) { +- log_error("LV %s segment is not writecache.", display_lvname(lv)); +- return 0; +- } +- +- if (!seg->writecache) { +- log_error("LV %s writecache segment has no writecache.", display_lvname(lv)); +- return 0; +- } +- +- if (!(origin = seg_lv(seg, 0))) { +- log_error("LV %s writecache segment has no origin", display_lvname(lv)); +- return 0; +- } +- +- if (!remove_seg_from_segs_using_this_lv(seg->writecache, seg)) +- return_0; +- +- lv_set_visible(seg->writecache); +- +- lv->status &= ~WRITECACHE; +- seg->writecache = NULL; +- +- lv_fast->status &= ~LV_CACHE_VOL; +- +- if (!remove_layer_from_lv(lv, origin)) +- return_0; +- +- if (!lv_remove(origin)) +- return_0; +- +- return 1; +-} +- + static int _get_writecache_kernel_error(struct cmd_context *cmd, + struct logical_volume *lv, + uint32_t *kernel_error) +@@ -131,13 +101,64 @@ fail: + return 0; + } + +-int lv_detach_writecache_cachevol(struct logical_volume *lv, int noflush) ++static void _rename_detached_cvol(struct cmd_context *cmd, struct logical_volume *lv_fast) ++{ ++ struct volume_group *vg = lv_fast->vg; ++ char cvol_name[NAME_LEN]; ++ char *suffix, *cvol_name_dup; ++ ++ /* ++ * Rename lv_fast back to its original name, without the _cvol ++ * suffix that was added when lv_fast was attached for caching. ++ * If the name is in use, generate new lvol%d. ++ * Failing to rename is not really a problem, so we intentionally ++ * do not consider some things here as errors. 
++ */ ++ if (!dm_strncpy(cvol_name, lv_fast->name, sizeof(cvol_name)) || ++ !(suffix = strstr(cvol_name, "_cvol"))) { ++ log_debug("LV %s has no suffix for cachevol (skipping rename).", ++ display_lvname(lv_fast)); ++ return; ++ } ++ ++ *suffix = 0; ++ if (lv_name_is_used_in_vg(vg, cvol_name, NULL) && ++ !generate_lv_name(vg, "lvol%d", cvol_name, sizeof(cvol_name))) { ++ log_warn("Failed to generate new unique name for unused LV %s", lv_fast->name); ++ return; ++ } ++ ++ if (!(cvol_name_dup = dm_pool_strdup(vg->vgmem, cvol_name))) { ++ stack; ++ return; ++ } ++ ++ lv_fast->name = cvol_name_dup; ++} ++ ++static int _lv_detach_writecache_cachevol_inactive(struct logical_volume *lv, int noflush) + { + struct cmd_context *cmd = lv->vg->cmd; ++ struct volume_group *vg = lv->vg; + struct logical_volume *lv_fast; ++ struct logical_volume *lv_wcorig; ++ struct lv_segment *seg = first_seg(lv); + uint32_t kernel_error = 0; + +- lv_fast = first_seg(lv)->writecache; ++ if (!seg_is_writecache(seg)) { ++ log_error("LV %s segment is not writecache.", display_lvname(lv)); ++ return 0; ++ } ++ ++ if (!(lv_fast = seg->writecache)) { ++ log_error("LV %s writecache segment has no writecache.", display_lvname(lv)); ++ return 0; ++ } ++ ++ if (!(lv_wcorig = seg_lv(seg, 0))) { ++ log_error("LV %s writecache segment has no origin", display_lvname(lv)); ++ return 0; ++ } + + if (noflush) + goto detach; +@@ -157,6 +178,8 @@ int lv_detach_writecache_cachevol(struct logical_volume *lv, int noflush) + + if (!sync_local_dev_names(cmd)) { + log_error("Failed to sync local devices before detaching writecache."); ++ if (!deactivate_lv(cmd, lv)) ++ log_error("Failed to deactivate %s.", display_lvname(lv)); + return 0; + } + +@@ -176,7 +199,8 @@ int lv_detach_writecache_cachevol(struct logical_volume *lv, int noflush) + + if (kernel_error) { + log_error("Failed to flush writecache (error %u) for %s.", kernel_error, display_lvname(lv)); +- deactivate_lv(cmd, lv); ++ if (!deactivate_lv(cmd, lv)) ++ log_error("Failed to deactivate %s.", display_lvname(lv)); + return 0; + } + +@@ -188,11 +212,262 @@ int lv_detach_writecache_cachevol(struct logical_volume *lv, int noflush) + lv->status &= ~LV_TEMPORARY; + + detach: +- if (!_lv_writecache_detach(cmd, lv, lv_fast)) { +- log_error("Failed to detach writecache from %s", display_lvname(lv)); ++ if (!remove_seg_from_segs_using_this_lv(lv_fast, seg)) ++ return_0; ++ ++ lv->status &= ~WRITECACHE; ++ seg->writecache = NULL; ++ ++ if (!remove_layer_from_lv(lv, lv_wcorig)) ++ return_0; ++ ++ if (!lv_remove(lv_wcorig)) ++ return_0; ++ ++ lv_set_visible(lv_fast); ++ lv_fast->status &= ~LV_CACHE_VOL; ++ ++ _rename_detached_cvol(cmd, lv_fast); ++ ++ if (!vg_write(vg) || !vg_commit(vg)) ++ return_0; ++ ++ return 1; ++} ++ ++static int _lv_detach_writecache_cachevol_active(struct logical_volume *lv, int noflush) ++{ ++ struct cmd_context *cmd = lv->vg->cmd; ++ struct volume_group *vg = lv->vg; ++ struct logical_volume *lv_fast; ++ struct logical_volume *lv_wcorig; ++ struct logical_volume *lv_old; ++ struct lv_segment *seg = first_seg(lv); ++ uint32_t kernel_error = 0; ++ ++ if (!seg_is_writecache(seg)) { ++ log_error("LV %s segment is not writecache.", display_lvname(lv)); ++ return 0; ++ } ++ ++ if (!(lv_fast = seg->writecache)) { ++ log_error("LV %s writecache segment has no writecache.", display_lvname(lv)); + return 0; + } + ++ if (!(lv_wcorig = seg_lv(seg, 0))) { ++ log_error("LV %s writecache segment has no origin", display_lvname(lv)); ++ return 0; ++ } ++ ++ if (noflush) ++ goto detach; 
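/*
 * Overview of the sequence implemented in the rest of this function:
 * the writecache is told to flush_on_suspend, the detached layout is
 * written with vg_write(), the still-cached (committed) LV is suspended
 * so the flush actually happens, the writecache kernel error status is
 * checked before anything becomes permanent, and only a clean flush is
 * followed by vg_commit() and resume_lv(); the cachevol and the former
 * wcorig LV are then deactivated, the wcorig is removed, and the
 * cachevol is renamed back without its _cvol suffix.
 */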
++ ++ if (!lv_writecache_message(lv, "flush_on_suspend")) { ++ log_error("Failed to set flush_on_suspend in writecache detach %s.", display_lvname(lv)); ++ return 0; ++ } ++ ++ detach: ++ if (!remove_seg_from_segs_using_this_lv(lv_fast, seg)) { ++ log_error("Failed to remove seg in writecache detach."); ++ return 0; ++ } ++ ++ lv->status &= ~WRITECACHE; ++ seg->writecache = NULL; ++ ++ if (!remove_layer_from_lv(lv, lv_wcorig)) { ++ log_error("Failed to remove lv layer in writecache detach."); ++ return 0; ++ } ++ ++ /* ++ * vg_write(), suspend_lv(), vg_commit(), resume_lv(). ++ * usually done by lv_update_and_reload for an active lv, ++ * but in this case we need to check for writecache errors ++ * after suspend. ++ */ ++ ++ if (!vg_write(vg)) { ++ log_error("Failed to write VG in writecache detach."); ++ return 0; ++ } ++ ++ /* ++ * The version of LV before removal of writecache. When need to ++ * check for kernel errors based on the old version of LV which ++ * is still present in the kernel. ++ */ ++ if (!(lv_old = (struct logical_volume *)lv_committed(lv))) { ++ log_error("Failed to get lv_committed in writecache detach."); ++ return 0; ++ } ++ ++ /* ++ * suspend does not use 'lv' as we know it here, but grabs the ++ * old (precommitted) version of 'lv' using lv_committed(), ++ * which is from vg->vg_comitted. ++ */ ++ log_debug("Suspending writecache to detach %s", display_lvname(lv)); ++ ++ if (!suspend_lv(cmd, lv)) { ++ log_error("Failed to suspend LV in writecache detach."); ++ vg_revert(vg); ++ return 0; ++ } ++ ++ log_debug("Checking writecache errors to detach."); ++ ++ if (!_get_writecache_kernel_error(cmd, lv_old, &kernel_error)) { ++ log_error("Failed to get writecache error status for %s.", display_lvname(lv_old)); ++ return 0; ++ } ++ ++ if (kernel_error) { ++ log_error("Failed to flush writecache (error %u) for %s.", kernel_error, display_lvname(lv)); ++ return 0; ++ } ++ ++ if (!vg_commit(vg)) { ++ log_error("Failed to commit VG in writecache detach."); ++ return 0; ++ } ++ ++ /* ++ * Since vg_commit has happened, vg->vg_committed is now the ++ * newest copy of lv, so resume uses the 'lv' that we know ++ * here. ++ */ ++ log_debug("Resuming after writecache detached %s", display_lvname(lv)); ++ ++ if (!resume_lv(cmd, lv)) { ++ log_error("Failed to resume LV in writecache detach."); ++ return 0; ++ } ++ ++ log_debug("Deactivating previous cachevol %s", display_lvname(lv_fast)); ++ ++ if (!deactivate_lv(cmd, lv_fast)) ++ log_error("Failed to deactivate previous cachevol in writecache detach."); ++ ++ /* ++ * Needed for lv_is_writecache_origin to know lv_wcorig was ++ * a writecache origin, which is needed so that the -real ++ * dm uuid suffix is applied, which is needed for deactivate to ++ * work. This is a hacky roundabout way of setting the -real ++ * uuid suffix (it would be nice to have a deactivate command ++ * that accepts a dm uuid.) 
++ */ ++ lv_wcorig->status |= WRITECACHE_ORIGIN; ++ ++ log_debug("Deactivating previous wcorig %s", display_lvname(lv_wcorig)); ++ ++ if (!lv_deactivate(cmd, NULL, lv_wcorig)) ++ log_error("Failed to deactivate previous wcorig LV in writecache detach."); ++ ++ log_debug("Removing previous wcorig %s", display_lvname(lv_wcorig)); ++ ++ if (!lv_remove(lv_wcorig)) { ++ log_error("Failed to remove previous wcorig LV in writecache detach."); ++ return 0; ++ } ++ ++ lv_set_visible(lv_fast); ++ lv_fast->status &= ~LV_CACHE_VOL; ++ ++ _rename_detached_cvol(cmd, lv_fast); ++ ++ if (!vg_write(vg) || !vg_commit(vg)) { ++ log_error("Failed to write and commit VG in writecache detach."); ++ return 0; ++ } ++ ++ return 1; ++} ++ ++int lv_detach_writecache_cachevol(struct logical_volume *lv, int noflush) ++{ ++ if (lv_is_active(lv)) ++ return _lv_detach_writecache_cachevol_active(lv, noflush); ++ else ++ return _lv_detach_writecache_cachevol_inactive(lv, noflush); ++} ++ ++static int _writecache_setting_str_list_add(const char *field, uint64_t val, char *val_str, struct dm_list *result, struct dm_pool *mem) ++{ ++ char buf[128]; ++ char *list_item; ++ int len; ++ ++ if (val_str) { ++ if (dm_snprintf(buf, sizeof(buf), "%s=%s", field, val_str) < 0) ++ return_0; ++ } else { ++ if (dm_snprintf(buf, sizeof(buf), "%s=%llu", field, (unsigned long long)val) < 0) ++ return_0; ++ } ++ ++ len = strlen(buf) + 1; ++ ++ if (!(list_item = dm_pool_zalloc(mem, len))) ++ return_0; ++ ++ memcpy(list_item, buf, len); ++ ++ if (!str_list_add_no_dup_check(mem, result, list_item)) ++ return_0; ++ ++ return 1; ++} ++ ++int writecache_settings_to_str_list(struct writecache_settings *settings, struct dm_list *result, struct dm_pool *mem) ++{ ++ int errors = 0; ++ ++ if (settings->high_watermark_set) ++ if (!_writecache_setting_str_list_add("high_watermark", settings->high_watermark, NULL, result, mem)) ++ errors++; ++ ++ if (settings->low_watermark_set) ++ if (!_writecache_setting_str_list_add("low_watermark", settings->low_watermark, NULL, result, mem)) ++ errors++; ++ ++ if (settings->writeback_jobs_set) ++ if (!_writecache_setting_str_list_add("writeback_jobs", settings->writeback_jobs, NULL, result, mem)) ++ errors++; ++ ++ if (settings->autocommit_blocks_set) ++ if (!_writecache_setting_str_list_add("autocommit_blocks", settings->autocommit_blocks, NULL, result, mem)) ++ errors++; ++ ++ if (settings->autocommit_time_set) ++ if (!_writecache_setting_str_list_add("autocommit_time", settings->autocommit_time, NULL, result, mem)) ++ errors++; ++ ++ if (settings->fua_set) ++ if (!_writecache_setting_str_list_add("fua", (uint64_t)settings->fua, NULL, result, mem)) ++ errors++; ++ ++ if (settings->nofua_set) ++ if (!_writecache_setting_str_list_add("nofua", (uint64_t)settings->nofua, NULL, result, mem)) ++ errors++; ++ ++ if (settings->cleaner_set && settings->cleaner) ++ if (!_writecache_setting_str_list_add("cleaner", (uint64_t)settings->cleaner, NULL, result, mem)) ++ errors++; ++ ++ if (settings->max_age_set) ++ if (!_writecache_setting_str_list_add("max_age", (uint64_t)settings->max_age, NULL, result, mem)) ++ errors++; ++ ++ if (settings->new_key && settings->new_val) ++ if (!_writecache_setting_str_list_add(settings->new_key, 0, settings->new_val, result, mem)) ++ errors++; ++ ++ if (errors) ++ log_warn("Failed to create list of writecache settings."); ++ + return 1; + } + +diff --git a/lib/report/report.c b/lib/report/report.c +index 170df69..979cbee 100644 +--- a/lib/report/report.c ++++ b/lib/report/report.c +@@ -1430,6 
+1430,16 @@ static int _cache_settings_disp(struct dm_report *rh, struct dm_pool *mem, + struct _str_list_append_baton baton; + struct dm_list dummy_list; /* dummy list to display "nothing" */ + ++ if (seg_is_writecache(seg)) { ++ if (!(result = str_list_create(mem))) ++ return_0; ++ ++ if (!writecache_settings_to_str_list((struct writecache_settings *)&seg->writecache_settings, result, mem)) ++ return_0; ++ ++ return _field_set_string_list(rh, field, result, private, 0, NULL); ++ } ++ + if (seg_is_cache(seg) && lv_is_cache_vol(seg->pool_lv)) + setting_seg = seg; + +@@ -3802,6 +3812,12 @@ static int _lvhealthstatus_disp(struct dm_report *rh, struct dm_pool *mem, + health = "failed"; + else if (lvdm->seg_status.cache->read_only) + health = "metadata_read_only"; ++ } else if (lv_is_writecache(lv) && (lvdm->seg_status.type != SEG_STATUS_NONE)) { ++ if (lvdm->seg_status.type != SEG_STATUS_WRITECACHE) ++ return _field_set_value(field, GET_FIRST_RESERVED_NAME(health_undef), ++ GET_FIELD_RESERVED_VALUE(health_undef)); ++ if (lvdm->seg_status.writecache->error) ++ health = "error"; + } else if (lv_is_thin_pool(lv) && (lvdm->seg_status.type != SEG_STATUS_NONE)) { + if (lvdm->seg_status.type != SEG_STATUS_THIN_POOL) + return _field_set_value(field, GET_FIRST_RESERVED_NAME(health_undef), +@@ -3945,7 +3961,7 @@ static int _vdo_ ## vdo_field_name ## _disp (struct dm_report *rh, struct dm_poo + if (!seg_is_vdo_pool(seg)) \ + return _field_set_value(field, "", &GET_TYPE_RESERVED_VALUE(num_undef_64)); \ + \ +- size = seg->vdo_params.vdo_field_name ## _mb * (1024 * 1024 >> SECTOR_SHIFT); \ ++ size = seg->vdo_params.vdo_field_name ## _mb * (UINT64_C(1024) * 1024 >> SECTOR_SHIFT); \ + \ + return _size64_disp(rh, mem, field, &size, private);\ + } +diff --git a/lib/writecache/writecache.c b/lib/writecache/writecache.c +index 130922a..c7aea28 100644 +--- a/lib/writecache/writecache.c ++++ b/lib/writecache/writecache.c +@@ -26,6 +26,9 @@ + #include "lib/metadata/lv_alloc.h" + #include "lib/config/defaults.h" + ++static int _writecache_cleaner_supported; ++static int _writecache_max_age_supported; ++ + #define SEG_LOG_ERROR(t, p...) 
\ + log_error(t " segment %s of logical volume %s.", ## p, \ + dm_config_parent_name(sn), seg->lv->name), 0; +@@ -120,6 +123,18 @@ static int _writecache_text_import(struct lv_segment *seg, + seg->writecache_settings.nofua_set = 1; + } + ++ if (dm_config_has_node(sn, "cleaner")) { ++ if (!dm_config_get_uint32(sn, "cleaner", &seg->writecache_settings.cleaner)) ++ return SEG_LOG_ERROR("Unknown writecache_setting in"); ++ seg->writecache_settings.cleaner_set = 1; ++ } ++ ++ if (dm_config_has_node(sn, "max_age")) { ++ if (!dm_config_get_uint32(sn, "max_age", &seg->writecache_settings.max_age)) ++ return SEG_LOG_ERROR("Unknown writecache_setting in"); ++ seg->writecache_settings.max_age_set = 1; ++ } ++ + if (dm_config_has_node(sn, "writecache_setting_key")) { + const char *key; + const char *val; +@@ -184,6 +199,14 @@ static int _writecache_text_export(const struct lv_segment *seg, + outf(f, "nofua = %u", seg->writecache_settings.nofua); + } + ++ if (seg->writecache_settings.cleaner_set && seg->writecache_settings.cleaner) { ++ outf(f, "cleaner = %u", seg->writecache_settings.cleaner); ++ } ++ ++ if (seg->writecache_settings.max_age_set) { ++ outf(f, "max_age = %u", seg->writecache_settings.max_age); ++ } ++ + if (seg->writecache_settings.new_key && seg->writecache_settings.new_val) { + outf(f, "writecache_setting_key = \"%s\"", + seg->writecache_settings.new_key); +@@ -208,6 +231,7 @@ static int _target_present(struct cmd_context *cmd, + { + static int _writecache_checked = 0; + static int _writecache_present = 0; ++ uint32_t maj, min, patchlevel; + + if (!activation()) + return 0; +@@ -215,6 +239,19 @@ static int _target_present(struct cmd_context *cmd, + if (!_writecache_checked) { + _writecache_checked = 1; + _writecache_present = target_present(cmd, TARGET_NAME_WRITECACHE, 1); ++ ++ if (!target_version(TARGET_NAME_WRITECACHE, &maj, &min, &patchlevel)) ++ return_0; ++ ++ if (maj < 1) { ++ log_error("writecache target version older than minimum 1.0.0"); ++ return 0; ++ } ++ ++ if (min >= 2) { ++ _writecache_cleaner_supported = 1; ++ _writecache_max_age_supported = 1; ++ } + } + + return _writecache_present; +@@ -257,6 +294,18 @@ static int _writecache_add_target_line(struct dev_manager *dm, + return 0; + } + ++ if (!_writecache_cleaner_supported && seg->writecache_settings.cleaner_set && seg->writecache_settings.cleaner) { ++ log_warn("WARNING: ignoring writecache setting \"cleaner\" which is not supported by kernel for LV %s.", seg->lv->name); ++ seg->writecache_settings.cleaner = 0; ++ seg->writecache_settings.cleaner_set = 0; ++ } ++ ++ if (!_writecache_max_age_supported && seg->writecache_settings.max_age_set) { ++ log_warn("WARNING: ignoring writecache setting \"max_age\" which is not supported by kernel for LV %s.", seg->lv->name); ++ seg->writecache_settings.max_age = 0; ++ seg->writecache_settings.max_age_set = 0; ++ } ++ + if ((pmem = lv_on_pmem(seg->writecache)) < 0) + return_0; + +diff --git a/man/lvconvert.8_pregen b/man/lvconvert.8_pregen +index 7440984..b3902a5 100644 +--- a/man/lvconvert.8_pregen ++++ b/man/lvconvert.8_pregen +@@ -23,6 +23,10 @@ lvconvert - Change logical volume layout + .ad b + .br + .ad l ++ \fB--cachedevice\fP \fIPV\fP ++.ad b ++.br ++.ad l + \fB--cachemetadataformat\fP \fBauto\fP|\fB1\fP|\fB2\fP + .ad b + .br +@@ -43,6 +47,10 @@ lvconvert - Change logical volume layout + .ad b + .br + .ad l ++ \fB--cachesize\fP \fISize\fP[m|UNIT] ++.ad b ++.br ++.ad l + \fB--cachevol\fP \fILV\fP + .ad b + .br +@@ -738,6 +746,44 @@ Attach a cache to an LV, converts the LV 
to type cache. + .br + - + ++Add a writecache to an LV, using a specified cache device. ++.br ++.P ++\fBlvconvert\fP \fB--type\fP \fBwritecache\fP \fB--cachedevice\fP \fIPV\fP \fILV\fP\fI_linear_striped_raid\fP ++.br ++.RS 4 ++.ad l ++[ \fB--cachesize\fP \fISize\fP[m|UNIT] ] ++.ad b ++.br ++.ad l ++[ \fB--cachesettings\fP \fIString\fP ] ++.ad b ++.br ++[ COMMON_OPTIONS ] ++.RE ++.br ++- ++ ++Add a cache to an LV, using a specified cache device. ++.br ++.P ++\fBlvconvert\fP \fB--type\fP \fBcache\fP \fB--cachedevice\fP \fIPV\fP \fILV\fP\fI_linear_striped_thinpool_raid\fP ++.br ++.RS 4 ++.ad l ++[ \fB--cachesize\fP \fISize\fP[m|UNIT] ] ++.ad b ++.br ++.ad l ++[ \fB--cachesettings\fP \fIString\fP ] ++.ad b ++.br ++[ COMMON_OPTIONS ] ++.RE ++.br ++- ++ + Convert LV to type thin-pool. + .br + .P +@@ -1135,6 +1181,12 @@ See \fBlvmcache\fP(7) for more information about LVM caching. + .ad b + .HP + .ad l ++\fB--cachedevice\fP \fIPV\fP ++.br ++The name of a device to use for a cache. ++.ad b ++.HP ++.ad l + \fB--cachemetadataformat\fP \fBauto\fP|\fB1\fP|\fB2\fP + .br + Specifies the cache metadata format used by cache target. +@@ -1182,6 +1234,12 @@ See \fBlvmcache\fP(7) for more information. + .ad b + .HP + .ad l ++\fB--cachesize\fP \fISize\fP[m|UNIT] ++.br ++The size of cache to use. ++.ad b ++.HP ++.ad l + \fB--cachevol\fP \fILV\fP + .br + The name of a cache volume. +diff --git a/man/lvcreate.8_pregen b/man/lvcreate.8_pregen +index be8e783..ee69034 100644 +--- a/man/lvcreate.8_pregen ++++ b/man/lvcreate.8_pregen +@@ -31,6 +31,10 @@ lvcreate - Create a logical volume + .ad b + .br + .ad l ++ \fB--cachedevice\fP \fIPV\fP ++.ad b ++.br ++.ad l + \fB--cachemetadataformat\fP \fBauto\fP|\fB1\fP|\fB2\fP + .ad b + .br +@@ -51,6 +55,14 @@ lvcreate - Create a logical volume + .ad b + .br + .ad l ++ \fB--cachesize\fP \fISize\fP[m|UNIT] ++.ad b ++.br ++.ad l ++ \fB--cachevol\fP \fILV\fP ++.ad b ++.br ++.ad l + \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] + .ad b + .br +@@ -816,11 +828,9 @@ where the new thin pool is named by the --thinpool arg. + .RE + - + +-Create a cache LV, first creating a new origin LV, ++Create a new LV, then attach the specified cachepool + .br +-then combining it with the existing cache pool named +-.br +-by the --cachepool arg. ++which converts the new LV to type cache. + .br + .P + \fBlvcreate\fP \fB--type\fP \fBcache\fP \fB-L\fP|\fB--size\fP \fISize\fP[m|UNIT] +@@ -881,6 +891,190 @@ by the --cachepool arg. + .RE + - + ++Create a new LV, then attach the specified cachevol ++.br ++which converts the new LV to type cache. ++.br ++.P ++\fBlvcreate\fP \fB--type\fP \fBcache\fP \fB-L\fP|\fB--size\fP \fISize\fP[m|UNIT] ++.RS 5 ++ \fB--cachevol\fP \fILV\fP \fIVG\fP ++.RE ++.br ++.RS 4 ++.ad l ++[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ] ++.ad b ++.br ++.ad l ++[ \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] ] ++.ad b ++.br ++.ad l ++[ \fB-i\fP|\fB--stripes\fP \fINumber\fP ] ++.ad b ++.br ++.ad l ++[ \fB-I\fP|\fB--stripesize\fP \fISize\fP[k|UNIT] ] ++.ad b ++.br ++.ad l ++[ \fB--cachemode\fP \fBwritethrough\fP|\fBwriteback\fP|\fBpassthrough\fP ] ++.ad b ++.br ++.ad l ++[ \fB--cachepolicy\fP \fIString\fP ] ++.ad b ++.br ++.ad l ++[ \fB--cachesettings\fP \fIString\fP ] ++.ad b ++.br ++.ad l ++[ \fB--cachemetadataformat\fP \fBauto\fP|\fB1\fP|\fB2\fP ] ++.ad b ++.br ++[ COMMON_OPTIONS ] ++.RE ++.br ++.RS 4 ++[ \fIPV\fP ... ] ++.RE ++- ++ ++Create a new LV, then attach a cachevol created from ++.br ++the specified cache device, which converts the ++.br ++new LV to type cache. 
++.br ++.P ++\fBlvcreate\fP \fB--type\fP \fBcache\fP \fB-L\fP|\fB--size\fP \fISize\fP[m|UNIT] ++.RS 5 ++ \fB--cachedevice\fP \fIPV\fP \fIVG\fP ++.RE ++.br ++.RS 4 ++.ad l ++[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ] ++.ad b ++.br ++.ad l ++[ \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] ] ++.ad b ++.br ++.ad l ++[ \fB-i\fP|\fB--stripes\fP \fINumber\fP ] ++.ad b ++.br ++.ad l ++[ \fB-I\fP|\fB--stripesize\fP \fISize\fP[k|UNIT] ] ++.ad b ++.br ++.ad l ++[ \fB--cachemode\fP \fBwritethrough\fP|\fBwriteback\fP|\fBpassthrough\fP ] ++.ad b ++.br ++.ad l ++[ \fB--cachepolicy\fP \fIString\fP ] ++.ad b ++.br ++.ad l ++[ \fB--cachesettings\fP \fIString\fP ] ++.ad b ++.br ++.ad l ++[ \fB--cachemetadataformat\fP \fBauto\fP|\fB1\fP|\fB2\fP ] ++.ad b ++.br ++.ad l ++[ \fB--cachesize\fP \fISize\fP[m|UNIT] ] ++.ad b ++.br ++[ COMMON_OPTIONS ] ++.RE ++.br ++.RS 4 ++[ \fIPV\fP ... ] ++.RE ++- ++ ++Create a new LV, then attach the specified cachevol ++.br ++which converts the new LV to type writecache. ++.br ++.P ++\fBlvcreate\fP \fB--type\fP \fBwritecache\fP \fB-L\fP|\fB--size\fP \fISize\fP[m|UNIT] ++.RS 5 ++ \fB--cachevol\fP \fILV\fP \fIVG\fP ++.RE ++.br ++.RS 4 ++.ad l ++[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ] ++.ad b ++.br ++.ad l ++[ \fB-i\fP|\fB--stripes\fP \fINumber\fP ] ++.ad b ++.br ++.ad l ++[ \fB-I\fP|\fB--stripesize\fP \fISize\fP[k|UNIT] ] ++.ad b ++.br ++.ad l ++[ \fB--cachesettings\fP \fIString\fP ] ++.ad b ++.br ++[ COMMON_OPTIONS ] ++.RE ++.br ++.RS 4 ++[ \fIPV\fP ... ] ++.RE ++- ++ ++Create a new LV, then attach a cachevol created from ++.br ++the specified cache device, which converts the ++.br ++new LV to type writecache. ++.br ++.P ++\fBlvcreate\fP \fB--type\fP \fBwritecache\fP \fB-L\fP|\fB--size\fP \fISize\fP[m|UNIT] ++.RS 5 ++ \fB--cachedevice\fP \fIPV\fP \fIVG\fP ++.RE ++.br ++.RS 4 ++.ad l ++[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ] ++.ad b ++.br ++.ad l ++[ \fB-i\fP|\fB--stripes\fP \fINumber\fP ] ++.ad b ++.br ++.ad l ++[ \fB-I\fP|\fB--stripesize\fP \fISize\fP[k|UNIT] ] ++.ad b ++.br ++.ad l ++[ \fB--cachesize\fP \fISize\fP[m|UNIT] ] ++.ad b ++.br ++.ad l ++[ \fB--cachesettings\fP \fIString\fP ] ++.ad b ++.br ++[ COMMON_OPTIONS ] ++.RE ++.br ++.RS 4 ++[ \fIPV\fP ... ] ++.RE ++- ++ + Common options for command: + . + .RS 4 +@@ -1091,6 +1285,12 @@ See \fBlvmcache\fP(7) for more information about LVM caching. + .ad b + .HP + .ad l ++\fB--cachedevice\fP \fIPV\fP ++.br ++The name of a device to use for a cache. ++.ad b ++.HP ++.ad l + \fB--cachemetadataformat\fP \fBauto\fP|\fB1\fP|\fB2\fP + .br + Specifies the cache metadata format used by cache target. +@@ -1138,6 +1338,18 @@ See \fBlvmcache\fP(7) for more information. + .ad b + .HP + .ad l ++\fB--cachesize\fP \fISize\fP[m|UNIT] ++.br ++The size of cache to use. ++.ad b ++.HP ++.ad l ++\fB--cachevol\fP \fILV\fP ++.br ++The name of a cache volume. ++.ad b ++.HP ++.ad l + \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] + .br + The size of chunks in a snapshot, cache pool or thin pool. +@@ -2659,11 +2871,11 @@ config setting sparse_segtype_default. + .RE + - + +-Create a cache LV, first creating a new origin LV, ++Create a new LV, then attach the specified cachepool + .br +-then combining it with the existing cache pool named ++which converts the new LV to type cache + .br +-by the --cachepool arg (variant, infers --type cache). ++(variant, infers --type cache.) 
+ .br + .P + \fBlvcreate\fP \fB-L\fP|\fB--size\fP \fISize\fP[m|UNIT] \fB--cachepool\fP \fILV\fP\fI_cachepool\fP \fIVG\fP +@@ -2717,11 +2929,11 @@ by the --cachepool arg (variant, infers --type cache). + .RE + - + +-Create a cache LV, first creating a new origin LV, ++Create a new LV, then attach the specified cachepool + .br +-then combining it with the existing cache pool named ++which converts the new LV to type cache. + .br +-in the first arg (variant, also use --cachepool). ++(variant, also use --cachepool). + .br + .P + \fBlvcreate\fP \fB--type\fP \fBcache\fP \fB-L\fP|\fB--size\fP \fISize\fP[m|UNIT] \fILV\fP\fI_cachepool\fP +@@ -2779,19 +2991,15 @@ in the first arg (variant, also use --cachepool). + .RE + - + +-When LV is a cache pool, create a cache LV, +-.br +-first creating a new origin LV, then combining it with +-.br +-the existing cache pool named in the first arg ++When the LV arg is a cachepool, then create a new LV and + .br +-(variant, infers --type cache, also use --cachepool). ++attach the cachepool arg to it. + .br +-When LV is not a cache pool, convert the specified LV ++(variant, use --type cache and --cachepool.) + .br +-to type cache after creating a new cache pool LV to use ++When the LV arg is not a cachepool, then create a new cachepool + .br +-(use lvconvert). ++and attach it to the LV arg (alternative, use lvconvert.) + .br + .P + \fBlvcreate\fP \fB-H\fP|\fB--cache\fP \fB-L\fP|\fB--size\fP \fISize\fP[m|UNIT] \fILV\fP +diff --git a/man/lvmcache.7_main b/man/lvmcache.7_main +index 425904e..37d0e33 100644 +--- a/man/lvmcache.7_main ++++ b/man/lvmcache.7_main +@@ -34,8 +34,6 @@ LVM refers to this using the LV type \fBwritecache\fP. + + .SH USAGE + +-Both kinds of caching use similar lvm commands: +- + .B 1. Identify main LV that needs caching + + The main LV may already exist, and is located on larger, slower devices. +@@ -131,8 +129,35 @@ attached. + LV VG Attr Type Devices + fast vg -wi------- linear /dev/fast_ssd + main vg -wi------- linear /dev/slow_hhd ++ ++To stop caching the main LV and also remove unneeded cache pool, ++use the --uncache: ++ ++.nf ++ $ lvconvert --uncache vg/main ++ ++ $ lvs -a ++ LV VG Attr Type Devices ++ main vg -wi------- linear /dev/slow_hhd ++ + .fi + ++.SS Create a new LV with caching. ++ ++A new LV can be created with caching attached at the time of creation ++using the following command: ++ ++.nf ++$ lvcreate --type cache|writecache -n Name -L Size ++ --cachedevice /dev/fast_ssd vg /dev/slow_hhd ++.fi ++ ++The main LV is created with the specified Name and Size from the slow_hhd. ++A hidden fast LV is created on the fast_ssd and is then attached to the ++new main LV. If the fast_ssd is unused, the entire disk will be used as ++the cache unless the --cachesize option is used to specify a size for the ++fast LV. The --cachedevice option can be repeated to use multiple disks ++for the fast LV. + + .SH OPTIONS + +@@ -156,12 +181,26 @@ same fast LV. This option can be used with dm-writecache or dm-cache. + + Pass this option a cachepool LV or a standard LV. When using a cache + pool, lvm places cache data and cache metadata on different LVs. The two +-LVs together are called a cache pool. This permits specific placement of +-data and metadata. A cache pool is represented as a special type of LV ++LVs together are called a cache pool. This has a bit better performance ++for dm-cache and permits specific placement and segment type selection ++for data and metadata volumes. 
++A cache pool is represented as a special type of LV
+ that cannot be used directly. If a standard LV is passed with this
+ option, lvm will first convert it to a cache pool by combining it with
+ another LV to use for metadata. This option can be used with dm-cache.
+
++.B --cachedevice
++.I PV
++.br
++
++This option can be used in place of --cachevol, in which case a cachevol
++LV will be created using the specified device. This option can be
++repeated to create a cachevol using multiple devices, or a tag name can be
++specified in which case the cachevol will be created using any of the
++devices with the given tag. If a named cache device is unused, the entire
++device will be used to create the cachevol. To create a cachevol of a
++specific size from the cache devices, include the --cachesize option.
++
+ \&
+
+ .SS dm-cache block size
+@@ -335,11 +374,16 @@ $ lvconvert --type cache --cachevol fast \\
+
+ The size of data blocks managed by dm-cache can be specified with the
+ --chunksize option when caching is started. The default unit is KiB. The
+-value must be a multiple of 32KiB between 32KiB and 1GiB.
++value must be a multiple of 32KiB between 32KiB and 1GiB. Cache chunks
++bigger than 512KiB should only be used when necessary.
+
+ Using a chunk size that is too large can result in wasteful use of the
+ cache, in which small reads and writes cause large sections of an LV to be
+-stored in the cache. However, choosing a chunk size that is too small
++stored in the cache. It can also require increasing the migration threshold,
++which defaults to 2048 sectors (1 MiB). Lvm2 ensures the migration threshold is
++at least 8 chunks in size. This may in some cases result in a very
++high bandwidth load when transferring data between the cache LV and its
++cache origin LV. However, choosing a chunk size that is too small
+ can result in more overhead trying to manage the numerous chunks that
+ become mapped into the cache. Overhead can include both excessive CPU
+ time searching for chunks, and excessive memory tracking chunks.
+@@ -357,6 +401,35 @@ The default value is shown by:
+ .br
+ .B lvmconfig --type default allocation/cache_pool_chunk_size
+
++Checking the migration threshold (in sectors) of a running cached LV:
++.br
++.B lvs -o+kernel_cache_settings VG/LV
++
++
++.SS dm-cache migration threshold
++
++\&
++
++Migrating data between the origin and cache LV uses bandwidth.
++The user can set a throttle to prevent more than a certain amount of
++migration occurring at any one time. Currently dm-cache does not take
++normal I/O traffic going to the devices into account.
++
++The user can set the migration threshold via cache policy settings as
++"migration_threshold=<#sectors>" to set the maximum number
++of sectors being migrated, the default being 2048 sectors (1MiB).
++
++Command to set the migration threshold to 2MiB (4096 sectors):
++.br
++.B lvchange --cachesettings 'migration_threshold=4096' VG/LV
++
++
++Command to display the migration threshold:
++.br
++.B lvs -o+kernel_cache_settings,cache_settings VG/LV
++.br
++.B lvs -o+chunksize VG/LV
++
+
+ .SS dm-cache cache policy
+
+diff --git a/man/lvs.8_end b/man/lvs.8_end
+index 6efc9cb..5a4ecc8 100644
+--- a/man/lvs.8_end
++++ b/man/lvs.8_end
+@@ -74,5 +74,9 @@ Related to Thin Logical Volumes: (F)ailed.
+ .br
+ (F)ailed is set when related thin pool enters Failed state and no further I/O
+ is permitted at all.
++.IP
++Related to writecache logical volumes: (E)rror.
++.br
++(E)rror is set when dm-writecache reports an error.
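A minimal sketch of the dm-cache migration threshold workflow described above, assuming an existing cached LV named vg/main (the LV name and the 16384-sector value are placeholders, not taken from the patch). With a 1 MiB chunk size, the "at least 8 chunks" rule works out to 8 MiB, i.e. 16384 sectors, so lvm is expected to keep the effective value at or above that:

  # show the current chunk size and the settings the kernel is using
  lvs -o+chunksize,kernel_cache_settings vg/main
  # key=value tunables such as migration_threshold are passed with --cachesettings
  lvchange --cachesettings 'migration_threshold=16384' vg/main
  # compare the requested settings with the effective kernel settings
  lvs -o+cache_settings,kernel_cache_settings vg/main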
+ .IP 10 3 + s(k)ip activation: this volume is flagged to be skipped during activation. +diff --git a/man/lvs.8_pregen b/man/lvs.8_pregen +index 8c3091d..8aea356 100644 +--- a/man/lvs.8_pregen ++++ b/man/lvs.8_pregen +@@ -577,6 +577,10 @@ Related to Thin Logical Volumes: (F)ailed. + .br + (F)ailed is set when related thin pool enters Failed state and no further I/O + is permitted at all. ++.IP ++Related to writecache logical volumes: (E)rror. ++.br ++(E)rror is set dm-writecache reports an error. + .IP 10 3 + s(k)ip activation: this volume is flagged to be skipped during activation. + .SH SEE ALSO +diff --git a/man/vgck.8_pregen b/man/vgck.8_pregen +index a66de5d..2a1ec23 100644 +--- a/man/vgck.8_pregen ++++ b/man/vgck.8_pregen +@@ -199,6 +199,15 @@ back metadata it believes has changed but hasn't. + \fB--updatemetadata\fP + .br + Update VG metadata to correct problems. ++If VG metadata was updated while a PV was missing, and the PV ++reappears with an old version of metadata, then this option ++(or any other command that writes metadata) will update the ++metadata on the previously missing PV. If a PV was removed ++from a VG while it was missing, and the PV reappears, using ++this option will clear the outdated metadata from the previously ++missing PV. If metadata text is damaged on one PV, using this ++option will replace the damaged metadata text. For more severe ++damage, e.g. with headers, see \fBpvck\fP(8). + .ad b + .HP + .ad l +diff --git a/scripts/blkdeactivate.sh.in b/scripts/blkdeactivate.sh.in +index 57b3e58..7c517b8 100644 +--- a/scripts/blkdeactivate.sh.in ++++ b/scripts/blkdeactivate.sh.in +@@ -330,6 +330,12 @@ deactivate_vdo() { + test -b "$DEV_DIR/mapper/$xname" || return 0 + test -z "${SKIP_DEVICE_LIST["$kname"]}" || return 1 + ++ # Skip VDO device deactivation if VDO tools missing. ++ test "$VDO_AVAILABLE" -eq 0 && { ++ add_device_to_skip_list ++ return 1 ++ } ++ + deactivate_holders "$DEV_DIR/mapper/$xname" || return 1 + + echo -n " [VDO]: deactivating VDO volume $xname... 
" +diff --git a/test/dbus/lvmdbustest.py b/test/dbus/lvmdbustest.py +index b2986bf..473bb94 100755 +--- a/test/dbus/lvmdbustest.py ++++ b/test/dbus/lvmdbustest.py +@@ -1558,6 +1558,36 @@ class TestDbusService(unittest.TestCase): + cached_lv.Lv.Rename(dbus.String(new_name), dbus.Int32(g_tmo), EOD)) + verify_cache_lv_count() + ++ def test_writecache_lv(self): ++ vg = self._vg_create().Vg ++ data_lv = self._create_lv(size=mib(16), vg=vg) ++ cache_lv = self._create_lv(size=mib(16), vg=vg) ++ ++ # both LVs need to be inactive ++ self.handle_return(data_lv.Lv.Deactivate( ++ dbus.UInt64(0), dbus.Int32(g_tmo), EOD)) ++ data_lv.update() ++ self.handle_return(cache_lv.Lv.Deactivate( ++ dbus.UInt64(0), dbus.Int32(g_tmo), EOD)) ++ cache_lv.update() ++ ++ cached_lv_path = self.handle_return( ++ cache_lv.Lv.WriteCacheLv( ++ dbus.ObjectPath(data_lv.object_path), ++ dbus.Int32(g_tmo), ++ EOD)) ++ ++ intf = (LV_COMMON_INT, LV_INT, CACHE_LV_INT) ++ cached_lv = ClientProxy(self.bus, cached_lv_path, interfaces=intf) ++ self.assertEqual(cached_lv.LvCommon.SegType, ["writecache"]) ++ ++ uncached_lv_path = self.handle_return( ++ cached_lv.CachedLv.DetachCachePool( ++ dbus.Boolean(True), ++ dbus.Int32(g_tmo), ++ EOD)) ++ self.assertTrue('/com/redhat/lvmdbus1/Lv' in uncached_lv_path) ++ + def test_vg_change(self): + vg_proxy = self._vg_create() + +diff --git a/test/lib/aux.sh b/test/lib/aux.sh +index e40da95..17e7935 100644 +--- a/test/lib/aux.sh ++++ b/test/lib/aux.sh +@@ -1234,6 +1234,7 @@ activation/verify_udev_operations = $LVM_VERIFY_UDEV + activation/raid_region_size = 512 + allocation/wipe_signatures_when_zeroing_new_lvs = 0 + allocation/vdo_slab_size_mb = 128 ++allocation/zero_metadata = 0 + backup/archive = 0 + backup/backup = 0 + devices/cache_dir = "$TESTDIR/etc" +diff --git a/test/shell/cachevol-cachedevice.sh b/test/shell/cachevol-cachedevice.sh +new file mode 100644 +index 0000000..3831ee9 +--- /dev/null ++++ b/test/shell/cachevol-cachedevice.sh +@@ -0,0 +1,222 @@ ++#!/usr/bin/env bash ++ ++# Copyright (C) 2018 Red Hat, Inc. All rights reserved. ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions ++# of the GNU General Public License v.2. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write to the Free Software Foundation, ++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ ++SKIP_WITH_LVMPOLLD=1 ++ ++. 
lib/inittest ++ ++aux have_cache 1 10 0 || skip ++aux have_writecache 1 0 0 || skip ++ ++aux prepare_devs 4 64 ++ ++vgcreate $SHARED $vg "$dev1" "$dev2" ++ ++## cache ++ ++# use existing cachevol ++lvcreate -n $lv1 -l8 -an $vg "$dev1" ++lvcreate --type cache -n $lv2 -L40M --cachevol $lv1 -y $vg "$dev2" ++check lv_field $vg/$lv2 segtype cache ++check lv_field $vg/${lv1}_cvol segtype linear -a ++lvremove -y $vg/$lv2 ++ ++# use entire cachedevice for cachevol ++lvcreate --type cache -n $lv2 -L40M --cachedevice "$dev1" -y $vg "$dev2" ++check lv_field $vg/$lv2 segtype cache ++check lv_field $vg/${lv2}_cache_cvol segtype linear -a ++lvremove -y $vg/$lv2 ++ ++# use part of cachedevice for cachevol ++lvcreate --type cache -n $lv2 -L20M --cachedevice "$dev1" --cachesize 16M -y $vg "$dev2" ++check lv_field $vg/$lv2 segtype cache ++check lv_field $vg/${lv2}_cache_cvol segtype linear -a ++lvcreate --type cache -n $lv3 -L20M --cachedevice "$dev1" --cachesize 16M -y $vg "$dev2" ++check lv_field $vg/$lv3 segtype cache ++check lv_field $vg/${lv3}_cache_cvol segtype linear -a ++lvremove -y $vg/$lv2 ++lvremove -y $vg/$lv3 ++ ++## writecache ++ ++# use existing cachevol ++lvcreate -n $lv1 -l8 -an $vg "$dev1" ++lvcreate --type writecache -n $lv2 -L40M --cachevol $lv1 -y $vg "$dev2" ++check lv_field $vg/$lv2 segtype writecache ++check lv_field $vg/${lv1}_cvol segtype linear -a ++lvremove -y $vg/$lv2 ++ ++# use entire cachedevice for cachevol ++lvcreate --type writecache -n $lv2 -L40M --cachedevice "$dev1" -y $vg "$dev2" ++check lv_field $vg/$lv2 segtype writecache ++check lv_field $vg/${lv2}_cache_cvol segtype linear -a ++lvremove -y $vg/$lv2 ++ ++# use part of cachedevice for cachevol ++lvcreate --type writecache -n $lv2 -L20M --cachedevice "$dev1" --cachesize 16M -y $vg "$dev2" ++check lv_field $vg/$lv2 segtype writecache ++check lv_field $vg/${lv2}_cache_cvol segtype linear -a ++lvcreate --type writecache -n $lv3 -L20M --cachedevice "$dev1" --cachesize 16M -y $vg "$dev2" ++check lv_field $vg/$lv3 segtype writecache ++check lv_field $vg/${lv3}_cache_cvol segtype linear -a ++lvremove -y $vg/$lv2 ++lvremove -y $vg/$lv3 ++ ++## multiple cachedevs ++ ++vgextend $vg "$dev3" "$dev4" ++ ++lvcreate --type writecache -n $lv2 -L100M --cachedevice "$dev1" --cachedevice "$dev3" -y $vg "$dev2" "$dev4" ++check lv_field $vg/${lv2}_cache_cvol lv_size "120.00m" ++lvremove -y $vg/$lv2 ++ ++lvcreate --type writecache -n $lv2 -L100M --cachedevice "$dev1" --cachedevice "$dev3" --cachesize 80M -y $vg "$dev2" "$dev4" ++check lv_field $vg/${lv2}_cache_cvol lv_size "80.00m" ++lvremove -y $vg/$lv2 ++ ++pvchange --addtag slow "$dev2" ++pvchange --addtag slow "$dev4" ++pvchange --addtag fast "$dev1" ++pvchange --addtag fast "$dev3" ++ ++lvcreate --type writecache -n $lv2 -L100M --cachedevice @fast --cachesize 80M -y $vg @slow ++check lv_field $vg/${lv2}_cache_cvol lv_size "80.00m" ++lvremove -y $vg/$lv2 ++ ++lvcreate --type cache -n $lv2 -L100M --cachedevice @fast --cachesize 80M -y $vg @slow ++check lv_field $vg/${lv2}_cache_cvol lv_size "80.00m" ++lvremove -y $vg/$lv2 ++ ++## error cases ++ ++# cachevol doesn't exist ++not lvcreate --type cache -n $lv2 -l8 --cachevol asdf -y $vg "$dev2" ++not lvs $vg/$lv1 ++not lvs $vg/$lv2 ++ ++# cachedevice doesn't exist ++not lvcreate --type cache -n $lv2 -l8 --cachedevice asdf -y $vg "$dev2" ++not lvs $vg/$lv1 ++not lvs $vg/$lv2 ++ ++# cachevol doesn't exist ++not lvcreate --type writecache -n $lv2 -l8 --cachevol asdf -y $vg "$dev2" ++not lvs $vg/$lv1 ++not lvs $vg/$lv2 ++ ++# cachedevice doesn't 
exist ++not lvcreate --type writecache -n $lv2 -l8 --cachedevice asdf -y $vg "$dev2" ++not lvs $vg/$lv1 ++not lvs $vg/$lv2 ++ ++# when cachedevice is already being used, cachesize is required to use a part of it ++lvcreate -n asdf -l1 $vg "$dev1" ++not lvcreate --type writecache -n $lv2 -l8 --cachedevice "$dev1" -y $vg "$dev2" ++not lvcreate --type writecache -n $lv2 -l8 --cachedevice "$dev1" --cachedevice "$dev3" -y $vg "$dev2" ++not lvs $vg/$lv1 ++not lvs $vg/$lv2 ++lvcreate --type writecache -n $lv2 -l8 --cachedevice "$dev1" --cachesize 8M -y $vg "$dev2" ++lvs $vg/$lv2 ++check lv_field $vg/${lv2}_cache_cvol lv_size "8.00m" ++lvremove -y $vg/$lv2 ++ ++vgremove -ff $vg ++ ++# lvconvert single step cachevol creation and attachment ++# . cache and writecache ++# . one or two cachedevices ++# . with or without --cachesize ++# . using tags for devices ++ ++vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4" ++ ++lvcreate -n $lv1 -l8 -an $vg "$dev1" ++lvconvert -y --type cache --cachedevice "$dev2" $vg/$lv1 ++check lv_field $vg/$lv1 segtype cache ++check lv_field $vg/${lv1}_cache_cvol segtype linear -a ++check lv_field $vg/${lv1}_cache_cvol lv_size "60.00m" ++lvchange -ay $vg/$lv1 ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++lvcreate -n $lv1 -l8 -an $vg "$dev1" ++lvconvert -y --type cache --cachedevice "$dev2" --cachedevice "$dev3" $vg/$lv1 ++check lv_field $vg/$lv1 segtype cache ++check lv_field $vg/${lv1}_cache_cvol lv_size "120.00m" ++lvchange -ay $vg/$lv1 ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++lvcreate -n $lv1 -l8 -an $vg "$dev1" ++lvconvert -y --type cache --cachedevice "$dev2" --cachedevice "$dev3" --cachesize 8M $vg/$lv1 ++check lv_field $vg/$lv1 segtype cache ++check lv_field $vg/${lv1}_cache_cvol lv_size "8.00m" ++lvchange -ay $vg/$lv1 ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++lvcreate -n $lv1 -l8 -an $vg "$dev1" ++lvconvert -y --type writecache --cachedevice "$dev2" $vg/$lv1 ++check lv_field $vg/$lv1 segtype writecache ++check lv_field $vg/${lv1}_cache_cvol lv_size "60.00m" ++lvchange -ay $vg/$lv1 ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++lvcreate -n $lv1 -l8 -an $vg "$dev1" ++lvconvert -y --type writecache --cachedevice "$dev2" --cachedevice "$dev3" $vg/$lv1 ++check lv_field $vg/$lv1 segtype writecache ++check lv_field $vg/${lv1}_cache_cvol lv_size "120.00m" ++lvchange -ay $vg/$lv1 ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++lvcreate -n $lv1 -l8 -an $vg "$dev1" ++lvconvert -y --type writecache --cachedevice "$dev2" --cachedevice "$dev3" --cachesize 8M $vg/$lv1 ++check lv_field $vg/$lv1 segtype writecache ++check lv_field $vg/${lv1}_cache_cvol lv_size "8.00m" ++lvchange -ay $vg/$lv1 ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++pvchange --addtag slow "$dev1" ++pvchange --addtag fast "$dev2" ++pvchange --addtag fast "$dev3" ++ ++lvcreate -n $lv1 -l8 -an $vg @slow ++lvconvert -y --type cache --cachedevice @fast --cachesize 8M $vg/$lv1 ++check lv_field $vg/$lv1 segtype cache ++check lv_field $vg/${lv1}_cache_cvol lv_size "8.00m" ++lvchange -ay $vg/$lv1 ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++lvcreate -n $lv1 -l8 -an $vg @slow ++lvconvert -y --type writecache --cachedevice @fast --cachesize 8M $vg/$lv1 ++check lv_field $vg/$lv1 segtype writecache ++check lv_field $vg/${lv1}_cache_cvol lv_size "8.00m" ++lvchange -ay $vg/$lv1 ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++# if the cache name is used generate a new name ++lvcreate -n $lv1 -l8 -an $vg @slow ++lvcreate -n ${lv1}_cache -l1 -an $vg @slow ++lvconvert -y --type writecache 
--cachedevice @fast --cachesize 8M $vg/$lv1 ++check lv_field $vg/$lv1 segtype writecache ++check lv_field $vg/${lv1}_cache0_cvol lv_size "8.00m" ++lvchange -ay $vg/$lv1 ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++vgremove -ff $vg ++ +diff --git a/test/shell/integrity-blocksize-2.sh b/test/shell/integrity-blocksize-2.sh +new file mode 100644 +index 0000000..5e0fd9a +--- /dev/null ++++ b/test/shell/integrity-blocksize-2.sh +@@ -0,0 +1,128 @@ ++#!/usr/bin/env bash ++ ++# Copyright (C) 2018 Red Hat, Inc. All rights reserved. ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions ++# of the GNU General Public License v.2. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write to the Free Software Foundation, ++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ ++SKIP_WITH_LVMPOLLD=1 ++ ++. lib/inittest ++ ++aux have_integrity 1 5 0 || skip ++ ++mnt="mnt" ++mkdir -p $mnt ++ ++_sync_percent() { ++ local checklv=$1 ++ get lv_field "$checklv" sync_percent | cut -d. -f1 ++} ++ ++_wait_recalc() { ++ local checklv=$1 ++ ++ for i in $(seq 1 10) ; do ++ sync=$(_sync_percent "$checklv") ++ echo "sync_percent is $sync" ++ ++ if test "$sync" = "100"; then ++ return ++ fi ++ ++ sleep 1 ++ done ++ ++ # TODO: There is some strange bug, first leg of RAID with integrity ++ # enabled never gets in sync. I saw this in BB, but not when executing ++ # the commands manually ++ if test -z "$sync"; then ++ echo "TEST WARNING: Resync of dm-integrity device '$checklv' failed" ++ dmsetup status "$DM_DEV_DIR/mapper/${checklv/\//-}" ++ exit ++ fi ++ echo "timeout waiting for recalc" ++ return 1 ++} ++ ++# prepare_devs uses ramdisk backing which has 512 LBS and 4K PBS ++# This should cause mkfs.xfs to use 4K sector size, ++# and integrity to use 4K block size ++aux prepare_devs 2 64 ++ ++vgcreate $vg "$dev1" "$dev2" ++blockdev --getss "$dev1" ++blockdev --getpbsz "$dev1" ++blockdev --getss "$dev2" ++blockdev --getpbsz "$dev2" ++ ++# add integrity while LV is inactive ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++echo "hello world" > $mnt/hello ++umount $mnt ++lvchange -an $vg ++lvconvert --raidintegrity y $vg/$lv1 ++lvchange -ay $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++cat $mnt/hello ++umount $mnt ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++# add integrity while LV is active, fs unmounted ++# lvconvert will use ribs 512 to avoid increasing LBS from 512 to 4k on active LV ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++echo "hello world" > $mnt/hello ++umount $mnt ++lvchange -an $vg ++lvchange -ay $vg ++lvconvert --raidintegrity y $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++cat $mnt/hello | grep "hello world" ++umount $mnt ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++# add integrity while LV is active, fs mounted ++# lvconvert will use ribs 512 to avoid increasing LBS from 512 to 4k on active LV ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mkfs.xfs -f 
"$DM_DEV_DIR/$vg/$lv1" ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++echo "hello world" > $mnt/hello ++lvconvert --raidintegrity y $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++cat $mnt/hello | grep "hello world" ++umount $mnt ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++cat $mnt/hello | grep "hello world" ++umount $mnt ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++vgremove -ff $vg ++ +diff --git a/test/shell/integrity-blocksize-3.sh b/test/shell/integrity-blocksize-3.sh +new file mode 100644 +index 0000000..4aea972 +--- /dev/null ++++ b/test/shell/integrity-blocksize-3.sh +@@ -0,0 +1,285 @@ ++#!/usr/bin/env bash ++ ++# Copyright (C) 2018 Red Hat, Inc. All rights reserved. ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions ++# of the GNU General Public License v.2. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write to the Free Software Foundation, ++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ ++SKIP_WITH_LVMPOLLD=1 ++ ++. lib/inittest ++ ++aux have_integrity 1 5 0 || skip ++ ++mnt="mnt" ++mkdir -p $mnt ++ ++_sync_percent() { ++ local checklv=$1 ++ get lv_field "$checklv" sync_percent | cut -d. -f1 ++} ++ ++_wait_recalc() { ++ local checklv=$1 ++ ++ for i in $(seq 1 10) ; do ++ sync=$(_sync_percent "$checklv") ++ echo "sync_percent is $sync" ++ ++ if test "$sync" = "100"; then ++ return ++ fi ++ ++ sleep 1 ++ done ++ ++ # TODO: There is some strange bug, first leg of RAID with integrity ++ # enabled never gets in sync. I saw this in BB, but not when executing ++ # the commands manually ++ if test -z "$sync"; then ++ echo "TEST WARNING: Resync of dm-integrity device '$checklv' failed" ++ dmsetup status "$DM_DEV_DIR/mapper/${checklv/\//-}" ++ exit ++ fi ++ echo "timeout waiting for recalc" ++ return 1 ++} ++ ++# scsi_debug devices with 512 LBS 512 PBS ++aux prepare_scsi_debug_dev 256 ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "512" ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "512" ++aux prepare_devs 2 64 ++ ++vgcreate $vg "$dev1" "$dev2" ++blockdev --getss "$dev1" ++blockdev --getpbsz "$dev1" ++blockdev --getss "$dev2" ++blockdev --getpbsz "$dev2" ++ ++# add integrity while LV is inactive ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++echo "hello world" > $mnt/hello ++umount $mnt ++lvchange -an $vg ++lvconvert --raidintegrity y $vg/$lv1 ++lvchange -ay $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++cat $mnt/hello ++umount $mnt ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++# add integrity while LV is active, fs unmounted ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++echo "hello world" > $mnt/hello ++umount $mnt ++lvchange -an $vg ++lvchange -ay $vg ++lvconvert --raidintegrity y $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++cat $mnt/hello | grep "hello world" ++umount $mnt ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++# add integrity while LV is active, 
fs mounted ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++echo "hello world" > $mnt/hello ++lvconvert --raidintegrity y $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++cat $mnt/hello | grep "hello world" ++umount $mnt ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++cat $mnt/hello | grep "hello world" ++umount $mnt ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++vgremove -ff $vg ++aux cleanup_scsi_debug_dev ++sleep 1 ++ ++# scsi_debug devices with 4K LBS and 4K PBS ++aux prepare_scsi_debug_dev 256 sector_size=4096 ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "4096" ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "4096" ++aux prepare_devs 2 64 ++ ++vgcreate $vg "$dev1" "$dev2" ++blockdev --getss "$dev1" ++blockdev --getpbsz "$dev1" ++blockdev --getss "$dev2" ++blockdev --getpbsz "$dev2" ++ ++# add integrity while LV is inactive ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++echo "hello world" > $mnt/hello ++umount $mnt ++lvchange -an $vg ++lvconvert --raidintegrity y $vg/$lv1 ++lvchange -ay $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++cat $mnt/hello ++umount $mnt ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++# add integrity while LV is active, fs unmounted ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++echo "hello world" > $mnt/hello ++umount $mnt ++lvchange -an $vg ++lvchange -ay $vg ++lvconvert --raidintegrity y $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++cat $mnt/hello | grep "hello world" ++umount $mnt ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++# add integrity while LV is active, fs mounted ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++echo "hello world" > $mnt/hello ++lvconvert --raidintegrity y $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++cat $mnt/hello | grep "hello world" ++umount $mnt ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++cat $mnt/hello | grep "hello world" ++umount $mnt ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++vgremove -ff $vg ++aux cleanup_scsi_debug_dev ++sleep 1 ++ ++# scsi_debug devices with 512 LBS and 4K PBS ++aux prepare_scsi_debug_dev 256 sector_size=512 physblk_exp=3 ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "512" ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "4096" ++aux prepare_devs 2 64 ++ ++vgcreate $vg "$dev1" "$dev2" ++blockdev --getss "$dev1" ++blockdev --getpbsz "$dev1" ++blockdev --getss "$dev2" ++blockdev --getpbsz "$dev2" ++ ++# add integrity while LV is inactive ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++echo "hello world" > $mnt/hello ++umount $mnt ++lvchange -an $vg ++lvconvert --raidintegrity y 
$vg/$lv1 ++lvchange -ay $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++cat $mnt/hello ++umount $mnt ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++# add integrity while LV is active, fs unmounted ++# lvconvert will use ribs 512 to avoid increasing LBS from 512 to 4k on active LV ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++echo "hello world" > $mnt/hello ++umount $mnt ++lvchange -an $vg ++lvchange -ay $vg ++lvconvert --raidintegrity y $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++cat $mnt/hello | grep "hello world" ++umount $mnt ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++# add integrity while LV is active, fs mounted ++# lvconvert will use ribs 512 to avoid increasing LBS from 512 to 4k on active LV ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++echo "hello world" > $mnt/hello ++lvconvert --raidintegrity y $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++cat $mnt/hello | grep "hello world" ++umount $mnt ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++cat $mnt/hello | grep "hello world" ++umount $mnt ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++vgremove -ff $vg ++aux cleanup_scsi_debug_dev ++sleep 1 ++ +diff --git a/test/shell/integrity-blocksize.sh b/test/shell/integrity-blocksize.sh +index 444e3db..eb6a364 100644 +--- a/test/shell/integrity-blocksize.sh ++++ b/test/shell/integrity-blocksize.sh +@@ -48,9 +48,24 @@ aux extend_filter "a|$LOOP4|" + + aux lvmconf 'devices/scan = "/dev"' + ++mnt="mnt" ++mkdir -p $mnt ++ + vgcreate $vg1 $LOOP1 $LOOP2 + vgcreate $vg2 $LOOP3 $LOOP4 + ++# LOOP1/LOOP2 have LBS 512 and PBS 512 ++# LOOP3/LOOP4 have LBS 4K and PBS 4K ++ ++blockdev --getss $LOOP1 ++blockdev --getpbsz $LOOP1 ++blockdev --getss $LOOP2 ++blockdev --getpbsz $LOOP2 ++blockdev --getss $LOOP3 ++blockdev --getpbsz $LOOP3 ++blockdev --getss $LOOP4 ++blockdev --getpbsz $LOOP4 ++ + # lvcreate on dev512, result 512 + lvcreate --type raid1 -m1 --raidintegrity y -l 8 -n $lv1 $vg1 + pvck --dump metadata $LOOP1 | grep 'block_size = 512' +@@ -105,7 +120,11 @@ lvremove -y $vg2/$lv1 + lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1 + aux wipefs_a /dev/$vg1/$lv1 + mkfs.xfs -f "$DM_DEV_DIR/$vg1/$lv1" ++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"512\" + lvconvert --raidintegrity y $vg1/$lv1 ++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"512\" ++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt ++umount $mnt + pvck --dump metadata $LOOP1 | grep 'block_size = 512' + lvremove -y $vg1/$lv1 + +@@ -113,15 +132,37 @@ lvremove -y $vg1/$lv1 + lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2 + aux wipefs_a /dev/$vg2/$lv1 + mkfs.xfs -f "$DM_DEV_DIR/$vg2/$lv1" ++blkid "$DM_DEV_DIR/$vg2/$lv1" | grep BLOCK_SIZE=\"4096\" + lvconvert --raidintegrity y $vg2/$lv1 ++blkid "$DM_DEV_DIR/$vg2/$lv1" | grep BLOCK_SIZE=\"4096\" ++mount "$DM_DEV_DIR/$vg2/$lv1" $mnt ++umount $mnt + pvck --dump metadata $LOOP3 | grep 'block_size = 4096' + lvremove -y $vg2/$lv1 + +-# lvconvert on dev512, ext4 1024, result 1024 ++# lvconvert on dev512, ext4 1024, result 1024 (LV active when adding) ++lvcreate --type raid1 -m1 -l 8 -n $lv1 
$vg1 ++aux wipefs_a /dev/$vg1/$lv1 ++mkfs.ext4 -b 1024 "$DM_DEV_DIR/$vg1/$lv1" ++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"1024\" ++lvconvert --raidintegrity y $vg1/$lv1 ++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"1024\" ++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt ++umount $mnt ++pvck --dump metadata $LOOP1 | grep 'block_size = 512' ++lvremove -y $vg1/$lv1 ++ ++# lvconvert on dev512, ext4 1024, result 1024 (LV inactive when adding) + lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1 + aux wipefs_a /dev/$vg1/$lv1 + mkfs.ext4 -b 1024 "$DM_DEV_DIR/$vg1/$lv1" ++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"1024\" ++lvchange -an $vg1/$lv1 + lvconvert --raidintegrity y $vg1/$lv1 ++lvchange -ay $vg1/$lv1 ++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"1024\" ++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt ++umount $mnt + pvck --dump metadata $LOOP1 | grep 'block_size = 1024' + lvremove -y $vg1/$lv1 + +@@ -129,7 +170,11 @@ lvremove -y $vg1/$lv1 + lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2 + aux wipefs_a /dev/$vg2/$lv1 + mkfs.ext4 "$DM_DEV_DIR/$vg2/$lv1" ++blkid "$DM_DEV_DIR/$vg2/$lv1" | grep BLOCK_SIZE=\"4096\" + lvconvert --raidintegrity y $vg2/$lv1 ++blkid "$DM_DEV_DIR/$vg2/$lv1" | grep BLOCK_SIZE=\"4096\" ++mount "$DM_DEV_DIR/$vg2/$lv1" $mnt ++umount $mnt + pvck --dump metadata $LOOP3 | grep 'block_size = 4096' + lvremove -y $vg2/$lv1 + +@@ -137,7 +182,11 @@ lvremove -y $vg2/$lv1 + lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1 + aux wipefs_a /dev/$vg1/$lv1 + mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg1/$lv1" ++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"4096\" + lvconvert --raidintegrity y --raidintegrityblocksize 512 $vg1/$lv1 ++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"4096\" ++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt ++umount $mnt + pvck --dump metadata $LOOP1 | grep 'block_size = 512' + lvremove -y $vg1/$lv1 + +@@ -145,7 +194,14 @@ lvremove -y $vg1/$lv1 + lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1 + aux wipefs_a /dev/$vg1/$lv1 + mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg1/$lv1" ++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"4096\" ++lvchange -an $vg1/$lv1 ++# lv needs to be inactive to increase LBS from 512 + lvconvert --raidintegrity y --raidintegrityblocksize 1024 $vg1/$lv1 ++lvchange -ay $vg1/$lv1 ++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"4096\" ++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt ++umount $mnt + pvck --dump metadata $LOOP1 | grep 'block_size = 1024' + lvremove -y $vg1/$lv1 + +@@ -153,7 +209,11 @@ lvremove -y $vg1/$lv1 + lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1 + aux wipefs_a /dev/$vg1/$lv1 + mkfs.ext4 -b 1024 "$DM_DEV_DIR/$vg1/$lv1" ++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"1024\" + lvconvert --raidintegrity y --raidintegrityblocksize 512 $vg1/$lv1 ++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"1024\" ++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt ++umount $mnt + pvck --dump metadata $LOOP1 | grep 'block_size = 512' + lvremove -y $vg1/$lv1 + +@@ -164,10 +224,48 @@ mkfs.ext4 "$DM_DEV_DIR/$vg2/$lv1" + not lvconvert --raidintegrity y --raidintegrityblocksize 512 $vg2/$lv1 + lvremove -y $vg2/$lv1 + +-# FIXME: need to use scsi_debug to create devs with LBS 512 PBS 4k +-# FIXME: lvconvert, fsunknown, LBS 512, PBS 4k: result 512 +-# FIXME: lvconvert --bs 512, fsunknown, LBS 512, PBS 4k: result 512 +-# FIXME: lvconvert --bs 4k, fsunknown, LBS 512, PBS 4k: result 4k ++# TODO: need to use scsi_debug to create devs with LBS 512 PBS 4k ++# TODO: lvconvert, fsunknown, LBS 512, PBS 4k: result 512 ++# TODO: lvconvert --bs 512, fsunknown, LBS 512, PBS 4k: result 512 ++# 
TODO: lvconvert --bs 4k, fsunknown, LBS 512, PBS 4k: result 4k ++ ++# lvconvert on dev512, xfs 512, result 512, (detect fs with LV inactive) ++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1 ++aux wipefs_a /dev/$vg1/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg1/$lv1" ++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt ++echo "test" > $mnt/test ++umount $mnt ++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"512\" ++lvchange -an $vg1/$lv1 ++lvconvert --raidintegrity y $vg1/$lv1 ++lvchange -ay $vg1/$lv1 ++mount "$DM_DEV_DIR/$vg1/$lv1" $mnt ++cat $mnt/test ++umount $mnt ++blkid "$DM_DEV_DIR/$vg1/$lv1" | grep BLOCK_SIZE=\"512\" ++pvck --dump metadata $LOOP1 | grep 'block_size = 512' ++lvchange -an $vg1/$lv1 ++lvremove -y $vg1/$lv1 ++ ++# lvconvert on dev4k, xfs 4096, result 4096 (detect fs with LV inactive) ++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2 ++aux wipefs_a /dev/$vg2/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg2/$lv1" ++mount "$DM_DEV_DIR/$vg2/$lv1" $mnt ++echo "test" > $mnt/test ++umount $mnt ++blkid "$DM_DEV_DIR/$vg2/$lv1" | grep BLOCK_SIZE=\"4096\" ++lvchange -an $vg2/$lv1 ++lvconvert --raidintegrity y $vg2/$lv1 ++lvchange -ay $vg2/$lv1 ++mount "$DM_DEV_DIR/$vg2/$lv1" $mnt ++cat $mnt/test ++umount $mnt ++blkid "$DM_DEV_DIR/$vg2/$lv1" | grep BLOCK_SIZE=\"4096\" ++pvck --dump metadata $LOOP3 | grep 'block_size = 4096' ++lvchange -an $vg2/$lv1 ++lvremove -y $vg2/$lv1 + + vgremove -ff $vg1 + vgremove -ff $vg2 +diff --git a/test/shell/integrity-large.sh b/test/shell/integrity-large.sh +index 5aba80e..06b0e03 100644 +--- a/test/shell/integrity-large.sh ++++ b/test/shell/integrity-large.sh +@@ -23,7 +23,7 @@ mnt="mnt" + mkdir -p $mnt + + # raid1 LV needs to be extended to 512MB to test imeta being exended +-aux prepare_devs 4 600 ++aux prepare_devs 4 632 + + printf "%0.sA" {1..16384} >> fileA + printf "%0.sB" {1..16384} >> fileB +@@ -115,7 +115,10 @@ lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg + lvchange -an $vg/$lv1 + lvchange -ay $vg/$lv1 + _add_data_to_lv ++# lv needs to be inactive when adding integrity to increase LBS from 512 and get a ribs of 4k ++lvchange -an $vg/$lv1 + lvconvert --raidintegrity y $vg/$lv1 ++lvchange -ay $vg/$lv1 + _wait_recalc $vg/${lv1}_rimage_0 + _wait_recalc $vg/${lv1}_rimage_1 + lvs -a -o+devices $vg +@@ -128,8 +131,8 @@ _verify_data_on_lv + _wait_recalc $vg/${lv1}_rimage_0 + _wait_recalc $vg/${lv1}_rimage_1 + lvs -a -o+devices $vg +-check lv_field $vg/${lv1}_rimage_0_imeta size "8.00m" +-check lv_field $vg/${lv1}_rimage_1_imeta size "8.00m" ++check lv_field $vg/${lv1}_rimage_0_imeta size "12.00m" ++check lv_field $vg/${lv1}_rimage_1_imeta size "12.00m" + + # provide space to extend the images onto new devs + vgextend $vg "$dev3" "$dev4" +@@ -150,33 +153,35 @@ lvconvert --raidintegrity y $vg/$lv1 + _wait_recalc $vg/${lv1}_rimage_0 + _wait_recalc $vg/${lv1}_rimage_1 + lvs -a -o+devices $vg +-check lv_field $vg/${lv1}_rimage_0_imeta size "12.00m" +-check lv_field $vg/${lv1}_rimage_1_imeta size "12.00m" ++check lv_field $vg/${lv1}_rimage_0_imeta size "20.00m" ++check lv_field $vg/${lv1}_rimage_1_imeta size "20.00m" + + lvchange -an $vg/$lv1 + lvremove $vg/$lv1 + + # this succeeds because dev1,dev2 can hold rmeta+rimage + lvcreate --type raid1 -n $lv1 -L 592M -an $vg "$dev1" "$dev2" ++lvs -a -o+devices $vg ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 + + # this fails because dev1,dev2 can hold rmeta+rimage, but not imeta + # and we require imeta to be on same devs as rmeta/rimeta +-not lvcreate --type raid1 --raidintegrity y -n $lv1 -L 592M -an $vg "$dev1" "$dev2" ++not lvcreate --type raid1 
--raidintegrity y -n $lv1 -L 624M -an $vg "$dev1" "$dev2" + lvs -a -o+devices $vg +-lvremove $vg/$lv1 + + # this can allocate from more devs so there's enough space for imeta to + # be allocated in the vg, but lvcreate fails because rmeta+rimage are + # allocated from dev1,dev2, we restrict imeta to being allocated on the + # same devs as rmeta/rimage, and dev1,dev2 can't fit imeta. +-not lvcreate --type raid1 --raidintegrity y -n $lv1 -L 592M -an $vg ++not lvcreate --type raid1 --raidintegrity y -n $lv1 -L 624M -an $vg + lvs -a -o+devices $vg + + # counterintuitively, increasing the size will allow lvcreate to succeed + # because rmeta+rimage are pushed to being allocated on dev1,dev2,dev3,dev4 + # which means imeta is now free to be allocated from dev3,dev4 which have + # plenty of space +-lvcreate --type raid1 --raidintegrity y -n $lv1 -L 600M -an $vg ++lvcreate --type raid1 --raidintegrity y -n $lv1 -L 640M -an $vg + lvs -a -o+devices $vg + + vgremove -ff $vg +diff --git a/test/shell/integrity-misc.sh b/test/shell/integrity-misc.sh +index 0d05689..2dae25f 100644 +--- a/test/shell/integrity-misc.sh ++++ b/test/shell/integrity-misc.sh +@@ -95,7 +95,7 @@ _sync_percent() { + get lv_field "$checklv" sync_percent | cut -d. -f1 + } + +-_wait_recalc() { ++_wait_sync() { + local checklv=$1 + + for i in $(seq 1 10) ; do +@@ -124,8 +124,9 @@ _wait_recalc() { + # lvrename + _prepare_vg + lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg +-_wait_recalc $vg/${lv1}_rimage_0 +-_wait_recalc $vg/${lv1}_rimage_1 ++_wait_sync $vg/${lv1}_rimage_0 ++_wait_sync $vg/${lv1}_rimage_1 ++_wait_sync $vg/$lv1 + _add_new_data_to_mnt + umount $mnt + lvrename $vg/$lv1 $vg/$lv2 +@@ -141,8 +142,9 @@ vgremove -ff $vg + # lv must be active + _prepare_vg + lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" +-_wait_recalc $vg/${lv1}_rimage_0 +-_wait_recalc $vg/${lv1}_rimage_1 ++_wait_sync $vg/${lv1}_rimage_0 ++_wait_sync $vg/${lv1}_rimage_1 ++_wait_sync $vg/$lv1 + _add_new_data_to_mnt + lvconvert --replace "$dev1" $vg/$lv1 "$dev3" + lvs -a -o+devices $vg > out +@@ -162,8 +164,9 @@ vgremove -ff $vg + # same as prev but with bitmap mode + _prepare_vg + lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg "$dev1" "$dev2" +-_wait_recalc $vg/${lv1}_rimage_0 +-_wait_recalc $vg/${lv1}_rimage_1 ++_wait_sync $vg/${lv1}_rimage_0 ++_wait_sync $vg/${lv1}_rimage_1 ++_wait_sync $vg/$lv1 + _add_new_data_to_mnt + lvconvert --replace "$dev1" $vg/$lv1 "$dev3" + lvs -a -o+devices $vg > out +@@ -185,8 +188,9 @@ vgremove -ff $vg + # (like lvconvert --replace does for a dev that's not missing). 
+ _prepare_vg + lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" +-_wait_recalc $vg/${lv1}_rimage_0 +-_wait_recalc $vg/${lv1}_rimage_1 ++_wait_sync $vg/${lv1}_rimage_0 ++_wait_sync $vg/${lv1}_rimage_1 ++_wait_sync $vg/$lv1 + _add_new_data_to_mnt + aux disable_dev "$dev2" + lvs -a -o+devices $vg > out +@@ -213,8 +217,9 @@ vgremove -ff $vg + + _prepare_vg + lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" +-_wait_recalc $vg/${lv1}_rimage_0 +-_wait_recalc $vg/${lv1}_rimage_1 ++_wait_sync $vg/${lv1}_rimage_0 ++_wait_sync $vg/${lv1}_rimage_1 ++_wait_sync $vg/$lv1 + _add_new_data_to_mnt + umount $mnt + lvchange -an $vg/$lv1 +diff --git a/test/shell/integrity.sh b/test/shell/integrity.sh +index 77e9430..0143129 100644 +--- a/test/shell/integrity.sh ++++ b/test/shell/integrity.sh +@@ -78,14 +78,14 @@ _test_fs_with_error() { + dd if=$mnt/fileA of=tmp bs=1k + ls -l tmp + stat -c %s tmp +- diff fileA tmp ++ cmp -b fileA tmp + rm tmp + + # read partial fileB which was corrupted + not dd if=$mnt/fileB of=tmp bs=1k + ls -l tmp + stat -c %s tmp | grep 12288 +- not diff fileB tmp ++ not cmp -b fileB tmp + rm tmp + + umount $mnt +@@ -118,14 +118,14 @@ _test_fs_with_raid() { + dd if=$mnt/fileA of=tmp bs=1k + ls -l tmp + stat -c %s tmp | grep 16384 +- diff fileA tmp ++ cmp -b fileA tmp + rm tmp + + # read complete fileB, corruption is corrected by raid + dd if=$mnt/fileB of=tmp bs=1k + ls -l tmp + stat -c %s tmp | grep 16384 +- diff fileB tmp ++ cmp -b fileB tmp + rm tmp + + umount $mnt +@@ -161,15 +161,15 @@ _add_more_data_to_mnt() { + } + + _verify_data_on_mnt() { +- diff randA $mnt/randA +- diff randB $mnt/randB +- diff randC $mnt/randC +- diff fileA $mnt/1/fileA +- diff fileB $mnt/1/fileB +- diff fileC $mnt/1/fileC +- diff fileA $mnt/2/fileA +- diff fileB $mnt/2/fileB +- diff fileC $mnt/2/fileC ++ cmp -b randA $mnt/randA ++ cmp -b randB $mnt/randB ++ cmp -b randC $mnt/randC ++ cmp -b fileA $mnt/1/fileA ++ cmp -b fileB $mnt/1/fileB ++ cmp -b fileC $mnt/1/fileC ++ cmp -b fileA $mnt/2/fileA ++ cmp -b fileB $mnt/2/fileB ++ cmp -b fileC $mnt/2/fileC + } + + _verify_data_on_lv() { +@@ -221,6 +221,8 @@ _wait_recalc() { + + _prepare_vg + lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 + _test_fs_with_raid + lvchange -an $vg/$lv1 + lvconvert --raidintegrity n $vg/$lv1 +@@ -229,6 +231,9 @@ vgremove -ff $vg + + _prepare_vg + lvcreate --type raid1 -m2 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_wait_recalc $vg/${lv1}_rimage_2 + _test_fs_with_raid + lvchange -an $vg/$lv1 + lvconvert --raidintegrity n $vg/$lv1 +@@ -237,6 +242,9 @@ vgremove -ff $vg + + _prepare_vg + lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_wait_recalc $vg/${lv1}_rimage_2 + _test_fs_with_raid + lvchange -an $vg/$lv1 + lvconvert --raidintegrity n $vg/$lv1 +@@ -245,6 +253,9 @@ vgremove -ff $vg + + _prepare_vg + lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_wait_recalc $vg/${lv1}_rimage_2 + _test_fs_with_raid + lvchange -an $vg/$lv1 + lvconvert --raidintegrity n $vg/$lv1 +@@ -253,6 +264,11 @@ vgremove -ff $vg + + _prepare_vg + lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_wait_recalc $vg/${lv1}_rimage_2 
++_wait_recalc $vg/${lv1}_rimage_3 ++_wait_recalc $vg/${lv1}_rimage_4 + _test_fs_with_raid + lvchange -an $vg/$lv1 + lvconvert --raidintegrity n $vg/$lv1 +@@ -261,6 +277,10 @@ vgremove -ff $vg + + _prepare_vg + lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_wait_recalc $vg/${lv1}_rimage_2 ++_wait_recalc $vg/${lv1}_rimage_3 + _test_fs_with_raid + lvchange -an $vg/$lv1 + lvconvert --raidintegrity n $vg/$lv1 +diff --git a/test/shell/lvconvert-m-raid1-degraded.sh b/test/shell/lvconvert-m-raid1-degraded.sh +index 05c3e89..c3f7085 100644 +--- a/test/shell/lvconvert-m-raid1-degraded.sh ++++ b/test/shell/lvconvert-m-raid1-degraded.sh +@@ -33,8 +33,10 @@ aux disable_dev "$dev1" + vgreduce --force --removemissing $vg + check raid_leg_status $vg $lv "DA" + +-# Conversion to 2 legs must fail on degraded 2-legged raid1 LV +-not lvconvert -y -m1 $vg/$lv ++# Conversion to 2 legs does nothing on degraded 2-legged raid1 LV ++lvconvert -y -m1 $vg/$lv 2>&1 | tee out ++grep "already has 2 images" out ++# Check it remains degraded after the successful "conversion" + check raid_leg_status $vg $lv "DA" + + # Repair has to succeed +diff --git a/test/shell/lvcreate-signature-wiping.sh b/test/shell/lvcreate-signature-wiping.sh +index 73fea54..18d7a2f 100644 +--- a/test/shell/lvcreate-signature-wiping.sh ++++ b/test/shell/lvcreate-signature-wiping.sh +@@ -42,6 +42,13 @@ init_lv_ + test_blkid_ || skip + lvremove -f $vg/$lv1 + ++# Zeroing stops the command when there is a failure (write error in this case) ++aux error_dev "$dev1" "$(get first_extent_sector "$dev1"):2" ++not lvcreate -l1 -n $lv1 $vg 2>&1 | tee out ++grep "Failed to initialize" out ++aux enable_dev "$dev1" ++ ++ + aux lvmconf "allocation/wipe_signatures_when_zeroing_new_lvs = 0" + + lvcreate -y -Zn -l1 -n $lv1 $vg 2>&1 | tee out +diff --git a/test/shell/lvcreate-thin.sh b/test/shell/lvcreate-thin.sh +index 9ca7f11..c073eaf 100644 +--- a/test/shell/lvcreate-thin.sh ++++ b/test/shell/lvcreate-thin.sh +@@ -248,4 +248,25 @@ not lvcreate -s $vg/lv1 -L4M -V2G --name $vg/lv4 + not lvcreate -T mirpool -L4M --alloc anywhere -m1 $vg + not lvcreate --thinpool mirpool -L4M --alloc anywhere -m1 $vg + ++ ++# Check pool metadata volume is zeroed, when zero_metadata is enabled. ++# 1st. ensure 8megs of both PVs will have some non-0 data ++lvcreate -L8m -n $lv1 $vg "$dev1" ++lvextend -L+8m $vg/$lv1 "$dev2" ++dd if=/dev/urandom of="$DM_DEV_DIR/$vg/$lv1" bs=1M count=16 oflag=direct conv=fdatasync ++lvremove -ff $vg/$lv1 ++ ++lvcreate -l1 --poolmetadatasize 4m --conf 'allocation/zero_metadata=1' -vvvv -T $vg/pool ++lvchange -an $vg ++# component activation to check device was zeroed ++lvchange -y -ay $vg/pool_tmeta ++dd if="$DM_DEV_DIR/$vg/pool_tmeta" of=file bs=1M count=3 skip=1 iflag=direct conv=fdatasync ++ ++md5sum -b file | tee out ++# md5sum of 3M of zeros ++grep d1dd210d6b1312cb342b56d02bd5e651 out ++lvchange -an $vg ++lvremove -ff $vg ++ ++ + vgremove -ff $vg +diff --git a/test/shell/writecache-blocksize.sh b/test/shell/writecache-blocksize.sh +new file mode 100644 +index 0000000..1300176 +--- /dev/null ++++ b/test/shell/writecache-blocksize.sh +@@ -0,0 +1,342 @@ ++#!/usr/bin/env bash ++ ++# Copyright (C) 2018 Red Hat, Inc. All rights reserved. ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions ++# of the GNU General Public License v.2. 
++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write to the Free Software Foundation, ++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ ++# Test writecache usage ++ ++SKIP_WITH_LVMPOLLD=1 ++ ++. lib/inittest ++ ++aux have_writecache 1 0 0 || skip ++which mkfs.xfs || skip ++ ++# Tests with fs block sizes require a libblkid version that shows BLOCK_SIZE ++aux prepare_devs 1 ++vgcreate $vg "$dev1" ++lvcreate -n $lv1 -l8 $vg ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++blkid "$DM_DEV_DIR/$vg/$lv1" | grep BLOCK_SIZE || skip ++lvchange -an $vg ++vgremove -ff $vg ++aux cleanup_scsi_debug_dev ++ ++mnt="mnt" ++mkdir -p $mnt ++ ++for i in `seq 1 16384`; do echo -n "A" >> fileA; done ++for i in `seq 1 16384`; do echo -n "B" >> fileB; done ++for i in `seq 1 16384`; do echo -n "C" >> fileC; done ++ ++# generate random data ++dd if=/dev/urandom of=randA bs=512K count=2 ++dd if=/dev/urandom of=randB bs=512K count=3 ++dd if=/dev/urandom of=randC bs=512K count=4 ++ ++_add_new_data_to_mnt() { ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ ++ # add original data ++ cp randA $mnt ++ cp randB $mnt ++ cp randC $mnt ++ mkdir $mnt/1 ++ cp fileA $mnt/1 ++ cp fileB $mnt/1 ++ cp fileC $mnt/1 ++ mkdir $mnt/2 ++ cp fileA $mnt/2 ++ cp fileB $mnt/2 ++ cp fileC $mnt/2 ++ sync ++} ++ ++_add_more_data_to_mnt() { ++ mkdir $mnt/more ++ cp fileA $mnt/more ++ cp fileB $mnt/more ++ cp fileC $mnt/more ++ cp randA $mnt/more ++ cp randB $mnt/more ++ cp randC $mnt/more ++ sync ++} ++ ++_verify_data_on_mnt() { ++ diff randA $mnt/randA ++ diff randB $mnt/randB ++ diff randC $mnt/randC ++ diff fileA $mnt/1/fileA ++ diff fileB $mnt/1/fileB ++ diff fileC $mnt/1/fileC ++ diff fileA $mnt/2/fileA ++ diff fileB $mnt/2/fileB ++ diff fileC $mnt/2/fileC ++} ++ ++_verify_more_data_on_mnt() { ++ diff randA $mnt/more/randA ++ diff randB $mnt/more/randB ++ diff randC $mnt/more/randC ++ diff fileA $mnt/more/fileA ++ diff fileB $mnt/more/fileB ++ diff fileC $mnt/more/fileC ++} ++ ++_verify_data_on_lv() { ++ lvchange -ay $vg/$lv1 ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ _verify_data_on_mnt ++ rm $mnt/randA ++ rm $mnt/randB ++ rm $mnt/randC ++ rm -rf $mnt/1 ++ rm -rf $mnt/2 ++ umount $mnt ++ lvchange -an $vg/$lv1 ++} ++ ++# the default is brd ram devs with 512 LBS 4K PBS ++aux prepare_devs 2 64 ++ ++blockdev --getss "$dev1" ++blockdev --getpbsz "$dev1" ++blockdev --getss "$dev2" ++blockdev --getpbsz "$dev2" ++ ++# lbs 512, pbs 4k, xfs 4k, wc 4k ++vgcreate $SHARED $vg "$dev1" ++vgextend $vg "$dev2" ++lvcreate -n $lv1 -l 8 -an $vg "$dev1" ++lvcreate -n $lv2 -l 4 -an $vg "$dev2" ++lvchange -ay $vg/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" |tee out ++grep sectsz=4096 out ++_add_new_data_to_mnt ++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1 ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out ++grep 4096 out ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++_add_more_data_to_mnt ++_verify_data_on_mnt ++lvconvert --splitcache $vg/$lv1 ++check lv_field $vg/$lv1 segtype linear ++check lv_field $vg/$lv2 segtype linear ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++_verify_data_on_mnt ++_verify_more_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++lvchange -an $vg/$lv2 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++lvremove $vg/$lv2 ++vgremove $vg ++ ++# lbs 512, pbs 4k, xfs -s 512, wc 512 ++vgcreate $SHARED $vg "$dev1" ++vgextend $vg "$dev2" ++lvcreate -n $lv1 -l 8 -an $vg "$dev1" ++lvcreate -n $lv2 -l 4 -an $vg "$dev2" ++lvchange -ay 
$vg/$lv1 ++mkfs.xfs -f -s size=512 "$DM_DEV_DIR/$vg/$lv1" |tee out ++grep sectsz=512 out ++_add_new_data_to_mnt ++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1 ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out ++grep 512 out ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++_add_more_data_to_mnt ++_verify_data_on_mnt ++lvconvert --splitcache $vg/$lv1 ++check lv_field $vg/$lv1 segtype linear ++check lv_field $vg/$lv2 segtype linear ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++_verify_data_on_mnt ++_verify_more_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++lvchange -an $vg/$lv2 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++lvremove $vg/$lv2 ++vgremove $vg ++ ++aux cleanup_scsi_debug_dev ++sleep 1 ++ ++ ++# scsi_debug devices with 512 LBS 512 PBS ++aux prepare_scsi_debug_dev 256 ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "512" ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "512" ++aux prepare_devs 2 64 ++ ++blockdev --getss "$dev1" ++blockdev --getpbsz "$dev1" ++blockdev --getss "$dev2" ++blockdev --getpbsz "$dev2" ++ ++# lbs 512, pbs 512, xfs 512, wc 512 ++vgcreate $SHARED $vg "$dev1" ++vgextend $vg "$dev2" ++lvcreate -n $lv1 -l 8 -an $vg "$dev1" ++lvcreate -n $lv2 -l 4 -an $vg "$dev2" ++lvchange -ay $vg/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" |tee out ++grep sectsz=512 out ++_add_new_data_to_mnt ++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1 ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out ++grep 512 out ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++_add_more_data_to_mnt ++_verify_data_on_mnt ++lvconvert --splitcache $vg/$lv1 ++check lv_field $vg/$lv1 segtype linear ++check lv_field $vg/$lv2 segtype linear ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++_verify_data_on_mnt ++_verify_more_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++lvchange -an $vg/$lv2 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++lvremove $vg/$lv2 ++vgremove $vg ++ ++# lbs 512, pbs 512, xfs -s 4096, wc 4096 ++vgcreate $SHARED $vg "$dev1" ++vgextend $vg "$dev2" ++lvcreate -n $lv1 -l 8 -an $vg "$dev1" ++lvcreate -n $lv2 -l 4 -an $vg "$dev2" ++lvchange -ay $vg/$lv1 ++mkfs.xfs -s size=4096 -f "$DM_DEV_DIR/$vg/$lv1" |tee out ++grep sectsz=4096 out ++_add_new_data_to_mnt ++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1 ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out ++grep 4096 out ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++_add_more_data_to_mnt ++_verify_data_on_mnt ++lvconvert --splitcache $vg/$lv1 ++check lv_field $vg/$lv1 segtype linear ++check lv_field $vg/$lv2 segtype linear ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++_verify_data_on_mnt ++_verify_more_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++lvchange -an $vg/$lv2 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++lvremove $vg/$lv2 ++vgremove $vg ++ ++aux cleanup_scsi_debug_dev ++sleep 1 ++ ++ ++# scsi_debug devices with 512 LBS and 4K PBS ++aux prepare_scsi_debug_dev 256 sector_size=512 physblk_exp=3 ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "512" ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "4096" ++aux prepare_devs 2 64 ++ ++blockdev --getss "$dev1" ++blockdev --getpbsz "$dev1" ++blockdev --getss "$dev2" ++blockdev --getpbsz "$dev2" ++ ++# lbs 512, pbs 4k, xfs 4k, wc 4k ++vgcreate $SHARED $vg "$dev1" ++vgextend $vg "$dev2" ++lvcreate -n $lv1 -l 8 -an $vg "$dev1" ++lvcreate -n $lv2 -l 4 -an $vg "$dev2" ++lvchange -ay 
$vg/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" |tee out ++grep sectsz=4096 out ++_add_new_data_to_mnt ++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1 ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out ++grep 4096 out ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++_add_more_data_to_mnt ++_verify_data_on_mnt ++lvconvert --splitcache $vg/$lv1 ++check lv_field $vg/$lv1 segtype linear ++check lv_field $vg/$lv2 segtype linear ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++_verify_data_on_mnt ++_verify_more_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++lvchange -an $vg/$lv2 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++lvremove $vg/$lv2 ++vgremove $vg ++ ++aux cleanup_scsi_debug_dev ++sleep 1 ++ ++ ++# scsi_debug devices with 4K LBS and 4K PBS ++aux prepare_scsi_debug_dev 256 sector_size=4096 ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "4096" ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "4096" ++aux prepare_devs 2 64 ++ ++blockdev --getss "$dev1" ++blockdev --getpbsz "$dev1" ++blockdev --getss "$dev2" ++blockdev --getpbsz "$dev2" ++ ++# lbs 4k, pbs 4k, xfs 4k, wc 4k ++vgcreate $SHARED $vg "$dev1" ++vgextend $vg "$dev2" ++lvcreate -n $lv1 -l 8 -an $vg "$dev1" ++lvcreate -n $lv2 -l 4 -an $vg "$dev2" ++lvchange -ay $vg/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" |tee out ++grep sectsz=4096 out ++_add_new_data_to_mnt ++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1 ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out ++grep 4096 out ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++_add_more_data_to_mnt ++_verify_data_on_mnt ++lvconvert --splitcache $vg/$lv1 ++check lv_field $vg/$lv1 segtype linear ++check lv_field $vg/$lv2 segtype linear ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++_verify_data_on_mnt ++_verify_more_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++lvchange -an $vg/$lv2 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++lvremove $vg/$lv2 ++vgremove $vg ++ ++aux cleanup_scsi_debug_dev ++ ++ +diff --git a/test/shell/writecache-large.sh b/test/shell/writecache-large.sh +new file mode 100644 +index 0000000..b52eaf6 +--- /dev/null ++++ b/test/shell/writecache-large.sh +@@ -0,0 +1,153 @@ ++#!/usr/bin/env bash ++ ++# Copyright (C) 2018 Red Hat, Inc. All rights reserved. ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions ++# of the GNU General Public License v.2. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write to the Free Software Foundation, ++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ ++# Test writecache usage ++ ++SKIP_WITH_LVMPOLLD=1 ++ ++. 
lib/inittest ++ ++aux have_writecache 1 0 0 || skip ++which mkfs.xfs || skip ++ ++# scsi_debug devices with 512 LBS 512 PBS ++aux prepare_scsi_debug_dev 1200 ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "512" ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "512" ++ ++aux prepare_devs 2 600 ++blockdev --getss "$dev1" ++blockdev --getpbsz "$dev1" ++blockdev --getss "$dev2" ++blockdev --getpbsz "$dev2" ++ ++mnt="mnt" ++mkdir -p $mnt ++ ++for i in `seq 1 16384`; do echo -n "A" >> fileA; done ++for i in `seq 1 16384`; do echo -n "B" >> fileB; done ++for i in `seq 1 16384`; do echo -n "C" >> fileC; done ++ ++# generate random data ++dd if=/dev/urandom of=randA bs=512K count=2 ++dd if=/dev/urandom of=randB bs=512K count=3 ++dd if=/dev/urandom of=randC bs=512K count=4 ++ ++_add_new_data_to_mnt() { ++ mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++ ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ ++ # add original data ++ cp randA $mnt ++ cp randB $mnt ++ cp randC $mnt ++ mkdir $mnt/1 ++ cp fileA $mnt/1 ++ cp fileB $mnt/1 ++ cp fileC $mnt/1 ++ mkdir $mnt/2 ++ cp fileA $mnt/2 ++ cp fileB $mnt/2 ++ cp fileC $mnt/2 ++ sync ++} ++ ++_add_more_data_to_mnt() { ++ mkdir $mnt/more ++ cp fileA $mnt/more ++ cp fileB $mnt/more ++ cp fileC $mnt/more ++ cp randA $mnt/more ++ cp randB $mnt/more ++ cp randC $mnt/more ++ sync ++} ++ ++_verify_data_on_mnt() { ++ diff randA $mnt/randA ++ diff randB $mnt/randB ++ diff randC $mnt/randC ++ diff fileA $mnt/1/fileA ++ diff fileB $mnt/1/fileB ++ diff fileC $mnt/1/fileC ++ diff fileA $mnt/2/fileA ++ diff fileB $mnt/2/fileB ++ diff fileC $mnt/2/fileC ++} ++ ++_verify_more_data_on_mnt() { ++ diff randA $mnt/more/randA ++ diff randB $mnt/more/randB ++ diff randC $mnt/more/randC ++ diff fileA $mnt/more/fileA ++ diff fileB $mnt/more/fileB ++ diff fileC $mnt/more/fileC ++} ++ ++_verify_data_on_lv() { ++ lvchange -ay $vg/$lv1 ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ _verify_data_on_mnt ++ rm $mnt/randA ++ rm $mnt/randB ++ rm $mnt/randC ++ rm -rf $mnt/1 ++ rm -rf $mnt/2 ++ umount $mnt ++ lvchange -an $vg/$lv1 ++} ++ ++vgcreate $SHARED $vg "$dev1" ++vgextend $vg "$dev2" ++ ++# Use a large enough size so that the cleaner will not ++# finish immediately when detaching, and will require ++# a secondary check from command top level. 
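
For reference, the logical/physical block-size combinations exercised by these writecache tests can be reproduced outside the lvm2 test harness with the scsi_debug module directly. A minimal sketch, assuming the module is available; the device-discovery one-liner is an assumption (it keys off the "scsi_debug" model string) and is not taken from the patch:

    # 512-byte logical, 4096-byte physical sectors (physblk_exp=3 means 512 << 3)
    modprobe scsi_debug dev_size_mb=256 sector_size=512 physblk_exp=3
    dev=$(grep -l scsi_debug /sys/block/sd*/device/model 2>/dev/null | head -1 | cut -d/ -f4)
    cat /sys/block/$dev/queue/logical_block_size     # expect 512
    cat /sys/block/$dev/queue/physical_block_size    # expect 4096
    blockdev --getss  "/dev/$dev"                    # same values the tests print
    blockdev --getpbsz "/dev/$dev"
    # use sector_size=4096 for a native 4K device, or omit both for 512/512

The tests then layer an LV and xfs on such a device and check that the sector size reported by mkfs.xfs (sectsz=...) matches the writecache block size chosen at lvconvert time.
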
++ ++lvcreate -n $lv1 -L 560M -an $vg "$dev1" ++lvcreate -n $lv2 -L 500M -an $vg "$dev2" ++ ++lvchange -ay $vg/$lv1 ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++ ++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1 ++dmsetup table $vg-$lv1 ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++ ++_add_new_data_to_mnt ++_add_more_data_to_mnt ++_verify_data_on_mnt ++ ++dd if=/dev/zero of=$mnt/big1 bs=1M count=100 oflag=sync ++dd if=/dev/zero of=$mnt/big2 bs=1M count=100 oflag=sync ++dd if=/dev/zero of=$mnt/big3 bs=1M count=100 oflag=sync ++dd if=/dev/zero of=$mnt/big4 bs=1M count=100 oflag=sync ++ ++lvconvert --splitcache $vg/$lv1 ++check lv_field $vg/$lv1 segtype linear ++check lv_field $vg/$lv2 segtype linear ++dmsetup table $vg-$lv1 ++_verify_data_on_mnt ++_verify_more_data_on_mnt ++dd if=$mnt/big4 of=/dev/null bs=1M count=100 ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvchange -an $vg/$lv2 ++lvremove $vg/$lv1 ++lvremove $vg/$lv2 ++ ++vgremove -ff $vg ++ +diff --git a/test/shell/writecache-split.sh b/test/shell/writecache-split.sh +index 0f2dc47..e615e2a 100644 +--- a/test/shell/writecache-split.sh ++++ b/test/shell/writecache-split.sh +@@ -20,29 +20,21 @@ mkfs_mount_umount() + { + lvt=$1 + +- lvchange -ay $vg/$lvt +- + mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lvt" + mount "$DM_DEV_DIR/$vg/$lvt" "$mount_dir" + cp pattern1 "$mount_dir/pattern1" + dd if=/dev/zero of="$mount_dir/zeros2M" bs=1M count=32 conv=fdatasync + umount "$mount_dir" +- +- lvchange -an $vg/$lvt + } + + mount_umount() + { + lvt=$1 + +- lvchange -ay $vg/$lvt +- + mount "$DM_DEV_DIR/$vg/$lvt" "$mount_dir" + diff pattern1 "$mount_dir/pattern1" + dd if="$mount_dir/zeros2M" of=/dev/null bs=1M count=32 + umount "$mount_dir" +- +- lvchange -an $vg/$lvt + } + + aux have_writecache 1 0 0 || skip +@@ -62,18 +54,38 @@ lvcreate -n $lv1 -l 16 -an $vg "$dev1" "$dev4" + lvcreate -n $lv2 -l 4 -an $vg "$dev2" + + # +-# split when no devs are missing ++# split while inactive + # + + lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1 + ++lvchange -ay $vg/$lv1 + mkfs_mount_umount $lv1 ++lvchange -an $vg/$lv1 + + lvconvert --splitcache $vg/$lv1 + lvs -o segtype $vg/$lv1 | grep linear + lvs -o segtype $vg/$lv2 | grep linear + ++lvchange -ay $vg/$lv1 + mount_umount $lv1 ++lvchange -an $vg/$lv1 ++ ++# ++# split while active ++# ++ ++lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1 ++ ++lvchange -ay $vg/$lv1 ++mkfs_mount_umount $lv1 ++ ++lvconvert --splitcache $vg/$lv1 ++lvs -o segtype $vg/$lv1 | grep linear ++lvs -o segtype $vg/$lv2 | grep linear ++ ++mount_umount $lv1 ++lvchange -an $vg/$lv1 + + # + # split while cachevol is missing +@@ -81,7 +93,9 @@ mount_umount $lv1 + + lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1 + ++lvchange -ay $vg/$lv1 + mkfs_mount_umount $lv1 ++lvchange -an $vg/$lv1 + + aux disable_dev "$dev2" + +@@ -108,7 +122,9 @@ lvcreate -n $lv2 -l 14 -an $vg "$dev2" "$dev3" + + lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1 + ++lvchange -ay $vg/$lv1 + mkfs_mount_umount $lv1 ++lvchange -an $vg/$lv1 + + aux disable_dev "$dev3" + +diff --git a/test/shell/writecache.sh b/test/shell/writecache.sh +index 8852e93..39ef319 100644 +--- a/test/shell/writecache.sh ++++ b/test/shell/writecache.sh +@@ -19,152 +19,251 @@ SKIP_WITH_LVMPOLLD=1 + aux have_writecache 1 0 0 || skip + which mkfs.xfs || skip + +-mount_dir="mnt" +-mkdir -p $mount_dir ++# scsi_debug devices with 512 LBS 512 PBS ++aux 
prepare_scsi_debug_dev 256 ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "512" ++check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "512" ++aux prepare_devs 2 64 ++ ++# scsi_debug devices with 512 LBS and 4K PBS ++#aux prepare_scsi_debug_dev 256 sector_size=512 physblk_exp=3 ++#check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "512" ++#check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "4096" ++#aux prepare_devs 2 64 ++ ++# loop devs with 512 LBS and 512 PBS ++#dd if=/dev/zero of=loopa bs=$((1024*1024)) count=64 2> /dev/null ++#dd if=/dev/zero of=loopb bs=$((1024*1024)) count=64 2> /dev/null ++#LOOP1=$(losetup -f loopa --show) ++#LOOP2=$(losetup -f loopb --show) ++#aux extend_filter "a|$LOOP1|" ++#aux extend_filter "a|$LOOP2|" ++#aux lvmconf 'devices/scan = "/dev"' ++#dev1=$LOOP1 ++#dev2=$LOOP2 ++ ++# loop devs with 4096 LBS and 4096 PBS ++#dd if=/dev/zero of=loopa bs=$((1024*1024)) count=64 2> /dev/null ++#dd if=/dev/zero of=loopb bs=$((1024*1024)) count=64 2> /dev/null ++#LOOP1=$(losetup -f loopa --sector-size 4096 --show) ++#LOOP2=$(losetup -f loopb --sector-size 4096 --show) ++#aux extend_filter "a|$LOOP1|" ++#aux extend_filter "a|$LOOP2|" ++#aux lvmconf 'devices/scan = "/dev"' ++#dev1=$LOOP1 ++#dev2=$LOOP2 ++ ++# the default is brd ram devs with 512 LBS 4K PBS ++# aux prepare_devs 2 64 ++ ++blockdev --getss "$dev1" ++blockdev --getpbsz "$dev1" ++blockdev --getss "$dev2" ++blockdev --getpbsz "$dev2" ++ ++ ++mnt="mnt" ++mkdir -p $mnt ++ ++for i in `seq 1 16384`; do echo -n "A" >> fileA; done ++for i in `seq 1 16384`; do echo -n "B" >> fileB; done ++for i in `seq 1 16384`; do echo -n "C" >> fileC; done + + # generate random data +-dd if=/dev/urandom of=pattern1 bs=512K count=1 ++dd if=/dev/urandom of=randA bs=512K count=2 ++dd if=/dev/urandom of=randB bs=512K count=3 ++dd if=/dev/urandom of=randC bs=512K count=4 ++ ++_add_new_data_to_mnt() { ++ mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++ ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ ++ # add original data ++ cp randA $mnt ++ cp randB $mnt ++ cp randC $mnt ++ mkdir $mnt/1 ++ cp fileA $mnt/1 ++ cp fileB $mnt/1 ++ cp fileC $mnt/1 ++ mkdir $mnt/2 ++ cp fileA $mnt/2 ++ cp fileB $mnt/2 ++ cp fileC $mnt/2 ++ sync ++} ++ ++_add_more_data_to_mnt() { ++ mkdir $mnt/more ++ cp fileA $mnt/more ++ cp fileB $mnt/more ++ cp fileC $mnt/more ++ cp randA $mnt/more ++ cp randB $mnt/more ++ cp randC $mnt/more ++ sync ++} ++ ++_verify_data_on_mnt() { ++ diff randA $mnt/randA ++ diff randB $mnt/randB ++ diff randC $mnt/randC ++ diff fileA $mnt/1/fileA ++ diff fileB $mnt/1/fileB ++ diff fileC $mnt/1/fileC ++ diff fileA $mnt/2/fileA ++ diff fileB $mnt/2/fileB ++ diff fileC $mnt/2/fileC ++} ++ ++_verify_more_data_on_mnt() { ++ diff randA $mnt/more/randA ++ diff randB $mnt/more/randB ++ diff randC $mnt/more/randC ++ diff fileA $mnt/more/fileA ++ diff fileB $mnt/more/fileB ++ diff fileC $mnt/more/fileC ++} ++ ++_verify_data_on_lv() { ++ lvchange -ay $vg/$lv1 ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ _verify_data_on_mnt ++ rm $mnt/randA ++ rm $mnt/randB ++ rm $mnt/randC ++ rm -rf $mnt/1 ++ rm -rf $mnt/2 ++ umount $mnt ++ lvchange -an $vg/$lv1 ++} + +-aux prepare_devs 2 64 + + vgcreate $SHARED $vg "$dev1" +- + vgextend $vg "$dev2" + +-lvcreate -n $lv1 -l 8 -an $vg "$dev1" +- +-lvcreate -n $lv2 -l 4 -an $vg "$dev2" ++blockdev --getss "$dev1" ++blockdev --getpbsz "$dev1" ++blockdev --getss "$dev2" ++blockdev --getpbsz "$dev2" + +-# test1: create fs on LV before writecache is attached ++# Test attach while inactive, detach while inactive ++# 
create fs on LV before writecache is attached + ++lvcreate -n $lv1 -l 8 -an $vg "$dev1" ++lvcreate -n $lv2 -l 4 -an $vg "$dev2" + lvchange -ay $vg/$lv1 +- +-mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1" +- +-mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir +- +-cp pattern1 $mount_dir/pattern1 +- +-umount $mount_dir ++_add_new_data_to_mnt ++umount $mnt + lvchange -an $vg/$lv1 +- + lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1 +- + check lv_field $vg/$lv1 segtype writecache +- + lvs -a $vg/${lv2}_cvol --noheadings -o segtype >out + grep linear out +- + lvchange -ay $vg/$lv1 +- +-mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir +- +-diff pattern1 $mount_dir/pattern1 +- +-cp pattern1 $mount_dir/pattern1b +- +-ls -l $mount_dir +- +-umount $mount_dir +- ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++_add_more_data_to_mnt ++_verify_data_on_mnt ++_verify_more_data_on_mnt ++umount $mnt + lvchange -an $vg/$lv1 +- + lvconvert --splitcache $vg/$lv1 +- + check lv_field $vg/$lv1 segtype linear + check lv_field $vg/$lv2 segtype linear +- + lvchange -ay $vg/$lv1 +-lvchange -ay $vg/$lv2 +- +-mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir +- +-ls -l $mount_dir +- +-diff pattern1 $mount_dir/pattern1 +-diff pattern1 $mount_dir/pattern1b +- +-umount $mount_dir ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" + lvchange -an $vg/$lv1 ++_verify_data_on_lv + lvchange -an $vg/$lv2 ++lvremove $vg/$lv1 ++lvremove $vg/$lv2 + +-# test2: create fs on LV after writecache is attached ++# Test attach while inactive, detach while inactive ++# create fs on LV after writecache is attached + ++lvcreate -n $lv1 -l 8 -an $vg "$dev1" ++lvcreate -n $lv2 -l 4 -an $vg "$dev2" + lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1 +- + check lv_field $vg/$lv1 segtype writecache +- + lvs -a $vg/${lv2}_cvol --noheadings -o segtype >out + grep linear out +- + lvchange -ay $vg/$lv1 +- +-mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1" +- +-mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir +- +-cp pattern1 $mount_dir/pattern1 +-ls -l $mount_dir +- +-umount $mount_dir ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++_add_new_data_to_mnt ++umount $mnt + lvchange -an $vg/$lv1 +- + lvconvert --splitcache $vg/$lv1 +- +-check lv_field $vg/$lv1 segtype linear +-check lv_field $vg/$lv2 segtype linear +- + lvchange -ay $vg/$lv1 +-lvchange -ay $vg/$lv2 +- +-mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir +- +-ls -l $mount_dir +- +-diff pattern1 $mount_dir/pattern1 +- +-umount $mount_dir ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++_add_more_data_to_mnt ++_verify_data_on_mnt ++_verify_more_data_on_mnt ++umount $mnt + lvchange -an $vg/$lv1 +-lvchange -an $vg/$lv2 +- +- +-# test3: attach writecache to an active LV +- +-lvchange -ay $vg/$lv1 +- +-mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1" +- +-mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir +- +-cp pattern1 $mount_dir/pattern1 +-ls -l $mount_dir +- +-# TODO BZ 1808012 - can not convert active volume to writecache: +-not lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1 +- +-if false; then +-check lv_field $vg/$lv1 segtype writecache +- +-lvs -a $vg/${lv2}_cvol --noheadings -o segtype >out +-grep linear out +- +-cp pattern1 $mount_dir/pattern1.after ++_verify_data_on_lv ++lvremove $vg/$lv1 ++lvremove $vg/$lv2 + +-diff pattern1 $mount_dir/pattern1 +-diff pattern1 $mount_dir/pattern1.after ++# 
Test attach while active, detach while active + +-umount $mount_dir +-lvchange -an $vg/$lv1 ++lvcreate -n $lv1 -l 8 -an $vg "$dev1" ++lvcreate -n $lv2 -l 4 -an $vg "$dev2" + lvchange -ay $vg/$lv1 +-mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir +- +-diff pattern1 $mount_dir/pattern1 +-diff pattern1 $mount_dir/pattern1.after +-fi +- +-umount $mount_dir ++_add_new_data_to_mnt ++lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1 ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++_add_more_data_to_mnt ++_verify_data_on_mnt ++lvconvert --splitcache $vg/$lv1 ++check lv_field $vg/$lv1 segtype linear ++check lv_field $vg/$lv2 segtype linear ++blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++_verify_data_on_mnt ++_verify_more_data_on_mnt ++umount $mnt + lvchange -an $vg/$lv1 ++lvchange -an $vg/$lv2 ++_verify_data_on_lv + lvremove $vg/$lv1 ++lvremove $vg/$lv2 + ++# FIXME: test depends on unpushed commit ++# that enables two stage flush using cleaner ++# ++# Test attach while active, detach while active, ++# skip cleaner so flush message is used instead ++# ++# lvcreate -n $lv1 -l 8 -an $vg "$dev1" ++# lvcreate -n $lv2 -l 4 -an $vg "$dev2" ++# lvchange -ay $vg/$lv1 ++# _add_new_data_to_mnt ++# lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1 ++# blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++# blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++# _add_more_data_to_mnt ++# _verify_data_on_mnt ++# lvconvert --splitcache --cachesettings cleaner=0 $vg/$lv1 ++# check lv_field $vg/$lv1 segtype linear ++# check lv_field $vg/$lv2 segtype linear ++# blockdev --getss "$DM_DEV_DIR/$vg/$lv1" ++# blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" ++# _verify_data_on_mnt ++# _verify_more_data_on_mnt ++# umount $mnt ++# lvchange -an $vg/$lv1 ++# lvchange -an $vg/$lv2 ++# _verify_data_on_lv ++# lvremove $vg/$lv1 ++# lvremove $vg/$lv2 ++ + vgremove -ff $vg +- ++ +diff --git a/tools/args.h b/tools/args.h +index d1f604b..3a7e5d4 100644 +--- a/tools/args.h ++++ b/tools/args.h +@@ -126,6 +126,12 @@ arg(cachepool_ARG, '\0', "cachepool", lv_VAL, 0, 0, + arg(cachevol_ARG, '\0', "cachevol", lv_VAL, 0, 0, + "The name of a cache volume.\n") + ++arg(cachedevice_ARG, '\0', "cachedevice", pv_VAL, ARG_GROUPABLE, 0, ++ "The name of a device to use for a cache.\n") ++ ++arg(cachesize_ARG, '\0', "cachesize", sizemb_VAL, 0, 0, ++ "The size of cache to use.\n") ++ + arg(commandprofile_ARG, '\0', "commandprofile", string_VAL, 0, 0, + "The command profile to use for command configuration.\n" + "See \\fBlvm.conf\\fP(5) for more information about profiles.\n") +@@ -1428,7 +1434,16 @@ arg(thin_ARG, 'T', "thin", 0, 0, 0, + "See \\fBlvmthin\\fP(7) for more information about LVM thin provisioning.\n") + + arg(updatemetadata_ARG, '\0', "updatemetadata", 0, 0, 0, +- "Update VG metadata to correct problems.\n") ++ "Update VG metadata to correct problems.\n" ++ "If VG metadata was updated while a PV was missing, and the PV\n" ++ "reappears with an old version of metadata, then this option\n" ++ "(or any other command that writes metadata) will update the\n" ++ "metadata on the previously missing PV. If a PV was removed\n" ++ "from a VG while it was missing, and the PV reappears, using\n" ++ "this option will clear the outdated metadata from the previously\n" ++ "missing PV. If metadata text is damaged on one PV, using this\n" ++ "option will replace the damaged metadata text. For more severe\n" ++ "damage, e.g. 
with headers, see \\fBpvck\\fP(8).\n") + + arg(uuid_ARG, 'u', "uuid", 0, 0, 0, + "#pvchange\n" +diff --git a/tools/command-lines.in b/tools/command-lines.in +index ed3d041..1b0ca22 100644 +--- a/tools/command-lines.in ++++ b/tools/command-lines.in +@@ -247,7 +247,7 @@ RULE: --profile not --detachprofile + RULE: --metadataprofile not --detachprofile + RULE: --minrecoveryrate --maxrecoveryrate and LV_raid + RULE: --writebehind --writemostly and LV_raid1 +-RULE: --cachemode --cachepolicy --cachesettings and LV_cache LV_cachepool ++RULE: --cachemode --cachepolicy --cachesettings and LV_cache LV_cachepool LV_writecache + RULE: --errorwhenfull --discards --zero and LV_thinpool + RULE: --permission not lv_is_external_origin lv_is_raid_metadata lv_is_raid_image LV_thinpool + RULE: --alloc --contiguous --metadataprofile --permission --persistent --profile --readahead not lv_is_thick_origin +@@ -359,7 +359,8 @@ OP: PV ... + ID: lvconvert_raid_types + DESC: Convert LV to raid or change raid layout + DESC: (a specific raid level must be used, e.g. raid1). +-RULE: all not lv_is_locked lv_is_pvmove lv_is_raid_with_integrity ++RULE: all not lv_is_locked lv_is_pvmove ++RULE: lv_is_raid_with_integrity not --stripes_long --stripesize --regionsize --interval + + lvconvert --mirrors SNumber LV + OO: --regionsize RegionSize, --interval Number, --mirrorlog MirrorLog, OO_LVCONVERT +@@ -497,6 +498,20 @@ FLAGS: SECONDARY_SYNTAX + + --- + ++lvconvert --type writecache --cachedevice PV LV_linear_striped_raid ++OO: OO_LVCONVERT, --cachesize SizeMB, --cachesettings String ++ID: lvconvert_to_writecache_with_device ++DESC: Add a writecache to an LV, using a specified cache device. ++RULE: all and lv_is_visible ++ ++lvconvert --type cache --cachedevice PV LV_linear_striped_raid_thinpool ++OO: OO_LVCONVERT, --cachesize SizeMB, --cachesettings String ++ID: lvconvert_to_cache_with_device ++DESC: Add a cache to an LV, using a specified cache device. ++RULE: all and lv_is_visible ++ ++--- ++ + lvconvert --type thin-pool LV_linear_striped_raid_cache + OO: --stripes_long Number, --stripesize SizeKB, + --discards Discards, OO_LVCONVERT_POOL, OO_LVCONVERT +@@ -1205,87 +1220,107 @@ lvcreate --type cache --size SizeMB --cachepool LV_cachepool VG + OO: --cache, OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE, + --stripes Number, --stripesize SizeKB + OP: PV ... +-ID: lvcreate_cache_vol_with_new_origin +-DESC: Create a cache LV, first creating a new origin LV, +-DESC: then combining it with the existing cache pool named +-DESC: by the --cachepool arg. ++ID: lvcreate_and_attach_cachepool ++DESC: Create a new LV, then attach the specified cachepool ++DESC: which converts the new LV to type cache. + + # alternate form of lvcreate --type cache ++# (omits the --type cache option which is inferred) + lvcreate --size SizeMB --cachepool LV_cachepool VG + OO: --type cache, --cache, OO_LVCREATE_CACHE, OO_LVCREATE, + --stripes Number, --stripesize SizeKB + OP: PV ... +-ID: lvcreate_cache_vol_with_new_origin +-DESC: Create a cache LV, first creating a new origin LV, +-DESC: then combining it with the existing cache pool named +-DESC: by the --cachepool arg (variant, infers --type cache). ++ID: lvcreate_and_attach_cachepool_v2 ++DESC: Create a new LV, then attach the specified cachepool ++DESC: which converts the new LV to type cache ++DESC: (variant, infers --type cache.) 
+ FLAGS: SECONDARY_SYNTAX + + # alternate form of lvcreate --type cache ++# (moves cachepool from option arg to position arg, ++# dropping the normal VG position arg) + lvcreate --type cache --size SizeMB LV_cachepool + OO: --cache, OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE, + --stripes Number, --stripesize SizeKB + OP: PV ... +-ID: lvcreate_cache_vol_with_new_origin +-DESC: Create a cache LV, first creating a new origin LV, +-DESC: then combining it with the existing cache pool named +-DESC: in the first arg (variant, also use --cachepool). ++ID: lvcreate_and_attach_cachepool_v3 ++DESC: Create a new LV, then attach the specified cachepool ++DESC: which converts the new LV to type cache. ++DESC: (variant, also use --cachepool). + FLAGS: SECONDARY_SYNTAX + +-# This is a ridiculously crazy command which nobody could +-# understand. It should be be eliminated. It does two different +-# things depending on whether LV in pos 1 is a cachepool LV +-# or not. Both variations are unnecessary. +-# +-# 1. If LV is a cachepool, then it's an alternate form of +-# an already complicated command above. +-# +-# # alternate form for lvcreate_cache_vol_with_new_origin +-# lvcreate --cache --size SizeMB LV_cachepool +-# OO: --type cache, --cache, OO_LVCREATE_CACHE, OO_LVCREATE, --stripes Number, --stripesize SizeKB +-# OP: PV ... +-# ID: lvcreate_cache_vol_with_new_origin +-# DESC: Create a cache LV, first creating a new origin LV, +-# DESC: then combining it with the existing cache pool named +-# DESC: in the first arg (variant, infers --type cache, +-# DESC: also use --cachepool). +-# +-# 2. If LV is not a cachepool, then it's a disguised lvconvert. +-# +-# # FIXME: this should be done by lvconvert, and this command removed +-# lvcreate --type cache --size SizeMB LV +-# OO: OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE +-# OP: PV ... +-# ID: lvcreate_convert_to_cache_vol_with_cachepool +-# DESC: Convert the specified LV to type cache after creating a new +-# DESC: cache pool LV to use (use lvconvert). ++# This command has two different meanings which ought to ++# have separate command defs, but since the syntax is the ++# same for both they have to share one command def with ++# an ambiguous meaning. Which command is performed depends ++# on whether the LV in the first arg position is a ++# cachepool or not (we can't have two different command ++# defs that differ only in the type of LV in the arg position ++# because when parsing commands we don't know the LV type.) ++# ++# 1. An alternate form of lvcreate_and_attach_cachepool_v3 ++# this syntax: lvcreate --cache --size SizeMB LV_cachepool ++# is alternative for: lvcreate --type cache --size SizeMB LV_cachepool ++# ++# 2. An alternative to using lvconvert to convert LV to type cache, ++# but in this case the cachepool is created internally and ++# then attached to the LV arg. + # + # Note that stripes are accepted by the first and not by the + # second, but it's not possible to validate this until after + # the LV type is known. +-# +-# So, to define this syntax we have to combine both of +-# those variants, each crazy on it's own, into one +-# ridiculous command. + +-# def1: alternate form of lvcreate --type cache, or +-# def2: it should be done by lvconvert. + lvcreate --cache --size SizeMB LV + OO: OO_LVCREATE_CACHE, OO_LVCREATE_POOL, OO_LVCREATE, + --stripes Number, --stripesize SizeKB + OP: PV ... 
+-ID: lvcreate_cache_vol_with_new_origin_or_convert_to_cache_vol_with_cachepool +-DESC: When LV is a cache pool, create a cache LV, +-DESC: first creating a new origin LV, then combining it with +-DESC: the existing cache pool named in the first arg +-DESC: (variant, infers --type cache, also use --cachepool). +-DESC: When LV is not a cache pool, convert the specified LV +-DESC: to type cache after creating a new cache pool LV to use +-DESC: (use lvconvert). ++ID: lvcreate_new_plus_old_cachepool_or_lvconvert_old_plus_new_cachepool ++DESC: When the LV arg is a cachepool, then create a new LV and ++DESC: attach the cachepool arg to it. ++DESC: (variant, use --type cache and --cachepool.) ++DESC: When the LV arg is not a cachepool, then create a new cachepool ++DESC: and attach it to the LV arg (alternative, use lvconvert.) + FLAGS: SECONDARY_SYNTAX + + --- + ++# These all create a new origin LV, then forwards to lvconvert ++# which combines it with a cachevol (which already exists or ++# which needs to be created from cachedevice), converting ++# the new LV to type cache or writecache. ++ ++lvcreate --type cache --size SizeMB --cachevol LV VG ++OO: OO_LVCREATE, OO_LVCREATE_CACHE, --stripes Number, --stripesize SizeKB ++OP: PV ... ++ID: lvcreate_and_attach_cachevol_for_cache ++DESC: Create a new LV, then attach the specified cachevol ++DESC: which converts the new LV to type cache. ++ ++lvcreate --type cache --size SizeMB --cachedevice PV VG ++OO: OO_LVCREATE, OO_LVCREATE_CACHE, --cachesize SizeMB, --stripes Number, --stripesize SizeKB ++OP: PV ... ++ID: lvcreate_and_attach_cachedevice_for_cache ++DESC: Create a new LV, then attach a cachevol created from ++DESC: the specified cache device, which converts the ++DESC: new LV to type cache. ++ ++lvcreate --type writecache --size SizeMB --cachevol LV VG ++OO: OO_LVCREATE, --cachesettings String, --stripes Number, --stripesize SizeKB ++OP: PV ... ++ID: lvcreate_and_attach_cachevol_for_writecache ++DESC: Create a new LV, then attach the specified cachevol ++DESC: which converts the new LV to type writecache. ++ ++lvcreate --type writecache --size SizeMB --cachedevice PV VG ++OO: OO_LVCREATE, --cachesize SizeMB, --cachesettings String, --stripes Number, --stripesize SizeKB ++OP: PV ... ++ID: lvcreate_and_attach_cachedevice_for_writecache ++DESC: Create a new LV, then attach a cachevol created from ++DESC: the specified cache device, which converts the ++DESC: new LV to type writecache. 
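
The four new lvcreate definitions above pair with the lvconvert --cachevol/--cachedevice definitions added earlier in command-lines.in, so a new LV can be created and cached in one step. A hedged usage sketch; VG, LV, and device names are placeholders and the sizes are illustrative:

    # create a new LV and attach a writecache carved out of a fast PV in the VG
    lvcreate --type writecache -n main -L 100G --cachedevice /dev/fast_ssd --cachesize 8G vg
    # or attach an existing LV in the VG as the cachevol
    lvcreate --type writecache -n main -L 100G --cachevol fastlv vg
    # equivalent two-step form on an existing LV
    lvconvert --type writecache --cachedevice /dev/fast_ssd --cachesize 8G vg/main
    # detach again when the cache is no longer wanted
    lvconvert --splitcache vg/main

The --type cache variants follow the same pattern, with --cachevol or --cachedevice taking the place of --cachepool.
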
++ ++--- ++ + lvdisplay + OO: --aligned, --all, --binary, --colon, --columns, + --configreport ConfigReport, --foreign, --history, --ignorelockingfailure, +diff --git a/tools/command.c b/tools/command.c +index 511dda1..2d01849 100644 +--- a/tools/command.c ++++ b/tools/command.c +@@ -1420,6 +1420,9 @@ int define_commands(struct cmd_context *cmdtool, const char *run_name) + if (line[0] == '\n') + break; + ++ if (!strcmp(line, "---") || !strcmp(line, "--")) ++ continue; ++ + if ((n = strchr(line, '\n'))) + *n = '\0'; + +diff --git a/tools/lvchange.c b/tools/lvchange.c +index 2d5bb32..c0adadf 100644 +--- a/tools/lvchange.c ++++ b/tools/lvchange.c +@@ -606,6 +606,88 @@ static int _lvchange_persistent(struct cmd_context *cmd, + return 1; + } + ++static int _lvchange_writecache(struct cmd_context *cmd, ++ struct logical_volume *lv, ++ uint32_t *mr) ++{ ++ struct writecache_settings settings = { 0 }; ++ uint32_t block_size_sectors = 0; ++ struct lv_segment *seg = first_seg(lv); ++ int set_count = 0; ++ ++ if (!get_writecache_settings(cmd, &settings, &block_size_sectors)) ++ return_0; ++ ++ if (block_size_sectors && (seg->writecache_block_size != (block_size_sectors * 512))) { ++ log_error("Cannot change existing block size %u bytes.", seg->writecache_block_size); ++ return 0; ++ } ++ ++ if (settings.high_watermark_set) { ++ seg->writecache_settings.high_watermark_set = settings.high_watermark_set; ++ seg->writecache_settings.high_watermark = settings.high_watermark; ++ set_count++; ++ } ++ if (settings.low_watermark_set) { ++ seg->writecache_settings.low_watermark_set = settings.low_watermark_set; ++ seg->writecache_settings.low_watermark = settings.low_watermark; ++ set_count++; ++ } ++ if (settings.writeback_jobs_set) { ++ seg->writecache_settings.writeback_jobs_set = settings.writeback_jobs_set; ++ seg->writecache_settings.writeback_jobs = settings.writeback_jobs; ++ set_count++; ++ } ++ if (settings.autocommit_blocks_set) { ++ seg->writecache_settings.autocommit_blocks_set = settings.autocommit_blocks_set; ++ seg->writecache_settings.autocommit_blocks = settings.autocommit_blocks; ++ set_count++; ++ } ++ if (settings.autocommit_time_set) { ++ seg->writecache_settings.autocommit_time_set = settings.autocommit_time_set; ++ seg->writecache_settings.autocommit_time = settings.autocommit_time; ++ set_count++; ++ } ++ if (settings.fua_set) { ++ seg->writecache_settings.fua_set = settings.fua_set; ++ seg->writecache_settings.fua = settings.fua; ++ set_count++; ++ } ++ if (settings.nofua_set) { ++ seg->writecache_settings.nofua_set = settings.nofua_set; ++ seg->writecache_settings.nofua = settings.nofua; ++ set_count++; ++ } ++ if (settings.cleaner_set) { ++ seg->writecache_settings.cleaner_set = settings.cleaner_set; ++ seg->writecache_settings.cleaner = settings.cleaner; ++ set_count++; ++ } ++ if (settings.max_age_set) { ++ seg->writecache_settings.max_age_set = settings.max_age_set; ++ seg->writecache_settings.max_age = settings.max_age; ++ set_count++; ++ } ++ ++ if (!set_count) { ++ /* ++ * Empty settings can be used to clear all current settings, ++ * lvchange --cachesettings "" vg/lv ++ */ ++ if (!arg_count(cmd, yes_ARG) && ++ yes_no_prompt("Clear all writecache settings? 
") == 'n') { ++ log_print("No settings changed."); ++ return 1; ++ } ++ memset(&seg->writecache_settings, 0, sizeof(struct writecache_settings)); ++ } ++ ++ /* Request caller to commit and reload metadata */ ++ *mr |= MR_RELOAD; ++ ++ return 1; ++} ++ + static int _lvchange_cache(struct cmd_context *cmd, + struct logical_volume *lv, + uint32_t *mr) +@@ -619,6 +701,9 @@ static int _lvchange_cache(struct cmd_context *cmd, + int r = 0, is_clean; + uint32_t chunk_size = 0; /* FYI: lvchange does NOT support its change */ + ++ if (lv_is_writecache(lv)) ++ return _lvchange_writecache(cmd, lv, mr); ++ + seg = first_seg(lv); + + if (seg_is_cache(seg) && lv_is_cache_vol(seg->pool_lv)) +diff --git a/tools/lvconvert.c b/tools/lvconvert.c +index 8652252..524ed5a 100644 +--- a/tools/lvconvert.c ++++ b/tools/lvconvert.c +@@ -1319,6 +1319,8 @@ static int _raid4_conversion_supported(struct logical_volume *lv, struct lvconve + static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *lp) + { + int image_count = 0; ++ int images_reduced = 0; ++ int type_enforced = 0; + struct cmd_context *cmd = lv->vg->cmd; + struct lv_segment *seg = first_seg(lv); + +@@ -1357,6 +1359,8 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l + else + image_count = lp->mirrors + 1; + ++ images_reduced = (image_count < lv_raid_image_count(lv)); ++ + if (image_count < 1) { + log_error("Unable to %s images by specified amount.", + lp->keep_mimages ? "split" : "reduce"); +@@ -1369,6 +1373,12 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l + display_lvname(lv)); + return 0; + } ++ ++ if (!*lp->type_str) { ++ lp->type_str = SEG_TYPE_NAME_RAID1; ++ lp->segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_RAID1); ++ type_enforced = 1; ++ } + } + + if ((lp->corelog || lp->mirrorlog) && strcmp(lp->type_str, SEG_TYPE_NAME_MIRROR)) { +@@ -1383,7 +1393,7 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l + return lv_raid_split(lv, lp->yes, lp->lv_split_name, image_count, lp->pvh); + + if (lp->mirrors_supplied) { +- if ((seg_is_striped(seg) && seg->area_count == 1) || seg_is_raid1(seg)) { /* ??? */ ++ if (seg_is_linear(seg) || seg_is_raid1(seg)) { /* ??? */ + if (!*lp->type_str || !strcmp(lp->type_str, SEG_TYPE_NAME_RAID1) || !strcmp(lp->type_str, SEG_TYPE_NAME_LINEAR) || + (!strcmp(lp->type_str, SEG_TYPE_NAME_STRIPED) && image_count == 1)) { + if (image_count > DEFAULT_RAID1_MAX_IMAGES) { +@@ -1400,7 +1410,7 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l + lp->region_size : seg->region_size , lp->pvh)) + return_0; + +- if (lv_raid_has_integrity(lv)) { ++ if (lv_raid_has_integrity(lv) && !images_reduced) { + struct integrity_settings *isettings = NULL; + if (!lv_get_raid_integrity_settings(lv, &isettings)) + return_0; +@@ -1446,7 +1456,7 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l + /* FIXME This needs changing globally. */ + if (!arg_is_set(cmd, stripes_long_ARG)) + lp->stripes = 0; +- if (!arg_is_set(cmd, type_ARG)) ++ if (!type_enforced && !arg_is_set(cmd, type_ARG)) + lp->segtype = NULL; + if (!arg_is_set(cmd, regionsize_ARG)) + lp->region_size = 0; +@@ -1474,7 +1484,7 @@ try_new_takeover_or_reshape: + /* FIXME This needs changing globally. 
*/ + if (!arg_is_set(cmd, stripes_long_ARG)) + lp->stripes = 0; +- if (!arg_is_set(cmd, type_ARG)) ++ if (!type_enforced && !arg_is_set(cmd, type_ARG)) + lp->segtype = NULL; + + if (!lv_raid_convert(lv, lp->segtype, +@@ -3276,7 +3286,11 @@ static int _lvconvert_to_pool(struct cmd_context *cmd, + } + metadata_lv->status &= ~LV_ACTIVATION_SKIP; + +- if (!wipe_lv(metadata_lv, (struct wipe_params) { .do_zero = 1 })) { ++ if (!wipe_lv(metadata_lv, (struct wipe_params) { ++ .do_wipe_signatures = 1, ++ .is_metadata = 1, ++ .yes = arg_count(cmd, yes_ARG), ++ .force = arg_count(cmd, force_ARG) } )) { + log_error("Aborting. Failed to wipe metadata lv."); + goto bad; + } +@@ -4245,51 +4259,205 @@ int lvconvert_to_pool_cmd(struct cmd_context *cmd, int argc, char **argv) + NULL, NULL, &_lvconvert_to_pool_single); + } + +-static int _lvconvert_cachevol_attach_single(struct cmd_context *cmd, +- struct logical_volume *lv, +- struct processing_handle *handle) ++#define MAX_CACHEDEVS 8 ++ ++static int _lv_create_cachevol(struct cmd_context *cmd, ++ struct volume_group *vg, ++ struct logical_volume *lv, ++ struct logical_volume **cachevol_lv) + { +- struct volume_group *vg = lv->vg; +- struct logical_volume *cachevol_lv; +- const char *cachevol_name; ++ char cvname[NAME_LEN]; ++ char format[NAME_LEN]; ++ struct dm_list *use_pvh; ++ struct pv_list *pvl; ++ char *dev_name; ++ struct device *dev_fast; ++ char *dev_argv[MAX_CACHEDEVS]; ++ int dev_argc = 0; ++ uint64_t cache_size_sectors = 0; ++ uint64_t full_size_sectors = 0; ++ uint64_t pv_size_sectors; ++ struct logical_volume *cachevol; ++ struct arg_value_group_list *group; ++ struct lvcreate_params lp = { ++ .activate = CHANGE_AN, ++ .alloc = ALLOC_INHERIT, ++ .major = -1, ++ .minor = -1, ++ .permission = LVM_READ | LVM_WRITE, ++ .pvh = &vg->pvs, ++ .read_ahead = DM_READ_AHEAD_NONE, ++ .stripes = 1, ++ .vg_name = vg->name, ++ .zero = 0, ++ .wipe_signatures = 0, ++ .suppress_zero_warn = 1, ++ }; + +- if (!(cachevol_name = arg_str_value(cmd, cachevol_ARG, NULL))) +- goto_out; ++ /* ++ * If cache size is not set, and all cachedevice's are unused, ++ * then the cache size is the sum of all cachedevice sizes. ++ */ ++ cache_size_sectors = arg_uint64_value(cmd, cachesize_ARG, 0); + +- if (!validate_lvname_param(cmd, &vg->name, &cachevol_name)) +- goto_out; ++ dm_list_iterate_items(group, &cmd->arg_value_groups) { ++ if (!grouped_arg_is_set(group->arg_values, cachedevice_ARG)) ++ continue; + +- if (!(cachevol_lv = find_lv(vg, cachevol_name))) { +- log_error("Cache single %s not found.", cachevol_name); +- goto out; ++ if (!(dev_name = (char *)grouped_arg_str_value(group->arg_values, cachedevice_ARG, NULL))) ++ break; ++ ++ if (dev_name[0] == '@') { ++ if (!cache_size_sectors) { ++ log_error("With tag as cachedevice, --cachesize is required."); ++ return 0; ++ } ++ goto add_dev_arg; ++ } ++ ++ if (!(dev_fast = dev_cache_get(cmd, dev_name, cmd->filter))) { ++ log_error("Device %s not found.", dev_name); ++ return 0; ++ } ++ ++ if (!(pvl = find_pv_in_vg(vg, dev_name))) { ++ log_error("PV %s not found in VG.", dev_name); ++ return 0; ++ } ++ ++ /* ++ * If the dev is used in the VG, then require a cachesize to allocate ++ * from it. If it is not used in the VG, then prompt asking if the ++ * entire dev should be used. 
++ */ ++ if (!cache_size_sectors && pvl->pv->pe_alloc_count) { ++ log_error("PV %s is in use, --cachesize is required.", dev_name); ++ return 0; ++ } ++ ++ if (!cache_size_sectors) { ++ pv_size_sectors = (pvl->pv->pe_count * vg->extent_size); ++ ++ if (!arg_is_set(cmd, yes_ARG) && ++ yes_no_prompt("Use all %s from %s for cache? [y/n]: ", ++ display_size(cmd, pv_size_sectors), dev_name) == 'n') { ++ log_print("Use --cachesize SizeMB to use a part of the cachedevice."); ++ log_error("Conversion aborted."); ++ return 0; ++ } ++ full_size_sectors += pv_size_sectors; ++ } ++ add_dev_arg: ++ if (dev_argc >= MAX_CACHEDEVS) { ++ log_error("Cannot allocate from more than %u cache devices.", MAX_CACHEDEVS); ++ return 0; ++ } ++ ++ dev_argv[dev_argc++] = dev_name; + } + +- if (lv_is_cache_vol(cachevol_lv)) { +- log_error("LV %s is already used as a cachevol.", display_lvname(cachevol_lv)); +- goto out; ++ if (!cache_size_sectors) ++ cache_size_sectors = full_size_sectors; ++ ++ if (!dev_argc) { ++ log_error("No cachedevice specified to create a cachevol."); ++ return 0; + } + +- /* Ensure the LV is not active elsewhere. */ +- if (!lockd_lv(cmd, lv, "ex", 0)) +- goto_out; ++ if (!(use_pvh = create_pv_list(cmd->mem, vg, dev_argc, dev_argv, 1))) { ++ log_error("cachedevice not found in VG %s.", dev_name); ++ return 0; ++ } + +- if (!dm_list_empty(&cachevol_lv->segs_using_this_lv)) { +- log_error("LV %s is already in use.", display_lvname(cachevol_lv)); +- goto out; ++ if (dm_snprintf(cvname, NAME_LEN, "%s_cache", lv->name) < 0) { ++ log_error("Failed to create cachevol LV name."); ++ return 0; + } + +- if (!arg_is_set(cmd, yes_ARG) && +- yes_no_prompt("Erase all existing data on %s? [y/n]: ", display_lvname(cachevol_lv)) == 'n') { +- log_error("Conversion aborted."); +- goto out; ++ if (find_lv(vg, cvname)) { ++ memset(format, 0, sizeof(cvname)); ++ memset(cvname, 0, sizeof(cvname)); ++ if (dm_snprintf(format, sizeof(format), "%s_cache%%d", lv->name) < 0) { ++ log_error("Failed to generate cachevol LV format."); ++ return 0; ++ } ++ if (!generate_lv_name(vg, format, cvname, sizeof(cvname))) { ++ log_error("Failed to generate cachevol LV name."); ++ return 0; ++ } ++ } ++ ++ lp.lv_name = cvname; ++ lp.pvh = use_pvh; ++ lp.extents = cache_size_sectors / vg->extent_size; ++ ++ log_print("Creating cachevol LV %s with size %s.", ++ cvname, display_size(cmd, cache_size_sectors)); ++ ++ dm_list_init(&lp.tags); ++ ++ if (!(lp.segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED))) ++ return_0; ++ ++ if (!(cachevol = lv_create_single(vg, &lp))) { ++ log_error("Failed to create cachevol LV"); ++ return 0; ++ } ++ ++ *cachevol_lv = cachevol; ++ return 1; ++} ++ ++int lvconvert_cachevol_attach_single(struct cmd_context *cmd, ++ struct logical_volume *lv, ++ struct processing_handle *handle) ++{ ++ struct volume_group *vg = lv->vg; ++ struct logical_volume *lv_fast; ++ const char *fast_name; ++ ++ /* ++ * User specifies an existing cachevol to use or a cachedevice ++ * to create a cachevol from. 
++ */ ++ if ((fast_name = arg_str_value(cmd, cachevol_ARG, NULL))) { ++ if (!validate_lvname_param(cmd, &vg->name, &fast_name)) ++ goto_bad; ++ ++ if (!(lv_fast = find_lv(vg, fast_name))) { ++ log_error("LV %s not found.", fast_name); ++ goto bad; ++ } ++ ++ if (lv_is_cache_vol(lv_fast)) { ++ log_error("LV %s is already used as a cachevol.", display_lvname(lv_fast)); ++ goto bad; ++ } ++ ++ if (!dm_list_empty(&lv_fast->segs_using_this_lv)) { ++ log_error("LV %s is already in use.", display_lvname(lv_fast)); ++ goto bad; ++ } ++ ++ if (!arg_is_set(cmd, yes_ARG) && ++ yes_no_prompt("Erase all existing data on %s? [y/n]: ", display_lvname(lv_fast)) == 'n') { ++ log_error("Conversion aborted."); ++ goto bad; ++ } ++ ++ if (!lockd_lv(cmd, lv_fast, "ex", 0)) ++ goto_bad; ++ } else { ++ if (!_lv_create_cachevol(cmd, vg, lv, &lv_fast)) ++ goto_bad; + } + + /* Ensure the LV is not active elsewhere. */ +- if (!lockd_lv(cmd, cachevol_lv, "ex", LDLV_PERSISTENT)) +- goto_out; ++ if (!lockd_lv(cmd, lv, "ex", 0)) ++ goto_bad; + +- if (!wipe_cache_pool(cachevol_lv)) +- goto_out; ++ if (!wipe_cache_pool(lv_fast)) ++ goto_bad; + + /* When the lv arg is a thinpool, redirect command to data sub lv. */ + +@@ -4299,17 +4467,17 @@ static int _lvconvert_cachevol_attach_single(struct cmd_context *cmd, + } + + if (_raid_split_image_conversion(lv)) +- goto_out; ++ goto_bad; + + /* Attach the cache to the main LV. */ + +- if (!_cache_vol_attach(cmd, lv, cachevol_lv)) +- goto_out; ++ if (!_cache_vol_attach(cmd, lv, lv_fast)) ++ goto_bad; + + log_print_unless_silent("Logical volume %s is now cached.", display_lvname(lv)); + + return ECMD_PROCESSED; +- out: ++ bad: + return ECMD_FAILED; + } + +@@ -5308,19 +5476,8 @@ static int _lvconvert_detach_writecache(struct cmd_context *cmd, + struct logical_volume *lv, + struct logical_volume *lv_fast) + { +- char cvol_name[NAME_LEN]; +- char *c; + int noflush = 0; + +- /* +- * LV must be inactive externally before detaching cache. +- */ +- +- if (lv_info(cmd, lv, 1, NULL, 0, 0)) { +- log_error("LV %s must be inactive to detach writecache.", display_lvname(lv)); +- return 0; +- } +- + if (!archive(lv->vg)) + return_0; + +@@ -5344,36 +5501,23 @@ static int _lvconvert_detach_writecache(struct cmd_context *cmd, + noflush = 1; + } + +- if (!lv_detach_writecache_cachevol(lv, noflush)) +- return_0; +- + /* +- * Rename lv_fast back to its original name, without the _cvol +- * suffix that was added when lv_fast was attached for caching. ++ * TODO: send a message to writecache in the kernel to start writing ++ * back cache data to the origin. Then release the vg lock and monitor ++ * the progress of that writeback. When it's complete we can reacquire ++ * the vg lock, rescan the vg (ensure it hasn't changed), and do the ++ * detach which should be quick since the writeback is complete. If ++ * this command is canceled while monitoring writeback, it should just ++ * be rerun. The LV will continue to have the writecache until this ++ * command is run to completion. 
+ */ +- if (!dm_strncpy(cvol_name, lv_fast->name, sizeof(cvol_name)) || +- !(c = strstr(cvol_name, "_cvol"))) { +- log_debug("LV %s has no suffix for cachevol (skipping rename).", +- display_lvname(lv_fast)); +- } else { +- *c = 0; +- /* If the name is in use, generate new lvol%d */ +- if (lv_name_is_used_in_vg(lv->vg, cvol_name, NULL) && +- !generate_lv_name(lv->vg, "lvol%d", cvol_name, sizeof(cvol_name))) { +- log_error("Failed to generate unique name for unused logical volume."); +- return 0; +- } + +- if (!lv_rename_update(cmd, lv_fast, cvol_name, 0)) +- return_0; +- } +- +- if (!vg_write(lv->vg) || !vg_commit(lv->vg)) ++ if (!lv_detach_writecache_cachevol(lv, noflush)) + return_0; + + backup(lv->vg); + +- log_print_unless_silent("Logical volume %s write cache has been detached.", ++ log_print_unless_silent("Logical volume %s writecache has been detached.", + display_lvname(lv)); + return 1; + } +@@ -5383,7 +5527,8 @@ static int _writecache_zero(struct cmd_context *cmd, struct logical_volume *lv) + struct wipe_params wp = { + .do_wipe_signatures = 1, /* optional, to print warning if clobbering something */ + .do_zero = 1, /* required for dm-writecache to work */ +- .zero_sectors = 1 ++ .yes = arg_count(cmd, yes_ARG), ++ .force = arg_count(cmd, force_ARG) + }; + int ret; + +@@ -5400,7 +5545,8 @@ static int _writecache_zero(struct cmd_context *cmd, struct logical_volume *lv) + return 0; + } + +- ret = wipe_lv(lv, wp); ++ if (!(ret = wipe_lv(lv, wp))) ++ stack; + + if (!deactivate_lv(cmd, lv)) { + log_error("Failed to deactivate LV %s for zeroing.", display_lvname(lv)); +@@ -5410,157 +5556,6 @@ static int _writecache_zero(struct cmd_context *cmd, struct logical_volume *lv) + return ret; + } + +-static int _get_one_writecache_setting(struct cmd_context *cmd, struct writecache_settings *settings, +- char *key, char *val, uint32_t *block_size_sectors) +-{ +- /* special case: block_size is not a setting but is set with the --cachesettings option */ +- if (!strncmp(key, "block_size", strlen("block_size"))) { +- uint32_t block_size = 0; +- if (sscanf(val, "%u", &block_size) != 1) +- goto_bad; +- if (block_size == 512) +- *block_size_sectors = 1; +- else if (block_size == 4096) +- *block_size_sectors = 8; +- else +- goto_bad; +- return 1; +- } +- +- if (!strncmp(key, "high_watermark", strlen("high_watermark"))) { +- if (sscanf(val, "%llu", (unsigned long long *)&settings->high_watermark) != 1) +- goto_bad; +- if (settings->high_watermark > 100) +- goto_bad; +- settings->high_watermark_set = 1; +- return 1; +- } +- +- if (!strncmp(key, "low_watermark", strlen("low_watermark"))) { +- if (sscanf(val, "%llu", (unsigned long long *)&settings->low_watermark) != 1) +- goto_bad; +- if (settings->low_watermark > 100) +- goto_bad; +- settings->low_watermark_set = 1; +- return 1; +- } +- +- if (!strncmp(key, "writeback_jobs", strlen("writeback_jobs"))) { +- if (sscanf(val, "%llu", (unsigned long long *)&settings->writeback_jobs) != 1) +- goto_bad; +- settings->writeback_jobs_set = 1; +- return 1; +- } +- +- if (!strncmp(key, "autocommit_blocks", strlen("autocommit_blocks"))) { +- if (sscanf(val, "%llu", (unsigned long long *)&settings->autocommit_blocks) != 1) +- goto_bad; +- settings->autocommit_blocks_set = 1; +- return 1; +- } +- +- if (!strncmp(key, "autocommit_time", strlen("autocommit_time"))) { +- if (sscanf(val, "%llu", (unsigned long long *)&settings->autocommit_time) != 1) +- goto_bad; +- settings->autocommit_time_set = 1; +- return 1; +- } +- +- if (!strncmp(key, "fua", strlen("fua"))) { +- if 
(settings->nofua_set) { +- log_error("Setting fua and nofua cannot both be set."); +- return 0; +- } +- if (sscanf(val, "%u", &settings->fua) != 1) +- goto_bad; +- settings->fua_set = 1; +- return 1; +- } +- +- if (!strncmp(key, "nofua", strlen("nofua"))) { +- if (settings->fua_set) { +- log_error("Setting fua and nofua cannot both be set."); +- return 0; +- } +- if (sscanf(val, "%u", &settings->nofua) != 1) +- goto_bad; +- settings->nofua_set = 1; +- return 1; +- } +- +- if (settings->new_key) { +- log_error("Setting %s is not recognized. Only one unrecognized setting is allowed.", key); +- return 0; +- } +- +- log_warn("Unrecognized writecache setting \"%s\" may cause activation failure.", key); +- if (yes_no_prompt("Use unrecognized writecache setting? [y/n]: ") == 'n') { +- log_error("Aborting writecache conversion."); +- return 0; +- } +- +- log_warn("Using unrecognized writecache setting: %s = %s.", key, val); +- +- settings->new_key = dm_pool_strdup(cmd->mem, key); +- settings->new_val = dm_pool_strdup(cmd->mem, val); +- return 1; +- +- bad: +- log_error("Invalid setting: %s", key); +- return 0; +-} +- +-static int _get_writecache_settings(struct cmd_context *cmd, struct writecache_settings *settings, +- uint32_t *block_size_sectors) +-{ +- struct arg_value_group_list *group; +- const char *str; +- char key[64]; +- char val[64]; +- int num; +- int pos; +- +- /* +- * "grouped" means that multiple --cachesettings options can be used. +- * Each option is also allowed to contain multiple key = val pairs. +- */ +- +- dm_list_iterate_items(group, &cmd->arg_value_groups) { +- if (!grouped_arg_is_set(group->arg_values, cachesettings_ARG)) +- continue; +- +- if (!(str = grouped_arg_str_value(group->arg_values, cachesettings_ARG, NULL))) +- break; +- +- pos = 0; +- +- while (pos < strlen(str)) { +- /* scan for "key1=val1 key2 = val2 key3= val3" */ +- +- memset(key, 0, sizeof(key)); +- memset(val, 0, sizeof(val)); +- +- if (sscanf(str + pos, " %63[^=]=%63s %n", key, val, &num) != 2) { +- log_error("Invalid setting at: %s", str+pos); +- return 0; +- } +- +- pos += num; +- +- if (!_get_one_writecache_setting(cmd, settings, key, val, block_size_sectors)) +- return_0; +- } +- } +- +- if (settings->high_watermark_set && settings->low_watermark_set && +- (settings->high_watermark <= settings->low_watermark)) { +- log_error("High watermark must be greater than low watermark."); +- return 0; +- } +- +- return 1; +-} +- + static struct logical_volume *_lv_writecache_create(struct cmd_context *cmd, + struct logical_volume *lv, + struct logical_volume *lv_fast, +@@ -5605,9 +5600,177 @@ static struct logical_volume *_lv_writecache_create(struct cmd_context *cmd, + return lv_wcorig; + } + +-#define DEFAULT_WRITECACHE_BLOCK_SIZE_SECTORS 8 /* 4K */ ++/* ++ * Currently only supports writecache block sizes 512 and 4096. ++ * This could be expanded later. ++ */ ++static int _set_writecache_block_size(struct cmd_context *cmd, ++ struct logical_volume *lv, ++ uint32_t *block_size_sectors) ++{ ++ char pathname[PATH_MAX]; ++ struct device *fs_dev; ++ struct dm_list pvs; ++ struct pv_list *pvl; ++ uint32_t fs_block_size = 0; ++ uint32_t block_size_setting = 0; ++ uint32_t block_size = 0; ++ int lbs_unknown = 0, lbs_4k = 0, lbs_512 = 0; ++ int pbs_unknown = 0, pbs_4k = 0, pbs_512 = 0; ++ int rv; ++ ++ /* This is set if the user specified a writecache block size on the command line. 
*/ ++ if (*block_size_sectors) ++ block_size_setting = *block_size_sectors * 512; ++ ++ dm_list_init(&pvs); ++ ++ if (!get_pv_list_for_lv(cmd->mem, lv, &pvs)) { ++ log_error("Failed to build list of PVs for %s.", display_lvname(lv)); ++ goto_bad; ++ } ++ ++ dm_list_iterate_items(pvl, &pvs) { ++ unsigned int pbs = 0; ++ unsigned int lbs = 0; + +-static int _lvconvert_writecache_attach_single(struct cmd_context *cmd, ++ if (!dev_get_direct_block_sizes(pvl->pv->dev, &pbs, &lbs)) { ++ lbs_unknown++; ++ pbs_unknown++; ++ continue; ++ } ++ ++ if (lbs == 4096) ++ lbs_4k++; ++ else if (lbs == 512) ++ lbs_512++; ++ else ++ lbs_unknown++; ++ ++ if (pbs == 4096) ++ pbs_4k++; ++ else if (pbs == 512) ++ pbs_512++; ++ else ++ pbs_unknown++; ++ } ++ ++ if (lbs_4k && lbs_512) { ++ log_error("Writecache requires consistent logical block size for LV devices."); ++ goto_bad; ++ } ++ ++ if (lbs_4k && block_size_setting && (block_size_setting < 4096)) { ++ log_error("Writecache block size %u not allowed with device logical block size 4096.", ++ block_size_setting); ++ goto_bad; ++ } ++ ++ if (dm_snprintf(pathname, sizeof(pathname), "%s/%s/%s", cmd->dev_dir, ++ lv->vg->name, lv->name) < 0) { ++ log_error("Path name too long to get LV block size %s", display_lvname(lv)); ++ goto_bad; ++ } ++ ++ if (!sync_local_dev_names(cmd)) ++ stack; ++ ++ if (!(fs_dev = dev_cache_get(cmd, pathname, NULL))) { ++ if (test_mode()) { ++ log_print("Test mode skips checking fs block size."); ++ fs_block_size = 0; ++ goto skip_fs; ++ } ++ log_error("Device for LV not found to check block size %s", pathname); ++ goto_bad; ++ } ++ ++ /* ++ * get_fs_block_size() returns the libblkid BLOCK_SIZE value, ++ * where libblkid has fs-specific code to set BLOCK_SIZE to the ++ * value we need here. ++ * ++ * The term "block size" here may not equate directly to what the fs ++ * calls the block size, e.g. xfs calls this the sector size (and ++ * something different the block size); while ext4 does call this ++ * value the block size, but it's possible values are not the same ++ * as xfs's, and do not seem to relate directly to the device LBS. ++ * ++ * With 512 LBS and 4K PBS, mkfs.xfs will use xfs sector size 4K. ++ */ ++ rv = get_fs_block_size(fs_dev, &fs_block_size); ++skip_fs: ++ if (!rv || !fs_block_size) { ++ if (lbs_4k && pbs_4k && !pbs_512) { ++ block_size = 4096; ++ } else if (lbs_512 && pbs_512 && !pbs_4k) { ++ block_size = 512; ++ } else if (lbs_512 && pbs_4k) { ++ if (block_size_setting == 4096) ++ block_size = 4096; ++ else ++ block_size = 512; ++ } else { ++ block_size = 512; ++ } ++ ++ if (block_size_setting && (block_size_setting != block_size)) { ++ log_error("Cannot use writecache block size %u with unknown file system block size, logical block size %u, physical block size %u.", ++ block_size_setting, lbs_4k ? 4096 : 512, pbs_4k ? 4096 : 512); ++ goto bad; ++ } ++ ++ if (block_size != 512) { ++ log_warn("WARNING: unable to detect a file system block size on %s", display_lvname(lv)); ++ log_warn("WARNING: using a writecache block size larger than the file system block size may corrupt the file system."); ++ if (!arg_is_set(cmd, yes_ARG) && ++ yes_no_prompt("Use writecache block size %u? [y/n]: ", block_size) == 'n') { ++ log_error("Conversion aborted."); ++ goto bad; ++ } ++ } ++ ++ log_print("Using writecache block size %u for unknown file system block size, logical block size %u, physical block size %u.", ++ block_size, lbs_4k ? 4096 : 512, pbs_4k ? 
4096 : 512); ++ goto out; ++ } ++ ++ if (!block_size_setting) { ++ /* User did not specify a block size, so choose according to fs block size. */ ++ if (fs_block_size == 4096) ++ block_size = 4096; ++ else if (fs_block_size == 512) ++ block_size = 512; ++ else if (fs_block_size > 4096) ++ block_size = 4096; ++ else if (fs_block_size < 4096) ++ block_size = 512; ++ else ++ goto_bad; ++ } else { ++ if (block_size_setting <= fs_block_size) ++ block_size = block_size_setting; ++ else { ++ log_error("Writecache block size %u cannot be larger than file system block size %u.", ++ block_size_setting, fs_block_size); ++ goto_bad; ++ } ++ } ++ ++out: ++ if (block_size == 512) ++ *block_size_sectors = 1; ++ else if (block_size == 4096) ++ *block_size_sectors = 8; ++ else ++ goto_bad; ++ ++ return 1; ++bad: ++ return 0; ++} ++ ++int lvconvert_writecache_attach_single(struct cmd_context *cmd, + struct logical_volume *lv, + struct processing_handle *handle) + { +@@ -5616,68 +5779,91 @@ static int _lvconvert_writecache_attach_single(struct cmd_context *cmd, + struct logical_volume *lv_fast; + struct writecache_settings settings; + const char *fast_name; +- uint32_t block_size_sectors; ++ uint32_t block_size_sectors = 0; + char *lockd_fast_args = NULL; + char *lockd_fast_name = NULL; + struct id lockd_fast_id; + char cvol_name[NAME_LEN]; ++ int is_active; + +- fast_name = arg_str_value(cmd, cachevol_ARG, ""); ++ /* ++ * User specifies an existing cachevol to use or a cachedevice ++ * to create a cachevol from. ++ */ ++ if ((fast_name = arg_str_value(cmd, cachevol_ARG, NULL))) { ++ if (!validate_lvname_param(cmd, &vg->name, &fast_name)) ++ goto_bad; + +- if (!(lv_fast = find_lv(vg, fast_name))) { +- log_error("LV %s not found.", fast_name); +- goto bad; +- } ++ if (!(lv_fast = find_lv(vg, fast_name))) { ++ log_error("LV %s not found.", fast_name); ++ goto bad; ++ } + +- if (lv_fast == lv) { +- log_error("Invalid cachevol LV."); +- goto bad; +- } ++ if (lv_fast == lv) { ++ log_error("Invalid cachevol LV."); ++ goto bad; ++ } + +- if (!seg_is_linear(first_seg(lv_fast))) { +- log_error("LV %s must be linear to use as a writecache.", display_lvname(lv_fast)); +- goto bad; +- } ++ if (lv_is_cache_vol(lv_fast)) { ++ log_error("LV %s is already used as a cachevol.", display_lvname(lv_fast)); ++ goto bad; ++ } + +- if (lv_is_cache_vol(lv_fast)) { +- log_error("LV %s is already used as a cachevol.", display_lvname(lv_fast)); +- goto bad; +- } ++ if (!seg_is_linear(first_seg(lv_fast))) { ++ log_error("LV %s must be linear to use as a writecache.", display_lvname(lv_fast)); ++ goto bad; ++ } + +- /* +- * To permit this we need to check the block size of the fs using lv +- * (recently in libblkid) so that we can use a matching writecache +- * block size. We also want to do that if the lv is inactive. +- */ +- if (lv_is_active(lv)) { +- log_error("LV %s must be inactive to attach writecache.", display_lvname(lv)); +- goto bad; +- } ++ /* fast LV shouldn't generally be active by itself, but just in case. */ ++ if (lv_is_active(lv_fast)) { ++ log_error("LV %s must be inactive to attach.", display_lvname(lv_fast)); ++ goto bad; ++ } + +- /* fast LV shouldn't generally be active by itself, but just in case. */ +- if (lv_info(cmd, lv_fast, 1, NULL, 0, 0)) { +- log_error("LV %s must be inactive to attach.", display_lvname(lv_fast)); +- goto bad; ++ if (!arg_is_set(cmd, yes_ARG) && ++ yes_no_prompt("Erase all existing data on %s? 
[y/n]: ", display_lvname(lv_fast)) == 'n') { ++ log_error("Conversion aborted."); ++ goto bad; ++ } ++ } else { ++ if (!_lv_create_cachevol(cmd, vg, lv, &lv_fast)) ++ goto_bad; + } + ++ is_active = lv_is_active(lv); ++ + memset(&settings, 0, sizeof(settings)); +- block_size_sectors = DEFAULT_WRITECACHE_BLOCK_SIZE_SECTORS; + +- if (!_get_writecache_settings(cmd, &settings, &block_size_sectors)) { ++ if (!get_writecache_settings(cmd, &settings, &block_size_sectors)) { + log_error("Invalid writecache settings."); + goto bad; + } + +- if (!arg_is_set(cmd, yes_ARG) && +- yes_no_prompt("Erase all existing data on %s? [y/n]: ", display_lvname(lv_fast)) == 'n') { +- log_error("Conversion aborted."); +- goto bad; ++ if (!is_active) { ++ /* checking block size of fs on the lv requires the lv to be active */ ++ if (!activate_lv(cmd, lv)) { ++ log_error("Failed to activate LV to check block size %s", display_lvname(lv)); ++ goto bad; ++ } ++ } ++ ++ if (!_set_writecache_block_size(cmd, lv, &block_size_sectors)) { ++ if (!is_active && !deactivate_lv(cmd, lv)) ++ stack; ++ goto_bad; + } + +- /* Ensure the two LVs are not active elsewhere. */ ++ if (!is_active) { ++ if (!deactivate_lv(cmd, lv)) { ++ log_error("Failed to deactivate LV after checking block size %s", display_lvname(lv)); ++ goto bad; ++ } ++ } ++ ++ /* Ensure the LV is not active elsewhere. */ + if (!lockd_lv(cmd, lv, "ex", 0)) + goto_bad; +- if (!lockd_lv(cmd, lv_fast, "ex", 0)) ++ if (fast_name && !lockd_lv(cmd, lv_fast, "ex", 0)) + goto_bad; + + if (!archive(vg)) +@@ -5744,7 +5930,7 @@ static int _lvconvert_writecache_attach_single(struct cmd_context *cmd, + log_error("Failed to unlock fast LV %s/%s", vg->name, lockd_fast_name); + } + +- log_print_unless_silent("Logical volume %s now has write cache.", ++ log_print_unless_silent("Logical volume %s now has writecache.", + display_lvname(lv)); + return ECMD_PROCESSED; + bad: +@@ -5768,7 +5954,7 @@ int lvconvert_to_writecache_cmd(struct cmd_context *cmd, int argc, char **argv) + cmd->cname->flags &= ~GET_VGNAME_FROM_OPTIONS; + + ret = process_each_lv(cmd, cmd->position_argc, cmd->position_argv, NULL, NULL, READ_FOR_UPDATE, handle, NULL, +- &_lvconvert_writecache_attach_single); ++ &lvconvert_writecache_attach_single); + + destroy_processing_handle(cmd, handle); + +@@ -5791,7 +5977,7 @@ int lvconvert_to_cache_with_cachevol_cmd(struct cmd_context *cmd, int argc, char + cmd->cname->flags &= ~GET_VGNAME_FROM_OPTIONS; + + ret = process_each_lv(cmd, cmd->position_argc, cmd->position_argv, NULL, NULL, READ_FOR_UPDATE, handle, NULL, +- &_lvconvert_cachevol_attach_single); ++ &lvconvert_cachevol_attach_single); + + destroy_processing_handle(cmd, handle); + +diff --git a/tools/lvcreate.c b/tools/lvcreate.c +index 5c978b3..3357a08 100644 +--- a/tools/lvcreate.c ++++ b/tools/lvcreate.c +@@ -766,7 +766,9 @@ static int _lvcreate_params(struct cmd_context *cmd, + * + * Ordering of following type tests is IMPORTANT + */ +- if ((segtype_str = arg_str_value(cmd, type_ARG, NULL))) { ++ if (lp->ignore_type) { ++ segtype_str = SEG_TYPE_NAME_STRIPED; ++ } else if ((segtype_str = arg_str_value(cmd, type_ARG, NULL))) { + lp->type = 1; + if (!strcmp(segtype_str, "linear")) { + segtype_str = "striped"; +@@ -1799,3 +1801,152 @@ int lvcreate(struct cmd_context *cmd, int argc, char **argv) + destroy_processing_handle(cmd, handle); + return ret; + } ++ ++static int _lvcreate_and_attach_writecache_single(struct cmd_context *cmd, ++ const char *vg_name, struct volume_group *vg, struct processing_handle *handle) ++{ ++ 
struct processing_params *pp = (struct processing_params *) handle->custom_handle; ++ struct lvcreate_params *lp = pp->lp; ++ struct logical_volume *lv; ++ int ret; ++ ++ ret = _lvcreate_single(cmd, vg_name, vg, handle); ++ ++ if (ret == ECMD_FAILED) ++ return ret; ++ ++ if (!(lv = find_lv(vg, lp->lv_name))) { ++ log_error("Failed to find LV %s to add writecache.", lp->lv_name); ++ return ECMD_FAILED; ++ } ++ ++ ret = lvconvert_writecache_attach_single(cmd, lv, handle); ++ ++ if (ret == ECMD_FAILED) { ++ log_error("Removing new LV after failing to add writecache."); ++ if (!deactivate_lv(cmd, lv)) ++ log_error("Failed to deactivate new LV %s.", display_lvname(lv)); ++ if (!lv_remove_with_dependencies(cmd, lv, 1, 0)) ++ log_error("Failed to remove new LV %s.", display_lvname(lv)); ++ return ECMD_FAILED; ++ } ++ ++ return ECMD_PROCESSED; ++} ++ ++int lvcreate_and_attach_writecache_cmd(struct cmd_context *cmd, int argc, char **argv) ++{ ++ struct processing_handle *handle = NULL; ++ struct processing_params pp; ++ struct lvcreate_params lp = { ++ .major = -1, ++ .minor = -1, ++ }; ++ struct lvcreate_cmdline_params lcp = { 0 }; ++ int ret; ++ ++ /* ++ * Tell lvcreate to ignore --type since we are using lvcreate ++ * to create a linear LV and using lvconvert to add cache. ++ * (Would be better if lvcreate code was split up so we could ++ * call a specific function that just created a linear/striped LV.) ++ */ ++ lp.ignore_type = 1; ++ ++ if (!_lvcreate_params(cmd, argc, argv, &lp, &lcp)) { ++ stack; ++ return EINVALID_CMD_LINE; ++ } ++ ++ pp.lp = &lp; ++ pp.lcp = &lcp; ++ ++ if (!(handle = init_processing_handle(cmd, NULL))) { ++ log_error("Failed to initialize processing handle."); ++ return ECMD_FAILED; ++ } ++ ++ handle->custom_handle = &pp; ++ ++ ret = process_each_vg(cmd, 0, NULL, lp.vg_name, NULL, READ_FOR_UPDATE, 0, handle, ++ &_lvcreate_and_attach_writecache_single); ++ ++ _destroy_lvcreate_params(&lp); ++ destroy_processing_handle(cmd, handle); ++ return ret; ++} ++ ++static int _lvcreate_and_attach_cache_single(struct cmd_context *cmd, ++ const char *vg_name, struct volume_group *vg, struct processing_handle *handle) ++{ ++ struct processing_params *pp = (struct processing_params *) handle->custom_handle; ++ struct lvcreate_params *lp = pp->lp; ++ struct logical_volume *lv; ++ int ret; ++ ++ ret = _lvcreate_single(cmd, vg_name, vg, handle); ++ ++ if (ret == ECMD_FAILED) ++ return ret; ++ ++ if (!(lv = find_lv(vg, lp->lv_name))) { ++ log_error("Failed to find LV %s to add cache.", lp->lv_name); ++ return ECMD_FAILED; ++ } ++ ++ ret = lvconvert_cachevol_attach_single(cmd, lv, handle); ++ ++ if (ret == ECMD_FAILED) { ++ log_error("Removing new LV after failing to add cache."); ++ if (!deactivate_lv(cmd, lv)) ++ log_error("Failed to deactivate new LV %s.", display_lvname(lv)); ++ if (!lv_remove_with_dependencies(cmd, lv, 1, 0)) ++ log_error("Failed to remove new LV %s.", display_lvname(lv)); ++ return ECMD_FAILED; ++ } ++ ++ return ECMD_PROCESSED; ++} ++ ++int lvcreate_and_attach_cache_cmd(struct cmd_context *cmd, int argc, char **argv) ++{ ++ struct processing_handle *handle = NULL; ++ struct processing_params pp; ++ struct lvcreate_params lp = { ++ .major = -1, ++ .minor = -1, ++ }; ++ struct lvcreate_cmdline_params lcp = { 0 }; ++ int ret; ++ ++ /* ++ * Tell lvcreate to ignore --type since we are using lvcreate ++ * to create a linear LV and using lvconvert to add cache. 
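The pair of lvcreate_and_attach_*_cmd wrappers above implement the one-step lvcreate variants registered further down in lvmcmdline.c (lvcreate_and_attach_cachevol_for_writecache_CMD and friends): create a linear LV, then hand it to the matching lvconvert attach function, and tear the new LV down again if the attach step fails. As a rough sketch of the command-line shapes these wrappers serve — the VG, LV and device names are invented, and the exact option grammar lives in command-lines.in, which is not part of this hunk:

$ lvcreate --type writecache -L 100G -n main --cachevol fast vg
$ lvcreate --type writecache -L 100G -n main --cachedevice /dev/nvme0n1 vg

The first form uses an existing LV as the cachevol; the second lets lvm create a cachevol from the named cache device before attaching it, per the "existing cachevol or a cachedevice" comment in lvconvert_writecache_attach_single above.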
++ * (Would be better if lvcreate code was split up so we could ++ * call a specific function that just created a linear/striped LV.) ++ */ ++ lp.ignore_type = 1; ++ ++ if (!_lvcreate_params(cmd, argc, argv, &lp, &lcp)) { ++ stack; ++ return EINVALID_CMD_LINE; ++ } ++ ++ pp.lp = &lp; ++ pp.lcp = &lcp; ++ ++ if (!(handle = init_processing_handle(cmd, NULL))) { ++ log_error("Failed to initialize processing handle."); ++ return ECMD_FAILED; ++ } ++ ++ handle->custom_handle = &pp; ++ ++ ret = process_each_vg(cmd, 0, NULL, lp.vg_name, NULL, READ_FOR_UPDATE, 0, handle, ++ &_lvcreate_and_attach_cache_single); ++ ++ _destroy_lvcreate_params(&lp); ++ destroy_processing_handle(cmd, handle); ++ return ret; ++} ++ +diff --git a/tools/lvmcmdline.c b/tools/lvmcmdline.c +index d87a8f0..7cf4e3f 100644 +--- a/tools/lvmcmdline.c ++++ b/tools/lvmcmdline.c +@@ -124,8 +124,10 @@ static const struct command_function _command_functions[CMD_COUNT] = { + { lvconvert_to_cachepool_CMD, lvconvert_to_pool_cmd }, + { lvconvert_to_thin_with_external_CMD, lvconvert_to_thin_with_external_cmd }, + { lvconvert_to_cache_with_cachevol_CMD, lvconvert_to_cache_with_cachevol_cmd }, ++ { lvconvert_to_cache_with_device_CMD, lvconvert_to_cache_with_cachevol_cmd }, + { lvconvert_to_cache_with_cachepool_CMD, lvconvert_to_cache_with_cachepool_cmd }, + { lvconvert_to_writecache_CMD, lvconvert_to_writecache_cmd }, ++ { lvconvert_to_writecache_with_device_CMD, lvconvert_to_writecache_cmd }, + { lvconvert_swap_pool_metadata_CMD, lvconvert_swap_pool_metadata_cmd }, + { lvconvert_to_thinpool_or_swap_metadata_CMD, lvconvert_to_pool_or_swap_metadata_cmd }, + { lvconvert_to_cachepool_or_swap_metadata_CMD, lvconvert_to_pool_or_swap_metadata_cmd }, +@@ -152,6 +154,12 @@ static const struct command_function _command_functions[CMD_COUNT] = { + /* lvconvert for integrity */ + { lvconvert_integrity_CMD, lvconvert_integrity_cmd }, + ++ /* lvcreate */ ++ { lvcreate_and_attach_cachevol_for_cache_CMD, lvcreate_and_attach_cache_cmd }, ++ { lvcreate_and_attach_cachedevice_for_cache_CMD, lvcreate_and_attach_cache_cmd }, ++ { lvcreate_and_attach_cachevol_for_writecache_CMD, lvcreate_and_attach_writecache_cmd }, ++ { lvcreate_and_attach_cachedevice_for_writecache_CMD, lvcreate_and_attach_writecache_cmd }, ++ + { pvscan_display_CMD, pvscan_display_cmd }, + { pvscan_cache_CMD, pvscan_cache_cmd }, + }; +diff --git a/tools/toollib.c b/tools/toollib.c +index 89b6374..eb0de55 100644 +--- a/tools/toollib.c ++++ b/tools/toollib.c +@@ -1184,6 +1184,170 @@ out: + return ok; + } + ++static int _get_one_writecache_setting(struct cmd_context *cmd, struct writecache_settings *settings, ++ char *key, char *val, uint32_t *block_size_sectors) ++{ ++ /* special case: block_size is not a setting but is set with the --cachesettings option */ ++ if (!strncmp(key, "block_size", strlen("block_size"))) { ++ uint32_t block_size = 0; ++ if (sscanf(val, "%u", &block_size) != 1) ++ goto_bad; ++ if (block_size == 512) ++ *block_size_sectors = 1; ++ else if (block_size == 4096) ++ *block_size_sectors = 8; ++ else ++ goto_bad; ++ return 1; ++ } ++ ++ if (!strncmp(key, "high_watermark", strlen("high_watermark"))) { ++ if (sscanf(val, "%llu", (unsigned long long *)&settings->high_watermark) != 1) ++ goto_bad; ++ if (settings->high_watermark > 100) ++ goto_bad; ++ settings->high_watermark_set = 1; ++ return 1; ++ } ++ ++ if (!strncmp(key, "low_watermark", strlen("low_watermark"))) { ++ if (sscanf(val, "%llu", (unsigned long long *)&settings->low_watermark) != 1) ++ goto_bad; ++ if 
(settings->low_watermark > 100) ++ goto_bad; ++ settings->low_watermark_set = 1; ++ return 1; ++ } ++ ++ if (!strncmp(key, "writeback_jobs", strlen("writeback_jobs"))) { ++ if (sscanf(val, "%llu", (unsigned long long *)&settings->writeback_jobs) != 1) ++ goto_bad; ++ settings->writeback_jobs_set = 1; ++ return 1; ++ } ++ ++ if (!strncmp(key, "autocommit_blocks", strlen("autocommit_blocks"))) { ++ if (sscanf(val, "%llu", (unsigned long long *)&settings->autocommit_blocks) != 1) ++ goto_bad; ++ settings->autocommit_blocks_set = 1; ++ return 1; ++ } ++ ++ if (!strncmp(key, "autocommit_time", strlen("autocommit_time"))) { ++ if (sscanf(val, "%llu", (unsigned long long *)&settings->autocommit_time) != 1) ++ goto_bad; ++ settings->autocommit_time_set = 1; ++ return 1; ++ } ++ ++ if (!strncmp(key, "fua", strlen("fua"))) { ++ if (settings->nofua_set) { ++ log_error("Setting fua and nofua cannot both be set."); ++ return 0; ++ } ++ if (sscanf(val, "%u", &settings->fua) != 1) ++ goto_bad; ++ settings->fua_set = 1; ++ return 1; ++ } ++ ++ if (!strncmp(key, "nofua", strlen("nofua"))) { ++ if (settings->fua_set) { ++ log_error("Setting fua and nofua cannot both be set."); ++ return 0; ++ } ++ if (sscanf(val, "%u", &settings->nofua) != 1) ++ goto_bad; ++ settings->nofua_set = 1; ++ return 1; ++ } ++ ++ if (!strncmp(key, "cleaner", strlen("cleaner"))) { ++ if (sscanf(val, "%u", &settings->cleaner) != 1) ++ goto_bad; ++ settings->cleaner_set = 1; ++ return 1; ++ } ++ ++ if (!strncmp(key, "max_age", strlen("max_age"))) { ++ if (sscanf(val, "%u", &settings->max_age) != 1) ++ goto_bad; ++ settings->max_age_set = 1; ++ return 1; ++ } ++ ++ if (settings->new_key) { ++ log_error("Setting %s is not recognized. Only one unrecognized setting is allowed.", key); ++ return 0; ++ } ++ ++ log_warn("Unrecognized writecache setting \"%s\" may cause activation failure.", key); ++ if (yes_no_prompt("Use unrecognized writecache setting? [y/n]: ") == 'n') { ++ log_error("Aborting writecache conversion."); ++ return 0; ++ } ++ ++ log_warn("Using unrecognized writecache setting: %s = %s.", key, val); ++ ++ settings->new_key = dm_pool_strdup(cmd->mem, key); ++ settings->new_val = dm_pool_strdup(cmd->mem, val); ++ return 1; ++ ++ bad: ++ log_error("Invalid setting: %s", key); ++ return 0; ++} ++ ++int get_writecache_settings(struct cmd_context *cmd, struct writecache_settings *settings, ++ uint32_t *block_size_sectors) ++{ ++ struct arg_value_group_list *group; ++ const char *str; ++ char key[64]; ++ char val[64]; ++ int num; ++ int pos; ++ ++ /* ++ * "grouped" means that multiple --cachesettings options can be used. ++ * Each option is also allowed to contain multiple key = val pairs. 
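To make the grouped behaviour described in this comment concrete, an invocation would look roughly like the following — the VG/LV names are illustrative, the keys are the ones handled by _get_one_writecache_setting() above, and block_size is the special case noted there:

$ lvconvert --type writecache --cachevol fast \
      --cachesettings 'high_watermark=60 writeback_jobs=1024' \
      --cachesettings 'block_size=4096' vg/main

The parsing loop that follows scans each --cachesettings value for key=val tokens, so repeated options and multiple space-separated pairs inside one value all end up in the same struct writecache_settings.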
++ */ ++ ++ dm_list_iterate_items(group, &cmd->arg_value_groups) { ++ if (!grouped_arg_is_set(group->arg_values, cachesettings_ARG)) ++ continue; ++ ++ if (!(str = grouped_arg_str_value(group->arg_values, cachesettings_ARG, NULL))) ++ break; ++ ++ pos = 0; ++ ++ while (pos < strlen(str)) { ++ /* scan for "key1=val1 key2 = val2 key3= val3" */ ++ ++ memset(key, 0, sizeof(key)); ++ memset(val, 0, sizeof(val)); ++ ++ if (sscanf(str + pos, " %63[^=]=%63s %n", key, val, &num) != 2) { ++ log_error("Invalid setting at: %s", str+pos); ++ return 0; ++ } ++ ++ pos += num; ++ ++ if (!_get_one_writecache_setting(cmd, settings, key, val, block_size_sectors)) ++ return_0; ++ } ++ } ++ ++ if (settings->high_watermark_set && settings->low_watermark_set && ++ (settings->high_watermark <= settings->low_watermark)) { ++ log_error("High watermark must be greater than low watermark."); ++ return 0; ++ } ++ ++ return 1; ++} + + /* FIXME move to lib */ + static int _pv_change_tag(struct physical_volume *pv, const char *tag, int addtag) +diff --git a/tools/toollib.h b/tools/toollib.h +index 53a5e5b..f3a60fb 100644 +--- a/tools/toollib.h ++++ b/tools/toollib.h +@@ -217,6 +217,9 @@ int get_cache_params(struct cmd_context *cmd, + const char **name, + struct dm_config_tree **settings); + ++int get_writecache_settings(struct cmd_context *cmd, struct writecache_settings *settings, ++ uint32_t *block_size_sectors); ++ + int change_tag(struct cmd_context *cmd, struct volume_group *vg, + struct logical_volume *lv, struct physical_volume *pv, int arg); + +diff --git a/tools/tools.h b/tools/tools.h +index 7f2434d..c3d780d 100644 +--- a/tools/tools.h ++++ b/tools/tools.h +@@ -278,7 +278,18 @@ int lvconvert_to_vdopool_param_cmd(struct cmd_context *cmd, int argc, char **arg + + int lvconvert_integrity_cmd(struct cmd_context *cmd, int argc, char **argv); + ++int lvcreate_and_attach_writecache_cmd(struct cmd_context *cmd, int argc, char **argv); ++int lvcreate_and_attach_cache_cmd(struct cmd_context *cmd, int argc, char **argv); ++ + int pvscan_display_cmd(struct cmd_context *cmd, int argc, char **argv); + int pvscan_cache_cmd(struct cmd_context *cmd, int argc, char **argv); + ++ ++int lvconvert_writecache_attach_single(struct cmd_context *cmd, ++ struct logical_volume *lv, ++ struct processing_handle *handle); ++int lvconvert_cachevol_attach_single(struct cmd_context *cmd, ++ struct logical_volume *lv, ++ struct processing_handle *handle); ++ + #endif +-- +1.8.3.1 + diff --git a/SOURCES/0004-Revert-wipe_lv-changes.patch b/SOURCES/0004-Revert-wipe_lv-changes.patch new file mode 100644 index 0000000..0dce48f --- /dev/null +++ b/SOURCES/0004-Revert-wipe_lv-changes.patch @@ -0,0 +1,422 @@ +From 99b646d87469b5ca0e93fad6b77f51a00fbbd2b7 Mon Sep 17 00:00:00 2001 +From: Marian Csontos +Date: Wed, 12 Aug 2020 18:47:15 +0200 +Subject: [PATCH] Revert "debug: missing stacktrace" + +This reverts commit d0faad0db38fe733cae42d7df136d7ed4f7bcba6. + +Revert "raid: no wiping when zeroing raid metadata device" + +This reverts commit 9b9bf8786fb423a4430cc676301edadf2310098d. + +Revert "lvconvert: more support for yes conversion" + +This reverts commit b7f3667ce20b731bbda9b1d61df49abbcd1bd20e. + +Revert "wipe_lv: always zero at least 4K" + +This reverts commit fe78cd4082cb9af10580180d61898fcef93dc624. + +Revert "tests: check pool metadata are zeroed" + +This reverts commit 3f32f9811e01c8953d201c7c9b563561ad856130. + +Revert "tests: failure of zeroing fails command" + +This reverts commit 094d6f80ddb6d8a1c64977dfaae4073827063fe3. 
+ +Revert "make: make generate" + +This reverts commit 88b92d4225b90db82047f3bac55d8059918e9c1b. + +Conflicts: + man/lvconvert.8_pregen + +Revert "pool: zero metadata" + +This reverts commit bc39d5bec6fea787a8d8d16fa484084b7d2a7c29. + +Conflicts: + WHATS_NEW + +Revert "wipe_lv: make error a fatal event" + +This reverts commit edbc5a62b26806e5c4de59b5292609e955303576. + +Conflicts: + WHATS_NEW + +build: make generate +--- + WHATS_NEW | 2 - + conf/example.conf.in | 6 +-- + lib/config/config_settings.h | 5 +-- + lib/config/defaults.h | 1 - + lib/metadata/lv_manip.c | 78 ++++++++++++--------------------- + lib/metadata/metadata-exported.h | 2 - + lib/metadata/pool_manip.c | 6 +-- + test/lib/aux.sh | 1 - + test/shell/lvcreate-signature-wiping.sh | 7 --- + test/shell/lvcreate-thin.sh | 21 --------- + tools/lvconvert.c | 12 ++--- + 11 files changed, 36 insertions(+), 105 deletions(-) + +diff --git a/WHATS_NEW b/WHATS_NEW +index ac99e97..6a098b5 100644 +--- a/WHATS_NEW ++++ b/WHATS_NEW +@@ -6,8 +6,6 @@ Version 2.03.10 - + warning. + Fix conversion to raid from striped lagging type. + Fix conversion to 'mirrored' mirror log with larger regionsize. +- Zero pool metadata on allocation (disable with allocation/zero_metadata=0). +- Failure in zeroing or wiping will fail command (bypass with -Zn, -Wn). + Fix running out of free buffers for async writing for larger writes. + Add integrity with raid capability. + Fix support for lvconvert --repair used by foreign apps (i.e. Docker). +diff --git a/conf/example.conf.in b/conf/example.conf.in +index d5807e6..88858fc 100644 +--- a/conf/example.conf.in ++++ b/conf/example.conf.in +@@ -489,7 +489,7 @@ allocation { + # This configuration option does not have a default value defined. + + # Configuration option allocation/thin_pool_metadata_require_separate_pvs. +- # Thin pool metadata and data will always use different PVs. ++ # Thin pool metdata and data will always use different PVs. + thin_pool_metadata_require_separate_pvs = 0 + + # Configuration option allocation/thin_pool_zero. +@@ -527,10 +527,6 @@ allocation { + # This configuration option has an automatic default value. + # thin_pool_chunk_size_policy = "generic" + +- # Configuration option allocation/zero_metadata. +- # Zero whole metadata area before use with thin or cache pool. +- zero_metadata = 1 +- + # Configuration option allocation/thin_pool_chunk_size. + # The minimal chunk size in KiB for thin pool volumes. 
+ # Larger chunk sizes may improve performance for plain thin volumes, +diff --git a/lib/config/config_settings.h b/lib/config/config_settings.h +index b38ca11..dce9705 100644 +--- a/lib/config/config_settings.h ++++ b/lib/config/config_settings.h +@@ -626,7 +626,7 @@ cfg(allocation_cache_pool_max_chunks_CFG, "cache_pool_max_chunks", allocation_CF + "Using cache pool with more chunks may degrade cache performance.\n") + + cfg(allocation_thin_pool_metadata_require_separate_pvs_CFG, "thin_pool_metadata_require_separate_pvs", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_METADATA_REQUIRE_SEPARATE_PVS, vsn(2, 2, 89), NULL, 0, NULL, +- "Thin pool metadata and data will always use different PVs.\n") ++ "Thin pool metdata and data will always use different PVs.\n") + + cfg(allocation_thin_pool_zero_CFG, "thin_pool_zero", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_ZERO, vsn(2, 2, 99), NULL, 0, NULL, + "Thin pool data chunks are zeroed before they are first used.\n" +@@ -657,9 +657,6 @@ cfg(allocation_thin_pool_chunk_size_policy_CFG, "thin_pool_chunk_size_policy", a + " 512KiB.\n" + "#\n") + +-cfg(allocation_zero_metadata_CFG, "zero_metadata", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_ZERO_METADATA, vsn(2, 3, 10), NULL, 0, NULL, +- "Zero whole metadata area before use with thin or cache pool.\n") +- + cfg_runtime(allocation_thin_pool_chunk_size_CFG, "thin_pool_chunk_size", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_UNDEFINED, CFG_TYPE_INT, vsn(2, 2, 99), 0, NULL, + "The minimal chunk size in KiB for thin pool volumes.\n" + "Larger chunk sizes may improve performance for plain thin volumes,\n" +diff --git a/lib/config/defaults.h b/lib/config/defaults.h +index 708a575..be4f5ff 100644 +--- a/lib/config/defaults.h ++++ b/lib/config/defaults.h +@@ -129,7 +129,6 @@ + #define DEFAULT_THIN_POOL_DISCARDS "passdown" + #define DEFAULT_THIN_POOL_ZERO 1 + #define DEFAULT_POOL_METADATA_SPARE 1 /* thin + cache */ +-#define DEFAULT_ZERO_METADATA 1 /* thin + cache */ + + #ifdef CACHE_CHECK_NEEDS_CHECK + # define DEFAULT_CACHE_CHECK_OPTION1 "-q" +diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c +index f0ba3f0..1642b90 100644 +--- a/lib/metadata/lv_manip.c ++++ b/lib/metadata/lv_manip.c +@@ -7576,22 +7576,20 @@ int wipe_lv(struct logical_volume *lv, struct wipe_params wp) + struct device *dev; + char name[PATH_MAX]; + uint64_t zero_sectors; +- int zero_metadata = wp.is_metadata ? 
+- find_config_tree_bool(lv->vg->cmd, allocation_zero_metadata_CFG, NULL) : 0; + +- if (!wp.do_zero && !wp.do_wipe_signatures && !wp.is_metadata) ++ if (!wp.do_zero && !wp.do_wipe_signatures) + /* nothing to do */ + return 1; + + if (!lv_is_active(lv)) { +- log_error("Volume %s is not active locally (volume_list activation filter?).", +- display_lvname(lv)); ++ log_error("Volume \"%s/%s\" is not active locally (volume_list activation filter?).", ++ lv->vg->name, lv->name); + return 0; + } + + /* Wait until devices are available */ + if (!sync_local_dev_names(lv->vg->cmd)) { +- log_error("Failed to sync local devices before wiping volume %s.", ++ log_error("Failed to sync local devices before wiping LV %s.", + display_lvname(lv)); + return 0; + } +@@ -7615,59 +7613,40 @@ int wipe_lv(struct logical_volume *lv, struct wipe_params wp) + } + + if (!label_scan_open_rw(dev)) { +- log_error("Failed to open %s for wiping and zeroing.", display_lvname(lv)); +- return 0; ++ log_error("Failed to open %s/%s for wiping and zeroing.", lv->vg->name, lv->name); ++ goto out; + } + + if (wp.do_wipe_signatures) { +- log_verbose("Wiping known signatures on logical volume %s.", +- display_lvname(lv)); ++ log_verbose("Wiping known signatures on logical volume \"%s/%s\"", ++ lv->vg->name, lv->name); + if (!wipe_known_signatures(lv->vg->cmd, dev, name, 0, + TYPE_DM_SNAPSHOT_COW, +- wp.yes, wp.force, NULL)) { +- log_error("Filed to wipe signatures of logical volume %s.", +- display_lvname(lv)); +- return 0; +- } ++ wp.yes, wp.force, NULL)) ++ stack; + } + +- if (wp.do_zero || wp.is_metadata) { +- zero_metadata = !wp.is_metadata ? 0 : +- find_config_tree_bool(lv->vg->cmd, allocation_zero_metadata_CFG, NULL); +- if (zero_metadata) { +- log_debug("Metadata logical volume %s will be fully zeroed.", +- display_lvname(lv)); +- zero_sectors = lv->size; +- } else { +- if (wp.is_metadata) /* Verbosely notify metadata will not be fully zeroed */ +- log_verbose("Metadata logical volume %s not fully zeroed and may contain stale data.", +- display_lvname(lv)); +- zero_sectors = UINT64_C(4096) >> SECTOR_SHIFT; +- if (wp.zero_sectors > zero_sectors) +- zero_sectors = wp.zero_sectors; ++ if (wp.do_zero) { ++ zero_sectors = wp.zero_sectors ? 
: UINT64_C(4096) >> SECTOR_SHIFT; + +- if (zero_sectors > lv->size) +- zero_sectors = lv->size; +- } ++ if (zero_sectors > lv->size) ++ zero_sectors = lv->size; + +- log_verbose("Initializing %s of logical volume %s with value %d.", ++ log_verbose("Initializing %s of logical volume \"%s/%s\" with value %d.", + display_size(lv->vg->cmd, zero_sectors), +- display_lvname(lv), wp.zero_value); +- +- if ((!wp.is_metadata && +- wp.zero_value && !dev_set_bytes(dev, UINT64_C(0), +- (size_t) zero_sectors << SECTOR_SHIFT, +- (uint8_t)wp.zero_value)) || +- !dev_write_zeros(dev, UINT64_C(0), (size_t) zero_sectors << SECTOR_SHIFT)) { +- log_error("Failed to initialize %s of logical volume %s with value %d.", +- display_size(lv->vg->cmd, zero_sectors), +- display_lvname(lv), wp.zero_value); +- return 0; ++ lv->vg->name, lv->name, wp.zero_value); ++ ++ if (!wp.zero_value) { ++ if (!dev_write_zeros(dev, UINT64_C(0), (size_t) zero_sectors << SECTOR_SHIFT)) ++ stack; ++ } else { ++ if (!dev_set_bytes(dev, UINT64_C(0), (size_t) zero_sectors << SECTOR_SHIFT, (uint8_t)wp.zero_value)) ++ stack; + } + } + + label_scan_invalidate(dev); +- ++out: + lv->status &= ~LV_NOSCAN; + + return 1; +@@ -7731,10 +7710,12 @@ int activate_and_wipe_lvlist(struct dm_list *lv_list, int commit) + } + + dm_list_iterate_items(lvl, lv_list) { ++ log_verbose("Wiping metadata area %s.", display_lvname(lvl->lv)); + /* Wipe any know signatures */ +- if (!wipe_lv(lvl->lv, (struct wipe_params) { .do_zero = 1 /* TODO: is_metadata = 1 */ })) { ++ if (!wipe_lv(lvl->lv, (struct wipe_params) { .do_wipe_signatures = 1, .do_zero = 1, .zero_sectors = 1 })) { ++ log_error("Failed to wipe %s.", display_lvname(lvl->lv)); + r = 0; +- goto_out; ++ goto out; + } + } + out: +@@ -8479,8 +8460,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg, + .do_zero = lp->zero, + .do_wipe_signatures = lp->wipe_signatures, + .yes = lp->yes, +- .force = lp->force, +- .is_metadata = lp->is_metadata, ++ .force = lp->force + })) { + log_error("Aborting. Failed to wipe %s.", lp->snapshot + ? "snapshot exception store" : "start of new LV"); +diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h +index 06ea757..0cc5f37 100644 +--- a/lib/metadata/metadata-exported.h ++++ b/lib/metadata/metadata-exported.h +@@ -803,7 +803,6 @@ struct wipe_params { + int do_wipe_signatures; /* should we wipe known signatures found on LV? */ + int yes; /* answer yes automatically to all questions */ + force_t force; /* force mode */ +- int is_metadata; /* wipe volume is metadata LV */ + }; + + /* Zero out LV and/or wipe signatures */ +@@ -956,7 +955,6 @@ struct lvcreate_params { + unsigned suppress_zero_warn : 1; + unsigned needs_lockd_init : 1; + unsigned ignore_type : 1; +- unsigned is_metadata : 1; /* created LV will be used as metadata LV (and can be zeroed) */ + + const char *vg_name; /* only-used when VG is not yet opened (in /tools) */ + const char *lv_name; /* all */ +diff --git a/lib/metadata/pool_manip.c b/lib/metadata/pool_manip.c +index 23b5b63..bed51f1 100644 +--- a/lib/metadata/pool_manip.c ++++ b/lib/metadata/pool_manip.c +@@ -545,8 +545,8 @@ int create_pool(struct logical_volume *pool_lv, + display_lvname(pool_lv)); + goto bad; + } +- /* Clear pool metadata device. */ +- if (!(r = wipe_lv(pool_lv, (struct wipe_params) { .is_metadata = 1 }))) { ++ /* Clear 4KB of pool metadata device. */ ++ if (!(r = wipe_lv(pool_lv, (struct wipe_params) { .do_zero = 1 }))) { + log_error("Aborting. 
Failed to wipe pool metadata %s.", + display_lvname(pool_lv)); + } +@@ -627,7 +627,6 @@ struct logical_volume *alloc_pool_metadata(struct logical_volume *pool_lv, + .tags = DM_LIST_HEAD_INIT(lvc.tags), + .temporary = 1, + .zero = 1, +- .is_metadata = 1, + }; + + if (!(lvc.segtype = get_segtype_from_string(pool_lv->vg->cmd, SEG_TYPE_NAME_STRIPED))) +@@ -664,7 +663,6 @@ static struct logical_volume *_alloc_pool_metadata_spare(struct volume_group *vg + .tags = DM_LIST_HEAD_INIT(lp.tags), + .temporary = 1, + .zero = 1, +- .is_metadata = 1, + }; + + if (!(lp.segtype = get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_STRIPED))) +diff --git a/test/lib/aux.sh b/test/lib/aux.sh +index 17e7935..e40da95 100644 +--- a/test/lib/aux.sh ++++ b/test/lib/aux.sh +@@ -1234,7 +1234,6 @@ activation/verify_udev_operations = $LVM_VERIFY_UDEV + activation/raid_region_size = 512 + allocation/wipe_signatures_when_zeroing_new_lvs = 0 + allocation/vdo_slab_size_mb = 128 +-allocation/zero_metadata = 0 + backup/archive = 0 + backup/backup = 0 + devices/cache_dir = "$TESTDIR/etc" +diff --git a/test/shell/lvcreate-signature-wiping.sh b/test/shell/lvcreate-signature-wiping.sh +index 18d7a2f..73fea54 100644 +--- a/test/shell/lvcreate-signature-wiping.sh ++++ b/test/shell/lvcreate-signature-wiping.sh +@@ -42,13 +42,6 @@ init_lv_ + test_blkid_ || skip + lvremove -f $vg/$lv1 + +-# Zeroing stops the command when there is a failure (write error in this case) +-aux error_dev "$dev1" "$(get first_extent_sector "$dev1"):2" +-not lvcreate -l1 -n $lv1 $vg 2>&1 | tee out +-grep "Failed to initialize" out +-aux enable_dev "$dev1" +- +- + aux lvmconf "allocation/wipe_signatures_when_zeroing_new_lvs = 0" + + lvcreate -y -Zn -l1 -n $lv1 $vg 2>&1 | tee out +diff --git a/test/shell/lvcreate-thin.sh b/test/shell/lvcreate-thin.sh +index c073eaf..9ca7f11 100644 +--- a/test/shell/lvcreate-thin.sh ++++ b/test/shell/lvcreate-thin.sh +@@ -248,25 +248,4 @@ not lvcreate -s $vg/lv1 -L4M -V2G --name $vg/lv4 + not lvcreate -T mirpool -L4M --alloc anywhere -m1 $vg + not lvcreate --thinpool mirpool -L4M --alloc anywhere -m1 $vg + +- +-# Check pool metadata volume is zeroed, when zero_metadata is enabled. +-# 1st. ensure 8megs of both PVs will have some non-0 data +-lvcreate -L8m -n $lv1 $vg "$dev1" +-lvextend -L+8m $vg/$lv1 "$dev2" +-dd if=/dev/urandom of="$DM_DEV_DIR/$vg/$lv1" bs=1M count=16 oflag=direct conv=fdatasync +-lvremove -ff $vg/$lv1 +- +-lvcreate -l1 --poolmetadatasize 4m --conf 'allocation/zero_metadata=1' -vvvv -T $vg/pool +-lvchange -an $vg +-# component activation to check device was zeroed +-lvchange -y -ay $vg/pool_tmeta +-dd if="$DM_DEV_DIR/$vg/pool_tmeta" of=file bs=1M count=3 skip=1 iflag=direct conv=fdatasync +- +-md5sum -b file | tee out +-# md5sum of 3M of zeros +-grep d1dd210d6b1312cb342b56d02bd5e651 out +-lvchange -an $vg +-lvremove -ff $vg +- +- + vgremove -ff $vg +diff --git a/tools/lvconvert.c b/tools/lvconvert.c +index 524ed5a..6324ed7 100644 +--- a/tools/lvconvert.c ++++ b/tools/lvconvert.c +@@ -3286,11 +3286,7 @@ static int _lvconvert_to_pool(struct cmd_context *cmd, + } + metadata_lv->status &= ~LV_ACTIVATION_SKIP; + +- if (!wipe_lv(metadata_lv, (struct wipe_params) { +- .do_wipe_signatures = 1, +- .is_metadata = 1, +- .yes = arg_count(cmd, yes_ARG), +- .force = arg_count(cmd, force_ARG) } )) { ++ if (!wipe_lv(metadata_lv, (struct wipe_params) { .do_zero = 1 })) { + log_error("Aborting. 
Failed to wipe metadata lv."); + goto bad; + } +@@ -5527,8 +5523,7 @@ static int _writecache_zero(struct cmd_context *cmd, struct logical_volume *lv) + struct wipe_params wp = { + .do_wipe_signatures = 1, /* optional, to print warning if clobbering something */ + .do_zero = 1, /* required for dm-writecache to work */ +- .yes = arg_count(cmd, yes_ARG), +- .force = arg_count(cmd, force_ARG) ++ .zero_sectors = 1 + }; + int ret; + +@@ -5545,8 +5540,7 @@ static int _writecache_zero(struct cmd_context *cmd, struct logical_volume *lv) + return 0; + } + +- if (!(ret = wipe_lv(lv, wp))) +- stack; ++ ret = wipe_lv(lv, wp); + + if (!deactivate_lv(cmd, lv)) { + log_error("Failed to deactivate LV %s for zeroing.", display_lvname(lv)); +-- +1.8.3.1 + diff --git a/SOURCES/lvm2-2_03_09-cachevol-stop-dm-errors-with-uncaching-cache-with-ca.patch b/SOURCES/lvm2-2_03_09-cachevol-stop-dm-errors-with-uncaching-cache-with-ca.patch deleted file mode 100644 index 8b53214..0000000 --- a/SOURCES/lvm2-2_03_09-cachevol-stop-dm-errors-with-uncaching-cache-with-ca.patch +++ /dev/null @@ -1,70 +0,0 @@ - WHATS_NEW | 4 ++++ - lib/activate/dev_manager.c | 14 +++++++------- - 2 files changed, 11 insertions(+), 7 deletions(-) - -diff --git a/WHATS_NEW b/WHATS_NEW -index 50a0045..30f1391 100644 ---- a/WHATS_NEW -+++ b/WHATS_NEW -@@ -1,3 +1,7 @@ -+Version 2.03.09 - -+==================================== -+ Fix showing of a dm kernel error when uncaching a volume with cachevol. -+ - Version 2.03.08 - 11th February 2020 - ==================================== - Prevent problematic snapshots of writecache volumes. -diff --git a/lib/activate/dev_manager.c b/lib/activate/dev_manager.c -index 8569e86..c8a22fb 100644 ---- a/lib/activate/dev_manager.c -+++ b/lib/activate/dev_manager.c -@@ -3161,8 +3161,8 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree, - char *dlid_meta; - char *dlid_data; - char *dlid_pool; -- uint64_t meta_len = first_seg(lv)->metadata_len; -- uint64_t data_len = first_seg(lv)->data_len; -+ uint64_t meta_size = first_seg(lv)->metadata_len; -+ uint64_t data_size = first_seg(lv)->data_len; - uint16_t udev_flags = _get_udev_flags(dm, lv, layer, - laopts->noscan, laopts->temporary, - 0); -@@ -3210,12 +3210,12 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree, - - if (dm->track_pending_delete) { - log_debug_activation("Using error for pending meta delete %s.", display_lvname(lv)); -- if (!dm_tree_node_add_error_target(dnode_meta, (uint64_t)lv->vg->extent_size * meta_len)) -+ if (!dm_tree_node_add_error_target(dnode_meta, meta_size)) - return_0; - } else { - /* add load_segment to meta dnode: linear, size of meta area */ - if (!add_linear_area_to_dtree(dnode_meta, -- meta_len, -+ meta_size, - lv->vg->extent_size, - lv->vg->cmd->use_linear_target, - lv->vg->name, lv->name)) -@@ -3239,19 +3239,19 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree, - - if (dm->track_pending_delete) { - log_debug_activation("Using error for pending data delete %s.", display_lvname(lv)); -- if (!dm_tree_node_add_error_target(dnode_data, (uint64_t)lv->vg->extent_size * data_len)) -+ if (!dm_tree_node_add_error_target(dnode_data, data_size)) - return_0; - } else { - /* add load_segment to data dnode: linear, size of data area */ - if (!add_linear_area_to_dtree(dnode_data, -- data_len, -+ data_size, - lv->vg->extent_size, - lv->vg->cmd->use_linear_target, - lv->vg->name, lv->name)) - return_0; - - /* add seg_area to prev load_seg: offset 0 maps to 
cachepool lv after meta */ -- if (!dm_tree_node_add_target_area(dnode_data, NULL, dlid_pool, meta_len)) -+ if (!dm_tree_node_add_target_area(dnode_data, NULL, dlid_pool, meta_size)) - return_0; - } - } - diff --git a/SOURCES/lvm2-2_03_09-test-Can-not-attach-writecache-to-active-volume.patch b/SOURCES/lvm2-2_03_09-test-Can-not-attach-writecache-to-active-volume.patch deleted file mode 100644 index d579eb0..0000000 --- a/SOURCES/lvm2-2_03_09-test-Can-not-attach-writecache-to-active-volume.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 54d0cb47848f2abfcbe106dc65250f612c7b1455 Mon Sep 17 00:00:00 2001 -From: Marian Csontos -Date: Wed, 18 Mar 2020 14:30:09 +0100 -Subject: [PATCH] test: Can not attach writecache to active volume - ---- - test/shell/writecache.sh | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/test/shell/writecache.sh b/test/shell/writecache.sh -index 0a7f694..a7a2d95 100644 ---- a/test/shell/writecache.sh -+++ b/test/shell/writecache.sh -@@ -139,8 +139,10 @@ mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir - cp pattern1 $mount_dir/pattern1 - ls -l $mount_dir - --lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1 -+# TODO BZ 1808012 - can not convert active volume to writecache: -+not lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1 - -+if false; then - check lv_field $vg/$lv1 segtype writecache - - lvs -a $vg/${lv2}_cvol --noheadings -o segtype >out -@@ -162,6 +164,7 @@ diff pattern1 $mount_dir/pattern1.after - umount $mount_dir - lvchange -an $vg/$lv1 - lvremove $vg/$lv1 -+fi - - vgremove -ff $vg - --- -1.8.3.1 - diff --git a/SOURCES/lvm2-2_03_09-thin-don-t-use-writecache-for-poolmetadata.patch b/SOURCES/lvm2-2_03_09-thin-don-t-use-writecache-for-poolmetadata.patch deleted file mode 100644 index 071c852..0000000 --- a/SOURCES/lvm2-2_03_09-thin-don-t-use-writecache-for-poolmetadata.patch +++ /dev/null @@ -1,18 +0,0 @@ - tools/lvconvert.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/tools/lvconvert.c b/tools/lvconvert.c -index aa2dca7..757b323 100644 ---- a/tools/lvconvert.c -+++ b/tools/lvconvert.c -@@ -3104,8 +3104,9 @@ static int _lvconvert_to_pool(struct cmd_context *cmd, - return 0; - } - -- /* FIXME Tidy up all these type restrictions. */ -+ /* FIXME Tidy up all these type restrictions. (Use a type whitelist?) 
*/ - if (lv_is_cache_type(metadata_lv) || -+ lv_is_writecache(metadata_lv) || - lv_is_thin_type(metadata_lv) || - lv_is_cow(metadata_lv) || lv_is_merging_cow(metadata_lv) || - lv_is_origin(metadata_lv) || lv_is_merging_origin(metadata_lv) || diff --git a/SOURCES/lvm2-2_03_09-writecache-allow-removing-wcorig-lv.patch b/SOURCES/lvm2-2_03_09-writecache-allow-removing-wcorig-lv.patch deleted file mode 100644 index d76f358..0000000 --- a/SOURCES/lvm2-2_03_09-writecache-allow-removing-wcorig-lv.patch +++ /dev/null @@ -1,16 +0,0 @@ - lib/metadata/lv_manip.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c -index 3604a63..3090a93 100644 ---- a/lib/metadata/lv_manip.c -+++ b/lib/metadata/lv_manip.c -@@ -6638,7 +6638,7 @@ int lv_remove_with_dependencies(struct cmd_context *cmd, struct logical_volume * - return 0; - } - -- if (lv_is_cache_origin(lv)) { -+ if (lv_is_cache_origin(lv) || lv_is_writecache_origin(lv)) { - if (!_lv_remove_segs_using_this_lv(cmd, lv, force, level, "cache origin")) - return_0; - /* Removal of cache LV also removes caching origin */ diff --git a/SOURCES/lvm2-2_03_09-writecache-check-if-cachevol-is-writable.patch b/SOURCES/lvm2-2_03_09-writecache-check-if-cachevol-is-writable.patch deleted file mode 100644 index 0a04eff..0000000 --- a/SOURCES/lvm2-2_03_09-writecache-check-if-cachevol-is-writable.patch +++ /dev/null @@ -1,19 +0,0 @@ - tools/lvconvert.c | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/tools/lvconvert.c b/tools/lvconvert.c -index cd9a3e8..aa2dca7 100644 ---- a/tools/lvconvert.c -+++ b/tools/lvconvert.c -@@ -5351,6 +5351,11 @@ static int _writecache_zero(struct cmd_context *cmd, struct logical_volume *lv) - }; - int ret; - -+ if (!(lv->status & LVM_WRITE)) { -+ log_error("Cannot initialize readonly LV %s", display_lvname(lv)); -+ return 0; -+ } -+ - if (test_mode()) - return 1; - diff --git a/SOURCES/lvm2-2_03_09-writecache-drop-real-dm-suffix.patch b/SOURCES/lvm2-2_03_09-writecache-drop-real-dm-suffix.patch deleted file mode 100644 index f5dde8b..0000000 --- a/SOURCES/lvm2-2_03_09-writecache-drop-real-dm-suffix.patch +++ /dev/null @@ -1,76 +0,0 @@ - lib/misc/lvm-string.c | 1 - - lib/writecache/writecache.c | 2 +- - test/shell/writecache.sh | 36 ++++++++++++++++++++++++++++++++++++ - 3 files changed, 37 insertions(+), 2 deletions(-) - -diff --git a/lib/misc/lvm-string.c b/lib/misc/lvm-string.c -index 0ee3403..d8b27cb 100644 ---- a/lib/misc/lvm-string.c -+++ b/lib/misc/lvm-string.c -@@ -251,7 +251,6 @@ char *build_dm_uuid(struct dm_pool *mem, const struct logical_volume *lv, - */ - /* Suffixes used here MUST match lib/activate/dev_manager.c */ - layer = lv_is_cache_origin(lv) ? "real" : -- lv_is_writecache_origin(lv) ? "real" : - (lv_is_cache(lv) && lv_is_pending_delete(lv)) ? "real" : - lv_is_cache_pool_data(lv) ? "cdata" : - lv_is_cache_pool_metadata(lv) ? 
"cmeta" : -diff --git a/lib/writecache/writecache.c b/lib/writecache/writecache.c -index 130922a..08a306e 100644 ---- a/lib/writecache/writecache.c -+++ b/lib/writecache/writecache.c -@@ -260,7 +260,7 @@ static int _writecache_add_target_line(struct dev_manager *dm, - if ((pmem = lv_on_pmem(seg->writecache)) < 0) - return_0; - -- if (!(origin_uuid = build_dm_uuid(mem, seg_lv(seg, 0), "real"))) -+ if (!(origin_uuid = build_dm_uuid(mem, seg_lv(seg, 0), NULL))) - return_0; - - if (!(fast_uuid = build_dm_uuid(mem, seg->writecache, "cvol"))) -diff --git a/test/shell/writecache.sh b/test/shell/writecache.sh -index 6cd4665..0a7f694 100644 ---- a/test/shell/writecache.sh -+++ b/test/shell/writecache.sh -@@ -127,5 +127,41 @@ umount $mount_dir - lvchange -an $vg/$lv1 - lvchange -an $vg/$lv2 - -+ -+# test3: attach writecache to an active LV -+ -+lvchange -ay $vg/$lv1 -+ -+mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1" -+ -+mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir -+ -+cp pattern1 $mount_dir/pattern1 -+ls -l $mount_dir -+ -+lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1 -+ -+check lv_field $vg/$lv1 segtype writecache -+ -+lvs -a $vg/${lv2}_cvol --noheadings -o segtype >out -+grep linear out -+ -+cp pattern1 $mount_dir/pattern1.after -+ -+diff pattern1 $mount_dir/pattern1 -+diff pattern1 $mount_dir/pattern1.after -+ -+umount $mount_dir -+lvchange -an $vg/$lv1 -+lvchange -ay $vg/$lv1 -+mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir -+ -+diff pattern1 $mount_dir/pattern1 -+diff pattern1 $mount_dir/pattern1.after -+ -+umount $mount_dir -+lvchange -an $vg/$lv1 -+lvremove $vg/$lv1 -+ - vgremove -ff $vg - diff --git a/SOURCES/lvm2-2_03_09-writecache-fix-watermark-error-message.patch b/SOURCES/lvm2-2_03_09-writecache-fix-watermark-error-message.patch deleted file mode 100644 index bc3d1b6..0000000 --- a/SOURCES/lvm2-2_03_09-writecache-fix-watermark-error-message.patch +++ /dev/null @@ -1,16 +0,0 @@ - tools/lvconvert.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tools/lvconvert.c b/tools/lvconvert.c -index 0ddeb35..4ebda4c 100644 ---- a/tools/lvconvert.c -+++ b/tools/lvconvert.c -@@ -5515,7 +5515,7 @@ static int _get_writecache_settings(struct cmd_context *cmd, struct writecache_s - - if (settings->high_watermark_set && settings->low_watermark_set && - (settings->high_watermark <= settings->low_watermark)) { -- log_error("High watermark must be greater than or equal to low watermark."); -+ log_error("High watermark must be greater than low watermark."); - return 0; - } - diff --git a/SOURCES/lvm2-2_03_09-writecache-require-inactive-LV-to-attach.patch b/SOURCES/lvm2-2_03_09-writecache-require-inactive-LV-to-attach.patch deleted file mode 100644 index 8a93ba5..0000000 --- a/SOURCES/lvm2-2_03_09-writecache-require-inactive-LV-to-attach.patch +++ /dev/null @@ -1,40 +0,0 @@ - tools/lvconvert.c | 19 ++++++++++--------- - 1 file changed, 10 insertions(+), 9 deletions(-) - -diff --git a/tools/lvconvert.c b/tools/lvconvert.c -index 4ebda4c..bfaf4c0 100644 ---- a/tools/lvconvert.c -+++ b/tools/lvconvert.c -@@ -5599,6 +5599,16 @@ static int _lvconvert_writecache_attach_single(struct cmd_context *cmd, - goto bad; - } - -+ /* -+ * To permit this we need to check the block size of the fs using lv -+ * (recently in libblkid) so that we can use a matching writecache -+ * block size. We also want to do that if the lv is inactive. 
-+ */ -+ if (lv_is_active(lv)) { -+ log_error("LV %s must be inactive to attach writecache.", display_lvname(lv)); -+ goto bad; -+ } -+ - /* fast LV shouldn't generally be active by itself, but just in case. */ - if (lv_info(cmd, lv_fast, 1, NULL, 0, 0)) { - log_error("LV %s must be inactive to attach.", display_lvname(lv_fast)); -@@ -5639,15 +5649,6 @@ static int _lvconvert_writecache_attach_single(struct cmd_context *cmd, - memcpy(&lockd_fast_id, &lv_fast->lvid.id[1], sizeof(struct id)); - } - -- /* -- * TODO: use libblkid to get the sector size of lv. If it doesn't -- * match the block_size we are using for the writecache, then warn that -- * an existing file system on lv may become unmountable with the -- * writecache attached because of the changing sector size. If this -- * happens, then use --splitcache, and reattach the writecache using a -- * writecache block_size value matching the sector size of lv. -- */ -- - if (!_writecache_zero(cmd, lv_fast)) { - log_error("LV %s could not be zeroed.", display_lvname(lv_fast)); - return ECMD_FAILED; diff --git a/SOURCES/lvm2-2_03_09-writecache-working-real-dm-uuid-suffix-for-wcorig-lv.patch b/SOURCES/lvm2-2_03_09-writecache-working-real-dm-uuid-suffix-for-wcorig-lv.patch deleted file mode 100644 index 2df3470..0000000 --- a/SOURCES/lvm2-2_03_09-writecache-working-real-dm-uuid-suffix-for-wcorig-lv.patch +++ /dev/null @@ -1,102 +0,0 @@ - lib/activate/dev_manager.c | 4 ++++ - lib/metadata/lv.c | 2 +- - lib/metadata/writecache_manip.c | 16 ++++++++-------- - lib/misc/lvm-string.c | 1 + - lib/writecache/writecache.c | 2 +- - tools/lvconvert.c | 2 ++ - 6 files changed, 17 insertions(+), 10 deletions(-) - -diff --git a/lib/activate/dev_manager.c b/lib/activate/dev_manager.c -index c8a22fb..3b99131 100644 ---- a/lib/activate/dev_manager.c -+++ b/lib/activate/dev_manager.c -@@ -3316,6 +3316,10 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree, - if (!layer && lv_is_new_thin_pool(lv)) - layer = lv_layer(lv); - -+ /* Adds -real to the dm uuid of wcorig LV. */ -+ if (!layer && lv_is_writecache_origin(lv)) -+ layer = lv_layer(lv); /* "real" */ -+ - if (!(dlid = build_dm_uuid(dm->mem, lv, layer))) - return_0; - -diff --git a/lib/metadata/lv.c b/lib/metadata/lv.c -index ab26b8d..17d4907 100644 ---- a/lib/metadata/lv.c -+++ b/lib/metadata/lv.c -@@ -830,7 +830,7 @@ const char *lv_layer(const struct logical_volume *lv) - if (lv_is_vdo_pool(lv)) - return "vpool"; - -- if (lv_is_origin(lv) || lv_is_external_origin(lv)) -+ if (lv_is_origin(lv) || lv_is_external_origin(lv) || lv_is_writecache_origin(lv)) - return "real"; - - return NULL; -diff --git a/lib/metadata/writecache_manip.c b/lib/metadata/writecache_manip.c -index 0122025..31d069e 100644 ---- a/lib/metadata/writecache_manip.c -+++ b/lib/metadata/writecache_manip.c -@@ -24,15 +24,15 @@ - - int lv_is_writecache_origin(const struct logical_volume *lv) - { -- struct seg_list *sl; -+ struct lv_segment *seg; - -- dm_list_iterate_items(sl, &lv->segs_using_this_lv) { -- if (!sl->seg || !sl->seg->lv || !sl->seg->origin) -- continue; -- if (lv_is_writecache(sl->seg->lv) && (sl->seg->origin == lv)) -- return 1; -- } -- return 0; -+ /* Make sure there's exactly one segment in segs_using_this_lv! 
*/ -+ if (dm_list_empty(&lv->segs_using_this_lv) || -+ (dm_list_size(&lv->segs_using_this_lv) > 1)) -+ return 0; -+ -+ seg = get_only_segment_using_this_lv(lv); -+ return seg && lv_is_writecache(seg->lv) && !lv_is_pending_delete(seg->lv) && (seg_lv(seg, 0) == lv); - } - - int lv_is_writecache_cachevol(const struct logical_volume *lv) -diff --git a/lib/misc/lvm-string.c b/lib/misc/lvm-string.c -index d8b27cb..0ee3403 100644 ---- a/lib/misc/lvm-string.c -+++ b/lib/misc/lvm-string.c -@@ -251,6 +251,7 @@ char *build_dm_uuid(struct dm_pool *mem, const struct logical_volume *lv, - */ - /* Suffixes used here MUST match lib/activate/dev_manager.c */ - layer = lv_is_cache_origin(lv) ? "real" : -+ lv_is_writecache_origin(lv) ? "real" : - (lv_is_cache(lv) && lv_is_pending_delete(lv)) ? "real" : - lv_is_cache_pool_data(lv) ? "cdata" : - lv_is_cache_pool_metadata(lv) ? "cmeta" : -diff --git a/lib/writecache/writecache.c b/lib/writecache/writecache.c -index 08a306e..130922a 100644 ---- a/lib/writecache/writecache.c -+++ b/lib/writecache/writecache.c -@@ -260,7 +260,7 @@ static int _writecache_add_target_line(struct dev_manager *dm, - if ((pmem = lv_on_pmem(seg->writecache)) < 0) - return_0; - -- if (!(origin_uuid = build_dm_uuid(mem, seg_lv(seg, 0), NULL))) -+ if (!(origin_uuid = build_dm_uuid(mem, seg_lv(seg, 0), "real"))) - return_0; - - if (!(fast_uuid = build_dm_uuid(mem, seg->writecache, "cvol"))) -diff --git a/tools/lvconvert.c b/tools/lvconvert.c -index 757b323..0ddeb35 100644 ---- a/tools/lvconvert.c -+++ b/tools/lvconvert.c -@@ -5537,6 +5537,8 @@ static struct logical_volume *_lv_writecache_create(struct cmd_context *cmd, - if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_WRITECACHE))) - return_NULL; - -+ lv->status |= WRITECACHE; -+ - /* - * "lv_wcorig" is a new LV with new id, but with the segments from "lv". - * "lv" keeps the existing name and id, but gets a new writecache segment, diff --git a/SOURCES/lvm2-2_03_10-Allow-dm-integrity-to-be-used-for-raid-images.patch b/SOURCES/lvm2-2_03_10-Allow-dm-integrity-to-be-used-for-raid-images.patch new file mode 100644 index 0000000..d8dcd03 --- /dev/null +++ b/SOURCES/lvm2-2_03_10-Allow-dm-integrity-to-be-used-for-raid-images.patch @@ -0,0 +1,5052 @@ +From d51fc23f967acdf8534cdda63aa9aea0f44f9d0c Mon Sep 17 00:00:00 2001 +From: David Teigland +Date: Wed, 20 Nov 2019 16:07:27 -0600 +Subject: [PATCH 2/3] Allow dm-integrity to be used for raid images + +dm-integrity stores checksums of the data written to an +LV, and returns an error if data read from the LV does +not match the previously saved checksum. When used on +raid images, dm-raid will correct the error by reading +the block from another image, and the device user sees +no error. The integrity metadata (checksums) are stored +on an internal LV allocated by lvm for each linear image. +The internal LV is allocated on the same PV as the image. + +Create a raid LV with an integrity layer over each +raid image (for raid levels 1,4,5,6,10): + +lvcreate --type raidN --raidintegrity y [options] + +Add an integrity layer to images of an existing raid LV: + +lvconvert --raidintegrity y LV + +Remove the integrity layer from images of a raid LV: + +lvconvert --raidintegrity n LV + +Settings + +Use --raidintegritymode journal|bitmap (journal is default) +to configure the method used by dm-integrity to ensure +crash consistency. + +Initialization + +When integrity is added to an LV, the kernel needs to +initialize the integrity metadata/checksums for all blocks +in the LV. 
The data corruption checking performed by +dm-integrity will only operate on areas of the LV that +are already initialized. The progress of integrity +initialization is reported by the "syncpercent" LV +reporting field (and under the Cpy%Sync lvs column.) + +Example: create a raid1 LV with integrity: + +$ lvcreate --type raid1 -m1 --raidintegrity y -n rr -L1G foo + Creating integrity metadata LV rr_rimage_0_imeta with size 12.00 MiB. + Logical volume "rr_rimage_0_imeta" created. + Creating integrity metadata LV rr_rimage_1_imeta with size 12.00 MiB. + Logical volume "rr_rimage_1_imeta" created. + Logical volume "rr" created. +$ lvs -a foo + LV VG Attr LSize Origin Cpy%Sync + rr foo rwi-a-r--- 1.00g 4.93 + [rr_rimage_0] foo gwi-aor--- 1.00g [rr_rimage_0_iorig] 41.02 + [rr_rimage_0_imeta] foo ewi-ao---- 12.00m + [rr_rimage_0_iorig] foo -wi-ao---- 1.00g + [rr_rimage_1] foo gwi-aor--- 1.00g [rr_rimage_1_iorig] 39.45 + [rr_rimage_1_imeta] foo ewi-ao---- 12.00m + [rr_rimage_1_iorig] foo -wi-ao---- 1.00g + [rr_rmeta_0] foo ewi-aor--- 4.00m + [rr_rmeta_1] foo ewi-aor--- 4.00m + +(cherry picked from commit d9e8895a96539d75166c0f74e58f5ed4e729e551) +--- + configure | 27 ++ + configure.ac | 18 + + device_mapper/all.h | 39 ++ + device_mapper/ioctl/libdm-iface.c | 31 +- + device_mapper/ioctl/libdm-targets.h | 1 + + device_mapper/libdm-deptree.c | 154 ++++++- + device_mapper/libdm-targets.c | 27 ++ + include/configure.h.in | 3 + + lib/Makefile.in | 2 + + lib/activate/activate.c | 7 + + lib/activate/activate.h | 4 + + lib/activate/dev_manager.c | 18 +- + lib/commands/toolcontext.c | 5 + + lib/device/dev-type.c | 39 ++ + lib/device/dev-type.h | 2 + + lib/format_text/flags.c | 2 + + lib/integrity/integrity.c | 343 +++++++++++++++ + lib/metadata/integrity_manip.c | 821 ++++++++++++++++++++++++++++++++++++ + lib/metadata/lv.c | 18 +- + lib/metadata/lv_manip.c | 150 ++++++- + lib/metadata/merge.c | 2 + + lib/metadata/metadata-exported.h | 26 ++ + lib/metadata/raid_manip.c | 85 +++- + lib/metadata/segtype.h | 6 + + lib/metadata/snapshot_manip.c | 2 + + lib/misc/lvm-string.c | 4 +- + lib/report/report.c | 2 +- + man/lvmraid.7_main | 83 ++++ + test/lib/aux.sh | 8 + + test/shell/integrity-blocksize.sh | 183 ++++++++ + test/shell/integrity-dmeventd.sh | 289 +++++++++++++ + test/shell/integrity-large.sh | 175 ++++++++ + test/shell/integrity-misc.sh | 228 ++++++++++ + test/shell/integrity.sh | 735 ++++++++++++++++++++++++++++++++ + tools/args.h | 20 + + tools/command-lines.in | 45 +- + tools/lv_props.h | 1 + + tools/lv_types.h | 1 + + tools/lvchange.c | 5 + + tools/lvconvert.c | 137 ++++++ + tools/lvcreate.c | 15 +- + tools/lvmcmdline.c | 3 + + tools/pvmove.c | 10 + + tools/toollib.c | 47 +++ + tools/tools.h | 4 + + 45 files changed, 3790 insertions(+), 37 deletions(-) + create mode 100644 lib/integrity/integrity.c + create mode 100644 lib/metadata/integrity_manip.c + create mode 100644 test/shell/integrity-blocksize.sh + create mode 100644 test/shell/integrity-dmeventd.sh + create mode 100644 test/shell/integrity-large.sh + create mode 100644 test/shell/integrity-misc.sh + create mode 100644 test/shell/integrity.sh + +diff --git a/configure b/configure +index 6dd7eda..716ee9c 100755 +--- a/configure ++++ b/configure +@@ -918,6 +918,7 @@ enable_cache_check_needs_check + with_vdo + with_vdo_format + with_writecache ++with_integrity + enable_readline + enable_realtime + enable_ocf +@@ -1716,6 +1717,7 @@ Optional Packages: + --with-vdo=TYPE vdo support: internal/none [internal] + --with-vdo-format=PATH vdoformat tool: 
[autodetect] + --with-writecache=TYPE writecache support: internal/none [none] ++ --with-integrity=TYPE integrity support: internal/none [none] + --with-ocfdir=DIR install OCF files in + [PREFIX/lib/ocf/resource.d/lvm2] + --with-default-pid-dir=PID_DIR +@@ -9762,6 +9764,31 @@ $as_echo "#define WRITECACHE_INTERNAL 1" >>confdefs.h + esac + + ################################################################################ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to include integrity" >&5 ++$as_echo_n "checking whether to include integrity... " >&6; } ++ ++# Check whether --with-integrity was given. ++if test "${with_integrity+set}" = set; then : ++ withval=$with_integrity; INTEGRITY=$withval ++else ++ INTEGRITY="none" ++fi ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INTEGRITY" >&5 ++$as_echo "$INTEGRITY" >&6; } ++ ++case "$INTEGRITY" in ++ none) ;; ++ internal) ++ ++$as_echo "#define INTEGRITY_INTERNAL 1" >>confdefs.h ++ ++ ;; ++ *) as_fn_error $? "--with-integrity parameter invalid" "$LINENO" 5 ;; ++esac ++ ++################################################################################ + # Check whether --enable-readline was given. + if test "${enable_readline+set}" = set; then : + enableval=$enable_readline; READLINE=$enableval +diff --git a/configure.ac b/configure.ac +index 74ca201..9a0e41a 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -668,6 +668,24 @@ case "$WRITECACHE" in + esac + + ################################################################################ ++dnl -- integrity inclusion type ++AC_MSG_CHECKING(whether to include integrity) ++AC_ARG_WITH(integrity, ++ AC_HELP_STRING([--with-integrity=TYPE], ++ [integrity support: internal/none [none]]), ++ INTEGRITY=$withval, INTEGRITY="none") ++ ++AC_MSG_RESULT($INTEGRITY) ++ ++case "$INTEGRITY" in ++ none) ;; ++ internal) ++ AC_DEFINE([INTEGRITY_INTERNAL], 1, [Define to 1 to include built-in support for integrity.]) ++ ;; ++ *) AC_MSG_ERROR([--with-integrity parameter invalid]) ;; ++esac ++ ++################################################################################ + dnl -- Disable readline + AC_ARG_ENABLE([readline], + AC_HELP_STRING([--disable-readline], [disable readline support]), +diff --git a/device_mapper/all.h b/device_mapper/all.h +index b23485f..f00b6a5 100644 +--- a/device_mapper/all.h ++++ b/device_mapper/all.h +@@ -234,6 +234,7 @@ int dm_task_suppress_identical_reload(struct dm_task *dmt); + int dm_task_secure_data(struct dm_task *dmt); + int dm_task_retry_remove(struct dm_task *dmt); + int dm_task_deferred_remove(struct dm_task *dmt); ++void dm_task_skip_reload_params_compare(struct dm_task *dmt); + + /* + * Record timestamp immediately after the ioctl returns. 
+@@ -392,6 +393,15 @@ struct dm_status_writecache { + int dm_get_status_writecache(struct dm_pool *mem, const char *params, + struct dm_status_writecache **status); + ++struct dm_status_integrity { ++ uint64_t number_of_mismatches; ++ uint64_t provided_data_sectors; ++ uint64_t recalc_sector; ++}; ++ ++int dm_get_status_integrity(struct dm_pool *mem, const char *params, ++ struct dm_status_integrity **status); ++ + /* + * Parse params from STATUS call for snapshot target + * +@@ -970,6 +980,35 @@ int dm_tree_node_add_writecache_target(struct dm_tree_node *node, + uint32_t writecache_block_size, + struct writecache_settings *settings); + ++struct integrity_settings { ++ char mode[8]; ++ uint32_t tag_size; ++ uint32_t block_size; /* optional table param always set by lvm */ ++ const char *internal_hash; /* optional table param always set by lvm */ ++ ++ uint32_t journal_sectors; ++ uint32_t interleave_sectors; ++ uint32_t buffer_sectors; ++ uint32_t journal_watermark; ++ uint32_t commit_time; ++ uint32_t bitmap_flush_interval; ++ uint64_t sectors_per_bit; ++ ++ unsigned journal_sectors_set:1; ++ unsigned interleave_sectors_set:1; ++ unsigned buffer_sectors_set:1; ++ unsigned journal_watermark_set:1; ++ unsigned commit_time_set:1; ++ unsigned bitmap_flush_interval_set:1; ++ unsigned sectors_per_bit_set:1; ++}; ++ ++int dm_tree_node_add_integrity_target(struct dm_tree_node *node, ++ uint64_t size, ++ const char *origin_uuid, ++ const char *meta_uuid, ++ struct integrity_settings *settings, ++ int recalculate); + + /* + * VDO target +diff --git a/device_mapper/ioctl/libdm-iface.c b/device_mapper/ioctl/libdm-iface.c +index fe04af8..25e7d1a 100644 +--- a/device_mapper/ioctl/libdm-iface.c ++++ b/device_mapper/ioctl/libdm-iface.c +@@ -805,6 +805,11 @@ int dm_task_suppress_identical_reload(struct dm_task *dmt) + return 1; + } + ++void dm_task_skip_reload_params_compare(struct dm_task *dmt) ++{ ++ dmt->skip_reload_params_compare = 1; ++} ++ + int dm_task_set_add_node(struct dm_task *dmt, dm_add_node_t add_node) + { + switch (add_node) { +@@ -1575,11 +1580,29 @@ static int _reload_with_suppression_v4(struct dm_task *dmt) + len = strlen(t2->params); + while (len-- > 0 && t2->params[len] == ' ') + t2->params[len] = '\0'; +- if ((t1->start != t2->start) || +- (t1->length != t2->length) || +- (strcmp(t1->type, t2->type)) || +- (strcmp(t1->params, t2->params))) ++ ++ if (t1->start != t2->start) { ++ log_debug("reload %u:%u start diff", task->major, task->minor); ++ goto no_match; ++ } ++ if (t1->length != t2->length) { ++ log_debug("reload %u:%u length diff", task->major, task->minor); + goto no_match; ++ } ++ if (strcmp(t1->type, t2->type)) { ++ log_debug("reload %u:%u type diff %s %s", task->major, task->minor, t1->type, t2->type); ++ goto no_match; ++ } ++ if (strcmp(t1->params, t2->params)) { ++ if (dmt->skip_reload_params_compare) ++ log_debug("reload %u:%u skip params ignore %s %s", ++ task->major, task->minor, t1->params, t2->params); ++ else { ++ log_debug("reload %u:%u params diff", task->major, task->minor); ++ goto no_match; ++ } ++ } ++ + t1 = t1->next; + t2 = t2->next; + } +diff --git a/device_mapper/ioctl/libdm-targets.h b/device_mapper/ioctl/libdm-targets.h +index b5b20d5..9786a7e 100644 +--- a/device_mapper/ioctl/libdm-targets.h ++++ b/device_mapper/ioctl/libdm-targets.h +@@ -59,6 +59,7 @@ struct dm_task { + int skip_lockfs; + int query_inactive_table; + int suppress_identical_reload; ++ int skip_reload_params_compare; + dm_add_node_t add_node; + uint64_t existing_table_size; + int 
cookie_set; +diff --git a/device_mapper/libdm-deptree.c b/device_mapper/libdm-deptree.c +index 7fac6ab..9ba24cb 100644 +--- a/device_mapper/libdm-deptree.c ++++ b/device_mapper/libdm-deptree.c +@@ -38,6 +38,7 @@ enum { + SEG_STRIPED, + SEG_ZERO, + SEG_WRITECACHE, ++ SEG_INTEGRITY, + SEG_THIN_POOL, + SEG_THIN, + SEG_VDO, +@@ -78,6 +79,7 @@ static const struct { + { SEG_STRIPED, "striped" }, + { SEG_ZERO, "zero"}, + { SEG_WRITECACHE, "writecache"}, ++ { SEG_INTEGRITY, "integrity"}, + { SEG_THIN_POOL, "thin-pool"}, + { SEG_THIN, "thin"}, + { SEG_VDO, "vdo" }, +@@ -221,6 +223,11 @@ struct load_segment { + int writecache_pmem; /* writecache, 1 if pmem, 0 if ssd */ + uint32_t writecache_block_size; /* writecache, in bytes */ + struct writecache_settings writecache_settings; /* writecache */ ++ ++ uint64_t integrity_data_sectors; /* integrity (provided_data_sectors) */ ++ struct dm_tree_node *integrity_meta_node; /* integrity */ ++ struct integrity_settings integrity_settings; /* integrity */ ++ int integrity_recalculate; /* integrity */ + }; + + /* Per-device properties */ +@@ -268,6 +275,16 @@ struct load_properties { + unsigned delay_resume_if_extended; + + /* ++ * When comparing table lines to decide if a reload is ++ * needed, ignore any differences betwen the lvm device ++ * params and the kernel-reported device params. ++ * dm-integrity reports many internal parameters on the ++ * table line when lvm does not explicitly set them, ++ * causing lvm and the kernel to have differing params. ++ */ ++ unsigned skip_reload_params_compare; ++ ++ /* + * Call node_send_messages(), set to 2 if there are messages + * When != 0, it validates matching transaction id, thus thin-pools + * where transation_id is passed as 0 are never validated, this +@@ -2705,6 +2722,84 @@ static int _writecache_emit_segment_line(struct dm_task *dmt, + return 1; + } + ++static int _integrity_emit_segment_line(struct dm_task *dmt, ++ struct load_segment *seg, ++ char *params, size_t paramsize) ++{ ++ struct integrity_settings *set = &seg->integrity_settings; ++ int pos = 0; ++ int count; ++ char origin_dev[DM_FORMAT_DEV_BUFSIZE]; ++ char meta_dev[DM_FORMAT_DEV_BUFSIZE]; ++ ++ if (!_build_dev_string(origin_dev, sizeof(origin_dev), seg->origin)) ++ return_0; ++ ++ if (seg->integrity_meta_node && ++ !_build_dev_string(meta_dev, sizeof(meta_dev), seg->integrity_meta_node)) ++ return_0; ++ ++ count = 3; /* block_size, internal_hash, fix_padding options are always passed */ ++ ++ if (seg->integrity_meta_node) ++ count++; ++ ++ if (seg->integrity_recalculate) ++ count++; ++ ++ if (set->journal_sectors_set) ++ count++; ++ if (set->interleave_sectors_set) ++ count++; ++ if (set->buffer_sectors_set) ++ count++; ++ if (set->journal_watermark_set) ++ count++; ++ if (set->commit_time_set) ++ count++; ++ if (set->bitmap_flush_interval_set) ++ count++; ++ if (set->sectors_per_bit_set) ++ count++; ++ ++ EMIT_PARAMS(pos, "%s 0 %u %s %d fix_padding block_size:%u internal_hash:%s", ++ origin_dev, ++ set->tag_size, ++ set->mode, ++ count, ++ set->block_size, ++ set->internal_hash); ++ ++ if (seg->integrity_meta_node) ++ EMIT_PARAMS(pos, " meta_device:%s", meta_dev); ++ ++ if (seg->integrity_recalculate) ++ EMIT_PARAMS(pos, " recalculate"); ++ ++ if (set->journal_sectors_set) ++ EMIT_PARAMS(pos, " journal_sectors:%u", set->journal_sectors); ++ ++ if (set->interleave_sectors_set) ++ EMIT_PARAMS(pos, " ineterleave_sectors:%u", set->interleave_sectors); ++ ++ if (set->buffer_sectors_set) ++ EMIT_PARAMS(pos, " buffer_sectors:%u", 
set->buffer_sectors); ++ ++ if (set->journal_watermark_set) ++ EMIT_PARAMS(pos, " journal_watermark:%u", set->journal_watermark); ++ ++ if (set->commit_time_set) ++ EMIT_PARAMS(pos, " commit_time:%u", set->commit_time); ++ ++ if (set->bitmap_flush_interval_set) ++ EMIT_PARAMS(pos, " bitmap_flush_interval:%u", set->bitmap_flush_interval); ++ ++ if (set->sectors_per_bit_set) ++ EMIT_PARAMS(pos, " sectors_per_bit:%llu", (unsigned long long)set->sectors_per_bit); ++ ++ return 1; ++} ++ + static int _thin_pool_emit_segment_line(struct dm_task *dmt, + struct load_segment *seg, + char *params, size_t paramsize) +@@ -2889,6 +2984,10 @@ static int _emit_segment_line(struct dm_task *dmt, uint32_t major, + if (!_writecache_emit_segment_line(dmt, seg, params, paramsize)) + return_0; + break; ++ case SEG_INTEGRITY: ++ if (!_integrity_emit_segment_line(dmt, seg, params, paramsize)) ++ return_0; ++ break; + } + + switch(seg->type) { +@@ -2901,6 +3000,7 @@ static int _emit_segment_line(struct dm_task *dmt, uint32_t major, + case SEG_THIN: + case SEG_CACHE: + case SEG_WRITECACHE: ++ case SEG_INTEGRITY: + break; + case SEG_CRYPT: + case SEG_LINEAR: +@@ -3005,6 +3105,9 @@ static int _load_node(struct dm_tree_node *dnode) + if (!dm_task_suppress_identical_reload(dmt)) + log_warn("WARNING: Failed to suppress reload of identical tables."); + ++ if (dnode->props.skip_reload_params_compare) ++ dm_task_skip_reload_params_compare(dmt); ++ + if ((r = dm_task_run(dmt))) { + r = dm_task_get_info(dmt, &dnode->info); + if (r && !dnode->info.inactive_table) +@@ -3023,8 +3126,8 @@ static int _load_node(struct dm_tree_node *dnode) + if (!existing_table_size && dnode->props.delay_resume_if_new) + dnode->props.size_changed = 0; + +- log_debug_activation("Table size changed from %" PRIu64 " to %" +- PRIu64 " for %s.%s", existing_table_size, ++ log_debug_activation("Table size changed from %" PRIu64 " to %" PRIu64 " for %s.%s", ++ existing_table_size, + seg_start, _node_name(dnode), + dnode->props.size_changed ? 
"" : " (Ignoring.)"); + +@@ -3136,7 +3239,10 @@ int dm_tree_preload_children(struct dm_tree_node *dnode, + } + + /* No resume for a device without parents or with unchanged or smaller size */ +- if (!dm_tree_node_num_children(child, 1) || (child->props.size_changed <= 0)) ++ if (!dm_tree_node_num_children(child, 1)) ++ continue; ++ ++ if (child->props.size_changed <= 0) + continue; + + if (!child->info.inactive_table && !child->info.suspended) +@@ -3738,6 +3844,48 @@ int dm_tree_node_add_writecache_target(struct dm_tree_node *node, + return 1; + } + ++int dm_tree_node_add_integrity_target(struct dm_tree_node *node, ++ uint64_t size, ++ const char *origin_uuid, ++ const char *meta_uuid, ++ struct integrity_settings *settings, ++ int recalculate) ++{ ++ struct load_segment *seg; ++ ++ if (!(seg = _add_segment(node, SEG_INTEGRITY, size))) ++ return_0; ++ ++ if (!meta_uuid) { ++ log_error("No integrity meta uuid."); ++ return 0; ++ } ++ ++ if (!(seg->integrity_meta_node = dm_tree_find_node_by_uuid(node->dtree, meta_uuid))) { ++ log_error("Missing integrity's meta uuid %s.", meta_uuid); ++ return 0; ++ } ++ ++ if (!_link_tree_nodes(node, seg->integrity_meta_node)) ++ return_0; ++ ++ if (!(seg->origin = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) { ++ log_error("Missing integrity's origin uuid %s.", origin_uuid); ++ return 0; ++ } ++ ++ if (!_link_tree_nodes(node, seg->origin)) ++ return_0; ++ ++ memcpy(&seg->integrity_settings, settings, sizeof(struct integrity_settings)); ++ ++ seg->integrity_recalculate = recalculate; ++ ++ node->props.skip_reload_params_compare = 1; ++ ++ return 1; ++} ++ + int dm_tree_node_add_replicator_target(struct dm_tree_node *node, + uint64_t size, + const char *rlog_uuid, +diff --git a/device_mapper/libdm-targets.c b/device_mapper/libdm-targets.c +index 86cb847..bfe76c5 100644 +--- a/device_mapper/libdm-targets.c ++++ b/device_mapper/libdm-targets.c +@@ -380,6 +380,33 @@ int dm_get_status_writecache(struct dm_pool *mem, const char *params, + return 1; + } + ++int dm_get_status_integrity(struct dm_pool *mem, const char *params, ++ struct dm_status_integrity **status) ++{ ++ struct dm_status_integrity *s; ++ char recalc_str[16] = "\0"; ++ ++ if (!(s = dm_pool_zalloc(mem, sizeof(*s)))) ++ return_0; ++ ++ if (sscanf(params, "%llu %llu %s", ++ (unsigned long long *)&s->number_of_mismatches, ++ (unsigned long long *)&s->provided_data_sectors, ++ recalc_str) != 3) { ++ log_error("Failed to parse integrity params: %s.", params); ++ dm_pool_free(mem, s); ++ return 0; ++ } ++ ++ if (recalc_str[0] == '-') ++ s->recalc_sector = 0; ++ else ++ s->recalc_sector = strtoull(recalc_str, NULL, 0); ++ ++ *status = s; ++ return 1; ++} ++ + int parse_thin_pool_status(const char *params, struct dm_status_thin_pool *s) + { + int pos; +diff --git a/include/configure.h.in b/include/configure.h.in +index 91a3a7d..57736cc 100644 +--- a/include/configure.h.in ++++ b/include/configure.h.in +@@ -678,6 +678,9 @@ + /* Define to 1 to include built-in support for writecache. */ + #undef WRITECACHE_INTERNAL + ++/* Define to 1 to include built-in support for integrity. 
*/ ++#undef INTEGRITY_INTERNAL ++ + /* Define to get access to GNU/Linux extension */ + #undef _GNU_SOURCE + +diff --git a/lib/Makefile.in b/lib/Makefile.in +index 2a064f3..8e50ec4 100644 +--- a/lib/Makefile.in ++++ b/lib/Makefile.in +@@ -20,6 +20,7 @@ SOURCES =\ + activate/activate.c \ + cache/lvmcache.c \ + writecache/writecache.c \ ++ integrity/integrity.c \ + cache_segtype/cache.c \ + commands/toolcontext.c \ + config/config.c \ +@@ -67,6 +68,7 @@ SOURCES =\ + log/log.c \ + metadata/cache_manip.c \ + metadata/writecache_manip.c \ ++ metadata/integrity_manip.c \ + metadata/lv.c \ + metadata/lv_manip.c \ + metadata/merge.c \ +diff --git a/lib/activate/activate.c b/lib/activate/activate.c +index a82a5cb..22c4e63 100644 +--- a/lib/activate/activate.c ++++ b/lib/activate/activate.c +@@ -2535,6 +2535,13 @@ static int _lv_activate(struct cmd_context *cmd, const char *lvid_s, + goto out; + } + ++ if ((cmd->partial_activation || cmd->degraded_activation) && ++ lv_is_partial(lv) && lv_is_raid(lv) && lv_raid_has_integrity((struct logical_volume *)lv)) { ++ cmd->partial_activation = 0; ++ cmd->degraded_activation = 0; ++ log_print("No degraded or partial activation for raid with integrity."); ++ } ++ + if ((!lv->vg->cmd->partial_activation) && lv_is_partial(lv)) { + if (!lv_is_raid_type(lv) || !partial_raid_lv_supports_degraded_activation(lv)) { + log_error("Refusing activation of partial LV %s. " +diff --git a/lib/activate/activate.h b/lib/activate/activate.h +index a5ee438..e3c1bb3 100644 +--- a/lib/activate/activate.h ++++ b/lib/activate/activate.h +@@ -39,6 +39,7 @@ typedef enum { + SEG_STATUS_THIN_POOL, + SEG_STATUS_VDO_POOL, + SEG_STATUS_WRITECACHE, ++ SEG_STATUS_INTEGRITY, + SEG_STATUS_UNKNOWN + } lv_seg_status_type_t; + +@@ -53,6 +54,7 @@ struct lv_seg_status { + struct dm_status_thin *thin; + struct dm_status_thin_pool *thin_pool; + struct dm_status_writecache *writecache; ++ struct dm_status_integrity *integrity; + struct lv_status_vdo vdo_pool; + }; + }; +@@ -260,6 +262,7 @@ void fs_unlock(void); + + #define TARGET_NAME_CACHE "cache" + #define TARGET_NAME_WRITECACHE "writecache" ++#define TARGET_NAME_INTEGRITY "integrity" + #define TARGET_NAME_ERROR "error" + #define TARGET_NAME_ERROR_OLD "erro" /* Truncated in older kernels */ + #define TARGET_NAME_LINEAR "linear" +@@ -277,6 +280,7 @@ void fs_unlock(void); + #define MODULE_NAME_CLUSTERED_MIRROR "clog" + #define MODULE_NAME_CACHE TARGET_NAME_CACHE + #define MODULE_NAME_WRITECACHE TARGET_NAME_WRITECACHE ++#define MODULE_NAME_INTEGRITY TARGET_NAME_INTEGRITY + #define MODULE_NAME_ERROR TARGET_NAME_ERROR + #define MODULE_NAME_LOG_CLUSTERED "log-clustered" + #define MODULE_NAME_LOG_USERSPACE "log-userspace" +diff --git a/lib/activate/dev_manager.c b/lib/activate/dev_manager.c +index 75d4df0..a626b00 100644 +--- a/lib/activate/dev_manager.c ++++ b/lib/activate/dev_manager.c +@@ -46,7 +46,7 @@ typedef enum { + } action_t; + + /* This list must match lib/misc/lvm-string.c:build_dm_uuid(). 
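/*
 * Illustrative sketch, not part of the patch: with the defaults used later in
 * this patch (mode "J", tag_size 4, internal_hash crc32c, block_size 512),
 * _integrity_emit_segment_line() above builds a table line shaped roughly like
 *
 *   <origin_dev> 0 4 J 4 fix_padding block_size:512 internal_hash:crc32c meta_device:<meta_dev>
 *
 * where <origin_dev> and <meta_dev> are hypothetical device numbers and the
 * count "4" covers fix_padding, block_size, internal_hash and meta_device.
 * The kernel reports extra internal parameters on its own table line, which
 * is why skip_reload_params_compare is needed to avoid spurious reloads.
 *
 * The status line parsed by dm_get_status_integrity() above carries three
 * fields: number_of_mismatches, provided_data_sectors and the recalculation
 * sector ("-" while no recalculation is running). A minimal standalone parse
 * of a sample line, mirroring the sscanf() above:
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *params = "0 2097152 4096";	/* hypothetical status values */
	unsigned long long mismatches, data_sectors, recalc = 0;
	char recalc_str[16] = "";

	if (sscanf(params, "%llu %llu %15s", &mismatches, &data_sectors, recalc_str) != 3)
		return 1;

	if (recalc_str[0] != '-')	/* '-' means recalculation is not in progress */
		recalc = strtoull(recalc_str, NULL, 0);

	printf("mismatches=%llu provided_data_sectors=%llu recalc_sector=%llu\n",
	       mismatches, data_sectors, recalc);
	return 0;
}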
*/ +-const char *uuid_suffix_list[] = { "pool", "cdata", "cmeta", "cvol", "tdata", "tmeta", "vdata", "vpool", NULL}; ++const char *uuid_suffix_list[] = { "pool", "cdata", "cmeta", "cvol", "tdata", "tmeta", "vdata", "vpool", "imeta", NULL}; + + struct dlid_list { + struct dm_list list; +@@ -222,6 +222,10 @@ static int _get_segment_status_from_target_params(const char *target_name, + if (!dm_get_status_writecache(seg_status->mem, params, &(seg_status->writecache))) + return_0; + seg_status->type = SEG_STATUS_WRITECACHE; ++ } else if (segtype_is_integrity(segtype)) { ++ if (!dm_get_status_integrity(seg_status->mem, params, &(seg_status->integrity))) ++ return_0; ++ seg_status->type = SEG_STATUS_INTEGRITY; + } else + /* + * TODO: Add support for other segment types too! +@@ -299,6 +303,9 @@ static int _info_run(const char *dlid, struct dm_info *dminfo, + if (lv_is_vdo_pool(seg_status->seg->lv)) + length = get_vdo_pool_virtual_size(seg_status->seg); + ++ if (lv_is_integrity(seg_status->seg->lv)) ++ length = seg_status->seg->integrity_data_sectors; ++ + do { + target = dm_get_next_target(dmt, target, &target_start, + &target_length, &target_name, &target_params); +@@ -2620,6 +2627,10 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree, + if (!_add_lv_to_dtree(dm, dtree, seg->writecache, dm->activation ? origin_only : 1)) + return_0; + } ++ if (seg->integrity_meta_dev && seg_is_integrity(seg)) { ++ if (!_add_lv_to_dtree(dm, dtree, seg->integrity_meta_dev, dm->activation ? origin_only : 1)) ++ return_0; ++ } + if (seg->pool_lv && + (lv_is_cache_pool(seg->pool_lv) || lv_is_cache_vol(seg->pool_lv) || dm->track_external_lv_deps) && + /* When activating and not origin_only detect linear 'overlay' over pool */ +@@ -3076,6 +3087,11 @@ static int _add_segment_to_dtree(struct dev_manager *dm, + lv_layer(seg->writecache))) + return_0; + ++ if (seg->integrity_meta_dev && !laopts->origin_only && ++ !_add_new_lv_to_dtree(dm, dtree, seg->integrity_meta_dev, laopts, ++ lv_layer(seg->integrity_meta_dev))) ++ return_0; ++ + /* Add any LVs used by this segment */ + for (s = 0; s < seg->area_count; ++s) { + if ((seg_type(seg, s) == AREA_LV) && +diff --git a/lib/commands/toolcontext.c b/lib/commands/toolcontext.c +index 479d499..88d5b3e 100644 +--- a/lib/commands/toolcontext.c ++++ b/lib/commands/toolcontext.c +@@ -1362,6 +1362,11 @@ static int _init_segtypes(struct cmd_context *cmd) + return 0; + #endif + ++#ifdef INTEGRITY_INTERNAL ++ if (!init_integrity_segtypes(cmd, &seglib)) ++ return 0; ++#endif ++ + return 1; + } + +diff --git a/lib/device/dev-type.c b/lib/device/dev-type.c +index d225d4d..deb5d6a 100644 +--- a/lib/device/dev-type.c ++++ b/lib/device/dev-type.c +@@ -647,6 +647,45 @@ out: + } + + #ifdef BLKID_WIPING_SUPPORT ++int get_fs_block_size(struct device *dev, uint32_t *fs_block_size) ++{ ++ blkid_probe probe = NULL; ++ const char *block_size_str = NULL; ++ uint64_t block_size_val; ++ int r = 0; ++ ++ *fs_block_size = 0; ++ ++ if (!(probe = blkid_new_probe_from_filename(dev_name(dev)))) { ++ log_error("Failed to create a new blkid probe for device %s.", dev_name(dev)); ++ goto out; ++ } ++ ++ blkid_probe_enable_partitions(probe, 1); ++ ++ (void) blkid_probe_lookup_value(probe, "BLOCK_SIZE", &block_size_str, NULL); ++ ++ if (!block_size_str) ++ goto out; ++ ++ block_size_val = strtoull(block_size_str, NULL, 10); ++ ++ *fs_block_size = (uint32_t)block_size_val; ++ r = 1; ++out: ++ if (probe) ++ blkid_free_probe(probe); ++ return r; ++} ++#else ++int get_fs_block_size(struct device 
*dev, uint32_t *fs_block_size) ++{ ++ *fs_block_size = 0; ++ return 0; ++} ++#endif ++ ++#ifdef BLKID_WIPING_SUPPORT + + static inline int _type_in_flag_list(const char *type, uint32_t flag_list) + { +diff --git a/lib/device/dev-type.h b/lib/device/dev-type.h +index e090050..fdf7791 100644 +--- a/lib/device/dev-type.h ++++ b/lib/device/dev-type.h +@@ -97,4 +97,6 @@ int dev_is_pmem(struct device *dev); + + int dev_is_lv(struct device *dev); + ++int get_fs_block_size(struct device *dev, uint32_t *fs_block_size); ++ + #endif +diff --git a/lib/format_text/flags.c b/lib/format_text/flags.c +index 2873ba6..bc93a5d 100644 +--- a/lib/format_text/flags.c ++++ b/lib/format_text/flags.c +@@ -104,6 +104,8 @@ static const struct flag _lv_flags[] = { + {LV_VDO_POOL, NULL, 0}, + {LV_VDO_POOL_DATA, NULL, 0}, + {WRITECACHE, NULL, 0}, ++ {INTEGRITY, NULL, 0}, ++ {INTEGRITY_METADATA, NULL, 0}, + {LV_PENDING_DELETE, NULL, 0}, /* FIXME Display like COMPATIBLE_FLAG */ + {LV_REMOVED, NULL, 0}, + {0, NULL, 0} +diff --git a/lib/integrity/integrity.c b/lib/integrity/integrity.c +new file mode 100644 +index 0000000..d5ad86b +--- /dev/null ++++ b/lib/integrity/integrity.c +@@ -0,0 +1,343 @@ ++/* ++ * Copyright (C) 2013-2016 Red Hat, Inc. All rights reserved. ++ * ++ * This file is part of LVM2. ++ * ++ * This copyrighted material is made available to anyone wishing to use, ++ * modify, copy, or redistribute it subject to the terms and conditions ++ * of the GNU Lesser General Public License v.2.1. ++ * ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this program; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++#include "base/memory/zalloc.h" ++#include "lib/misc/lib.h" ++#include "lib/commands/toolcontext.h" ++#include "lib/metadata/segtype.h" ++#include "lib/display/display.h" ++#include "lib/format_text/text_export.h" ++#include "lib/config/config.h" ++#include "lib/datastruct/str_list.h" ++#include "lib/misc/lvm-string.h" ++#include "lib/activate/activate.h" ++#include "lib/metadata/metadata.h" ++#include "lib/metadata/lv_alloc.h" ++#include "lib/config/defaults.h" ++ ++#define SEG_LOG_ERROR(t, p...) 
\ ++ log_error(t " segment %s of logical volume %s.", ## p, \ ++ dm_config_parent_name(sn), seg->lv->name), 0; ++ ++static void _integrity_display(const struct lv_segment *seg) ++{ ++ /* TODO: lvdisplay segments */ ++} ++ ++static int _integrity_text_import(struct lv_segment *seg, ++ const struct dm_config_node *sn, ++ struct dm_hash_table *pv_hash __attribute__((unused))) ++{ ++ struct integrity_settings *set; ++ struct logical_volume *origin_lv = NULL; ++ struct logical_volume *meta_lv = NULL; ++ const char *origin_name = NULL; ++ const char *meta_dev = NULL; ++ const char *mode = NULL; ++ const char *hash = NULL; ++ ++ memset(&seg->integrity_settings, 0, sizeof(struct integrity_settings)); ++ set = &seg->integrity_settings; ++ ++ /* origin always set */ ++ ++ if (!dm_config_has_node(sn, "origin")) ++ return SEG_LOG_ERROR("origin not specified in"); ++ ++ if (!dm_config_get_str(sn, "origin", &origin_name)) ++ return SEG_LOG_ERROR("origin must be a string in"); ++ ++ if (!(origin_lv = find_lv(seg->lv->vg, origin_name))) ++ return SEG_LOG_ERROR("Unknown LV specified for integrity origin %s in", origin_name); ++ ++ if (!set_lv_segment_area_lv(seg, 0, origin_lv, 0, 0)) ++ return_0; ++ ++ /* data_sectors always set */ ++ ++ if (!dm_config_get_uint64(sn, "data_sectors", &seg->integrity_data_sectors)) ++ return SEG_LOG_ERROR("integrity data_sectors must be set in"); ++ ++ /* mode always set */ ++ ++ if (!dm_config_get_str(sn, "mode", &mode)) ++ return SEG_LOG_ERROR("integrity mode must be set in"); ++ ++ if (strlen(mode) > 7) ++ return SEG_LOG_ERROR("integrity mode invalid in"); ++ ++ strncpy(set->mode, mode, 7); ++ ++ /* tag_size always set */ ++ ++ if (!dm_config_get_uint32(sn, "tag_size", &set->tag_size)) ++ return SEG_LOG_ERROR("integrity tag_size must be set in"); ++ ++ /* block_size always set */ ++ ++ if (!dm_config_get_uint32(sn, "block_size", &set->block_size)) ++ return SEG_LOG_ERROR("integrity block_size invalid in"); ++ ++ /* internal_hash always set */ ++ ++ if (!dm_config_get_str(sn, "internal_hash", &hash)) ++ return SEG_LOG_ERROR("integrity internal_hash must be set in"); ++ ++ if (!(set->internal_hash = dm_pool_strdup(seg->lv->vg->vgmem, hash))) ++ return SEG_LOG_ERROR("integrity internal_hash failed to be set in"); ++ ++ /* meta_dev optional */ ++ ++ if (dm_config_has_node(sn, "meta_dev")) { ++ if (!dm_config_get_str(sn, "meta_dev", &meta_dev)) ++ return SEG_LOG_ERROR("meta_dev must be a string in"); ++ ++ if (!(meta_lv = find_lv(seg->lv->vg, meta_dev))) ++ return SEG_LOG_ERROR("Unknown logical volume %s specified for integrity in", meta_dev); ++ } ++ ++ if (dm_config_has_node(sn, "recalculate")) { ++ if (!dm_config_get_uint32(sn, "recalculate", &seg->integrity_recalculate)) ++ return SEG_LOG_ERROR("integrity recalculate error in"); ++ } ++ ++ /* the rest are optional */ ++ ++ if (dm_config_has_node(sn, "journal_sectors")) { ++ if (!dm_config_get_uint32(sn, "journal_sectors", &set->journal_sectors)) ++ return SEG_LOG_ERROR("Unknown integrity_setting in"); ++ set->journal_sectors_set = 1; ++ } ++ ++ if (dm_config_has_node(sn, "interleave_sectors")) { ++ if (!dm_config_get_uint32(sn, "interleave_sectors", &set->interleave_sectors)) ++ return SEG_LOG_ERROR("Unknown integrity_setting in"); ++ set->interleave_sectors_set = 1; ++ } ++ ++ if (dm_config_has_node(sn, "buffer_sectors")) { ++ if (!dm_config_get_uint32(sn, "buffer_sectors", &set->buffer_sectors)) ++ return SEG_LOG_ERROR("Unknown integrity_setting in"); ++ set->buffer_sectors_set = 1; ++ } ++ ++ if (dm_config_has_node(sn, 
"journal_watermark")) { ++ if (!dm_config_get_uint32(sn, "journal_watermark", &set->journal_watermark)) ++ return SEG_LOG_ERROR("Unknown integrity_setting in"); ++ set->journal_watermark_set = 1; ++ } ++ ++ if (dm_config_has_node(sn, "commit_time")) { ++ if (!dm_config_get_uint32(sn, "commit_time", &set->commit_time)) ++ return SEG_LOG_ERROR("Unknown integrity_setting in"); ++ set->commit_time_set = 1; ++ } ++ ++ if (dm_config_has_node(sn, "bitmap_flush_interval")) { ++ if (!dm_config_get_uint32(sn, "bitmap_flush_interval", &set->bitmap_flush_interval)) ++ return SEG_LOG_ERROR("Unknown integrity_setting in"); ++ set->bitmap_flush_interval_set = 1; ++ } ++ ++ if (dm_config_has_node(sn, "sectors_per_bit")) { ++ if (!dm_config_get_uint64(sn, "sectors_per_bit", &set->sectors_per_bit)) ++ return SEG_LOG_ERROR("Unknown integrity_setting in"); ++ set->sectors_per_bit_set = 1; ++ } ++ ++ seg->origin = origin_lv; ++ seg->integrity_meta_dev = meta_lv; ++ seg->lv->status |= INTEGRITY; ++ ++ if (meta_lv) ++ meta_lv->status |= INTEGRITY_METADATA; ++ ++ if (meta_lv && !add_seg_to_segs_using_this_lv(meta_lv, seg)) ++ return_0; ++ ++ return 1; ++} ++ ++static int _integrity_text_import_area_count(const struct dm_config_node *sn, ++ uint32_t *area_count) ++{ ++ *area_count = 1; ++ ++ return 1; ++} ++ ++static int _integrity_text_export(const struct lv_segment *seg, ++ struct formatter *f) ++{ ++ const struct integrity_settings *set = &seg->integrity_settings; ++ ++ outf(f, "origin = \"%s\"", seg_lv(seg, 0)->name); ++ outf(f, "data_sectors = %llu", (unsigned long long)seg->integrity_data_sectors); ++ ++ outf(f, "mode = \"%s\"", set->mode); ++ outf(f, "tag_size = %u", set->tag_size); ++ outf(f, "block_size = %u", set->block_size); ++ outf(f, "internal_hash = \"%s\"", set->internal_hash); ++ ++ if (seg->integrity_meta_dev) ++ outf(f, "meta_dev = \"%s\"", seg->integrity_meta_dev->name); ++ ++ if (seg->integrity_recalculate) ++ outf(f, "recalculate = %u", seg->integrity_recalculate); ++ ++ if (set->journal_sectors_set) ++ outf(f, "journal_sectors = %u", set->journal_sectors); ++ ++ if (set->interleave_sectors_set) ++ outf(f, "interleave_sectors = %u", set->interleave_sectors); ++ ++ if (set->buffer_sectors_set) ++ outf(f, "buffer_sectors = %u", set->buffer_sectors); ++ ++ if (set->journal_watermark_set) ++ outf(f, "journal_watermark = %u", set->journal_watermark); ++ ++ if (set->commit_time_set) ++ outf(f, "commit_time = %u", set->commit_time); ++ ++ if (set->bitmap_flush_interval) ++ outf(f, "bitmap_flush_interval = %u", set->bitmap_flush_interval); ++ ++ if (set->sectors_per_bit) ++ outf(f, "sectors_per_bit = %llu", (unsigned long long)set->sectors_per_bit); ++ ++ return 1; ++} ++ ++static void _destroy(struct segment_type *segtype) ++{ ++ free((void *) segtype); ++} ++ ++#ifdef DEVMAPPER_SUPPORT ++ ++static int _target_present(struct cmd_context *cmd, ++ const struct lv_segment *seg __attribute__((unused)), ++ unsigned *attributes __attribute__((unused))) ++{ ++ static int _integrity_checked = 0; ++ static int _integrity_present = 0; ++ uint32_t maj, min, patchlevel; ++ ++ if (!activation()) ++ return 0; ++ ++ if (!_integrity_checked) { ++ _integrity_checked = 1; ++ _integrity_present = target_present(cmd, TARGET_NAME_INTEGRITY, 1); ++ ++ if (!target_version(TARGET_NAME_INTEGRITY, &maj, &min, &patchlevel)) ++ return 0; ++ ++ if (maj < 1 || min < 6) { ++ log_error("Integrity target version older than minimum 1.6.0"); ++ return 0; ++ } ++ } ++ ++ return _integrity_present; ++} ++ ++static int 
_modules_needed(struct dm_pool *mem, ++ const struct lv_segment *seg __attribute__((unused)), ++ struct dm_list *modules) ++{ ++ if (!str_list_add(mem, modules, MODULE_NAME_INTEGRITY)) { ++ log_error("String list allocation failed for integrity module."); ++ return 0; ++ } ++ ++ return 1; ++} ++#endif /* DEVMAPPER_SUPPORT */ ++ ++#ifdef DEVMAPPER_SUPPORT ++static int _integrity_add_target_line(struct dev_manager *dm, ++ struct dm_pool *mem, ++ struct cmd_context *cmd __attribute__((unused)), ++ void **target_state __attribute__((unused)), ++ struct lv_segment *seg, ++ const struct lv_activate_opts *laopts, ++ struct dm_tree_node *node, uint64_t len, ++ uint32_t *pvmove_mirror_count __attribute__((unused))) ++{ ++ char *origin_uuid; ++ char *meta_uuid = NULL; ++ ++ if (!seg_is_integrity(seg)) { ++ log_error(INTERNAL_ERROR "Passed segment is not integrity."); ++ return 0; ++ } ++ ++ if (!(origin_uuid = build_dm_uuid(mem, seg_lv(seg, 0), NULL))) ++ return_0; ++ ++ if (seg->integrity_meta_dev) { ++ if (!(meta_uuid = build_dm_uuid(mem, seg->integrity_meta_dev, NULL))) ++ return_0; ++ } ++ ++ if (!seg->integrity_data_sectors) { ++ log_error("_integrity_add_target_line zero size"); ++ return_0; ++ } ++ ++ if (!dm_tree_node_add_integrity_target(node, seg->integrity_data_sectors, ++ origin_uuid, meta_uuid, ++ &seg->integrity_settings, ++ seg->integrity_recalculate)) ++ return_0; ++ ++ return 1; ++} ++#endif /* DEVMAPPER_SUPPORT */ ++ ++static struct segtype_handler _integrity_ops = { ++ .display = _integrity_display, ++ .text_import = _integrity_text_import, ++ .text_import_area_count = _integrity_text_import_area_count, ++ .text_export = _integrity_text_export, ++#ifdef DEVMAPPER_SUPPORT ++ .add_target_line = _integrity_add_target_line, ++ .target_present = _target_present, ++ .modules_needed = _modules_needed, ++#endif ++ .destroy = _destroy, ++}; ++ ++int init_integrity_segtypes(struct cmd_context *cmd, ++ struct segtype_library *seglib) ++{ ++ struct segment_type *segtype = zalloc(sizeof(*segtype)); ++ ++ if (!segtype) { ++ log_error("Failed to allocate memory for integrity segtype"); ++ return 0; ++ } ++ ++ segtype->name = SEG_TYPE_NAME_INTEGRITY; ++ segtype->flags = SEG_INTEGRITY; ++ segtype->ops = &_integrity_ops; ++ ++ if (!lvm_register_segtype(seglib, segtype)) ++ return_0; ++ log_very_verbose("Initialised segtype: %s", segtype->name); ++ ++ return 1; ++} +diff --git a/lib/metadata/integrity_manip.c b/lib/metadata/integrity_manip.c +new file mode 100644 +index 0000000..7942be0 +--- /dev/null ++++ b/lib/metadata/integrity_manip.c +@@ -0,0 +1,821 @@ ++/* ++ * Copyright (C) 2014-2015 Red Hat, Inc. All rights reserved. ++ * ++ * This file is part of LVM2. ++ * ++ * This copyrighted material is made available to anyone wishing to use, ++ * modify, copy, or redistribute it subject to the terms and conditions ++ * of the GNU Lesser General Public License v.2.1. 
++ * ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this program; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++#include "lib/misc/lib.h" ++#include "lib/metadata/metadata.h" ++#include "lib/locking/locking.h" ++#include "lib/misc/lvm-string.h" ++#include "lib/commands/toolcontext.h" ++#include "lib/display/display.h" ++#include "lib/metadata/segtype.h" ++#include "lib/activate/activate.h" ++#include "lib/config/defaults.h" ++#include "lib/activate/dev_manager.h" ++ ++#define DEFAULT_TAG_SIZE 4 /* bytes */ ++#define DEFAULT_MODE 'J' ++#define DEFAULT_INTERNAL_HASH "crc32c" ++#define DEFAULT_BLOCK_SIZE 512 ++ ++#define ONE_MB_IN_BYTES 1048576 ++ ++int lv_is_integrity_origin(const struct logical_volume *lv) ++{ ++ struct seg_list *sl; ++ ++ dm_list_iterate_items(sl, &lv->segs_using_this_lv) { ++ if (!sl->seg || !sl->seg->lv || !sl->seg->origin) ++ continue; ++ if (lv_is_integrity(sl->seg->lv) && (sl->seg->origin == lv)) ++ return 1; ++ } ++ return 0; ++} ++ ++/* ++ * Every 500M of data needs 4M of metadata. ++ * (From trial and error testing.) ++ */ ++static uint64_t _lv_size_bytes_to_integrity_meta_bytes(uint64_t lv_size_bytes) ++{ ++ return ((lv_size_bytes / (500 * ONE_MB_IN_BYTES)) + 1) * (4 * ONE_MB_IN_BYTES); ++} ++ ++/* ++ * The user wants external metadata, but did not specify an existing ++ * LV to hold metadata, so create an LV for metadata. ++ */ ++static int _lv_create_integrity_metadata(struct cmd_context *cmd, ++ struct volume_group *vg, ++ struct lvcreate_params *lp, ++ struct logical_volume **meta_lv) ++{ ++ char metaname[NAME_LEN]; ++ uint64_t lv_size_bytes, meta_bytes, meta_sectors; ++ struct logical_volume *lv; ++ struct lvcreate_params lp_meta = { ++ .activate = CHANGE_AN, ++ .alloc = ALLOC_INHERIT, ++ .major = -1, ++ .minor = -1, ++ .permission = LVM_READ | LVM_WRITE, ++ .pvh = &vg->pvs, ++ .read_ahead = DM_READ_AHEAD_NONE, ++ .stripes = 1, ++ .vg_name = vg->name, ++ .zero = 0, ++ .wipe_signatures = 0, ++ .suppress_zero_warn = 1, ++ }; ++ ++ if (lp->lv_name && ++ dm_snprintf(metaname, NAME_LEN, "%s_imeta", lp->lv_name) < 0) { ++ log_error("Failed to create metadata LV name."); ++ return 0; ++ } ++ ++ lp_meta.lv_name = metaname; ++ lp_meta.pvh = lp->pvh; ++ ++ lv_size_bytes = (uint64_t)lp->extents * (uint64_t)vg->extent_size * 512; ++ meta_bytes = _lv_size_bytes_to_integrity_meta_bytes(lv_size_bytes); ++ meta_sectors = meta_bytes / 512; ++ lp_meta.extents = meta_sectors / vg->extent_size; ++ ++ log_print_unless_silent("Creating integrity metadata LV %s with size %s.", ++ metaname, display_size(cmd, meta_sectors)); ++ ++ dm_list_init(&lp_meta.tags); ++ ++ if (!(lp_meta.segtype = get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_STRIPED))) ++ return_0; ++ ++ if (!(lv = lv_create_single(vg, &lp_meta))) { ++ log_error("Failed to create integrity metadata LV"); ++ return 0; ++ } ++ ++ if (dm_list_size(&lv->segments) > 1) { ++ log_error("Integrity metadata uses more than one segment."); ++ return 0; ++ } ++ ++ *meta_lv = lv; ++ return 1; ++} ++ ++int lv_extend_integrity_in_raid(struct logical_volume *lv, struct dm_list *pvh) ++{ ++ struct cmd_context *cmd = lv->vg->cmd; ++ struct volume_group *vg = lv->vg; ++ const struct segment_type *segtype; ++ struct lv_segment *seg_top, *seg_image; ++ struct logical_volume *lv_image; ++ struct logical_volume *lv_iorig; ++ struct logical_volume *lv_imeta; ++ struct dm_list allocatable_pvs; ++ struct dm_list *use_pvh; 
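/*
 * Worked example, not part of the patch: the rule above allocates
 * ((data size / 500 MiB) + 1) * 4 MiB of integrity metadata, so
 *
 *   100 GiB of data -> (204 + 1) * 4 MiB =  820 MiB of metadata
 *     1 TiB of data -> (2097 + 1) * 4 MiB = 8392 MiB of metadata
 *
 * A minimal standalone check of the same arithmetic:
 */
#include <stdio.h>

/* Same formula as _lv_size_bytes_to_integrity_meta_bytes() above. */
static unsigned long long meta_bytes(unsigned long long lv_size_bytes)
{
	return ((lv_size_bytes / (500ULL * 1048576)) + 1) * (4ULL * 1048576);
}

int main(void)
{
	printf("100 GiB -> %llu MiB\n", meta_bytes(100ULL << 30) >> 20);
	printf("  1 TiB -> %llu MiB\n", meta_bytes(1ULL << 40) >> 20);
	return 0;
}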
++ uint64_t lv_size_bytes, meta_bytes, meta_sectors, prev_meta_sectors; ++ uint32_t meta_extents, prev_meta_extents; ++ uint32_t area_count, s; ++ ++ seg_top = first_seg(lv); ++ ++ if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED))) ++ return_0; ++ ++ area_count = seg_top->area_count; ++ ++ for (s = 0; s < area_count; s++) { ++ lv_image = seg_lv(seg_top, s); ++ seg_image = first_seg(lv_image); ++ ++ if (!(lv_imeta = seg_image->integrity_meta_dev)) { ++ log_error("LV %s segment has no integrity metadata device.", display_lvname(lv)); ++ return 0; ++ } ++ ++ if (!(lv_iorig = seg_lv(seg_image, 0))) { ++ log_error("LV %s integrity segment has no origin", display_lvname(lv)); ++ return 0; ++ } ++ ++ lv_size_bytes = lv_iorig->size * 512; ++ meta_bytes = _lv_size_bytes_to_integrity_meta_bytes(lv_size_bytes); ++ meta_sectors = meta_bytes / 512; ++ meta_extents = meta_sectors / vg->extent_size; ++ ++ prev_meta_sectors = lv_imeta->size; ++ prev_meta_extents = prev_meta_sectors / vg->extent_size; ++ ++ if (meta_extents <= prev_meta_extents) { ++ log_debug("extend not needed for imeta LV %s", lv_imeta->name); ++ continue; ++ } ++ ++ /* ++ * We only allow lv_imeta to exist on a single PV (for now), ++ * so the allocatable_pvs is the one PV currently used by ++ * lv_imeta. ++ */ ++ dm_list_init(&allocatable_pvs); ++ ++ if (!get_pv_list_for_lv(cmd->mem, lv_imeta, &allocatable_pvs)) { ++ log_error("Failed to build list of PVs for extending %s.", display_lvname(lv_imeta)); ++ return 0; ++ } ++ ++ use_pvh = &allocatable_pvs; ++ ++ if (!lv_extend(lv_imeta, segtype, 1, 0, 0, 0, ++ meta_extents - prev_meta_extents, ++ use_pvh, lv_imeta->alloc, 0)) { ++ log_error("Failed to extend integrity metadata LV %s", lv_imeta->name); ++ return 0; ++ } ++ } ++ ++ return 1; ++} ++ ++int lv_remove_integrity_from_raid(struct logical_volume *lv) ++{ ++ struct logical_volume *iorig_lvs[DEFAULT_RAID_MAX_IMAGES]; ++ struct logical_volume *imeta_lvs[DEFAULT_RAID_MAX_IMAGES]; ++ struct cmd_context *cmd = lv->vg->cmd; ++ struct volume_group *vg = lv->vg; ++ struct lv_segment *seg_top, *seg_image; ++ struct logical_volume *lv_image; ++ struct logical_volume *lv_iorig; ++ struct logical_volume *lv_imeta; ++ uint32_t area_count, s; ++ int is_active = lv_is_active(lv); ++ ++ seg_top = first_seg(lv); ++ ++ if (!seg_is_raid1(seg_top) && !seg_is_raid4(seg_top) && ++ !seg_is_any_raid5(seg_top) && !seg_is_any_raid6(seg_top) && ++ !seg_is_any_raid10(seg_top)) { ++ log_error("LV %s segment is unsupported raid for integrity.", display_lvname(lv)); ++ return 0; ++ } ++ ++ area_count = seg_top->area_count; ++ ++ for (s = 0; s < area_count; s++) { ++ lv_image = seg_lv(seg_top, s); ++ seg_image = first_seg(lv_image); ++ ++ if (!(lv_imeta = seg_image->integrity_meta_dev)) { ++ log_error("LV %s segment has no integrity metadata device.", display_lvname(lv)); ++ return 0; ++ } ++ ++ if (!(lv_iorig = seg_lv(seg_image, 0))) { ++ log_error("LV %s integrity segment has no origin", display_lvname(lv)); ++ return 0; ++ } ++ ++ if (!remove_seg_from_segs_using_this_lv(seg_image->integrity_meta_dev, seg_image)) ++ return_0; ++ ++ iorig_lvs[s] = lv_iorig; ++ imeta_lvs[s] = lv_imeta; ++ ++ lv_image->status &= ~INTEGRITY; ++ seg_image->integrity_meta_dev = NULL; ++ seg_image->integrity_data_sectors = 0; ++ memset(&seg_image->integrity_settings, 0, sizeof(seg_image->integrity_settings)); ++ ++ if (!remove_layer_from_lv(lv_image, lv_iorig)) ++ return_0; ++ } ++ ++ if (is_active) { ++ /* vg_write(), suspend_lv(), vg_commit(), resume_lv() */ ++ if 
(!lv_update_and_reload(lv)) { ++ log_error("Failed to update and reload LV after integrity remove."); ++ return 0; ++ } ++ } ++ ++ for (s = 0; s < area_count; s++) { ++ lv_iorig = iorig_lvs[s]; ++ lv_imeta = imeta_lvs[s]; ++ ++ if (is_active) { ++ if (!deactivate_lv(cmd, lv_iorig)) ++ log_error("Failed to deactivate unused iorig LV %s.", lv_iorig->name); ++ ++ if (!deactivate_lv(cmd, lv_imeta)) ++ log_error("Failed to deactivate unused imeta LV %s.", lv_imeta->name); ++ } ++ ++ lv_imeta->status &= ~INTEGRITY_METADATA; ++ lv_set_visible(lv_imeta); ++ ++ if (!lv_remove(lv_iorig)) ++ log_error("Failed to remove unused iorig LV %s.", lv_iorig->name); ++ ++ if (!lv_remove(lv_imeta)) ++ log_error("Failed to remove unused imeta LV %s.", lv_imeta->name); ++ } ++ ++ if (!vg_write(vg) || !vg_commit(vg)) ++ return_0; ++ ++ return 1; ++} ++ ++static int _set_integrity_block_size(struct cmd_context *cmd, struct logical_volume *lv, ++ struct integrity_settings *settings, ++ int lbs_4k, int lbs_512, int pbs_4k, int pbs_512) ++{ ++ char pathname[PATH_MAX]; ++ struct device *fs_dev; ++ uint32_t fs_block_size = 0; ++ int rv; ++ ++ if (lbs_4k && lbs_512) { ++ log_error("Integrity requires consistent logical block size for LV devices."); ++ goto_bad; ++ } ++ ++ if (settings->block_size && ++ (settings->block_size != 512 && settings->block_size != 1024 && ++ settings->block_size != 2048 && settings->block_size != 4096)) { ++ log_error("Invalid integrity block size, possible values are 512, 1024, 2048, 4096"); ++ goto_bad; ++ } ++ ++ if (lbs_4k && settings->block_size && (settings->block_size < 4096)) { ++ log_error("Integrity block size %u not allowed with device logical block size 4096.", ++ settings->block_size); ++ goto_bad; ++ } ++ ++ if (!strcmp(cmd->name, "lvcreate")) { ++ if (lbs_4k) { ++ settings->block_size = 4096; ++ } else if (lbs_512 && pbs_4k && !pbs_512) { ++ settings->block_size = 4096; ++ } else if (lbs_512) { ++ if (!settings->block_size) ++ settings->block_size = 512; ++ } else if (!lbs_4k && !lbs_512) { ++ if (!settings->block_size) ++ settings->block_size = 512; ++ log_print("Using integrity block size %u with unknown device logical block size.", ++ settings->block_size); ++ } else { ++ goto_bad; ++ } ++ ++ } else if (!strcmp(cmd->name, "lvconvert")) { ++ if (dm_snprintf(pathname, sizeof(pathname), "%s%s/%s", cmd->dev_dir, ++ lv->vg->name, lv->name) < 0) { ++ log_error("Path name too long to get LV block size %s", display_lvname(lv)); ++ goto_bad; ++ } ++ if (!(fs_dev = dev_cache_get(cmd, pathname, NULL))) { ++ log_error("Device for LV not found to check block size %s", display_lvname(lv)); ++ goto_bad; ++ } ++ ++ /* ++ * get_fs_block_size() returns the libblkid BLOCK_SIZE value, ++ * where libblkid has fs-specific code to set BLOCK_SIZE to the ++ * value we need here. ++ * ++ * The term "block size" here may not equate directly to what the fs ++ * calls the block size, e.g. xfs calls this the sector size (and ++ * something different the block size); while ext4 does call this ++ * value the block size, but it's possible values are not the same ++ * as xfs's, and do not seem to relate directly to the device LBS. 
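 * (Illustration, not in the original comment: an ext4 filesystem made with
 * 4 KiB blocks reports BLOCK_SIZE=4096 here, while xfs reports its sector
 * size, typically 512, even when its own block size is 4 KiB.)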
++ */ ++ rv = get_fs_block_size(fs_dev, &fs_block_size); ++ if (!rv || !fs_block_size) { ++ int use_bs; ++ ++ if (lbs_4k && pbs_4k) { ++ use_bs = 4096; ++ } else if (lbs_512 && pbs_512) { ++ use_bs = 512; ++ } else if (lbs_512 && pbs_4k) { ++ if (settings->block_size == 4096) ++ use_bs = 4096; ++ else ++ use_bs = 512; ++ } else { ++ use_bs = 512; ++ } ++ ++ if (settings->block_size && (settings->block_size != use_bs)) { ++ log_error("Cannot use integrity block size %u with unknown file system block size, logical block size %u, physical block size %u.", ++ settings->block_size, lbs_4k ? 4096 : 512, pbs_4k ? 4096 : 512); ++ goto bad; ++ } ++ ++ settings->block_size = use_bs; ++ ++ log_print("Using integrity block size %u for unknown file system block size, logical block size %u, physical block size %u.", ++ settings->block_size, lbs_4k ? 4096 : 512, pbs_4k ? 4096 : 512); ++ goto out; ++ } ++ ++ if (!settings->block_size) { ++ if (fs_block_size <= 4096) ++ settings->block_size = fs_block_size; ++ else ++ settings->block_size = 4096; /* dm-integrity max is 4096 */ ++ log_print("Using integrity block size %u for file system block size %u.", ++ settings->block_size, fs_block_size); ++ } else { ++ /* let user specify integrity block size that is less than fs block size */ ++ if (settings->block_size > fs_block_size) { ++ log_error("Integrity block size %u cannot be larger than file system block size %u.", ++ settings->block_size, fs_block_size); ++ goto_bad; ++ } ++ log_print("Using integrity block size %u for file system block size %u.", ++ settings->block_size, fs_block_size); ++ } ++ } ++out: ++ return 1; ++bad: ++ return 0; ++} ++ ++/* ++ * Add integrity to each raid image. ++ * ++ * for each rimage_N: ++ * . create and allocate a new linear LV rimage_N_imeta ++ * . move the segments from rimage_N to a new rimage_N_iorig ++ * . 
add an integrity segment to rimage_N with ++ * origin=rimage_N_iorig, meta_dev=rimage_N_imeta ++ * ++ * Before: ++ * rimage_0 ++ * segment1: striped: pv0:A ++ * rimage_1 ++ * segment1: striped: pv1:B ++ * ++ * After: ++ * rimage_0 ++ * segment1: integrity: rimage_0_iorig, rimage_0_imeta ++ * rimage_1 ++ * segment1: integrity: rimage_1_iorig, rimage_1_imeta ++ * rimage_0_iorig ++ * segment1: striped: pv0:A ++ * rimage_1_iorig ++ * segment1: striped: pv1:B ++ * rimage_0_imeta ++ * segment1: striped: pv2:A ++ * rimage_1_imeta ++ * segment1: striped: pv2:B ++ * ++ */ ++ ++int lv_add_integrity_to_raid(struct logical_volume *lv, struct integrity_settings *settings, ++ struct dm_list *pvh, struct logical_volume *lv_imeta_0) ++{ ++ char imeta_name[NAME_LEN]; ++ char *imeta_name_dup; ++ struct lvcreate_params lp; ++ struct dm_list allocatable_pvs; ++ struct logical_volume *imeta_lvs[DEFAULT_RAID_MAX_IMAGES]; ++ struct cmd_context *cmd = lv->vg->cmd; ++ struct volume_group *vg = lv->vg; ++ struct logical_volume *lv_image, *lv_imeta, *lv_iorig; ++ struct lv_segment *seg_top, *seg_image; ++ struct pv_list *pvl; ++ const struct segment_type *segtype; ++ struct integrity_settings *set = NULL; ++ struct dm_list *use_pvh = NULL; ++ uint32_t area_count, s; ++ uint32_t revert_meta_lvs = 0; ++ int lbs_4k = 0, lbs_512 = 0, lbs_unknown = 0; ++ int pbs_4k = 0, pbs_512 = 0, pbs_unknown = 0; ++ int is_active; ++ ++ memset(imeta_lvs, 0, sizeof(imeta_lvs)); ++ ++ is_active = lv_is_active(lv); ++ ++ if (dm_list_size(&lv->segments) != 1) ++ return_0; ++ ++ if (!dm_list_empty(&lv->segs_using_this_lv)) { ++ log_error("Integrity can only be added to top level raid LV."); ++ return 0; ++ } ++ ++ if (lv_is_origin(lv)) { ++ log_error("Integrity cannot be added to snapshot origins."); ++ return 0; ++ } ++ ++ seg_top = first_seg(lv); ++ area_count = seg_top->area_count; ++ ++ if (!seg_is_raid1(seg_top) && !seg_is_raid4(seg_top) && ++ !seg_is_any_raid5(seg_top) && !seg_is_any_raid6(seg_top) && ++ !seg_is_any_raid10(seg_top)) { ++ log_error("Integrity can only be added to raid1,4,5,6,10."); ++ return 0; ++ } ++ ++ /* ++ * For each rimage, create an _imeta LV for integrity metadata. ++ * Each needs to be zeroed. ++ */ ++ for (s = 0; s < area_count; s++) { ++ struct logical_volume *meta_lv; ++ struct wipe_params wipe = { .do_zero = 1, .zero_sectors = 8 }; ++ ++ if (s >= DEFAULT_RAID_MAX_IMAGES) ++ goto_bad; ++ ++ lv_image = seg_lv(seg_top, s); ++ ++ /* ++ * This function is used to add integrity to new images added ++ * to the raid, in which case old images will already be ++ * integrity. ++ */ ++ if (seg_is_integrity(first_seg(lv_image))) ++ continue; ++ ++ if (!seg_is_striped(first_seg(lv_image))) { ++ log_error("raid image must be linear to add integrity"); ++ goto_bad; ++ } ++ ++ /* ++ * Use an existing lv_imeta from previous linear+integrity LV. ++ * FIXME: is it guaranteed that lv_image_0 is the existing? 
++ */ ++ if (!s && lv_imeta_0) { ++ if (dm_snprintf(imeta_name, sizeof(imeta_name), "%s_imeta", lv_image->name) > 0) { ++ if ((imeta_name_dup = dm_pool_strdup(vg->vgmem, imeta_name))) ++ lv_imeta_0->name = imeta_name_dup; ++ } ++ imeta_lvs[0] = lv_imeta_0; ++ continue; ++ } ++ ++ dm_list_init(&allocatable_pvs); ++ ++ if (!get_pv_list_for_lv(cmd->mem, lv_image, &allocatable_pvs)) { ++ log_error("Failed to build list of PVs for %s.", display_lvname(lv_image)); ++ goto_bad; ++ } ++ ++ dm_list_iterate_items(pvl, &allocatable_pvs) { ++ unsigned int pbs = 0; ++ unsigned int lbs = 0; ++ ++ if (!dev_get_direct_block_sizes(pvl->pv->dev, &pbs, &lbs)) { ++ lbs_unknown++; ++ pbs_unknown++; ++ continue; ++ } ++ if (lbs == 4096) ++ lbs_4k++; ++ else if (lbs == 512) ++ lbs_512++; ++ else ++ lbs_unknown++; ++ if (pbs == 4096) ++ pbs_4k++; ++ else if (pbs == 512) ++ pbs_512++; ++ else ++ pbs_unknown++; ++ } ++ ++ use_pvh = &allocatable_pvs; ++ ++ /* ++ * allocate a new linear LV NAME_rimage_N_imeta ++ */ ++ memset(&lp, 0, sizeof(lp)); ++ lp.lv_name = lv_image->name; ++ lp.pvh = use_pvh; ++ lp.extents = lv_image->size / vg->extent_size; ++ ++ if (!_lv_create_integrity_metadata(cmd, vg, &lp, &meta_lv)) ++ goto_bad; ++ ++ revert_meta_lvs++; ++ ++ /* Used below to set up the new integrity segment. */ ++ imeta_lvs[s] = meta_lv; ++ ++ /* ++ * dm-integrity requires the metadata LV header to be zeroed. ++ */ ++ ++ if (!activate_lv(cmd, meta_lv)) { ++ log_error("Failed to activate LV %s to zero", display_lvname(meta_lv)); ++ goto_bad; ++ } ++ ++ if (!wipe_lv(meta_lv, wipe)) { ++ log_error("Failed to zero LV for integrity metadata %s", display_lvname(meta_lv)); ++ if (deactivate_lv(cmd, meta_lv)) ++ log_error("Failed to deactivate LV %s after zero", display_lvname(meta_lv)); ++ goto_bad; ++ } ++ ++ if (!deactivate_lv(cmd, meta_lv)) { ++ log_error("Failed to deactivate LV %s after zero", display_lvname(meta_lv)); ++ goto_bad; ++ } ++ } ++ ++ /* ++ * Set settings->block_size which will be copied to segment settings below. ++ * integrity block size chosen based on device logical block size and ++ * file system block size. ++ */ ++ if (!_set_integrity_block_size(cmd, lv, settings, lbs_4k, lbs_512, pbs_4k, pbs_512)) ++ goto_bad; ++ ++ /* ++ * For each rimage, move its segments to a new rimage_iorig and give ++ * the rimage a new integrity segment. ++ */ ++ for (s = 0; s < area_count; s++) { ++ lv_image = seg_lv(seg_top, s); ++ ++ /* Not adding integrity to this image. */ ++ if (!imeta_lvs[s]) ++ continue; ++ ++ if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_INTEGRITY))) ++ goto_bad; ++ ++ log_debug("Adding integrity to raid image %s", lv_image->name); ++ ++ /* ++ * "lv_iorig" is a new LV with new id, but with the segments ++ * from "lv_image". "lv_image" keeps the existing name and id, ++ * but gets a new integrity segment, in place of the segments ++ * that were moved to lv_iorig. ++ */ ++ if (!(lv_iorig = insert_layer_for_lv(cmd, lv_image, INTEGRITY, "_iorig"))) ++ goto_bad; ++ ++ lv_image->status |= INTEGRITY; ++ ++ /* ++ * Set up the new first segment of lv_image as integrity. 
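 * (Illustration, not in the original comment: settings the caller left unset
 * fall back to the defaults at the top of this file, mode "J", tag_size 4
 * bytes, block_size 512 and internal_hash "crc32c", and integrity_recalculate
 * is set so the first activation initializes the new metadata.)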
++ */ ++ seg_image = first_seg(lv_image); ++ seg_image->segtype = segtype; ++ ++ lv_imeta = imeta_lvs[s]; ++ lv_imeta->status |= INTEGRITY_METADATA; ++ lv_set_hidden(lv_imeta); ++ seg_image->integrity_data_sectors = lv_image->size; ++ seg_image->integrity_meta_dev = lv_imeta; ++ seg_image->integrity_recalculate = 1; ++ ++ memcpy(&seg_image->integrity_settings, settings, sizeof(struct integrity_settings)); ++ set = &seg_image->integrity_settings; ++ ++ if (!set->mode[0]) ++ set->mode[0] = DEFAULT_MODE; ++ ++ if (!set->tag_size) ++ set->tag_size = DEFAULT_TAG_SIZE; ++ ++ if (!set->block_size) ++ set->block_size = DEFAULT_BLOCK_SIZE; ++ ++ if (!set->internal_hash) ++ set->internal_hash = DEFAULT_INTERNAL_HASH; ++ } ++ ++ if (is_active) { ++ log_debug("Writing VG and updating LV with new integrity LV %s", lv->name); ++ ++ /* vg_write(), suspend_lv(), vg_commit(), resume_lv() */ ++ if (!lv_update_and_reload(lv)) { ++ log_error("LV update and reload failed"); ++ goto_bad; ++ } ++ revert_meta_lvs = 0; ++ ++ } else { ++ log_debug("Writing VG with new integrity LV %s", lv->name); ++ ++ if (!vg_write(vg) || !vg_commit(vg)) ++ goto_bad; ++ ++ revert_meta_lvs = 0; ++ ++ /* ++ * This first activation includes "recalculate" which starts the ++ * kernel's recalculating (initialization) process. ++ */ ++ ++ log_debug("Activating to start integrity initialization for LV %s", lv->name); ++ ++ if (!activate_lv(cmd, lv)) { ++ log_error("Failed to activate integrity LV to initialize."); ++ goto_bad; ++ } ++ } ++ ++ /* ++ * Now that the device is being initialized, update the VG to clear ++ * integrity_recalculate so that subsequent activations will not ++ * include "recalculate" and restart initialization. ++ */ ++ ++ log_debug("Writing VG with initialized integrity LV %s", lv->name); ++ ++ for (s = 0; s < area_count; s++) { ++ lv_image = seg_lv(seg_top, s); ++ seg_image = first_seg(lv_image); ++ seg_image->integrity_recalculate = 0; ++ } ++ ++ if (!vg_write(vg) || !vg_commit(vg)) ++ goto_bad; ++ ++ return 1; ++ ++bad: ++ log_error("Failed to add integrity."); ++ ++ for (s = 0; s < revert_meta_lvs; s++) { ++ if (!lv_remove(imeta_lvs[s])) ++ log_error("New integrity metadata LV may require manual removal."); ++ } ++ ++ if (!vg_write(vg) || !vg_commit(vg)) ++ log_error("New integrity metadata LV may require manual removal."); ++ ++ return 0; ++} ++ ++/* ++ * This should rarely if ever be used. A command that adds integrity ++ * to an LV will activate and then clear the flag. If it fails before ++ * clearing the flag, then this function will be used by a subsequent ++ * activation to clear the flag. 
++ */ ++void lv_clear_integrity_recalculate_metadata(struct logical_volume *lv) ++{ ++ struct volume_group *vg = lv->vg; ++ struct logical_volume *lv_image; ++ struct lv_segment *seg, *seg_image; ++ uint32_t s; ++ ++ seg = first_seg(lv); ++ ++ if (seg_is_raid(seg)) { ++ for (s = 0; s < seg->area_count; s++) { ++ lv_image = seg_lv(seg, s); ++ seg_image = first_seg(lv_image); ++ seg_image->integrity_recalculate = 0; ++ } ++ } else if (seg_is_integrity(seg)) { ++ seg->integrity_recalculate = 0; ++ } else { ++ log_error("Invalid LV type for clearing integrity"); ++ return; ++ } ++ ++ if (!vg_write(vg) || !vg_commit(vg)) { ++ log_warn("WARNING: failed to clear integrity recalculate flag for %s", ++ display_lvname(lv)); ++ } ++} ++ ++int lv_has_integrity_recalculate_metadata(struct logical_volume *lv) ++{ ++ struct logical_volume *lv_image; ++ struct lv_segment *seg, *seg_image; ++ uint32_t s; ++ int ret = 0; ++ ++ seg = first_seg(lv); ++ ++ if (seg_is_raid(seg)) { ++ for (s = 0; s < seg->area_count; s++) { ++ lv_image = seg_lv(seg, s); ++ seg_image = first_seg(lv_image); ++ ++ if (!seg_is_integrity(seg_image)) ++ continue; ++ if (seg_image->integrity_recalculate) ++ ret = 1; ++ } ++ } else if (seg_is_integrity(seg)) { ++ ret = seg->integrity_recalculate; ++ } ++ ++ return ret; ++} ++ ++int lv_raid_has_integrity(struct logical_volume *lv) ++{ ++ struct logical_volume *lv_image; ++ struct lv_segment *seg, *seg_image; ++ uint32_t s; ++ ++ seg = first_seg(lv); ++ ++ if (seg_is_raid(seg)) { ++ for (s = 0; s < seg->area_count; s++) { ++ lv_image = seg_lv(seg, s); ++ seg_image = first_seg(lv_image); ++ ++ if (seg_is_integrity(seg_image)) ++ return 1; ++ } ++ } ++ ++ return 0; ++} ++ ++int lv_get_raid_integrity_settings(struct logical_volume *lv, struct integrity_settings **isettings) ++{ ++ struct logical_volume *lv_image; ++ struct lv_segment *seg, *seg_image; ++ uint32_t s; ++ ++ seg = first_seg(lv); ++ ++ if (seg_is_raid(seg)) { ++ for (s = 0; s < seg->area_count; s++) { ++ lv_image = seg_lv(seg, s); ++ seg_image = first_seg(lv_image); ++ ++ if (seg_is_integrity(seg_image)) { ++ *isettings = &seg_image->integrity_settings; ++ return 1; ++ } ++ } ++ } ++ ++ return 0; ++} ++ +diff --git a/lib/metadata/lv.c b/lib/metadata/lv.c +index 17d4907..4ee58b4 100644 +--- a/lib/metadata/lv.c ++++ b/lib/metadata/lv.c +@@ -385,6 +385,17 @@ dm_percent_t lvseg_percent_with_info_and_seg_status(const struct lv_with_info_an + * Esentially rework _target_percent API for segtype. + */ + switch (s->type) { ++ case SEG_STATUS_INTEGRITY: ++ if (type != PERCENT_GET_DIRTY) ++ p = DM_PERCENT_INVALID; ++ else if (!s->integrity->recalc_sector) ++ p = DM_PERCENT_INVALID; ++ else if (s->integrity->recalc_sector == s->integrity->provided_data_sectors) ++ p = DM_PERCENT_100; ++ else ++ p = dm_make_percent(s->integrity->recalc_sector, ++ s->integrity->provided_data_sectors); ++ break; + case SEG_STATUS_CACHE: + if (s->cache->fail || s->cache->error) + p = DM_PERCENT_INVALID; +@@ -593,6 +604,8 @@ struct logical_volume *lv_origin_lv(const struct logical_volume *lv) + origin = first_seg(lv)->external_lv; + else if (lv_is_writecache(lv) && first_seg(lv)->origin) + origin = first_seg(lv)->origin; ++ else if (lv_is_integrity(lv) && first_seg(lv)->origin) ++ origin = first_seg(lv)->origin; + + return origin; + } +@@ -1208,10 +1221,13 @@ char *lv_attr_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_ + repstr[0] = (lv_is_merging_origin(lv)) ? 
'O' : 'o'; + else if (lv_is_pool_metadata(lv) || + lv_is_pool_metadata_spare(lv) || +- lv_is_raid_metadata(lv)) ++ lv_is_raid_metadata(lv) || ++ lv_is_integrity_metadata(lv)) + repstr[0] = 'e'; + else if (lv_is_cache_type(lv) || lv_is_writecache(lv)) + repstr[0] = 'C'; ++ else if (lv_is_integrity(lv)) ++ repstr[0] = 'g'; + else if (lv_is_raid(lv)) + repstr[0] = (lv_is_not_synced(lv)) ? 'R' : 'r'; + else if (lv_is_mirror(lv)) +diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c +index 3090a93..1311f70 100644 +--- a/lib/metadata/lv_manip.c ++++ b/lib/metadata/lv_manip.c +@@ -134,7 +134,9 @@ enum { + LV_TYPE_SANLOCK, + LV_TYPE_CACHEVOL, + LV_TYPE_WRITECACHE, +- LV_TYPE_WRITECACHEORIGIN ++ LV_TYPE_WRITECACHEORIGIN, ++ LV_TYPE_INTEGRITY, ++ LV_TYPE_INTEGRITYORIGIN + }; + + static const char *_lv_type_names[] = { +@@ -190,6 +192,8 @@ static const char *_lv_type_names[] = { + [LV_TYPE_CACHEVOL] = "cachevol", + [LV_TYPE_WRITECACHE] = "writecache", + [LV_TYPE_WRITECACHEORIGIN] = "writecacheorigin", ++ [LV_TYPE_INTEGRITY] = "integrity", ++ [LV_TYPE_INTEGRITYORIGIN] = "integrityorigin", + }; + + static int _lv_layout_and_role_mirror(struct dm_pool *mem, +@@ -461,6 +465,43 @@ bad: + return 0; + } + ++static int _lv_layout_and_role_integrity(struct dm_pool *mem, ++ const struct logical_volume *lv, ++ struct dm_list *layout, ++ struct dm_list *role, ++ int *public_lv) ++{ ++ int top_level = 0; ++ ++ /* non-top-level LVs */ ++ if (lv_is_integrity_metadata(lv)) { ++ if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_INTEGRITY]) || ++ !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_METADATA])) ++ goto_bad; ++ } else if (lv_is_integrity_origin(lv)) { ++ if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_INTEGRITY]) || ++ !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_ORIGIN]) || ++ !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_INTEGRITYORIGIN])) ++ goto_bad; ++ } else ++ top_level = 1; ++ ++ if (!top_level) { ++ *public_lv = 0; ++ return 1; ++ } ++ ++ /* top-level LVs */ ++ if (lv_is_integrity(lv)) { ++ if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_INTEGRITY])) ++ goto_bad; ++ } ++ ++ return 1; ++bad: ++ return 0; ++} ++ + static int _lv_layout_and_role_thick_origin_snapshot(struct dm_pool *mem, + const struct logical_volume *lv, + struct dm_list *layout, +@@ -577,6 +618,11 @@ int lv_layout_and_role(struct dm_pool *mem, const struct logical_volume *lv, + !_lv_layout_and_role_cache(mem, lv, *layout, *role, &public_lv)) + goto_bad; + ++ /* Integrity related */ ++ if ((lv_is_integrity(lv) || lv_is_integrity_origin(lv) || lv_is_integrity_metadata(lv)) && ++ !_lv_layout_and_role_integrity(mem, lv, *layout, *role, &public_lv)) ++ goto_bad; ++ + /* VDO and related */ + if (lv_is_vdo_type(lv) && + !_lv_layout_and_role_vdo(mem, lv, *layout, *role, &public_lv)) +@@ -1457,6 +1503,15 @@ static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete) + return_0; + } + ++ if (delete && seg_is_integrity(seg)) { ++ /* Remove integrity origin in addition to integrity layer. */ ++ if (!lv_remove(seg_lv(seg, 0))) ++ return_0; ++ /* Remove integrity metadata. 
*/ ++ if (seg->integrity_meta_dev && !lv_remove(seg->integrity_meta_dev)) ++ return_0; ++ } ++ + if ((pool_lv = seg->pool_lv)) { + if (!detach_pool_lv(seg)) + return_0; +@@ -4111,11 +4166,14 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah, + uint32_t extents, uint32_t first_area, + uint32_t mirrors, uint32_t stripes, uint32_t stripe_size) + { ++ struct logical_volume *sub_lvs[DEFAULT_RAID_MAX_IMAGES]; + const struct segment_type *segtype; +- struct logical_volume *sub_lv, *meta_lv; ++ struct logical_volume *meta_lv, *sub_lv; + struct lv_segment *seg = first_seg(lv); ++ struct lv_segment *sub_lv_seg; + uint32_t fa, s; + int clear_metadata = 0; ++ int integrity_sub_lvs = 0; + uint32_t area_multiple = 1; + + if (!(segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_STRIPED))) +@@ -4133,16 +4191,28 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah, + area_multiple = seg->area_count; + } + ++ for (s = 0; s < seg->area_count; s++) { ++ sub_lv = seg_lv(seg, s); ++ sub_lv_seg = sub_lv ? first_seg(sub_lv) : NULL; ++ ++ if (sub_lv_seg && seg_is_integrity(sub_lv_seg)) { ++ sub_lvs[s] = seg_lv(sub_lv_seg, 0); ++ integrity_sub_lvs = 1; ++ } else ++ sub_lvs[s] = sub_lv; ++ } ++ + for (fa = first_area, s = 0; s < seg->area_count; s++) { +- if (is_temporary_mirror_layer(seg_lv(seg, s))) { +- if (!_lv_extend_layered_lv(ah, seg_lv(seg, s), extents / area_multiple, ++ sub_lv = sub_lvs[s]; ++ ++ if (is_temporary_mirror_layer(sub_lv)) { ++ if (!_lv_extend_layered_lv(ah, sub_lv, extents / area_multiple, + fa, mirrors, stripes, stripe_size)) + return_0; +- fa += lv_mirror_count(seg_lv(seg, s)); ++ fa += lv_mirror_count(sub_lv); + continue; + } + +- sub_lv = seg_lv(seg, s); + if (!lv_add_segment(ah, fa, stripes, sub_lv, segtype, + stripe_size, sub_lv->status, 0)) { + log_error("Aborting. Failed to extend %s in %s.", +@@ -4184,6 +4254,41 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah, + fa += stripes; + } + ++ /* ++ * In raid+integrity, the lv_iorig raid images have been extended above. ++ * Now propagate the new lv_iorig sizes up to the integrity LV layers ++ * that are referencing the lv_iorig. 
++ */ ++ if (integrity_sub_lvs) { ++ for (s = 0; s < seg->area_count; s++) { ++ struct logical_volume *lv_image; ++ struct logical_volume *lv_iorig; ++ struct logical_volume *lv_imeta; ++ struct lv_segment *seg_image; ++ ++ lv_image = seg_lv(seg, s); ++ seg_image = first_seg(lv_image); ++ ++ if (!(lv_imeta = seg_image->integrity_meta_dev)) { ++ log_error("1"); ++ return_0; ++ } ++ ++ if (!(lv_iorig = seg_lv(seg_image, 0))) { ++ log_error("2"); ++ return_0; ++ } ++ ++ /* new size in sectors */ ++ lv_image->size = lv_iorig->size; ++ seg_image->integrity_data_sectors = lv_iorig->size; ++ /* new size in extents */ ++ lv_image->le_count = lv_iorig->le_count; ++ seg_image->len = lv_iorig->le_count; ++ seg_image->area_len = lv_iorig->le_count; ++ } ++ } ++ + seg->len += extents; + if (seg_is_raid(seg)) + seg->area_len = seg->len; +@@ -4345,6 +4450,13 @@ int lv_extend(struct logical_volume *lv, + mirrors, stripes, stripe_size))) + goto_out; + ++ if (lv_raid_has_integrity(lv)) { ++ if (!lv_extend_integrity_in_raid(lv, allocatable_pvs)) { ++ r = 0; ++ goto_out; ++ } ++ } ++ + /* + * If we are expanding an existing mirror, we can skip the + * resync of the extension if the LV is currently in-sync +@@ -4538,6 +4650,9 @@ static int _for_each_sub_lv(struct logical_volume *lv, int level, + if (!_for_each_sub_lv(seg->writecache, level, fn, data)) + return_0; + ++ if (!_for_each_sub_lv(seg->integrity_meta_dev, level, fn, data)) ++ return_0; ++ + for (s = 0; s < seg->area_count; s++) { + if (seg_type(seg, s) != AREA_LV) + continue; +@@ -5064,6 +5179,12 @@ static int _lvresize_check(struct logical_volume *lv, + return 0; + } + ++ if (lv_is_integrity(lv) || lv_raid_has_integrity(lv)) { ++ if (lp->resize == LV_REDUCE) { ++ log_error("Cannot reduce LV with integrity."); ++ return 0; ++ } ++ } + return 1; + } + +@@ -5613,6 +5734,9 @@ static int _lvresize_prepare(struct logical_volume **lv, + if (lv_is_thin_pool(*lv) || lv_is_vdo_pool(*lv)) + *lv = seg_lv(first_seg(*lv), 0); /* switch to data LV */ + ++ if (lv_is_integrity(*lv)) ++ *lv = seg_lv(first_seg(*lv), 0); ++ + /* Resolve extents from size */ + if (lp->size && !_lvresize_adjust_size(vg, lp->size, lp->sign, &lp->extents)) + return_0; +@@ -7948,6 +8072,11 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg, + /* FIXME Eventually support raid/mirrors with -m */ + if (!(create_segtype = get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_STRIPED))) + return_0; ++ ++ } else if (seg_is_integrity(lp)) { ++ if (!(create_segtype = get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_STRIPED))) ++ return_0; ++ + } else if (seg_is_mirrored(lp) || (seg_is_raid(lp) && !seg_is_any_raid0(lp))) { + if (!(lp->region_size = adjusted_mirror_region_size(vg->cmd, + vg->extent_size, +@@ -8198,6 +8327,15 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg, + goto out; + } + ++ if (seg_is_raid(lp) && lp->raidintegrity) { ++ log_debug("Adding integrity to new LV"); ++ ++ if (!lv_add_integrity_to_raid(lv, &lp->integrity_settings, lp->pvh, NULL)) ++ goto revert_new_lv; ++ ++ backup(vg); ++ } ++ + /* Do not scan this LV until properly zeroed/wiped. 
*/ + if (_should_wipe_lv(lp, lv, 0)) + lv->status |= LV_NOSCAN; +diff --git a/lib/metadata/merge.c b/lib/metadata/merge.c +index 11b26b4..ecd55ef 100644 +--- a/lib/metadata/merge.c ++++ b/lib/metadata/merge.c +@@ -742,6 +742,8 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg) + seg_found++; + if (seg->metadata_lv == lv || seg->pool_lv == lv || seg->writecache == lv) + seg_found++; ++ if (seg->integrity_meta_dev == lv) ++ seg_found++; + if (seg_is_thin_volume(seg) && (seg->origin == lv || seg->external_lv == lv)) + seg_found++; + +diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h +index 35c1231..52bc776 100644 +--- a/lib/metadata/metadata-exported.h ++++ b/lib/metadata/metadata-exported.h +@@ -84,12 +84,14 @@ + #define CONVERTING UINT64_C(0x0000000000400000) /* LV */ + + #define MISSING_PV UINT64_C(0x0000000000800000) /* PV */ ++#define INTEGRITY UINT64_C(0x0000000000800000) /* LV - Internal use only */ + #define PV_MOVED_VG UINT64_C(0x4000000000000000) /* PV - Moved to a new VG */ + #define PARTIAL_LV UINT64_C(0x0000000001000000) /* LV - derived flag, not + written out in metadata*/ + + //#define POSTORDER_FLAG UINT64_C(0x0000000002000000) /* Not real flags, reserved for + //#define POSTORDER_OPEN_FLAG UINT64_C(0x0000000004000000) temporary use inside vg_read_internal. */ ++#define INTEGRITY_METADATA UINT64_C(0x0000000004000000) /* LV - Internal use only */ + #define VIRTUAL_ORIGIN UINT64_C(0x0000000008000000) /* LV - internal use only */ + + #define MERGING UINT64_C(0x0000000010000000) /* LV SEG */ +@@ -261,6 +263,8 @@ + #define lv_is_pool_metadata_spare(lv) (((lv)->status & POOL_METADATA_SPARE) ? 1 : 0) + #define lv_is_lockd_sanlock_lv(lv) (((lv)->status & LOCKD_SANLOCK_LV) ? 1 : 0) + #define lv_is_writecache(lv) (((lv)->status & WRITECACHE) ? 1 : 0) ++#define lv_is_integrity(lv) (((lv)->status & INTEGRITY) ? 1 : 0) ++#define lv_is_integrity_metadata(lv) (((lv)->status & INTEGRITY_METADATA) ? 1 : 0) + + #define lv_is_vdo(lv) (((lv)->status & LV_VDO) ? 1 : 0) + #define lv_is_vdo_pool(lv) (((lv)->status & LV_VDO_POOL) ? 
1 : 0) +@@ -272,9 +276,11 @@ + /* Recognize component LV (matching lib/misc/lvm-string.c _lvname_has_reserved_component_string()) */ + #define lv_is_component(lv) (lv_is_cache_origin(lv) || \ + lv_is_writecache_origin(lv) || \ ++ lv_is_integrity_origin(lv) || \ + ((lv)->status & (\ + CACHE_POOL_DATA |\ + CACHE_POOL_METADATA |\ ++ INTEGRITY_METADATA |\ + LV_CACHE_VOL |\ + LV_VDO_POOL_DATA |\ + MIRROR_IMAGE |\ +@@ -519,6 +525,11 @@ struct lv_segment { + uint32_t writecache_block_size; /* For writecache */ + struct writecache_settings writecache_settings; /* For writecache */ + ++ uint64_t integrity_data_sectors; ++ struct logical_volume *integrity_meta_dev; ++ struct integrity_settings integrity_settings; ++ uint32_t integrity_recalculate; ++ + struct dm_vdo_target_params vdo_params; /* For VDO-pool */ + uint32_t vdo_pool_header_size; /* For VDO-pool */ + uint32_t vdo_pool_virtual_extents; /* For VDO-pool */ +@@ -992,6 +1003,10 @@ struct lvcreate_params { + alloc_policy_t alloc; /* all */ + struct dm_vdo_target_params vdo_params; /* vdo */ + ++ int raidintegrity; ++ const char *raidintegritymode; ++ struct integrity_settings integrity_settings; ++ + struct dm_list tags; /* all */ + + int yes; +@@ -1086,6 +1101,8 @@ int lv_is_cache_origin(const struct logical_volume *lv); + int lv_is_writecache_origin(const struct logical_volume *lv); + int lv_is_writecache_cachevol(const struct logical_volume *lv); + ++int lv_is_integrity_origin(const struct logical_volume *lv); ++ + int lv_is_merging_cow(const struct logical_volume *cow); + uint32_t cow_max_extents(const struct logical_volume *origin, uint32_t chunk_size); + int cow_has_min_chunks(const struct volume_group *vg, uint32_t cow_extents, uint32_t chunk_size); +@@ -1389,4 +1406,13 @@ struct dm_list *create_pv_list(struct dm_pool *mem, struct volume_group *vg, int + char **argv, int allocatable_only); + struct dm_list *clone_pv_list(struct dm_pool *mem, struct dm_list *pvsl); + ++int lv_add_integrity_to_raid(struct logical_volume *lv, struct integrity_settings *settings, struct dm_list *pvh, ++ struct logical_volume *lv_imeta_0); ++int lv_remove_integrity_from_raid(struct logical_volume *lv); ++void lv_clear_integrity_recalculate_metadata(struct logical_volume *lv); ++int lv_has_integrity_recalculate_metadata(struct logical_volume *lv); ++int lv_raid_has_integrity(struct logical_volume *lv); ++int lv_extend_integrity_in_raid(struct logical_volume *lv, struct dm_list *pvh); ++int lv_get_raid_integrity_settings(struct logical_volume *lv, struct integrity_settings **isettings); ++ + #endif +diff --git a/lib/metadata/raid_manip.c b/lib/metadata/raid_manip.c +index fa1b91a..3b3e1d3 100644 +--- a/lib/metadata/raid_manip.c ++++ b/lib/metadata/raid_manip.c +@@ -3119,6 +3119,11 @@ static int _raid_remove_images(struct logical_volume *lv, int yes, + + /* Convert to linear? */ + if (new_count == 1) { ++ if (lv_raid_has_integrity(lv)) { ++ log_error("Integrity must be removed before converting raid to linear."); ++ return 0; ++ } ++ + if (!yes && yes_no_prompt("Are you sure you want to convert %s LV %s to type %s losing all resilience? 
[y/n]: ", + lvseg_name(first_seg(lv)), display_lvname(lv), SEG_TYPE_NAME_LINEAR) == 'n') { + log_error("Logical volume %s NOT converted to \"%s\".", +@@ -3265,6 +3270,11 @@ int lv_raid_split(struct logical_volume *lv, int yes, const char *split_name, + return 0; + } + ++ if (lv_raid_has_integrity(lv)) { ++ log_error("Integrity must be removed before splitting."); ++ return 0; ++ } ++ + if ((old_count - new_count) != 1) { + log_error("Unable to split more than one image from %s.", + display_lvname(lv)); +@@ -3328,9 +3338,11 @@ int lv_raid_split(struct logical_volume *lv, int yes, const char *split_name, + } + + /* Convert to linear? */ +- if ((new_count == 1) && !_raid_remove_top_layer(lv, &removal_lvs)) { +- log_error("Failed to remove RAID layer after linear conversion."); +- return 0; ++ if (new_count == 1) { ++ if (!_raid_remove_top_layer(lv, &removal_lvs)) { ++ log_error("Failed to remove RAID layer after linear conversion."); ++ return 0; ++ } + } + + /* Get first item */ +@@ -3432,6 +3444,11 @@ int lv_raid_split_and_track(struct logical_volume *lv, + return 0; + } + ++ if (lv_raid_has_integrity(lv)) { ++ log_error("Integrity must be removed before splitting."); ++ return 0; ++ } ++ + if (!seg_is_mirrored(seg)) { + log_error("Unable to split images from non-mirrored RAID."); + return 0; +@@ -6727,7 +6744,17 @@ static int _lv_raid_rebuild_or_replace(struct logical_volume *lv, + struct lv_segment *raid_seg = first_seg(lv); + struct lv_list *lvl; + char *tmp_names[raid_seg->area_count * 2]; ++ char tmp_name_buf[NAME_LEN]; ++ char *tmp_name_dup; + const char *action_str = rebuild ? "rebuild" : "replace"; ++ int has_integrity; ++ ++ if ((has_integrity = lv_raid_has_integrity(lv))) { ++ if (rebuild) { ++ log_error("Can't rebuild raid with integrity."); ++ return 0; ++ } ++ } + + if (seg_is_any_raid0(raid_seg)) { + log_error("Can't replace any devices in %s LV %s.", +@@ -6992,6 +7019,15 @@ try_again: + tmp_names[s] = tmp_names[sd] = NULL; + } + ++ /* Add integrity layer to any new images. 
*/ ++ if (has_integrity) { ++ struct integrity_settings *isettings = NULL; ++ if (!lv_get_raid_integrity_settings(lv, &isettings)) ++ return_0; ++ if (!lv_add_integrity_to_raid(lv, isettings, NULL, NULL)) ++ return_0; ++ } ++ + skip_alloc: + if (!lv_update_and_reload_origin(lv)) + return_0; +@@ -7014,9 +7050,43 @@ skip_alloc: + if (!rebuild) + for (s = 0; s < raid_seg->area_count; s++) { + sd = s + raid_seg->area_count; ++ + if (tmp_names[s] && tmp_names[sd]) { +- seg_metalv(raid_seg, s)->name = tmp_names[s]; +- seg_lv(raid_seg, s)->name = tmp_names[sd]; ++ struct logical_volume *lv_image = seg_lv(raid_seg, s); ++ struct logical_volume *lv_rmeta = seg_metalv(raid_seg, s); ++ ++ lv_rmeta->name = tmp_names[s]; ++ lv_image->name = tmp_names[sd]; ++ ++ if (lv_is_integrity(lv_image)) { ++ struct logical_volume *lv_imeta; ++ struct logical_volume *lv_iorig; ++ struct lv_segment *seg_image; ++ ++ seg_image = first_seg(lv_image); ++ lv_imeta = seg_image->integrity_meta_dev; ++ lv_iorig = seg_lv(seg_image, 0); ++ ++ if (dm_snprintf(tmp_name_buf, NAME_LEN, "%s_imeta", lv_image->name) < 0) { ++ stack; ++ continue; ++ } ++ if (!(tmp_name_dup = dm_pool_strdup(lv->vg->vgmem, tmp_name_buf))) { ++ stack; ++ continue; ++ } ++ lv_imeta->name = tmp_name_dup; ++ ++ if (dm_snprintf(tmp_name_buf, NAME_LEN, "%s_iorig", lv_image->name) < 0) { ++ stack; ++ continue; ++ } ++ if (!(tmp_name_dup = dm_pool_strdup(lv->vg->vgmem, tmp_name_buf))) { ++ stack; ++ continue; ++ } ++ lv_iorig->name = tmp_name_dup; ++ } + } + } + +@@ -7192,6 +7262,11 @@ int partial_raid_lv_supports_degraded_activation(const struct logical_volume *cl + { + int not_capable = 0; + struct logical_volume * lv = (struct logical_volume *)clv; /* drop const */ ++ ++ if (lv_raid_has_integrity(lv)) { ++ log_error("Integrity must be removed before degraded or partial activation of raid."); ++ return 0; ++ } + + if (!_lv_may_be_activated_in_degraded_mode(lv, ¬_capable) || not_capable) + return_0; +diff --git a/lib/metadata/segtype.h b/lib/metadata/segtype.h +index 22a511e..08ddc35 100644 +--- a/lib/metadata/segtype.h ++++ b/lib/metadata/segtype.h +@@ -67,6 +67,7 @@ struct dev_manager; + #define SEG_RAID6_N_6 (1ULL << 35) + #define SEG_RAID6 SEG_RAID6_ZR + #define SEG_WRITECACHE (1ULL << 36) ++#define SEG_INTEGRITY (1ULL << 37) + + #define SEG_STRIPED_TARGET (1ULL << 39) + #define SEG_LINEAR_TARGET (1ULL << 40) +@@ -84,6 +85,7 @@ struct dev_manager; + #define SEG_TYPE_NAME_CACHE "cache" + #define SEG_TYPE_NAME_CACHE_POOL "cache-pool" + #define SEG_TYPE_NAME_WRITECACHE "writecache" ++#define SEG_TYPE_NAME_INTEGRITY "integrity" + #define SEG_TYPE_NAME_ERROR "error" + #define SEG_TYPE_NAME_FREE "free" + #define SEG_TYPE_NAME_ZERO "zero" +@@ -117,6 +119,7 @@ struct dev_manager; + #define segtype_is_cache(segtype) ((segtype)->flags & SEG_CACHE ? 1 : 0) + #define segtype_is_cache_pool(segtype) ((segtype)->flags & SEG_CACHE_POOL ? 1 : 0) + #define segtype_is_writecache(segtype) ((segtype)->flags & SEG_WRITECACHE ? 1 : 0) ++#define segtype_is_integrity(segtype) ((segtype)->flags & SEG_INTEGRITY ? 1 : 0) + #define segtype_is_mirrored(segtype) ((segtype)->flags & SEG_AREAS_MIRRORED ? 1 : 0) + #define segtype_is_mirror(segtype) ((segtype)->flags & SEG_MIRROR ? 1 : 0) + #define segtype_is_pool(segtype) ((segtype)->flags & (SEG_CACHE_POOL | SEG_THIN_POOL) ? 
1 : 0) +@@ -179,6 +182,7 @@ struct dev_manager; + #define seg_is_cache(seg) segtype_is_cache((seg)->segtype) + #define seg_is_cache_pool(seg) segtype_is_cache_pool((seg)->segtype) + #define seg_is_writecache(seg) segtype_is_writecache((seg)->segtype) ++#define seg_is_integrity(seg) segtype_is_integrity((seg)->segtype) + #define seg_is_used_cache_pool(seg) (seg_is_cache_pool(seg) && (!dm_list_empty(&(seg->lv)->segs_using_this_lv))) + #define seg_is_linear(seg) (seg_is_striped(seg) && ((seg)->area_count == 1)) + #define seg_is_mirror(seg) segtype_is_mirror((seg)->segtype) +@@ -347,6 +351,8 @@ int init_vdo_segtypes(struct cmd_context *cmd, struct segtype_library *seglib); + + int init_writecache_segtypes(struct cmd_context *cmd, struct segtype_library *seglib); + ++int init_integrity_segtypes(struct cmd_context *cmd, struct segtype_library *seglib); ++ + #define CACHE_FEATURE_POLICY_MQ (1U << 0) + #define CACHE_FEATURE_POLICY_SMQ (1U << 1) + #define CACHE_FEATURE_METADATA2 (1U << 2) +diff --git a/lib/metadata/snapshot_manip.c b/lib/metadata/snapshot_manip.c +index 64e27ae..3faea0e 100644 +--- a/lib/metadata/snapshot_manip.c ++++ b/lib/metadata/snapshot_manip.c +@@ -387,6 +387,8 @@ int validate_snapshot_origin(const struct logical_volume *origin_lv) + } + } else if (lv_is_raid_type(origin_lv) && !lv_is_raid(origin_lv)) { + err = "raid subvolumes"; ++ } else if (lv_is_raid(origin_lv) && lv_raid_has_integrity((struct logical_volume *)origin_lv)) { ++ err = "raid with integrity"; + } else if (lv_is_writecache(origin_lv)) { + err = "writecache"; + } +diff --git a/lib/misc/lvm-string.c b/lib/misc/lvm-string.c +index 0ee3403..959a6a1 100644 +--- a/lib/misc/lvm-string.c ++++ b/lib/misc/lvm-string.c +@@ -166,7 +166,9 @@ static const char *_lvname_has_reserved_component_string(const char *lvname) + "_rmeta", + "_tdata", + "_tmeta", +- "_vdata" ++ "_vdata", ++ "_imeta", ++ "_iorig" + }; + unsigned i; + +diff --git a/lib/report/report.c b/lib/report/report.c +index d379e2a..170df69 100644 +--- a/lib/report/report.c ++++ b/lib/report/report.c +@@ -3173,7 +3173,7 @@ static int _copypercent_disp(struct dm_report *rh, + dm_percent_t percent = DM_PERCENT_INVALID; + + /* TODO: just cache passes through lvseg_percent... */ +- if (lv_is_cache(lv) || lv_is_used_cache_pool(lv) || ++ if (lv_is_integrity(lv) || lv_is_cache(lv) || lv_is_used_cache_pool(lv) || + (!lv_is_merging_origin(lv) && lv_is_raid(lv) && !seg_is_any_raid0(first_seg(lv)))) + percent = lvseg_percent_with_info_and_seg_status(lvdm, PERCENT_GET_DIRTY); + else if (lv_is_raid(lv) && !seg_is_any_raid0(first_seg(lv))) +diff --git a/man/lvmraid.7_main b/man/lvmraid.7_main +index 498de90..aedd16a 100644 +--- a/man/lvmraid.7_main ++++ b/man/lvmraid.7_main +@@ -785,6 +785,89 @@ configuration file itself. + activation_mode + + ++.SH Data Integrity ++ ++The device mapper integrity target can be used in combination with RAID ++levels 1,4,5,6,10 to detect and correct data corruption in RAID images. A ++dm-integrity layer is placed above each RAID image, and an extra sub LV is ++created to hold integrity metadata (data checksums) for each RAID image. ++When data is read from an image, integrity checksums are used to detect ++corruption. If detected, dm-raid reads the data from another (good) image ++to return to the caller. dm-raid will also automatically write the good ++data back to the image with bad data to correct the corruption. ++ ++When creating a RAID LV with integrity, or adding integrity, space is ++required for integrity metadata. 
Every 500MB of LV data requires an ++additional 4MB to be allocated for integrity metadata, for each RAID ++image. ++ ++Create a RAID LV with integrity: ++ ++.B lvcreate \-\-type raidN \-\-raidintegrity y ++ ++Add integrity to an existing RAID LV: ++ ++.B lvconvert --raidintegrity y ++.I LV ++ ++Remove integrity from a RAID LV: ++ ++.B lvconvert --raidintegrity n ++.I LV ++ ++.SS Integrity options ++ ++.B --raidintegritymode journal|bitmap ++ ++Use a journal (default) or bitmap for keeping integrity checksums ++consistent in case of a crash. The bitmap areas are recalculated after a ++crash, so corruption in those areas would not be detected. A journal does ++not have this problem. The journal mode doubles writes to storage, but ++can improve performance for scattered writes packed into a single journal ++write. bitmap mode can in theory achieve full write throughput of the ++device, but would not benefit from the potential scattered write ++optimization. ++ ++.B --raidintegrityblocksize 512|1024|2048|4096 ++ ++The block size to use for dm-integrity on raid images. The integrity ++block size should usually match the device logical block size, or the file ++system sector/block sizes. It may be less than the file system ++sector/block size, but not less than the device logical block size. ++Possible values: 512, 1024, 2048, 4096. ++ ++.SS Integrity initialization ++ ++When integrity is added to an LV, the kernel needs to initialize the ++integrity metadata (checksums) for all blocks in the LV. The data ++corruption checking performed by dm-integrity will only operate on areas ++of the LV that are already initialized. The progress of integrity ++initialization is reported by the "syncpercent" LV reporting field (and ++under the Cpy%Sync lvs column.) ++ ++.SS Integrity limitations ++ ++To work around some limitations, it is possible to remove integrity from ++the LV, make the change, then add integrity again. (Integrity metadata ++would need to initialized when added again.) ++ ++LVM must be able to allocate the integrity metadata sub LV on a single PV ++that is already in use by the associated RAID image. This can potentially ++cause a problem during lvextend if the original PV holding the image and ++integrity metadata is full. To work around this limitation, remove ++integrity, extend the LV, and add integrity again. ++ ++Additional RAID images can be added to raid1 LVs, but not to other raid ++levels. ++ ++A raid1 LV with integrity cannot be converted to linear (remove integrity ++to do this.) ++ ++RAID LVs with integrity cannot yet be used as sub LVs with other LV types. ++ ++The following are not yet permitted on RAID LVs with integrity: lvreduce, ++pvmove, snapshots, splitmirror, raid syncaction commands, raid rebuild. ++ + .SH RAID1 Tuning + + A RAID1 LV can be tuned so that certain devices are avoided for reading +diff --git a/test/lib/aux.sh b/test/lib/aux.sh +index 83a88a6..e40da95 100644 +--- a/test/lib/aux.sh ++++ b/test/lib/aux.sh +@@ -1563,6 +1563,14 @@ have_writecache() { + target_at_least dm-writecache "$@" + } + ++have_integrity() { ++ lvm segtypes 2>/dev/null | grep -q integrity$ || { ++ echo "integrity is not built-in." 
>&2 ++ return 1 ++ } ++ target_at_least dm-integrity "$@" ++} ++ + have_raid() { + target_at_least dm-raid "$@" + +diff --git a/test/shell/integrity-blocksize.sh b/test/shell/integrity-blocksize.sh +new file mode 100644 +index 0000000..444e3db +--- /dev/null ++++ b/test/shell/integrity-blocksize.sh +@@ -0,0 +1,183 @@ ++#!/usr/bin/env bash ++ ++# Copyright (C) 2018 Red Hat, Inc. All rights reserved. ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions ++# of the GNU General Public License v.2. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write to the Free Software Foundation, ++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ ++SKIP_WITH_LVMPOLLD=1 ++ ++. lib/inittest ++ ++aux have_integrity 1 5 0 || skip ++ ++losetup -h | grep sector-size || skip ++ ++# Tests with fs block sizes require a libblkid version that shows BLOCK_SIZE ++aux prepare_devs 1 ++vgcreate $vg "$dev1" ++lvcreate -n $lv1 -l8 $vg ++mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++blkid "$DM_DEV_DIR/$vg/$lv1" | grep BLOCK_SIZE || skip ++lvchange -an $vg ++vgremove -ff $vg ++ ++dd if=/dev/zero of=loopa bs=$((1024*1024)) count=64 2> /dev/null ++dd if=/dev/zero of=loopb bs=$((1024*1024)) count=64 2> /dev/null ++dd if=/dev/zero of=loopc bs=$((1024*1024)) count=64 2> /dev/null ++dd if=/dev/zero of=loopd bs=$((1024*1024)) count=64 2> /dev/null ++LOOP1=$(losetup -f loopa --show) ++LOOP2=$(losetup -f loopb --show) ++LOOP3=$(losetup -f loopc --sector-size 4096 --show) ++LOOP4=$(losetup -f loopd --sector-size 4096 --show) ++ ++echo $LOOP1 ++echo $LOOP2 ++echo $LOOP3 ++echo $LOOP4 ++ ++aux extend_filter "a|$LOOP1|" ++aux extend_filter "a|$LOOP2|" ++aux extend_filter "a|$LOOP3|" ++aux extend_filter "a|$LOOP4|" ++ ++aux lvmconf 'devices/scan = "/dev"' ++ ++vgcreate $vg1 $LOOP1 $LOOP2 ++vgcreate $vg2 $LOOP3 $LOOP4 ++ ++# lvcreate on dev512, result 512 ++lvcreate --type raid1 -m1 --raidintegrity y -l 8 -n $lv1 $vg1 ++pvck --dump metadata $LOOP1 | grep 'block_size = 512' ++lvremove -y $vg1/$lv1 ++ ++# lvcreate on dev4k, result 4k ++lvcreate --type raid1 -m1 --raidintegrity y -l 8 -n $lv1 $vg2 ++pvck --dump metadata $LOOP3 | grep 'block_size = 4096' ++lvremove -y $vg2/$lv1 ++ ++# lvcreate --bs 512 on dev4k, result fail ++not lvcreate --type raid1 -m1 --raidintegrity y --raidintegrityblocksize 512 -l 8 -n $lv1 $vg2 ++ ++# lvcreate --bs 4096 on dev512, result 4k ++lvcreate --type raid1 -m1 --raidintegrity y --raidintegrityblocksize 4096 -l 8 -n $lv1 $vg1 ++pvck --dump metadata $LOOP1 | grep 'block_size = 4096' ++lvremove -y $vg1/$lv1 ++ ++# Test an unknown fs block size by simply not creating a fs on the lv. 
++ ++# lvconvert on dev512, fsunknown, result 512 ++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1 ++# clear any residual fs so that libblkid cannot find an fs block size ++aux wipefs_a /dev/$vg1/$lv1 ++lvconvert --raidintegrity y $vg1/$lv1 ++pvck --dump metadata $LOOP1 | grep 'block_size = 512' ++lvremove -y $vg1/$lv1 ++ ++# lvconvert on dev4k, fsunknown, result 4k ++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2 ++# clear any residual fs so that libblkid cannot find an fs block size ++aux wipefs_a /dev/$vg2/$lv1 ++lvconvert --raidintegrity y $vg2/$lv1 ++pvck --dump metadata $LOOP3 | grep 'block_size = 4096' ++lvremove -y $vg2/$lv1 ++ ++# lvconvert --bs 4k on dev512, fsunknown, result fail ++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1 ++# clear any residual fs so that libblkid cannot find an fs block size ++aux wipefs_a /dev/$vg1/$lv1 ++not lvconvert --raidintegrity y --raidintegrityblocksize 4096 $vg1/$lv1 ++lvremove -y $vg1/$lv1 ++ ++# lvconvert --bs 512 on dev4k, fsunknown, result fail ++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2 ++# clear any residual fs so that libblkid cannot find an fs block size ++aux wipefs_a /dev/$vg2/$lv1 ++not lvconvert --raidintegrity y --raidintegrityblocksize 512 $vg2/$lv1 ++lvremove -y $vg2/$lv1 ++ ++# lvconvert on dev512, xfs 512, result 512 ++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1 ++aux wipefs_a /dev/$vg1/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg1/$lv1" ++lvconvert --raidintegrity y $vg1/$lv1 ++pvck --dump metadata $LOOP1 | grep 'block_size = 512' ++lvremove -y $vg1/$lv1 ++ ++# lvconvert on dev4k, xfs 4096, result 4096 ++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2 ++aux wipefs_a /dev/$vg2/$lv1 ++mkfs.xfs -f "$DM_DEV_DIR/$vg2/$lv1" ++lvconvert --raidintegrity y $vg2/$lv1 ++pvck --dump metadata $LOOP3 | grep 'block_size = 4096' ++lvremove -y $vg2/$lv1 ++ ++# lvconvert on dev512, ext4 1024, result 1024 ++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1 ++aux wipefs_a /dev/$vg1/$lv1 ++mkfs.ext4 -b 1024 "$DM_DEV_DIR/$vg1/$lv1" ++lvconvert --raidintegrity y $vg1/$lv1 ++pvck --dump metadata $LOOP1 | grep 'block_size = 1024' ++lvremove -y $vg1/$lv1 ++ ++# lvconvert on dev4k, ext4 4096, result 4096 ++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2 ++aux wipefs_a /dev/$vg2/$lv1 ++mkfs.ext4 "$DM_DEV_DIR/$vg2/$lv1" ++lvconvert --raidintegrity y $vg2/$lv1 ++pvck --dump metadata $LOOP3 | grep 'block_size = 4096' ++lvremove -y $vg2/$lv1 ++ ++# lvconvert --bs 512 on dev512, xfs 4096, result 512 ++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1 ++aux wipefs_a /dev/$vg1/$lv1 ++mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg1/$lv1" ++lvconvert --raidintegrity y --raidintegrityblocksize 512 $vg1/$lv1 ++pvck --dump metadata $LOOP1 | grep 'block_size = 512' ++lvremove -y $vg1/$lv1 ++ ++# lvconvert --bs 1024 on dev512, xfs 4096, result 1024 ++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1 ++aux wipefs_a /dev/$vg1/$lv1 ++mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg1/$lv1" ++lvconvert --raidintegrity y --raidintegrityblocksize 1024 $vg1/$lv1 ++pvck --dump metadata $LOOP1 | grep 'block_size = 1024' ++lvremove -y $vg1/$lv1 ++ ++# lvconvert --bs 512 on dev512, ext4 1024, result 512 ++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1 ++aux wipefs_a /dev/$vg1/$lv1 ++mkfs.ext4 -b 1024 "$DM_DEV_DIR/$vg1/$lv1" ++lvconvert --raidintegrity y --raidintegrityblocksize 512 $vg1/$lv1 ++pvck --dump metadata $LOOP1 | grep 'block_size = 512' ++lvremove -y $vg1/$lv1 ++ ++# lvconvert --bs 512 on dev4k, ext4 4096, result fail ++lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2 ++aux wipefs_a /dev/$vg2/$lv1 ++mkfs.ext4 
"$DM_DEV_DIR/$vg2/$lv1" ++not lvconvert --raidintegrity y --raidintegrityblocksize 512 $vg2/$lv1 ++lvremove -y $vg2/$lv1 ++ ++# FIXME: need to use scsi_debug to create devs with LBS 512 PBS 4k ++# FIXME: lvconvert, fsunknown, LBS 512, PBS 4k: result 512 ++# FIXME: lvconvert --bs 512, fsunknown, LBS 512, PBS 4k: result 512 ++# FIXME: lvconvert --bs 4k, fsunknown, LBS 512, PBS 4k: result 4k ++ ++vgremove -ff $vg1 ++vgremove -ff $vg2 ++ ++losetup -d $LOOP1 ++losetup -d $LOOP2 ++losetup -d $LOOP3 ++losetup -d $LOOP4 ++rm loopa ++rm loopb ++rm loopc ++rm loopd ++ +diff --git a/test/shell/integrity-dmeventd.sh b/test/shell/integrity-dmeventd.sh +new file mode 100644 +index 0000000..58899ca +--- /dev/null ++++ b/test/shell/integrity-dmeventd.sh +@@ -0,0 +1,289 @@ ++#!/usr/bin/env bash ++ ++# Copyright (C) 2018 Red Hat, Inc. All rights reserved. ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions ++# of the GNU General Public License v.2. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write to the Free Software Foundation, ++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ ++SKIP_WITH_LVMPOLLD=1 ++ ++. lib/inittest ++ ++aux have_integrity 1 5 0 || skip ++which mkfs.xfs || skip ++ ++mnt="mnt" ++mkdir -p $mnt ++ ++aux prepare_devs 6 64 ++ ++for i in `seq 1 16384`; do echo -n "A" >> fileA; done ++for i in `seq 1 16384`; do echo -n "B" >> fileB; done ++for i in `seq 1 16384`; do echo -n "C" >> fileC; done ++ ++# generate random data ++dd if=/dev/urandom of=randA bs=512K count=2 ++dd if=/dev/urandom of=randB bs=512K count=3 ++dd if=/dev/urandom of=randC bs=512K count=4 ++ ++_prepare_vg() { ++ vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4" ++ pvs ++} ++ ++_add_new_data_to_mnt() { ++ mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++ ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ ++ # add original data ++ cp randA $mnt ++ cp randB $mnt ++ cp randC $mnt ++ mkdir $mnt/1 ++ cp fileA $mnt/1 ++ cp fileB $mnt/1 ++ cp fileC $mnt/1 ++ mkdir $mnt/2 ++ cp fileA $mnt/2 ++ cp fileB $mnt/2 ++ cp fileC $mnt/2 ++} ++ ++_add_more_data_to_mnt() { ++ mkdir $mnt/more ++ cp fileA $mnt/more ++ cp fileB $mnt/more ++ cp fileC $mnt/more ++ cp randA $mnt/more ++ cp randB $mnt/more ++ cp randC $mnt/more ++} ++ ++_verify_data_on_mnt() { ++ diff randA $mnt/randA ++ diff randB $mnt/randB ++ diff randC $mnt/randC ++ diff fileA $mnt/1/fileA ++ diff fileB $mnt/1/fileB ++ diff fileC $mnt/1/fileC ++ diff fileA $mnt/2/fileA ++ diff fileB $mnt/2/fileB ++ diff fileC $mnt/2/fileC ++} ++ ++_verify_data_on_lv() { ++ lvchange -ay $vg/$lv1 ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ _verify_data_on_mnt ++ rm $mnt/randA ++ rm $mnt/randB ++ rm $mnt/randC ++ rm -rf $mnt/1 ++ rm -rf $mnt/2 ++ umount $mnt ++ lvchange -an $vg/$lv1 ++} ++ ++_sync_percent() { ++ local checklv=$1 ++ get lv_field "$checklv" sync_percent | cut -d. 
-f1 ++} ++ ++_wait_recalc() { ++ local checklv=$1 ++ ++ for i in $(seq 1 10) ; do ++ sync=$(_sync_percent "$checklv") ++ echo "sync_percent is $sync" ++ ++ if test "$sync" = "100"; then ++ return ++ fi ++ ++ sleep 1 ++ done ++ ++ echo "timeout waiting for recalc" ++ return 1 ++} ++ ++aux lvmconf \ ++ 'activation/raid_fault_policy = "allocate"' ++ ++aux prepare_dmeventd ++ ++# raid1, one device fails, dmeventd calls repair ++ ++vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4" ++lvcreate --type raid1 -m 2 --raidintegrity y --ignoremonitoring -l 8 -n $lv1 $vg "$dev1" "$dev2" "$dev3" ++lvchange --monitor y $vg/$lv1 ++lvs -a -o+devices $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_wait_recalc $vg/${lv1}_rimage_2 ++aux wait_for_sync $vg $lv1 ++_add_new_data_to_mnt ++ ++aux disable_dev "$dev2" ++ ++# wait for dmeventd to call lvconvert --repair which should ++# replace dev2 with dev4 ++sleep 5 ++ ++lvs -a -o+devices $vg > out ++cat out ++not grep "$dev2" out ++grep "$dev4" out ++ ++_add_more_data_to_mnt ++_verify_data_on_mnt ++ ++aux enable_dev "$dev2" ++ ++lvs -a -o+devices $vg > out ++cat out ++not grep "$dev2" out ++grep "$dev4" out ++grep "$dev1" out ++grep "$dev3" out ++ ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# raid1, two devices fail, dmeventd calls repair ++ ++vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" ++lvcreate --type raid1 -m 2 --raidintegrity y --ignoremonitoring -l 8 -n $lv1 $vg "$dev1" "$dev2" "$dev3" ++lvchange --monitor y $vg/$lv1 ++lvs -a -o+devices $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_wait_recalc $vg/${lv1}_rimage_2 ++aux wait_for_sync $vg $lv1 ++_add_new_data_to_mnt ++ ++aux disable_dev "$dev2" ++aux disable_dev "$dev1" ++ ++# wait for dmeventd to call lvconvert --repair which should ++# replace dev1 and dev2 with dev4 and dev5 ++sleep 5 ++ ++lvs -a -o+devices $vg > out ++cat out ++not grep "$dev1" out ++not grep "$dev2" out ++grep "$dev4" out ++grep "$dev5" out ++grep "$dev3" out ++ ++_add_more_data_to_mnt ++_verify_data_on_mnt ++ ++aux enable_dev "$dev1" ++aux enable_dev "$dev2" ++ ++lvs -a -o+devices $vg > out ++cat out ++not grep "$dev1" out ++not grep "$dev2" out ++grep "$dev4" out ++grep "$dev5" out ++grep "$dev3" out ++ ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# raid6, one device fails, dmeventd calls repair ++ ++vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" ++lvcreate --type raid6 --raidintegrity y --ignoremonitoring -l 8 -n $lv1 $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" ++lvchange --monitor y $vg/$lv1 ++lvs -a -o+devices $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_wait_recalc $vg/${lv1}_rimage_2 ++_wait_recalc $vg/${lv1}_rimage_3 ++_wait_recalc $vg/${lv1}_rimage_4 ++aux wait_for_sync $vg $lv1 ++_add_new_data_to_mnt ++ ++aux disable_dev "$dev2" ++ ++# wait for dmeventd to call lvconvert --repair which should ++# replace dev2 with dev6 ++sleep 5 ++ ++lvs -a -o+devices $vg > out ++cat out ++not grep "$dev2" out ++grep "$dev6" out ++ ++_add_more_data_to_mnt ++_verify_data_on_mnt ++ ++aux enable_dev "$dev2" ++ ++lvs -a -o+devices $vg > out ++cat out ++not grep "$dev2" out ++grep "$dev6" out ++ ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# raid10, one device fails, dmeventd calls repair ++ ++vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4" 
"$dev5" ++lvcreate --type raid10 --raidintegrity y --ignoremonitoring -l 8 -n $lv1 $vg "$dev1" "$dev2" "$dev3" "$dev4" ++lvchange --monitor y $vg/$lv1 ++lvs -a -o+devices $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_wait_recalc $vg/${lv1}_rimage_2 ++_wait_recalc $vg/${lv1}_rimage_3 ++aux wait_for_sync $vg $lv1 ++_add_new_data_to_mnt ++ ++aux disable_dev "$dev1" ++ ++# wait for dmeventd to call lvconvert --repair which should ++# replace dev1 with dev5 ++sleep 5 ++ ++lvs -a -o+devices $vg > out ++cat out ++not grep "$dev1" out ++grep "$dev5" out ++ ++_add_more_data_to_mnt ++_verify_data_on_mnt ++ ++aux enable_dev "$dev1" ++ ++lvs -a -o+devices $vg > out ++cat out ++not grep "$dev1" out ++grep "$dev5" out ++ ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ +diff --git a/test/shell/integrity-large.sh b/test/shell/integrity-large.sh +new file mode 100644 +index 0000000..0c36e4d +--- /dev/null ++++ b/test/shell/integrity-large.sh +@@ -0,0 +1,175 @@ ++#!/usr/bin/env bash ++ ++# Copyright (C) 2018 Red Hat, Inc. All rights reserved. ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions ++# of the GNU General Public License v.2. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write to the Free Software Foundation, ++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ ++# Test writecache usage ++ ++SKIP_WITH_LVMPOLLD=1 ++ ++. lib/inittest ++ ++aux have_integrity 1 5 0 || skip ++which mkfs.xfs || skip ++ ++mnt="mnt" ++mkdir -p $mnt ++ ++# raid1 LV needs to be extended to 512MB to test imeta being exended ++aux prepare_devs 4 600 ++ ++for i in `seq 1 16384`; do echo -n "A" >> fileA; done ++for i in `seq 1 16384`; do echo -n "B" >> fileB; done ++for i in `seq 1 16384`; do echo -n "C" >> fileC; done ++ ++# generate random data ++dd if=/dev/urandom of=randA bs=512K count=2 ++dd if=/dev/urandom of=randB bs=512K count=3 ++dd if=/dev/urandom of=randC bs=512K count=4 ++ ++_prepare_vg() { ++ vgcreate $SHARED $vg "$dev1" "$dev2" ++ pvs ++} ++ ++_add_data_to_lv() { ++ mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++ ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ ++ # add original data ++ cp randA $mnt ++ cp randB $mnt ++ cp randC $mnt ++ mkdir $mnt/1 ++ cp fileA $mnt/1 ++ cp fileB $mnt/1 ++ cp fileC $mnt/1 ++ mkdir $mnt/2 ++ cp fileA $mnt/2 ++ cp fileB $mnt/2 ++ cp fileC $mnt/2 ++ ++ umount $mnt ++} ++ ++_verify_data_on_lv() { ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ ++ diff randA $mnt/randA ++ diff randB $mnt/randB ++ diff randC $mnt/randC ++ diff fileA $mnt/1/fileA ++ diff fileB $mnt/1/fileB ++ diff fileC $mnt/1/fileC ++ diff fileA $mnt/2/fileA ++ diff fileB $mnt/2/fileB ++ diff fileC $mnt/2/fileC ++ ++ umount $mnt ++} ++ ++_sync_percent() { ++ local checklv=$1 ++ get lv_field "$checklv" sync_percent | cut -d. -f1 ++} ++ ++_wait_recalc() { ++ local checklv=$1 ++ ++ for i in $(seq 1 10) ; do ++ sync=$(_sync_percent "$checklv") ++ echo "sync_percent is $sync" ++ ++ if test "$sync" = "100"; then ++ return ++ fi ++ ++ sleep 1 ++ done ++ ++ echo "timeout waiting for recalc" ++ return 1 ++} ++ ++# lvextend to 512MB is needed for the imeta LV to ++# be extended from 4MB to 8MB. 
++ ++_prepare_vg ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvchange -an $vg/$lv1 ++lvchange -ay $vg/$lv1 ++_add_data_to_lv ++lvconvert --raidintegrity y $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++_verify_data_on_lv ++lvchange -an $vg/$lv1 ++lvextend -L 512M $vg/$lv1 ++lvs -a -o+devices $vg ++lvchange -ay $vg/$lv1 ++_verify_data_on_lv ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++check lv_field $vg/${lv1}_rimage_0_imeta size "8.00m" ++check lv_field $vg/${lv1}_rimage_1_imeta size "8.00m" ++ ++# provide space to extend the images onto new devs ++vgextend $vg "$dev3" "$dev4" ++ ++# extending the images is possible using dev3,dev4 ++# but extending imeta on the existing dev1,dev2 fails ++not lvextend -L +512M $vg/$lv1 ++ ++# removing integrity will permit extending the images ++# using dev3,dev4 since imeta limitation is gone ++lvconvert --raidintegrity n $vg/$lv1 ++lvextend -L +512M $vg/$lv1 ++lvs -a -o+devices $vg ++ ++# adding integrity again will allocate new 12MB imeta LVs ++# on dev3,dev4 ++lvconvert --raidintegrity y $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++check lv_field $vg/${lv1}_rimage_0_imeta size "12.00m" ++check lv_field $vg/${lv1}_rimage_1_imeta size "12.00m" ++ ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++ ++# this succeeds because dev1,dev2 can hold rmeta+rimage ++lvcreate --type raid1 -n $lv1 -L 592M -an $vg "$dev1" "$dev2" ++ ++# this fails because dev1,dev2 can hold rmeta+rimage, but not imeta ++# and we require imeta to be on same devs as rmeta/rimeta ++not lvcreate --type raid1 --raidintegrity y -n $lv1 -L 592M -an $vg "$dev1" "$dev2" ++lvs -a -o+devices $vg ++lvremove $vg/$lv1 ++ ++# this can allocate from more devs so there's enough space for imeta to ++# be allocated in the vg, but lvcreate fails because rmeta+rimage are ++# allocated from dev1,dev2, we restrict imeta to being allocated on the ++# same devs as rmeta/rimage, and dev1,dev2 can't fit imeta. ++not lvcreate --type raid1 --raidintegrity y -n $lv1 -L 592M -an $vg ++lvs -a -o+devices $vg ++ ++# counterintuitively, increasing the size will allow lvcreate to succeed ++# because rmeta+rimage are pushed to being allocated on dev1,dev2,dev3,dev4 ++# which means imeta is now free to be allocated from dev3,dev4 which have ++# plenty of space ++lvcreate --type raid1 --raidintegrity y -n $lv1 -L 600M -an $vg ++lvs -a -o+devices $vg ++ ++vgremove -ff $vg ++ +diff --git a/test/shell/integrity-misc.sh b/test/shell/integrity-misc.sh +new file mode 100644 +index 0000000..73b0a67 +--- /dev/null ++++ b/test/shell/integrity-misc.sh +@@ -0,0 +1,228 @@ ++#!/usr/bin/env bash ++ ++# Copyright (C) 2018 Red Hat, Inc. All rights reserved. ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions ++# of the GNU General Public License v.2. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write to the Free Software Foundation, ++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ ++SKIP_WITH_LVMPOLLD=1 ++ ++. 
lib/inittest ++ ++aux have_integrity 1 5 0 || skip ++which mkfs.xfs || skip ++ ++mnt="mnt" ++mkdir -p $mnt ++ ++aux prepare_devs 5 64 ++ ++for i in `seq 1 16384`; do echo -n "A" >> fileA; done ++for i in `seq 1 16384`; do echo -n "B" >> fileB; done ++for i in `seq 1 16384`; do echo -n "C" >> fileC; done ++ ++# generate random data ++dd if=/dev/urandom of=randA bs=512K count=2 ++dd if=/dev/urandom of=randB bs=512K count=3 ++dd if=/dev/urandom of=randC bs=512K count=4 ++ ++_prepare_vg() { ++ vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" ++ pvs ++} ++ ++_add_new_data_to_mnt() { ++ mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++ ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ ++ # add original data ++ cp randA $mnt ++ cp randB $mnt ++ cp randC $mnt ++ mkdir $mnt/1 ++ cp fileA $mnt/1 ++ cp fileB $mnt/1 ++ cp fileC $mnt/1 ++ mkdir $mnt/2 ++ cp fileA $mnt/2 ++ cp fileB $mnt/2 ++ cp fileC $mnt/2 ++} ++ ++_add_more_data_to_mnt() { ++ mkdir $mnt/more ++ cp fileA $mnt/more ++ cp fileB $mnt/more ++ cp fileC $mnt/more ++ cp randA $mnt/more ++ cp randB $mnt/more ++ cp randC $mnt/more ++} ++ ++_verify_data_on_mnt() { ++ diff randA $mnt/randA ++ diff randB $mnt/randB ++ diff randC $mnt/randC ++ diff fileA $mnt/1/fileA ++ diff fileB $mnt/1/fileB ++ diff fileC $mnt/1/fileC ++ diff fileA $mnt/2/fileA ++ diff fileB $mnt/2/fileB ++ diff fileC $mnt/2/fileC ++} ++ ++_verify_data_on_lv() { ++ lvchange -ay $vg/$lv1 ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ _verify_data_on_mnt ++ rm $mnt/randA ++ rm $mnt/randB ++ rm $mnt/randC ++ rm -rf $mnt/1 ++ rm -rf $mnt/2 ++ umount $mnt ++ lvchange -an $vg/$lv1 ++} ++ ++_sync_percent() { ++ local checklv=$1 ++ get lv_field "$checklv" sync_percent | cut -d. -f1 ++} ++ ++_wait_recalc() { ++ local checklv=$1 ++ ++ for i in $(seq 1 10) ; do ++ sync=$(_sync_percent "$checklv") ++ echo "sync_percent is $sync" ++ ++ if test "$sync" = "100"; then ++ return ++ fi ++ ++ sleep 1 ++ done ++ ++ echo "timeout waiting for recalc" ++ return 1 ++} ++ ++# lvrename ++_prepare_vg ++lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_new_data_to_mnt ++umount $mnt ++lvrename $vg/$lv1 $vg/$lv2 ++mount "$DM_DEV_DIR/$vg/$lv2" $mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv2 ++lvremove $vg/$lv2 ++vgremove -ff $vg ++ ++# lvconvert --replace ++# an existing dev is replaced with another dev ++# lv must be active ++_prepare_vg ++lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_new_data_to_mnt ++lvconvert --replace "$dev1" $vg/$lv1 "$dev3" ++lvs -a -o+devices $vg > out ++cat out ++grep "$dev2" out ++grep "$dev3" out ++not grep "$dev1" out ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# lvconvert --replace ++# same as prev but with bitmap mode ++_prepare_vg ++lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg "$dev1" "$dev2" ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_new_data_to_mnt ++lvconvert --replace "$dev1" $vg/$lv1 "$dev3" ++lvs -a -o+devices $vg > out ++cat out ++grep "$dev2" out ++grep "$dev3" out ++not grep "$dev1" out ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# lvconvert --repair ++# while lv is active a device goes missing 
(with rimage,rmeta,imeta,orig). ++# lvconvert --repair should replace the missing dev with another, ++# (like lvconvert --replace does for a dev that's not missing). ++_prepare_vg ++lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_new_data_to_mnt ++aux disable_dev "$dev2" ++lvs -a -o+devices $vg > out ++cat out ++grep unknown out ++lvconvert -vvvv -y --repair $vg/$lv1 ++lvs -a -o+devices $vg > out ++cat out ++not grep "$dev2" out ++not grep unknown out ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++aux enable_dev "$dev2" ++vgremove -ff $vg ++ ++# lvchange activationmode ++# a device is missing (with rimage,rmeta,imeta,iorig), the lv ++# is already inactive, and it cannot be activated, with ++# activationmode degraded or partial, or in any way, ++# until integrity is removed. ++ ++_prepare_vg ++lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_new_data_to_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++aux disable_dev "$dev2" ++lvs -a -o+devices $vg ++not lvchange -ay $vg/$lv1 ++not lvchange -ay --activationmode degraded $vg/$lv1 ++not lvchange -ay --activationmode partial $vg/$lv1 ++lvconvert --raidintegrity n $vg/$lv1 ++lvchange -ay --activationmode degraded $vg/$lv1 ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++lvremove $vg/$lv1 ++aux enable_dev "$dev2" ++vgremove -ff $vg ++ +diff --git a/test/shell/integrity.sh b/test/shell/integrity.sh +new file mode 100644 +index 0000000..7e4f2cb +--- /dev/null ++++ b/test/shell/integrity.sh +@@ -0,0 +1,735 @@ ++#!/usr/bin/env bash ++ ++# Copyright (C) 2018 Red Hat, Inc. All rights reserved. ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions ++# of the GNU General Public License v.2. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write to the Free Software Foundation, ++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ ++SKIP_WITH_LVMPOLLD=1 ++ ++. 
lib/inittest ++ ++aux have_integrity 1 5 0 || skip ++which mkfs.xfs || skip ++which xfs_growfs || skip ++ ++mnt="mnt" ++mkdir -p $mnt ++ ++aux prepare_devs 5 64 ++ ++for i in `seq 1 16384`; do echo -n "A" >> fileA; done ++for i in `seq 1 16384`; do echo -n "B" >> fileB; done ++for i in `seq 1 16384`; do echo -n "C" >> fileC; done ++ ++# generate random data ++dd if=/dev/urandom of=randA bs=512K count=2 ++dd if=/dev/urandom of=randB bs=512K count=3 ++dd if=/dev/urandom of=randC bs=512K count=4 ++ ++_prepare_vg() { ++ # zero devs so we are sure to find the correct file data ++ # on the underlying devs when corrupting it ++ dd if=/dev/zero of="$dev1" || true ++ dd if=/dev/zero of="$dev2" || true ++ dd if=/dev/zero of="$dev3" || true ++ dd if=/dev/zero of="$dev4" || true ++ dd if=/dev/zero of="$dev5" || true ++ vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" ++ pvs ++} ++ ++_test_fs_with_error() { ++ mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++ ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ ++ # add original data ++ cp fileA $mnt ++ cp fileB $mnt ++ cp fileC $mnt ++ ++ umount $mnt ++ lvchange -an $vg/$lv1 ++ ++ # corrupt the original data on the underying dev ++ # flip one bit in fileB, changing a 0x42 to 0x43 ++ # the bit is changed in the last 4096 byte block ++ # of the file, so when reading back the file we ++ # will get the first three 4096 byte blocks, for ++ # a total of 12288 bytes before getting an error ++ # on the last 4096 byte block. ++ xxd "$dev1" > dev1.txt ++ tac dev1.txt > dev1.rev ++ sed -e '0,/4242 4242 4242 4242 4242 4242 4242 4242/ s/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.rev > dev1.rev.bad ++ tac dev1.rev.bad > dev1.bad ++ xxd -r dev1.bad > "$dev1" ++ rm dev1.txt dev1.rev dev1.rev.bad dev1.bad ++ ++ lvchange -ay $vg/$lv1 ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ ++ # read complete fileA which was not corrupted ++ dd if=$mnt/fileA of=tmp bs=1k ++ ls -l tmp ++ stat -c %s tmp ++ diff fileA tmp ++ rm tmp ++ ++ # read partial fileB which was corrupted ++ not dd if=$mnt/fileB of=tmp bs=1k ++ ls -l tmp ++ stat -c %s tmp | grep 12288 ++ not diff fileB tmp ++ rm tmp ++ ++ umount $mnt ++} ++ ++_test_fs_with_raid() { ++ mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++ ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ ++ # add original data ++ cp fileA $mnt ++ cp fileB $mnt ++ cp fileC $mnt ++ ++ umount $mnt ++ lvchange -an $vg/$lv1 ++ ++ xxd "$dev1" > dev1.txt ++ tac dev1.txt > dev1.rev ++ sed -e '0,/4242 4242 4242 4242 4242 4242 4242 4242/ s/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.rev > dev1.rev.bad ++ tac dev1.rev.bad > dev1.bad ++ xxd -r dev1.bad > "$dev1" ++ rm dev1.txt dev1.rev dev1.rev.bad dev1.bad ++ ++ lvchange -ay $vg/$lv1 ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ ++ # read complete fileA which was not corrupted ++ dd if=$mnt/fileA of=tmp bs=1k ++ ls -l tmp ++ stat -c %s tmp | grep 16384 ++ diff fileA tmp ++ rm tmp ++ ++ # read complete fileB, corruption is corrected by raid ++ dd if=$mnt/fileB of=tmp bs=1k ++ ls -l tmp ++ stat -c %s tmp | grep 16384 ++ diff fileB tmp ++ rm tmp ++ ++ umount $mnt ++} ++ ++_add_new_data_to_mnt() { ++ mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1" ++ ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ ++ # add original data ++ cp randA $mnt ++ cp randB $mnt ++ cp randC $mnt ++ mkdir $mnt/1 ++ cp fileA $mnt/1 ++ cp fileB $mnt/1 ++ cp fileC $mnt/1 ++ mkdir $mnt/2 ++ cp fileA $mnt/2 ++ cp fileB $mnt/2 ++ cp fileC $mnt/2 ++} ++ ++_add_more_data_to_mnt() { ++ mkdir $mnt/more ++ cp fileA $mnt/more ++ cp 
fileB $mnt/more ++ cp fileC $mnt/more ++ cp randA $mnt/more ++ cp randB $mnt/more ++ cp randC $mnt/more ++} ++ ++_verify_data_on_mnt() { ++ diff randA $mnt/randA ++ diff randB $mnt/randB ++ diff randC $mnt/randC ++ diff fileA $mnt/1/fileA ++ diff fileB $mnt/1/fileB ++ diff fileC $mnt/1/fileC ++ diff fileA $mnt/2/fileA ++ diff fileB $mnt/2/fileB ++ diff fileC $mnt/2/fileC ++} ++ ++_verify_data_on_lv() { ++ lvchange -ay $vg/$lv1 ++ mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++ _verify_data_on_mnt ++ rm $mnt/randA ++ rm $mnt/randB ++ rm $mnt/randC ++ rm -rf $mnt/1 ++ rm -rf $mnt/2 ++ umount $mnt ++ lvchange -an $vg/$lv1 ++} ++ ++_sync_percent() { ++ local checklv=$1 ++ get lv_field "$checklv" sync_percent | cut -d. -f1 ++} ++ ++_wait_recalc() { ++ local checklv=$1 ++ ++ for i in $(seq 1 10) ; do ++ sync=$(_sync_percent "$checklv") ++ echo "sync_percent is $sync" ++ ++ if test "$sync" = "100"; then ++ return ++ fi ++ ++ sleep 1 ++ done ++ ++ echo "timeout waiting for recalc" ++ return 1 ++} ++ ++# Test corrupting data on an image and verifying that ++# it is detected by integrity and corrected by raid. ++ ++_prepare_vg ++lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg ++_test_fs_with_raid ++lvchange -an $vg/$lv1 ++lvconvert --raidintegrity n $vg/$lv1 ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid1 -m2 --raidintegrity y -n $lv1 -l 8 $vg ++_test_fs_with_raid ++lvchange -an $vg/$lv1 ++lvconvert --raidintegrity n $vg/$lv1 ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg ++_test_fs_with_raid ++lvchange -an $vg/$lv1 ++lvconvert --raidintegrity n $vg/$lv1 ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg ++_test_fs_with_raid ++lvchange -an $vg/$lv1 ++lvconvert --raidintegrity n $vg/$lv1 ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg ++_test_fs_with_raid ++lvchange -an $vg/$lv1 ++lvconvert --raidintegrity n $vg/$lv1 ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg ++_test_fs_with_raid ++lvchange -an $vg/$lv1 ++lvconvert --raidintegrity n $vg/$lv1 ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# Test removing integrity from an active LV ++ ++_prepare_vg ++lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_new_data_to_mnt ++lvconvert --raidintegrity n $vg/$lv1 ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_new_data_to_mnt ++lvconvert --raidintegrity n $vg/$lv1 ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_new_data_to_mnt ++lvconvert --raidintegrity n $vg/$lv1 ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc 
$vg/${lv1}_rimage_1 ++_add_new_data_to_mnt ++lvconvert --raidintegrity n $vg/$lv1 ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_new_data_to_mnt ++lvconvert --raidintegrity n $vg/$lv1 ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# Test adding integrity to an active LV ++ ++_prepare_vg ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++_add_new_data_to_mnt ++lvconvert --raidintegrity y $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid4 -n $lv1 -l 8 $vg ++_add_new_data_to_mnt ++lvconvert --raidintegrity y $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid5 -n $lv1 -l 8 $vg ++_add_new_data_to_mnt ++lvconvert --raidintegrity y $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid6 -n $lv1 -l 8 $vg ++_add_new_data_to_mnt ++lvconvert --raidintegrity y $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid10 -n $lv1 -l 8 $vg ++_add_new_data_to_mnt ++lvconvert --raidintegrity y $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# Test lvextend while inactive ++ ++_prepare_vg ++lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++_add_new_data_to_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++lvextend -l 16 $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++xfs_growfs $mnt ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++_add_new_data_to_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++lvextend -l 16 $vg/$lv1 ++lvchange -ay $vg/$lv1 ++mount "$DM_DEV_DIR/$vg/$lv1" $mnt ++xfs_growfs $mnt ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# Test lvextend while active ++ ++_prepare_vg ++lvcreate --type 
raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++_add_new_data_to_mnt ++lvextend -l 16 $vg/$lv1 ++xfs_growfs $mnt ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++_add_new_data_to_mnt ++lvextend -l 16 $vg/$lv1 ++xfs_growfs $mnt ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++_add_new_data_to_mnt ++lvextend -l 16 $vg/$lv1 ++xfs_growfs $mnt ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# Test adding image to raid1 ++ ++_prepare_vg ++lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++_add_new_data_to_mnt ++lvconvert -y -m+1 $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_wait_recalc $vg/${lv1}_rimage_2 ++lvs -a -o+devices $vg ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# Test removing image from raid1 ++ ++_prepare_vg ++lvcreate --type raid1 -m2 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_wait_recalc $vg/${lv1}_rimage_2 ++lvs -a -o+devices $vg ++_add_new_data_to_mnt ++lvconvert -y -m-1 $vg/$lv1 ++lvs -a -o+devices $vg ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# Test disallowed operations on raid+integrity ++ ++_prepare_vg ++lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++_add_new_data_to_mnt ++not lvconvert -y -m-1 $vg/$lv1 ++not lvconvert --splitmirrors 1 -n tmp -y $vg/$lv1 ++not lvconvert --splitmirrors 1 --trackchanges -y $vg/$lv1 ++not lvchange --syncaction check $vg/$lv1 ++not lvchange --syncaction repair $vg/$lv1 ++not lvreduce -L4M $vg/$lv1 ++not lvcreate -s -n snap -L4M $vg/$lv1 ++not pvmove -n $vg/$lv1 "$dev1" ++not pvmove "$dev1" ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# Repeat many of the tests above using bitmap mode ++ ++_prepare_vg ++lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg ++_test_fs_with_raid ++lvchange -an $vg/$lv1 ++lvconvert --raidintegrity n $vg/$lv1 ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++_prepare_vg ++lvcreate --type raid6 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg ++_test_fs_with_raid ++lvchange -an $vg/$lv1 
++lvconvert --raidintegrity n $vg/$lv1 ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# remove from active lv ++_prepare_vg ++lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_new_data_to_mnt ++lvconvert --raidintegrity n $vg/$lv1 ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# add to active lv ++_prepare_vg ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++_add_new_data_to_mnt ++lvconvert --raidintegrity y --raidintegritymode bitmap $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# lvextend active ++_prepare_vg ++lvcreate --type raid1 --raidintegrity y --raidintegritymode bitmap -m1 -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++_add_new_data_to_mnt ++lvextend -l 16 $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++xfs_growfs $mnt ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# add image to raid1 ++_prepare_vg ++lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++lvs -a -o+devices $vg ++_add_new_data_to_mnt ++lvconvert -y -m+1 $vg/$lv1 ++_wait_recalc $vg/${lv1}_rimage_0 ++_wait_recalc $vg/${lv1}_rimage_1 ++_wait_recalc $vg/${lv1}_rimage_2 ++lvs -a -o+devices $vg ++_add_more_data_to_mnt ++_verify_data_on_mnt ++umount $mnt ++lvchange -an $vg/$lv1 ++_verify_data_on_lv ++lvremove $vg/$lv1 ++vgremove -ff $vg ++ ++# Test that raid+integrity cannot be a sublv ++# part1: cannot add integrity to a raid LV that is already a sublv ++ ++_prepare_vg ++ ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvconvert -y --type thin-pool $vg/$lv1 ++not lvconvert --raidintegrity y $vg/$lv1 ++not lvconvert --raidintegrity y $vg/${lv1}_tdata ++not lvconvert --raidintegrity y $vg/${lv1}_tmeta ++lvremove -y $vg/$lv1 ++ ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvconvert -y --type cache-pool $vg/$lv1 ++not lvconvert --raidintegrity y $vg/$lv1 ++not lvconvert --raidintegrity y $vg/${lv1}_cdata ++not lvconvert --raidintegrity y $vg/${lv1}_cmeta ++lvremove -y $vg/$lv1 ++ ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvcreate --type cache-pool -n cpool -l 8 $vg ++lvconvert -y --type cache --cachepool cpool $vg/$lv1 ++not lvconvert --raidintegrity y $vg/$lv1 ++not lvconvert --raidintegrity y $vg/${lv1}_corig ++lvremove -y $vg/$lv1 ++ ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvcreate --type raid1 -m1 -n cvol -l 8 $vg ++lvconvert -y --type cache --cachevol cvol $vg/$lv1 ++not lvconvert --raidintegrity y $vg/$lv1 ++not lvconvert --raidintegrity y $vg/${lv1}_corig ++not lvconvert --raidintegrity y $vg/cvol ++lvremove -y $vg/$lv1 ++ ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvcreate -n cvol -l 8 $vg ++lvchange -an $vg ++lvconvert -y --type writecache --cachevol cvol $vg/$lv1 ++not lvconvert --raidintegrity y $vg/$lv1 ++not lvconvert --raidintegrity y $vg/${lv1}_wcorig ++lvremove -y $vg/$lv1 ++ ++# Test that raid+integrity cannot be a sublv ++# part2: cannot convert an existing raid+integrity LV into a sublv ++ ++lvcreate 
--type raid1 -m1 -n $lv1 -l 8 $vg ++lvconvert -y --type thin-pool $vg/$lv1 ++not lvconvert --raidintegrity y $vg/${lv1}_tdata ++lvremove -y $vg/$lv1 ++ ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvcreate --type raid1 -m1 -n $lv2 -l 8 $vg ++lvconvert -y --type cache --cachevol $lv2 $vg/$lv1 ++not lvconvert --raidintegrity y $vg/${lv1}_corig ++not lvconvert --raidintegrity y $vg/${lv2}_vol ++lvremove -y $vg/$lv1 ++ ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvcreate --type raid1 -m1 -n $lv2 -l 8 $vg ++lvconvert -y --type cache --cachepool $lv2 $vg/$lv1 ++not lvconvert --raidintegrity y $vg/${lv1}_corig ++not lvconvert --raidintegrity y $vg/${lv2}_cpool_cdata ++lvremove -y $vg/$lv1 ++ ++# cannot add integrity to raid that has a snapshot ++ ++lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg ++lvcreate -s -n $lv2 -l 8 $vg/$lv1 ++not lvconvert --raidintegrity y $vg/$lv1 ++lvremove -y $vg/$lv1 ++ ++vgremove -ff $vg +diff --git a/tools/args.h b/tools/args.h +index 999d891..d1f604b 100644 +--- a/tools/args.h ++++ b/tools/args.h +@@ -512,6 +512,26 @@ arg(pvmetadatacopies_ARG, '\0', "pvmetadatacopies", pvmetadatacopies_VAL, 0, 0, + "This may be useful in VGs containing many PVs (this places limitations\n" + "on the ability to use vgsplit later.)\n") + ++arg(raidintegrity_ARG, '\0', "raidintegrity", bool_VAL, 0, 0, ++ "Enable or disable data integrity checksums for raid images.\n") ++ ++arg(raidintegrityblocksize_ARG, '\0', "raidintegrityblocksize", number_VAL, 0, 0, ++ "The block size to use for dm-integrity on raid images.\n" ++ "The integrity block size should usually match the device\n" ++ "logical block size, or the file system block size.\n" ++ "It may be less than the file system block size, but not\n" ++ "less than the device logical block size.\n" ++ "Possible values: 512, 1024, 2048, 4096.\n") ++ ++arg(raidintegritymode_ARG, '\0', "raidintegritymode", string_VAL, 0, 0, ++ "Use a journal (default) or bitmap for keeping integrity checksums consistent\n" ++ "in case of a crash. The bitmap areas are recalculated after a crash, so corruption\n" ++ "in those areas would not be detected. A journal does not have this problem.\n" ++ "The journal mode doubles writes to storage, but can improve performance for\n" ++ "scattered writes packed into a single journal write.\n" ++ "bitmap mode can in theory achieve full write throughput of the device,\n" ++ "but would not benefit from the potential scattered write optimization.\n") ++ + arg(readonly_ARG, '\0', "readonly", 0, 0, 0, + "Run the command in a special read-only mode which will read on-disk\n" + "metadata without needing to take any locks. This can be used to peek\n" +diff --git a/tools/command-lines.in b/tools/command-lines.in +index 37a01cb..ed3d041 100644 +--- a/tools/command-lines.in ++++ b/tools/command-lines.in +@@ -262,7 +262,7 @@ IO: --ignoreskippedcluster + ID: lvchange_resync + DESC: Resyncronize a mirror or raid LV. + DESC: Use to reset 'R' attribute on a not initially synchronized LV. +-RULE: all not lv_is_pvmove lv_is_locked ++RULE: all not lv_is_pvmove lv_is_locked lv_is_raid_with_integrity + RULE: all not LV_raid0 + + lvchange --syncaction SyncAction VG|LV_raid|Tag|Select ... +@@ -359,7 +359,7 @@ OP: PV ... + ID: lvconvert_raid_types + DESC: Convert LV to raid or change raid layout + DESC: (a specific raid level must be used, e.g. raid1). 
+-RULE: all not lv_is_locked lv_is_pvmove ++RULE: all not lv_is_locked lv_is_pvmove lv_is_raid_with_integrity + + lvconvert --mirrors SNumber LV + OO: --regionsize RegionSize, --interval Number, --mirrorlog MirrorLog, OO_LVCONVERT +@@ -373,21 +373,21 @@ OO: OO_LVCONVERT, --interval Number, --regionsize RegionSize, --stripesize SizeK + OP: PV ... + ID: lvconvert_raid_types + DESC: Convert raid LV to change number of stripe images. +-RULE: all not lv_is_locked lv_is_pvmove ++RULE: all not lv_is_locked lv_is_pvmove lv_is_raid_with_integrity + RULE: all not LV_raid0 LV_raid1 + + lvconvert --stripesize SizeKB LV_raid + OO: OO_LVCONVERT, --interval Number, --regionsize RegionSize + ID: lvconvert_raid_types + DESC: Convert raid LV to change the stripe size. +-RULE: all not lv_is_locked lv_is_pvmove ++RULE: all not lv_is_locked lv_is_pvmove lv_is_raid_with_integrity + RULE: all not LV_raid0 LV_raid1 + + lvconvert --regionsize RegionSize LV_raid + OO: OO_LVCONVERT + ID: lvconvert_change_region_size + DESC: Change the region size of an LV. +-RULE: all not lv_is_locked lv_is_pvmove ++RULE: all not lv_is_locked lv_is_pvmove lv_is_raid_with_integrity + RULE: all not LV_raid0 + FLAGS: SECONDARY_SYNTAX + +@@ -401,20 +401,20 @@ OO: OO_LVCONVERT + OP: PV ... + ID: lvconvert_split_mirror_images + DESC: Split images from a raid1 or mirror LV and use them to create a new LV. +-RULE: all not lv_is_locked lv_is_pvmove ++RULE: all not lv_is_locked lv_is_pvmove lv_is_raid_with_integrity + + lvconvert --splitmirrors Number --trackchanges LV_raid1_cache + OO: OO_LVCONVERT + OP: PV ... + ID: lvconvert_split_mirror_images + DESC: Split images from a raid1 LV and track changes to origin for later merge. +-RULE: all not lv_is_locked lv_is_pvmove ++RULE: all not lv_is_locked lv_is_pvmove lv_is_raid_with_integrity + + lvconvert --mergemirrors LV_linear_raid|VG|Tag ... + OO: OO_LVCONVERT + ID: lvconvert_merge_mirror_images + DESC: Merge LV images that were split from a raid1 LV. +-RULE: all not lv_is_locked lv_is_pvmove lv_is_merging_origin lv_is_virtual_origin lv_is_external_origin lv_is_merging_cow ++RULE: all not lv_is_locked lv_is_pvmove lv_is_merging_origin lv_is_virtual_origin lv_is_external_origin lv_is_merging_cow lv_is_raid_with_integrity + + lvconvert --mirrorlog MirrorLog LV_mirror + OO: OO_LVCONVERT +@@ -434,7 +434,7 @@ OO: --thin, --originname LV_new, OO_LVCONVERT_POOL, OO_LVCONVERT + ID: lvconvert_to_thin_with_external + DESC: Convert LV to a thin LV, using the original LV as an external origin. + RULE: all and lv_is_visible +-RULE: all not lv_is_locked ++RULE: all not lv_is_locked lv_is_raid_with_integrity + RULE: --poolmetadata not --readahead --stripesize --stripes_long + + # alternate form of lvconvert --type thin +@@ -445,7 +445,7 @@ DESC: Convert LV to a thin LV, using the original LV as an external origin + DESC: (infers --type thin). + FLAGS: SECONDARY_SYNTAX + RULE: all and lv_is_visible +-RULE: all not lv_is_locked ++RULE: all not lv_is_locked lv_is_raid_with_integrity + RULE: --poolmetadata not --readahead --stripesize --stripes_long + + --- +@@ -455,6 +455,7 @@ OO: --cache, OO_LVCONVERT_CACHE, OO_LVCONVERT_POOL, OO_LVCONVERT + ID: lvconvert_to_cache_with_cachepool + DESC: Attach a cache pool to an LV, converts the LV to type cache. 
+ RULE: all and lv_is_visible ++RULE: all not lv_is_raid_with_integrity + RULE: --poolmetadata not --readahead --stripesize --stripes_long + + # alternate form of lvconvert --type cache +@@ -463,6 +464,7 @@ OO: --type cache, OO_LVCONVERT_CACHE, OO_LVCONVERT_POOL, OO_LVCONVERT + ID: lvconvert_to_cache_with_cachepool + DESC: Attach a cache pool to an LV (infers --type cache). + RULE: all and lv_is_visible ++RULE: all not lv_is_raid_with_integrity + RULE: --poolmetadata not --readahead --stripesize --stripes_long + FLAGS: SECONDARY_SYNTAX + +@@ -473,6 +475,7 @@ OO: OO_LVCONVERT, --cachesettings String + ID: lvconvert_to_writecache + DESC: Attach a writecache to an LV, converts the LV to type writecache. + RULE: all and lv_is_visible ++RULE: all not lv_is_raid_with_integrity + + --- + +@@ -481,6 +484,7 @@ OO: --cache, OO_LVCONVERT_CACHE, OO_LVCONVERT, --poolmetadatasize SizeMB, --chun + ID: lvconvert_to_cache_with_cachevol + DESC: Attach a cache to an LV, converts the LV to type cache. + RULE: all and lv_is_visible ++RULE: all not lv_is_raid_with_integrity + + # alternate form of lvconvert --type cache + lvconvert --cache --cachevol LV LV_linear_striped_raid_thinpool +@@ -488,6 +492,7 @@ OO: OO_LVCONVERT_CACHE, OO_LVCONVERT, --poolmetadatasize SizeMB, --chunksize Siz + ID: lvconvert_to_cache_with_cachevol + DESC: Attach a cache to an LV, converts the LV to type cache. + RULE: all and lv_is_visible ++RULE: all not lv_is_raid_with_integrity + FLAGS: SECONDARY_SYNTAX + + --- +@@ -499,7 +504,7 @@ OP: PV ... + ID: lvconvert_to_thinpool + DESC: Convert LV to type thin-pool. + RULE: all and lv_is_visible +-RULE: all not lv_is_locked lv_is_origin lv_is_merging_origin lv_is_external_origin lv_is_virtual ++RULE: all not lv_is_locked lv_is_origin lv_is_merging_origin lv_is_external_origin lv_is_virtual lv_is_raid_with_integrity + RULE: --poolmetadata not --readahead --stripesize --stripes_long + + # This command syntax has two different meanings depending on +@@ -533,6 +538,7 @@ DESC: Convert LV to type thin-pool (variant, use --type thin-pool). + DESC: Swap metadata LV in a thin pool (variant, use --swapmetadata). + FLAGS: PREVIOUS_SYNTAX + RULE: all and lv_is_visible ++RULE: all not lv_is_raid_with_integrity + RULE: --poolmetadata not --readahead --stripesize --stripes_long + + --- +@@ -543,6 +549,7 @@ OP: PV ... + ID: lvconvert_to_cachepool + DESC: Convert LV to type cache-pool. + RULE: --poolmetadata not --readahead --stripesize --stripes_long ++RULE: all not lv_is_raid_with_integrity + + # This command syntax has two different meanings depending on + # whether the LV pos arg is already a cache pool or not. +@@ -574,6 +581,7 @@ DESC: Convert LV to type cache-pool (variant, use --type cache-pool). + DESC: Swap metadata LV in a cache pool (variant, use --swapmetadata). + FLAGS: PREVIOUS_SYNTAX + RULE: all and lv_is_visible ++RULE: all not lv_is_raid_with_integrity + RULE: --poolmetadata not --readahead --stripesize --stripes_long + + --- +@@ -583,7 +591,7 @@ OO: --name LV_new, --virtualsize SizeMB, --compression Bool, --deduplication Boo + ID: lvconvert_to_vdopool + DESC: Convert LV to type vdopool. 
+ RULE: all and lv_is_visible +-RULE: all not lv_is_locked lv_is_origin lv_is_merging_origin lv_is_external_origin lv_is_virtual ++RULE: all not lv_is_locked lv_is_origin lv_is_merging_origin lv_is_external_origin lv_is_virtual lv_is_raid_with_integrity + + lvconvert --vdopool LV_linear_striped_raid_cache + OO: --type vdo-pool, OO_LVCONVERT, +@@ -591,7 +599,7 @@ OO: --type vdo-pool, OO_LVCONVERT, + ID: lvconvert_to_vdopool_param + DESC: Convert LV to type vdopool. + RULE: all and lv_is_visible +-RULE: all not lv_is_locked lv_is_origin lv_is_merging_origin lv_is_external_origin lv_is_virtual ++RULE: all not lv_is_locked lv_is_origin lv_is_merging_origin lv_is_external_origin lv_is_virtual lv_is_raid_with_integrity + FLAGS: SECONDARY_SYNTAX + + --- +@@ -757,6 +765,14 @@ FLAGS: SECONDARY_SYNTAX + + --- + ++lvconvert --raidintegrity Bool LV_raid ++OO: --raidintegritymode String, --raidintegrityblocksize Number, OO_LVCONVERT ++OP: PV ... ++ID: lvconvert_integrity ++DESC: Add or remove data integrity checksums to raid images. ++ ++--- ++ + # --extents is not specified; it's an automatic alternative for --size + + OO_LVCREATE: --addtag Tag, --alloc Alloc, --autobackup Bool, --activate Active, +@@ -870,7 +886,8 @@ DESC: Create a raid1 or mirror LV (infers --type raid1|mirror). + # R9,R10,R11,R12 (--type raid with any use of --stripes/--mirrors) + lvcreate --type raid --size SizeMB VG + OO: --mirrors PNumber, --stripes Number, --stripesize SizeKB, +---regionsize RegionSize, --minrecoveryrate SizeKB, --maxrecoveryrate SizeKB, OO_LVCREATE ++--regionsize RegionSize, --minrecoveryrate SizeKB, --maxrecoveryrate SizeKB, ++--raidintegrity Bool, --raidintegritymode String, --raidintegrityblocksize Number, OO_LVCREATE + OP: PV ... + ID: lvcreate_raid_any + DESC: Create a raid LV (a specific raid level must be used, e.g. raid1). +diff --git a/tools/lv_props.h b/tools/lv_props.h +index 2925028..60c8c73 100644 +--- a/tools/lv_props.h ++++ b/tools/lv_props.h +@@ -52,5 +52,6 @@ lvp(is_cow_covering_origin_LVP, "lv_is_cow_covering_origin", NULL) + lvp(is_visible_LVP, "lv_is_visible", NULL) + lvp(is_historical_LVP, "lv_is_historical", NULL) + lvp(is_raid_with_tracking_LVP, "lv_is_raid_with_tracking", NULL) ++lvp(is_raid_with_integrity_LVP, "lv_is_raid_with_integrity", NULL) + lvp(LVP_COUNT, "", NULL) + +diff --git a/tools/lv_types.h b/tools/lv_types.h +index 778cd54..d1c94cc 100644 +--- a/tools/lv_types.h ++++ b/tools/lv_types.h +@@ -34,5 +34,6 @@ lvt(raid10_LVT, "raid10", NULL) + lvt(error_LVT, "error", NULL) + lvt(zero_LVT, "zero", NULL) + lvt(writecache_LVT, "writecache", NULL) ++lvt(integrity_LVT, "integrity", NULL) + lvt(LVT_COUNT, "", NULL) + +diff --git a/tools/lvchange.c b/tools/lvchange.c +index 5f0fcab..2d5bb32 100644 +--- a/tools/lvchange.c ++++ b/tools/lvchange.c +@@ -1573,6 +1573,11 @@ static int _lvchange_syncaction_single(struct cmd_context *cmd, + struct logical_volume *lv, + struct processing_handle *handle) + { ++ if (lv_raid_has_integrity(lv)) { ++ log_error("Integrity must be removed to use syncaction commands."); ++ return_ECMD_FAILED; ++ } ++ + /* If LV is inactive here, ensure it's not active elsewhere. 
*/ + if (!lockd_lv(cmd, lv, "ex", 0)) + return_ECMD_FAILED; +diff --git a/tools/lvconvert.c b/tools/lvconvert.c +index bb40930..e969b44 100644 +--- a/tools/lvconvert.c ++++ b/tools/lvconvert.c +@@ -1391,11 +1391,23 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l + DEFAULT_RAID1_MAX_IMAGES, lp->segtype->name, display_lvname(lv)); + return 0; + } ++ if (!seg_is_raid1(seg) && lv_raid_has_integrity(lv)) { ++ log_error("Cannot add raid images with integrity for this raid level."); ++ return 0; ++ } + if (!lv_raid_change_image_count(lv, lp->yes, image_count, + (lp->region_size_supplied || !seg->region_size) ? + lp->region_size : seg->region_size , lp->pvh)) + return_0; + ++ if (lv_raid_has_integrity(lv)) { ++ struct integrity_settings *isettings = NULL; ++ if (!lv_get_raid_integrity_settings(lv, &isettings)) ++ return_0; ++ if (!lv_add_integrity_to_raid(lv, isettings, lp->pvh, NULL)) ++ return_0; ++ } ++ + log_print_unless_silent("Logical volume %s successfully converted.", + display_lvname(lv)); + +@@ -1425,6 +1437,12 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l + return 0; + } + ++ if (lv_raid_has_integrity(lv)) { ++ /* FIXME: which conversions are happening here? */ ++ log_error("This conversion is not supported for raid with integrity."); ++ return 0; ++ } ++ + /* FIXME This needs changing globally. */ + if (!arg_is_set(cmd, stripes_long_ARG)) + lp->stripes = 0; +@@ -1444,6 +1462,12 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l + } + + try_new_takeover_or_reshape: ++ if (lv_raid_has_integrity(lv)) { ++ /* FIXME: which conversions are happening here? */ ++ log_error("This conversion is not supported for raid with integrity."); ++ return 0; ++ } ++ + if (!_raid4_conversion_supported(lv, lp)) + return 0; + +@@ -5758,6 +5782,119 @@ int lvconvert_to_cache_with_cachevol_cmd(struct cmd_context *cmd, int argc, char + return ret; + } + ++static int _lvconvert_integrity_remove(struct cmd_context *cmd, struct logical_volume *lv) ++{ ++ struct volume_group *vg = lv->vg; ++ int ret = 0; ++ ++ if (!lv_is_integrity(lv) && !lv_is_raid(lv)) { ++ log_error("LV does not have integrity."); ++ return 0; ++ } ++ ++ /* ensure it's not active elsewhere. */ ++ if (!lockd_lv(cmd, lv, "ex", 0)) ++ return_0; ++ ++ if (!archive(vg)) ++ return_0; ++ ++ if (lv_is_raid(lv)) ++ ret = lv_remove_integrity_from_raid(lv); ++ if (!ret) ++ return_0; ++ ++ backup(vg); ++ ++ log_print_unless_silent("Logical volume %s has removed integrity.", display_lvname(lv)); ++ return 1; ++} ++ ++static int _lvconvert_integrity_add(struct cmd_context *cmd, struct logical_volume *lv, ++ struct integrity_settings *set) ++{ ++ struct volume_group *vg = lv->vg; ++ struct dm_list *use_pvh; ++ int ret = 0; ++ ++ /* ensure it's not active elsewhere. */ ++ if (!lockd_lv(cmd, lv, "ex", 0)) ++ return_0; ++ ++ if (cmd->position_argc > 1) { ++ /* First pos arg is required LV, remaining are optional PVs. 
*/ ++ if (!(use_pvh = create_pv_list(cmd->mem, vg, cmd->position_argc - 1, cmd->position_argv + 1, 0))) ++ return_0; ++ } else ++ use_pvh = &vg->pvs; ++ ++ if (!archive(vg)) ++ return_0; ++ ++ if (lv_is_partial(lv)) { ++ log_error("Cannot add integrity while LV is missing PVs."); ++ return 0; ++ } ++ ++ if (lv_is_raid(lv)) ++ ret = lv_add_integrity_to_raid(lv, set, use_pvh, NULL); ++ if (!ret) ++ return_0; ++ ++ backup(vg); ++ ++ log_print_unless_silent("Logical volume %s has added integrity.", display_lvname(lv)); ++ return 1; ++} ++ ++static int _lvconvert_integrity_single(struct cmd_context *cmd, ++ struct logical_volume *lv, ++ struct processing_handle *handle) ++{ ++ struct integrity_settings settings; ++ int ret = 0; ++ ++ memset(&settings, 0, sizeof(settings)); ++ ++ if (!integrity_mode_set(arg_str_value(cmd, raidintegritymode_ARG, NULL), &settings)) ++ return_ECMD_FAILED; ++ ++ if (arg_is_set(cmd, raidintegrityblocksize_ARG)) ++ settings.block_size = arg_int_value(cmd, raidintegrityblocksize_ARG, 0); ++ ++ if (arg_int_value(cmd, raidintegrity_ARG, 0)) ++ ret = _lvconvert_integrity_add(cmd, lv, &settings); ++ else ++ ret = _lvconvert_integrity_remove(cmd, lv); ++ ++ if (!ret) ++ return ECMD_FAILED; ++ return ECMD_PROCESSED; ++} ++ ++int lvconvert_integrity_cmd(struct cmd_context *cmd, int argc, char **argv) ++{ ++ struct processing_handle *handle; ++ int ret; ++ ++ if (!(handle = init_processing_handle(cmd, NULL))) { ++ log_error("Failed to initialize processing handle."); ++ return ECMD_FAILED; ++ } ++ ++ /* Want to be able to remove integrity from partial LV */ ++ cmd->handles_missing_pvs = 1; ++ ++ cmd->cname->flags &= ~GET_VGNAME_FROM_OPTIONS; ++ ++ ret = process_each_lv(cmd, cmd->position_argc, cmd->position_argv, NULL, NULL, READ_FOR_UPDATE, handle, NULL, ++ &_lvconvert_integrity_single); ++ ++ destroy_processing_handle(cmd, handle); ++ ++ return ret; ++} ++ + /* + * All lvconvert command defs have their own function, + * so the generic function name is unused. 
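For illustration only (not part of the patch): the lvconvert_integrity_* path added above could be exercised as below, assuming an existing raid1 LV named vg/lv1. The option names follow the args.h and command-lines.in hunks in this series, and the bitmap/blocksize forms mirror the shell tests added earlier in the patch set.

    lvconvert --raidintegrity y vg/lv1                    # add integrity, journal mode (documented default)
    lvconvert --raidintegrity y --raidintegritymode bitmap --raidintegrityblocksize 512 vg/lv1
                                                          # bitmap mode with 512-byte integrity blocks
    lvconvert --raidintegrity n vg/lv1                    # remove integrity again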
+diff --git a/tools/lvcreate.c b/tools/lvcreate.c +index 448f125..5c978b3 100644 +--- a/tools/lvcreate.c ++++ b/tools/lvcreate.c +@@ -858,7 +858,10 @@ static int _lvcreate_params(struct cmd_context *cmd, + maxrecoveryrate_ARG,\ + minrecoveryrate_ARG,\ + raidmaxrecoveryrate_ARG,\ +- raidminrecoveryrate_ARG ++ raidminrecoveryrate_ARG, \ ++ raidintegrity_ARG, \ ++ raidintegritymode_ARG, \ ++ raidintegrityblocksize_ARG + + #define SIZE_ARGS \ + extents_ARG,\ +@@ -1227,6 +1230,16 @@ static int _lvcreate_params(struct cmd_context *cmd, + } + } + ++ if (seg_is_raid(lp) && arg_int_value(cmd, raidintegrity_ARG, 0)) { ++ lp->raidintegrity = 1; ++ if (arg_is_set(cmd, raidintegrityblocksize_ARG)) ++ lp->integrity_settings.block_size = arg_int_value(cmd, raidintegrityblocksize_ARG, 0); ++ if (arg_is_set(cmd, raidintegritymode_ARG)) { ++ if (!integrity_mode_set(arg_str_value(cmd, raidintegritymode_ARG, NULL), &lp->integrity_settings)) ++ return_0; ++ } ++ } ++ + lcp->pv_count = argc; + lcp->pvs = argv; + +diff --git a/tools/lvmcmdline.c b/tools/lvmcmdline.c +index f147be3..d87a8f0 100644 +--- a/tools/lvmcmdline.c ++++ b/tools/lvmcmdline.c +@@ -149,6 +149,9 @@ static const struct command_function _command_functions[CMD_COUNT] = { + { lvconvert_to_vdopool_CMD, lvconvert_to_vdopool_cmd }, + { lvconvert_to_vdopool_param_CMD, lvconvert_to_vdopool_param_cmd }, + ++ /* lvconvert for integrity */ ++ { lvconvert_integrity_CMD, lvconvert_integrity_cmd }, ++ + { pvscan_display_CMD, pvscan_display_cmd }, + { pvscan_cache_CMD, pvscan_cache_cmd }, + }; +diff --git a/tools/pvmove.c b/tools/pvmove.c +index 0419a3d..a346b53 100644 +--- a/tools/pvmove.c ++++ b/tools/pvmove.c +@@ -381,6 +381,11 @@ static struct logical_volume *_set_up_pvmove_lv(struct cmd_context *cmd, + return NULL; + } + ++ if (lv_is_raid(lv) && lv_raid_has_integrity(lv)) { ++ log_error("Unable to pvmove device used for raid with integrity."); ++ return NULL; ++ } ++ + seg = first_seg(lv); + if (!needs_exclusive) { + /* Presence of exclusive LV decides whether pvmove must be also exclusive */ +@@ -625,6 +630,11 @@ static int _pvmove_setup_single(struct cmd_context *cmd, + log_error("pvmove not allowed on LV using writecache."); + return ECMD_FAILED; + } ++ ++ if (lv_is_raid(lv) && lv_raid_has_integrity(lv)) { ++ log_error("pvmove not allowed on raid LV with integrity."); ++ return ECMD_FAILED; ++ } + } + + /* +diff --git a/tools/toollib.c b/tools/toollib.c +index 6386a69..96d0d6d 100644 +--- a/tools/toollib.c ++++ b/tools/toollib.c +@@ -718,11 +718,26 @@ int vgcreate_params_set_from_args(struct cmd_context *cmd, + return 1; + } + ++int integrity_mode_set(const char *mode, struct integrity_settings *settings) ++{ ++ if (!mode || !strcmp(mode, "bitmap") || !strcmp(mode, "B")) ++ settings->mode[0] = 'B'; ++ else if (!strcmp(mode, "journal") || !strcmp(mode, "J")) ++ settings->mode[0] = 'J'; ++ else { ++ /* FIXME: the kernel has other modes, should we allow any of those? 
*/ ++ log_error("Invalid raid integrity mode (use \"bitmap\" or \"journal\")"); ++ return 0; ++ } ++ return 1; ++} ++ + /* Shared code for changing activation state for vgchange/lvchange */ + int lv_change_activate(struct cmd_context *cmd, struct logical_volume *lv, + activation_change_t activate) + { + int r = 1; ++ int integrity_recalculate; + struct logical_volume *snapshot_lv; + + if (lv_is_cache_pool(lv)) { +@@ -780,9 +795,34 @@ int lv_change_activate(struct cmd_context *cmd, struct logical_volume *lv, + return 0; + } + ++ if ((integrity_recalculate = lv_has_integrity_recalculate_metadata(lv))) { ++ /* Don't want pvscan to write VG while running from systemd service. */ ++ if (!strcmp(cmd->name, "pvscan")) { ++ log_error("Cannot activate uninitialized integrity LV %s from pvscan.", ++ display_lvname(lv)); ++ return 0; ++ } ++ ++ if (vg_is_shared(lv->vg)) { ++ uint32_t lockd_state = 0; ++ if (!lockd_vg(cmd, lv->vg->name, "ex", 0, &lockd_state)) { ++ log_error("Cannot activate uninitialized integrity LV %s without lock.", ++ display_lvname(lv)); ++ return 0; ++ } ++ } ++ } ++ + if (!lv_active_change(cmd, lv, activate)) + return_0; + ++ /* Write VG metadata to clear the integrity recalculate flag. */ ++ if (integrity_recalculate && lv_is_active(lv)) { ++ log_print_unless_silent("Updating VG to complete initialization of integrity LV %s.", ++ display_lvname(lv)); ++ lv_clear_integrity_recalculate_metadata(lv); ++ } ++ + set_lv_notify(lv->vg->cmd); + + return r; +@@ -1144,6 +1184,7 @@ out: + return ok; + } + ++ + /* FIXME move to lib */ + static int _pv_change_tag(struct physical_volume *pv, const char *tag, int addtag) + { +@@ -2255,6 +2296,8 @@ static int _lv_is_prop(struct cmd_context *cmd, struct logical_volume *lv, int l + return lv_is_historical(lv); + case is_raid_with_tracking_LVP: + return lv_is_raid_with_tracking(lv); ++ case is_raid_with_integrity_LVP: ++ return lv_raid_has_integrity(lv); + default: + log_error(INTERNAL_ERROR "unknown lv property value lvp_enum %d", lvp_enum); + } +@@ -2309,6 +2352,8 @@ static int _lv_is_type(struct cmd_context *cmd, struct logical_volume *lv, int l + return seg_is_raid10(seg); + case writecache_LVT: + return seg_is_writecache(seg); ++ case integrity_LVT: ++ return seg_is_integrity(seg); + case error_LVT: + return !strcmp(seg->segtype->name, SEG_TYPE_NAME_ERROR); + case zero_LVT: +@@ -2367,6 +2412,8 @@ int get_lvt_enum(struct logical_volume *lv) + return raid10_LVT; + if (seg_is_writecache(seg)) + return writecache_LVT; ++ if (seg_is_integrity(seg)) ++ return integrity_LVT; + + if (!strcmp(seg->segtype->name, SEG_TYPE_NAME_ERROR)) + return error_LVT; +diff --git a/tools/tools.h b/tools/tools.h +index 3cf4293..7f2434d 100644 +--- a/tools/tools.h ++++ b/tools/tools.h +@@ -212,6 +212,8 @@ unsigned grouped_arg_is_set(const struct arg_values *av, int a); + const char *grouped_arg_str_value(const struct arg_values *av, int a, const char *def); + int32_t grouped_arg_int_value(const struct arg_values *av, int a, const int32_t def); + ++int integrity_mode_set(const char *mode, struct integrity_settings *settings); ++ + const char *command_name(struct cmd_context *cmd); + + int pvmove_poll(struct cmd_context *cmd, const char *pv_name, const char *uuid, +@@ -274,6 +276,8 @@ int lvconvert_merge_cmd(struct cmd_context *cmd, int argc, char **argv); + int lvconvert_to_vdopool_cmd(struct cmd_context *cmd, int argc, char **argv); + int lvconvert_to_vdopool_param_cmd(struct cmd_context *cmd, int argc, char **argv); + ++int lvconvert_integrity_cmd(struct 
cmd_context *cmd, int argc, char **argv); ++ + int pvscan_display_cmd(struct cmd_context *cmd, int argc, char **argv); + int pvscan_cache_cmd(struct cmd_context *cmd, int argc, char **argv); + +-- +1.8.3.1 + diff --git a/SOURCES/lvm2-2_03_10-Fix-scripts-lvmlocks.service.in-using-nonexistent-lock-opt-autowait.patch b/SOURCES/lvm2-2_03_10-Fix-scripts-lvmlocks.service.in-using-nonexistent-lock-opt-autowait.patch new file mode 100644 index 0000000..2eec67f --- /dev/null +++ b/SOURCES/lvm2-2_03_10-Fix-scripts-lvmlocks.service.in-using-nonexistent-lock-opt-autowait.patch @@ -0,0 +1,31 @@ +From 48105f492f7f8c157ba714217ae55c6fb50e76c0 Mon Sep 17 00:00:00 2001 +From: Maxim Plotnikov +Date: Wed, 22 Apr 2020 00:16:29 +0300 +Subject: [PATCH] Fix scripts/lvmlocks.service.in using nonexistent --lock-opt + autowait + +The --lock-opt autowait was dropped back in 9ab6bdce01, +and attempting to specify it has quite an opposite effect: +no waiting is done, which makes the unit almost useless. + +(cherry picked from commit a509776588a5c0c0bfc2394e4d1ed717531b0257) +--- + scripts/lvmlocks.service.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/scripts/lvmlocks.service.in b/scripts/lvmlocks.service.in +index f6a951f..a3d0bd4 100644 +--- a/scripts/lvmlocks.service.in ++++ b/scripts/lvmlocks.service.in +@@ -8,7 +8,7 @@ Type=oneshot + RemainAfterExit=yes + + # start lockspaces and wait for them to finish starting +-ExecStart=@SBINDIR@/lvm vgchange --lock-start --lock-opt autowait ++ExecStart=@SBINDIR@/lvm vgchange --lock-start --lock-opt auto + + # stop lockspaces and wait for them to finish stopping + ExecStop=@SBINDIR@/lvmlockctl --stop-lockspaces --wait 1 +-- +1.8.3.1 + diff --git a/SOURCES/lvm2-2_03_10-WHATS_NEW-integrity-with-raid.patch b/SOURCES/lvm2-2_03_10-WHATS_NEW-integrity-with-raid.patch new file mode 100644 index 0000000..a0d68f4 --- /dev/null +++ b/SOURCES/lvm2-2_03_10-WHATS_NEW-integrity-with-raid.patch @@ -0,0 +1,24 @@ +From a08afc8d0d18b2547176e731852b816df76c63eb Mon Sep 17 00:00:00 2001 +From: David Teigland +Date: Wed, 15 Apr 2020 11:04:12 -0500 +Subject: [PATCH 3/3] WHATS_NEW: integrity with raid + +(cherry picked from commit 211eaa284c4df992916e0a523d0ff932aa790a98) +--- + WHATS_NEW | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/WHATS_NEW b/WHATS_NEW +index 89583f7..c0267b7 100644 +--- a/WHATS_NEW ++++ b/WHATS_NEW +@@ -1,5 +1,6 @@ + Version 2.03.10 - + ================================= ++ Add integrity with raid capability. + Fix support for lvconvert --repair used by foreign apps (i.e. Docker). + + Version 2.03.09 - 26th March 2020 +-- +1.8.3.1 + diff --git a/SOURCES/lvm2-2_03_10-WHATS_NEWS-update.patch b/SOURCES/lvm2-2_03_10-WHATS_NEWS-update.patch new file mode 100644 index 0000000..82eb395 --- /dev/null +++ b/SOURCES/lvm2-2_03_10-WHATS_NEWS-update.patch @@ -0,0 +1,15 @@ + WHATS_NEW | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/WHATS_NEW b/WHATS_NEW +index db914c0..89583f7 100644 +--- a/WHATS_NEW ++++ b/WHATS_NEW +@@ -1,3 +1,7 @@ ++Version 2.03.10 - ++================================= ++ Fix support for lvconvert --repair used by foreign apps (i.e. Docker). ++ + Version 2.03.09 - 26th March 2020 + ================================= + Fix formating of vdopool (vdo_slab_size_mb was smaller by 2 bits). 
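For illustration only (not part of the patch set): with the lvcreate.c changes above, integrity can also be requested at creation time, as the integrity test scripts do; vg, lv1 and the size below are placeholders.

    lvcreate --type raid1 -m1 --raidintegrity y -n lv1 -L1G vg
                                                          # journal mode (default)
    lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n lv1 -L1G vg
                                                          # bitmap mode, as in the *_raidintegritymode bitmap tests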
diff --git a/SOURCES/lvm2-2_03_10-blkdeactivate-add-support-for-VDO-in-blkdeactivate-script.patch b/SOURCES/lvm2-2_03_10-blkdeactivate-add-support-for-VDO-in-blkdeactivate-script.patch new file mode 100644 index 0000000..6abd26b --- /dev/null +++ b/SOURCES/lvm2-2_03_10-blkdeactivate-add-support-for-VDO-in-blkdeactivate-script.patch @@ -0,0 +1,179 @@ + WHATS_NEW_DM | 4 ++++ + man/blkdeactivate.8_main | 11 +++++++++++ + scripts/blkdeactivate.sh.in | 48 ++++++++++++++++++++++++++++++++++++++++++++- + 3 files changed, 62 insertions(+), 1 deletion(-) + +diff --git a/WHATS_NEW_DM b/WHATS_NEW_DM +index 12bdcea..3ec9c3c 100644 +--- a/WHATS_NEW_DM ++++ b/WHATS_NEW_DM +@@ -1,3 +1,7 @@ ++Version 1.02.173 - ++================================== ++ Add support for VDO in blkdeactivate script. ++ + Version 1.02.171 - 26th March 2020 + ================================== + Fix dm_list interators with gcc 10 optimization (-ftree-pta). +diff --git a/man/blkdeactivate.8_main b/man/blkdeactivate.8_main +index f3c19a8..06af52e 100644 +--- a/man/blkdeactivate.8_main ++++ b/man/blkdeactivate.8_main +@@ -9,6 +9,7 @@ blkdeactivate \(em utility to deactivate block devices + .RB [ -l \ \fIlvm_options\fP ] + .RB [ -m \ \fImpath_options\fP ] + .RB [ -r \ \fImdraid_options\fP ] ++.RB [ -o \ \fIvdo_options\fP ] + .RB [ -u ] + .RB [ -v ] + .RI [ device ] +@@ -70,6 +71,15 @@ Comma-separated list of MD RAID specific options: + Wait MD device's resync, recovery or reshape action to complete + before deactivation. + .RE ++ ++.TP ++.BR -o ", " --vdooptions \ \fIvdo_options\fP ++Comma-separated list of VDO specific options: ++.RS ++.IP \fIconfigfile=file\fP ++Use specified VDO configuration file. ++.RE ++ + .TP + .BR -u ", " --umount + Unmount a mounted device before trying to deactivate it. +@@ -120,4 +130,5 @@ of a device-mapper device fails, retry it and force removal. + .BR lvm (8), + .BR mdadm (8), + .BR multipathd (8), ++.BR vdo (8), + .BR umount (8) +diff --git a/scripts/blkdeactivate.sh.in b/scripts/blkdeactivate.sh.in +index a4b8a8f..57b3e58 100644 +--- a/scripts/blkdeactivate.sh.in ++++ b/scripts/blkdeactivate.sh.in +@@ -1,6 +1,6 @@ + #!/bin/bash + # +-# Copyright (C) 2012-2017 Red Hat, Inc. All rights reserved. ++# Copyright (C) 2012-2020 Red Hat, Inc. All rights reserved. + # + # This file is part of LVM2. 
+ # +@@ -38,6 +38,7 @@ MDADM="/sbin/mdadm" + MOUNTPOINT="/bin/mountpoint" + MPATHD="/sbin/multipathd" + UMOUNT="/bin/umount" ++VDO="/bin/vdo" + + sbindir="@SBINDIR@" + DMSETUP="$sbindir/dmsetup" +@@ -54,6 +55,7 @@ DMSETUP_OPTS="" + LVM_OPTS="" + MDADM_OPTS="" + MPATHD_OPTS="" ++VDO_OPTS="" + + LSBLK="/bin/lsblk -r --noheadings -o TYPE,KNAME,NAME,MOUNTPOINT" + LSBLK_VARS="local devtype local kname local name local mnt" +@@ -124,6 +126,7 @@ usage() { + echo " -l | --lvmoptions LVM_OPTIONS Comma separated LVM specific options" + echo " -m | --mpathoptions MPATH_OPTIONS Comma separated DM-multipath specific options" + echo " -r | --mdraidoptions MDRAID_OPTIONS Comma separated MD RAID specific options" ++ echo " -o | --vdooptions VDO_OPTIONS Comma separated VDO specific options" + echo " -u | --umount Unmount the device if mounted" + echo " -v | --verbose Verbose mode (also implies -e)" + echo +@@ -138,6 +141,8 @@ usage() { + echo " wait wait for resync, recovery or reshape to complete first" + echo " MPATH_OPTIONS:" + echo " disablequeueing disable queueing on all DM-multipath devices first" ++ echo " VDO_OPTIONS:" ++ echo " configfile=file use specified VDO configuration file" + + exit + } +@@ -319,6 +324,23 @@ deactivate_md () { + fi + } + ++deactivate_vdo() { ++ local xname ++ xname=$(printf "%s" "$name") ++ test -b "$DEV_DIR/mapper/$xname" || return 0 ++ test -z "${SKIP_DEVICE_LIST["$kname"]}" || return 1 ++ ++ deactivate_holders "$DEV_DIR/mapper/$xname" || return 1 ++ ++ echo -n " [VDO]: deactivating VDO volume $xname... " ++ if eval "$VDO" stop $VDO_OPTS --name="$xname" "$OUT" "$ERR"; then ++ echo "done" ++ else ++ echo "skipping" ++ add_device_to_skip_list ++ fi ++} ++ + deactivate () { + ###################################################################### + # DEACTIVATION HOOKS FOR NEW DEVICE TYPES GO HERE! 
# +@@ -335,6 +357,8 @@ deactivate () { + ###################################################################### + if test "$devtype" = "lvm"; then + deactivate_lvm ++ elif test "$devtype" = "vdo"; then ++ deactivate_vdo + elif test "${kname:0:3}" = "dm-"; then + deactivate_dm + elif test "${kname:0:2}" = "md"; then +@@ -479,6 +503,20 @@ get_mpathopts() { + IFS=$ORIG_IFS + } + ++get_vdoopts() { ++ ORIG_IFS=$IFS; IFS=',' ++ ++ for opt in $1; do ++ case "$opt" in ++ "") ;; ++ configfile=*) tmp=${opt#*=}; VDO_OPTS+="--confFile=${tmp%%,*} " ;; ++ *) echo "$opt: unknown VDO option" ++ esac ++ done ++ ++ IFS=$ORIG_IFS ++} ++ + set_env() { + if test "$ERRORS" -eq "1"; then + unset ERR +@@ -493,6 +531,7 @@ set_env() { + LVM_OPTS+="-vvvv" + MDADM_OPTS+="-vv" + MPATHD_OPTS+="-v 3" ++ VDO_OPTS+="--verbose " + else + OUT="1>$DEV_DIR/null" + fi +@@ -509,6 +548,12 @@ set_env() { + MDADM_AVAILABLE=0 + fi + ++ if test -f $VDO; then ++ VDO_AVAILABLE=1 ++ else ++ VDO_AVAILABLE=0 ++ fi ++ + MPATHD_RUNNING=0 + test "$MPATHD_DO_DISABLEQUEUEING" -eq 1 && { + if test -f "$MPATHD"; then +@@ -528,6 +573,7 @@ while test $# -ne 0; do + "-l"|"--lvmoptions") get_lvmopts "$2" ; shift ;; + "-m"|"--mpathoptions") get_mpathopts "$2" ; shift ;; + "-r"|"--mdraidoptions") get_mdraidopts "$2"; shift ;; ++ "-o"|"--vdooptions") get_vdoopts "$2"; shift ;; + "-u"|"--umount") DO_UMOUNT=1 ;; + "-v"|"--verbose") VERBOSE=1 ; ERRORS=1 ;; + "-vv") VERBOSE=1 ; ERRORS=1 ; set -x ;; diff --git a/SOURCES/lvm2-2_03_10-build-make-generate.patch b/SOURCES/lvm2-2_03_10-build-make-generate.patch new file mode 100644 index 0000000..4b76f58 --- /dev/null +++ b/SOURCES/lvm2-2_03_10-build-make-generate.patch @@ -0,0 +1,180 @@ +From 7def94164ae6c18d84e40f00db2e2b74a7662b35 Mon Sep 17 00:00:00 2001 +From: Marian Csontos +Date: Tue, 5 May 2020 10:20:18 +0200 +Subject: [PATCH] build: make generate + +(cherry picked from commit bcc149048440dce5fc7962f88ed523469dd39a32) +--- + man/lvconvert.8_pregen | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++ + man/lvcreate.8_pregen | 53 ++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 116 insertions(+) + +diff --git a/man/lvconvert.8_pregen b/man/lvconvert.8_pregen +index b676e72..7440984 100644 +--- a/man/lvconvert.8_pregen ++++ b/man/lvconvert.8_pregen +@@ -163,6 +163,18 @@ lvconvert - Change logical volume layout + .ad b + .br + .ad l ++ \fB--raidintegrity\fP \fBy\fP|\fBn\fP ++.ad b ++.br ++.ad l ++ \fB--raidintegrityblocksize\fP \fINumber\fP ++.ad b ++.br ++.ad l ++ \fB--raidintegritymode\fP \fIString\fP ++.ad b ++.br ++.ad l + \fB-r\fP|\fB--readahead\fP \fBauto\fP|\fBnone\fP|\fINumber\fP + .ad b + .br +@@ -982,6 +994,28 @@ Poll LV to continue conversion. + .br + - + ++Add or remove data integrity checksums to raid images. ++.br ++.P ++\fBlvconvert\fP \fB--raidintegrity\fP \fBy\fP|\fBn\fP \fILV\fP\fI_raid\fP ++.br ++.RS 4 ++.ad l ++[ \fB--raidintegritymode\fP \fIString\fP ] ++.ad b ++.br ++.ad l ++[ \fB--raidintegrityblocksize\fP \fINumber\fP ] ++.ad b ++.br ++[ COMMON_OPTIONS ] ++.RE ++.br ++.RS 4 ++[ \fIPV\fP ... ] ++.RE ++- ++ + Common options for command: + . + .RS 4 +@@ -1405,6 +1439,35 @@ Repeat once to also suppress any prompts with answer 'no'. + .ad b + .HP + .ad l ++\fB--raidintegrity\fP \fBy\fP|\fBn\fP ++.br ++Enable or disable data integrity checksums for raid images. ++.ad b ++.HP ++.ad l ++\fB--raidintegrityblocksize\fP \fINumber\fP ++.br ++The block size to use for dm-integrity on raid images. 
++The integrity block size should usually match the device ++logical block size, or the file system block size. ++It may be less than the file system block size, but not ++less than the device logical block size. ++Possible values: 512, 1024, 2048, 4096. ++.ad b ++.HP ++.ad l ++\fB--raidintegritymode\fP \fIString\fP ++.br ++Use a journal (default) or bitmap for keeping integrity checksums consistent ++in case of a crash. The bitmap areas are recalculated after a crash, so corruption ++in those areas would not be detected. A journal does not have this problem. ++The journal mode doubles writes to storage, but can improve performance for ++scattered writes packed into a single journal write. ++bitmap mode can in theory achieve full write throughput of the device, ++but would not benefit from the potential scattered write optimization. ++.ad b ++.HP ++.ad l + \fB-r\fP|\fB--readahead\fP \fBauto\fP|\fBnone\fP|\fINumber\fP + .br + Sets read ahead sector count of an LV. +diff --git a/man/lvcreate.8_pregen b/man/lvcreate.8_pregen +index a80f9f5..be8e783 100644 +--- a/man/lvcreate.8_pregen ++++ b/man/lvcreate.8_pregen +@@ -187,6 +187,18 @@ lvcreate - Create a logical volume + .ad b + .br + .ad l ++ \fB--raidintegrity\fP \fBy\fP|\fBn\fP ++.ad b ++.br ++.ad l ++ \fB--raidintegrityblocksize\fP \fINumber\fP ++.ad b ++.br ++.ad l ++ \fB--raidintegritymode\fP \fIString\fP ++.ad b ++.br ++.ad l + \fB-r\fP|\fB--readahead\fP \fBauto\fP|\fBnone\fP|\fINumber\fP + .ad b + .br +@@ -425,6 +437,18 @@ Create a raid LV (a specific raid level must be used, e.g. raid1). + [ \fB--[raid]maxrecoveryrate\fP \fISize\fP[k|UNIT] ] + .ad b + .br ++.ad l ++[ \fB--raidintegrity\fP \fBy\fP|\fBn\fP ] ++.ad b ++.br ++.ad l ++[ \fB--raidintegritymode\fP \fIString\fP ] ++.ad b ++.br ++.ad l ++[ \fB--raidintegrityblocksize\fP \fINumber\fP ] ++.ad b ++.br + [ COMMON_OPTIONS ] + .RE + .br +@@ -1420,6 +1444,35 @@ Repeat once to also suppress any prompts with answer 'no'. + .ad b + .HP + .ad l ++\fB--raidintegrity\fP \fBy\fP|\fBn\fP ++.br ++Enable or disable data integrity checksums for raid images. ++.ad b ++.HP ++.ad l ++\fB--raidintegrityblocksize\fP \fINumber\fP ++.br ++The block size to use for dm-integrity on raid images. ++The integrity block size should usually match the device ++logical block size, or the file system block size. ++It may be less than the file system block size, but not ++less than the device logical block size. ++Possible values: 512, 1024, 2048, 4096. ++.ad b ++.HP ++.ad l ++\fB--raidintegritymode\fP \fIString\fP ++.br ++Use a journal (default) or bitmap for keeping integrity checksums consistent ++in case of a crash. The bitmap areas are recalculated after a crash, so corruption ++in those areas would not be detected. A journal does not have this problem. ++The journal mode doubles writes to storage, but can improve performance for ++scattered writes packed into a single journal write. ++bitmap mode can in theory achieve full write throughput of the device, ++but would not benefit from the potential scattered write optimization. ++.ad b ++.HP ++.ad l + \fB-r\fP|\fB--readahead\fP \fBauto\fP|\fBnone\fP|\fINumber\fP + .br + Sets read ahead sector count of an LV. 
+-- +1.8.3.1 + diff --git a/SOURCES/lvm2-2_03_10-lvconvert-no-validation-for-thin-pools-not-used-by-lvm.patch b/SOURCES/lvm2-2_03_10-lvconvert-no-validation-for-thin-pools-not-used-by-lvm.patch new file mode 100644 index 0000000..90ca054 --- /dev/null +++ b/SOURCES/lvm2-2_03_10-lvconvert-no-validation-for-thin-pools-not-used-by-lvm.patch @@ -0,0 +1,17 @@ + tools/lvconvert.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/tools/lvconvert.c b/tools/lvconvert.c +index f6d9a29..bb40930 100644 +--- a/tools/lvconvert.c ++++ b/tools/lvconvert.c +@@ -2388,7 +2388,8 @@ static int _lvconvert_thin_pool_repair(struct cmd_context *cmd, + goto deactivate_mlv; + } + +- if (thin_dump[0]) { ++ /* Check matching transactionId when thin-pool is used by lvm2 (transactionId != 0) */ ++ if (first_seg(pool_lv)->transaction_id && thin_dump[0]) { + argv[0] = thin_dump; + argv[1] = pms_path; + argv[2] = NULL; diff --git a/SOURCES/lvm2-2_03_10-move-pv_list-code-into-lib.patch b/SOURCES/lvm2-2_03_10-move-pv_list-code-into-lib.patch new file mode 100644 index 0000000..ad900bb --- /dev/null +++ b/SOURCES/lvm2-2_03_10-move-pv_list-code-into-lib.patch @@ -0,0 +1,641 @@ +From 945de675c47d891d1f181f15971d26ff959ac631 Mon Sep 17 00:00:00 2001 +From: David Teigland +Date: Tue, 14 Jan 2020 14:12:20 -0600 +Subject: [PATCH 1/3] move pv_list code into lib + +(cherry picked from commit b6b4ad8e28eff7476cb04c4cb93312b06605b82f) +--- + lib/Makefile.in | 1 + + lib/metadata/metadata-exported.h | 4 + + lib/metadata/pv_list.c | 291 +++++++++++++++++++++++++++++++++++++++ + tools/toollib.c | 270 ------------------------------------ + tools/toollib.h | 9 -- + 5 files changed, 296 insertions(+), 279 deletions(-) + create mode 100644 lib/metadata/pv_list.c + +diff --git a/lib/Makefile.in b/lib/Makefile.in +index c037b41..2a064f3 100644 +--- a/lib/Makefile.in ++++ b/lib/Makefile.in +@@ -74,6 +74,7 @@ SOURCES =\ + metadata/mirror.c \ + metadata/pool_manip.c \ + metadata/pv.c \ ++ metadata/pv_list.c \ + metadata/pv_manip.c \ + metadata/pv_map.c \ + metadata/raid_manip.c \ +diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h +index c61c85c..35c1231 100644 +--- a/lib/metadata/metadata-exported.h ++++ b/lib/metadata/metadata-exported.h +@@ -1385,4 +1385,8 @@ int vg_is_foreign(struct volume_group *vg); + + void vg_write_commit_bad_mdas(struct cmd_context *cmd, struct volume_group *vg); + ++struct dm_list *create_pv_list(struct dm_pool *mem, struct volume_group *vg, int argc, ++ char **argv, int allocatable_only); ++struct dm_list *clone_pv_list(struct dm_pool *mem, struct dm_list *pvsl); ++ + #endif +diff --git a/lib/metadata/pv_list.c b/lib/metadata/pv_list.c +new file mode 100644 +index 0000000..143b573 +--- /dev/null ++++ b/lib/metadata/pv_list.c +@@ -0,0 +1,291 @@ ++/* ++ * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved. ++ * Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved. ++ * ++ * This file is part of LVM2. ++ * ++ * This copyrighted material is made available to anyone wishing to use, ++ * modify, copy, or redistribute it subject to the terms and conditions ++ * of the GNU Lesser General Public License v.2.1. 
++ * ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this program; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++#include "lib/misc/lib.h" ++#include "lib/misc/lvm-string.h" ++#include "lib/datastruct/str_list.h" ++#include "lib/device/device.h" ++#include "lib/metadata/metadata.h" ++ ++/* ++ * Process physical extent range specifiers ++ */ ++static int _add_pe_range(struct dm_pool *mem, const char *pvname, ++ struct dm_list *pe_ranges, uint32_t start, uint32_t count) ++{ ++ struct pe_range *per; ++ ++ log_debug("Adding PE range: start PE " FMTu32 " length " FMTu32 " on %s.", ++ start, count, pvname); ++ ++ /* Ensure no overlap with existing areas */ ++ dm_list_iterate_items(per, pe_ranges) { ++ if (((start < per->start) && (start + count - 1 >= per->start)) || ++ ((start >= per->start) && ++ (per->start + per->count - 1) >= start)) { ++ log_error("Overlapping PE ranges specified (" FMTu32 ++ "-" FMTu32 ", " FMTu32 "-" FMTu32 ") on %s.", ++ start, start + count - 1, per->start, ++ per->start + per->count - 1, pvname); ++ return 0; ++ } ++ } ++ ++ if (!(per = dm_pool_alloc(mem, sizeof(*per)))) { ++ log_error("Allocation of list failed."); ++ return 0; ++ } ++ ++ per->start = start; ++ per->count = count; ++ dm_list_add(pe_ranges, &per->list); ++ ++ return 1; ++} ++ ++static int _xstrtouint32(const char *s, char **p, int base, uint32_t *result) ++{ ++ unsigned long ul; ++ ++ errno = 0; ++ ul = strtoul(s, p, base); ++ ++ if (errno || *p == s || ul > UINT32_MAX) ++ return 0; ++ ++ *result = ul; ++ ++ return 1; ++} ++ ++static int _parse_pes(struct dm_pool *mem, char *c, struct dm_list *pe_ranges, ++ const char *pvname, uint32_t size) ++{ ++ char *endptr; ++ uint32_t start, end, len; ++ ++ /* Default to whole PV */ ++ if (!c) { ++ if (!_add_pe_range(mem, pvname, pe_ranges, UINT32_C(0), size)) ++ return_0; ++ return 1; ++ } ++ ++ while (*c) { ++ if (*c != ':') ++ goto error; ++ ++ c++; ++ ++ /* Disallow :: and :\0 */ ++ if (*c == ':' || !*c) ++ goto error; ++ ++ /* Default to whole range */ ++ start = UINT32_C(0); ++ end = size - 1; ++ ++ /* Start extent given? */ ++ if (isdigit(*c)) { ++ if (!_xstrtouint32(c, &endptr, 10, &start)) ++ goto error; ++ c = endptr; ++ /* Just one number given? */ ++ if (!*c || *c == ':') ++ end = start; ++ } ++ /* Range? */ ++ if (*c == '-') { ++ c++; ++ if (isdigit(*c)) { ++ if (!_xstrtouint32(c, &endptr, 10, &end)) ++ goto error; ++ c = endptr; ++ } ++ } else if (*c == '+') { /* Length? */ ++ c++; ++ if (isdigit(*c)) { ++ if (!_xstrtouint32(c, &endptr, 10, &len)) ++ goto error; ++ c = endptr; ++ end = start + (len ? 
(len - 1) : 0); ++ } ++ } ++ ++ if (*c && *c != ':') ++ goto error; ++ ++ if ((start > end) || (end > size - 1)) { ++ log_error("PE range error: start extent %" PRIu32 " to " ++ "end extent %" PRIu32 ".", start, end); ++ return 0; ++ } ++ ++ if (!_add_pe_range(mem, pvname, pe_ranges, start, end - start + 1)) ++ return_0; ++ ++ } ++ ++ return 1; ++ ++ error: ++ log_error("Physical extent parsing error at %s.", c); ++ return 0; ++} ++ ++static int _create_pv_entry(struct dm_pool *mem, struct pv_list *pvl, ++ char *colon, int allocatable_only, struct dm_list *r) ++{ ++ const char *pvname; ++ struct pv_list *new_pvl = NULL, *pvl2; ++ struct dm_list *pe_ranges; ++ ++ pvname = pv_dev_name(pvl->pv); ++ if (allocatable_only && !(pvl->pv->status & ALLOCATABLE_PV)) { ++ log_warn("WARNING: Physical volume %s not allocatable.", pvname); ++ return 1; ++ } ++ ++ if (allocatable_only && is_missing_pv(pvl->pv)) { ++ log_warn("WARNING: Physical volume %s is missing.", pvname); ++ return 1; ++ } ++ ++ if (allocatable_only && ++ (pvl->pv->pe_count == pvl->pv->pe_alloc_count)) { ++ log_warn("WARNING: No free extents on physical volume \"%s\".", pvname); ++ return 1; ++ } ++ ++ dm_list_iterate_items(pvl2, r) ++ if (pvl->pv->dev == pvl2->pv->dev) { ++ new_pvl = pvl2; ++ break; ++ } ++ ++ if (!new_pvl) { ++ if (!(new_pvl = dm_pool_alloc(mem, sizeof(*new_pvl)))) { ++ log_error("Unable to allocate physical volume list."); ++ return 0; ++ } ++ ++ memcpy(new_pvl, pvl, sizeof(*new_pvl)); ++ ++ if (!(pe_ranges = dm_pool_alloc(mem, sizeof(*pe_ranges)))) { ++ log_error("Allocation of pe_ranges list failed."); ++ return 0; ++ } ++ dm_list_init(pe_ranges); ++ new_pvl->pe_ranges = pe_ranges; ++ dm_list_add(r, &new_pvl->list); ++ } ++ ++ /* Determine selected physical extents */ ++ if (!_parse_pes(mem, colon, new_pvl->pe_ranges, pv_dev_name(pvl->pv), ++ pvl->pv->pe_count)) ++ return_0; ++ ++ return 1; ++} ++ ++struct dm_list *create_pv_list(struct dm_pool *mem, struct volume_group *vg, int argc, ++ char **argv, int allocatable_only) ++{ ++ struct dm_list *r; ++ struct pv_list *pvl; ++ struct dm_list tagsl, arg_pvnames; ++ char *pvname = NULL; ++ char *colon, *at_sign, *tagname; ++ int i; ++ ++ /* Build up list of PVs */ ++ if (!(r = dm_pool_alloc(mem, sizeof(*r)))) { ++ log_error("Allocation of list failed."); ++ return NULL; ++ } ++ dm_list_init(r); ++ ++ dm_list_init(&tagsl); ++ dm_list_init(&arg_pvnames); ++ ++ for (i = 0; i < argc; i++) { ++ dm_unescape_colons_and_at_signs(argv[i], &colon, &at_sign); ++ ++ if (at_sign && (at_sign == argv[i])) { ++ tagname = at_sign + 1; ++ if (!validate_tag(tagname)) { ++ log_error("Skipping invalid tag %s.", tagname); ++ continue; ++ } ++ dm_list_iterate_items(pvl, &vg->pvs) { ++ if (str_list_match_item(&pvl->pv->tags, ++ tagname)) { ++ if (!_create_pv_entry(mem, pvl, NULL, ++ allocatable_only, ++ r)) ++ return_NULL; ++ } ++ } ++ continue; ++ } ++ ++ pvname = argv[i]; ++ ++ if (colon && !(pvname = dm_pool_strndup(mem, pvname, ++ (unsigned) (colon - pvname)))) { ++ log_error("Failed to clone PV name."); ++ return NULL; ++ } ++ ++ if (!(pvl = find_pv_in_vg(vg, pvname))) { ++ log_error("Physical Volume \"%s\" not found in " ++ "Volume Group \"%s\".", pvname, vg->name); ++ return NULL; ++ } ++ if (!_create_pv_entry(mem, pvl, colon, allocatable_only, r)) ++ return_NULL; ++ } ++ ++ if (dm_list_empty(r)) ++ log_error("No specified PVs have space available."); ++ ++ return dm_list_empty(r) ? 
NULL : r; ++} ++ ++struct dm_list *clone_pv_list(struct dm_pool *mem, struct dm_list *pvsl) ++{ ++ struct dm_list *r; ++ struct pv_list *pvl, *new_pvl; ++ ++ /* Build up list of PVs */ ++ if (!(r = dm_pool_alloc(mem, sizeof(*r)))) { ++ log_error("Allocation of list failed."); ++ return NULL; ++ } ++ dm_list_init(r); ++ ++ dm_list_iterate_items(pvl, pvsl) { ++ if (!(new_pvl = dm_pool_zalloc(mem, sizeof(*new_pvl)))) { ++ log_error("Unable to allocate physical volume list."); ++ return NULL; ++ } ++ ++ memcpy(new_pvl, pvl, sizeof(*new_pvl)); ++ dm_list_add(r, &new_pvl->list); ++ } ++ ++ return r; ++} ++ +diff --git a/tools/toollib.c b/tools/toollib.c +index a5304bf..6386a69 100644 +--- a/tools/toollib.c ++++ b/tools/toollib.c +@@ -457,276 +457,6 @@ const char *extract_vgname(struct cmd_context *cmd, const char *lv_name) + return vg_name; + } + +-/* +- * Process physical extent range specifiers +- */ +-static int _add_pe_range(struct dm_pool *mem, const char *pvname, +- struct dm_list *pe_ranges, uint32_t start, uint32_t count) +-{ +- struct pe_range *per; +- +- log_debug("Adding PE range: start PE " FMTu32 " length " FMTu32 " on %s.", +- start, count, pvname); +- +- /* Ensure no overlap with existing areas */ +- dm_list_iterate_items(per, pe_ranges) { +- if (((start < per->start) && (start + count - 1 >= per->start)) || +- ((start >= per->start) && +- (per->start + per->count - 1) >= start)) { +- log_error("Overlapping PE ranges specified (" FMTu32 +- "-" FMTu32 ", " FMTu32 "-" FMTu32 ") on %s.", +- start, start + count - 1, per->start, +- per->start + per->count - 1, pvname); +- return 0; +- } +- } +- +- if (!(per = dm_pool_alloc(mem, sizeof(*per)))) { +- log_error("Allocation of list failed."); +- return 0; +- } +- +- per->start = start; +- per->count = count; +- dm_list_add(pe_ranges, &per->list); +- +- return 1; +-} +- +-static int _xstrtouint32(const char *s, char **p, int base, uint32_t *result) +-{ +- unsigned long ul; +- +- errno = 0; +- ul = strtoul(s, p, base); +- +- if (errno || *p == s || ul > UINT32_MAX) +- return 0; +- +- *result = ul; +- +- return 1; +-} +- +-static int _parse_pes(struct dm_pool *mem, char *c, struct dm_list *pe_ranges, +- const char *pvname, uint32_t size) +-{ +- char *endptr; +- uint32_t start, end, len; +- +- /* Default to whole PV */ +- if (!c) { +- if (!_add_pe_range(mem, pvname, pe_ranges, UINT32_C(0), size)) +- return_0; +- return 1; +- } +- +- while (*c) { +- if (*c != ':') +- goto error; +- +- c++; +- +- /* Disallow :: and :\0 */ +- if (*c == ':' || !*c) +- goto error; +- +- /* Default to whole range */ +- start = UINT32_C(0); +- end = size - 1; +- +- /* Start extent given? */ +- if (isdigit(*c)) { +- if (!_xstrtouint32(c, &endptr, 10, &start)) +- goto error; +- c = endptr; +- /* Just one number given? */ +- if (!*c || *c == ':') +- end = start; +- } +- /* Range? */ +- if (*c == '-') { +- c++; +- if (isdigit(*c)) { +- if (!_xstrtouint32(c, &endptr, 10, &end)) +- goto error; +- c = endptr; +- } +- } else if (*c == '+') { /* Length? */ +- c++; +- if (isdigit(*c)) { +- if (!_xstrtouint32(c, &endptr, 10, &len)) +- goto error; +- c = endptr; +- end = start + (len ? 
(len - 1) : 0);
+-		}
+-	}
+-
+-	if (*c && *c != ':')
+-		goto error;
+-
+-	if ((start > end) || (end > size - 1)) {
+-		log_error("PE range error: start extent %" PRIu32 " to "
+-			  "end extent %" PRIu32 ".", start, end);
+-		return 0;
+-	}
+-
+-	if (!_add_pe_range(mem, pvname, pe_ranges, start, end - start + 1))
+-		return_0;
+-
+-	}
+-
+-	return 1;
+-
+-      error:
+-	log_error("Physical extent parsing error at %s.", c);
+-	return 0;
+-}
+-
+-static int _create_pv_entry(struct dm_pool *mem, struct pv_list *pvl,
+-			    char *colon, int allocatable_only, struct dm_list *r)
+-{
+-	const char *pvname;
+-	struct pv_list *new_pvl = NULL, *pvl2;
+-	struct dm_list *pe_ranges;
+-
+-	pvname = pv_dev_name(pvl->pv);
+-	if (allocatable_only && !(pvl->pv->status & ALLOCATABLE_PV)) {
+-		log_warn("WARNING: Physical volume %s not allocatable.", pvname);
+-		return 1;
+-	}
+-
+-	if (allocatable_only && is_missing_pv(pvl->pv)) {
+-		log_warn("WARNING: Physical volume %s is missing.", pvname);
+-		return 1;
+-	}
+-
+-	if (allocatable_only &&
+-	    (pvl->pv->pe_count == pvl->pv->pe_alloc_count)) {
+-		log_warn("WARNING: No free extents on physical volume \"%s\".", pvname);
+-		return 1;
+-	}
+-
+-	dm_list_iterate_items(pvl2, r)
+-		if (pvl->pv->dev == pvl2->pv->dev) {
+-			new_pvl = pvl2;
+-			break;
+-		}
+-
+-	if (!new_pvl) {
+-		if (!(new_pvl = dm_pool_alloc(mem, sizeof(*new_pvl)))) {
+-			log_error("Unable to allocate physical volume list.");
+-			return 0;
+-		}
+-
+-		memcpy(new_pvl, pvl, sizeof(*new_pvl));
+-
+-		if (!(pe_ranges = dm_pool_alloc(mem, sizeof(*pe_ranges)))) {
+-			log_error("Allocation of pe_ranges list failed.");
+-			return 0;
+-		}
+-		dm_list_init(pe_ranges);
+-		new_pvl->pe_ranges = pe_ranges;
+-		dm_list_add(r, &new_pvl->list);
+-	}
+-
+-	/* Determine selected physical extents */
+-	if (!_parse_pes(mem, colon, new_pvl->pe_ranges, pv_dev_name(pvl->pv),
+-			pvl->pv->pe_count))
+-		return_0;
+-
+-	return 1;
+-}
+-
+-struct dm_list *create_pv_list(struct dm_pool *mem, struct volume_group *vg, int argc,
+-			       char **argv, int allocatable_only)
+-{
+-	struct dm_list *r;
+-	struct pv_list *pvl;
+-	struct dm_list tagsl, arg_pvnames;
+-	char *pvname = NULL;
+-	char *colon, *at_sign, *tagname;
+-	int i;
+-
+-	/* Build up list of PVs */
+-	if (!(r = dm_pool_alloc(mem, sizeof(*r)))) {
+-		log_error("Allocation of list failed.");
+-		return NULL;
+-	}
+-	dm_list_init(r);
+-
+-	dm_list_init(&tagsl);
+-	dm_list_init(&arg_pvnames);
+-
+-	for (i = 0; i < argc; i++) {
+-		dm_unescape_colons_and_at_signs(argv[i], &colon, &at_sign);
+-
+-		if (at_sign && (at_sign == argv[i])) {
+-			tagname = at_sign + 1;
+-			if (!validate_tag(tagname)) {
+-				log_error("Skipping invalid tag %s.", tagname);
+-				continue;
+-			}
+-			dm_list_iterate_items(pvl, &vg->pvs) {
+-				if (str_list_match_item(&pvl->pv->tags,
+-							tagname)) {
+-					if (!_create_pv_entry(mem, pvl, NULL,
+-							      allocatable_only,
+-							      r))
+-						return_NULL;
+-				}
+-			}
+-			continue;
+-		}
+-
+-		pvname = argv[i];
+-
+-		if (colon && !(pvname = dm_pool_strndup(mem, pvname,
+-					(unsigned) (colon - pvname)))) {
+-			log_error("Failed to clone PV name.");
+-			return NULL;
+-		}
+-
+-		if (!(pvl = find_pv_in_vg(vg, pvname))) {
+-			log_error("Physical Volume \"%s\" not found in "
+-				  "Volume Group \"%s\".", pvname, vg->name);
+-			return NULL;
+-		}
+-		if (!_create_pv_entry(mem, pvl, colon, allocatable_only, r))
+-			return_NULL;
+-	}
+-
+-	if (dm_list_empty(r))
+-		log_error("No specified PVs have space available.");
+-
+-	return dm_list_empty(r) ? NULL : r;
+-}
+-
+-struct dm_list *clone_pv_list(struct dm_pool *mem, struct dm_list *pvsl)
+-{
+-	struct dm_list *r;
+-	struct pv_list *pvl, *new_pvl;
+-
+-	/* Build up list of PVs */
+-	if (!(r = dm_pool_alloc(mem, sizeof(*r)))) {
+-		log_error("Allocation of list failed.");
+-		return NULL;
+-	}
+-	dm_list_init(r);
+-
+-	dm_list_iterate_items(pvl, pvsl) {
+-		if (!(new_pvl = dm_pool_zalloc(mem, sizeof(*new_pvl)))) {
+-			log_error("Unable to allocate physical volume list.");
+-			return NULL;
+-		}
+-
+-		memcpy(new_pvl, pvl, sizeof(*new_pvl));
+-		dm_list_add(r, &new_pvl->list);
+-	}
+-
+-	return r;
+-}
+-
+ const char _pe_size_may_not_be_negative_msg[] = "Physical extent size may not be negative.";
+ 
+ int vgcreate_params_set_defaults(struct cmd_context *cmd,
+diff --git a/tools/toollib.h b/tools/toollib.h
+index 9102f55..53a5e5b 100644
+--- a/tools/toollib.h
++++ b/tools/toollib.h
+@@ -182,15 +182,6 @@ void opt_array_to_str(struct cmd_context *cmd, int *opts, int count,
+ int pvcreate_params_from_args(struct cmd_context *cmd, struct pvcreate_params *pp);
+ int pvcreate_each_device(struct cmd_context *cmd, struct processing_handle *handle, struct pvcreate_params *pp);
+ 
+-/*
+- * Builds a list of pv's from the names in argv.  Used in
+- * lvcreate/extend.
+- */
+-struct dm_list *create_pv_list(struct dm_pool *mem, struct volume_group *vg, int argc,
+-			       char **argv, int allocatable_only);
+-
+-struct dm_list *clone_pv_list(struct dm_pool *mem, struct dm_list *pvs);
+-
+ int vgcreate_params_set_defaults(struct cmd_context *cmd,
+ 				 struct vgcreate_params *vp_def,
+ 				 struct volume_group *vg);
+-- 
+1.8.3.1
+
diff --git a/SOURCES/lvm2-2_03_10-test-repair-of-thin-pool-used-by-foreign-apps.patch b/SOURCES/lvm2-2_03_10-test-repair-of-thin-pool-used-by-foreign-apps.patch
new file mode 100644
index 0000000..c05d18d
--- /dev/null
+++ b/SOURCES/lvm2-2_03_10-test-repair-of-thin-pool-used-by-foreign-apps.patch
@@ -0,0 +1,82 @@
+ test/shell/thin-foreign-repair.sh | 72 +++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 72 insertions(+)
+ create mode 100644 test/shell/thin-foreign-repair.sh
+
+diff --git a/test/shell/thin-foreign-repair.sh b/test/shell/thin-foreign-repair.sh
+new file mode 100644
+index 0000000..147a9a0
+--- /dev/null
++++ b/test/shell/thin-foreign-repair.sh
+@@ -0,0 +1,72 @@
++#!/usr/bin/env bash
++
++# Copyright (C) 2020 Red Hat, Inc. All rights reserved.
++#
++# This copyrighted material is made available to anyone wishing to use,
++# modify, copy, or redistribute it subject to the terms and conditions
++# of the GNU General Public License v.2.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software Foundation,
++# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++
++# test foreing user of thin-pool
++
++
++SKIP_WITH_LVMPOLLD=1
++
++. lib/inittest
++
++cleanup_mounted_and_teardown()
++{
++	dmsetup remove $THIN || true
++	vgremove -ff $vg
++	aux teardown
++}
++
++#
++# Main
++#
++aux have_thin 1 0 0 || skip
++which mkfs.ext4 || skip
++
++# Use our mkfs config file to get approximately same results
++# TODO: maybe use it for all test via some 'prepare' function
++export MKE2FS_CONFIG="$TESTOLDPWD/lib/mke2fs.conf"
++
++aux prepare_vg 2 64
++
++# Create named pool only
++lvcreate -L2 -T $vg/pool
++
++POOL="$vg-pool"
++THIN="${PREFIX}_thin"
++
++# Foreing user is using own ioctl command to create thin devices
++dmsetup message $POOL 0 "create_thin 0"
++dmsetup message $POOL 0 "set_transaction_id 0 2"
++
++# Once the transaction id has changed, lvm2 shall not be able to create thinLV
++fail lvcreate -V10 $vg/pool
++
++trap 'cleanup_mounted_and_teardown' EXIT
++
++# 20M thin device
++dmsetup create "$THIN" --table "0 40960 thin $DM_DEV_DIR/mapper/$POOL 0"
++
++mkfs.ext4 "$DM_DEV_DIR/mapper/$THIN"
++
++dmsetup remove "$THIN"
++
++lvchange -an $vg/pool
++
++# Repair thin-pool used by 'foreing' apps (setting their own tid)
++lvconvert --repair $vg/pool 2>&1 | tee out
++
++not grep "Transaction id" out
++
++lvchange -ay $vg/pool
++
++dmsetup create "$THIN" --table "0 40960 thin $DM_DEV_DIR/mapper/$POOL 0"
++
++fsck -n "$DM_DEV_DIR/mapper/$THIN"
diff --git a/SOURCES/lvm2-rhel8.patch b/SOURCES/lvm2-rhel8.patch
index e6b4752..5a2a411 100644
--- a/SOURCES/lvm2-rhel8.patch
+++ b/SOURCES/lvm2-rhel8.patch
@@ -3,16 +3,16 @@
  2 files changed, 2 insertions(+), 2 deletions(-)
 
 diff --git a/VERSION b/VERSION
-index c5cc2e4..49d4854 100644
+index 8c4a9a8..00618e0 100644
 --- a/VERSION
 +++ b/VERSION
 @@ -1 +1 @@
--2.03.08(2) (2020-02-11)
-+2.03.08(2)-RHEL8 (2020-02-11)
+-2.03.09(2) (2020-03-26)
++2.03.09(2)-RHEL8 (2020-04-21)
 diff --git a/VERSION_DM b/VERSION_DM
-index f909625..3c2949a 100644
+index 0ae62fd..b9ec43e 100644
 --- a/VERSION_DM
 +++ b/VERSION_DM
 @@ -1 +1 @@
--1.02.169 (2020-02-11)
-+1.02.169-RHEL8 (2020-02-11)
+-1.02.171 (2020-03-26)
++1.02.171-RHEL8 (2020-04-21)
diff --git a/SOURCES/lvm2-set-default-preferred_names.patch b/SOURCES/lvm2-set-default-preferred_names.patch
index 853cf51..ece62c4 100644
--- a/SOURCES/lvm2-set-default-preferred_names.patch
+++ b/SOURCES/lvm2-set-default-preferred_names.patch
@@ -1,3 +1,9 @@
+From 6a078fe01b47fa165226a15263c8bd6350b1c307 Mon Sep 17 00:00:00 2001
+From: Marian Csontos
+Date: Thu, 3 Jan 2019 13:49:08 +0100
+Subject: [PATCH 2/8] lvm2: set default preferred_names
+
+---
  conf/example.conf.in         | 3 ++-
  lib/config/config_settings.h | 2 +-
  2 files changed, 3 insertions(+), 2 deletions(-)
@@ -29,3 +35,6 @@ index 2bb72ba..dce9705 100644
  	"Select which path name to display for a block device.\n"
  	"If multiple path names exist for a block device, and LVM needs to\n"
  	"display a name for the device, the path names are matched against\n"
+-- 
+1.8.3.1
+
diff --git a/SOURCES/lvm2-test-skip-problematic-tests.patch b/SOURCES/lvm2-test-skip-problematic-tests.patch
index 68df0ee..80edd99 100644
--- a/SOURCES/lvm2-test-skip-problematic-tests.patch
+++ b/SOURCES/lvm2-test-skip-problematic-tests.patch
@@ -1,3 +1,9 @@
+From 74f05f17ea3d1a3639a65ba337f2b7df7f4981bf Mon Sep 17 00:00:00 2001
+From: Marian Csontos
+Date: Sun, 18 Aug 2019 17:31:30 +0200
+Subject: [PATCH 3/8] lvm2: test: skip-problematic-tests
+
+---
  test/dbus/lvmdbustest.py     | 1 +
  test/shell/lvcreate-usage.sh | 6 +++---
  2 files changed, 4 insertions(+), 3 deletions(-)
@@ -37,3 +43,6 @@ index 6d46939..9e00f1c 100644
  
  lvremove -ff $vg
  
  #
+-- 
+1.8.3.1
+
diff --git a/SPECS/lvm2.spec b/SPECS/lvm2.spec
index 8a86760..aea1291 100644
--- a/SPECS/lvm2.spec
+++ b/SPECS/lvm2.spec
@@ -1,4 +1,4 @@
-%global device_mapper_version 1.02.169
+%global device_mapper_version 1.02.171
 
 %global enable_cache 1
 %global enable_cluster 1
@@ -11,6 +11,7 @@
 %global enable_testsuite 1
 %global enable_vdo 1
 %global enable_writecache 1
+%global enable_integrity 1
 
 %global system_release_version 23
 %global systemd_version 189-3
@@ -56,24 +57,28 @@ Name: lvm2
 %if 0%{?rhel}
 Epoch: %{rhel}
 %endif
-Version: 2.03.08
-Release: 3%{?dist}
+Version: 2.03.09
+Release: 5%{?dist}
 License: GPLv2
 URL: http://sourceware.org/lvm2
 Source0: ftp://sourceware.org/pub/lvm2/releases/LVM2.%{version}.tgz
 Patch0: lvm2-rhel8.patch
 Patch1: lvm2-set-default-preferred_names.patch
 Patch2: lvm2-test-skip-problematic-tests.patch
-Patch3: lvm2-2_03_09-cachevol-stop-dm-errors-with-uncaching-cache-with-ca.patch
-Patch4: lvm2-2_03_09-writecache-check-if-cachevol-is-writable.patch
-Patch5: lvm2-2_03_09-thin-don-t-use-writecache-for-poolmetadata.patch
-Patch6: lvm2-2_03_09-writecache-drop-real-dm-suffix.patch
-Patch7: lvm2-2_03_09-writecache-working-real-dm-uuid-suffix-for-wcorig-lv.patch
-Patch8: lvm2-2_03_09-writecache-fix-watermark-error-message.patch
-Patch9: lvm2-2_03_09-writecache-allow-removing-wcorig-lv.patch
-# BZ 1808012:
-Patch10: lvm2-2_03_09-writecache-require-inactive-LV-to-attach.patch
-Patch11: lvm2-2_03_09-test-Can-not-attach-writecache-to-active-volume.patch
+Patch3: lvm2-2_03_10-lvconvert-no-validation-for-thin-pools-not-used-by-lvm.patch
+Patch4: lvm2-2_03_10-test-repair-of-thin-pool-used-by-foreign-apps.patch
+Patch5: lvm2-2_03_10-WHATS_NEWS-update.patch
+Patch6: lvm2-2_03_10-blkdeactivate-add-support-for-VDO-in-blkdeactivate-script.patch
+Patch7: lvm2-2_03_10-Fix-scripts-lvmlocks.service.in-using-nonexistent-lock-opt-autowait.patch
+Patch8: lvm2-2_03_10-move-pv_list-code-into-lib.patch
+Patch9: lvm2-2_03_10-Allow-dm-integrity-to-be-used-for-raid-images.patch
+Patch10: lvm2-2_03_10-WHATS_NEW-integrity-with-raid.patch
+Patch11: lvm2-2_03_10-build-make-generate.patch
+Patch12: 0001-Merge-master-up-to-commit-53803821de16.patch
+Patch13: 0002-Merge-master-up-to-commit-be61bd6ff5c6.patch
+Patch14: 0003-Merge-master-up-to-commit-c1d136fea3d1.patch
+# BZ 1868169:
+Patch15: 0004-Revert-wipe_lv-changes.patch
 
 BuildRequires: gcc
 %if %{enable_testsuite}
@@ -141,6 +146,10 @@ or more physical volumes and creating one or more logical volumes
 %patch9 -p1 -b .backup9
 %patch10 -p1 -b .backup10
 %patch11 -p1 -b .backup11
+%patch12 -p1 -b .backup12
+%patch13 -p1 -b .backup13
+%patch14 -p1 -b .backup14
+%patch15 -p1 -b .backup15
 
 %build
 %global _default_pid_dir /run
@@ -187,7 +196,11 @@ or more physical volumes and creating one or more logical volumes
 %global configure_writecache --with-writecache=internal
 %endif
 
-%configure --with-default-dm-run-dir=%{_default_dm_run_dir} --with-default-run-dir=%{_default_run_dir} --with-default-pid-dir=%{_default_pid_dir} --with-default-locking-dir=%{_default_locking_dir} --with-usrlibdir=%{_libdir} --enable-fsadm --enable-write_install --with-user= --with-group= --with-device-uid=0 --with-device-gid=6 --with-device-mode=0660 --enable-pkgconfig --enable-cmdlib --enable-dmeventd --enable-blkid_wiping %{?configure_cluster} %{?configure_cmirror} %{?configure_udev} %{?configure_thin} %{?configure_cache} %{?configure_lvmpolld} %{?configure_lockd_dlm} %{?configure_lockd_sanlock} %{?configure_lvmdbusd} %{?configure_dmfilemapd} %{?configure_writecache} %{?configure_vdo} --disable-silent-rules
+%if %{enable_integrity}
+%global configure_integrity --with-integrity=internal
+%endif
+
+%configure --with-default-dm-run-dir=%{_default_dm_run_dir} --with-default-run-dir=%{_default_run_dir} --with-default-pid-dir=%{_default_pid_dir} --with-default-locking-dir=%{_default_locking_dir} --with-usrlibdir=%{_libdir} --enable-fsadm --enable-write_install --with-user= --with-group= --with-device-uid=0 --with-device-gid=6 --with-device-mode=0660 --enable-pkgconfig --enable-cmdlib --enable-dmeventd --enable-blkid_wiping %{?configure_cluster} %{?configure_cmirror} %{?configure_udev} %{?configure_thin} %{?configure_cache} %{?configure_lvmpolld} %{?configure_lockd_dlm} %{?configure_lockd_sanlock} %{?configure_lvmdbusd} %{?configure_dmfilemapd} %{?configure_writecache} %{?configure_vdo} %{?configure_integrity} --disable-silent-rules
 
 make %{?_smp_mflags}
 
@@ -198,7 +211,7 @@ make install_systemd_units DESTDIR=$RPM_BUILD_ROOT
 make install_systemd_generators DESTDIR=$RPM_BUILD_ROOT
 make install_tmpfiles_configuration DESTDIR=$RPM_BUILD_ROOT
 %if %{enable_testsuite}
-make -C test install DESTDIR=$RPM_BUILD_ROOT
+make install DESTDIR=$RPM_BUILD_ROOT -C test
 %endif
 
 %post
@@ -741,6 +754,24 @@ An extensive functional testsuite for LVM2.
 %endif
 
 %changelog
+* Wed Aug 12 2020 Marian Csontos - 2.03.09-5
+- Revert wipe_lv changes.
+
+* Sun Aug 09 2020 Marian Csontos - 2.03.09-4
+- Merge fixes from upstream.
+
+* Mon Jun 29 2020 Marian Csontos - 2.03.09-3
+- Merge fixes from upstream.
+
+* Thu May 21 2020 Marian Csontos - 2.03.09-2
+- Merge fixes from upstream.
+
+* Fri Apr 24 2020 Marian Csontos - 2.03.09-1
+- Fix support for lvconvert --repair of pools used by third party applications.
+- Fix formating of vdopool (vdo_slab_size_mb was smaller by 2 bits).
+- Fix busy loop in dmeventd.
+- Fix lvmlocks.service using incorrect option.
+
 * Wed Mar 18 2020 Marian Csontos - 2.03.08-3
 - Attaching writecache require inactive LV.