- Fix passing size for pvresize over DBus
  Resolves: RHEL-45872
- Upstream kernel VDO support
  Resolves: RHEL-31953
From 5db24c0542a6c7ee44e2d487969ef71170d9cb33 Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 2 Jul 2024 12:30:42 +0200
Subject: [PATCH 1/2] lvm: Check for dm-vdo instead of kvdo module for VDO
 support

VDO is available in the upstream kernel since 6.9 as dm-vdo so we
should check for it instead of the out-of-tree kvdo module.
---
src/plugins/lvm-dbus.c | 2 +-
src/plugins/lvm.c | 2 +-
tests/lvm_dbus_tests.py | 4 ++--
tests/lvm_test.py | 4 ++--
4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/plugins/lvm-dbus.c b/src/plugins/lvm-dbus.c
index d080b389..0c017306 100644
--- a/src/plugins/lvm-dbus.c
+++ b/src/plugins/lvm-dbus.c
@@ -321,7 +321,7 @@ static const UtilFeatureDep features[FEATURES_LAST] = {
#define MODULE_DEPS_VDO_MASK (1 << MODULE_DEPS_VDO)
#define MODULE_DEPS_LAST 1

-static const gchar*const module_deps[MODULE_DEPS_LAST] = { "kvdo" };
+static const gchar*const module_deps[MODULE_DEPS_LAST] = { "dm-vdo" };

/**
* bd_lvm_init:
diff --git a/src/plugins/lvm.c b/src/plugins/lvm.c
index 361a084b..dc7491a7 100644
--- a/src/plugins/lvm.c
+++ b/src/plugins/lvm.c
@@ -329,7 +329,7 @@ static const UtilFeatureDep features[FEATURES_LAST] = {
#define MODULE_DEPS_VDO_MASK (1 << MODULE_DEPS_VDO)
#define MODULE_DEPS_LAST 1

-static const gchar*const module_deps[MODULE_DEPS_LAST] = { "kvdo" };
+static const gchar*const module_deps[MODULE_DEPS_LAST] = { "dm-vdo" };

#define UNUSED __attribute__((unused))

diff --git a/tests/lvm_dbus_tests.py b/tests/lvm_dbus_tests.py
index fc270cb5..9f302611 100644
--- a/tests/lvm_dbus_tests.py
+++ b/tests/lvm_dbus_tests.py
@@ -2029,11 +2029,11 @@ class LVMVDOTest(LVMTestCase):

@classmethod
def setUpClass(cls):
- if not BlockDev.utils_have_kernel_module("kvdo"):
+ if not BlockDev.utils_have_kernel_module("dm-vdo"):
raise unittest.SkipTest("VDO kernel module not available, skipping.")

try:
- BlockDev.utils_load_kernel_module("kvdo")
+ BlockDev.utils_load_kernel_module("dm-vdo")
except GLib.GError as e:
if "File exists" not in e.message:
raise unittest.SkipTest("cannot load VDO kernel module, skipping.")
diff --git a/tests/lvm_test.py b/tests/lvm_test.py
index a4c90172..b1d65baf 100644
--- a/tests/lvm_test.py
+++ b/tests/lvm_test.py
@@ -1933,11 +1933,11 @@ class LVMVDOTest(LVMTestCase):

@classmethod
def setUpClass(cls):
- if not BlockDev.utils_have_kernel_module("kvdo"):
+ if not BlockDev.utils_have_kernel_module("dm-vdo"):
raise unittest.SkipTest("VDO kernel module not available, skipping.")

try:
- BlockDev.utils_load_kernel_module("kvdo")
+ BlockDev.utils_load_kernel_module("dm-vdo")
except GLib.GError as e:
if "File exists" not in e.message:
raise unittest.SkipTest("cannot load VDO kernel module, skipping.")
--
2.45.2

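Patch 1 only renames the module dependency, in the plugin and in the test guards above. A minimal standalone sketch of that same module guard, runnable outside the test harness, follows; the gi namespace version "3.0" for the libblockdev 3.x introspection data is an assumption here, and depending on the environment BlockDev.ensure_init() may need to be called first:

    import gi
    gi.require_version("GLib", "2.0")
    gi.require_version("BlockDev", "3.0")
    from gi.repository import BlockDev, GLib

    # Probe for the in-tree dm-vdo module (kernel >= 6.9) instead of the
    # out-of-tree kvdo module.
    if not BlockDev.utils_have_kernel_module("dm-vdo"):
        raise SystemExit("dm-vdo kernel module not available")

    try:
        BlockDev.utils_load_kernel_module("dm-vdo")
    except GLib.GError as e:
        # "File exists" only means the module is already loaded.
        if "File exists" not in e.message:
            raise
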
From 6cf4de50d385d801c1936fc67ee2902f9c60e04e Mon Sep 17 00:00:00 2001
From: Vojtech Trefny <vtrefny@redhat.com>
Date: Tue, 2 Jul 2024 12:32:46 +0200
Subject: [PATCH 2/2] lvm: Get VDO stats from device mapper instead of
 /sys/kvdo

The /sys/kvdo API has been removed so we need to get the full
statistics using libdevmapper now.
---
configure.ac | 4 +
dist/libblockdev.spec.in | 2 +
src/lib/plugin_apis/lvm.api | 6 +-
src/plugins/Makefile.am | 8 +-
src/plugins/lvm-dbus.c | 18 +--
src/plugins/lvm.c | 18 +--
src/plugins/vdo_stats.c | 213 ++++++++++++++++++++++--------------
tests/lvm_dbus_tests.py | 4 +-
tests/lvm_test.py | 10 +-
9 files changed, 175 insertions(+), 108 deletions(-)

diff --git a/configure.ac b/configure.ac
index 02b26e3e..cc875bb4 100644
--- a/configure.ac
+++ b/configure.ac
@@ -193,6 +193,10 @@ AS_IF([test "x$with_dm" != "xno" -o "x$with_lvm" != "xno" -o "x$with_lvm_dbus" !
[LIBBLOCKDEV_PKG_CHECK_MODULES([DEVMAPPER], [devmapper >= 1.02.93])],
[])

+AS_IF([test "x$with_lvm" != "xno" -o "x$with_lvm_dbus" != "xno"],
+ [LIBBLOCKDEV_PKG_CHECK_MODULES([YAML], [yaml-0.1])],
+ [])
+
AS_IF([test "x$with_part" != "xno"],
[LIBBLOCKDEV_PKG_CHECK_MODULES([FDISK], [fdisk >= 2.31.0])],
[])
diff --git a/dist/libblockdev.spec.in b/dist/libblockdev.spec.in
index 768aa0c0..e8703411 100644
--- a/dist/libblockdev.spec.in
+++ b/dist/libblockdev.spec.in
@@ -290,6 +290,7 @@ with the libblockdev-loop plugin/library.
%if %{with_lvm}
%package lvm
BuildRequires: device-mapper-devel
+BuildRequires: libyaml-devel
Summary: The LVM plugin for the libblockdev library
Requires: %{name}-utils%{?_isa} = %{version}-%{release}
Requires: lvm2
@@ -312,6 +313,7 @@ with the libblockdev-lvm plugin/library.
%if %{with_lvm_dbus}
%package lvm-dbus
BuildRequires: device-mapper-devel
+BuildRequires: libyaml-devel
Summary: The LVM plugin for the libblockdev library
Requires: %{name}-utils%{?_isa} = %{version}-%{release}
Requires: lvm2-dbusd >= 2.02.156
diff --git a/src/lib/plugin_apis/lvm.api b/src/lib/plugin_apis/lvm.api
index 2152aa79..3dd5ea4a 100644
--- a/src/lib/plugin_apis/lvm.api
+++ b/src/lib/plugin_apis/lvm.api
@@ -1965,10 +1965,10 @@ BDLVMVDOWritePolicy bd_lvm_get_vdo_write_policy_from_str (const gchar *policy_st
* statistics or %NULL in case of error
* (@error gets populated in those cases)
*
- * Statistics are collected from the values exposed by the kernel `kvdo` module
- * at the `/sys/kvdo/<VDO_NAME>/statistics/` path.
+ * Statistics are collected from the values exposed by the kernel `dm-vdo` module.
+ *
* Some of the keys are computed to mimic the information produced by the vdo tools.
- * Please note the contents of the hashtable may vary depending on the actual kvdo module version.
+ * Please note the contents of the hashtable may vary depending on the actual dm-vdo module version.
*
* Tech category: %BD_LVM_TECH_VDO-%BD_LVM_TECH_MODE_QUERY
*/
diff --git a/src/plugins/Makefile.am b/src/plugins/Makefile.am
index 0195e9e1..71bacb7c 100644
--- a/src/plugins/Makefile.am
+++ b/src/plugins/Makefile.am
@@ -97,16 +97,16 @@ libbd_loop_la_SOURCES = loop.c loop.h
endif

if WITH_LVM
-libbd_lvm_la_CFLAGS = $(GLIB_CFLAGS) $(GIO_CFLAGS) $(DEVMAPPER_CFLAGS) -Wall -Wextra -Werror
-libbd_lvm_la_LIBADD = ${builddir}/../utils/libbd_utils.la -lm $(GLIB_LIBS) $(GIO_LIBS) $(DEVMAPPER_LIBS)
+libbd_lvm_la_CFLAGS = $(GLIB_CFLAGS) $(GIO_CFLAGS) $(DEVMAPPER_CFLAGS) $(YAML_CFLAGS) -Wall -Wextra -Werror
+libbd_lvm_la_LIBADD = ${builddir}/../utils/libbd_utils.la -lm $(GLIB_LIBS) $(GIO_LIBS) $(DEVMAPPER_LIBS) $(YAML_LIBS)
libbd_lvm_la_LDFLAGS = -L${srcdir}/../utils/ -version-info 3:0:0 -Wl,--no-undefined -export-symbols-regex '^bd_.*'
libbd_lvm_la_CPPFLAGS = -I${builddir}/../../include/
libbd_lvm_la_SOURCES = lvm.c lvm.h check_deps.c check_deps.h dm_logging.c dm_logging.h vdo_stats.c vdo_stats.h
endif

if WITH_LVM_DBUS
-libbd_lvm_dbus_la_CFLAGS = $(GLIB_CFLAGS) $(GIO_CFLAGS) $(DEVMAPPER_CFLAGS) -Wall -Wextra -Werror
-libbd_lvm_dbus_la_LIBADD = ${builddir}/../utils/libbd_utils.la -lm $(GLIB_LIBS) $(GIO_LIBS) $(DEVMAPPER_LIBS)
+libbd_lvm_dbus_la_CFLAGS = $(GLIB_CFLAGS) $(GIO_CFLAGS) $(DEVMAPPER_CFLAGS) $(YAML_CFLAGS) -Wall -Wextra -Werror
+libbd_lvm_dbus_la_LIBADD = ${builddir}/../utils/libbd_utils.la -lm $(GLIB_LIBS) $(GIO_LIBS) $(DEVMAPPER_LIBS) $(YAML_LIBS)
libbd_lvm_dbus_la_LDFLAGS = -L${srcdir}/../utils/ -version-info 3:0:0 -Wl,--no-undefined -export-symbols-regex '^bd_.*'
libbd_lvm_dbus_la_CPPFLAGS = -I${builddir}/../../include/
libbd_lvm_dbus_la_SOURCES = lvm-dbus.c lvm.h check_deps.c check_deps.h dm_logging.c dm_logging.h vdo_stats.c vdo_stats.h
diff --git a/src/plugins/lvm-dbus.c b/src/plugins/lvm-dbus.c
index 0c017306..92d90718 100644
--- a/src/plugins/lvm-dbus.c
+++ b/src/plugins/lvm-dbus.c
@@ -4701,10 +4701,10 @@ BDLVMVDOWritePolicy bd_lvm_get_vdo_write_policy_from_str (const gchar *policy_st
* statistics or %NULL in case of error
* (@error gets populated in those cases)
*
- * Statistics are collected from the values exposed by the kernel `kvdo` module
- * at the `/sys/kvdo/<VDO_NAME>/statistics/` path.
+ * Statistics are collected from the values exposed by the kernel `dm-vdo` module.
+ *
* Some of the keys are computed to mimic the information produced by the vdo tools.
- * Please note the contents of the hashtable may vary depending on the actual kvdo module version.
+ * Please note the contents of the hashtable may vary depending on the actual dm-vdo module version.
*
* Tech category: %BD_LVM_TECH_VDO-%BD_LVM_TECH_MODE_QUERY
*/
@@ -4736,12 +4736,12 @@ BDLVMVDOStats* bd_lvm_vdo_get_stats (const gchar *vg_name, const gchar *pool_nam
return NULL;

stats = g_new0 (BDLVMVDOStats, 1);
- get_stat_val64_default (full_stats, "block_size", &stats->block_size, -1);
- get_stat_val64_default (full_stats, "logical_block_size", &stats->logical_block_size, -1);
- get_stat_val64_default (full_stats, "physical_blocks", &stats->physical_blocks, -1);
- get_stat_val64_default (full_stats, "data_blocks_used", &stats->data_blocks_used, -1);
- get_stat_val64_default (full_stats, "overhead_blocks_used", &stats->overhead_blocks_used, -1);
- get_stat_val64_default (full_stats, "logical_blocks_used", &stats->logical_blocks_used, -1);
+ get_stat_val64_default (full_stats, "blockSize", &stats->block_size, -1);
+ get_stat_val64_default (full_stats, "logicalBlockSize", &stats->logical_block_size, -1);
+ get_stat_val64_default (full_stats, "physicalBlocks", &stats->physical_blocks, -1);
+ get_stat_val64_default (full_stats, "dataBlocksUsed", &stats->data_blocks_used, -1);
+ get_stat_val64_default (full_stats, "overheadBlocksUsed", &stats->overhead_blocks_used, -1);
+ get_stat_val64_default (full_stats, "logicalBlocksUsed", &stats->logical_blocks_used, -1);
get_stat_val64_default (full_stats, "usedPercent", &stats->used_percent, -1);
get_stat_val64_default (full_stats, "savingPercent", &stats->saving_percent, -1);
if (!get_stat_val_double (full_stats, "writeAmplificationRatio", &stats->write_amplification_ratio))
diff --git a/src/plugins/lvm.c b/src/plugins/lvm.c
index dc7491a7..0af9a382 100644
--- a/src/plugins/lvm.c
+++ b/src/plugins/lvm.c
@@ -3799,10 +3799,10 @@ BDLVMVDOWritePolicy bd_lvm_get_vdo_write_policy_from_str (const gchar *policy_st
* statistics or %NULL in case of error
* (@error gets populated in those cases)
*
- * Statistics are collected from the values exposed by the kernel `kvdo` module
- * at the `/sys/kvdo/<VDO_NAME>/statistics/` path.
+ * Statistics are collected from the values exposed by the kernel `dm-vdo` module.
+ *
* Some of the keys are computed to mimic the information produced by the vdo tools.
- * Please note the contents of the hashtable may vary depending on the actual kvdo module version.
+ * Please note the contents of the hashtable may vary depending on the actual dm-vdo module version.
*
* Tech category: %BD_LVM_TECH_VDO-%BD_LVM_TECH_MODE_QUERY
*/
@@ -3834,12 +3834,12 @@ BDLVMVDOStats* bd_lvm_vdo_get_stats (const gchar *vg_name, const gchar *pool_nam
return NULL;

stats = g_new0 (BDLVMVDOStats, 1);
- get_stat_val64_default (full_stats, "block_size", &stats->block_size, -1);
- get_stat_val64_default (full_stats, "logical_block_size", &stats->logical_block_size, -1);
- get_stat_val64_default (full_stats, "physical_blocks", &stats->physical_blocks, -1);
- get_stat_val64_default (full_stats, "data_blocks_used", &stats->data_blocks_used, -1);
- get_stat_val64_default (full_stats, "overhead_blocks_used", &stats->overhead_blocks_used, -1);
- get_stat_val64_default (full_stats, "logical_blocks_used", &stats->logical_blocks_used, -1);
+ get_stat_val64_default (full_stats, "blockSize", &stats->block_size, -1);
+ get_stat_val64_default (full_stats, "logicalBlockSize", &stats->logical_block_size, -1);
+ get_stat_val64_default (full_stats, "physicalBlocks", &stats->physical_blocks, -1);
+ get_stat_val64_default (full_stats, "dataBlocksUsed", &stats->data_blocks_used, -1);
+ get_stat_val64_default (full_stats, "overheadBlocksUsed", &stats->overhead_blocks_used, -1);
+ get_stat_val64_default (full_stats, "logicalBlocksUsed", &stats->logical_blocks_used, -1);
get_stat_val64_default (full_stats, "usedPercent", &stats->used_percent, -1);
get_stat_val64_default (full_stats, "savingPercent", &stats->saving_percent, -1);
if (!get_stat_val_double (full_stats, "writeAmplificationRatio", &stats->write_amplification_ratio))
diff --git a/src/plugins/vdo_stats.c b/src/plugins/vdo_stats.c
index 620e972f..ab914821 100644
--- a/src/plugins/vdo_stats.c
+++ b/src/plugins/vdo_stats.c
@@ -19,10 +19,11 @@

#include <glib.h>
#include <blockdev/utils.h>
+#include <libdevmapper.h>
+#include <yaml.h>

#include "vdo_stats.h"
-
-#define VDO_SYS_PATH "/sys/kvdo"
+#include "lvm.h"


gboolean __attribute__ ((visibility ("hidden")))
@@ -67,9 +68,9 @@ get_stat_val_double (GHashTable *stats, const gchar *key, gdouble *val) {
static void add_write_ampl_r_stats (GHashTable *stats) {
gint64 bios_meta_write, bios_out_write, bios_in_write;

- if (! get_stat_val64 (stats, "bios_meta_write", &bios_meta_write) ||
- ! get_stat_val64 (stats, "bios_out_write", &bios_out_write) ||
- ! get_stat_val64 (stats, "bios_in_write", &bios_in_write))
+ if (! get_stat_val64 (stats, "biosMetaWrite", &bios_meta_write) ||
+ ! get_stat_val64 (stats, "biosOutWrite", &bios_out_write) ||
+ ! get_stat_val64 (stats, "biosInWrite", &bios_in_write))
return;

if (bios_in_write <= 0)
@@ -84,11 +85,11 @@ static void add_block_stats (GHashTable *stats) {
gint64 physical_blocks, block_size, data_blocks_used, overhead_blocks_used, logical_blocks_used;
gint64 savings;

- if (! get_stat_val64 (stats, "physical_blocks", &physical_blocks) ||
- ! get_stat_val64 (stats, "block_size", &block_size) ||
- ! get_stat_val64 (stats, "data_blocks_used", &data_blocks_used) ||
- ! get_stat_val64 (stats, "overhead_blocks_used", &overhead_blocks_used) ||
- ! get_stat_val64 (stats, "logical_blocks_used", &logical_blocks_used))
+ if (! get_stat_val64 (stats, "physicalBlocks", &physical_blocks) ||
+ ! get_stat_val64 (stats, "blockSize", &block_size) ||
+ ! get_stat_val64 (stats, "dataBlocksUsed", &data_blocks_used) ||
+ ! get_stat_val64 (stats, "overheadBlocksUsed", &overhead_blocks_used) ||
+ ! get_stat_val64 (stats, "logicalBlocksUsed", &logical_blocks_used))
return;

g_hash_table_replace (stats, g_strdup ("oneKBlocks"), g_strdup_printf ("%"G_GINT64_FORMAT, physical_blocks * block_size / 1024));
@@ -105,24 +106,24 @@ static void add_journal_stats (GHashTable *stats) {
gint64 journal_entries_committed, journal_entries_started, journal_entries_written;
gint64 journal_blocks_committed, journal_blocks_started, journal_blocks_written;

- if (! get_stat_val64 (stats, "journal_entries_committed", &journal_entries_committed) ||
- ! get_stat_val64 (stats, "journal_entries_started", &journal_entries_started) ||
- ! get_stat_val64 (stats, "journal_entries_written", &journal_entries_written) ||
- ! get_stat_val64 (stats, "journal_blocks_committed", &journal_blocks_committed) ||
- ! get_stat_val64 (stats, "journal_blocks_started", &journal_blocks_started) ||
- ! get_stat_val64 (stats, "journal_blocks_written", &journal_blocks_written))
+ if (! get_stat_val64 (stats, "journalEntriesCommitted", &journal_entries_committed) ||
+ ! get_stat_val64 (stats, "journalEntriesStarted", &journal_entries_started) ||
+ ! get_stat_val64 (stats, "journalEntriesWritten", &journal_entries_written) ||
+ ! get_stat_val64 (stats, "journalBlocksCommitted", &journal_blocks_committed) ||
+ ! get_stat_val64 (stats, "journalBlocksStarted", &journal_blocks_started) ||
+ ! get_stat_val64 (stats, "journalBlocksWritten", &journal_blocks_written))
return;

- g_hash_table_replace (stats, g_strdup ("journal_entries_batching"), g_strdup_printf ("%"G_GINT64_FORMAT, journal_entries_started - journal_entries_written));
- g_hash_table_replace (stats, g_strdup ("journal_entries_writing"), g_strdup_printf ("%"G_GINT64_FORMAT, journal_entries_written - journal_entries_committed));
- g_hash_table_replace (stats, g_strdup ("journal_blocks_batching"), g_strdup_printf ("%"G_GINT64_FORMAT, journal_blocks_started - journal_blocks_written));
- g_hash_table_replace (stats, g_strdup ("journal_blocks_writing"), g_strdup_printf ("%"G_GINT64_FORMAT, journal_blocks_written - journal_blocks_committed));
+ g_hash_table_replace (stats, g_strdup ("journalEntriesBatching"), g_strdup_printf ("%"G_GINT64_FORMAT, journal_entries_started - journal_entries_written));
+ g_hash_table_replace (stats, g_strdup ("journalEntriesWriting"), g_strdup_printf ("%"G_GINT64_FORMAT, journal_entries_written - journal_entries_committed));
+ g_hash_table_replace (stats, g_strdup ("journalBlocksBatching"), g_strdup_printf ("%"G_GINT64_FORMAT, journal_blocks_started - journal_blocks_written));
+ g_hash_table_replace (stats, g_strdup ("journalBlocksWriting"), g_strdup_printf ("%"G_GINT64_FORMAT, journal_blocks_written - journal_blocks_committed));
}

static void add_computed_stats (GHashTable *stats) {
const gchar *s;

- s = g_hash_table_lookup (stats, "logical_block_size");
+ s = g_hash_table_lookup (stats, "logicalBlockSize");
g_hash_table_replace (stats,
g_strdup ("fiveTwelveByteEmulation"),
g_strdup ((g_strcmp0 (s, "512") == 0) ? "true" : "false"));
@@ -132,76 +133,128 @@ static void add_computed_stats (GHashTable *stats) {
add_journal_stats (stats);
}

-static gchar* _dm_node_from_name (const gchar *map_name, GError **error) {
- gchar *dev_path = NULL;
- gchar *ret = NULL;
- gchar *dev_mapper_path = g_strdup_printf ("/dev/mapper/%s", map_name);
+enum parse_flags {
+ PARSE_NEXT_KEY,
+ PARSE_NEXT_VAL,
+ PARSE_NEXT_IGN,
+};

- dev_path = bd_utils_resolve_device (dev_mapper_path, error);
- g_free (dev_mapper_path);
- if (!dev_path)
- /* error is already populated */
+GHashTable __attribute__ ((visibility ("hidden")))
+*vdo_get_stats_full (const gchar *name, GError **error) {
+ struct dm_task *dmt = NULL;
+ const gchar *response = NULL;
+ yaml_parser_t parser;
+ yaml_token_t token;
+ GHashTable *stats = NULL;
+ gchar *key = NULL;
+ gsize len = 0;
+ int next_token = PARSE_NEXT_IGN;
+ gchar *prefix = NULL;
+
+ dmt = dm_task_create (DM_DEVICE_TARGET_MSG);
+ if (!dmt) {
+ g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
+ "Failed to create DM task");
return NULL;
+ }

- ret = g_path_get_basename (dev_path);
- g_free (dev_path);
+ if (!dm_task_set_name (dmt, name)) {
+ g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
+ "Failed to set name for DM task");
+ dm_task_destroy (dmt);
+ return NULL;
+ }

- return ret;
-}
+ if (!dm_task_set_message (dmt, "stats")) {
+ g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
+ "Failed to set message for DM task");
+ dm_task_destroy (dmt);
+ return NULL;
+ }

-GHashTable __attribute__ ((visibility ("hidden")))
-*vdo_get_stats_full (const gchar *name, GError **error) {
- GHashTable *stats;
- GDir *dir;
- gchar *stats_dir;
- const gchar *direntry;
- gchar *s;
- gchar *val = NULL;
- g_autofree gchar *dm_node = NULL;
- GError *l_error = NULL;
-
- /* try "new" (kvdo >= 8) path first -- /sys/block/dm-X/vdo/statistics */
- dm_node = _dm_node_from_name (name, error);
- if (dm_node == NULL) {
- g_prefix_error (error, "Failed to get DM node for %s: ", name);
+ if (!dm_task_run (dmt)) {
+ g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
+ "Failed to run DM task");
+ dm_task_destroy (dmt);
+ return NULL;
+ }
+
+ response = dm_task_get_message_response (dmt);
+ if (!response) {
+ g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
+ "Failed to get response from the DM task");
+ dm_task_destroy (dmt);
return NULL;
}

- stats_dir = g_build_path (G_DIR_SEPARATOR_S, "/sys/block", dm_node, "vdo/statistics", NULL);
- dir = g_dir_open (stats_dir, 0, &l_error);
- if (dir == NULL) {
- bd_utils_log_format (BD_UTILS_LOG_INFO,
- "Failed to read VDO stats using the new API, falling back to %s: %s",
- VDO_SYS_PATH, l_error->message);
- g_free (stats_dir);
- g_clear_error (&l_error);
-
- /* lets try /sys/kvdo */
- stats_dir = g_build_path (G_DIR_SEPARATOR_S, VDO_SYS_PATH, name, "statistics", NULL);
- dir = g_dir_open (stats_dir, 0, error);
- if (dir == NULL) {
- g_prefix_error (error, "Error reading statistics from %s: ", stats_dir);
- g_free (stats_dir);
- return NULL;
- }
+ if (!yaml_parser_initialize (&parser)) {
+ g_set_error (error, BD_LVM_ERROR, BD_LVM_ERROR_DM_ERROR,
+ "Failed to get initialize YAML parser");
+ dm_task_destroy (dmt);
+ return NULL;
}

stats = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_free);
- while ((direntry = g_dir_read_name (dir))) {
- s = g_build_filename (stats_dir, direntry, NULL);
- if (! g_file_get_contents (s, &val, NULL, error)) {
- g_prefix_error (error, "Error reading statistics from %s: ", s);
- g_free (s);
- g_hash_table_destroy (stats);
- stats = NULL;
- break;
- }
- g_hash_table_replace (stats, g_strdup (direntry), g_strdup (g_strstrip (val)));
- g_free (val);
- g_free (s);
- }
- g_dir_close (dir);
- g_free (stats_dir);
+ yaml_parser_set_input_string (&parser, (guchar *) response, strlen (response));
+
+ do {
+ yaml_parser_scan (&parser, &token);
+ switch (token.type) {
+ /* key */
+ case YAML_KEY_TOKEN:
+ next_token = PARSE_NEXT_KEY;
+ break;
+ /* value */
+ case YAML_VALUE_TOKEN:
+ next_token = PARSE_NEXT_VAL;
+ break;
+ /* block mapping */
+ case YAML_BLOCK_MAPPING_START_TOKEN:
+ if (next_token == PARSE_NEXT_VAL)
+ /* we were expecting to read a key-value pair but this is actually
+ a block start, so we need to free the key we're not going to use */
+ g_free (key);
+ break;
+ /* mapping */
+ case YAML_FLOW_MAPPING_START_TOKEN:
+ /* start of flow mapping -> previously read key will be used as prefix
+ for all keys in the mapping:
+ previous key: biosInProgress
+ keys in the mapping: Read, Write...
+ with prefix: biosInProgressRead, biosInProgressWrite...
+ */
+ prefix = key;
+ break;
+ case YAML_FLOW_MAPPING_END_TOKEN:
+ /* end of flow mapping, discard the prefix used */
+ g_free (prefix);
+ prefix = NULL;
+ break;
+ /* actual data */
+ case YAML_SCALAR_TOKEN:
+ if (next_token == PARSE_NEXT_KEY) {
+ if (prefix) {
+ key = g_strdup_printf ("%s%s", prefix, (const gchar *) token.data.scalar.value);
+ len = strlen (prefix);
+ /* make sure the key with the prefix is still camelCase */
+ key[len] = g_ascii_toupper (key[len]);
+ } else
+ key = g_strdup ((const gchar *) token.data.scalar.value);
+ } else if (next_token == PARSE_NEXT_VAL) {
+ gchar *val = g_strdup ((const gchar *) token.data.scalar.value);
+ g_hash_table_insert (stats, key, val);
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (token.type != YAML_STREAM_END_TOKEN)
+ yaml_token_delete (&token);
+ } while (token.type != YAML_STREAM_END_TOKEN);
+
+ yaml_parser_delete (&parser);
+ dm_task_destroy (dmt);

if (stats != NULL)
add_computed_stats (stats);
diff --git a/tests/lvm_dbus_tests.py b/tests/lvm_dbus_tests.py
index 9f302611..db0d5c90 100644
--- a/tests/lvm_dbus_tests.py
+++ b/tests/lvm_dbus_tests.py
@@ -2090,7 +2090,9 @@ class LVMVDOTest(LVMTestCase):
pool_info = BlockDev.lvm_lvinfo("testVDOVG", "vdoPool")
self.assertEqual(pool_info.segtype, "vdo-pool")
self.assertEqual(pool_info.data_lv, "vdoPool_vdata")
- self.assertGreater(pool_info.data_percent, 0)
+ lvm_version = self._get_lvm_version()
+ if lvm_version >= Version("2.03.24"):
+ self.assertGreater(pool_info.data_percent, 0)

pool = BlockDev.lvm_vdolvpoolname("testVDOVG", "vdoLV")
self.assertEqual(pool, lv_info.pool_lv)
diff --git a/tests/lvm_test.py b/tests/lvm_test.py
index b1d65baf..96aa352a 100644
--- a/tests/lvm_test.py
+++ b/tests/lvm_test.py
@@ -1994,7 +1994,9 @@ class LVMVDOTest(LVMTestCase):
pool_info = BlockDev.lvm_lvinfo("testVDOVG", "vdoPool")
self.assertEqual(pool_info.segtype, "vdo-pool")
self.assertEqual(pool_info.data_lv, "vdoPool_vdata")
- self.assertGreater(pool_info.data_percent, 0)
+ lvm_version = self._get_lvm_version()
+ if lvm_version >= Version("2.03.24"):
+ self.assertGreater(pool_info.data_percent, 0)

pool = BlockDev.lvm_vdolvpoolname("testVDOVG", "vdoLV")
self.assertEqual(pool, lv_info.pool_lv)
@@ -2155,7 +2157,11 @@ class LVMVDOTest(LVMTestCase):
self.assertTrue(vdo_info.deduplication)

vdo_stats = BlockDev.lvm_vdo_get_stats("testVDOVG", "vdoPool")
- self.assertEqual(vdo_info.saving_percent, vdo_stats.saving_percent)
+
+ lvm_version = self._get_lvm_version()
+ if lvm_version >= Version("2.03.24"):
+ # saving_percent is incorrect with LVM < 2.03.24
+ self.assertEqual(vdo_info.saving_percent, vdo_stats.saving_percent)

# just sanity check
self.assertNotEqual(vdo_stats.used_percent, -1)
--
2.45.2
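
With patch 2 the statistics path changes only inside the plugin: vdo_get_stats_full() now sends the "stats" target message to the VDO device-mapper device (roughly what `dmsetup message <dm-device> 0 stats` does) and parses the YAML response, so the reported keys become the camelCase names used by dm-vdo (blockSize, dataBlocksUsed, logicalBlocksUsed, ...). The public plugin API is unchanged; a rough usage sketch mirroring the test suite follows (the VG/pool names are the test's examples, an initialized LVM plugin is assumed, and the lvm_vdo_get_stats_full call returning a plain key/value table is an assumption about the bindings):

    from gi.repository import BlockDev

    # Typed statistics (BDLVMVDOStats); the struct fields keep their
    # snake_case names regardless of the kernel module in use.
    stats = BlockDev.lvm_vdo_get_stats("testVDOVG", "vdoPool")
    print(stats.saving_percent, stats.data_blocks_used)

    # Full statistics as a string->string table, now keyed by the dm-vdo
    # camelCase names parsed from the "stats" message response.
    full_stats = BlockDev.lvm_vdo_get_stats_full("testVDOVG", "vdoPool")
    print(full_stats["logicalBlocksUsed"], full_stats["writeAmplificationRatio"])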