nvme-cli update to version 2.16

Resolves: RHEL-129230

Signed-off-by: Maurizio Lombardi <mlombard@redhat.com>
Maurizio Lombardi 2025-11-18 12:58:34 +01:00
parent 8ae781ab04
commit b0b6408d71
6 changed files with 9 additions and 230 deletions

.gitignore

@@ -37,3 +37,4 @@
 /nvme-cli-2.11.tar.gz
 /nvme-cli-2.13.tar.gz
 /nvme-cli-2.15.tar.gz
+/nvme-cli-2.16.tar.gz

0001-nvme-list-make-verbose-JSON-output-backward-compatib.patch

@@ -1,176 +0,0 @@
From 80ad62ba75f1f1dd5b99c6f56769141b48344485 Mon Sep 17 00:00:00 2001
From: Nilay Shroff <nilay@linux.ibm.com>
Date: Tue, 5 Aug 2025 15:50:50 +0530
Subject: [PATCH] nvme-list: make verbose JSON output backward compatible
The commit 64bed0a87a23 ("nvme-list: fix verbose JSON output for 'nvme
list' command") changed the JSON output format of the nvme list --verbose
command. While the new format is more structured, it introduced a
regression by breaking compatibility with tools and scripts relying on
the previous JSON schema.
So to restore backward compatibility, we now leverage the existing
--output-format-version option. With this patch,
1. The default --output-format-version=1 retains the original (legacy) JSON
format for nvme list --verbose.
2. If the user explicitly sets --output-format-version=2 then the newer
JSON structure introduced by commit 64bed0a87a23 ("nvme-list: fix verbose
JSON output for 'nvme list' command") is used.
This ensures that existing users and automation relying on the older format
do not break, while still supporting the newer schema for forward-looking
users.
Fixes: 64bed0a87a23 ("nvme-list: fix verbose JSON output for 'nvme list' command")
Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
Link: https://patch.msgid.link/20250805102055.3375272-1-nilay@linux.ibm.com
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
nvme-print-json.c | 117 ++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 113 insertions(+), 4 deletions(-)
diff --git a/nvme-print-json.c b/nvme-print-json.c
index 11b66616..829ba718 100644
--- a/nvme-print-json.c
+++ b/nvme-print-json.c
@@ -4519,7 +4519,7 @@ static void json_print_detail_list(nvme_subsystem_t s, struct json_object *jss)
obj_add_obj(jss, "Controllers", jctrls);
}
-static void json_detail_list(nvme_root_t t)
+static void json_detail_list_v2(nvme_root_t t)
{
struct json_object *r = json_create_object();
struct json_object *jdev = json_create_array();
@@ -4560,6 +4560,112 @@ static void json_detail_list(nvme_root_t t)
json_print(r);
}
+static void json_detail_list(nvme_root_t t)
+{
+ struct json_object *r = json_create_object();
+ struct json_object *jdev = json_create_array();
+
+ nvme_host_t h;
+ nvme_subsystem_t s;
+ nvme_ctrl_t c;
+ nvme_path_t p;
+ nvme_ns_t n;
+
+ nvme_for_each_host(t, h) {
+ struct json_object *hss = json_create_object();
+ struct json_object *jsslist = json_create_array();
+ const char *hostid;
+
+ obj_add_str(hss, "HostNQN", nvme_host_get_hostnqn(h));
+ hostid = nvme_host_get_hostid(h);
+ if (hostid)
+ obj_add_str(hss, "HostID", hostid);
+
+ nvme_for_each_subsystem(h, s) {
+ struct json_object *jss = json_create_object();
+ struct json_object *jctrls = json_create_array();
+ struct json_object *jnss = json_create_array();
+
+ obj_add_str(jss, "Subsystem", nvme_subsystem_get_name(s));
+ obj_add_str(jss, "SubsystemNQN", nvme_subsystem_get_nqn(s));
+
+ nvme_subsystem_for_each_ctrl(s, c) {
+ struct json_object *jctrl = json_create_object();
+ struct json_object *jnss = json_create_array();
+ struct json_object *jpaths = json_create_array();
+
+ obj_add_str(jctrl, "Controller", nvme_ctrl_get_name(c));
+ obj_add_str(jctrl, "Cntlid", nvme_ctrl_get_cntlid(c));
+ obj_add_str(jctrl, "SerialNumber", nvme_ctrl_get_serial(c));
+ obj_add_str(jctrl, "ModelNumber", nvme_ctrl_get_model(c));
+ obj_add_str(jctrl, "Firmware", nvme_ctrl_get_firmware(c));
+ obj_add_str(jctrl, "Transport", nvme_ctrl_get_transport(c));
+ obj_add_str(jctrl, "Address", nvme_ctrl_get_address(c));
+ obj_add_str(jctrl, "Slot", nvme_ctrl_get_phy_slot(c));
+
+ nvme_ctrl_for_each_ns(c, n) {
+ struct json_object *jns = json_create_object();
+ int lba = nvme_ns_get_lba_size(n);
+ uint64_t nsze = nvme_ns_get_lba_count(n) * lba;
+ uint64_t nuse = nvme_ns_get_lba_util(n) * lba;
+
+ obj_add_str(jns, "NameSpace", nvme_ns_get_name(n));
+ obj_add_str(jns, "Generic", nvme_ns_get_generic_name(n));
+ obj_add_int(jns, "NSID", nvme_ns_get_nsid(n));
+ obj_add_uint64(jns, "UsedBytes", nuse);
+ obj_add_uint64(jns, "MaximumLBA", nvme_ns_get_lba_count(n));
+ obj_add_uint64(jns, "PhysicalSize", nsze);
+ obj_add_int(jns, "SectorSize", lba);
+
+ array_add_obj(jnss, jns);
+ }
+ obj_add_obj(jctrl, "Namespaces", jnss);
+
+ nvme_ctrl_for_each_path(c, p) {
+ struct json_object *jpath = json_create_object();
+
+ obj_add_str(jpath, "Path", nvme_path_get_name(p));
+ obj_add_str(jpath, "ANAState", nvme_path_get_ana_state(p));
+
+ array_add_obj(jpaths, jpath);
+ }
+ obj_add_obj(jctrl, "Paths", jpaths);
+
+ array_add_obj(jctrls, jctrl);
+ }
+ obj_add_obj(jss, "Controllers", jctrls);
+
+ nvme_subsystem_for_each_ns(s, n) {
+ struct json_object *jns = json_create_object();
+
+ int lba = nvme_ns_get_lba_size(n);
+ uint64_t nsze = nvme_ns_get_lba_count(n) * lba;
+ uint64_t nuse = nvme_ns_get_lba_util(n) * lba;
+
+ obj_add_str(jns, "NameSpace", nvme_ns_get_name(n));
+ obj_add_str(jns, "Generic", nvme_ns_get_generic_name(n));
+ obj_add_int(jns, "NSID", nvme_ns_get_nsid(n));
+ obj_add_uint64(jns, "UsedBytes", nuse);
+ obj_add_uint64(jns, "MaximumLBA", nvme_ns_get_lba_count(n));
+ obj_add_uint64(jns, "PhysicalSize", nsze);
+ obj_add_int(jns, "SectorSize", lba);
+
+ array_add_obj(jnss, jns);
+ }
+ obj_add_obj(jss, "Namespaces", jnss);
+
+ array_add_obj(jsslist, jss);
+ }
+
+ obj_add_obj(hss, "Subsystems", jsslist);
+ array_add_obj(jdev, hss);
+ }
+
+ obj_add_array(r, "Devices", jdev);
+
+ json_print(r);
+}
+
static struct json_object *json_list_item_obj(nvme_ns_t n)
{
struct json_object *r = json_create_object();
@@ -4622,9 +4728,12 @@ static void json_list_item(nvme_ns_t n)
static void json_print_list_items(nvme_root_t t)
{
- if (verbose_mode())
- json_detail_list(t);
- else
+ if (json_print_ops.flags & VERBOSE) {
+ if (nvme_cfg.output_format_ver == 2)
+ json_detail_list_v2(t);
+ else
+ json_detail_list(t);
+ } else
json_simple_list(t);
}
--
2.47.3
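
For reference, the behaviour restored by the patch above maps onto the following invocations (a usage sketch; only --output-format-version is taken from the commit message itself, the -o/--output-format=json selector is the usual nvme-cli option):

# Default (--output-format-version=1): legacy verbose JSON schema,
# compatible with consumers written before commit 64bed0a87a23.
nvme list --verbose --output-format=json

# Opt in to the restructured schema introduced by commit 64bed0a87a23.
nvme list --verbose --output-format=json --output-format-version=2

In the legacy (v1) tree rebuilt by json_detail_list(), namespace objects appear both under each controller and in the subsystem-level "Namespaces" array, while json_detail_list_v2() keeps the restructured layout added by that commit.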

0002-nvme-Fix-get-feature-on-big-endian-systems.patch

@@ -1,45 +0,0 @@
From 468e60f442da1aa20b048551f520d3ffb7564d0f Mon Sep 17 00:00:00 2001
From: Maurizio Lombardi <mlombard@redhat.com>
Date: Fri, 1 Aug 2025 10:08:08 +0200
Subject: [PATCH] nvme: Fix get-feature on big-endian systems
The `parse_and_open()` function uses `OPT_BYTE` to parse the
feature_id and sel options. This means it writes a single byte
to the memory locations for `feature_id` and `sel` in the `feat_cfg`
struct.
However, these fields were declared as `enum`, which typically has
the size of an `int`. On big-endian architectures, writing a single
byte to the address of an `int` modifies the most significant byte,
resulting in an incorrect value being read. This caused the
`get-feature` command to fail. The command only worked on little-endian
systems.
Fix the issue by changing the type for `feature_id`
and `sel` to `__u8`. This ensures the fields are treated as single
bytes, guaranteeing correct behavior on all architectures.
Signed-off-by: Maurizio Lombardi <mlombard@redhat.com>
---
nvme.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/nvme.c b/nvme.c
index 307036ba..27dac37b 100644
--- a/nvme.c
+++ b/nvme.c
@@ -72,9 +72,9 @@
#include "malloc.h"
struct feat_cfg {
- enum nvme_features_id feature_id;
+ __u8 feature_id; /* enum nvme_features_id */
+ __u8 sel; /* enum nvme_get_features_sel */
__u32 namespace_id;
- enum nvme_get_features_sel sel;
__u32 cdw11;
__u32 cdw12;
__u8 uuid_index;
--
2.47.3
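
A minimal standalone C sketch of the failure mode described in the commit message above (illustration only, not nvme-cli code): the OPT_BYTE parsing used by parse_and_open() stores a single byte through a pointer that actually addresses an int-sized enum field, so on a big-endian machine the byte lands in the most significant position.

#include <stdio.h>

int main(void)
{
        int feature_id = 0;           /* stand-in for the old enum-typed field */
        unsigned char parsed = 0x02;  /* byte value parsed from the command line */

        /* OPT_BYTE-style store: one byte written at the field's address. */
        *(unsigned char *)&feature_id = parsed;

        /*
         * Little endian: feature_id == 0x02 as intended.
         * Big endian:    feature_id == 0x02000000, so get-feature operates
         * on the wrong value and fails.
         */
        printf("feature_id = 0x%x\n", feature_id);
        return 0;
}

Declaring feature_id and sel as __u8, as the hunk above does, makes the parser's byte-sized store match the field width on every architecture.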

gating.yaml

@@ -2,5 +2,3 @@
 product_versions:
   - rhel-10
 decision_context: osci_compose_gate
-rules:
-  - !PassingTestCaseRule {test_case_name: osci.brew-build.tier0.functional}

nvme-cli.spec

@@ -4,8 +4,8 @@
 %global nmlibdir %{_prefix}/lib/NetworkManager
 Name: nvme-cli
-Version: 2.15
-Release: 2%{?dist}
+Version: 2.16
+Release: 1%{?dist}
 Summary: NVMe management command line interface
 License: GPL-2.0-only
@@ -13,8 +13,6 @@ URL: https://github.com/linux-nvme/nvme-cli
 Source0: %{url}/archive/v%{version_no_tilde}/%{name}-%{version_no_tilde}.tar.gz
 Source1: 99-nvme-nbft-connect.sh
 Source2: 99-nvme-nbft-no-ignore-carrier.conf
-Patch0: 0001-nvme-list-make-verbose-JSON-output-backward-compatib.patch
-Patch1: 0002-nvme-Fix-get-feature-on-big-endian-systems.patch
 BuildRequires: meson >= 0.53
 BuildRequires: gcc gcc-c++
@@ -24,7 +22,7 @@ BuildRequires: zlib-devel
 BuildRequires: openssl-devel
 BuildRequires: kernel-headers >= 6.12.0-37
-BuildRequires: libnvme-devel >= 1.15-1
+BuildRequires: libnvme-devel >= 1.16-1
 BuildRequires: json-c-devel >= 0.14
 BuildRequires: asciidoc
@@ -79,7 +77,7 @@ rm -rf %{buildroot}%{_pkgdocdir}/nvme
 %{_udevrulesdir}/70-nvmf-autoconnect.rules
 %{_udevrulesdir}/70-nvmf-keys.rules
 %{_udevrulesdir}/71-nvmf-netapp.rules
-%{_udevrulesdir}/71-nvme-hpe.rules
+%{_udevrulesdir}/71-nvmf-hpe.rules
 %{_udevrulesdir}/71-nvmf-vastdata.rules
 # Do not install the dracut rule yet. See rhbz 1742764
 # /usr/lib/dracut/dracut.conf.d/70-nvmf-autoconnect.conf
@@ -111,6 +109,9 @@ fi
 %systemd_postun nvmf-connect-nbft.service
 %changelog
+* Tue Nov 18 2025 Maurizio Lombardi <mlombard@redhat.com> - 2.16-1
+- Rebase to new version 2.16
 * Tue Oct 07 2025 Maurizio Lombardi <mlombard@redhat.com> - 2.15-2
 - Rebase to latest version

sources

@@ -1 +1 @@
-SHA512 (nvme-cli-2.15.tar.gz) = 6f4c9fe52883df5424ba28d8b66b00e61f4b6f7226d7385f026c1d3b8aeb473bdf637102d9cf7e049349c4f7e61eccbb33f7400cc09056f74be03a80b7d51c0d
+SHA512 (nvme-cli-2.16.tar.gz) = 507018ff41832574bef5b88ea6a17336b9792c54f8e5c619d041bce23124e1d6d5e5e824407f46d4f1b4d6899125885eb14289f3399e61c7e7a59a603f1635e2