Update dmpd to upstream version 1.1.0
Resolves: RHEL-62155
parent b8de93da35
commit 84738d09c4

.gitignore (vendored) | 2 ++
@@ -17,3 +17,5 @@
 /dmpd106-vendor.tar.gz
 /v1.0.9.tar.gz
 /dmpd109-vendor.tar.gz
+/v1.1.0.tar.gz
+/dmpd110-vendor.tar.gz
0001-Tweak-cargo.toml-to-work-with-vendor-directory.patch (updated; removed lines appear directly above their replacements)
@@ -1,4 +1,4 @@
From 0d5347bd771e960294cd0c2f083d96448613ab9c Mon Sep 17 00:00:00 2001
From b0b04e59eb381859f3858120d535cc24059fbc08 Mon Sep 17 00:00:00 2001
From: Marian Csontos <mcsontos@redhat.com>
Date: Thu, 27 Jul 2023 11:37:01 +0200
Subject: [PATCH] Tweak cargo.toml to work with vendor directory
@@ -6,17 +6,19 @@ Subject: [PATCH] Tweak cargo.toml to work with vendor directory
Mock works offline, cargo would try to download the files from github.
So cargo vendor has to be run first, and then change the Cargo.toml to
make mock happy.

(cherry picked from commit 0d5347bd771e960294cd0c2f083d96448613ab9c)
---
Cargo.toml | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Cargo.toml b/Cargo.toml
index 500345a4..d4aa38a6 100644
index 47703bfc..4c605a54 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -27,7 +27,8 @@ quick-xml = "0.29"
@@ -34,7 +34,8 @@ quick-xml = "0.36"
rand = "0.8"
rangemap = "1.4"
rangemap = "1.5"
roaring = "0.10"
-rio = { git = "https://github.com/jthornber/rio", branch = "master", optional = true }
+#rio = { git = "https://github.com/jthornber/rio", branch = "master", optional = true }
@@ -25,5 +27,5 @@ index 500345a4..d4aa38a6 100644
threadpool = "1.8"
thiserror = "1.0"
--
2.43.0
2.46.0
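As background for the patch above: running `cargo vendor` copies every crates.io dependency into a local vendor/ directory and prints a source-replacement stanza that makes cargo resolve everything offline. A minimal sketch of that stanza, which normally lives in .cargo/config.toml (it is not part of this commit):

# Typical output of `cargo vendor`, so an offline mock build never
# contacts crates.io. Git dependencies need an extra [source] entry,
# which is presumably why the patch comments out the `rio` line instead.
[source.crates-io]
replace-with = "vendored-sources"

[source.vendored-sources]
directory = "vendor"
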
0002-space-map-Fix-incorrect-index_entry.nr_free-while-ex.patch (deleted)
@@ -1,181 +0,0 @@
From f088ce90f5f7934489a8d1062ebfd3d23e4aec38 Mon Sep 17 00:00:00 2001
From: Ming-Hung Tsai <mtsai@redhat.com>
Date: Tue, 12 Dec 2023 14:25:27 +0800
Subject: [PATCH 2/6] [space map] Fix incorrect index_entry.nr_free while
 expansion

Do not truncate the nr_free value of the last index_entry to the space
map boundary, to address the issue in kernel that sm_ll_extend() doesn't
extend the nr_free value of the last index_entry while expanding the
space map. Without this fix, we'll have incorrect free space estimation
in later block allocations, leading to under-utilized space maps.

(cherry picked from commit 9a405f57c591020321fc16f00efdb5197e1df2c0)
---
src/pdata/space_map/common.rs | 20 +++++---
src/pdata/space_map/tests.rs | 96 +++++++++++++++++++++++++++++++++++
2 files changed, 108 insertions(+), 8 deletions(-)

diff --git a/src/pdata/space_map/common.rs b/src/pdata/space_map/common.rs
index d92b8115..4be9c303 100644
--- a/src/pdata/space_map/common.rs
+++ b/src/pdata/space_map/common.rs
@@ -211,22 +211,28 @@ pub fn write_common(
let len = std::cmp::min(nr_blocks - begin, ENTRIES_PER_BITMAP as u64);
let mut entries = Vec::with_capacity(ENTRIES_PER_BITMAP);
let mut first_free: Option<u32> = None;
- let mut nr_free: u32 = 0;
+ let mut nr_free = ENTRIES_PER_BITMAP as u32; // do not truncate to the sm size boundary

for i in 0..len {
let b = begin + i;
let rc = sm.get(b)?;
let e = match rc {
0 => {
- nr_free += 1;
if first_free.is_none() {
first_free = Some(i as u32);
}
Small(0)
}
- 1 => Small(1),
- 2 => Small(2),
+ 1 => {
+ nr_free -= 1;
+ Small(1)
+ }
+ 2 => {
+ nr_free -= 1;
+ Small(2)
+ }
_ => {
+ nr_free -= 1;
overflow_builder.push_value(w, b, rc)?;
Overflow
}
@@ -334,16 +340,14 @@ pub fn write_metadata_common(w: &mut WriteBatcher) -> anyhow::Result<(Vec<IndexE
let nr_blocks = w.sm.lock().unwrap().get_nr_blocks()?;
let nr_bitmaps = div_up(nr_blocks, ENTRIES_PER_BITMAP as u64) as usize;
let nr_used_bitmaps = index_entries.len();
- for bm in nr_used_bitmaps..nr_bitmaps {
- let begin = bm as u64 * ENTRIES_PER_BITMAP as u64;
- let len = std::cmp::min(nr_blocks - begin, ENTRIES_PER_BITMAP as u64);
+ for _bm in nr_used_bitmaps..nr_bitmaps {
let entries = vec![BitmapEntry::Small(0); ENTRIES_PER_BITMAP];
let blocknr = write_bitmap(w, entries)?;

// Insert into the index list
let ie = IndexEntry {
blocknr,
- nr_free: len as u32,
+ nr_free: ENTRIES_PER_BITMAP as u32, // do not truncate to the sm size boundary
none_free_before: 0,
};
index_entries.push(ie);
diff --git a/src/pdata/space_map/tests.rs b/src/pdata/space_map/tests.rs
index 26d7834d..fb08a9dc 100644
--- a/src/pdata/space_map/tests.rs
+++ b/src/pdata/space_map/tests.rs
@@ -152,3 +152,99 @@ mod core_sm_u8 {
}

//------------------------------------------
+
+mod metadata_sm {
+ use anyhow::{ensure, Result};
+ use std::sync::Arc;
+
+ use crate::io_engine::core::CoreIoEngine;
+ use crate::io_engine::*;
+ use crate::math::div_up;
+ use crate::pdata::space_map::common::ENTRIES_PER_BITMAP;
+ use crate::pdata::space_map::metadata::*;
+ use crate::write_batcher::WriteBatcher;
+
+ fn check_index_entries(nr_blocks: u64) -> Result<()> {
+ let engine = Arc::new(CoreIoEngine::new(nr_blocks));
+ let meta_sm = core_metadata_sm(engine.get_nr_blocks(), u32::MAX);
+
+ let mut w = WriteBatcher::new(engine.clone(), meta_sm.clone(), engine.get_batch_size());
+ w.alloc()?; // reserved for the superblock
+ let root = write_metadata_sm(&mut w)?;
+
+ let b = engine.read(root.bitmap_root)?;
+ let entries = check_and_unpack_metadata_index(&b)?.indexes;
+ ensure!(entries.len() as u64 == div_up(nr_blocks, ENTRIES_PER_BITMAP as u64));
+
+ // the number of blocks observed by index_entries must be multiple of ENTRIES_PER_BITMAP
+ let nr_allocated = meta_sm.lock().unwrap().get_nr_allocated()?;
+ let nr_free: u64 = entries.iter().map(|ie| ie.nr_free as u64).sum();
+ ensure!(nr_allocated + nr_free == (entries.len() * ENTRIES_PER_BITMAP) as u64);
+
+ Ok(())
+ }
+
+ #[test]
+ fn check_single_index_entry() -> Result<()> {
+ check_index_entries(1000)
+ }
+
+ #[test]
+ fn check_multiple_index_entries() -> Result<()> {
+ check_index_entries(ENTRIES_PER_BITMAP as u64 * 16 + 1000)
+ }
+}
+
+//------------------------------------------
+
+mod disk_sm {
+ use anyhow::{ensure, Result};
+ use std::ops::Deref;
+ use std::sync::Arc;
+
+ use crate::io_engine::core::CoreIoEngine;
+ use crate::io_engine::*;
+ use crate::math::div_up;
+ use crate::pdata::btree_walker::btree_to_value_vec;
+ use crate::pdata::space_map::common::{IndexEntry, ENTRIES_PER_BITMAP};
+ use crate::pdata::space_map::disk::*;
+ use crate::pdata::space_map::metadata::*;
+ use crate::pdata::space_map::*;
+ use crate::write_batcher::WriteBatcher;
+
+ fn check_index_entries(nr_blocks: u64) -> Result<()> {
+ let engine = Arc::new(CoreIoEngine::new(1024));
+ let meta_sm = core_metadata_sm(engine.get_nr_blocks(), u32::MAX);
+
+ let mut w = WriteBatcher::new(engine.clone(), meta_sm.clone(), engine.get_batch_size());
+ w.alloc()?; // reserved for the superblock
+
+ let data_sm = core_sm(nr_blocks, u32::MAX);
+ data_sm.lock().unwrap().inc(0, 100)?;
+
+ let root = write_disk_sm(&mut w, data_sm.lock().unwrap().deref())?;
+
+ let entries =
+ btree_to_value_vec::<IndexEntry>(&mut Vec::new(), engine, false, root.bitmap_root)?;
+ ensure!(entries.len() as u64 == div_up(nr_blocks, ENTRIES_PER_BITMAP as u64));
+
+ // the number of blocks observed by index_entries must be a multiple of ENTRIES_PER_BITMAP
+ let nr_allocated = data_sm.lock().unwrap().get_nr_allocated()?;
+ let nr_free: u64 = entries.iter().map(|ie| ie.nr_free as u64).sum();
+ ensure!(nr_allocated + nr_free == (entries.len() * ENTRIES_PER_BITMAP) as u64);
+
+ Ok(())
+ }
+
+ #[test]
+ fn check_single_index_entry() -> Result<()> {
+ check_index_entries(1000)
+ }
+
+ #[test]
+ fn check_multiple_index_entries() -> Result<()> {
+ check_index_entries(ENTRIES_PER_BITMAP as u64 * 16 + 1000)
+ }
+}
+
+//------------------------------------------
--
2.43.0
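An illustrative sketch, not code from the patch above, of the invariant it establishes: an index entry reports free space relative to the full bitmap capacity, so a later space-map expansion sees the tail of the last bitmap as allocatable.

// ENTRIES_PER_BITMAP is assumed to be 16k purely for this sketch; the real
// constant lives in src/pdata/space_map/common.rs.
const ENTRIES_PER_BITMAP: usize = 16 * 1024;

// nr_free starts at the full bitmap capacity and is decremented for every
// allocated block, instead of being counted up only over the blocks that
// currently fall inside the space map.
fn nr_free_for_bitmap(ref_counts: &[u32]) -> u32 {
    let allocated = ref_counts.iter().filter(|&&rc| rc > 0).count();
    (ENTRIES_PER_BITMAP - allocated) as u32
}

fn main() {
    // A space map whose last bitmap only covers 1000 blocks, 100 of them used:
    let mut rcs = vec![1u32; 100];
    rcs.extend(std::iter::repeat(0u32).take(900));
    // The entry still advertises capacity - 100 free blocks, not 1000 - 100.
    assert_eq!(nr_free_for_bitmap(&rcs), (ENTRIES_PER_BITMAP - 100) as u32);
}
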
0003-thin_repair-Fix-child-keys-checking-on-the-node-with.patch (deleted)
@@ -1,69 +0,0 @@
From add81da22a3998503a6f340350d7e59ed3b52e28 Mon Sep 17 00:00:00 2001
From: Ming-Hung Tsai <mtsai@redhat.com>
Date: Wed, 10 Jan 2024 15:56:39 +0800
Subject: [PATCH 3/6] [thin_repair] Fix child keys checking on the node with a
 zero key

Fix the issue that keys overlapping between the second and the first
child nodes indexed by zero not being checked.

(cherry picked from commit 386123bd0f74f7603e993bf3c26aac002162d5db)
---
src/thin/metadata_repair.rs | 27 +++++++++++++++------------
1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/src/thin/metadata_repair.rs b/src/thin/metadata_repair.rs
index 9716b1e3..8fece4b9 100644
--- a/src/thin/metadata_repair.rs
+++ b/src/thin/metadata_repair.rs
@@ -128,10 +128,11 @@ impl DevInfo {
}

fn push_child(&mut self, child: &DevInfo) -> Result<()> {
- if self.key_high > 0 && child.key_low <= self.key_high {
- return Err(anyhow!("incompatible child"));
- }
- if !self.pushed {
+ if self.pushed {
+ if child.key_low <= self.key_high {
+ return Err(anyhow!("incompatible child"));
+ }
+ } else {
self.key_low = child.key_low;
self.pushed = true;
}
@@ -175,10 +176,11 @@ impl MappingsInfo {
}

fn push_child(&mut self, child: &MappingsInfo) -> Result<()> {
- if self.key_high > 0 && child.key_low <= self.key_high {
- return Err(anyhow!("incompatible child"));
- }
- if !self.pushed {
+ if self.pushed {
+ if child.key_low <= self.key_high {
+ return Err(anyhow!("incompatible child"));
+ }
+ } else {
self.key_low = child.key_low;
self.pushed = true;
}
@@ -221,10 +223,11 @@ impl DetailsInfo {
}

fn push_child(&mut self, child: &DetailsInfo) -> Result<()> {
- if self.key_high > 0 && child.key_low <= self.key_high {
- return Err(anyhow!("incompatible child"));
- }
- if !self.pushed {
+ if self.pushed {
+ if child.key_low <= self.key_high {
+ return Err(anyhow!("incompatible child"));
+ }
+ } else {
self.key_low = child.key_low;
self.pushed = true;
}
--
2.43.0
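A self-contained sketch, simplified from the patch above with a hypothetical Info type, showing why gating the overlap check on `pushed` rather than on `key_high > 0` matters when the first child's highest key is 0:

struct Info {
    key_low: u64,
    key_high: u64,
    pushed: bool,
}

impl Info {
    // Corrected logic: once any child has been pushed, a following child whose
    // lowest key does not exceed key_high is an overlap -- even if key_high is 0.
    fn push_child(&mut self, key_low: u64, key_high: u64) -> Result<(), String> {
        if self.pushed {
            if key_low <= self.key_high {
                return Err("incompatible child".to_string());
            }
        } else {
            self.key_low = key_low;
            self.pushed = true;
        }
        self.key_high = key_high;
        Ok(())
    }
}

fn main() {
    let mut parent = Info { key_low: 0, key_high: 0, pushed: false };
    parent.push_child(0, 0).unwrap(); // first child holds only key 0
    // The old `key_high > 0` guard would silently accept this overlapping child.
    assert!(parent.push_child(0, 10).is_err());
}
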
0004-space_map-Allow-non-zero-values-in-unused-index-bloc.patch (deleted)
@@ -1,269 +0,0 @@
From 9b948a9639b00fe9226065ca4dd6fd2257cc126c Mon Sep 17 00:00:00 2001
From: Ming-Hung Tsai <mtsai@redhat.com>
Date: Thu, 11 Jan 2024 22:57:36 +0800
Subject: [PATCH 4/6] [space_map] Allow non-zero values in unused index block
 entries

Previously, we assumed unused entries in the index block were all
zero-initialized, leading to issues while loading the block with
unexpected bytes and a valid checksum [1]. The updated approach loads
index entries based on actual size information from superblock and
therefore improves compatibility.

[1] stratis-storage/stratisd#3520

(cherry picked from commit d5fe6a1e1c539a0f260a96eb4d7ed9b83c3f84c9)
---
src/commands/engine.rs | 6 ++++-
src/pdata/space_map/allocated_blocks.rs | 15 ++++++-----
src/pdata/space_map/checker.rs | 12 ++++++---
src/pdata/space_map/metadata.rs | 22 +++++++++-------
src/pdata/space_map/tests.rs | 35 +++++++++++++++++++++++--
src/thin/damage_generator.rs | 4 +--
src/thin/stat.rs | 6 +++--
7 files changed, 75 insertions(+), 25 deletions(-)

diff --git a/src/commands/engine.rs b/src/commands/engine.rs
index 1e752d6e..e8b75301 100644
--- a/src/commands/engine.rs
+++ b/src/commands/engine.rs
@@ -137,7 +137,11 @@ fn thin_valid_blocks<P: AsRef<Path>>(path: P, opts: &EngineOptions) -> RoaringBi
return all_blocks(e.get_nr_blocks() as u32);
}
let metadata_root = metadata_root.unwrap();
- let valid_blocks = allocated_blocks(e.clone(), metadata_root.bitmap_root);
+ let valid_blocks = allocated_blocks(
+ e.clone(),
+ metadata_root.bitmap_root,
+ metadata_root.nr_blocks,
+ );
valid_blocks.unwrap_or_else(|_| all_blocks(e.get_nr_blocks() as u32))
}

diff --git a/src/pdata/space_map/allocated_blocks.rs b/src/pdata/space_map/allocated_blocks.rs
index aa75cb7a..71497a76 100644
--- a/src/pdata/space_map/allocated_blocks.rs
+++ b/src/pdata/space_map/allocated_blocks.rs
@@ -17,18 +17,21 @@ struct IndexInfo {
pub fn allocated_blocks(
engine: Arc<dyn IoEngine + Send + Sync>,
sm_root: u64,
+ nr_blocks: u64,
) -> Result<RoaringBitmap> {
// Walk index tree to find where the bitmaps are.
let b = engine.read(sm_root)?;
- let (_, indexes) = MetadataIndex::unpack(b.get_data())?;
+ let indexes = load_metadata_index(&b, nr_blocks)?;

- let mut infos = Vec::new();
- for (key, entry) in indexes.indexes.iter().enumerate() {
- infos.push(IndexInfo {
+ let mut infos: Vec<_> = indexes
+ .indexes
+ .iter()
+ .enumerate()
+ .map(|(key, entry)| IndexInfo {
key: key as u64,
loc: entry.blocknr,
- });
- }
+ })
+ .collect();

// Read bitmaps in sequence
infos.sort_by(|lhs, rhs| lhs.loc.partial_cmp(&rhs.loc).unwrap());
diff --git a/src/pdata/space_map/checker.rs b/src/pdata/space_map/checker.rs
index cfafa79b..7cc8286b 100644
--- a/src/pdata/space_map/checker.rs
+++ b/src/pdata/space_map/checker.rs
@@ -194,10 +194,11 @@ fn gather_disk_index_entries(
fn gather_metadata_index_entries(
engine: Arc<dyn IoEngine + Send + Sync>,
bitmap_root: u64,
+ nr_blocks: u64,
metadata_sm: ASpaceMap,
) -> Result<Vec<IndexEntry>> {
let b = engine.read(bitmap_root)?;
- let entries = check_and_unpack_metadata_index(&b)?.indexes;
+ let entries = load_metadata_index(&b, nr_blocks)?.indexes;
metadata_sm.lock().unwrap().inc(bitmap_root, 1)?;
inc_entries(&metadata_sm, &entries[0..])?;

@@ -254,8 +255,13 @@ pub fn check_metadata_space_map(
metadata_sm.clone(),
false,
)?;
- let entries =
- gather_metadata_index_entries(engine.clone(), root.bitmap_root, metadata_sm.clone())?;
+
+ let entries = gather_metadata_index_entries(
+ engine.clone(),
+ root.bitmap_root,
+ root.nr_blocks,
+ metadata_sm.clone(),
+ )?;

// check overflow ref-counts
{
diff --git a/src/pdata/space_map/metadata.rs b/src/pdata/space_map/metadata.rs
index b466c135..be232389 100644
--- a/src/pdata/space_map/metadata.rs
+++ b/src/pdata/space_map/metadata.rs
@@ -6,6 +6,7 @@ use std::sync::{Arc, Mutex};

use crate::checksum;
use crate::io_engine::*;
+use crate::math::div_up;
use crate::pdata::space_map::common::*;
use crate::pdata::space_map::*;
use crate::pdata::unpack::*;
@@ -32,14 +33,11 @@ impl Unpack for MetadataIndex {
let (i, _csum) = le_u32(i)?;
let (i, _padding) = le_u32(i)?;
let (i, blocknr) = le_u64(i)?;
- let (i, indexes) = nom::multi::count(IndexEntry::unpack, MAX_METADATA_BITMAPS)(i)?;
+ let (i, mut indexes) = nom::multi::count(IndexEntry::unpack, MAX_METADATA_BITMAPS)(i)?;

- // Filter out unused entries
- let indexes: Vec<IndexEntry> = indexes
- .iter()
- .take_while(|e| e.blocknr != 0)
- .cloned()
- .collect();
+ // Drop unused entries that point to block 0
+ let nr_bitmaps = indexes.iter().take_while(|e| e.blocknr != 0).count();
+ indexes.truncate(nr_bitmaps);

Ok((i, MetadataIndex { blocknr, indexes }))
}
@@ -69,9 +67,15 @@ fn verify_checksum(b: &Block) -> Result<()> {
}
}

-pub fn check_and_unpack_metadata_index(b: &Block) -> Result<MetadataIndex> {
+pub fn load_metadata_index(b: &Block, nr_blocks: u64) -> Result<MetadataIndex> {
verify_checksum(b)?;
- unpack::<MetadataIndex>(b.get_data()).map_err(|e| e.into())
+ let mut entries = unpack::<MetadataIndex>(b.get_data())?;
+ if entries.blocknr != b.loc {
+ return Err(anyhow!("blocknr mismatch"));
+ }
+ let nr_bitmaps = div_up(nr_blocks, ENTRIES_PER_BITMAP as u64) as usize;
+ entries.indexes.truncate(nr_bitmaps);
+ Ok(entries)
}

//------------------------------------------
diff --git a/src/pdata/space_map/tests.rs b/src/pdata/space_map/tests.rs
index fb08a9dc..fa118189 100644
--- a/src/pdata/space_map/tests.rs
+++ b/src/pdata/space_map/tests.rs
@@ -171,10 +171,11 @@ mod metadata_sm {
let mut w = WriteBatcher::new(engine.clone(), meta_sm.clone(), engine.get_batch_size());
w.alloc()?; // reserved for the superblock
let root = write_metadata_sm(&mut w)?;
+ drop(w);

let b = engine.read(root.bitmap_root)?;
- let entries = check_and_unpack_metadata_index(&b)?.indexes;
- ensure!(entries.len() as u64 == div_up(nr_blocks, ENTRIES_PER_BITMAP as u64));
+ let entries = load_metadata_index(&b, root.nr_blocks)?.indexes;
+ ensure!(entries.len() == div_up(nr_blocks, ENTRIES_PER_BITMAP as u64) as usize);

// the number of blocks observed by index_entries must be multiple of ENTRIES_PER_BITMAP
let nr_allocated = meta_sm.lock().unwrap().get_nr_allocated()?;
@@ -193,6 +194,35 @@ mod metadata_sm {
fn check_multiple_index_entries() -> Result<()> {
check_index_entries(ENTRIES_PER_BITMAP as u64 * 16 + 1000)
}
+
+ #[test]
+ fn ignore_junk_bytes_in_index_block() -> Result<()> {
+ use crate::checksum;
+ use crate::pdata::space_map::common::IndexEntry;
+ use crate::pdata::unpack::Unpack;
+
+ let nr_blocks = ENTRIES_PER_BITMAP as u64 * 4 + 1000;
+ let nr_bitmaps = div_up(nr_blocks, ENTRIES_PER_BITMAP as u64) as usize;
+ let engine = Arc::new(CoreIoEngine::new(nr_blocks));
+ let meta_sm = core_metadata_sm(engine.get_nr_blocks(), u32::MAX);
+
+ let mut w = WriteBatcher::new(engine.clone(), meta_sm.clone(), engine.get_batch_size());
+ w.alloc()?; // reserved for the superblock
+ let root = write_metadata_sm(&mut w)?;
+
+ // append junk bytes to the unused entry
+ let index_block = w.read(root.bitmap_root)?;
+ index_block.get_data()[nr_bitmaps * IndexEntry::disk_size() as usize + 16] = 1;
+ w.write(index_block, checksum::BT::INDEX)?;
+ w.flush()?;
+ drop(w);
+
+ let b = engine.read(root.bitmap_root)?;
+ let entries = load_metadata_index(&b, root.nr_blocks)?.indexes;
+ ensure!(entries.len() == nr_bitmaps);
+
+ Ok(())
+ }
}

//------------------------------------------
@@ -223,6 +253,7 @@ mod disk_sm {
data_sm.lock().unwrap().inc(0, 100)?;

let root = write_disk_sm(&mut w, data_sm.lock().unwrap().deref())?;
+ drop(w);

let entries =
btree_to_value_vec::<IndexEntry>(&mut Vec::new(), engine, false, root.bitmap_root)?;
diff --git a/src/thin/damage_generator.rs b/src/thin/damage_generator.rs
index 56685d4d..df1be4cd 100644
--- a/src/thin/damage_generator.rs
+++ b/src/thin/damage_generator.rs
@@ -23,7 +23,7 @@ fn find_blocks_of_rc(
let mut found = Vec::<u64>::new();
if ref_count < 3 {
let b = engine.read(sm_root.bitmap_root)?;
- let entries = check_and_unpack_metadata_index(&b)?.indexes;
+ let entries = load_metadata_index(&b, sm_root.nr_blocks)?.indexes;
let bitmaps: Vec<u64> = entries.iter().map(|ie| ie.blocknr).collect();
let nr_bitmaps = bitmaps.len();

@@ -75,7 +75,7 @@ fn adjust_bitmap_entries(
};

let index_block = engine.read(sm_root.bitmap_root)?;
- let entries = check_and_unpack_metadata_index(&index_block)?.indexes;
+ let entries = load_metadata_index(&index_block, sm_root.nr_blocks)?.indexes;

let bi = blocks_to_bitmaps(blocks);
let bitmaps: Vec<u64> = bi.iter().map(|i| entries[*i].blocknr).collect();
diff --git a/src/thin/stat.rs b/src/thin/stat.rs
index 03ae6845..c6f2bf44 100644
--- a/src/thin/stat.rs
+++ b/src/thin/stat.rs
@@ -72,9 +72,10 @@ fn gather_btree_index_entries(
fn gather_metadata_index_entries(
engine: Arc<dyn IoEngine + Send + Sync>,
bitmap_root: u64,
+ nr_blocks: u64,
) -> Result<Vec<IndexEntry>> {
let b = engine.read(bitmap_root)?;
- let entries = check_and_unpack_metadata_index(&b)?.indexes;
+ let entries = load_metadata_index(&b, nr_blocks)?.indexes;
Ok(entries)
}

@@ -152,7 +153,8 @@ fn stat_metadata_block_ref_counts(
) -> Result<BTreeMap<u32, u64>> {
let mut histogram = BTreeMap::<u32, u64>::new();

- let index_entries = gather_metadata_index_entries(engine.clone(), root.bitmap_root)?;
+ let index_entries =
+ gather_metadata_index_entries(engine.clone(), root.bitmap_root, root.nr_blocks)?;
stat_low_ref_counts(engine.clone(), &index_entries, &mut histogram)?;

let histogram = stat_overflow_ref_counts(engine, root.ref_count_root, histogram)?;
--
2.43.0
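A minimal sketch (hypothetical types, not the project's API) of the loading strategy the patch above switches to: derive the number of valid index entries from the block count recorded in the superblock instead of trusting zero-initialized padding slots.

#[derive(Clone, Default)]
struct IndexEntry {
    blocknr: u64,
    nr_free: u32,
}

const ENTRIES_PER_BITMAP: u64 = 16 * 1024; // assumed value for the sketch

fn div_up(v: u64, divisor: u64) -> u64 {
    (v + divisor - 1) / divisor
}

// Keep only the entries the space map can actually use; whatever bytes sit in
// the remaining slots of the index block are simply ignored.
fn load_index_entries(mut unpacked: Vec<IndexEntry>, nr_blocks: u64) -> Vec<IndexEntry> {
    let nr_bitmaps = div_up(nr_blocks, ENTRIES_PER_BITMAP) as usize;
    unpacked.truncate(nr_bitmaps);
    unpacked
}

fn main() {
    // An index block carries a fixed number of slots (255 assumed here); only
    // the first div_up(nr_blocks, ENTRIES_PER_BITMAP) of them are meaningful.
    let raw = vec![IndexEntry::default(); 255];
    let entries = load_index_entries(raw, 5 * ENTRIES_PER_BITMAP + 1000);
    assert_eq!(entries.len(), 6);
}
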
0005-cache_check-Fix-boundary-check-on-the-bitset-for-cac.patch (deleted)
@@ -1,111 +0,0 @@
From 96fc598f76beac1deb0c9564dc67416f70ae4ac4 Mon Sep 17 00:00:00 2001
From: Ming-Hung Tsai <mtsai@redhat.com>
Date: Tue, 30 Jan 2024 15:20:31 +0800
Subject: [PATCH 5/6] [cache_check] Fix boundary check on the bitset for cached
 blocks

The bitset for cached block addresses grows dynamically if the metadata
is not shutdown properly, in which the size hint of the slow (backing)
device is not available. Fix a bug in determining whether resizing is
needed (bz2258485).

(cherry picked from commit d2390a50f38d88f0f32b13e59444bbbca7e660b3)
---
src/cache/check.rs | 6 +++---
tests/cache_check.rs | 45 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 48 insertions(+), 3 deletions(-)

diff --git a/src/cache/check.rs b/src/cache/check.rs
index 18bd51b3..c71d3059 100644
--- a/src/cache/check.rs
+++ b/src/cache/check.rs
@@ -19,7 +19,7 @@ use crate::report::*;

//------------------------------------------

-// 16m entries is capable for a 1TB cache with 64KB block size
+// 16m entries is capable to address a 1TB device with 64KB block size
const DEFAULT_OBLOCKS: usize = 16777216;

fn inc_superblock(sm: &ASpaceMap) -> anyhow::Result<()> {
@@ -82,7 +82,7 @@ mod format1 {
}
let mut seen_oblocks = self.seen_oblocks.lock().unwrap();

- if m.oblock as usize > seen_oblocks.len() {
+ if m.oblock as usize >= seen_oblocks.len() {
seen_oblocks.grow(m.oblock as usize + 1);
} else if seen_oblocks.contains(m.oblock as usize) {
return Err(array::value_err("origin block already mapped".to_string()));
@@ -179,7 +179,7 @@ mod format2 {
));
}

- if m.oblock as usize > seen_oblocks.len() {
+ if m.oblock as usize >= seen_oblocks.len() {
seen_oblocks.grow(m.oblock as usize + 1);
} else if seen_oblocks.contains(m.oblock as usize) {
return Err(array::value_err("origin block already mapped".to_string()));
diff --git a/tests/cache_check.rs b/tests/cache_check.rs
index 81f4c578..8988694a 100644
--- a/tests/cache_check.rs
+++ b/tests/cache_check.rs
@@ -11,6 +11,8 @@ use common::program::*;
use common::target::*;
use common::test_dir::*;

+use std::io::Write;
+
//------------------------------------------

const USAGE: &str = "Validates cache metadata on a device or file.
@@ -294,3 +296,46 @@ fn no_clear_needs_check_if_error() -> Result<()> {
}

//------------------------------------------
+
+fn metadata_without_slow_dev_size_info(use_v1: bool) -> Result<()> {
+ let mut td = TestDir::new()?;
+
+ // The input metadata has a cached oblock with address equals to the default bitset size
+ // boundary (DEFAULT_OBLOCKS = 16777216), triggering bitset resize.
+ let xml = td.mk_path("meta.xml");
+ let mut file = std::fs::File::create(&xml)?;
+ file.write_all(b"<superblock uuid=\"\" block_size=\"128\" nr_cache_blocks=\"1024\" policy=\"smq\" hint_width=\"4\">
+ <mappings>
+ <mapping cache_block=\"0\" origin_block=\"16777216\" dirty=\"false\"/>
+ </mappings>
+ <hints>
+ <hint cache_block=\"0\" data=\"AAAAAA==\"/>
+ </hints>
+</superblock>")?;
+
+ let md = td.mk_path("meta.bin");
+ thinp::file_utils::create_sized_file(&md, 4096 * 4096)?;
+
+ let cache_restore_args = if use_v1 {
+ args!["-i", &xml, "-o", &md, "--metadata-version=1"]
+ } else {
+ args!["-i", &xml, "-o", &md, "--metadata-version=2"]
+ };
+
+ run_ok(cache_restore_cmd(cache_restore_args))?;
+ run_ok(cache_check_cmd(args![&md]))?;
+
+ Ok(())
+}
+
+#[test]
+fn metadata_v1_without_slow_dev_size_info() -> Result<()> {
+ metadata_without_slow_dev_size_info(true)
+}
+
+#[test]
+fn metadata_v2_without_slow_dev_size_info() -> Result<()> {
+ metadata_without_slow_dev_size_info(false)
+}
+
+//------------------------------------------
--
2.43.0
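For illustration only (a plain-Rust stand-in for the growable bitset used by cache_check): growing must trigger when the incoming index equals the current length, because valid indices run from 0 to len - 1.

// A tiny stand-in bitset; the real code uses a dedicated growable bitset type.
struct Bitset {
    bits: Vec<bool>,
}

impl Bitset {
    fn len(&self) -> usize {
        self.bits.len()
    }
    fn grow(&mut self, new_len: usize) {
        if new_len > self.bits.len() {
            self.bits.resize(new_len, false);
        }
    }
    fn insert(&mut self, i: usize) -> Result<(), String> {
        // `>=` is the important part: index == len is already out of bounds,
        // which is exactly the off-by-one the patch fixes.
        if i >= self.len() {
            self.grow(i + 1);
        } else if self.bits[i] {
            return Err("origin block already mapped".to_string());
        }
        self.bits[i] = true;
        Ok(())
    }
}

fn main() {
    let mut seen = Bitset { bits: vec![false; 16] };
    seen.insert(16).unwrap(); // equal to len: must grow, not index out of range
    assert_eq!(seen.len(), 17);
}
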
0006-thin-cache_check-Print-suggestive-hints-for-improvin.patch (deleted)
@@ -1,62 +0,0 @@
From e295610fad85ec1a64e569cba425ca557a56c6e6 Mon Sep 17 00:00:00 2001
From: Ming-Hung Tsai <mtsai@redhat.com>
Date: Wed, 31 Jan 2024 11:36:17 +0800
Subject: [PATCH 6/6] [thin/cache_check] Print suggestive hints for improving
 error resolution

Enhance error messages to instruct users on addressing recoverable
errors, eliminating the guesswork (bz2233177).

(cherry picked from commit aaf3b396574709902ffba47e03a5c7ded6a103c5)
---
src/cache/check.rs | 5 ++++-
src/thin/check.rs | 10 ++++++++--
2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/src/cache/check.rs b/src/cache/check.rs
index c71d3059..17d1af77 100644
--- a/src/cache/check.rs
+++ b/src/cache/check.rs
@@ -387,7 +387,10 @@ pub fn check(opts: CacheCheckOptions) -> anyhow::Result<()> {
ctx.report.warning("Repairing metadata leaks.");
repair_space_map(ctx.engine.clone(), metadata_leaks, metadata_sm.clone())?;
} else if !opts.ignore_non_fatal {
- return Err(anyhow!("metadata space map contains leaks"));
+ return Err(anyhow!(concat!(
+ "metadata space map contains leaks\n",
+ "perhaps you wanted to run with --auto-repair"
+ )));
}
}

diff --git a/src/thin/check.rs b/src/thin/check.rs
index 8b829899..f6fde359 100644
--- a/src/thin/check.rs
+++ b/src/thin/check.rs
@@ -1246,7 +1246,10 @@ pub fn check(opts: ThinCheckOptions) -> Result<()> {
report.warning("Repairing data leaks.");
repair_space_map(engine.clone(), data_leaks, data_sm.clone())?;
} else if !opts.ignore_non_fatal {
- return Err(anyhow!("data space map contains leaks"));
+ return Err(anyhow!(concat!(
+ "data space map contains leaks\n",
+ "perhaps you wanted to run with --auto-repair"
+ )));
}
}

@@ -1255,7 +1258,10 @@ pub fn check(opts: ThinCheckOptions) -> Result<()> {
report.warning("Repairing metadata leaks.");
repair_space_map(engine.clone(), metadata_leaks, metadata_sm.clone())?;
} else if !opts.ignore_non_fatal {
- return Err(anyhow!("metadata space map contains leaks"));
+ return Err(anyhow!(concat!(
+ "metadata space map contains leaks\n",
+ "perhaps you wanted to run with --auto-repair"
+ )));
}
}

--
2.43.0
0007-thin_dump-Do-not-print-error-messages-on-BrokenPipe-.patch (deleted)
@@ -1,225 +0,0 @@
From 6f7f70da8341cfe8447c6c70ebb4085629983b0d Mon Sep 17 00:00:00 2001
From: Ming-Hung Tsai <mtsai@redhat.com>
Date: Tue, 20 Feb 2024 04:16:46 +0800
Subject: [PATCH 07/10] [thin_dump] Do not print error messages on BrokenPipe
 (EPIPE)

Considering BrokenPipe is an expected result in most use cases,
thin_dump should not print error messages in this situation but should
return a non-zero exit code instead. For instance, lvconvert fetches
only the first line of thin_dump then closes the pipe immediately,
causing BrokenPipe to terminate thin_dump intentionally.

Changes made include removing the unused NodeVisitor abstraction for
MappingVisitor to keep the error type from low-level io, and printing
messages based on the error type returned upward (bz2233177).

(cherry picked from commit d7b91f9f7e9e4118f1f47df19d81fc9bc6965ec5)
---
src/commands/utils.rs | 16 ++++++---
src/thin/dump.rs | 31 +++---------------
tests/common/process.rs | 2 +-
tests/thin_dump.rs | 72 ++++++++++++++++++++++++++++++++++++++++-
4 files changed, 89 insertions(+), 32 deletions(-)

diff --git a/src/commands/utils.rs b/src/commands/utils.rs
index 0be4fa7f..38751603 100644
--- a/src/commands/utils.rs
+++ b/src/commands/utils.rs
@@ -162,10 +162,18 @@ pub fn check_overwrite_metadata(report: &Report, path: &Path) -> Result<()> {

pub fn to_exit_code<T>(report: &Report, result: anyhow::Result<T>) -> exitcode::ExitCode {
if let Err(e) = result {
- if e.chain().len() > 1 {
- report.fatal(&format!("{}: {}", e, e.root_cause()));
- } else {
- report.fatal(&format!("{}", e));
+ let root_cause = e.root_cause();
+ let is_broken_pipe = root_cause
+ .downcast_ref::<Arc<std::io::Error>>() // quick_xml::Error::Io wraps io::Error in Arc
+ .map(|err| err.kind() == std::io::ErrorKind::BrokenPipe)
+ .unwrap_or(false);
+
+ if !is_broken_pipe {
+ if e.chain().len() > 1 {
+ report.fatal(&format!("{}: {}", e, root_cause));
+ } else {
+ report.fatal(&format!("{}", e));
+ }
}

// FIXME: we need a way of getting more meaningful error codes
diff --git a/src/thin/dump.rs b/src/thin/dump.rs
index f6046487..561ea566 100644
--- a/src/thin/dump.rs
+++ b/src/thin/dump.rs
@@ -10,8 +10,7 @@ use crate::checksum;
use crate::commands::engine::*;
use crate::dump_utils::*;
use crate::io_engine::*;
-use crate::pdata::btree::{self, *};
-use crate::pdata::btree_walker::*;
+use crate::pdata::btree::*;
use crate::pdata::space_map::common::*;
use crate::pdata::unpack::*;
use crate::report::*;
@@ -104,9 +103,7 @@ impl<'a> MappingVisitor<'a> {
}),
}
}
-}

-impl<'a> NodeVisitor<BlockTime> for MappingVisitor<'a> {
fn visit(
&self,
_path: &[u64],
@@ -114,39 +111,21 @@ impl<'a> NodeVisitor<BlockTime> for MappingVisitor<'a> {
_h: &NodeHeader,
keys: &[u64],
values: &[BlockTime],
- ) -> btree::Result<()> {
+ ) -> Result<()> {
let mut inner = self.inner.lock().unwrap();
for (k, v) in keys.iter().zip(values.iter()) {
if let Some(run) = inner.builder.next(*k, v.block, v.time) {
- // FIXME: BTreeError should carry more information than a string
- // so the caller could identify the actual root cause,
- // e.g., a broken pipe error or something.
- inner
- .md_out
- .map(&run)
- .map_err(|e| btree::value_err(format!("{}", e)))?;
+ inner.md_out.map(&run)?;
}
}

Ok(())
}

- fn visit_again(&self, _path: &[u64], b: u64) -> btree::Result<()> {
- let mut inner = self.inner.lock().unwrap();
- inner
- .md_out
- .ref_shared(&format!("{}", b))
- .map_err(|e| btree::value_err(format!("{}", e)))?;
- Ok(())
- }
-
- fn end_walk(&self) -> btree::Result<()> {
+ fn end_walk(&self) -> Result<()> {
let mut inner = self.inner.lock().unwrap();
if let Some(run) = inner.builder.complete() {
- inner
- .md_out
- .map(&run)
- .map_err(|e| btree::value_err(format!("{}", e)))?;
+ inner.md_out.map(&run)?;
}
Ok(())
}
diff --git a/tests/common/process.rs b/tests/common/process.rs
index 99f10dc7..ad26c38b 100644
--- a/tests/common/process.rs
+++ b/tests/common/process.rs
@@ -39,7 +39,7 @@ impl Command {
Command { program, args }
}

- fn to_expr(&self) -> duct::Expression {
+ pub fn to_expr(&self) -> duct::Expression {
duct::cmd(&self.program, &self.args)
}
}
diff --git a/tests/thin_dump.rs b/tests/thin_dump.rs
index 28cb237d..6e2cd3db 100644
--- a/tests/thin_dump.rs
+++ b/tests/thin_dump.rs
@@ -129,7 +129,7 @@ fn dump_restore_cycle() -> Result<()> {
// test no stderr with a normal dump

#[test]
-fn no_stderr() -> Result<()> {
+fn no_stderr_on_success() -> Result<()> {
let mut td = TestDir::new()?;

let md = mk_valid_md(&mut td)?;
@@ -139,6 +139,76 @@ fn no_stderr() -> Result<()> {
Ok(())
}

+//------------------------------------------
+// test no stderr on broken pipe errors
+
+#[test]
+fn no_stderr_on_broken_pipe() -> Result<()> {
+ use anyhow::ensure;
+
+ let mut td = TestDir::new()?;
+
+ // use the metadata producing dump more than 64KB (the default pipe buffer size),
+ // such that thin_dump will be blocked on writing to the pipe, then hits EPIPE.
+ let md = prep_metadata(&mut td)?;
+
+ let mut pipefd = [0i32; 2];
+ unsafe {
+ ensure!(libc::pipe2(pipefd.as_mut_slice().as_mut_ptr(), libc::O_CLOEXEC) == 0);
+ }
+
+ let cmd = thin_dump_cmd(args![&md])
+ .to_expr()
+ .stdout_file(pipefd[1])
+ .stderr_capture();
+ let handle = cmd.unchecked().start()?;
+
+ // wait for thin_dump to fill the pipe buffer
+ std::thread::sleep(std::time::Duration::from_millis(1000));
+
+ unsafe {
+ libc::close(pipefd[1]); // close the unused write-end
+ libc::close(pipefd[0]); // causing broken pipe
+ }
+
+ let output = handle.wait()?;
+ ensure!(!output.status.success());
+ ensure!(output.stderr.is_empty());
+
+ Ok(())
+}
+
+#[test]
+fn no_stderr_on_broken_fifo() -> Result<()> {
+ use anyhow::ensure;
+
+ let mut td = TestDir::new()?;
+
+ // use the metadata producing dump more than 64KB (the default pipe buffer size),
+ // such that thin_dump will be blocked on writing to the pipe, then hits EPIPE.
+ let md = prep_metadata(&mut td)?;
+
+ let out_fifo = td.mk_path("out_fifo");
+ unsafe {
+ let c_str = std::ffi::CString::new(out_fifo.as_os_str().as_encoded_bytes()).unwrap();
+ ensure!(libc::mkfifo(c_str.as_ptr(), 0o666) == 0);
+ };
+
+ let cmd = thin_dump_cmd(args![&md, "-o", &out_fifo])
+ .to_expr()
+ .stderr_capture();
+ let handle = cmd.unchecked().start()?;
+
+ let fifo = std::fs::File::open(out_fifo)?;
+ drop(fifo); // causing broken pipe
+
+ let output = handle.wait()?;
+ ensure!(!output.status.success());
+ ensure!(output.stderr.is_empty());
+
+ Ok(())
+}
+
+//------------------------------------------
// test dump metadata snapshot from a live metadata
// here we use a corrupted metadata to ensure that "thin_dump -m" reads the
--
2.43.0
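A stand-alone sketch of the error-classification idea in the patch above: walk an anyhow error chain to its root cause and stay silent when that root cause is EPIPE, while still returning a failure exit code. Only std and the anyhow crate are used here; the Report/exitcode plumbing of the real tool is not reproduced.

use std::io::ErrorKind;

fn is_broken_pipe(e: &anyhow::Error) -> bool {
    // thin_dump's XML writer wraps io::Error in an Arc, so the real code also
    // probes for Arc<std::io::Error>; a plain io::Error is enough for the sketch.
    e.root_cause()
        .downcast_ref::<std::io::Error>()
        .map(|io| io.kind() == ErrorKind::BrokenPipe)
        .unwrap_or(false)
}

fn to_exit_code(result: anyhow::Result<()>) -> i32 {
    match result {
        Ok(()) => 0,
        Err(e) => {
            if !is_broken_pipe(&e) {
                eprintln!("error: {e}");
            }
            1 // non-zero either way; EPIPE merely skips the message
        }
    }
}

fn main() {
    let epipe = std::io::Error::new(ErrorKind::BrokenPipe, "pipe closed");
    let code = to_exit_code(Err(anyhow::Error::new(epipe).context("dump failed")));
    assert_eq!(code, 1); // fails quietly: nothing is printed to stderr
}
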
0008-thin_metadata_pack-Allow-long-format-for-input-and-o.patch (deleted)
@@ -1,34 +0,0 @@
From e6659ff342516f1861cdb1f365dfb4f79bb45c54 Mon Sep 17 00:00:00 2001
From: mulhern <amulhern@redhat.com>
Date: Sun, 7 Jan 2024 20:17:13 -0500
Subject: [PATCH 08/10] thin_metadata_pack: Allow long format for input and
 output

The options match the man pages that way.

(cherry picked from commit b5e7028effc51ad1c303424a9f1d161d2f7b0c7c)
---
src/commands/thin_metadata_pack.rs | 2 ++
1 file changed, 2 insertions(+)

diff --git a/src/commands/thin_metadata_pack.rs b/src/commands/thin_metadata_pack.rs
index 5aad973c..265439c9 100644
--- a/src/commands/thin_metadata_pack.rs
+++ b/src/commands/thin_metadata_pack.rs
@@ -26,11 +26,13 @@ impl ThinMetadataPackCommand {
.help("Specify thinp metadata binary device/file")
.required(true)
.short('i')
+ .long("input")
.value_name("DEV"))
.arg(Arg::new("OUTPUT")
.help("Specify packed output file")
.required(true)
.short('o')
+ .long("output")
.value_name("FILE"))
}
}
--
2.43.0
0009-commands-Fix-version-string-compatibility-issue-with.patch (deleted; file diff suppressed because it is too large)
0010-thin_dump-Do-not-print-error-messages-on-BrokenPipe-.patch (deleted)
@@ -1,109 +0,0 @@
From a557bc55ef3c136b1fca3f27cd55fdb0014dc6e7 Mon Sep 17 00:00:00 2001
From: Ming-Hung Tsai <mtsai@redhat.com>
Date: Fri, 23 Feb 2024 20:20:01 +0800
Subject: [PATCH 10/10] [thin_dump] Do not print error messages on BrokenPipe
 (EPIPE)

Handle the case that doesn't write through quick_xml,
e.g., thin_dump --format human_readable

(cherry picked from commit b3e05f2eb9b704af897f14215536dadde5c13b2d)
---
src/commands/utils.rs | 7 +++++--
tests/thin_dump.rs | 34 +++++++++++++++++++++++-------
2 files changed, 32 insertions(+), 9 deletions(-)

diff --git a/src/commands/utils.rs b/src/commands/utils.rs
index 38751603..82b2529e 100644
--- a/src/commands/utils.rs
+++ b/src/commands/utils.rs
@@ -165,8 +165,11 @@ pub fn to_exit_code<T>(report: &Report, result: anyhow::Result<T>) -> exitcode::
let root_cause = e.root_cause();
let is_broken_pipe = root_cause
.downcast_ref::<Arc<std::io::Error>>() // quick_xml::Error::Io wraps io::Error in Arc
- .map(|err| err.kind() == std::io::ErrorKind::BrokenPipe)
- .unwrap_or(false);
+ .map_or_else(
+ || root_cause.downcast_ref::<std::io::Error>(),
+ |err| Some(err.as_ref()),
+ )
+ .map_or(false, |err| err.kind() == std::io::ErrorKind::BrokenPipe);

if !is_broken_pipe {
if e.chain().len() > 1 {
diff --git a/tests/thin_dump.rs b/tests/thin_dump.rs
index 6e2cd3db..81982188 100644
--- a/tests/thin_dump.rs
+++ b/tests/thin_dump.rs
@@ -142,8 +142,7 @@ fn no_stderr_on_success() -> Result<()> {
//------------------------------------------
// test no stderr on broken pipe errors

-#[test]
-fn no_stderr_on_broken_pipe() -> Result<()> {
+fn test_no_stderr_on_broken_pipe(extra_args: &[&std::ffi::OsStr]) -> Result<()> {
use anyhow::ensure;

let mut td = TestDir::new()?;
@@ -157,7 +156,9 @@ fn no_stderr_on_broken_pipe() -> Result<()> {
ensure!(libc::pipe2(pipefd.as_mut_slice().as_mut_ptr(), libc::O_CLOEXEC) == 0);
}

- let cmd = thin_dump_cmd(args![&md])
+ let mut args = args![&md].to_vec();
+ args.extend_from_slice(extra_args);
+ let cmd = thin_dump_cmd(args)
.to_expr()
.stdout_file(pipefd[1])
.stderr_capture();
@@ -179,7 +180,16 @@ fn no_stderr_on_broken_pipe() -> Result<()> {
}

#[test]
-fn no_stderr_on_broken_fifo() -> Result<()> {
+fn no_stderr_on_broken_pipe_xml() -> Result<()> {
+ test_no_stderr_on_broken_pipe(&[])
+}
+
+#[test]
+fn no_stderr_on_broken_pipe_humanreadable() -> Result<()> {
+ test_no_stderr_on_broken_pipe(&args!["--format", "human_readable"])
+}
+
+fn test_no_stderr_on_broken_fifo(extra_args: &[&std::ffi::OsStr]) -> Result<()> {
use anyhow::ensure;

let mut td = TestDir::new()?;
@@ -194,9 +204,9 @@ fn no_stderr_on_broken_fifo() -> Result<()> {
ensure!(libc::mkfifo(c_str.as_ptr(), 0o666) == 0);
};

- let cmd = thin_dump_cmd(args![&md, "-o", &out_fifo])
- .to_expr()
- .stderr_capture();
+ let mut args = args![&md, "-o", &out_fifo].to_vec();
+ args.extend_from_slice(extra_args);
+ let cmd = thin_dump_cmd(args).to_expr().stderr_capture();
let handle = cmd.unchecked().start()?;

let fifo = std::fs::File::open(out_fifo)?;
@@ -209,6 +219,16 @@ fn no_stderr_on_broken_fifo() -> Result<()> {
Ok(())
}

+#[test]
+fn no_stderr_on_broken_fifo_xml() -> Result<()> {
+ test_no_stderr_on_broken_fifo(&[])
+}
+
+#[test]
+fn no_stderr_on_broken_fifo_humanreadable() -> Result<()> {
+ test_no_stderr_on_broken_fifo(&args!["--format", "human_readable"])
+}
+
//------------------------------------------
// test dump metadata snapshot from a live metadata
// here we use a corrupted metadata to ensure that "thin_dump -m" reads the
--
2.43.0
device-mapper-persistent-data.spec (updated; removed lines appear directly above their replacements)
@@ -9,31 +9,29 @@

Summary: Device-mapper Persistent Data Tools
Name: device-mapper-persistent-data
Version: 1.0.9
Release: 3%{?dist}%{?release_suffix}.1
License: GPLv3+
Version: 1.1.0
Release: 1%{?dist}%{?release_suffix}
License: GPL-3.0-only AND (0BSD OR MIT OR Apache-2.0) AND Apache-2.0 AND (Apache-2.0 OR MIT) AND (Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT) AND BSD-3-Clause AND MIT AND (MIT OR Apache-2.0) AND (MIT OR Zlib OR Apache-2.0) AND (Unlicense OR MIT) AND (Zlib OR Apache-2.0 OR MIT)
URL: https://github.com/jthornber/thin-provisioning-tools
#Source0: https://github.com/jthornber/thin-provisioning-tools/archive/thin-provisioning-tools-%%{version}.tar.gz
Source0: https://github.com/jthornber/thin-provisioning-tools/archive/v%{version}%{?version_suffix}.tar.gz
Source1: dmpd109-vendor.tar.gz
Source1: dmpd110-vendor.tar.gz
Patch1: 0001-Tweak-cargo.toml-to-work-with-vendor-directory.patch
Patch2: 0002-space-map-Fix-incorrect-index_entry.nr_free-while-ex.patch
Patch3: 0003-thin_repair-Fix-child-keys-checking-on-the-node-with.patch
Patch4: 0004-space_map-Allow-non-zero-values-in-unused-index-bloc.patch
Patch5: 0005-cache_check-Fix-boundary-check-on-the-bitset-for-cac.patch
Patch6: 0006-thin-cache_check-Print-suggestive-hints-for-improvin.patch
# RHEL-26521:
Patch7: 0007-thin_dump-Do-not-print-error-messages-on-BrokenPipe-.patch
# RHEL-26520:
Patch8: 0008-thin_metadata_pack-Allow-long-format-for-input-and-o.patch
Patch9: 0009-commands-Fix-version-string-compatibility-issue-with.patch
# RHEL-26521:
Patch10: 0010-thin_dump-Do-not-print-error-messages-on-BrokenPipe-.patch

%if %{defined rhel}
BuildRequires: rust-toolset
%else
BuildRequires: rust-packaging
%endif
BuildRequires: rust >= 1.35
BuildRequires: cargo
BuildRequires: make
BuildRequires: systemd-devel
BuildRequires: clang-libs
BuildRequires: glibc-static
BuildRequires: device-mapper-devel
BuildRequires: clang
#BuildRequires: gcc

%description
thin-provisioning-tools contains check,dump,restore,repair,rmap
@@ -114,7 +112,7 @@ RUST_BACKTRACE=1 %%cargo_test -- --nocapture --test-threads=1 || true
%make_install MANDIR=%{_mandir} STRIP=true

%files
%doc COPYING README.md
%doc COPYING README.md CHANGES
%{_mandir}/man8/cache_check.8.gz
%{_mandir}/man8/cache_dump.8.gz
%{_mandir}/man8/cache_metadata_size.8.gz
@@ -130,6 +128,7 @@ RUST_BACKTRACE=1 %%cargo_test -- --nocapture --test-threads=1 || true
%{_mandir}/man8/thin_dump.8.gz
%{_mandir}/man8/thin_ls.8.gz
%{_mandir}/man8/thin_metadata_size.8.gz
%{_mandir}/man8/thin_migrate.8.gz
%{_mandir}/man8/thin_repair.8.gz
%{_mandir}/man8/thin_restore.8.gz
%{_mandir}/man8/thin_rmap.8.gz
@@ -152,6 +151,7 @@ RUST_BACKTRACE=1 %%cargo_test -- --nocapture --test-threads=1 || true
%{_sbindir}/thin_dump
%{_sbindir}/thin_ls
%{_sbindir}/thin_metadata_size
%{_sbindir}/thin_migrate
%{_sbindir}/thin_repair
%{_sbindir}/thin_restore
%{_sbindir}/thin_rmap
@@ -161,6 +161,12 @@ RUST_BACKTRACE=1 %%cargo_test -- --nocapture --test-threads=1 || true
#% {_sbindir}/thin_show_duplicates

%changelog
* Fri Oct 11 2024 Marian Csontos <mcsontos@redhat.com> - 1.1.0-1
- Update to latest upstream release 1.1.0.
- Support listing the highest mapped block in thin_ls.
- Introduce thin_migrate for volume migration.
- See CHANGES.

* Mon Mar 04 2024 Marian Csontos <mcsontos@redhat.com> - 1.0.9-3
- Fix --version string compatibility with LVM tools.
- Fix confusing Broken pipe warning when used in lvconvert --repair.
sources | 4 ++--
@@ -1,2 +1,2 @@
-SHA512 (v1.0.9.tar.gz) = c7d137b82cce4286d43f49af039f8026d7d7746e96affebc82e8243173ba9a014e3b462fc4b55850067ecfbcc6113c49f009c1285e272a4d64455715d11a9da1
-SHA512 (dmpd109-vendor.tar.gz) = f2a581da80e4137c6ecab9237587ec42141fdfe8c1bfae2ab5b431b64c100ea6c65cfadbbdd10d665101364731d7c5e61780490b9cfd5231df3f463483890747
+SHA512 (v1.1.0.tar.gz) = 8cf3953743334b5a34504695757fa2de5a5fb5bdb8c7aed859995154fc004f52c3ef041558d307a2309c2de8dcdcbd8a0537bd3408fd78c7ff2f641f28944c1e
+SHA512 (dmpd110-vendor.tar.gz) = 05c32ade894331eb11239c88d702d20ac463745b51d2c8b71d5aed75ce443ab160d94cd33bdbf021982fe3edc21c630e5bd5dba66d890b352bb6636d09667077