From ad6205f3cd4da475f125338c70d67ef53a1e7376 Mon Sep 17 00:00:00 2001
From: CentOS Sources
Date: Tue, 27 Sep 2022 10:15:36 -0400
Subject: [PATCH] import kmod-kvdo-8.2.0.18-46.el9

---
 .gitignore                                    |    2 +-
 .kmod-kvdo.metadata                           |    2 +-
 SOURCES/add_lz4_dependency.patch              | 1193 +++++++++++++++++
 ...-logical-space-check-from-table-line.patch |   34 +
 SPECS/kmod-kvdo.spec                          |  160 ++-
 5 files changed, 1367 insertions(+), 24 deletions(-)
 create mode 100644 SOURCES/add_lz4_dependency.patch
 create mode 100644 SOURCES/removed-logical-space-check-from-table-line.patch

diff --git a/.gitignore b/.gitignore
index c13466a..01ff92e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1 @@
-SOURCES/kvdo-e209cfe.tar.gz
+SOURCES/kvdo-6077a12.tar.gz
diff --git a/.kmod-kvdo.metadata b/.kmod-kvdo.metadata
index 682909b..1a4d584 100644
--- a/.kmod-kvdo.metadata
+++ b/.kmod-kvdo.metadata
@@ -1 +1 @@
-a7f0962c3515796db0d935d1a78a08ec253a8a9a SOURCES/kvdo-e209cfe.tar.gz
+9da64204a641311752f28ae172c7469da9668e56 SOURCES/kvdo-6077a12.tar.gz
diff --git a/SOURCES/add_lz4_dependency.patch b/SOURCES/add_lz4_dependency.patch
new file mode 100644
index 0000000..99e1aeb
--- /dev/null
+++ b/SOURCES/add_lz4_dependency.patch
@@ -0,0 +1,1193 @@
+diff -Naur kvdo-127a993f34010e4d8b05e46c4af2a9295c4477d0.orig/vdo/lz4_compress.c kvdo-127a993f34010e4d8b05e46c4af2a9295c4477d0/vdo/lz4_compress.c
+--- kvdo-127a993f34010e4d8b05e46c4af2a9295c4477d0.orig/vdo/lz4_compress.c	1969-12-31 19:00:00.000000000 -0500
++++ kvdo-127a993f34010e4d8b05e46c4af2a9295c4477d0/vdo/lz4_compress.c	2022-07-27 21:39:51.288835411 -0400
+@@ -0,0 +1,940 @@
++/*
++ * LZ4 - Fast LZ compression algorithm
++ * Copyright (C) 2011-2016, Yann Collet.
++ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are
++ * met:
++ *	* Redistributions of source code must retain the above copyright
++ *	  notice, this list of conditions and the following disclaimer.
++ *	* Redistributions in binary form must reproduce the above
++ * copyright notice, this list of conditions and the following disclaimer
++ * in the documentation and/or other materials provided with the
++ * distribution.
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * You can contact the author at :
++ * - LZ4 homepage : http://www.lz4.org
++ * - LZ4 source repository : https://github.com/lz4/lz4
++ *
++ * Changed for kernel usage by:
++ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
++ */
++
++/*-************************************
++ * Dependencies
++ **************************************/
++#include <linux/lz4.h>
++#include "lz4defs.h"
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <asm/unaligned.h>
++
++static const int LZ4_minLength = (MFLIMIT + 1);
++static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1));
++
++/*-******************************
++ * Compression functions
++ ********************************/
++static FORCE_INLINE U32 LZ4_hash4(
++	U32 sequence,
++	tableType_t const tableType)
++{
++	if (tableType == byU16)
++		return ((sequence * 2654435761U)
++			>> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
++	else
++		return ((sequence * 2654435761U)
++			>> ((MINMATCH * 8) - LZ4_HASHLOG));
++}
++
++static FORCE_INLINE U32 LZ4_hash5(
++	U64 sequence,
++	tableType_t const tableType)
++{
++	const U32 hashLog = (tableType == byU16)
++		? LZ4_HASHLOG + 1
++		: LZ4_HASHLOG;
++
++#if LZ4_LITTLE_ENDIAN
++	static const U64 prime5bytes = 889523592379ULL;
++
++	return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
++#else
++	static const U64 prime8bytes = 11400714785074694791ULL;
++
++	return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
++#endif
++}
++
++static FORCE_INLINE U32 LZ4_hashPosition(
++	const void *p,
++	tableType_t const tableType)
++{
++#if LZ4_ARCH64
++	if (tableType == byU32)
++		return LZ4_hash5(LZ4_read_ARCH(p), tableType);
++#endif
++
++	return LZ4_hash4(LZ4_read32(p), tableType);
++}
++
++static void LZ4_putPositionOnHash(
++	const BYTE *p,
++	U32 h,
++	void *tableBase,
++	tableType_t const tableType,
++	const BYTE *srcBase)
++{
++	switch (tableType) {
++	case byPtr:
++	{
++		const BYTE **hashTable = (const BYTE **)tableBase;
++
++		hashTable[h] = p;
++		return;
++	}
++	case byU32:
++	{
++		U32 *hashTable = (U32 *) tableBase;
++
++		hashTable[h] = (U32)(p - srcBase);
++		return;
++	}
++	case byU16:
++	{
++		U16 *hashTable = (U16 *) tableBase;
++
++		hashTable[h] = (U16)(p - srcBase);
++		return;
++	}
++	}
++}
++
++static FORCE_INLINE void LZ4_putPosition(
++	const BYTE *p,
++	void *tableBase,
++	tableType_t tableType,
++	const BYTE *srcBase)
++{
++	U32 const h = LZ4_hashPosition(p, tableType);
++
++	LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
++}
++
++static const BYTE *LZ4_getPositionOnHash(
++	U32 h,
++	void *tableBase,
++	tableType_t tableType,
++	const BYTE *srcBase)
++{
++	if (tableType == byPtr) {
++		const BYTE **hashTable = (const BYTE **) tableBase;
++
++		return hashTable[h];
++	}
++
++	if (tableType == byU32) {
++		const U32 * const hashTable = (U32 *) tableBase;
++
++		return hashTable[h] + srcBase;
++	}
++
++	{
++		/* default, to ensure a return */
++		const U16 * const hashTable = (U16 *) tableBase;
++
++		return hashTable[h] + srcBase;
++	}
++}
++
++static FORCE_INLINE const BYTE *LZ4_getPosition(
++	const BYTE *p,
++	void *tableBase,
++	tableType_t tableType,
++	const BYTE *srcBase)
++{
++	U32 const h = LZ4_hashPosition(p, tableType);
++
++	return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
++}
++
++
++/*
++ * LZ4_compress_generic() :
++ * inlined, to ensure branches are decided at compilation time
++ */
++static FORCE_INLINE int LZ4_compress_generic(
++	LZ4_stream_t_internal * const dictPtr,
++	const char * const source,
++	char * const dest,
++	const int inputSize,
++	const int maxOutputSize,
++	const limitedOutput_directive outputLimited,
++	const tableType_t tableType,
++	const dict_directive dict,
++	const dictIssue_directive dictIssue,
++	const U32 acceleration)
++{
++	const BYTE *ip = (const BYTE *) source;
++	const BYTE *base;
++	const BYTE *lowLimit;
++	const BYTE * const lowRefLimit = ip - dictPtr->dictSize;
++	const BYTE * const dictionary = dictPtr->dictionary;
++	const BYTE * const dictEnd = dictionary + dictPtr->dictSize;
++	const size_t dictDelta = dictEnd - (const BYTE *)source;
++	const BYTE *anchor = (const BYTE *) source;
++	const BYTE * const iend = ip + inputSize;
++	const BYTE * const mflimit = iend - MFLIMIT;
++	const BYTE * const matchlimit = iend - LASTLITERALS;
++
++	BYTE *op = (BYTE *) dest;
++	BYTE * const olimit = op + maxOutputSize;
++
++	U32 forwardH;
++	size_t refDelta = 0;
++
++	/* Init conditions */
++	if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) {
++		/* Unsupported inputSize, too large (or negative) */
++		return 0;
++	}
++
++	switch (dict) {
++	case noDict:
++	default:
++		base = (const BYTE *)source;
++		lowLimit = (const BYTE *)source;
++		break;
++	case withPrefix64k:
++		base = (const BYTE *)source - dictPtr->currentOffset;
++		lowLimit = (const BYTE *)source - dictPtr->dictSize;
++		break;
++	case usingExtDict:
++		base = (const BYTE *)source - dictPtr->currentOffset;
++		lowLimit = (const BYTE *)source;
++		break;
++	}
++
++	if ((tableType == byU16)
++		&& (inputSize >= LZ4_64Klimit)) {
++		/* Size too large (not within 64K limit) */
++		return 0;
++	}
++
++	if (inputSize < LZ4_minLength) {
++		/* Input too small, no compression (all literals) */
++		goto _last_literals;
++	}
++
++	/* First Byte */
++	LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
++	ip++;
++	forwardH = LZ4_hashPosition(ip, tableType);
++
++	/* Main Loop */
++	for ( ; ; ) {
++		const BYTE *match;
++		BYTE *token;
++
++		/* Find a match */
++		{
++			const BYTE *forwardIp = ip;
++			unsigned int step = 1;
++			unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER;
++
++			do {
++				U32 const h = forwardH;
++
++				ip = forwardIp;
++				forwardIp += step;
++				step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
++
++				if (unlikely(forwardIp > mflimit))
++					goto _last_literals;
++
++				match = LZ4_getPositionOnHash(h,
++					dictPtr->hashTable,
++					tableType, base);
++
++				if (dict == usingExtDict) {
++					if (match < (const BYTE *)source) {
++						refDelta = dictDelta;
++						lowLimit = dictionary;
++					} else {
++						refDelta = 0;
++						lowLimit = (const BYTE *)source;
++				}	}
++
++				forwardH = LZ4_hashPosition(forwardIp,
++					tableType);
++
++				LZ4_putPositionOnHash(ip, h, dictPtr->hashTable,
++					tableType, base);
++			} while (((dictIssue == dictSmall)
++					? (match < lowRefLimit)
++					: 0)
++				|| ((tableType == byU16)
++					? 0
++					: (match + MAX_DISTANCE < ip))
++				|| (LZ4_read32(match + refDelta)
++					!= LZ4_read32(ip)));
++		}
++
++		/* Catch up */
++		while (((ip > anchor) & (match + refDelta > lowLimit))
++			&& (unlikely(ip[-1] == match[refDelta - 1]))) {
++			ip--;
++			match--;
++		}
++
++		/* Encode Literals */
++		{
++			unsigned const int litLength = (unsigned int)(ip - anchor);
++
++			token = op++;
++
++			if ((outputLimited) &&
++				/* Check output buffer overflow */
++				(unlikely(op + litLength +
++					(2 + 1 + LASTLITERALS) +
++					(litLength / 255) > olimit)))
++				return 0;
++
++			if (litLength >= RUN_MASK) {
++				int len = (int)litLength - RUN_MASK;
++
++				*token = (RUN_MASK << ML_BITS);
++
++				for (; len >= 255; len -= 255)
++					*op++ = 255;
++				*op++ = (BYTE)len;
++			} else
++				*token = (BYTE)(litLength << ML_BITS);
++
++			/* Copy Literals */
++			LZ4_wildCopy(op, anchor, op + litLength);
++			op += litLength;
++		}
++
++_next_match:
++		/* Encode Offset */
++		LZ4_writeLE16(op, (U16)(ip - match));
++		op += 2;
++
++		/* Encode MatchLength */
++		{
++			unsigned int matchCode;
++
++			if ((dict == usingExtDict)
++				&& (lowLimit == dictionary)) {
++				const BYTE *limit;
++
++				match += refDelta;
++				limit = ip + (dictEnd - match);
++
++				if (limit > matchlimit)
++					limit = matchlimit;
++
++				matchCode = LZ4_count(ip + MINMATCH,
++					match + MINMATCH, limit);
++
++				ip += MINMATCH + matchCode;
++
++				if (ip == limit) {
++					unsigned const int more = LZ4_count(ip,
++						(const BYTE *)source,
++						matchlimit);
++
++					matchCode += more;
++					ip += more;
++				}
++			} else {
++				matchCode = LZ4_count(ip + MINMATCH,
++					match + MINMATCH, matchlimit);
++				ip += MINMATCH + matchCode;
++			}
++
++			if (outputLimited &&
++				/* Check output buffer overflow */
++				(unlikely(op +
++					(1 + LASTLITERALS) +
++					(matchCode >> 8) > olimit)))
++				return 0;
++
++			if (matchCode >= ML_MASK) {
++				*token += ML_MASK;
++				matchCode -= ML_MASK;
++				LZ4_write32(op, 0xFFFFFFFF);
++
++				while (matchCode >= 4 * 255) {
++					op += 4;
++					LZ4_write32(op, 0xFFFFFFFF);
++					matchCode -= 4 * 255;
++				}
++
++				op += matchCode / 255;
++				*op++ = (BYTE)(matchCode % 255);
++			} else
++				*token += (BYTE)(matchCode);
++		}
++
++		anchor = ip;
++
++		/* Test end of chunk */
++		if (ip > mflimit)
++			break;
++
++		/* Fill table */
++		LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base);
++
++		/* Test next position */
++		match = LZ4_getPosition(ip, dictPtr->hashTable,
++			tableType, base);
++
++		if (dict == usingExtDict) {
++			if (match < (const BYTE *)source) {
++				refDelta = dictDelta;
++				lowLimit = dictionary;
++			} else {
++				refDelta = 0;
++				lowLimit = (const BYTE *)source;
++			}
++		}
++
++		LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
++
++		if (((dictIssue == dictSmall)
++				? (match >= lowRefLimit)
++				: 1)
++			&& (match + MAX_DISTANCE >= ip)
++			&& (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
++			token = op++;
++			*token = 0;
++			goto _next_match;
++		}
++
++		/* Prepare next loop */
++		forwardH = LZ4_hashPosition(++ip, tableType);
++	}
++
++_last_literals:
++	/* Encode Last Literals */
++	{
++		size_t const lastRun = (size_t)(iend - anchor);
++
++		if ((outputLimited) &&
++			/* Check output buffer overflow */
++			((op - (BYTE *)dest) + lastRun + 1 +
++			((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
++			return 0;
++
++		if (lastRun >= RUN_MASK) {
++			size_t accumulator = lastRun - RUN_MASK;
++			*op++ = RUN_MASK << ML_BITS;
++			for (; accumulator >= 255; accumulator -= 255)
++				*op++ = 255;
++			*op++ = (BYTE) accumulator;
++		} else {
++			*op++ = (BYTE)(lastRun << ML_BITS);
++		}
++
++		LZ4_memcpy(op, anchor, lastRun);
++
++		op += lastRun;
++	}
++
++	/* End */
++	return (int) (((char *)op) - dest);
++}
++
++static int LZ4_compress_fast_extState(
++	void *state,
++	const char *source,
++	char *dest,
++	int inputSize,
++	int maxOutputSize,
++	int acceleration)
++{
++	LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
++#if LZ4_ARCH64
++	const tableType_t tableType = byU32;
++#else
++	const tableType_t tableType = byPtr;
++#endif
++
++	LZ4_resetStream((LZ4_stream_t *)state);
++
++	if (acceleration < 1)
++		acceleration = LZ4_ACCELERATION_DEFAULT;
++
++	if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) {
++		if (inputSize < LZ4_64Klimit)
++			return LZ4_compress_generic(ctx, source,
++				dest, inputSize, 0,
++				noLimit, byU16, noDict,
++				noDictIssue, acceleration);
++		else
++			return LZ4_compress_generic(ctx, source,
++				dest, inputSize, 0,
++				noLimit, tableType, noDict,
++				noDictIssue, acceleration);
++	} else {
++		if (inputSize < LZ4_64Klimit)
++			return LZ4_compress_generic(ctx, source,
++				dest, inputSize,
++				maxOutputSize, limitedOutput, byU16, noDict,
++				noDictIssue, acceleration);
++		else
++			return LZ4_compress_generic(ctx, source,
++				dest, inputSize,
++				maxOutputSize, limitedOutput, tableType, noDict,
++				noDictIssue, acceleration);
++	}
++}
++
++int LZ4_compress_fast(const char *source, char *dest, int inputSize,
++	int maxOutputSize, int acceleration, void *wrkmem)
++{
++	return LZ4_compress_fast_extState(wrkmem, source, dest, inputSize,
++		maxOutputSize, acceleration);
++}
++EXPORT_SYMBOL(LZ4_compress_fast);
++
++int LZ4_compress_default(const char *source, char *dest, int inputSize,
++	int maxOutputSize, void *wrkmem)
++{
++	return LZ4_compress_fast(source, dest, inputSize,
++		maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem);
++}
++EXPORT_SYMBOL(LZ4_compress_default);
++
++/*-******************************
++ * *_destSize() variant
++ ********************************/
++static int LZ4_compress_destSize_generic(
++	LZ4_stream_t_internal * const ctx,
++	const char * const src,
++	char * const dst,
++	int * const srcSizePtr,
++	const int targetDstSize,
++	const tableType_t tableType)
++{
++	const BYTE *ip = (const BYTE *) src;
++	const BYTE *base = (const BYTE *) src;
++	const BYTE *lowLimit = (const BYTE *) src;
++	const BYTE *anchor = ip;
++	const BYTE * const iend = ip + *srcSizePtr;
++	const BYTE * const mflimit = iend - MFLIMIT;
++	const BYTE * const matchlimit = iend - LASTLITERALS;
++
++	BYTE *op = (BYTE *) dst;
++	BYTE * const oend = op + targetDstSize;
++	BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */
++		- 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */;
++	BYTE * const oMaxMatch = op + targetDstSize
++		- (LASTLITERALS + 1 /* token */);
++	BYTE * const oMaxSeq = oMaxLit - 1 /* token */;
++
++	U32 forwardH;
++
++	/* Init conditions */
++	/* Impossible to store anything */
++	if (targetDstSize < 1)
++		return 0;
++	/* Unsupported input size, too large (or negative) */
++	if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
++		return 0;
++	/* Size too large (not within 64K limit) */
++	if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
++		return 0;
++	/* Input too small, no compression (all literals) */
++	if (*srcSizePtr < LZ4_minLength)
++		goto _last_literals;
++
++	/* First Byte */
++	*srcSizePtr = 0;
++	LZ4_putPosition(ip, ctx->hashTable, tableType, base);
++	ip++; forwardH = LZ4_hashPosition(ip, tableType);
++
++	/* Main Loop */
++	for ( ; ; ) {
++		const BYTE *match;
++		BYTE *token;
++
++		/* Find a match */
++		{
++			const BYTE *forwardIp = ip;
++			unsigned int step = 1;
++			unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER;
++
++			do {
++				U32 h = forwardH;
++
++				ip = forwardIp;
++				forwardIp += step;
++				step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
++
++				if (unlikely(forwardIp > mflimit))
++					goto _last_literals;
++
++				match = LZ4_getPositionOnHash(h, ctx->hashTable,
++					tableType, base);
++				forwardH = LZ4_hashPosition(forwardIp,
++					tableType);
++				LZ4_putPositionOnHash(ip, h,
++					ctx->hashTable, tableType,
++					base);
++
++			} while (((tableType == byU16)
++				? 0
++				: (match + MAX_DISTANCE < ip))
++				|| (LZ4_read32(match) != LZ4_read32(ip)));
++		}
++
++		/* Catch up */
++		while ((ip > anchor)
++			&& (match > lowLimit)
++			&& (unlikely(ip[-1] == match[-1]))) {
++			ip--;
++			match--;
++		}
++
++		/* Encode Literal length */
++		{
++			unsigned int litLength = (unsigned int)(ip - anchor);
++
++			token = op++;
++			if (op + ((litLength + 240) / 255)
++				+ litLength > oMaxLit) {
++				/* Not enough space for a last match */
++				op--;
++				goto _last_literals;
++			}
++			if (litLength >= RUN_MASK) {
++				unsigned int len = litLength - RUN_MASK;
++				*token = (RUN_MASK << ML_BITS);
++				for (; len >= 255; len -= 255)
++					*op++ = 255;
++				*op++ = (BYTE)len;
++			} else
++				*token = (BYTE)(litLength << ML_BITS);
++
++			/* Copy Literals */
++			LZ4_wildCopy(op, anchor, op + litLength);
++			op += litLength;
++		}
++
++_next_match:
++		/* Encode Offset */
++		LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
++
++		/* Encode MatchLength */
++		{
++			size_t matchLength = LZ4_count(ip + MINMATCH,
++				match + MINMATCH, matchlimit);
++
++			if (op + ((matchLength + 240)/255) > oMaxMatch) {
++				/* Match description too long : reduce it */
++				matchLength = (15 - 1) + (oMaxMatch - op) * 255;
++			}
++			ip += MINMATCH + matchLength;
++
++			if (matchLength >= ML_MASK) {
++				*token += ML_MASK;
++				matchLength -= ML_MASK;
++				while (matchLength >= 255) {
++					matchLength -= 255;
++					*op++ = 255;
++				}
++				*op++ = (BYTE)matchLength;
++			} else
++				*token += (BYTE)(matchLength);
++		}
++
++		anchor = ip;
++
++		/* Test end of block */
++		if (ip > mflimit)
++			break;
++		if (op > oMaxSeq)
++			break;
++
++		/* Fill table */
++		LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);
++
++		/* Test next position */
++		match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
++		LZ4_putPosition(ip, ctx->hashTable, tableType, base);
++
++		if ((match + MAX_DISTANCE >= ip)
++			&& (LZ4_read32(match) == LZ4_read32(ip))) {
++			token = op++; *token = 0;
++			goto _next_match;
++		}
++
++		/* Prepare next loop */
++		forwardH = LZ4_hashPosition(++ip, tableType);
++	}
++
++_last_literals:
++	/* Encode Last Literals */
++	{
++		size_t lastRunSize = (size_t)(iend - anchor);
++
++		if (op + 1 /* token */
++			+ ((lastRunSize + 240) / 255) /* litLength */
++			+ lastRunSize /* literals */ > oend) {
++			/* adapt lastRunSize to fill 'dst' */
++			lastRunSize = (oend - op) - 1;
++			lastRunSize -= (lastRunSize + 240) / 255;
++		}
++		ip = anchor + lastRunSize;
++
++		if (lastRunSize >= RUN_MASK) {
++			size_t accumulator = lastRunSize - RUN_MASK;
++
++			*op++ = RUN_MASK << ML_BITS;
++			for (; accumulator >= 255; accumulator -= 255)
++				*op++ = 255;
++			*op++ = (BYTE) accumulator;
++		} else {
++			*op++ = (BYTE)(lastRunSize << ML_BITS);
++		}
++		LZ4_memcpy(op, anchor, lastRunSize);
++		op += lastRunSize;
++	}
++
++	/* End */
++	*srcSizePtr = (int) (((const char *)ip) - src);
++	return (int) (((char *)op) - dst);
++}
++
++static int LZ4_compress_destSize_extState(
++	LZ4_stream_t *state,
++	const char *src,
++	char *dst,
++	int *srcSizePtr,
++	int targetDstSize)
++{
++#if LZ4_ARCH64
++	const tableType_t tableType = byU32;
++#else
++	const tableType_t tableType = byPtr;
++#endif
++
++	LZ4_resetStream(state);
++
++	if (targetDstSize >= LZ4_COMPRESSBOUND(*srcSizePtr)) {
++		/* compression success is guaranteed */
++		return LZ4_compress_fast_extState(
++			state, src, dst, *srcSizePtr,
++			targetDstSize, 1);
++	} else {
++		if (*srcSizePtr < LZ4_64Klimit)
++			return LZ4_compress_destSize_generic(
++				&state->internal_donotuse,
++				src, dst, srcSizePtr,
++				targetDstSize, byU16);
++		else
++			return LZ4_compress_destSize_generic(
++				&state->internal_donotuse,
++				src, dst, srcSizePtr,
++				targetDstSize, tableType);
++	}
++}
++
++
++int LZ4_compress_destSize(
++	const char *src,
++	char *dst,
++	int *srcSizePtr,
++	int targetDstSize,
++	void *wrkmem)
++{
++	return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr,
++		targetDstSize);
++}
++EXPORT_SYMBOL(LZ4_compress_destSize);
++
++/*-******************************
++ * Streaming functions
++ ********************************/
++void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
++{
++	memset(LZ4_stream, 0, sizeof(LZ4_stream_t));
++}
++
++int LZ4_loadDict(LZ4_stream_t *LZ4_dict,
++	const char *dictionary, int dictSize)
++{
++	LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
++	const BYTE *p = (const BYTE *)dictionary;
++	const BYTE * const dictEnd = p + dictSize;
++	const BYTE *base;
++
++	if ((dict->initCheck)
++		|| (dict->currentOffset > 1 * GB)) {
++		/* Uninitialized structure, or reuse overflow */
++		LZ4_resetStream(LZ4_dict);
++	}
++
++	if (dictSize < (int)HASH_UNIT) {
++		dict->dictionary = NULL;
++		dict->dictSize = 0;
++		return 0;
++	}
++
++	if ((dictEnd - p) > 64 * KB)
++		p = dictEnd - 64 * KB;
++	dict->currentOffset += 64 * KB;
++	base = p - dict->currentOffset;
++	dict->dictionary = p;
++	dict->dictSize = (U32)(dictEnd - p);
++	dict->currentOffset += dict->dictSize;
++
++	while (p <= dictEnd - HASH_UNIT) {
++		LZ4_putPosition(p, dict->hashTable, byU32, base);
++		p += 3;
++	}
++
++	return dict->dictSize;
++}
++EXPORT_SYMBOL(LZ4_loadDict);
++
++static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict,
++	const BYTE *src)
++{
++	if ((LZ4_dict->currentOffset > 0x80000000) ||
++		((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {
++		/* address space overflow */
++		/* rescale hash table */
++		U32 const delta = LZ4_dict->currentOffset - 64 * KB;
++		const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
++		int i;
++
++		for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
++			if (LZ4_dict->hashTable[i] < delta)
++				LZ4_dict->hashTable[i] = 0;
++			else
++				LZ4_dict->hashTable[i] -= delta;
++		}
++		LZ4_dict->currentOffset = 64 * KB;
++		if (LZ4_dict->dictSize > 64 * KB)
++			LZ4_dict->dictSize = 64 * KB;
++		LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
++	}
++}
++
++int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
++{
++	LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse;
++	const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize;
++
++	if ((U32)dictSize > 64 * KB) {
++		/* useless to define a dictionary > 64 * KB */
++		dictSize = 64 * KB;
++	}
++	if ((U32)dictSize > dict->dictSize)
++		dictSize = dict->dictSize;
++
++	memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
++
++	dict->dictionary = (const BYTE *)safeBuffer;
++	dict->dictSize = (U32)dictSize;
++
++	return dictSize;
++}
++EXPORT_SYMBOL(LZ4_saveDict);
++
++int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
++	char *dest, int inputSize, int maxOutputSize, int acceleration)
++{
++	LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
++	const BYTE * const dictEnd = streamPtr->dictionary
++		+ streamPtr->dictSize;
++
++	const BYTE *smallest = (const BYTE *) source;
++
++	if (streamPtr->initCheck) {
++		/* Uninitialized structure detected */
++		return 0;
++	}
++
++	if ((streamPtr->dictSize > 0) && (smallest > dictEnd))
++		smallest = dictEnd;
++
++	LZ4_renormDictT(streamPtr, smallest);
++
++	if (acceleration < 1)
++		acceleration = LZ4_ACCELERATION_DEFAULT;
++
++	/* Check overlapping input/dictionary space */
++	{
++		const BYTE *sourceEnd = (const BYTE *) source + inputSize;
++
++		if ((sourceEnd > streamPtr->dictionary)
++			&& (sourceEnd < dictEnd)) {
++			streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
++			if (streamPtr->dictSize > 64 * KB)
++				streamPtr->dictSize = 64 * KB;
++			if (streamPtr->dictSize < 4)
++				streamPtr->dictSize = 0;
++			streamPtr->dictionary = dictEnd - streamPtr->dictSize;
++		}
++	}
++
++	/* prefix mode : source data follows dictionary */
++	if (dictEnd == (const BYTE *)source) {
++		int result;
++
++		if ((streamPtr->dictSize < 64 * KB) &&
++			(streamPtr->dictSize < streamPtr->currentOffset)) {
++			result = LZ4_compress_generic(
++				streamPtr, source, dest, inputSize,
++				maxOutputSize, limitedOutput, byU32,
++				withPrefix64k, dictSmall, acceleration);
++		} else {
++			result = LZ4_compress_generic(
++				streamPtr, source, dest, inputSize,
++				maxOutputSize, limitedOutput, byU32,
++				withPrefix64k, noDictIssue, acceleration);
++		}
++		streamPtr->dictSize += (U32)inputSize;
++		streamPtr->currentOffset += (U32)inputSize;
++		return result;
++	}
++
++	/* external dictionary mode */
++	{
++		int result;
++
++		if ((streamPtr->dictSize < 64 * KB) &&
++			(streamPtr->dictSize < streamPtr->currentOffset)) {
++			result = LZ4_compress_generic(
++				streamPtr, source, dest, inputSize,
++				maxOutputSize, limitedOutput, byU32,
++				usingExtDict, dictSmall, acceleration);
++		} else {
++			result = LZ4_compress_generic(
++				streamPtr, source, dest, inputSize,
++				maxOutputSize, limitedOutput, byU32,
++				usingExtDict, noDictIssue, acceleration);
++		}
++		streamPtr->dictionary = (const BYTE *)source;
++		streamPtr->dictSize = (U32)inputSize;
++		streamPtr->currentOffset += (U32)inputSize;
++		return result;
++	}
++}
++EXPORT_SYMBOL(LZ4_compress_fast_continue);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("LZ4 compressor");
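
The compressor above exports the same one-shot entry points as the in-kernel LZ4 library, with caller-supplied scratch memory. As a minimal usage sketch (an editor's illustration, not part of the patch): it assumes kernel code built against this API, that LZ4_MEM_COMPRESS and the allocator calls match the kernel's <linux/lz4.h> and <linux/slab.h>, and example_compress() itself is a hypothetical helper.

#include <linux/lz4.h>
#include <linux/slab.h>

/*
 * Hypothetical caller: compress src into dst, returning the compressed
 * size or a negative errno.
 */
static int example_compress(const char *src, int src_len,
			    char *dst, int dst_capacity)
{
	void *wrkmem;
	int out_len;

	/* LZ4_compress_default() needs caller-supplied hash-table scratch. */
	wrkmem = kvzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
	if (wrkmem == NULL)
		return -ENOMEM;

	/* Returns 0 when dst_capacity is too small for the compressed data. */
	out_len = LZ4_compress_default(src, dst, src_len, dst_capacity, wrkmem);

	kvfree(wrkmem);
	return (out_len > 0) ? out_len : -E2BIG;
}
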
+diff -Naur kvdo-127a993f34010e4d8b05e46c4af2a9295c4477d0.orig/vdo/lz4defs.h kvdo-127a993f34010e4d8b05e46c4af2a9295c4477d0/vdo/lz4defs.h
+--- kvdo-127a993f34010e4d8b05e46c4af2a9295c4477d0.orig/vdo/lz4defs.h	1969-12-31 19:00:00.000000000 -0500
++++ kvdo-127a993f34010e4d8b05e46c4af2a9295c4477d0/vdo/lz4defs.h	2022-07-27 21:39:43.896830772 -0400
+@@ -0,0 +1,245 @@
++#ifndef __LZ4DEFS_H__
++#define __LZ4DEFS_H__
++
++/*
++ * lz4defs.h -- common and architecture specific defines for the kernel usage
++
++ * LZ4 - Fast LZ compression algorithm
++ * Copyright (C) 2011-2016, Yann Collet.
++ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are
++ * met:
++ *	* Redistributions of source code must retain the above copyright
++ *	  notice, this list of conditions and the following disclaimer.
++ *	* Redistributions in binary form must reproduce the above
++ * copyright notice, this list of conditions and the following disclaimer
++ * in the documentation and/or other materials provided with the
++ * distribution.
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * You can contact the author at :
++ * - LZ4 homepage : http://www.lz4.org
++ * - LZ4 source repository : https://github.com/lz4/lz4
++ *
++ * Changed for kernel usage by:
++ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
++ */
++
++#include <asm/unaligned.h>
++#include <linux/string.h> /* memset, memcpy */
++
++#define FORCE_INLINE __always_inline
++
++/*-************************************
++ * Basic Types
++ **************************************/
++#include <linux/types.h>
++
++typedef uint8_t BYTE;
++typedef uint16_t U16;
++typedef uint32_t U32;
++typedef int32_t S32;
++typedef uint64_t U64;
++typedef uintptr_t uptrval;
++
++/*-************************************
++ * Architecture specifics
++ **************************************/
++#if defined(CONFIG_64BIT)
++#define LZ4_ARCH64 1
++#else
++#define LZ4_ARCH64 0
++#endif
++
++#if defined(__LITTLE_ENDIAN)
++#define LZ4_LITTLE_ENDIAN 1
++#else
++#define LZ4_LITTLE_ENDIAN 0
++#endif
++
++/*-************************************
++ * Constants
++ **************************************/
++#define MINMATCH 4
++
++#define WILDCOPYLENGTH 8
++#define LASTLITERALS 5
++#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
++/*
++ * ensure it's possible to write 2 x wildcopyLength
++ * without overflowing output buffer
++ */
++#define MATCH_SAFEGUARD_DISTANCE ((2 * WILDCOPYLENGTH) - MINMATCH)
++
++/* Increase this value ==> compression run slower on incompressible data */
++#define LZ4_SKIPTRIGGER 6
++
++#define HASH_UNIT sizeof(size_t)
++
++#define KB (1 << 10)
++#define MB (1 << 20)
++#define GB (1U << 30)
++
++#define MAXD_LOG 16
++#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
++#define STEPSIZE sizeof(size_t)
++
++#define ML_BITS 4
++#define ML_MASK ((1U << ML_BITS) - 1)
++#define RUN_BITS (8 - ML_BITS)
++#define RUN_MASK ((1U << RUN_BITS) - 1)
++
++/*-************************************
++ * Reading and writing into memory
++ **************************************/
++static FORCE_INLINE U16 LZ4_read16(const void *ptr)
++{
++	return get_unaligned((const U16 *)ptr);
++}
++
++static FORCE_INLINE U32 LZ4_read32(const void *ptr)
++{
++	return get_unaligned((const U32 *)ptr);
++}
++
++static FORCE_INLINE size_t LZ4_read_ARCH(const void *ptr)
++{
++	return get_unaligned((const size_t *)ptr);
++}
++
++static FORCE_INLINE void LZ4_write16(void *memPtr, U16 value)
++{
++	put_unaligned(value, (U16 *)memPtr);
++}
++
++static FORCE_INLINE void LZ4_write32(void *memPtr, U32 value)
++{
++	put_unaligned(value, (U32 *)memPtr);
++}
++
++static FORCE_INLINE U16 LZ4_readLE16(const void *memPtr)
++{
++	return get_unaligned_le16(memPtr);
++}
++
++static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
++{
++	return put_unaligned_le16(value, memPtr);
++}
++
++/*
++ * LZ4 relies on memcpy with a constant size being inlined. In freestanding
++ * environments, the compiler can't assume the implementation of memcpy() is
++ * standard compliant, so apply its specialized memcpy() inlining logic. When
++ * possible, use __builtin_memcpy() to tell the compiler to analyze memcpy()
++ * as-if it were standard compliant, so it can inline it in freestanding
++ * environments. This is needed when decompressing the Linux Kernel, for example.
++ */
++#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
++#define LZ4_memmove(dst, src, size) __builtin_memmove(dst, src, size)
++
++static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
++{
++#if LZ4_ARCH64
++	U64 a = get_unaligned((const U64 *)src);
++
++	put_unaligned(a, (U64 *)dst);
++#else
++	U32 a = get_unaligned((const U32 *)src);
++	U32 b = get_unaligned((const U32 *)src + 1);
++
++	put_unaligned(a, (U32 *)dst);
++	put_unaligned(b, (U32 *)dst + 1);
++#endif
++}
++
++/*
++ * customized variant of memcpy,
++ * which can overwrite up to 7 bytes beyond dstEnd
++ */
++static FORCE_INLINE void LZ4_wildCopy(void *dstPtr,
++	const void *srcPtr, void *dstEnd)
++{
++	BYTE *d = (BYTE *)dstPtr;
++	const BYTE *s = (const BYTE *)srcPtr;
++	BYTE *const e = (BYTE *)dstEnd;
++
++	do {
++		LZ4_copy8(d, s);
++		d += 8;
++		s += 8;
++	} while (d < e);
++}
++
++static FORCE_INLINE unsigned int LZ4_NbCommonBytes(register size_t val)
++{
++#if LZ4_LITTLE_ENDIAN
++	return __ffs(val) >> 3;
++#else
++	return (BITS_PER_LONG - 1 - __fls(val)) >> 3;
++#endif
++}
++
++static FORCE_INLINE unsigned int LZ4_count(
++	const BYTE *pIn,
++	const BYTE *pMatch,
++	const BYTE *pInLimit)
++{
++	const BYTE *const pStart = pIn;
++
++	while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
++		size_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
++
++		if (!diff) {
++			pIn += STEPSIZE;
++			pMatch += STEPSIZE;
++			continue;
++		}
++
++		pIn += LZ4_NbCommonBytes(diff);
++
++		return (unsigned int)(pIn - pStart);
++	}
++
++#if LZ4_ARCH64
++	if ((pIn < (pInLimit - 3))
++		&& (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
++		pIn += 4;
++		pMatch += 4;
++	}
++#endif
++
++	if ((pIn < (pInLimit - 1))
++		&& (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
++		pIn += 2;
++		pMatch += 2;
++	}
++
++	if ((pIn < pInLimit) && (*pMatch == *pIn))
++		pIn++;
++
++	return (unsigned int)(pIn - pStart);
++}
++
++typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive;
++typedef enum { byPtr, byU32, byU16 } tableType_t;
++
++typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
++typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
++
++typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
++typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
++
++#define LZ4_STATIC_ASSERT(c) BUILD_BUG_ON(!(c))
++
++#endif
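
That closes out add_lz4_dependency.patch. Unlike the one-shot entry points, the streaming functions (LZ4_loadDict, LZ4_compress_fast_continue) keep their dictionary state in an LZ4_stream_t rather than in wrkmem. A minimal chunked-compression sketch under that assumption follows (an editor's illustration, not part of the patch; example_compress_stream() is hypothetical, and a real caller must keep earlier source chunks resident at stable addresses, since later chunks may reference them as dictionary).

#include <linux/lz4.h>
#include <linux/slab.h>

static int example_compress_stream(const char *chunks[], const int sizes[],
				   int nr_chunks, char *dst, int dst_capacity)
{
	LZ4_stream_t *stream;
	char *out = dst;
	int i, n, remaining;

	/* LZ4_stream_t is large; allocate rather than placing it on the stack. */
	stream = kvzalloc(sizeof(*stream), GFP_KERNEL);
	if (stream == NULL)
		return -ENOMEM;
	LZ4_resetStream(stream);

	for (i = 0; i < nr_chunks; i++) {
		remaining = dst_capacity - (int)(out - dst);
		/* Acceleration 1 is the default speed/ratio trade-off. */
		n = LZ4_compress_fast_continue(stream, chunks[i], out,
					       sizes[i], remaining, 1);
		if (n == 0) {
			/* Output buffer exhausted. */
			kvfree(stream);
			return -E2BIG;
		}
		out += n;
	}

	kvfree(stream);
	return (int)(out - dst);
}
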
diff --git a/SOURCES/removed-logical-space-check-from-table-line.patch b/SOURCES/removed-logical-space-check-from-table-line.patch
new file mode 100644
index 0000000..3afc90d
--- /dev/null
+++ b/SOURCES/removed-logical-space-check-from-table-line.patch
@@ -0,0 +1,34 @@
+From 8fb4ef85097e7b46cb3604612a49e480efd4465a Mon Sep 17 00:00:00 2001
+From: Andrew Walsh
+Date: Wed, 24 Aug 2022 12:00:12 -0400
+Subject: [PATCH] Removed logical space check from table line.
+
+Until the LVM tooling can be updated to use accurate sizes, this check
+can't be implemented.
+
+Signed-off-by: Andrew Walsh
+---
+ vdo/vdo-component.c | 7 -------
+ 1 file changed, 7 deletions(-)
+
+diff --git a/vdo/vdo-component.c b/vdo/vdo-component.c
+index ac1ac1f7..50ba438b 100644
+--- a/vdo/vdo-component.c
++++ b/vdo/vdo-component.c
+@@ -303,13 +303,6 @@ int vdo_validate_config(const struct vdo_config *config,
+ 	if (result != UDS_SUCCESS) {
+ 		return result;
+ 	}
+-
+-	if (logical_block_count != config->logical_blocks) {
+-		uds_log_error("A logical size of %llu blocks was specified, but that differs from the %llu blocks configured in the vdo super block",
+-			      (unsigned long long) logical_block_count,
+-			      (unsigned long long) config->logical_blocks);
+-		return VDO_PARAMETER_MISMATCH;
+-	}
+ 	}
+ 
+ 	result = ASSERT(config->logical_blocks <= MAXIMUM_VDO_LOGICAL_BLOCKS,
+-- 
+2.37.1
+
diff --git a/SPECS/kmod-kvdo.spec b/SPECS/kmod-kvdo.spec
index 568ff0f..8e38ce8 100644
--- a/SPECS/kmod-kvdo.spec
+++ b/SPECS/kmod-kvdo.spec
@@ -1,17 +1,19 @@
-%global commit e209cfeb3fe1dfac03b34041a3deefdb077e3e11
-%global gittag 8.1.1.371
+%global commit 6077a128b887100e478048f67e6b8734cddd0ec7
+%global gittag 8.2.0.18
 %global shortcommit %(c=%{commit}; echo ${c:0:7})
-%define spec_release 21
+%define spec_release 46
 
 %define kmod_name kvdo
 %define kmod_driver_version %{gittag}
 %define kmod_rpm_release %{spec_release}
-%define kmod_kernel_version 5.14.0-70.4.1.el9_0
+%define kmod_kernel_version 5.14.0-152.el9
 %define kmod_headers_version %(rpm -qa kernel-devel | sed 's/^kernel-devel-//')
 %define kmod_kbuild_dir .
 %define kmod_devel_package 0
 
 Source0: https://github.com/dm-vdo/%{kmod_name}/archive/%{commit}/%{kmod_name}-%{shortcommit}.tar.gz
+Patch0: add_lz4_dependency.patch
+Patch1: removed-logical-space-check-from-table-line.patch
 
 %define findpat %( echo "%""P" )
@@ -103,6 +105,8 @@ printf '%s\n' "${modules[@]}" | %{_sbindir}/weak-modules --dracut=/usr/bin/dracu
 
 %prep
 %setup -n %{kmod_name}-%{commit}
+%patch0 -p1
+%patch1 -p1
 %{nil}
 set -- *
 mkdir source
@@ -151,34 +155,146 @@ install -m 644 -D source/greylist.txt $RPM_BUILD_ROOT/usr/share/doc/kmod-%{kmod_
 
 rm -rf $RPM_BUILD_ROOT
 
 %changelog
-* Mon Mar 28 2022 - Andy Walsh - 8.1.1.371-21
-- Rebuilt for latest kernel.
-- Related: rhbz#2000926
+* Wed Aug 24 2022 - Andy Walsh - 8.2.0.18-46
+- Temporarily dropped a check that validates the logical size specified from
+  the table line.
+- Related: rhbz#2071648
 
-* Wed Mar 23 2022 - Andy Walsh - 8.1.1.371-20
-- Rebuilt for latest kernel.
-- Related: rhbz#2000926
+* Tue Aug 23 2022 - Andy Walsh - 8.2.0.18-45
+- Fixed a race handling timeouts of dedupe requests.
+- Resolves: rhbz#2115504
 
-* Tue Mar 22 2022 - Andy Walsh - 8.1.1.371-19
+* Tue Aug 23 2022 - Susan LeGendre-McGhee - 8.2.0.2-45
 - Rebuilt for latest kernel.
-- Related: rhbz#2000926
+- Related: rhbz#2060486
 
-* Tue Mar 22 2022 - Andy Walsh - 8.1.1.371-18
+* Thu Aug 18 2022 - Susan LeGendre-McGhee - 8.2.0.2-44
 - Rebuilt for latest kernel.
-- Related: rhbz#2000926
+- Related: rhbz#2060486
 
-* Mon Mar 21 2022 - Andy Walsh - 8.1.1.371-17
+* Wed Aug 10 2022 - Chung Chung - 8.2.0.2-43
 - Rebuilt for latest kernel.
-- Related: rhbz#2000926
+- Related: rhbz#2060486
 
-* Mon Mar 21 2022 - Andy Walsh - 8.1.1.371-16
+* Wed Jul 27 2022 - Andy Walsh - 8.2.0.2-42
+- Added missing lz4 libs to rebased code
+- Resolves: rhbz#2071648
+
+* Tue Jul 19 2022 - Andy Walsh - 8.2.0.2-41
+- Rebased to latest upstream candidate.
+- Resolves: rhbz#2071648
+
+* Sat Jul 16 2022 - Susan LeGendre-McGhee - 8.1.1.371-41
 - Rebuilt for latest kernel.
-- Related: rhbz#2000926
+- Related: rhbz#2060486
 
-* Sat Mar 05 2022 - Andy Walsh - 8.1.1.371-15
-- Fixed bugs in the UDS index's handling of backing storage changes.
-- Resolves: rhbz#2007803
-- Resolves: rhbz#2058464
+* Fri Jul 15 2022 - Susan LeGendre-McGhee - 8.1.1.371-40
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Mon Jul 11 2022 - Chung Chung - 8.1.1.371-39
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Tue Jul 05 2022 - Chung Chung - 8.1.1.371-38
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Mon Jul 04 2022 - Chung Chung - 8.1.1.371-37
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Tue Jun 28 2022 - Susan LeGendre-McGhee - 8.1.1.371-36
+- TEMPORARY FIX to correct build failures regarding bio_reset(), __bio_clone_fast(), and bio_init().
+- Related: rhbz#2060486
+
+* Tue Jun 28 2022 - Susan LeGendre-McGhee - 8.1.1.371-36
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Sun Jun 26 2022 - Chung Chung - 8.1.1.371-35
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Wed Jun 15 2022 - Andy Walsh - 8.1.1.371-34
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Tue Jun 07 2022 - Andy Walsh - 8.1.1.371-33
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Wed Jun 01 2022 - Andy Walsh - 8.1.1.371-32
+- Rebased to newer version.
+- Related: rhbz#2071648
+
+* Tue May 31 2022 - Andy Walsh - 8.1.1.360-32
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Sat May 28 2022 - Andy Walsh - 8.1.1.360-31
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Mon May 23 2022 - Andy Walsh - 8.1.1.360-30
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Mon May 16 2022 - Andy Walsh - 8.1.1.360-29
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Fri May 13 2022 - Andy Walsh - 8.1.1.360-28
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Thu May 12 2022 - Andy Walsh - 8.1.1.360-27
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Tue May 10 2022 - Andy Walsh - 8.1.1.360-26
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Sat May 07 2022 - Andy Walsh - 8.1.1.360-25
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Thu May 05 2022 - Andy Walsh - 8.1.1.360-24
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Wed May 04 2022 - Andy Walsh - 8.1.1.360-23
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Fri Apr 29 2022 - Andy Walsh - 8.1.1.360-22
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Fri Apr 22 2022 - Andy Walsh - 8.1.1.360-21
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Thu Apr 21 2022 - Andy Walsh - 8.1.1.360-20
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Wed Apr 13 2022 - Andy Walsh - 8.1.1.360-19
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Mon Apr 11 2022 - Andy Walsh - 8.1.1.360-18
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Mon Mar 28 2022 - Andy Walsh - 8.1.1.360-17
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
+
+* Mon Mar 21 2022 - Andy Walsh - 8.1.1.360-16
+- Rebuilt for latest kernel.
+- Related: rhbz#2060486
 
 * Mon Feb 28 2022 - Andy Walsh - 8.1.1.360-15
 - Rebuilt for latest kernel.