import CS git fence-agents-4.2.1-129.el8_10.24

parent 453069547c
commit 9898bcdf73
@@ -43,26 +43,3 @@

    def flush(self):
        return self._decoders[0].flush()

--- a/kubevirt/urllib3/response.py	2023-10-17 19:42:56.000000000 +0200
+++ b/kubevirt/urllib3/response.py	2026-01-02 11:19:25.583808492 +0100
@@ -135,8 +135,18 @@
        they were applied.
    """

+    # Maximum allowed number of chained HTTP encodings in the
+    # Content-Encoding header.
+    max_decode_links = 5
+
    def __init__(self, modes):
-        self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]
+        encodings = [m.strip() for m in modes.split(",")]
+        if len(encodings) > self.max_decode_links:
+            raise DecodeError(
+                "Too many content encodings in the chain: "
+                f"{len(encodings)} > {self.max_decode_links}"
+            )
+        self._decoders = [_get_decoder(e) for e in encodings]

    def flush(self):
        return self._decoders[0].flush()

@@ -508,288 +508,6 @@
+        return any(d.has_unconsumed_tail for d in self._decoders)


def _get_decoder(mode):
@@ -405,16 +517,25 @@
    if brotli is not None:
        DECODER_ERROR_CLASSES += (brotli.error,)

-    def _decode(self, data, decode_content, flush_decoder):
+    def _decode(
+        self,
+        data: bytes,
+        decode_content: bool,
+        flush_decoder: bool,
+        max_length: int = None,
+    ) -> bytes:
        """
        Decode the data passed in and potentially flush the decoder.
        """
        if not decode_content:
            return data

+        if max_length is None or flush_decoder:
+            max_length = -1
+
        try:
            if self._decoder:
-                data = self._decoder.decompress(data)
+                data = self._decoder.decompress(data, max_length=max_length)
        except self.DECODER_ERROR_CLASSES as e:
            content_encoding = self.headers.get("content-encoding", "").lower()
            raise DecodeError(
@@ -634,7 +755,10 @@
            for line in self.read_chunked(amt, decode_content=decode_content):
                yield line
        else:
-            while not is_fp_closed(self._fp):
+            while (
+                not is_fp_closed(self._fp)
+                or (self._decoder and self._decoder.has_unconsumed_tail)
+            ):
                data = self.read(amt=amt, decode_content=decode_content)

                if data:
@@ -840,7 +964,10 @@
                break
            chunk = self._handle_chunk(amt)
            decoded = self._decode(
-                chunk, decode_content=decode_content, flush_decoder=False
+                chunk,
+                decode_content=decode_content,
+                flush_decoder=False,
+                max_length=amt,
            )
            if decoded:
                yield decoded

--- a/kubevirt/urllib3/response.py	2026-01-20 10:46:57.006470161 +0100
+++ b/kubevirt/urllib3/response.py	2026-01-20 10:55:44.090084896 +0100
@@ -23,6 +23,7 @@
from .exceptions import (
    BodyNotHttplibCompatible,
    DecodeError,
+    DependencyWarning,
    HTTPError,
    IncompleteRead,
    InvalidChunkLength,
@@ -41,34 +42,60 @@
class DeflateDecoder(object):
    def __init__(self):
        self._first_try = True
-        self._data = b""
+        self._first_try_data = b""
+        self._unfed_data = b""
        self._obj = zlib.decompressobj()

    def __getattr__(self, name):
        return getattr(self._obj, name)

-    def decompress(self, data):
-        if not data:
+    def decompress(self, data: bytes, max_length: int = -1) -> bytes:
+        data = self._unfed_data + data
+        self._unfed_data = b""
+        if not data and not self._obj.unconsumed_tail:
            return data
+        original_max_length = max_length
+        if original_max_length < 0:
+            max_length = 0
+        elif original_max_length == 0:
+            # We should not pass 0 to the zlib decompressor because 0 is
+            # the default value that will make zlib decompress without a
+            # length limit.
+            # Data should be stored for subsequent calls.
+            self._unfed_data = data
+            return b""

+        # Subsequent calls always reuse `self._obj`. zlib requires
+        # passing the unconsumed tail if decompression is to continue.
        if not self._first_try:
-            return self._obj.decompress(data)
+            return self._obj.decompress(
+                self._obj.unconsumed_tail + data, max_length=max_length
+            )

-        self._data += data
+        # First call tries with RFC 1950 ZLIB format.
+        self._first_try_data += data
        try:
-            decompressed = self._obj.decompress(data)
+            decompressed = self._obj.decompress(data, max_length=max_length)
            if decompressed:
                self._first_try = False
-                self._data = None
+                self._first_try_data = b""
            return decompressed
+        # On failure, it falls back to RFC 1951 DEFLATE format.
        except zlib.error:
            self._first_try = False
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
-                return self.decompress(self._data)
+                return self.decompress(
+                    self._first_try_data, max_length=original_max_length
+                )
            finally:
-                self._data = None
+                self._first_try_data = b""

+    @property
+    def has_unconsumed_tail(self) -> bool:
+        return bool(self._unfed_data) or (
+            bool(self._obj.unconsumed_tail) and not self._first_try
+        )

class GzipDecoderState(object):

@@ -81,30 +108,64 @@
    def __init__(self):
        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
        self._state = GzipDecoderState.FIRST_MEMBER
+        self._unconsumed_tail = b""

    def __getattr__(self, name):
        return getattr(self._obj, name)

-    def decompress(self, data):
+    def decompress(self, data: bytes, max_length: int = -1) -> bytes:
        ret = bytearray()
-        if self._state == GzipDecoderState.SWALLOW_DATA or not data:
+        if self._state == GzipDecoderState.SWALLOW_DATA:
+            return bytes(ret)
+
+        if max_length == 0:
+            # We should not pass 0 to the zlib decompressor because 0 is
+            # the default value that will make zlib decompress without a
+            # length limit.
+            # Data should be stored for subsequent calls.
+            self._unconsumed_tail += data
+            return b""
+
+        # zlib requires passing the unconsumed tail to the subsequent
+        # call if decompression is to continue.
+        data = self._unconsumed_tail + data
+        if not data and self._obj.eof:
            return bytes(ret)
+
        while True:
            try:
-                ret += self._obj.decompress(data)
+                ret += self._obj.decompress(
+                    data, max_length=max(max_length - len(ret), 0)
+                )
            except zlib.error:
                previous_state = self._state
                # Ignore data after the first error
                self._state = GzipDecoderState.SWALLOW_DATA
+                self._unconsumed_tail = b""
                if previous_state == GzipDecoderState.OTHER_MEMBERS:
                    # Allow trailing garbage acceptable in other gzip clients
                    return bytes(ret)
                raise
-            data = self._obj.unused_data
+
+            self._unconsumed_tail = data = (
+                self._obj.unconsumed_tail or self._obj.unused_data
+            )
+            if max_length > 0 and len(ret) >= max_length:
+                break
+
            if not data:
                return bytes(ret)
-            self._state = GzipDecoderState.OTHER_MEMBERS
-            self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+            # When the end of a gzip member is reached, a new decompressor
+            # must be created for unused (possibly future) data.
+            if self._obj.eof:
+                self._state = GzipDecoderState.OTHER_MEMBERS
+                self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+        return bytes(ret)
+
+    @property
+    def has_unconsumed_tail(self) -> bool:
+        return bool(self._unconsumed_tail)


if brotli is not None:
@@ -116,9 +177,35 @@
        def __init__(self):
            self._obj = brotli.Decompressor()
            if hasattr(self._obj, "decompress"):
-                self.decompress = self._obj.decompress
+                setattr(self, "_decompress", self._obj.decompress)
            else:
-                self.decompress = self._obj.process
+                setattr(self, "_decompress", self._obj.process)
+
+        # Requires Brotli >= 1.2.0 for `output_buffer_limit`.
+        def _decompress(self, data: bytes, output_buffer_limit: int = -1) -> bytes:
+            raise NotImplementedError()
+
+        def decompress(self, data: bytes, max_length: int = -1) -> bytes:
+            try:
+                if max_length > 0:
+                    return self._decompress(data, output_buffer_limit=max_length)
+                else:
+                    return self._decompress(data)
+            except TypeError:
+                # Fallback for Brotli/brotlicffi/brotlipy versions without
+                # the `output_buffer_limit` parameter.
+                warnings.warn(
+                    "Brotli >= 1.2.0 is required to prevent decompression bombs.",
+                    DependencyWarning,
+                )
+                return self._decompress(data)
+
+        @property
+        def has_unconsumed_tail(self) -> bool:
+            try:
+                return not self._obj.can_accept_more_data()
+            except AttributeError:
+                return False

        def flush(self):
            if hasattr(self._obj, "flush"):
@@ -151,10 +238,35 @@
    def flush(self):
        return self._decoders[0].flush()

-    def decompress(self, data):
-        for d in reversed(self._decoders):
-            data = d.decompress(data)
-        return data
+    def decompress(self, data: bytes, max_length: int = -1) -> bytes:
+        if max_length <= 0:
+            for d in reversed(self._decoders):
+                data = d.decompress(data)
+            return data
+
+        ret = bytearray()
+        # Every while loop iteration goes through all decoders once.
+        # It exits when enough data is read or no more data can be read.
+        # It is possible that the while loop iteration does not produce
+        # any data because we retrieve up to `max_length` from every
+        # decoder, and the amount of bytes may be insufficient for the
+        # next decoder to produce enough/any output.
+        while True:
+            any_data = False
+            for d in reversed(self._decoders):
+                data = d.decompress(data, max_length=max_length - len(ret))
+                if data:
+                    any_data = True
+                # We should not break when no data is returned because
+                # next decoders may produce data even with empty input.
+                ret += data
+            if not any_data or len(ret) >= max_length:
+                return bytes(ret)
+            data = b""
+
+    @property
+    def has_unconsumed_tail(self) -> bool:
+        return any(d.has_unconsumed_tail for d in self._decoders)


def _get_decoder(mode):
@@ -405,16 +517,25 @@
    if brotli is not None:
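The hunks above all lean on one zlib contract: Decompress.decompress() accepts a max_length cap, returns at most that many bytes, and parks whatever input it did not consume in unconsumed_tail, which must be fed back in on the next call. A minimal standalone sketch of that contract outside urllib3 (illustrative only; the 8-byte cap is an arbitrary choice):

    import zlib

    def inflate_in_chunks(payload: bytes, cap: int = 8) -> bytes:
        # Produce at most `cap` bytes per call instead of inflating the
        # whole stream at once -- the guard against decompression bombs.
        obj = zlib.decompressobj()
        out = bytearray(obj.decompress(payload, cap))
        while not obj.eof:
            # Feed the unconsumed tail back in to continue, which is
            # what the patched DeflateDecoder.decompress() does above.
            chunk = obj.decompress(obj.unconsumed_tail, cap)
            if not chunk and not obj.unconsumed_tail:
                break  # truncated stream: nothing left to feed or flush
            out += chunk
        return bytes(out)

    assert inflate_in_chunks(zlib.compress(b"a" * 100)) == b"a" * 100

In the patched decoders the caller (HTTPResponse.read / read_chunked) drives this loop, so a hostile highly-compressed body can only ever expand by the requested read size per iteration.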
@@ -0,0 +1,63 @@
--- a/aws/urllib3/response.py	2026-02-03 08:20:11.000000000 +0100
+++ b/aws/urllib3/response.py	2026-02-03 09:11:38.017998476 +0100
@@ -350,6 +350,7 @@
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content
+        self._has_decoded_content = False
        self.retries = retries
        self.enforce_content_length = enforce_content_length
        self.auto_close = auto_close
@@ -414,7 +415,11 @@
        Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
        """
        try:
-            self.read()
+            self.read(
+                # Do not spend resources decoding the content unless
+                # decoding has already been initiated.
+                decode_content=self._has_decoded_content,
+            )
        except (HTTPError, SocketError, BaseSSLError, HTTPException):
            pass

@@ -536,6 +541,7 @@
        try:
            if self._decoder:
                data = self._decoder.decompress(data, max_length=max_length)
+                self._has_decoded_content = True
        except self.DECODER_ERROR_CLASSES as e:
            content_encoding = self.headers.get("content-encoding", "").lower()
            raise DecodeError(

--- a/azure/urllib3/response.py	2026-02-03 08:20:11.000000000 +0100
+++ b/azure/urllib3/response.py	2026-02-03 09:11:38.017998476 +0100
@@ -350,6 +350,7 @@
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content
+        self._has_decoded_content = False
        self.retries = retries
        self.enforce_content_length = enforce_content_length
        self.auto_close = auto_close
@@ -414,7 +415,11 @@
        Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
        """
        try:
-            self.read()
+            self.read(
+                # Do not spend resources decoding the content unless
+                # decoding has already been initiated.
+                decode_content=self._has_decoded_content,
+            )
        except (HTTPError, SocketError, BaseSSLError, HTTPException):
            pass

@@ -536,6 +541,7 @@
        try:
            if self._decoder:
                data = self._decoder.decompress(data, max_length=max_length)
+                self._has_decoded_content = True
        except self.DECODER_ERROR_CLASSES as e:
            content_encoding = self.headers.get("content-encoding", "").lower()
            raise DecodeError(
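Both copies of this patch hinge on a single bit of bookkeeping: decoding only ever happens inside _decode(), so a flag set there tells drain_conn() whether discarding leftover body bytes still needs to run the decoders at all. A toy model of that interplay (hypothetical class and stand-in "decoder", not urllib3 code):

    class Response:
        def __init__(self, raw: bytes):
            self._raw = raw                    # possibly compressed body
            self._has_decoded_content = False  # set on first real decode

        def read(self, decode_content: bool = True) -> bytes:
            data, self._raw = self._raw, b""
            if decode_content and data:
                self._has_decoded_content = True
                data = data.swapcase()  # stand-in for real decompression
            return data

        def drain_conn(self) -> None:
            # Discard unread data so the connection can go back to the
            # pool, decoding only if a caller already started decoding.
            self.read(decode_content=self._has_decoded_content)

    r = Response(b"never-touched-compressed-body")
    r.drain_conn()  # drained raw: no decoder work, no decode errors
    assert r._has_decoded_content is False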
@@ -1,44 +0,0 @@
--- a/aws/urllib3/response.py	2023-10-17 19:42:56.000000000 +0200
+++ b/aws/urllib3/response.py	2026-01-13 14:17:48.477104360 +0100
@@ -292,7 +292,11 @@
        Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
        """
        try:
-            self.read()
+            self.read(
+                # Do not spend resources decoding the content unless
+                # decoding has already been initiated.
+                decode_content=self._has_decoded_content,
+            )
        except (HTTPError, SocketError, BaseSSLError, HTTPException):
            pass

--- a/azure/urllib3/response.py	2023-10-17 19:42:56.000000000 +0200
+++ b/azure/urllib3/response.py	2026-01-13 14:17:48.477104360 +0100
@@ -292,7 +292,11 @@
        Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
        """
        try:
-            self.read()
+            self.read(
+                # Do not spend resources decoding the content unless
+                # decoding has already been initiated.
+                decode_content=self._has_decoded_content,
+            )
        except (HTTPError, SocketError, BaseSSLError, HTTPException):
            pass

--- a/kubevirt/urllib3/response.py	2023-10-17 19:42:56.000000000 +0200
+++ b/kubevirt/urllib3/response.py	2026-01-13 14:17:48.477104360 +0100
@@ -292,7 +292,11 @@
        Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
        """
        try:
-            self.read()
+            self.read(
+                # Do not spend resources decoding the content unless
+                # decoding has already been initiated.
+                decode_content=self._has_decoded_content,
+            )
        except (HTTPError, SocketError, BaseSSLError, HTTPException):
            pass
SOURCES/RHEL-145087-fence_ibm_vpc-fix-missing-statuses.patch (new file, 22 lines added)
@@ -0,0 +1,22 @@
From fb50ca8a9e552a4b900c1101d49fb4ed9fa4144a Mon Sep 17 00:00:00 2001
From: Ilias Romanos <ilias@rwx.gr>
Date: Mon, 8 Dec 2025 16:57:48 +0100
Subject: [PATCH] fence_ibm_vpc: fix missing statuses

---
 agents/ibm_vpc/fence_ibm_vpc.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/agents/ibm_vpc/fence_ibm_vpc.py b/agents/ibm_vpc/fence_ibm_vpc.py
index a87e9e6dc..46cf9cc87 100755
--- a/agents/ibm_vpc/fence_ibm_vpc.py
+++ b/agents/ibm_vpc/fence_ibm_vpc.py
@@ -16,6 +16,8 @@
	"stopping": "unknown",
	"restarting": "unknown",
	"pending": "unknown",
+	"deleting": "unknown",
+	"failed": "unknown",
}

def get_list(conn, options):
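For background, fence_ibm_vpc maps IBM VPC instance lifecycle states onto the power states the fencing framework understands, and a state missing from that table is what the patch title calls a "missing status". A hedged sketch of the lookup (only the rows visible in the hunk are from the patch; the "running"/"stopped" rows and the helper name are assumptions for illustration):

    status_map = {
        "running": "on",         # assumed mapping
        "stopped": "off",        # assumed mapping
        "stopping": "unknown",
        "restarting": "unknown",
        "pending": "unknown",
        "deleting": "unknown",   # added by the patch
        "failed": "unknown",     # added by the patch
    }

    def power_status(vpc_state: str) -> str:
        # A state absent from the table presumably made the agent error
        # out; mapping "deleting" and "failed" to "unknown" lets the
        # fencing logic poll the instance again instead.
        return status_map[vpc_state]

    assert power_status("deleting") == "unknown"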
@@ -0,0 +1,22 @@
--- a/kubevirt/urllib3/response.py	2023-10-17 19:42:56.000000000 +0200
+++ b/kubevirt/urllib3/response.py	2026-01-02 11:19:25.583808492 +0100
@@ -135,8 +135,18 @@
        they were applied.
    """

+    # Maximum allowed number of chained HTTP encodings in the
+    # Content-Encoding header.
+    max_decode_links = 5
+
    def __init__(self, modes):
-        self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]
+        encodings = [m.strip() for m in modes.split(",")]
+        if len(encodings) > self.max_decode_links:
+            raise DecodeError(
+                "Too many content encodings in the chain: "
+                f"{len(encodings)} > {self.max_decode_links}"
+            )
+        self._decoders = [_get_decoder(e) for e in encodings]

    def flush(self):
        return self._decoders[0].flush()
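This patch is deliberately self-contained: it rejects an overlong Content-Encoding chain before any decoder is constructed, since every comma-separated coding otherwise costs one decoder object and one extra pass over the body. The same guard as a standalone sketch (DecodeError here is a local stand-in for urllib3's exception):

    MAX_DECODE_LINKS = 5  # mirrors max_decode_links above

    class DecodeError(Exception):
        pass

    def split_encodings(header: str) -> list:
        encodings = [m.strip() for m in header.split(",")]
        if len(encodings) > MAX_DECODE_LINKS:
            # Reject e.g. "gzip, gzip, gzip, gzip, gzip, gzip" before a
            # decoder is allocated for every link of the chain.
            raise DecodeError(
                "Too many content encodings in the chain: "
                f"{len(encodings)} > {MAX_DECODE_LINKS}"
            )
        return encodings

    assert split_encodings("gzip, br") == ["gzip", "br"]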
@@ -0,0 +1,281 @@
--- a/kubevirt/urllib3/response.py	2026-01-20 10:46:57.006470161 +0100
+++ b/kubevirt/urllib3/response.py	2026-01-20 10:55:44.090084896 +0100
@@ -23,6 +23,7 @@
from .exceptions import (
    BodyNotHttplibCompatible,
    DecodeError,
+    DependencyWarning,
    HTTPError,
    IncompleteRead,
    InvalidChunkLength,
@@ -41,34 +42,60 @@
class DeflateDecoder(object):
    def __init__(self):
        self._first_try = True
-        self._data = b""
+        self._first_try_data = b""
+        self._unfed_data = b""
        self._obj = zlib.decompressobj()

    def __getattr__(self, name):
        return getattr(self._obj, name)

-    def decompress(self, data):
-        if not data:
+    def decompress(self, data: bytes, max_length: int = -1) -> bytes:
+        data = self._unfed_data + data
+        self._unfed_data = b""
+        if not data and not self._obj.unconsumed_tail:
            return data
+        original_max_length = max_length
+        if original_max_length < 0:
+            max_length = 0
+        elif original_max_length == 0:
+            # We should not pass 0 to the zlib decompressor because 0 is
+            # the default value that will make zlib decompress without a
+            # length limit.
+            # Data should be stored for subsequent calls.
+            self._unfed_data = data
+            return b""

+        # Subsequent calls always reuse `self._obj`. zlib requires
+        # passing the unconsumed tail if decompression is to continue.
        if not self._first_try:
-            return self._obj.decompress(data)
+            return self._obj.decompress(
+                self._obj.unconsumed_tail + data, max_length=max_length
+            )

-        self._data += data
+        # First call tries with RFC 1950 ZLIB format.
+        self._first_try_data += data
        try:
-            decompressed = self._obj.decompress(data)
+            decompressed = self._obj.decompress(data, max_length=max_length)
            if decompressed:
                self._first_try = False
-                self._data = None
+                self._first_try_data = b""
            return decompressed
+        # On failure, it falls back to RFC 1951 DEFLATE format.
        except zlib.error:
            self._first_try = False
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
-                return self.decompress(self._data)
+                return self.decompress(
+                    self._first_try_data, max_length=original_max_length
+                )
            finally:
-                self._data = None
+                self._first_try_data = b""

+    @property
+    def has_unconsumed_tail(self) -> bool:
+        return bool(self._unfed_data) or (
+            bool(self._obj.unconsumed_tail) and not self._first_try
+        )

class GzipDecoderState(object):

@@ -81,30 +108,64 @@
    def __init__(self):
        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
        self._state = GzipDecoderState.FIRST_MEMBER
+        self._unconsumed_tail = b""

    def __getattr__(self, name):
        return getattr(self._obj, name)

-    def decompress(self, data):
+    def decompress(self, data: bytes, max_length: int = -1) -> bytes:
        ret = bytearray()
-        if self._state == GzipDecoderState.SWALLOW_DATA or not data:
+        if self._state == GzipDecoderState.SWALLOW_DATA:
+            return bytes(ret)
+
+        if max_length == 0:
+            # We should not pass 0 to the zlib decompressor because 0 is
+            # the default value that will make zlib decompress without a
+            # length limit.
+            # Data should be stored for subsequent calls.
+            self._unconsumed_tail += data
+            return b""
+
+        # zlib requires passing the unconsumed tail to the subsequent
+        # call if decompression is to continue.
+        data = self._unconsumed_tail + data
+        if not data and self._obj.eof:
            return bytes(ret)
+
        while True:
            try:
-                ret += self._obj.decompress(data)
+                ret += self._obj.decompress(
+                    data, max_length=max(max_length - len(ret), 0)
+                )
            except zlib.error:
                previous_state = self._state
                # Ignore data after the first error
                self._state = GzipDecoderState.SWALLOW_DATA
+                self._unconsumed_tail = b""
                if previous_state == GzipDecoderState.OTHER_MEMBERS:
                    # Allow trailing garbage acceptable in other gzip clients
                    return bytes(ret)
                raise
-            data = self._obj.unused_data
+
+            self._unconsumed_tail = data = (
+                self._obj.unconsumed_tail or self._obj.unused_data
+            )
+            if max_length > 0 and len(ret) >= max_length:
+                break
+
            if not data:
                return bytes(ret)
-            self._state = GzipDecoderState.OTHER_MEMBERS
-            self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+            # When the end of a gzip member is reached, a new decompressor
+            # must be created for unused (possibly future) data.
+            if self._obj.eof:
+                self._state = GzipDecoderState.OTHER_MEMBERS
+                self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+        return bytes(ret)
+
+    @property
+    def has_unconsumed_tail(self) -> bool:
+        return bool(self._unconsumed_tail)


if brotli is not None:
@@ -116,9 +177,35 @@
        def __init__(self):
            self._obj = brotli.Decompressor()
            if hasattr(self._obj, "decompress"):
-                self.decompress = self._obj.decompress
+                setattr(self, "_decompress", self._obj.decompress)
            else:
-                self.decompress = self._obj.process
+                setattr(self, "_decompress", self._obj.process)
+
+        # Requires Brotli >= 1.2.0 for `output_buffer_limit`.
+        def _decompress(self, data: bytes, output_buffer_limit: int = -1) -> bytes:
+            raise NotImplementedError()
+
+        def decompress(self, data: bytes, max_length: int = -1) -> bytes:
+            try:
+                if max_length > 0:
+                    return self._decompress(data, output_buffer_limit=max_length)
+                else:
+                    return self._decompress(data)
+            except TypeError:
+                # Fallback for Brotli/brotlicffi/brotlipy versions without
+                # the `output_buffer_limit` parameter.
+                warnings.warn(
+                    "Brotli >= 1.2.0 is required to prevent decompression bombs.",
+                    DependencyWarning,
+                )
+                return self._decompress(data)
+
+        @property
+        def has_unconsumed_tail(self) -> bool:
+            try:
+                return not self._obj.can_accept_more_data()
+            except AttributeError:
+                return False

        def flush(self):
            if hasattr(self._obj, "flush"):
@@ -151,10 +238,35 @@
    def flush(self):
        return self._decoders[0].flush()

-    def decompress(self, data):
-        for d in reversed(self._decoders):
-            data = d.decompress(data)
-        return data
+    def decompress(self, data: bytes, max_length: int = -1) -> bytes:
+        if max_length <= 0:
+            for d in reversed(self._decoders):
+                data = d.decompress(data)
+            return data
+
+        ret = bytearray()
+        # Every while loop iteration goes through all decoders once.
+        # It exits when enough data is read or no more data can be read.
+        # It is possible that the while loop iteration does not produce
+        # any data because we retrieve up to `max_length` from every
+        # decoder, and the amount of bytes may be insufficient for the
+        # next decoder to produce enough/any output.
+        while True:
+            any_data = False
+            for d in reversed(self._decoders):
+                data = d.decompress(data, max_length=max_length - len(ret))
+                if data:
+                    any_data = True
+                # We should not break when no data is returned because
+                # next decoders may produce data even with empty input.
+                ret += data
+            if not any_data or len(ret) >= max_length:
+                return bytes(ret)
+            data = b""
+
+    @property
+    def has_unconsumed_tail(self) -> bool:
+        return any(d.has_unconsumed_tail for d in self._decoders)


def _get_decoder(mode):
@@ -405,16 +517,25 @@
    if brotli is not None:
        DECODER_ERROR_CLASSES += (brotli.error,)

-    def _decode(self, data, decode_content, flush_decoder):
+    def _decode(
+        self,
+        data: bytes,
+        decode_content: bool,
+        flush_decoder: bool,
+        max_length: int = None,
+    ) -> bytes:
        """
        Decode the data passed in and potentially flush the decoder.
        """
        if not decode_content:
            return data

+        if max_length is None or flush_decoder:
+            max_length = -1
+
        try:
            if self._decoder:
-                data = self._decoder.decompress(data)
+                data = self._decoder.decompress(data, max_length=max_length)
        except self.DECODER_ERROR_CLASSES as e:
            content_encoding = self.headers.get("content-encoding", "").lower()
            raise DecodeError(
@@ -634,7 +755,10 @@
            for line in self.read_chunked(amt, decode_content=decode_content):
                yield line
        else:
-            while not is_fp_closed(self._fp):
+            while (
+                not is_fp_closed(self._fp)
+                or (self._decoder and self._decoder.has_unconsumed_tail)
+            ):
                data = self.read(amt=amt, decode_content=decode_content)

                if data:
@@ -840,7 +964,10 @@
                break
            chunk = self._handle_chunk(amt)
            decoded = self._decode(
-                chunk, decode_content=decode_content, flush_decoder=False
+                chunk,
+                decode_content=decode_content,
+                flush_decoder=False,
+                max_length=amt,
            )
            if decoded:
                yield decoded
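One detail in the file above deserves a note: Brotli bindings only gained an output cap (output_buffer_limit) in Brotli >= 1.2.0, so the patch feature-probes with a TypeError fallback instead of a version check. A reduced sketch of that probe (the binding method is passed in because its name differs across bindings, decompress() on Brotli versus process() on brotlipy/brotlicffi, as the hasattr() branch above shows; on old bindings output stays unbounded, the trade-off the patch accepts with a warning):

    import warnings

    def bounded_decompress(binding_decompress, data: bytes, max_length: int) -> bytes:
        if max_length <= 0:
            return binding_decompress(data)
        try:
            # Newer bindings cap the produced plaintext at max_length.
            return binding_decompress(data, output_buffer_limit=max_length)
        except TypeError:
            # Binding predates output_buffer_limit: warn and decode unbounded.
            warnings.warn(
                "Brotli >= 1.2.0 is required to prevent decompression bombs."
            )
            return binding_decompress(data)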
@@ -0,0 +1,31 @@
--- a/kubevirt/urllib3/response.py	2026-02-03 08:20:11.000000000 +0100
+++ b/kubevirt/urllib3/response.py	2026-02-03 09:11:38.017998476 +0100
@@ -350,6 +350,7 @@
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content
+        self._has_decoded_content = False
        self.retries = retries
        self.enforce_content_length = enforce_content_length
        self.auto_close = auto_close
@@ -414,7 +415,11 @@
        Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
        """
        try:
-            self.read()
+            self.read(
+                # Do not spend resources decoding the content unless
+                # decoding has already been initiated.
+                decode_content=self._has_decoded_content,
+            )
        except (HTTPError, SocketError, BaseSSLError, HTTPException):
            pass

@@ -536,6 +541,7 @@
        try:
            if self._decoder:
                data = self._decoder.decompress(data, max_length=max_length)
+                self._has_decoded_content = True
        except self.DECODER_ERROR_CLASSES as e:
            content_encoding = self.headers.get("content-encoding", "").lower()
            raise DecodeError(
@@ -87,7 +87,7 @@
Name: fence-agents
Summary: Set of unified programs capable of host isolation ("fencing")
Version: 4.2.1
-Release: 129%{?alphatag:.%{alphatag}}%{?dist}.21
+Release: 129%{?alphatag:.%{alphatag}}%{?dist}.24
License: GPLv2+ and LGPLv2+
Group: System Environment/Base
URL: https://github.com/ClusterLabs/fence-agents
@@ -322,6 +322,7 @@ Patch149: RHEL-109814-1-fence_aws-add-skipshutdown-parameter.patch
Patch150: RHEL-96179-fence_kubevirt-force-off.patch
Patch151: RHEL-110964-1-fence_nutanix_ahv.patch
Patch152: RHEL-110964-2-fence_nutanix_ahv-update-metadata.patch
+Patch153: RHEL-145087-fence_ibm_vpc-fix-missing-statuses.patch

### HA support libs/utils ###
# all archs
@@ -331,6 +332,10 @@ Patch1002: RHEL-35655-kubevirt-fix-bundled-jinja2-CVE-2024-34064.patch
Patch1003: RHEL-43568-1-kubevirt-fix-bundled-urllib3-CVE-2024-37891.patch
Patch1004: RHEL-50223-setuptools-fix-CVE-2024-6345.patch
Patch1005: RHEL-104741-1-kubevirt-fix-bundled-requests-CVE-2024-47081.patch
+Patch1006: RHEL-148156-kubevirt-1-fix-bundled-urllib3-CVE-2025-66418.patch
+Patch1007: RHEL-148156-kubevirt-2-fix-bundled-urllib3-CVE-2025-66471.patch
+Patch1008: RHEL-148156-kubevirt-3-RHEL-146288-fix-bundled-urllib3-CVE-2026-21441.patch
+Patch1009: RHEL-148156-kubevirt-4-RHEL-142447-fix-bundled-pyasn1-CVE-2026-23490.patch
# cloud (x86_64 only)
Patch2000: bz2218234-2-aws-fix-bundled-dateutil-CVE-2007-4559.patch
Patch2001: RHEL-43568-2-aws-fix-bundled-urllib3-CVE-2024-37891.patch
@@ -338,8 +343,7 @@ Patch2002: RHEL-104741-2-aliyun-aws-azure-fix-bundled-requests-CVE-2024-47081.pa
Patch2003: RHEL-109814-2-botocore-add-SkipOsShutdown.patch
Patch2004: RHEL-136027-fix-bundled-urllib3-CVE-2025-66418.patch
Patch2005: RHEL-139756-fix-bundled-urllib3-CVE-2025-66471.patch
-Patch2006: RHEL-140783-fix-bundled-urllib3-CVE-2026-21441.patch
-Patch2007: RHEL-142447-fix-bundled-pyasn1-CVE-2026-23490.patch
+Patch2006: RHEL-140783-RHEL-146288-fix-bundled-urllib3-CVE-2026-21441.patch

%if 0%{?fedora} || 0%{?rhel} > 7
%global supportedagents amt_ws apc apc_snmp bladecenter brocade cisco_mds cisco_ucs compute drac5 eaton_snmp emerson eps evacuate hds_cb hpblade ibmblade ibm_powervs ibm_vpc ifmib ilo ilo_moonshot ilo_mp ilo_ssh intelmodular ipdu ipmilan kdump kubevirt lpar mpath nutanix_ahv redfish rhevm rsa rsb sbd scsi vmware_rest vmware_soap wti
@@ -570,6 +574,7 @@ BuildRequires: python3-google-api-client python3-pip python3-wheel python3-jinja
%patch -p1 -P 150
%patch -p1 -P 151
%patch -p1 -P 152
+%patch -p1 -P 153

# prevent compilation of something that won't get used anyway
sed -i.orig 's|FENCE_ZVM=1|FENCE_ZVM=0|' configure.ac
@@ -693,6 +698,10 @@ pushd %{buildroot}/usr/lib/fence-agents/%{bundled_lib_dir}
/usr/bin/patch --no-backup-if-mismatch -p1 --fuzz=2 < %{PATCH1003}
/usr/bin/patch --no-backup-if-mismatch -p1 --fuzz=0 < %{PATCH1004}
/usr/bin/patch --no-backup-if-mismatch -p1 --fuzz=0 < %{PATCH1005}
+/usr/bin/patch --no-backup-if-mismatch -p1 --fuzz=0 < %{PATCH1006}
+/usr/bin/patch --no-backup-if-mismatch -p1 --fuzz=0 < %{PATCH1007}
+/usr/bin/patch --no-backup-if-mismatch -p1 --fuzz=0 < %{PATCH1008}
+/usr/bin/patch --no-backup-if-mismatch -p1 --fuzz=0 < %{PATCH1009}

%ifarch x86_64
/usr/bin/patch --no-backup-if-mismatch -p1 --fuzz=0 < %{PATCH2000}
@@ -702,7 +711,6 @@ pushd %{buildroot}/usr/lib/fence-agents/%{bundled_lib_dir}
/usr/bin/patch --no-backup-if-mismatch -p1 --fuzz=0 < %{PATCH2004}
/usr/bin/patch --no-backup-if-mismatch -p1 --fuzz=0 < %{PATCH2005}
/usr/bin/patch --no-backup-if-mismatch -p1 --fuzz=0 < %{PATCH2006}
-/usr/bin/patch --no-backup-if-mismatch -p1 --fuzz=0 < %{PATCH2007}
%endif
popd

@@ -1636,6 +1644,19 @@ Fence agent for IBM z/VM over IP.
%endif

%changelog
+* Wed Feb 11 2026 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.2.1-129.24
+- bundled urllib3: fix CVE-2025-66418, CVE-2025-66471, CVE-2026-21441,
+  and pyasn1 CVE-2026-23490 on all archs
+  Resolves: RHEL-148156
+
+* Tue Feb 3 2026 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.2.1-129.23
+- bundled urllib3: fix issue with CVE-2026-21441 patch
+  Resolves: RHEL-146288
+
+* Thu Jan 29 2026 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.2.1-129.22
+- fence_ibm_vpc: fix missing statuses
+  Resolves: RHEL-145087
+
* Tue Jan 27 2026 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.2.1-129.21
- bundled pyasn1: fix CVE-2026-23490
  Resolves: RHEL-142447