Compare commits
No commits in common. "c8" and "c8s" have entirely different histories.
.gitignore (vendored): 19 lines changed
@@ -1,12 +1,7 @@
-SOURCES/ClusterLabs-resource-agents-55a4e2c9.tar.gz
-SOURCES/aliyun-cli-2.1.10.tar.gz
-SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz
-SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz
-SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz
-SOURCES/colorama-0.3.3.tar.gz
-SOURCES/google-cloud-sdk-360.0.0-linux-x86_64.tar.gz
-SOURCES/httplib2-0.20.4.tar.gz
-SOURCES/pycryptodome-3.20.0.tar.gz
-SOURCES/pyparsing-2.4.7-py2.py3-none-any.whl
-SOURCES/pyroute2-0.4.13.tar.gz
-SOURCES/urllib3-1.26.18.tar.gz
+/*.tar.gz
+/*.rpm
+/*.whl
+/.*
+/*/
+!/tests/
+/tests/*.retry
@@ -1,12 +0,0 @@
-dfc65f4cac3f95026b2f5674019814a527333004 SOURCES/ClusterLabs-resource-agents-55a4e2c9.tar.gz
-306e131d8908ca794276bfe3a0b55ccc3bbd482f SOURCES/aliyun-cli-2.1.10.tar.gz
-0a56f6d9ed2014a363486d33b63eca094379be06 SOURCES/aliyun-python-sdk-core-2.13.1.tar.gz
-c2a98b9a1562d223a76514f05028488ca000c395 SOURCES/aliyun-python-sdk-ecs-4.9.3.tar.gz
-f14647a4d37a9a254c4e711b95a7654fc418e41e SOURCES/aliyun-python-sdk-vpc-3.0.2.tar.gz
-0fe5bd8bca54dd71223778a1e0bcca9af324abb1 SOURCES/colorama-0.3.3.tar.gz
-81f039cf075e9c8b70d5af99c189296a9e031de3 SOURCES/google-cloud-sdk-360.0.0-linux-x86_64.tar.gz
-7caf4412d9473bf17352316249a8133fa70b7e37 SOURCES/httplib2-0.20.4.tar.gz
-c55d177e9484d974c95078d4ae945f89ba2c7251 SOURCES/pycryptodome-3.20.0.tar.gz
-c8307f47e3b75a2d02af72982a2dfefa3f56e407 SOURCES/pyparsing-2.4.7-py2.py3-none-any.whl
-147149db11104c06d405fd077dcd2aa1c345f109 SOURCES/pyroute2-0.4.13.tar.gz
-84e2852d8da1655373f7ce5e7d5d3e256b62b4e4 SOURCES/urllib3-1.26.18.tar.gz
RHEL-136031-fix-bundled-urllib3-CVE-2025-66418.patch (new file): 45 lines
@@ -0,0 +1,45 @@
--- a/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/response.py 2023-10-17 19:42:56.000000000 +0200
+++ b/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/response.py 2026-01-02 11:19:25.583808492 +0100
@@ -135,8 +135,18 @@
         they were applied.
     """
 
+    # Maximum allowed number of chained HTTP encodings in the
+    # Content-Encoding header.
+    max_decode_links = 5
+
     def __init__(self, modes):
-        self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]
+        encodings = [m.strip() for m in modes.split(",")]
+        if len(encodings) > self.max_decode_links:
+            raise DecodeError(
+                "Too many content encodings in the chain: "
+                f"{len(encodings)} > {self.max_decode_links}"
+            )
+        self._decoders = [_get_decoder(e) for e in encodings]
 
     def flush(self):
         return self._decoders[0].flush()

--- a/gcp/google-cloud-sdk/lib/third_party/urllib3/response.py 2023-10-17 19:42:56.000000000 +0200
+++ b/gcp/google-cloud-sdk/lib/third_party/urllib3/response.py 2026-01-02 11:19:25.583808492 +0100
@@ -135,8 +135,18 @@
         they were applied.
     """
 
+    # Maximum allowed number of chained HTTP encodings in the
+    # Content-Encoding header.
+    max_decode_links = 5
+
     def __init__(self, modes):
-        self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]
+        encodings = [m.strip() for m in modes.split(",")]
+        if len(encodings) > self.max_decode_links:
+            raise DecodeError(
+                "Too many content encodings in the chain: "
+                f"{len(encodings)} > {self.max_decode_links}"
+            )
+        self._decoders = [_get_decoder(e) for e in encodings]
 
     def flush(self):
         return self._decoders[0].flush()
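For illustration, the guard this patch adds can be exercised on its own. The sketch below is a minimal standalone version (not the vendored urllib3 code); DecodeError and parse_content_encoding are stand-ins, and only the split-and-count logic with the 5-link limit mirrors the patch.

# Standalone sketch of the encoding-chain limit added by the patch above.
MAX_DECODE_LINKS = 5  # mirrors max_decode_links in the patch

class DecodeError(Exception):
    """Stand-in for urllib3.exceptions.DecodeError."""

def parse_content_encoding(header_value):
    # Split "gzip, deflate, ..." the same way the patched __init__ does.
    encodings = [e.strip() for e in header_value.split(",")]
    if len(encodings) > MAX_DECODE_LINKS:
        raise DecodeError(
            "Too many content encodings in the chain: "
            "%d > %d" % (len(encodings), MAX_DECODE_LINKS)
        )
    return encodings

print(parse_content_encoding("gzip, deflate"))   # ['gzip', 'deflate']
# parse_content_encoding("gzip," * 5 + "gzip")   # raises DecodeError (6 > 5)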
RHEL-139760-fix-bundled-urllib3-CVE-2025-66471.patch (new file): 563 lines
@@ -0,0 +1,563 @@
--- a/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/response.py 2026-01-20 10:46:57.006470161 +0100
+++ b/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/response.py 2026-01-20 10:55:44.090084896 +0100
@@ -23,6 +23,7 @@
 from .exceptions import (
     BodyNotHttplibCompatible,
     DecodeError,
+    DependencyWarning,
     HTTPError,
     IncompleteRead,
     InvalidChunkLength,
@@ -41,34 +42,60 @@
 class DeflateDecoder(object):
     def __init__(self):
         self._first_try = True
-        self._data = b""
+        self._first_try_data = b""
+        self._unfed_data = b""
         self._obj = zlib.decompressobj()
 
     def __getattr__(self, name):
         return getattr(self._obj, name)
 
-    def decompress(self, data):
-        if not data:
+    def decompress(self, data: bytes, max_length: int = -1) -> bytes:
+        data = self._unfed_data + data
+        self._unfed_data = b""
+        if not data and not self._obj.unconsumed_tail:
             return data
+        original_max_length = max_length
+        if original_max_length < 0:
+            max_length = 0
+        elif original_max_length == 0:
+            # We should not pass 0 to the zlib decompressor because 0 is
+            # the default value that will make zlib decompress without a
+            # length limit.
+            # Data should be stored for subsequent calls.
+            self._unfed_data = data
+            return b""
 
+        # Subsequent calls always reuse `self._obj`. zlib requires
+        # passing the unconsumed tail if decompression is to continue.
         if not self._first_try:
-            return self._obj.decompress(data)
+            return self._obj.decompress(
+                self._obj.unconsumed_tail + data, max_length=max_length
+            )
 
-        self._data += data
+        # First call tries with RFC 1950 ZLIB format.
+        self._first_try_data += data
         try:
-            decompressed = self._obj.decompress(data)
+            decompressed = self._obj.decompress(data, max_length=max_length)
             if decompressed:
                 self._first_try = False
-                self._data = None
+                self._first_try_data = b""
             return decompressed
+        # On failure, it falls back to RFC 1951 DEFLATE format.
         except zlib.error:
             self._first_try = False
             self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
             try:
-                return self.decompress(self._data)
+                return self.decompress(
+                    self._first_try_data, max_length=original_max_length
+                )
             finally:
-                self._data = None
+                self._first_try_data = b""
 
+    @property
+    def has_unconsumed_tail(self) -> bool:
+        return bool(self._unfed_data) or (
+            bool(self._obj.unconsumed_tail) and not self._first_try
+        )
 
 class GzipDecoderState(object):
 
@@ -81,30 +108,64 @@
     def __init__(self):
         self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
         self._state = GzipDecoderState.FIRST_MEMBER
+        self._unconsumed_tail = b""
 
     def __getattr__(self, name):
         return getattr(self._obj, name)
 
-    def decompress(self, data):
+    def decompress(self, data: bytes, max_length: int = -1) -> bytes:
         ret = bytearray()
-        if self._state == GzipDecoderState.SWALLOW_DATA or not data:
+        if self._state == GzipDecoderState.SWALLOW_DATA:
+            return bytes(ret)
+
+        if max_length == 0:
+            # We should not pass 0 to the zlib decompressor because 0 is
+            # the default value that will make zlib decompress without a
+            # length limit.
+            # Data should be stored for subsequent calls.
+            self._unconsumed_tail += data
+            return b""
+
+        # zlib requires passing the unconsumed tail to the subsequent
+        # call if decompression is to continue.
+        data = self._unconsumed_tail + data
+        if not data and self._obj.eof:
             return bytes(ret)
+
         while True:
             try:
-                ret += self._obj.decompress(data)
+                ret += self._obj.decompress(
+                    data, max_length=max(max_length - len(ret), 0)
+                )
             except zlib.error:
                 previous_state = self._state
                 # Ignore data after the first error
                 self._state = GzipDecoderState.SWALLOW_DATA
+                self._unconsumed_tail = b""
                 if previous_state == GzipDecoderState.OTHER_MEMBERS:
                     # Allow trailing garbage acceptable in other gzip clients
                     return bytes(ret)
                 raise
-            data = self._obj.unused_data
+
+            self._unconsumed_tail = data = (
+                self._obj.unconsumed_tail or self._obj.unused_data
+            )
+            if max_length > 0 and len(ret) >= max_length:
+                break
+
             if not data:
                 return bytes(ret)
-            self._state = GzipDecoderState.OTHER_MEMBERS
-            self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+            # When the end of a gzip member is reached, a new decompressor
+            # must be created for unused (possibly future) data.
+            if self._obj.eof:
+                self._state = GzipDecoderState.OTHER_MEMBERS
+                self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+        return bytes(ret)
+
+    @property
+    def has_unconsumed_tail(self) -> bool:
+        return bool(self._unconsumed_tail)
 
 
 if brotli is not None:
@@ -116,9 +177,35 @@
         def __init__(self):
             self._obj = brotli.Decompressor()
             if hasattr(self._obj, "decompress"):
-                self.decompress = self._obj.decompress
+                setattr(self, "_decompress", self._obj.decompress)
             else:
-                self.decompress = self._obj.process
+                setattr(self, "_decompress", self._obj.process)
+
+        # Requires Brotli >= 1.2.0 for `output_buffer_limit`.
+        def _decompress(self, data: bytes, output_buffer_limit: int = -1) -> bytes:
+            raise NotImplementedError()
+
+        def decompress(self, data: bytes, max_length: int = -1) -> bytes:
+            try:
+                if max_length > 0:
+                    return self._decompress(data, output_buffer_limit=max_length)
+                else:
+                    return self._decompress(data)
+            except TypeError:
+                # Fallback for Brotli/brotlicffi/brotlipy versions without
+                # the `output_buffer_limit` parameter.
+                warnings.warn(
+                    "Brotli >= 1.2.0 is required to prevent decompression bombs.",
+                    DependencyWarning,
+                )
+                return self._decompress(data)
+
+        @property
+        def has_unconsumed_tail(self) -> bool:
+            try:
+                return not self._obj.can_accept_more_data()
+            except AttributeError:
+                return False
 
         def flush(self):
             if hasattr(self._obj, "flush"):
@@ -151,10 +238,35 @@
     def flush(self):
         return self._decoders[0].flush()
 
-    def decompress(self, data):
-        for d in reversed(self._decoders):
-            data = d.decompress(data)
-        return data
+    def decompress(self, data: bytes, max_length: int = -1) -> bytes:
+        if max_length <= 0:
+            for d in reversed(self._decoders):
+                data = d.decompress(data)
+            return data
+
+        ret = bytearray()
+        # Every while loop iteration goes through all decoders once.
+        # It exits when enough data is read or no more data can be read.
+        # It is possible that the while loop iteration does not produce
+        # any data because we retrieve up to `max_length` from every
+        # decoder, and the amount of bytes may be insufficient for the
+        # next decoder to produce enough/any output.
+        while True:
+            any_data = False
+            for d in reversed(self._decoders):
+                data = d.decompress(data, max_length=max_length - len(ret))
+                if data:
+                    any_data = True
+                # We should not break when no data is returned because
+                # next decoders may produce data even with empty input.
+                ret += data
+            if not any_data or len(ret) >= max_length:
+                return bytes(ret)
+            data = b""
+
+    @property
+    def has_unconsumed_tail(self) -> bool:
+        return any(d.has_unconsumed_tail for d in self._decoders)
 
 
 def _get_decoder(mode):
@@ -405,16 +517,25 @@
     if brotli is not None:
         DECODER_ERROR_CLASSES += (brotli.error,)
 
-    def _decode(self, data, decode_content, flush_decoder):
+    def _decode(
+        self,
+        data: bytes,
+        decode_content: bool,
+        flush_decoder: bool,
+        max_length: int = None,
+    ) -> bytes:
         """
         Decode the data passed in and potentially flush the decoder.
         """
         if not decode_content:
             return data
 
+        if max_length is None or flush_decoder:
+            max_length = -1
+
         try:
             if self._decoder:
-                data = self._decoder.decompress(data)
+                data = self._decoder.decompress(data, max_length=max_length)
         except self.DECODER_ERROR_CLASSES as e:
             content_encoding = self.headers.get("content-encoding", "").lower()
             raise DecodeError(
@@ -634,7 +755,10 @@
             for line in self.read_chunked(amt, decode_content=decode_content):
                 yield line
         else:
-            while not is_fp_closed(self._fp):
+            while (
+                not is_fp_closed(self._fp)
+                or (self._decoder and self._decoder.has_unconsumed_tail)
+            ):
                 data = self.read(amt=amt, decode_content=decode_content)
 
                 if data:
@@ -840,7 +964,10 @@
                     break
                 chunk = self._handle_chunk(amt)
                 decoded = self._decode(
-                    chunk, decode_content=decode_content, flush_decoder=False
+                    chunk,
+                    decode_content=decode_content,
+                    flush_decoder=False,
+                    max_length=amt,
                 )
                 if decoded:
                     yield decoded

--- a/gcp/google-cloud-sdk/lib/third_party/urllib3/response.py 2026-01-20 10:46:57.006470161 +0100
+++ b/gcp/google-cloud-sdk/lib/third_party/urllib3/response.py 2026-01-20 10:55:44.090084896 +0100
@@ -23,6 +23,7 @@
 from .exceptions import (
     BodyNotHttplibCompatible,
     DecodeError,
+    DependencyWarning,
     HTTPError,
     IncompleteRead,
     InvalidChunkLength,
@@ -41,34 +42,60 @@
 class DeflateDecoder(object):
     def __init__(self):
         self._first_try = True
-        self._data = b""
+        self._first_try_data = b""
+        self._unfed_data = b""
        self._obj = zlib.decompressobj()
 
     def __getattr__(self, name):
         return getattr(self._obj, name)
 
-    def decompress(self, data):
-        if not data:
+    def decompress(self, data: bytes, max_length: int = -1) -> bytes:
+        data = self._unfed_data + data
+        self._unfed_data = b""
+        if not data and not self._obj.unconsumed_tail:
             return data
+        original_max_length = max_length
+        if original_max_length < 0:
+            max_length = 0
+        elif original_max_length == 0:
+            # We should not pass 0 to the zlib decompressor because 0 is
+            # the default value that will make zlib decompress without a
+            # length limit.
+            # Data should be stored for subsequent calls.
+            self._unfed_data = data
+            return b""
 
+        # Subsequent calls always reuse `self._obj`. zlib requires
+        # passing the unconsumed tail if decompression is to continue.
        if not self._first_try:
-            return self._obj.decompress(data)
+            return self._obj.decompress(
+                self._obj.unconsumed_tail + data, max_length=max_length
+            )
 
-        self._data += data
+        # First call tries with RFC 1950 ZLIB format.
+        self._first_try_data += data
         try:
-            decompressed = self._obj.decompress(data)
+            decompressed = self._obj.decompress(data, max_length=max_length)
             if decompressed:
                 self._first_try = False
-                self._data = None
+                self._first_try_data = b""
             return decompressed
+        # On failure, it falls back to RFC 1951 DEFLATE format.
         except zlib.error:
             self._first_try = False
             self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
             try:
-                return self.decompress(self._data)
+                return self.decompress(
+                    self._first_try_data, max_length=original_max_length
+                )
             finally:
-                self._data = None
+                self._first_try_data = b""
 
+    @property
+    def has_unconsumed_tail(self) -> bool:
+        return bool(self._unfed_data) or (
+            bool(self._obj.unconsumed_tail) and not self._first_try
+        )
 
 class GzipDecoderState(object):
 
@@ -81,30 +108,64 @@
     def __init__(self):
         self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
         self._state = GzipDecoderState.FIRST_MEMBER
+        self._unconsumed_tail = b""
 
     def __getattr__(self, name):
         return getattr(self._obj, name)
 
-    def decompress(self, data):
+    def decompress(self, data: bytes, max_length: int = -1) -> bytes:
         ret = bytearray()
-        if self._state == GzipDecoderState.SWALLOW_DATA or not data:
+        if self._state == GzipDecoderState.SWALLOW_DATA:
+            return bytes(ret)
+
+        if max_length == 0:
+            # We should not pass 0 to the zlib decompressor because 0 is
+            # the default value that will make zlib decompress without a
+            # length limit.
+            # Data should be stored for subsequent calls.
+            self._unconsumed_tail += data
+            return b""
+
+        # zlib requires passing the unconsumed tail to the subsequent
+        # call if decompression is to continue.
+        data = self._unconsumed_tail + data
+        if not data and self._obj.eof:
             return bytes(ret)
+
         while True:
             try:
-                ret += self._obj.decompress(data)
+                ret += self._obj.decompress(
+                    data, max_length=max(max_length - len(ret), 0)
+                )
             except zlib.error:
                 previous_state = self._state
                 # Ignore data after the first error
                 self._state = GzipDecoderState.SWALLOW_DATA
+                self._unconsumed_tail = b""
                 if previous_state == GzipDecoderState.OTHER_MEMBERS:
                     # Allow trailing garbage acceptable in other gzip clients
                     return bytes(ret)
                 raise
-            data = self._obj.unused_data
+
+            self._unconsumed_tail = data = (
+                self._obj.unconsumed_tail or self._obj.unused_data
+            )
+            if max_length > 0 and len(ret) >= max_length:
+                break
+
             if not data:
                 return bytes(ret)
-            self._state = GzipDecoderState.OTHER_MEMBERS
-            self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+            # When the end of a gzip member is reached, a new decompressor
+            # must be created for unused (possibly future) data.
+            if self._obj.eof:
+                self._state = GzipDecoderState.OTHER_MEMBERS
+                self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+        return bytes(ret)
+
+    @property
+    def has_unconsumed_tail(self) -> bool:
+        return bool(self._unconsumed_tail)
 
 
 if brotli is not None:
@@ -116,9 +177,35 @@
         def __init__(self):
             self._obj = brotli.Decompressor()
             if hasattr(self._obj, "decompress"):
-                self.decompress = self._obj.decompress
+                setattr(self, "_decompress", self._obj.decompress)
             else:
-                self.decompress = self._obj.process
+                setattr(self, "_decompress", self._obj.process)
+
+        # Requires Brotli >= 1.2.0 for `output_buffer_limit`.
+        def _decompress(self, data: bytes, output_buffer_limit: int = -1) -> bytes:
+            raise NotImplementedError()
+
+        def decompress(self, data: bytes, max_length: int = -1) -> bytes:
+            try:
+                if max_length > 0:
+                    return self._decompress(data, output_buffer_limit=max_length)
+                else:
+                    return self._decompress(data)
+            except TypeError:
+                # Fallback for Brotli/brotlicffi/brotlipy versions without
+                # the `output_buffer_limit` parameter.
+                warnings.warn(
+                    "Brotli >= 1.2.0 is required to prevent decompression bombs.",
+                    DependencyWarning,
+                )
+                return self._decompress(data)
+
+        @property
+        def has_unconsumed_tail(self) -> bool:
+            try:
+                return not self._obj.can_accept_more_data()
+            except AttributeError:
+                return False
 
         def flush(self):
             if hasattr(self._obj, "flush"):
@@ -151,10 +238,35 @@
     def flush(self):
         return self._decoders[0].flush()
 
-    def decompress(self, data):
-        for d in reversed(self._decoders):
-            data = d.decompress(data)
-        return data
+    def decompress(self, data: bytes, max_length: int = -1) -> bytes:
+        if max_length <= 0:
+            for d in reversed(self._decoders):
+                data = d.decompress(data)
+            return data
+
+        ret = bytearray()
+        # Every while loop iteration goes through all decoders once.
+        # It exits when enough data is read or no more data can be read.
+        # It is possible that the while loop iteration does not produce
+        # any data because we retrieve up to `max_length` from every
+        # decoder, and the amount of bytes may be insufficient for the
+        # next decoder to produce enough/any output.
+        while True:
+            any_data = False
+            for d in reversed(self._decoders):
+                data = d.decompress(data, max_length=max_length - len(ret))
+                if data:
+                    any_data = True
+                # We should not break when no data is returned because
+                # next decoders may produce data even with empty input.
+                ret += data
+            if not any_data or len(ret) >= max_length:
+                return bytes(ret)
+            data = b""
+
+    @property
+    def has_unconsumed_tail(self) -> bool:
+        return any(d.has_unconsumed_tail for d in self._decoders)
 
 
 def _get_decoder(mode):
@@ -405,16 +517,25 @@
     if brotli is not None:
         DECODER_ERROR_CLASSES += (brotli.error,)
 
-    def _decode(self, data, decode_content, flush_decoder):
+    def _decode(
+        self,
+        data: bytes,
+        decode_content: bool,
+        flush_decoder: bool,
+        max_length: int = None,
+    ) -> bytes:
         """
         Decode the data passed in and potentially flush the decoder.
         """
         if not decode_content:
             return data
 
+        if max_length is None or flush_decoder:
+            max_length = -1
+
         try:
             if self._decoder:
-                data = self._decoder.decompress(data)
+                data = self._decoder.decompress(data, max_length=max_length)
         except self.DECODER_ERROR_CLASSES as e:
             content_encoding = self.headers.get("content-encoding", "").lower()
             raise DecodeError(
@@ -634,7 +755,10 @@
             for line in self.read_chunked(amt, decode_content=decode_content):
                 yield line
         else:
-            while not is_fp_closed(self._fp):
+            while (
+                not is_fp_closed(self._fp)
+                or (self._decoder and self._decoder.has_unconsumed_tail)
+            ):
                 data = self.read(amt=amt, decode_content=decode_content)
 
                 if data:
@@ -840,7 +964,10 @@
                     break
                 chunk = self._handle_chunk(amt)
                 decoded = self._decode(
-                    chunk, decode_content=decode_content, flush_decoder=False
+                    chunk,
+                    decode_content=decode_content,
+                    flush_decoder=False,
+                    max_length=amt,
                 )
                 if decoded:
                     yield decoded
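The core mechanism this patch backports is bounding how much output one decompress step may produce, carrying zlib's unconsumed_tail into the next call instead of inflating everything at once. The snippet below is a minimal standalone sketch of that technique using plain zlib, not the patched urllib3 classes; the 64 KiB step size is an arbitrary choice for illustration.

# Standalone sketch: bounded, incremental zlib decompression of a "bomb"-like payload.
import zlib

payload = zlib.compress(b"A" * 1_000_000)  # tiny input, ~1 MB of output

decoder = zlib.decompressobj()
pending = payload
total = 0
while pending or decoder.unconsumed_tail:
    # Cap each step at 64 KiB; compressed data zlib could not consume yet is
    # reported in unconsumed_tail and must be fed back on the next call.
    chunk = decoder.decompress(decoder.unconsumed_tail + pending, 64 * 1024)
    pending = b""
    if not chunk:
        break
    total += len(chunk)

print(total)  # 1000000, produced in bounded 64 KiB steps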
RHEL-140787-fix-bundled-urllib3-CVE-2026-21441.patch (new file): 29 lines
@@ -0,0 +1,29 @@
--- a/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/response.py 2023-10-17 19:42:56.000000000 +0200
+++ b/aliyun/aliyunsdkcore/vendored/requests/packages/urllib3/response.py 2026-01-13 14:17:48.477104360 +0100
@@ -292,7 +292,11 @@
         Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
         """
         try:
-            self.read()
+            self.read(
+                # Do not spend resources decoding the content unless
+                # decoding has already been initiated.
+                decode_content=self._has_decoded_content,
+            )
         except (HTTPError, SocketError, BaseSSLError, HTTPException):
             pass

--- a/gcp/google-cloud-sdk/lib/third_party/urllib3/response.py 2023-10-17 19:42:56.000000000 +0200
+++ b/gcp/google-cloud-sdk/lib/third_party/urllib3/response.py 2026-01-13 14:17:48.477104360 +0100
@@ -292,7 +292,11 @@
         Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
         """
         try:
-            self.read()
+            self.read(
+                # Do not spend resources decoding the content unless
+                # decoding has already been initiated.
+                decode_content=self._has_decoded_content,
+            )
         except (HTTPError, SocketError, BaseSSLError, HTTPException):
             pass
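The change above only skips decoding while draining leftover body data; it relies on a _has_decoded_content flag that the bundled response object is assumed to set once a read with decode_content=True has happened. A simplified standalone sketch of that pattern, with illustrative names rather than the vendored implementation:

# Sketch: decode during drain only if a decoded read already happened.
class MiniResponse:
    def __init__(self, chunks):
        self._chunks = list(chunks)
        self._has_decoded_content = False

    def read(self, decode_content=True):
        if decode_content:
            # A real implementation would decompress here.
            self._has_decoded_content = True
        data = b"".join(self._chunks)
        self._chunks = []
        return data

    def drain_conn(self):
        # Discard leftover body; avoid paying decompression cost (and any
        # decompression-bomb risk) unless decoding was already in progress.
        self.read(decode_content=self._has_decoded_content)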
RHEL-142448-fix-bundled-pyasn1-CVE-2026-23490.patch (new file): 28 lines
@@ -0,0 +1,28 @@
--- a/gcp/google-cloud-sdk/lib/third_party/pyasn1/codec/ber/decoder.py 2019-10-17 07:00:19.000000000 +0200
+++ b/gcp/google-cloud-sdk/lib/third_party/pyasn1/codec/ber/decoder.py 2026-01-27 10:43:12.757563432 +0100
@@ -22,6 +22,10 @@
 
 noValue = base.noValue
 
+# Maximum number of continuation octets (high-bit set) allowed per OID arc.
+# 20 octets allows up to 140-bit integers, supporting UUID-based OIDs
+MAX_OID_ARC_CONTINUATION_OCTETS = 20
+
 
 class AbstractDecoder(object):
     protoComponent = None
@@ -342,7 +346,14 @@
                 # Construct subid from a number of octets
                 nextSubId = subId
                 subId = 0
+                continuationOctetCount = 0
                 while nextSubId >= 128:
+                    continuationOctetCount += 1
+                    if continuationOctetCount > MAX_OID_ARC_CONTINUATION_OCTETS:
+                        raise error.PyAsn1Error(
+                            'OID arc exceeds maximum continuation octets limit (%d) '
+                            'at position %d' % (MAX_OID_ARC_CONTINUATION_OCTETS, index)
+                        )
                     subId = (subId << 7) + (nextSubId & 0x7F)
                     if index >= substrateLen:
                         raise error.SubstrateUnderrunError(
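The limit above bounds base-128 ("continuation octet") decoding of a single OID arc so a malicious BER encoding cannot force an arbitrarily large integer. A standalone sketch of the same idea, with illustrative names rather than the pyasn1 API:

# Sketch: decode one base-128 OID arc with a cap on continuation octets.
MAX_OID_ARC_CONTINUATION_OCTETS = 20  # same limit as the patch

def decode_oid_arc(octets, offset):
    """Decode one arc starting at `offset`; return (value, new_offset)."""
    arc = 0
    continuation = 0
    while True:
        byte = octets[offset]
        offset += 1
        arc = (arc << 7) | (byte & 0x7F)
        if not byte & 0x80:  # high bit clear: final octet of this arc
            return arc, offset
        continuation += 1
        if continuation > MAX_OID_ARC_CONTINUATION_OCTETS:
            raise ValueError(
                "OID arc exceeds maximum continuation octets limit (%d)"
                % MAX_OID_ARC_CONTINUATION_OCTETS
            )

print(decode_oid_arc(b"\x81\x48", 0))  # (200, 2): 0x81 0x48 encodes arc 200
# decode_oid_arc(b"\xff" * 64 + b"\x7f", 0) raises ValueError instead of
# building an unbounded integer.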
Some files were not shown because too many files have changed in this diff.