import UBI squid-5.5-6.el9_3.8
parent 6e185ba669
commit 576a3f125b
50
SOURCES/squid-5.5-CVE-2023-50269.patch
Normal file
@@ -0,0 +1,50 @@
diff --git a/src/ClientRequestContext.h b/src/ClientRequestContext.h
index 55a7a43..94a8700 100644
--- a/src/ClientRequestContext.h
+++ b/src/ClientRequestContext.h
@@ -80,6 +80,10 @@ public:
#endif
ErrorState *error; ///< saved error page for centralized/delayed processing
bool readNextRequest; ///< whether Squid should read after error handling
+
+#if FOLLOW_X_FORWARDED_FOR
+ size_t currentXffHopNumber = 0; ///< number of X-Forwarded-For header values processed so far
+#endif
};

#endif /* SQUID_CLIENTREQUESTCONTEXT_H */
diff --git a/src/client_side_request.cc b/src/client_side_request.cc
index f44849e..c7c09d4 100644
--- a/src/client_side_request.cc
+++ b/src/client_side_request.cc
@@ -80,6 +80,11 @@
static const char *const crlf = "\r\n";

#if FOLLOW_X_FORWARDED_FOR
+
+#if !defined(SQUID_X_FORWARDED_FOR_HOP_MAX)
+#define SQUID_X_FORWARDED_FOR_HOP_MAX 64
+#endif
+
static void clientFollowXForwardedForCheck(Acl::Answer answer, void *data);
#endif /* FOLLOW_X_FORWARDED_FOR */

@@ -485,8 +490,16 @@ clientFollowXForwardedForCheck(Acl::Answer answer, void *data)
/* override the default src_addr tested if we have to go deeper than one level into XFF */
Filled(calloutContext->acl_checklist)->src_addr = request->indirect_client_addr;
}
- calloutContext->acl_checklist->nonBlockingCheck(clientFollowXForwardedForCheck, data);
- return;
+ if (++calloutContext->currentXffHopNumber < SQUID_X_FORWARDED_FOR_HOP_MAX) {
+ calloutContext->acl_checklist->nonBlockingCheck(clientFollowXForwardedForCheck, data);
+ return;
+ }
+ const auto headerName = Http::HeaderLookupTable.lookup(Http::HdrType::X_FORWARDED_FOR).name;
+ debugs(28, DBG_CRITICAL, "ERROR: Ignoring trailing " << headerName << " addresses" <<
+ Debug::Extra << "addresses allowed by follow_x_forwarded_for: " << calloutContext->currentXffHopNumber <<
+ Debug::Extra << "last/accepted address: " << request->indirect_client_addr <<
+ Debug::Extra << "ignored trailing addresses: " << request->x_forwarded_for_iterator);
+ // fall through to resume clientAccessCheck() processing
}
}
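
For context, the guard above caps how many X-Forwarded-For hops Squid will evaluate, so a request with thousands of forged addresses cannot force unbounded ACL re-checks. The standalone sketch below illustrates the same capping pattern outside Squid; the names (HopMax, parseXffChain) and the naive comma splitting are illustrative assumptions, not Squid's actual code or API.

// Self-contained sketch of the hop-capping idea: stop walking an
// X-Forwarded-For chain after a fixed number of hops and ignore the rest.
#include <cstddef>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

static const size_t HopMax = 64; // mirrors SQUID_X_FORWARDED_FOR_HOP_MAX

// Returns the addresses accepted before the hop limit was reached.
std::vector<std::string> parseXffChain(const std::string &xff)
{
    std::vector<std::string> accepted;
    std::stringstream ss(xff);
    std::string hop;
    size_t hopNumber = 0;
    while (std::getline(ss, hop, ',')) {
        if (++hopNumber > HopMax) {
            std::cerr << "ERROR: ignoring trailing X-Forwarded-For addresses after "
                      << HopMax << " hops\n";
            break; // keep what we already accepted, ignore the rest
        }
        accepted.push_back(hop);
    }
    return accepted;
}

int main()
{
    std::string huge;
    for (int i = 0; i < 1000; ++i)
        huge += "10.0.0.1,"; // a maliciously long header value
    std::cout << "accepted " << parseXffChain(huge).size() << " hops\n"; // prints 64
    return 0;
}
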
192
SOURCES/squid-5.5-CVE-2024-25111.patch
Normal file
@@ -0,0 +1,192 @@
diff --git a/src/http.cc b/src/http.cc
index 98e3969..8b55bf3 100644
--- a/src/http.cc
+++ b/src/http.cc
@@ -54,6 +54,7 @@
#include "rfc1738.h"
#include "SquidConfig.h"
#include "SquidTime.h"
+#include "SquidMath.h"
#include "StatCounters.h"
#include "Store.h"
#include "StrList.h"
@@ -1235,18 +1236,26 @@ HttpStateData::readReply(const CommIoCbParams &io)
* Plus, it breaks our lame *HalfClosed() detection
*/

- Must(maybeMakeSpaceAvailable(true));
- CommIoCbParams rd(this); // will be expanded with ReadNow results
- rd.conn = io.conn;
- rd.size = entry->bytesWanted(Range<size_t>(0, inBuf.spaceSize()));
+ const auto moreDataPermission = canBufferMoreReplyBytes();
+ if (!moreDataPermission) {
+ abortTransaction("ready to read required data, but the read buffer is full and cannot be drained");
+ return;
+ }
+
+ const auto readSizeMax = maybeMakeSpaceAvailable(moreDataPermission.value());
+ // TODO: Move this logic inside maybeMakeSpaceAvailable():
+ const auto readSizeWanted = readSizeMax ? entry->bytesWanted(Range<size_t>(0, readSizeMax)) : 0;

- if (rd.size <= 0) {
+ if (readSizeWanted <= 0) {
assert(entry->mem_obj);
AsyncCall::Pointer nilCall;
entry->mem_obj->delayRead(DeferredRead(readDelayed, this, CommRead(io.conn, NULL, 0, nilCall)));
return;
}

+ CommIoCbParams rd(this); // will be expanded with ReadNow results
+ rd.conn = io.conn;
+ rd.size = readSizeWanted;
switch (Comm::ReadNow(rd, inBuf)) {
case Comm::INPROGRESS:
if (inBuf.isEmpty())
@@ -1617,8 +1626,10 @@ HttpStateData::maybeReadVirginBody()
if (!Comm::IsConnOpen(serverConnection) || fd_table[serverConnection->fd].closing())
return;

- if (!maybeMakeSpaceAvailable(false))
+ if (!canBufferMoreReplyBytes()) {
+ abortTransaction("more response bytes required, but the read buffer is full and cannot be drained");
return;
+ }

// XXX: get rid of the do_next_read flag
// check for the proper reasons preventing read(2)
@@ -1636,40 +1647,78 @@ HttpStateData::maybeReadVirginBody()
Comm::Read(serverConnection, call);
}

-bool
-HttpStateData::maybeMakeSpaceAvailable(bool doGrow)
+/// Desired inBuf capacity based on various capacity preferences/limits:
+/// * a smaller buffer may not hold enough for look-ahead header/body parsers;
+/// * a smaller buffer may result in inefficient tiny network reads;
+/// * a bigger buffer may waste memory;
+/// * a bigger buffer may exceed SBuf storage capabilities (SBuf::maxSize);
+size_t
+HttpStateData::calcReadBufferCapacityLimit() const
{
- // how much we are allowed to buffer
- const int limitBuffer = (flags.headers_parsed ? Config.readAheadGap : Config.maxReplyHeaderSize);
-
- if (limitBuffer < 0 || inBuf.length() >= (SBuf::size_type)limitBuffer) {
- // when buffer is at or over limit already
- debugs(11, 7, "will not read up to " << limitBuffer << ". buffer has (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
- debugs(11, DBG_DATA, "buffer has {" << inBuf << "}");
- // Process next response from buffer
- processReply();
- return false;
+ if (!flags.headers_parsed)
+ return Config.maxReplyHeaderSize;
+
+ // XXX: Our inBuf is not used to maintain the read-ahead gap, and using
+ // Config.readAheadGap like this creates huge read buffers for large
+ // read_ahead_gap values. TODO: Switch to using tcp_recv_bufsize as the
+ // primary read buffer capacity factor.
+ //
+ // TODO: Cannot reuse throwing NaturalCast() here. Consider removing
+ // .value() dereference in NaturalCast() or add/use NaturalCastOrMax().
+ const auto configurationPreferences = NaturalSum<size_t>(Config.readAheadGap).value_or(SBuf::maxSize);
+
+ // TODO: Honor TeChunkedParser look-ahead and trailer parsing requirements
+ // (when explicit configurationPreferences are set too low).
+
+ return std::min<size_t>(configurationPreferences, SBuf::maxSize);
+}
+
+/// The maximum number of virgin reply bytes we may buffer before we violate
+/// the currently configured response buffering limits.
+/// \retval std::nullopt means that no more virgin response bytes can be read
+/// \retval 0 means that more virgin response bytes may be read later
+/// \retval >0 is the number of bytes that can be read now (subject to other constraints)
+std::optional<size_t>
+HttpStateData::canBufferMoreReplyBytes() const
+{
+#if USE_ADAPTATION
+ // If we do not check this now, we may say the final "no" prematurely below
+ // because inBuf.length() will decrease as adaptation drains buffered bytes.
+ if (responseBodyBuffer) {
+ debugs(11, 3, "yes, but waiting for adaptation to drain read buffer");
+ return 0; // yes, we may be able to buffer more (but later)
+ }
+#endif
+
+ const auto maxCapacity = calcReadBufferCapacityLimit();
+ if (inBuf.length() >= maxCapacity) {
+ debugs(11, 3, "no, due to a full buffer: " << inBuf.length() << '/' << inBuf.spaceSize() << "; limit: " << maxCapacity);
+ return std::nullopt; // no, configuration prohibits buffering more
}

+ const auto maxReadSize = maxCapacity - inBuf.length(); // positive
+ debugs(11, 7, "yes, may read up to " << maxReadSize << " into " << inBuf.length() << '/' << inBuf.spaceSize());
+ return maxReadSize; // yes, can read up to this many bytes (subject to other constraints)
+}
+
+/// prepare read buffer for reading
+/// \return the maximum number of bytes the caller should attempt to read
+/// \retval 0 means that the caller should delay reading
+size_t
+HttpStateData::maybeMakeSpaceAvailable(const size_t maxReadSize)
+{
// how much we want to read
- const size_t read_size = calcBufferSpaceToReserve(inBuf.spaceSize(), (limitBuffer - inBuf.length()));
+ const size_t read_size = calcBufferSpaceToReserve(inBuf.spaceSize(), maxReadSize);

- if (!read_size) {
+ if (read_size < 2) {
debugs(11, 7, "will not read up to " << read_size << " into buffer (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
- return false;
+ return 0;
}

- // just report whether we could grow or not, do not actually do it
- if (doGrow)
- return (read_size >= 2);
-
// we may need to grow the buffer
inBuf.reserveSpace(read_size);
- debugs(11, 8, (!flags.do_next_read ? "will not" : "may") <<
- " read up to " << read_size << " bytes info buf(" << inBuf.length() << "/" << inBuf.spaceSize() <<
- ") from " << serverConnection);
-
- return (inBuf.spaceSize() >= 2); // only read if there is 1+ bytes of space available
+ debugs(11, 7, "may read up to " << read_size << " bytes info buffer (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
+ return read_size;
}

/// called after writing the very last request byte (body, last-chunk, etc)
diff --git a/src/http.h b/src/http.h
index e70cd7e..f7ed40d 100644
--- a/src/http.h
+++ b/src/http.h
@@ -15,6 +15,8 @@
#include "http/StateFlags.h"
#include "sbuf/SBuf.h"

+#include <optional>
+
class FwdState;
class HttpHeader;
class String;
@@ -112,16 +114,9 @@ private:

void abortTransaction(const char *reason) { abortAll(reason); } // abnormal termination

- /**
- * determine if read buffer can have space made available
- * for a read.
- *
- * \param grow whether to actually expand the buffer
- *
- * \return whether the buffer can be grown to provide space
- * regardless of whether the grow actually happened.
- */
- bool maybeMakeSpaceAvailable(bool grow);
+ size_t calcReadBufferCapacityLimit() const;
+ std::optional<size_t> canBufferMoreReplyBytes() const;
+ size_t maybeMakeSpaceAvailable(size_t maxReadSize);

// consuming request body
virtual void handleMoreRequestBodyAvailable();
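
For context, the core of this patch is the three-way answer that canBufferMoreReplyBytes() gives its callers: std::nullopt (abort, the buffer can never drain), 0 (wait, the buffer will drain later), or a positive read budget. The standalone sketch below illustrates that tri-state contract with a hypothetical ReplyBuffer struct; it is a simplified model under assumed field names, not Squid's HttpStateData.

// Self-contained sketch of the optional<size_t> "may we read more?" contract.
#include <cstddef>
#include <iostream>
#include <optional>

struct ReplyBuffer {
    size_t buffered = 0;              // bytes currently held (like inBuf.length())
    size_t capacityLimit = 64 * 1024; // like calcReadBufferCapacityLimit()
    bool drainPending = false;        // like a pending adaptation drain
};

// \retval std::nullopt  no more bytes may ever be buffered: abort
// \retval 0             cannot buffer now, but retry after draining
// \retval >0            number of bytes that may be read now
std::optional<size_t> canBufferMoreReplyBytes(const ReplyBuffer &b)
{
    if (b.drainPending)
        return 0; // yes, but later
    if (b.buffered >= b.capacityLimit)
        return std::nullopt; // configured limit prohibits buffering more
    return b.capacityLimit - b.buffered; // positive read budget
}

int main()
{
    ReplyBuffer full{64 * 1024, 64 * 1024, false};
    ReplyBuffer draining{64 * 1024, 64 * 1024, true};
    ReplyBuffer roomy{1024, 64 * 1024, false};

    for (const auto *b : {&full, &draining, &roomy}) {
        const auto permission = canBufferMoreReplyBytes(*b);
        if (!permission)
            std::cout << "abort: buffer full and cannot be drained\n";
        else if (*permission == 0)
            std::cout << "defer: wait for the buffer to drain\n";
        else
            std::cout << "read up to " << *permission << " bytes\n";
    }
    return 0;
}
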
105
SOURCES/squid-5.5-CVE-2024-25617.patch
Normal file
@@ -0,0 +1,105 @@
diff --git a/src/SquidString.h b/src/SquidString.h
index e36cd27..ea613ad 100644
--- a/src/SquidString.h
+++ b/src/SquidString.h
@@ -140,7 +140,16 @@ private:

size_type len_ = 0; /* current length */

- static const size_type SizeMax_ = 65535; ///< 64K limit protects some fixed-size buffers
+ /// An earlier 64KB limit was meant to protect some fixed-size buffers, but
+ /// (a) we do not know where those buffers are (or whether they still exist)
+ /// (b) too many String users unknowingly exceeded that limit and asserted.
+ /// We are now using a larger limit to reduce the number of (b) cases,
+ /// especially cases where "compact" lists of items grow 50% in size when we
+ /// convert them to canonical form. The new limit is selected to withstand
+ /// concatenation and ~50% expansion of two HTTP headers limited by default
+ /// request_header_max_size and reply_header_max_size settings.
+ static const size_type SizeMax_ = 3*64*1024 - 1;
+
/// returns true after increasing the first argument by extra if the sum does not exceed SizeMax_
static bool SafeAdd(size_type &base, size_type extra) { if (extra <= SizeMax_ && base <= SizeMax_ - extra) { base += extra; return true; } return false; }

diff --git a/src/cache_cf.cc b/src/cache_cf.cc
index cb746dc..c4ade96 100644
--- a/src/cache_cf.cc
+++ b/src/cache_cf.cc
@@ -950,6 +950,18 @@ configDoConfigure(void)
(uint32_t)Config.maxRequestBufferSize, (uint32_t)Config.maxRequestHeaderSize);
}

+ // Warn about the dangers of exceeding String limits when manipulating HTTP
+ // headers. Technically, we do not concatenate _requests_, so we could relax
+ // their check, but we keep the two checks the same for simplicity sake.
+ const auto safeRawHeaderValueSizeMax = (String::SizeMaxXXX()+1)/3;
+ // TODO: static_assert(safeRawHeaderValueSizeMax >= 64*1024); // no WARNINGs for default settings
+ if (Config.maxRequestHeaderSize > safeRawHeaderValueSizeMax)
+ debugs(3, DBG_CRITICAL, "WARNING: Increasing request_header_max_size beyond " << safeRawHeaderValueSizeMax <<
+ " bytes makes Squid more vulnerable to denial-of-service attacks; configured value: " << Config.maxRequestHeaderSize << " bytes");
+ if (Config.maxReplyHeaderSize > safeRawHeaderValueSizeMax)
+ debugs(3, DBG_CRITICAL, "WARNING: Increasing reply_header_max_size beyond " << safeRawHeaderValueSizeMax <<
+ " bytes makes Squid more vulnerable to denial-of-service attacks; configured value: " << Config.maxReplyHeaderSize << " bytes");
+
/*
* Disable client side request pipelining if client_persistent_connections OFF.
* Waste of resources queueing any pipelined requests when the first will close the connection.
diff --git a/src/cf.data.pre b/src/cf.data.pre
index 67a66b0..61a66f1 100644
--- a/src/cf.data.pre
+++ b/src/cf.data.pre
@@ -6489,11 +6489,14 @@ TYPE: b_size_t
DEFAULT: 64 KB
LOC: Config.maxRequestHeaderSize
DOC_START
- This specifies the maximum size for HTTP headers in a request.
- Request headers are usually relatively small (about 512 bytes).
- Placing a limit on the request header size will catch certain
- bugs (for example with persistent connections) and possibly
- buffer-overflow or denial-of-service attacks.
+ This directives limits the header size of a received HTTP request
+ (including request-line). Increasing this limit beyond its 64 KB default
+ exposes certain old Squid code to various denial-of-service attacks. This
+ limit also applies to received FTP commands.
+
+ This limit has no direct affect on Squid memory consumption.
+
+ Squid does not check this limit when sending requests.
DOC_END

NAME: reply_header_max_size
@@ -6502,11 +6505,14 @@ TYPE: b_size_t
DEFAULT: 64 KB
LOC: Config.maxReplyHeaderSize
DOC_START
- This specifies the maximum size for HTTP headers in a reply.
- Reply headers are usually relatively small (about 512 bytes).
- Placing a limit on the reply header size will catch certain
- bugs (for example with persistent connections) and possibly
- buffer-overflow or denial-of-service attacks.
+ This directives limits the header size of a received HTTP response
+ (including status-line). Increasing this limit beyond its 64 KB default
+ exposes certain old Squid code to various denial-of-service attacks. This
+ limit also applies to FTP command responses.
+
+ Squid also checks this limit when loading hit responses from disk cache.
+
+ Squid does not check this limit when sending responses.
DOC_END

NAME: request_body_max_size
diff --git a/src/http.cc b/src/http.cc
index 7c9ae70..98e3969 100644
--- a/src/http.cc
+++ b/src/http.cc
@@ -1926,8 +1926,9 @@ HttpStateData::httpBuildRequestHeader(HttpRequest * request,

String strFwd = hdr_in->getList(Http::HdrType::X_FORWARDED_FOR);

- // if we cannot double strFwd size, then it grew past 50% of the limit
- if (!strFwd.canGrowBy(strFwd.size())) {
+ // Detect unreasonably long header values. And paranoidly check String
+ // limits: a String ought to accommodate two reasonable-length values.
+ if (strFwd.size() > 32*1024 || !strFwd.canGrowBy(strFwd.size())) {
// There is probably a forwarding loop with Via detection disabled.
// If we do nothing, String will assert on overflow soon.
// TODO: Terminate all transactions with huge XFF?
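
For context, the larger SizeMax_ above only stays safe because every String growth goes through an overflow-checked helper such as SafeAdd(). The standalone sketch below demonstrates that check with the same 3*64*1024 - 1 cap; safeAdd() here is a free-function stand-in for illustration, not Squid's String implementation.

// Self-contained sketch of the overflow-safe, capped growth check.
#include <cassert>
#include <cstdint>
#include <iostream>

using size_type = uint32_t;

static const size_type SizeMax = 3 * 64 * 1024 - 1; // 196607, as in the patch

// Increases base by extra only if the sum stays within SizeMax; returns false
// (leaving base untouched) otherwise. Written so base + extra cannot overflow.
bool safeAdd(size_type &base, size_type extra)
{
    if (extra <= SizeMax && base <= SizeMax - extra) {
        base += extra;
        return true;
    }
    return false;
}

int main()
{
    size_type len = 64 * 1024;            // a 64 KB header value
    assert(safeAdd(len, 64 * 1024));      // concatenating another 64 KB fits
    assert(safeAdd(len, 64 * 1024 - 1));  // further growth up to the cap fits (196607)
    assert(!safeAdd(len, 1));             // one more byte would exceed the cap
    std::cout << "final length: " << len << " of max " << SizeMax << "\n";
    return 0;
}
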
@@ -2,7 +2,7 @@

Name: squid
Version: 5.5
-Release: 6%{?dist}.5
+Release: 6%{?dist}.8
Summary: The Squid proxy caching server
Epoch: 7
# See CREDITS for breakdown of non GPLv2+ code
@@ -68,6 +68,12 @@ Patch508: squid-5.5-CVE-2023-46724.patch
Patch509: squid-5.5-CVE-2023-49285.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2252923
Patch510: squid-5.5-CVE-2023-49286.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2264309
+Patch511: squid-5.5-CVE-2024-25617.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2268366
+Patch512: squid-5.5-CVE-2024-25111.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2254663
+Patch513: squid-5.5-CVE-2023-50269.patch

# cache_swap.sh
Requires: bash gawk
@@ -153,6 +159,9 @@ lookup program (dnsserver), a program for retrieving FTP data
%patch508 -p1 -b .CVE-2023-46724
%patch509 -p1 -b .CVE-2023-49285
%patch510 -p1 -b .CVE-2023-49286
+%patch511 -p1 -b .CVE-2024-25617
+%patch512 -p1 -b .CVE-2024-25111
+%patch513 -p1 -b .CVE-2023-50269

# https://bugzilla.redhat.com/show_bug.cgi?id=1679526
# Patch in the vendor documentation and used different location for documentation
@@ -379,6 +388,18 @@ fi


%changelog
+* Thu Mar 14 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-6.8
+- Resolves: RHEL-19555 - squid: denial of service in HTTP request
+  parsing (CVE-2023-50269)
+
+* Fri Mar 08 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-6.7
+- Resolves: RHEL-28614 - squid: Denial of Service in HTTP Chunked
+  Decoding (CVE-2024-25111)
+
+* Mon Feb 26 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-6.6
+- Resolves: RHEL-26091 - squid: denial of service in HTTP header
+  parser (CVE-2024-25617)
+
* Wed Dec 06 2023 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-6.5
- Resolves: RHEL-18484 - squid: Buffer over-read in the HTTP Message processing
  feature (CVE-2023-49285)