Compare commits


No commits in common. "c8s-stream-4" and "c8-stream-4" have entirely different histories.

14 changed files with 8440 additions and 4 deletions


@@ -0,0 +1,24 @@
diff --git a/src/anyp/Uri.cc b/src/anyp/Uri.cc
index 20b9bf1..81ebb18 100644
--- a/src/anyp/Uri.cc
+++ b/src/anyp/Uri.cc
@@ -173,6 +173,10 @@ urlInitialize(void)
assert(0 == matchDomainName("*.foo.com", ".foo.com", mdnHonorWildcards));
assert(0 != matchDomainName("*.foo.com", "foo.com", mdnHonorWildcards));
+ assert(0 != matchDomainName("foo.com", ""));
+ assert(0 != matchDomainName("foo.com", "", mdnHonorWildcards));
+ assert(0 != matchDomainName("foo.com", "", mdnRejectSubsubDomains));
+
/* more cases? */
}
@@ -756,6 +760,8 @@ matchDomainName(const char *h, const char *d, MatchDomainNameFlags flags)
return -1;
dl = strlen(d);
+ if (dl == 0)
+ return 1;
/*
* Start at the ends of the two strings and work towards the
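
For context, matchDomainName() returns 0 on a match and non-zero otherwise, so the new dl == 0 guard makes an empty domain match nothing before the backward scan starts. A minimal sketch of the patched behavior (a hypothetical simplification; the real function also honors the wildcard and sub-subdomain flags exercised by the new assertions):

    #include <cassert>
    #include <cstring>
    #include <strings.h> // strcasecmp() (POSIX)

    // Hypothetical simplification of matchDomainName(): 0 means "h matches d".
    static int matchDomainSketch(const char *h, const char *d)
    {
        const size_t dl = strlen(d);
        if (dl == 0)
            return 1; // the patched guard: an empty domain matches nothing
        const size_t hl = strlen(h);
        if (hl < dl)
            return 1;
        // Plain suffix comparison; the real code walks both strings backwards
        // and distinguishes exact host matches from subdomain matches.
        return strcasecmp(h + (hl - dl), d);
    }

    int main()
    {
        assert(matchDomainSketch("foo.com", "foo.com") == 0);
        assert(matchDomainSketch("foo.com", "") != 0); // matches the new asserts
    }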

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,23 @@
diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc
index 6a9736f..0a883fa 100644
--- a/src/auth/digest/Config.cc
+++ b/src/auth/digest/Config.cc
@@ -847,11 +847,15 @@ Auth::Digest::Config::decode(char const *proxy_auth, const char *aRequestRealm)
break;
case DIGEST_NC:
- if (value.size() != 8) {
+ if (value.size() == 8) {
+ // for historical reasons, the nc value MUST be exactly 8 bytes
+ static_assert(sizeof(digest_request->nc) == 8 + 1, "bad nc buffer size");
+ xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
+ debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
+ } else {
debugs(29, 9, "Invalid nc '" << value << "' in '" << temp << "'");
+ digest_request->nc[0] = 0;
}
- xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
- debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
break;
case DIGEST_CNONCE:
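
The restructured branch copies the value only when it is exactly 8 bytes, so the copy can never overrun the 9-byte nc buffer, and a malformed nonce count now yields an empty string rather than a partial copy. A self-contained sketch of that pattern (DigestRequestSketch and setNonceCount are illustrative stand-ins; Squid uses xstrncpy() on digest_request->nc):

    #include <cstring>

    struct DigestRequestSketch {
        char nc[8 + 1]; // nonce count: exactly 8 hex digits plus a terminating NUL
    };

    // Mirrors the patched DIGEST_NC branch: copy only well-formed 8-byte values;
    // anything else leaves an empty string instead of a partial or overlong copy.
    static void setNonceCount(DigestRequestSketch &req, const char *value, const size_t size)
    {
        static_assert(sizeof(req.nc) == 8 + 1, "bad nc buffer size");
        if (size == 8) {
            memcpy(req.nc, value, 8);
            req.nc[8] = '\0';
        } else {
            req.nc[0] = '\0'; // invalid nc: reject rather than copy
        }
    }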


@@ -0,0 +1,30 @@
commit 77b3fb4df0f126784d5fd4967c28ed40eb8d521b
Author: Alex Rousskov <rousskov@measurement-factory.com>
Date: Wed Oct 25 19:41:45 2023 +0000
RFC 1123: Fix date parsing (#1538)
The bug was discovered and detailed by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/datetime-overflow.html
where it was filed as "1-Byte Buffer OverRead in RFC 1123 date/time
Handling".
diff --git a/lib/rfc1123.c b/lib/rfc1123.c
index e5bf9a4d7..cb484cc00 100644
--- a/lib/rfc1123.c
+++ b/lib/rfc1123.c
@@ -50,7 +50,13 @@ make_month(const char *s)
char month[3];
month[0] = xtoupper(*s);
+ if (!month[0])
+ return -1; // protects *(s + 1) below
+
month[1] = xtolower(*(s + 1));
+ if (!month[1])
+ return -1; // protects *(s + 2) below
+
month[2] = xtolower(*(s + 2));
for (i = 0; i < 12; i++)
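
Both guards stop at the string's NUL terminator, so a value that ends mid-month-name (for example, a date truncated after "1 J") returns -1 instead of reading one or two bytes past the buffer. A hedged reconstruction of the guarded helper (the month table and casts are assumptions; the real code lives in lib/rfc1123.c):

    #include <cctype>
    #include <cstring>

    // Hedged reconstruction of make_month(): returns 0-11, or -1 on bad input.
    static int make_month_sketch(const char *s)
    {
        static const char *Months[12] = {
            "Jan", "Feb", "Mar", "Apr", "May", "Jun",
            "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
        };
        char month[3];
        month[0] = toupper(static_cast<unsigned char>(*s));
        if (!month[0])
            return -1; // protects *(s + 1) below
        month[1] = tolower(static_cast<unsigned char>(*(s + 1)));
        if (!month[1])
            return -1; // protects *(s + 2) below
        month[2] = tolower(static_cast<unsigned char>(*(s + 2)));
        for (int i = 0; i < 12; ++i)
            if (strncmp(Months[i], month, 3) == 0)
                return i;
        return -1;
    }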


@@ -0,0 +1,62 @@
diff --git a/src/ipc.cc b/src/ipc.cc
index 42e11e6..a68e623 100644
--- a/src/ipc.cc
+++ b/src/ipc.cc
@@ -19,6 +19,11 @@
#include "SquidConfig.h"
#include "SquidIpc.h"
#include "tools.h"
+#include <cstdlib>
+
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
static const char *hello_string = "hi there\n";
#ifndef HELLO_BUF_SZ
@@ -365,6 +370,22 @@ ipcCreate(int type, const char *prog, const char *const args[], const char *name
}
PutEnvironment();
+
+ // A dup(2) wrapper that reports and exits the process on errors. The
+ // exiting logic is only suitable for this child process context.
+ const auto dupOrExit = [prog,name](const int oldFd) {
+ const auto newFd = dup(oldFd);
+ if (newFd < 0) {
+ const auto savedErrno = errno;
+ debugs(54, DBG_CRITICAL, "ERROR: Helper process initialization failure: " << name <<
+ Debug::Extra << "helper (CHILD) PID: " << getpid() <<
+ Debug::Extra << "helper program name: " << prog <<
+ Debug::Extra << "dup(2) system call error for FD " << oldFd << ": " << xstrerr(savedErrno));
+ _exit(EXIT_FAILURE);
+ }
+ return newFd;
+ };
+
/*
* This double-dup stuff avoids problems when one of
* crfd, cwfd, or debug_log are in the range 0-2.
@@ -372,17 +393,16 @@ ipcCreate(int type, const char *prog, const char *const args[], const char *name
do {
/* First make sure 0-2 is occupied by something. Gets cleaned up later */
- x = dup(crfd);
- assert(x > -1);
- } while (x < 3 && x > -1);
+ x = dupOrExit(crfd);
+ } while (x < 3);
close(x);
- t1 = dup(crfd);
+ t1 = dupOrExit(crfd);
- t2 = dup(cwfd);
+ t2 = dupOrExit(cwfd);
- t3 = dup(fileno(debug_log));
+ t3 = dupOrExit(fileno(debug_log));
assert(t1 > 2 && t2 > 2 && t3 > 2);
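
The do/while above relies on dup(2) returning the lowest available descriptor: duplicating crfd until the result is >= 3 guarantees that fds 0-2 are occupied before the helper's pipes are wired up, and dupOrExit() turns a previously assert()-ed failure into a logged, clean child exit. The idiom in isolation (error reporting reduced to _exit(); the real lambda logs via debugs() first):

    #include <cstdlib>
    #include <unistd.h>

    // Occupy fds 0-2: dup(2) returns the lowest free descriptor, so once a
    // duplicate lands at >= 3, the stdin/stdout/stderr slots are all taken.
    static void occupyStdioFds(const int srcFd)
    {
        int x;
        do {
            x = dup(srcFd);
            if (x < 0)
                _exit(EXIT_FAILURE); // the real code reports errno via debugs() first
        } while (x < 3);
        close(x); // the last duplicate was only needed to probe past fd 2
    }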


@@ -0,0 +1,50 @@
diff --git a/src/ClientRequestContext.h b/src/ClientRequestContext.h
index fe2edf6..47aa935 100644
--- a/src/ClientRequestContext.h
+++ b/src/ClientRequestContext.h
@@ -81,6 +81,10 @@ public:
#endif
ErrorState *error; ///< saved error page for centralized/delayed processing
bool readNextRequest; ///< whether Squid should read after error handling
+
+#if FOLLOW_X_FORWARDED_FOR
+ size_t currentXffHopNumber = 0; ///< number of X-Forwarded-For header values processed so far
+#endif
};
#endif /* SQUID_CLIENTREQUESTCONTEXT_H */
diff --git a/src/client_side_request.cc b/src/client_side_request.cc
index 1c6ff62..b758f6f 100644
--- a/src/client_side_request.cc
+++ b/src/client_side_request.cc
@@ -78,6 +78,11 @@
static const char *const crlf = "\r\n";
#if FOLLOW_X_FORWARDED_FOR
+
+#if !defined(SQUID_X_FORWARDED_FOR_HOP_MAX)
+#define SQUID_X_FORWARDED_FOR_HOP_MAX 64
+#endif
+
static void clientFollowXForwardedForCheck(allow_t answer, void *data);
#endif /* FOLLOW_X_FORWARDED_FOR */
@@ -485,8 +490,16 @@ clientFollowXForwardedForCheck(allow_t answer, void *data)
/* override the default src_addr tested if we have to go deeper than one level into XFF */
Filled(calloutContext->acl_checklist)->src_addr = request->indirect_client_addr;
}
- calloutContext->acl_checklist->nonBlockingCheck(clientFollowXForwardedForCheck, data);
- return;
+ if (++calloutContext->currentXffHopNumber < SQUID_X_FORWARDED_FOR_HOP_MAX) {
+ calloutContext->acl_checklist->nonBlockingCheck(clientFollowXForwardedForCheck, data);
+ return;
+ }
+ const auto headerName = Http::HeaderLookupTable.lookup(Http::HdrType::X_FORWARDED_FOR).name;
+ debugs(28, DBG_CRITICAL, "ERROR: Ignoring trailing " << headerName << " addresses" <<
+ Debug::Extra << "addresses allowed by follow_x_forwarded_for: " << calloutContext->currentXffHopNumber <<
+ Debug::Extra << "last/accepted address: " << request->indirect_client_addr <<
+ Debug::Extra << "ignored trailing addresses: " << request->x_forwarded_for_iterator);
+ // fall through to resume clientAccessCheck() processing
}
}
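
Each clientFollowXForwardedForCheck() round consumes one address from the XFF chain, so capping the hop counter bounds the work a single request can trigger with an arbitrarily long header. A reduced model of the capped walk (kXffHopMax and the vector of hops are stand-ins for SQUID_X_FORWARDED_FOR_HOP_MAX and Squid's iterator state):

    #include <cstdio>
    #include <string>
    #include <vector>

    constexpr size_t kXffHopMax = 64; // mirrors SQUID_X_FORWARDED_FOR_HOP_MAX

    // Walk an X-Forwarded-For chain right to left, stopping at the hop cap.
    static std::string resolveIndirectClient(const std::vector<std::string> &hops)
    {
        std::string indirect = "direct-peer"; // placeholder for the TCP peer address
        size_t hopNumber = 0;
        for (auto it = hops.rbegin(); it != hops.rend(); ++it) {
            indirect = *it;
            if (++hopNumber >= kXffHopMax) {
                fprintf(stderr, "ERROR: ignoring %zu trailing XFF addresses\n",
                        hops.size() - hopNumber);
                break; // resume normal access checks, as the patch falls through
            }
        }
        return indirect;
    }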

File diff suppressed because it is too large

@@ -0,0 +1,31 @@
commit 8fcff9c09824b18628f010d26a04247f6a6cbcb8
Author: Alex Rousskov <rousskov@measurement-factory.com>
Date: Sun Nov 12 09:33:20 2023 +0000
Do not update StoreEntry expiration after errorAppendEntry() (#1580)
errorAppendEntry() is responsible for setting entry expiration times,
which it does by calling StoreEntry::storeErrorResponse() that calls
StoreEntry::negativeCache().
This change was triggered by a vulnerability report by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/cache-uaf.html where
it was filed as "Use-After-Free in Cache Manager Errors". The reported
"use after free" vulnerability was unknowingly addressed by 2022 commit
1fa761a that removed excessively long "reentrant" store_client calls
responsible for the disappearance of the properly locked StoreEntry in
this (and probably other) contexts.
diff --git a/src/cache_manager.cc b/src/cache_manager.cc
index 8055ece..fdcc9cf 100644
--- a/src/cache_manager.cc
+++ b/src/cache_manager.cc
@@ -323,7 +323,6 @@ CacheManager::Start(const Comm::ConnectionPointer &client, HttpRequest * request
const auto err = new ErrorState(ERR_INVALID_URL, Http::scNotFound, request);
err->url = xstrdup(entry->url());
errorAppendEntry(entry, err);
- entry->expires = squid_curtime;
return;
}
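
In other words, once errorAppendEntry() returns, the entry's expiration is already final and callers must not touch it again. A schematic of the responsibility chain named in the commit message (types and bodies are illustrative, not Squid's actual API):

    #include <ctime>

    // Illustrative chain: errorAppendEntry() -> storeErrorResponse() ->
    // negativeCache(); expiration is final when errorAppendEntry() returns.
    struct StoreEntrySketch {
        std::time_t expires = 0;
        void negativeCache() { expires = std::time(nullptr); }
        void storeErrorResponse() { negativeCache(); }
    };

    static void errorAppendEntrySketch(StoreEntrySketch &entry)
    {
        entry.storeErrorResponse();
        // Callers must not re-assign entry.expires afterwards, which is what
        // the removed `entry->expires = squid_curtime;` line used to do.
    }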


@@ -0,0 +1,193 @@
diff --git a/src/http.cc b/src/http.cc
index b006300..023e411 100644
--- a/src/http.cc
+++ b/src/http.cc
@@ -52,6 +52,7 @@
#include "rfc1738.h"
#include "SquidConfig.h"
#include "SquidTime.h"
+#include "SquidMath.h"
#include "StatCounters.h"
#include "Store.h"
#include "StrList.h"
@@ -1150,18 +1151,26 @@ HttpStateData::readReply(const CommIoCbParams &io)
* Plus, it breaks our lame *HalfClosed() detection
*/
- Must(maybeMakeSpaceAvailable(true));
- CommIoCbParams rd(this); // will be expanded with ReadNow results
- rd.conn = io.conn;
- rd.size = entry->bytesWanted(Range<size_t>(0, inBuf.spaceSize()));
+ size_t moreDataPermission = 0;
+ if ((!canBufferMoreReplyBytes(&moreDataPermission) || !moreDataPermission)) {
+ abortTransaction("ready to read required data, but the read buffer is full and cannot be drained");
+ return;
+ }
+
+ const auto readSizeMax = maybeMakeSpaceAvailable(moreDataPermission);
+ // TODO: Move this logic inside maybeMakeSpaceAvailable():
+ const auto readSizeWanted = readSizeMax ? entry->bytesWanted(Range<size_t>(0, readSizeMax)) : 0;
- if (rd.size <= 0) {
+ if (readSizeWanted <= 0) {
assert(entry->mem_obj);
AsyncCall::Pointer nilCall;
entry->mem_obj->delayRead(DeferredRead(readDelayed, this, CommRead(io.conn, NULL, 0, nilCall)));
return;
}
+ CommIoCbParams rd(this); // will be expanded with ReadNow results
+ rd.conn = io.conn;
+ rd.size = readSizeWanted;
switch (Comm::ReadNow(rd, inBuf)) {
case Comm::INPROGRESS:
if (inBuf.isEmpty())
@@ -1520,8 +1529,11 @@ HttpStateData::maybeReadVirginBody()
if (!Comm::IsConnOpen(serverConnection) || fd_table[serverConnection->fd].closing())
return;
- if (!maybeMakeSpaceAvailable(false))
+ size_t moreDataPermission = 0;
+ if ((!canBufferMoreReplyBytes(&moreDataPermission)) || !moreDataPermission) {
+ abortTransaction("more response bytes required, but the read buffer is full and cannot be drained");
return;
+ }
// XXX: get rid of the do_next_read flag
// check for the proper reasons preventing read(2)
@@ -1539,40 +1551,79 @@ HttpStateData::maybeReadVirginBody()
Comm::Read(serverConnection, call);
}
+/// Desired inBuf capacity based on various capacity preferences/limits:
+/// * a smaller buffer may not hold enough for look-ahead header/body parsers;
+/// * a smaller buffer may result in inefficient tiny network reads;
+/// * a bigger buffer may waste memory;
+/// * a bigger buffer may exceed SBuf storage capabilities (SBuf::maxSize);
+size_t
+HttpStateData::calcReadBufferCapacityLimit() const
+{
+ if (!flags.headers_parsed)
+ return Config.maxReplyHeaderSize;
+
+ // XXX: Our inBuf is not used to maintain the read-ahead gap, and using
+ // Config.readAheadGap like this creates huge read buffers for large
+ // read_ahead_gap values. TODO: Switch to using tcp_recv_bufsize as the
+ // primary read buffer capacity factor.
+ //
+ // TODO: Cannot reuse throwing NaturalCast() here. Consider removing
+ // .value() dereference in NaturalCast() or add/use NaturalCastOrMax().
+ const auto configurationPreferences = NaturalSum<size_t>(Config.readAheadGap).second ? NaturalSum<size_t>(Config.readAheadGap).first : SBuf::maxSize;
+
+ // TODO: Honor TeChunkedParser look-ahead and trailer parsing requirements
+ // (when explicit configurationPreferences are set too low).
+
+ return std::min<size_t>(configurationPreferences, SBuf::maxSize);
+}
+
+/// Whether and how many more virgin reply bytes we may buffer before we
+/// violate the currently configured response buffering limits.
+/// \retval false means that no more virgin response bytes can be read
+/// \retval true with *maxReadSize == 0 means more bytes may be read later
+/// \retval true with *maxReadSize > 0 permits reading that many bytes now (subject to other constraints)
bool
-HttpStateData::maybeMakeSpaceAvailable(bool doGrow)
+HttpStateData::canBufferMoreReplyBytes(size_t *maxReadSize) const
{
- // how much we are allowed to buffer
- const int limitBuffer = (flags.headers_parsed ? Config.readAheadGap : Config.maxReplyHeaderSize);
-
- if (limitBuffer < 0 || inBuf.length() >= (SBuf::size_type)limitBuffer) {
- // when buffer is at or over limit already
- debugs(11, 7, "will not read up to " << limitBuffer << ". buffer has (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
- debugs(11, DBG_DATA, "buffer has {" << inBuf << "}");
- // Process next response from buffer
- processReply();
- return false;
+#if USE_ADAPTATION
+ // If we do not check this now, we may say the final "no" prematurely below
+ // because inBuf.length() will decrease as adaptation drains buffered bytes.
+ if (responseBodyBuffer) {
+ debugs(11, 3, "yes, but waiting for adaptation to drain read buffer");
+ *maxReadSize = 0; // yes, we may be able to buffer more (but later)
+ return true;
+ }
+#endif
+
+ const auto maxCapacity = calcReadBufferCapacityLimit();
+ if (inBuf.length() >= maxCapacity) {
+ debugs(11, 3, "no, due to a full buffer: " << inBuf.length() << '/' << inBuf.spaceSize() << "; limit: " << maxCapacity);
+ return false; // no, configuration prohibits buffering more
}
+ *maxReadSize = (maxCapacity - inBuf.length()); // positive
+ debugs(11, 7, "yes, may read up to " << *maxReadSize << " into " << inBuf.length() << '/' << inBuf.spaceSize());
+ return true; // yes, can read up to this many bytes (subject to other constraints)
+}
+
+/// prepare read buffer for reading
+/// \return the maximum number of bytes the caller should attempt to read
+/// \retval 0 means that the caller should delay reading
+size_t
+HttpStateData::maybeMakeSpaceAvailable(const size_t maxReadSize)
+{
// how much we want to read
- const size_t read_size = calcBufferSpaceToReserve(inBuf.spaceSize(), (limitBuffer - inBuf.length()));
+ const size_t read_size = calcBufferSpaceToReserve(inBuf.spaceSize(), maxReadSize);
- if (!read_size) {
+ if (read_size < 2) {
debugs(11, 7, "will not read up to " << read_size << " into buffer (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
- return false;
+ return 0;
}
- // just report whether we could grow or not, do not actually do it
- if (doGrow)
- return (read_size >= 2);
-
// we may need to grow the buffer
inBuf.reserveSpace(read_size);
- debugs(11, 8, (!flags.do_next_read ? "will not" : "may") <<
- " read up to " << read_size << " bytes info buf(" << inBuf.length() << "/" << inBuf.spaceSize() <<
- ") from " << serverConnection);
-
- return (inBuf.spaceSize() >= 2); // only read if there is 1+ bytes of space available
+ debugs(11, 7, "may read up to " << read_size << " bytes into buffer (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
+ return read_size;
}
/// called after writing the very last request byte (body, last-chunk, etc)
diff --git a/src/http.h b/src/http.h
index 8965b77..007d2e6 100644
--- a/src/http.h
+++ b/src/http.h
@@ -15,6 +15,8 @@
#include "http/StateFlags.h"
#include "sbuf/SBuf.h"
+#include <optional>
+
class FwdState;
class HttpHeader;
@@ -107,16 +109,9 @@ private:
void abortTransaction(const char *reason) { abortAll(reason); } // abnormal termination
- /**
- * determine if read buffer can have space made available
- * for a read.
- *
- * \param grow whether to actually expand the buffer
- *
- * \return whether the buffer can be grown to provide space
- * regardless of whether the grow actually happened.
- */
- bool maybeMakeSpaceAvailable(bool grow);
+ size_t calcReadBufferCapacityLimit() const;
+ bool canBufferMoreReplyBytes(size_t *maxReadSize) const;
+ size_t maybeMakeSpaceAvailable(size_t maxReadSize);
// consuming request body
virtual void handleMoreRequestBodyAvailable();
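
The refactoring splits policy from mechanics: canBufferMoreReplyBytes() decides whether and how much more may be buffered, while maybeMakeSpaceAvailable() merely reserves space for an approved read size. A simplified model of the two-step flow (names, types, and the capacity rule are placeholders for the patched Squid logic):

    #include <algorithm>
    #include <cstddef>
    #include <string>

    struct ReadBufferModel {
        std::string inBuf;    // stands in for the SBuf read buffer
        size_t capacityLimit; // e.g. reply_header_max_size before headers parse

        // Policy: false means "never read more"; *maxReadSize == 0 means "later".
        bool canBufferMore(size_t *maxReadSize) const
        {
            if (inBuf.size() >= capacityLimit)
                return false; // configuration prohibits buffering more
            *maxReadSize = capacityLimit - inBuf.size(); // positive
            return true;
        }

        // Mechanics: reserve space; returning 0 tells the caller to delay reading.
        size_t makeSpaceAvailable(const size_t maxReadSize)
        {
            const size_t readSize = std::min(maxReadSize, capacityLimit - inBuf.size());
            if (readSize < 2)
                return 0;
            inBuf.reserve(inBuf.size() + readSize);
            return readSize;
        }
    };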


@@ -0,0 +1,105 @@
diff --git a/src/SquidString.h b/src/SquidString.h
index a791885..b9aef38 100644
--- a/src/SquidString.h
+++ b/src/SquidString.h
@@ -114,7 +114,16 @@ private:
size_type len_; /* current length */
- static const size_type SizeMax_ = 65535; ///< 64K limit protects some fixed-size buffers
+ /// An earlier 64KB limit was meant to protect some fixed-size buffers, but
+ /// (a) we do not know where those buffers are (or whether they still exist)
+ /// (b) too many String users unknowingly exceeded that limit and asserted.
+ /// We are now using a larger limit to reduce the number of (b) cases,
+ /// especially cases where "compact" lists of items grow 50% in size when we
+ /// convert them to canonical form. The new limit is selected to withstand
+ /// concatenation and ~50% expansion of two HTTP headers limited by default
+ /// request_header_max_size and reply_header_max_size settings.
+ static const size_type SizeMax_ = 3*64*1024 - 1;
+
/// returns true after increasing the first argument by extra if the sum does not exceed SizeMax_
static bool SafeAdd(size_type &base, size_type extra) { if (extra <= SizeMax_ && base <= SizeMax_ - extra) { base += extra; return true; } return false; }
diff --git a/src/cache_cf.cc b/src/cache_cf.cc
index a9c1b7e..46f07bb 100644
--- a/src/cache_cf.cc
+++ b/src/cache_cf.cc
@@ -935,6 +935,18 @@ configDoConfigure(void)
(uint32_t)Config.maxRequestBufferSize, (uint32_t)Config.maxRequestHeaderSize);
}
+ // Warn about the dangers of exceeding String limits when manipulating HTTP
+ // headers. Technically, we do not concatenate _requests_, so we could relax
+ // their check, but we keep the two checks the same for simplicity's sake.
+ const auto safeRawHeaderValueSizeMax = (String::SizeMaxXXX()+1)/3;
+ // TODO: static_assert(safeRawHeaderValueSizeMax >= 64*1024); // no WARNINGs for default settings
+ if (Config.maxRequestHeaderSize > safeRawHeaderValueSizeMax)
+ debugs(3, DBG_CRITICAL, "WARNING: Increasing request_header_max_size beyond " << safeRawHeaderValueSizeMax <<
+ " bytes makes Squid more vulnerable to denial-of-service attacks; configured value: " << Config.maxRequestHeaderSize << " bytes");
+ if (Config.maxReplyHeaderSize > safeRawHeaderValueSizeMax)
+ debugs(3, DBG_CRITICAL, "WARNING: Increasing reply_header_max_size beyond " << safeRawHeaderValueSizeMax <<
+ " bytes makes Squid more vulnerable to denial-of-service attacks; configured value: " << Config.maxReplyHeaderSize << " bytes");
+
/*
* Disable client side request pipelining if client_persistent_connections OFF.
* Waste of resources queueing any pipelined requests when the first will close the connection.
diff --git a/src/cf.data.pre b/src/cf.data.pre
index bc2ddcd..d55b870 100644
--- a/src/cf.data.pre
+++ b/src/cf.data.pre
@@ -6196,11 +6196,14 @@ TYPE: b_size_t
DEFAULT: 64 KB
LOC: Config.maxRequestHeaderSize
DOC_START
- This specifies the maximum size for HTTP headers in a request.
- Request headers are usually relatively small (about 512 bytes).
- Placing a limit on the request header size will catch certain
- bugs (for example with persistent connections) and possibly
- buffer-overflow or denial-of-service attacks.
+ This directive limits the header size of a received HTTP request
+ (including request-line). Increasing this limit beyond its 64 KB default
+ exposes certain old Squid code to various denial-of-service attacks. This
+ limit also applies to received FTP commands.
+
+ This limit has no direct effect on Squid memory consumption.
+
+ Squid does not check this limit when sending requests.
DOC_END
NAME: reply_header_max_size
@@ -6209,11 +6212,14 @@ TYPE: b_size_t
DEFAULT: 64 KB
LOC: Config.maxReplyHeaderSize
DOC_START
- This specifies the maximum size for HTTP headers in a reply.
- Reply headers are usually relatively small (about 512 bytes).
- Placing a limit on the reply header size will catch certain
- bugs (for example with persistent connections) and possibly
- buffer-overflow or denial-of-service attacks.
+ This directive limits the header size of a received HTTP response
+ (including status-line). Increasing this limit beyond its 64 KB default
+ exposes certain old Squid code to various denial-of-service attacks. This
+ limit also applies to FTP command responses.
+
+ Squid also checks this limit when loading hit responses from disk cache.
+
+ Squid does not check this limit when sending responses.
DOC_END
NAME: request_body_max_size
diff --git a/src/http.cc b/src/http.cc
index 877172d..b006300 100644
--- a/src/http.cc
+++ b/src/http.cc
@@ -1820,8 +1820,9 @@ HttpStateData::httpBuildRequestHeader(HttpRequest * request,
String strFwd = hdr_in->getList(Http::HdrType::X_FORWARDED_FOR);
- // if we cannot double strFwd size, then it grew past 50% of the limit
- if (!strFwd.canGrowBy(strFwd.size())) {
+ // Detect unreasonably long header values. And paranoidly check String
+ // limits: a String ought to accommodate two reasonable-length values.
+ if (strFwd.size() > 32*1024 || !strFwd.canGrowBy(strFwd.size())) {
// There is probably a forwarding loop with Via detection disabled.
// If we do nothing, String will assert on overflow soon.
// TODO: Terminate all transactions with huge XFF?
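
The arithmetic behind the new constants: SizeMax_ = 3*64*1024 - 1 = 196607, and the warning threshold (SizeMax_ + 1)/3 = 65536 equals the 64 KB defaults of request_header_max_size and reply_header_max_size, so default configurations never warn while two default-sized header values plus roughly 50% expansion still fit in a String. The growth check can be verified standalone (SafeAdd mirrored from the diff above):

    #include <cassert>

    using size_type = unsigned int;
    static const size_type SizeMax_ = 3 * 64 * 1024 - 1; // 196607, as in the diff

    // Mirrors String::SafeAdd(): grow base by extra only if the sum stays legal.
    static bool SafeAdd(size_type &base, size_type extra)
    {
        if (extra <= SizeMax_ && base <= SizeMax_ - extra) {
            base += extra;
            return true;
        }
        return false;
    }

    int main()
    {
        assert((SizeMax_ + 1) / 3 == 64 * 1024); // warning threshold == 64 KB default
        size_type len = 64 * 1024;
        assert(SafeAdd(len, 64 * 1024));     // two default-sized header values fit
        assert(SafeAdd(len, 64 * 1024 - 1)); // with ~50% headroom left over
        assert(!SafeAdd(len, 1));            // at SizeMax_, further growth is refused
    }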


@@ -0,0 +1,163 @@
diff --git a/src/client_side.cc b/src/client_side.cc
index f57f3f7..ab393e4 100644
--- a/src/client_side.cc
+++ b/src/client_side.cc
@@ -906,7 +906,7 @@ ConnStateData::kick()
* We are done with the response, and we are either still receiving request
* body (early response!) or have already stopped receiving anything.
*
- * If we are still receiving, then clientParseRequest() below will fail.
+ * If we are still receiving, then parseRequests() below will fail.
* (XXX: but then we will call readNextRequest() which may succeed and
* execute a smuggled request as we are not done with the current request).
*
@@ -926,28 +926,12 @@ ConnStateData::kick()
* Attempt to parse a request from the request buffer.
* If we've been fed a pipelined request it may already
* be in our read buffer.
- *
- \par
- * This needs to fall through - if we're unlucky and parse the _last_ request
- * from our read buffer we may never re-register for another client read.
*/
- if (clientParseRequests()) {
- debugs(33, 3, clientConnection << ": parsed next request from buffer");
- }
+ parseRequests();
- /** \par
- * Either we need to kick-start another read or, if we have
- * a half-closed connection, kill it after the last request.
- * This saves waiting for half-closed connections to finished being
- * half-closed _AND_ then, sometimes, spending "Timeout" time in
- * the keepalive "Waiting for next request" state.
- */
- if (commIsHalfClosed(clientConnection->fd) && pipeline.empty()) {
- debugs(33, 3, "half-closed client with no pending requests, closing");
- clientConnection->close();
+ if (!isOpen())
return;
- }
/** \par
* At this point we either have a parsed request (which we've
@@ -2058,16 +2042,11 @@ ConnStateData::receivedFirstByte()
commSetConnTimeout(clientConnection, Config.Timeout.request, timeoutCall);
}
-/**
- * Attempt to parse one or more requests from the input buffer.
- * Returns true after completing parsing of at least one request [header]. That
- * includes cases where parsing ended with an error (e.g., a huge request).
- */
-bool
-ConnStateData::clientParseRequests()
+/// Attempt to parse one or more requests from the input buffer.
+/// May close the connection.
+void
+ConnStateData::parseRequests()
{
- bool parsed_req = false;
-
debugs(33, 5, HERE << clientConnection << ": attempting to parse");
// Loop while we have read bytes that are not needed for producing the body
@@ -2116,8 +2095,6 @@ ConnStateData::clientParseRequests()
processParsedRequest(context);
- parsed_req = true; // XXX: do we really need to parse everything right NOW ?
-
if (context->mayUseConnection()) {
debugs(33, 3, HERE << "Not parsing new requests, as this request may need the connection");
break;
@@ -2130,8 +2107,19 @@ ConnStateData::clientParseRequests()
}
}
- /* XXX where to 'finish' the parsing pass? */
- return parsed_req;
+ debugs(33, 7, "buffered leftovers: " << inBuf.length());
+
+ if (isOpen() && commIsHalfClosed(clientConnection->fd)) {
+ if (pipeline.empty()) {
+ // we processed what we could parse, and no more data is coming
+ debugs(33, 5, "closing half-closed without parsed requests: " << clientConnection);
+ clientConnection->close();
+ } else {
+ // we parsed what we could, and no more data is coming
+ debugs(33, 5, "monitoring half-closed while processing parsed requests: " << clientConnection);
+ flags.readMore = false; // may already be false
+ }
+ }
}
void
@@ -2148,23 +2136,7 @@ ConnStateData::afterClientRead()
if (pipeline.empty())
fd_note(clientConnection->fd, "Reading next request");
- if (!clientParseRequests()) {
- if (!isOpen())
- return;
- /*
- * If the client here is half closed and we failed
- * to parse a request, close the connection.
- * The above check with connFinishedWithConn() only
- * succeeds _if_ the buffer is empty which it won't
- * be if we have an incomplete request.
- * XXX: This duplicates ConnStateData::kick
- */
- if (pipeline.empty() && commIsHalfClosed(clientConnection->fd)) {
- debugs(33, 5, clientConnection << ": half-closed connection, no completed request parsed, connection closing.");
- clientConnection->close();
- return;
- }
- }
+ parseRequests();
if (!isOpen())
return;
@@ -3945,7 +3917,7 @@ ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic)
startPinnedConnectionMonitoring();
if (pipeline.empty())
- kick(); // in case clientParseRequests() was blocked by a busy pic.connection
+ kick(); // in case parseRequests() was blocked by a busy pic.connection
}
/// Forward future client requests using the given server connection.
diff --git a/src/client_side.h b/src/client_side.h
index 9fe8463..dfb4d8e 100644
--- a/src/client_side.h
+++ b/src/client_side.h
@@ -85,7 +85,6 @@ public:
virtual void doneWithControlMsg();
/// Traffic parsing
- bool clientParseRequests();
void readNextRequest();
/// try to make progress on a transaction or read more I/O
@@ -373,6 +372,7 @@ private:
virtual bool connFinishedWithConn(int size);
virtual void checkLogging();
+ void parseRequests();
void clientAfterReadingRequests();
bool concurrentRequestQueueFilled() const;
diff --git a/src/tests/stub_client_side.cc b/src/tests/stub_client_side.cc
index d7efb0f..655ed83 100644
--- a/src/tests/stub_client_side.cc
+++ b/src/tests/stub_client_side.cc
@@ -14,7 +14,7 @@
#include "tests/STUB.h"
#include "client_side.h"
-bool ConnStateData::clientParseRequests() STUB_RETVAL(false)
+void ConnStateData::parseRequests() STUB
void ConnStateData::readNextRequest() STUB
bool ConnStateData::isOpen() const STUB_RETVAL(false)
void ConnStateData::kick() STUB
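
After the patch, the half-closed logic lives in exactly one place: parseRequests() either closes a half-closed connection with an empty pipeline or stops further reads while parsed requests drain. A compact model of that post-parse decision (the ConnectionSketch type and its fields are stand-ins for ConnStateData):

    struct ConnectionSketch {
        bool open = true;
        bool halfClosed = false;
        int pipelinedRequests = 0;
        bool readMore = true;

        void close() { open = false; }
    };

    // Post-parse handling modeled on the patched ConnStateData::parseRequests().
    static void afterParsing(ConnectionSketch &conn)
    {
        if (!conn.open)
            return; // parsing itself may have closed the connection
        if (conn.halfClosed) {
            if (conn.pipelinedRequests == 0)
                conn.close();          // nothing parsed and no more bytes coming
            else
                conn.readMore = false; // finish parsed requests, but read no more
        }
    }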


@@ -0,0 +1,367 @@
From 8d0ee420a4d91ac7fd97316338f1e28b4b060cbf Mon Sep 17 00:00:00 2001
From: Luboš Uhliarik <luhliari@redhat.com>
Date: Thu, 10 Oct 2024 19:26:27 +0200
Subject: [PATCH 1/6] Ignore whitespace chars after chunk-size
Previously (before the #1498 change), squid was accepting TE-chunked replies
with whitespace after the chunk-size and missing chunk-ext data; after that
change, such replies were rejected.
It turned out that replies with such whitespace chars are pretty
common, and other webservers which can act as forward proxies (e.g.
nginx, httpd...) accept them.
This change allows Squid to proxy chunked responses from origin servers
that put whitespace between the chunk-size and the CRLF.
---
src/http/one/TeChunkedParser.cc | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index 9cce10fdc91..04753395e16 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,6 +125,7 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
// Code becomes much simpler when incremental parsing functions throw on
// bad or insufficient input, like in the code below. TODO: Expand up.
try {
+ tok.skipAll(CharacterSet::WSP); // Some servers send SP/TAB after chunk-size
parseChunkExtensions(tok); // a possibly empty chunk-ext list
tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
buf_ = tok.remaining();
From 9c8d35f899035fa06021ab3fe6919f892c2f0c6b Mon Sep 17 00:00:00 2001
From: Luboš Uhliarik <luhliari@redhat.com>
Date: Fri, 11 Oct 2024 02:06:31 +0200
Subject: [PATCH 2/6] Added new argument to Http::One::ParseBws()
The new wsp_only argument to ParseBws() decides which set of whitespace
characters is skipped. If wsp_only is set to true, only SP and HTAB
chars are skipped.
Also reduced the number of ParseBws() calls.
---
src/http/one/Parser.cc | 4 ++--
src/http/one/Parser.h | 3 ++-
src/http/one/TeChunkedParser.cc | 13 +++++++++----
src/http/one/TeChunkedParser.h | 2 +-
4 files changed, 14 insertions(+), 8 deletions(-)
diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
index b1908316a0b..01d7e3bc0e8 100644
--- a/src/http/one/Parser.cc
+++ b/src/http/one/Parser.cc
@@ -273,9 +273,9 @@ Http::One::ErrorLevel()
// BWS = *( SP / HTAB ) ; WhitespaceCharacters() may relax this RFC 7230 rule
void
-Http::One::ParseBws(Parser::Tokenizer &tok)
+Http::One::ParseBws(Parser::Tokenizer &tok, const bool wsp_only)
{
- const auto count = tok.skipAll(Parser::WhitespaceCharacters());
+ const auto count = tok.skipAll(wsp_only ? CharacterSet::WSP : Parser::WhitespaceCharacters());
if (tok.atEnd())
throw InsufficientInput(); // even if count is positive
diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
index d9a0ac8c273..08200371cd6 100644
--- a/src/http/one/Parser.h
+++ b/src/http/one/Parser.h
@@ -163,8 +163,9 @@ class Parser : public RefCountable
};
/// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
+/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimeter chars
/// \throws InsufficientInput when the end of BWS cannot be confirmed
-void ParseBws(Parser::Tokenizer &);
+void ParseBws(Parser::Tokenizer &, const bool wsp_only = false);
/// the right debugs() level for logging HTTP violation messages
int ErrorLevel();
diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index 04753395e16..41e1e5ddaea 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,8 +125,11 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
// Code becomes much simpler when incremental parsing functions throw on
// bad or insufficient input, like in the code below. TODO: Expand up.
try {
- tok.skipAll(CharacterSet::WSP); // Some servers send SP/TAB after chunk-size
- parseChunkExtensions(tok); // a possibly empty chunk-ext list
+ // A possibly empty chunk-ext list. If no chunk-ext has been found,
+ // try to skip trailing BWS, because some servers send "chunk-size BWS CRLF".
+ if (!parseChunkExtensions(tok))
+ ParseBws(tok, true);
+
tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
buf_ = tok.remaining();
parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
@@ -140,20 +143,22 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
/// Parses the chunk-ext list (RFC 9112 section 7.1.1:
/// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
-void
+bool
Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
{
+ bool foundChunkExt = false;
do {
auto tok = callerTok;
ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
if (!tok.skip(';'))
- return; // reached the end of extensions (if any)
+ return foundChunkExt; // reached the end of extensions (if any)
parseOneChunkExtension(tok);
buf_ = tok.remaining(); // got one extension
callerTok = tok;
+ foundChunkExt = true;
} while (true);
}
diff --git a/src/http/one/TeChunkedParser.h b/src/http/one/TeChunkedParser.h
index 02eacd1bb89..8c5d4bb4cba 100644
--- a/src/http/one/TeChunkedParser.h
+++ b/src/http/one/TeChunkedParser.h
@@ -71,7 +71,7 @@ class TeChunkedParser : public Http1::Parser
private:
bool parseChunkSize(Tokenizer &tok);
bool parseChunkMetadataSuffix(Tokenizer &);
- void parseChunkExtensions(Tokenizer &);
+ bool parseChunkExtensions(Tokenizer &);
void parseOneChunkExtension(Tokenizer &);
bool parseChunkBody(Tokenizer &tok);
bool parseChunkEnd(Tokenizer &tok);
From 81e67f97f9c386bdd0bb4a5e182395c46adb70ad Mon Sep 17 00:00:00 2001
From: Luboš Uhliarik <luhliari@redhat.com>
Date: Fri, 11 Oct 2024 02:44:33 +0200
Subject: [PATCH 3/6] Fix typo in Parser.h
---
src/http/one/Parser.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
index 08200371cd6..3ef4c5f7752 100644
--- a/src/http/one/Parser.h
+++ b/src/http/one/Parser.h
@@ -163,7 +163,7 @@ class Parser : public RefCountable
};
/// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
-/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimeter chars
+/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimiter chars
/// \throws InsufficientInput when the end of BWS cannot be confirmed
void ParseBws(Parser::Tokenizer &, const bool wsp_only = false);
From a0d4fe1794e605f8299a5c118c758a807453f016 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 10 Oct 2024 22:39:42 -0400
Subject: [PATCH 4/6] Bug 5449 is a regression of Bug 4492!
Both bugs deal with "chunk-size SP+ CRLF" use cases. Bug 4492 had _two_
spaces after chunk-size, which answers one of the PR review questions:
Should we skip just one space? No, we should not.
The lines moved around in many commits, but I believe this regression
was introduced in commit 951013d0 because that commit stopped consuming
partially parsed chunk-ext sequences. That consumption was wrong, but it
had a positive side effect -- fixing Bug 4492...
---
src/http/one/TeChunkedParser.cc | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index 41e1e5ddaea..aa4a840fdcf 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,10 +125,10 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
// Code becomes much simpler when incremental parsing functions throw on
// bad or insufficient input, like in the code below. TODO: Expand up.
try {
- // A possibly empty chunk-ext list. If no chunk-ext has been found,
- // try to skip trailing BWS, because some servers send "chunk-size BWS CRLF".
- if (!parseChunkExtensions(tok))
- ParseBws(tok, true);
+ // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
+ ParseBws(tok, true);
+
+ parseChunkExtensions(tok);
tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
buf_ = tok.remaining();
@@ -150,7 +150,7 @@ Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
do {
auto tok = callerTok;
- ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
+ ParseBws(tok);
if (!tok.skip(';'))
return foundChunkExt; // reached the end of extensions (if any)
From f837f5ff61301a17008f16ce1fb793c2abf19786 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 10 Oct 2024 23:06:42 -0400
Subject: [PATCH 5/6] fixup: Fewer conditionals/ifs and more explicit spelling
... to draw code reader attention when something unusual is going on.
---
src/http/one/Parser.cc | 22 ++++++++++++++++++----
src/http/one/Parser.h | 10 ++++++++--
src/http/one/TeChunkedParser.cc | 14 ++++++--------
src/http/one/TeChunkedParser.h | 2 +-
4 files changed, 33 insertions(+), 15 deletions(-)
diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
index 01d7e3bc0e8..d3937e5e96b 100644
--- a/src/http/one/Parser.cc
+++ b/src/http/one/Parser.cc
@@ -271,11 +271,12 @@ Http::One::ErrorLevel()
return Config.onoff.relaxed_header_parser < 0 ? DBG_IMPORTANT : 5;
}
-// BWS = *( SP / HTAB ) ; WhitespaceCharacters() may relax this RFC 7230 rule
-void
-Http::One::ParseBws(Parser::Tokenizer &tok, const bool wsp_only)
+/// common part of ParseBws() and ParseStrictBws()
+namespace Http::One {
+static void
+ParseBws_(Parser::Tokenizer &tok, const CharacterSet &bwsChars)
{
- const auto count = tok.skipAll(wsp_only ? CharacterSet::WSP : Parser::WhitespaceCharacters());
+ const auto count = tok.skipAll(bwsChars);
if (tok.atEnd())
throw InsufficientInput(); // even if count is positive
@@ -290,4 +291,17 @@ Http::One::ParseBws(Parser::Tokenizer &tok, const bool wsp_only)
// success: no more BWS characters expected
}
+} // namespace Http::One
+
+void
+Http::One::ParseBws(Parser::Tokenizer &tok)
+{
+ ParseBws_(tok, CharacterSet::WSP);
+}
+
+void
+Http::One::ParseStrictBws(Parser::Tokenizer &tok)
+{
+ ParseBws_(tok, Parser::WhitespaceCharacters());
+}
diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
index 3ef4c5f7752..49e399de546 100644
--- a/src/http/one/Parser.h
+++ b/src/http/one/Parser.h
@@ -163,9 +163,15 @@ class Parser : public RefCountable
};
/// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
-/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimiter chars
/// \throws InsufficientInput when the end of BWS cannot be confirmed
-void ParseBws(Parser::Tokenizer &, const bool wsp_only = false);
+/// \sa WhitespaceCharacters() for the definition of BWS characters
+/// \sa ParseStrictBws() that avoids WhitespaceCharacters() uncertainties
+void ParseBws(Parser::Tokenizer &);
+
+/// Like ParseBws() but only skips CharacterSet::WSP characters. This variation
+/// must be used if the next element may start with CR or any other character
+/// from RelaxedDelimiterCharacters().
+void ParseStrictBws(Parser::Tokenizer &);
/// the right debugs() level for logging HTTP violation messages
int ErrorLevel();
diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index aa4a840fdcf..859471b8c77 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,11 +125,11 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
// Code becomes much simpler when incremental parsing functions throw on
// bad or insufficient input, like in the code below. TODO: Expand up.
try {
- // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
- ParseBws(tok, true);
-
- parseChunkExtensions(tok);
+ // Bug 4492: IBM_HTTP_Server sends SP after chunk-size.
+ // No ParseBws() here because it may consume CR required further below.
+ ParseStrictBws(tok);
+ parseChunkExtensions(tok); // a possibly empty chunk-ext list
tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
buf_ = tok.remaining();
parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
@@ -143,22 +143,20 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
/// Parses the chunk-ext list (RFC 9112 section 7.1.1:
/// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
-bool
+void
Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
{
- bool foundChunkExt = false;
do {
auto tok = callerTok;
ParseBws(tok);
if (!tok.skip(';'))
- return foundChunkExt; // reached the end of extensions (if any)
+ return; // reached the end of extensions (if any)
parseOneChunkExtension(tok);
buf_ = tok.remaining(); // got one extension
callerTok = tok;
- foundChunkExt = true;
} while (true);
}
diff --git a/src/http/one/TeChunkedParser.h b/src/http/one/TeChunkedParser.h
index 8c5d4bb4cba..02eacd1bb89 100644
--- a/src/http/one/TeChunkedParser.h
+++ b/src/http/one/TeChunkedParser.h
@@ -71,7 +71,7 @@ class TeChunkedParser : public Http1::Parser
private:
bool parseChunkSize(Tokenizer &tok);
bool parseChunkMetadataSuffix(Tokenizer &);
- bool parseChunkExtensions(Tokenizer &);
+ void parseChunkExtensions(Tokenizer &);
void parseOneChunkExtension(Tokenizer &);
bool parseChunkBody(Tokenizer &tok);
bool parseChunkEnd(Tokenizer &tok);
From f79936a234e722adb2dd08f31cf6019d81ee712c Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 10 Oct 2024 23:31:08 -0400
Subject: [PATCH 6/6] fixup: Deadly typo
---
src/http/one/Parser.cc | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
index d3937e5e96b..7403a9163a2 100644
--- a/src/http/one/Parser.cc
+++ b/src/http/one/Parser.cc
@@ -296,12 +296,12 @@ ParseBws_(Parser::Tokenizer &tok, const CharacterSet &bwsChars)
void
Http::One::ParseBws(Parser::Tokenizer &tok)
{
- ParseBws_(tok, CharacterSet::WSP);
+ ParseBws_(tok, Parser::WhitespaceCharacters());
}
void
Http::One::ParseStrictBws(Parser::Tokenizer &tok)
{
- ParseBws_(tok, Parser::WhitespaceCharacters());
+ ParseBws_(tok, CharacterSet::WSP);
}
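
The series therefore lands on this ordering: strict SP/HTAB-only skipping runs before the chunk-ext list, and the relaxed whitespace set (which, as the final "Deadly typo" fixup shows, may include CR) is used only where eating a CR cannot break the mandatory CRLF match. A toy parser makes the distinction concrete (a sketch, not Squid's Tokenizer API):

    #include <cassert>
    #include <string>

    static size_t skipStrictBws(const std::string &buf, size_t pos)
    {
        while (pos < buf.size() && (buf[pos] == ' ' || buf[pos] == '\t'))
            ++pos; // SP/HTAB only: never consumes the CR of the closing CRLF
        return pos;
    }

    // Accepts "chunk-size BWS CRLF" such as "1a  \r\n" (Bug 4492 had two SPs),
    // while a relaxed skip that ate CR would wrongly demand another CRLF.
    static bool parseChunkSizeSuffix(const std::string &buf, size_t pos)
    {
        pos = skipStrictBws(buf, pos);
        // (chunk-ext parsing would run here; omitted in this sketch)
        return buf.compare(pos, 2, "\r\n") == 0; // skipRequired("CRLF ...")
    }

    int main()
    {
        assert(parseChunkSizeSuffix("1a  \r\n", 2));
        assert(!parseChunkSizeSuffix("1a\r \n", 2)); // CR must begin the CRLF
    }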


@@ -2,7 +2,7 @@
Name: squid
Version: 4.15
- Release: 6%{?dist}
+ Release: 10%{?dist}.3
Summary: The Squid proxy caching server
Epoch: 7
# See CREDITS for breakdown of non GPLv2+ code
@@ -38,6 +38,8 @@ Patch206: squid-4.11-active-ftp.patch
Patch208: squid-4.11-convert-ipv4.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2006121
Patch209: squid-4.15-ftp-filename-extraction.patch
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2076717
+ Patch210: squid-4.15-halfclosed.patch
# Security fixes
# https://bugzilla.redhat.com/show_bug.cgi?id=1941506
@@ -46,6 +48,32 @@ Patch300: squid-4.15-CVE-2021-28116.patch
Patch301: squid-4.15-CVE-2021-46784.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2129771
Patch302: squid-4.15-CVE-2022-41318.patch
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2245910
+ # +backported: https://github.com/squid-cache/squid/commit/417da4006cf5c97d44e74431b816fc58fec9e270
+ Patch303: squid-4.15-CVE-2023-46846.patch
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2245916
+ Patch304: squid-4.15-CVE-2023-46847.patch
+ # https://issues.redhat.com/browse/RHEL-14792
+ Patch305: squid-4.15-CVE-2023-5824.patch
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2248521
+ Patch306: squid-4.15-CVE-2023-46728.patch
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2247567
+ Patch307: squid-4.15-CVE-2023-46724.patch
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2252926
+ Patch308: squid-4.15-CVE-2023-49285.patch
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2252923
+ Patch309: squid-4.15-CVE-2023-49286.patch
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2254663
+ Patch310: squid-4.15-CVE-2023-50269.patch
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2264309
+ Patch311: squid-4.15-CVE-2024-25617.patch
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2268366
+ Patch312: squid-4.15-CVE-2024-25111.patch
+ # Regression caused by squid-4.15-CVE-2023-46846.patch
+ # Upstream PR: https://github.com/squid-cache/squid/pull/1914
+ Patch313: squid-4.15-ignore-wsp-after-chunk-size.patch
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2260051
+ Patch314: squid-4.15-CVE-2024-23638.patch
Requires: bash >= 2.0
Requires(pre): shadow-utils
@@ -62,8 +90,6 @@ BuildRequires: openssl-devel
BuildRequires: krb5-devel
# time_quota requires DB
BuildRequires: libdb-devel
- # ESI support requires Expat & libxml2
- BuildRequires: expat-devel libxml2-devel
# TPROXY requires libcap, and also increases security somewhat
BuildRequires: libcap-devel
# eCAP support
@@ -107,11 +133,24 @@ lookup program (dnsserver), a program for retrieving FTP data
%patch206 -p1 -b .active-ftp
%patch208 -p1 -b .convert-ipv4
%patch209 -p1 -b .ftp-fn-extraction
+ %patch210 -p1 -b .halfclosed
# Security patches
%patch300 -p1 -b .CVE-2021-28116
%patch301 -p1 -b .CVE-2021-46784
%patch302 -p1 -b .CVE-2022-41318
+ %patch303 -p1 -b .CVE-2023-46846
+ %patch304 -p1 -b .CVE-2023-46847
+ %patch305 -p1 -b .CVE-2023-5824
+ %patch306 -p1 -b .CVE-2023-46728
+ %patch307 -p1 -b .CVE-2023-46724
+ %patch308 -p1 -b .CVE-2023-49285
+ %patch309 -p1 -b .CVE-2023-49286
+ %patch310 -p1 -b .CVE-2023-50269
+ %patch311 -p1 -b .CVE-2024-25617
+ %patch312 -p1 -b .CVE-2024-25111
+ %patch313 -p1 -b .ignore-wsp-chunk-sz
+ %patch314 -p1 -b .CVE-2024-23638
# https://bugzilla.redhat.com/show_bug.cgi?id=1679526
# Patch in the vendor documentation and used different location for documentation
@@ -156,7 +195,7 @@ autoconf
--enable-storeio="aufs,diskd,ufs,rock" \
--enable-diskio \
--enable-wccpv2 \
- --enable-esi \
+ --disable-esi \
--enable-ecap \
--with-aio \
--with-default-user="squid" \
@@ -328,6 +367,49 @@ fi
%changelog
+ * Wed Nov 13 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:4.15-10.3
+ - Resolves: RHEL-22593 - CVE-2024-23638 squid:4/squid: vulnerable to
+   a Denial of Service attack against Cache Manager error responses
+
+ * Thu Nov 07 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:4.15-10.2
+ - Disable ESI support
+ - Resolves: RHEL-65075 - CVE-2024-45802 squid:4/squid: Denial of Service
+   processing ESI response content
+
+ * Mon Oct 14 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:4.15-10.1
+ - Resolves: RHEL-56024 - (Regression) Transfer-encoding:chunked data is not
+   sent to the client in its entirety
+
+ * Tue Mar 19 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:4.15-10
+ - Resolves: RHEL-28529 - squid:4/squid: Denial of Service in HTTP Chunked
+   Decoding (CVE-2024-25111)
+ - Resolves: RHEL-26088 - squid:4/squid: denial of service in HTTP header
+   parser (CVE-2024-25617)
+
+ * Fri Feb 02 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:4.15-9
+ - Resolves: RHEL-19552 - squid:4/squid: denial of service in HTTP request
+   parsing (CVE-2023-50269)
+
+ * Fri Feb 02 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:4.15-8
+ - Resolves: RHEL-18351 - squid:4/squid: Buffer over-read in the HTTP Message
+   processing feature (CVE-2023-49285)
+ - Resolves: RHEL-18342 - squid:4/squid: Incorrect Check of Function Return
+   Value In Helper Process management (CVE-2023-49286)
+ - Resolves: RHEL-18230 - squid:4/squid: Denial of Service in SSL Certificate
+   validation (CVE-2023-46724)
+ - Resolves: RHEL-15911 - squid:4/squid: NULL pointer dereference in the gopher
+   protocol code (CVE-2023-46728)
+ - Resolves: RHEL-18251 - squid crashes in assertion when a parent peer exists
+ - Resolves: RHEL-14794 - squid: squid multiple issues in HTTP response caching
+   (CVE-2023-5824)
+ - Resolves: RHEL-14803 - squid: squid: Denial of Service in HTTP Digest
+   Authentication (CVE-2023-46847)
+ - Resolves: RHEL-14777 - squid: squid: Request/Response smuggling in HTTP/1.1
+   and ICAP (CVE-2023-46846)
+
+ * Wed Aug 16 2023 Luboš Uhliarik <luhliari@redhat.com> - 7:4.15-7
+ - Resolves: #2076717 - Crash with half_closed_client on
+
* Thu Dec 08 2022 Tomas Korbar <tkorbar@redhat.com> - 4.15-6
- Resolves: #2072988 - [RFE] Add the "IP_BIND_ADDRESS_NO_PORT"
  flag to sockets created for outgoing connections in the squid source code.