import CS squid-6.10-12.el10

This commit is contained in:
AlmaLinux RelEng Bot 2026-04-27 07:00:19 -04:00
parent 1e3863fd14
commit 1493f19b59
9 changed files with 1530 additions and 4 deletions

View File

@ -0,0 +1,173 @@
diff --git a/src/HttpRequest.cc b/src/HttpRequest.cc
index d6be6ae..5c85eb8 100644
--- a/src/HttpRequest.cc
+++ b/src/HttpRequest.cc
@@ -341,7 +341,7 @@ HttpRequest::swapOut(StoreEntry * e)
/* packs request-line and headers, appends <crlf> terminator */
void
-HttpRequest::pack(Packable * p) const
+HttpRequest::pack(Packable * const p, const bool maskSensitiveInfo) const
{
assert(p);
/* pack request-line */
@@ -349,8 +349,8 @@ HttpRequest::pack(Packable * p) const
SQUIDSBUFPRINT(method.image()), SQUIDSBUFPRINT(url.path()),
http_ver.major, http_ver.minor);
/* headers */
- header.packInto(p);
- /* trailer */
+ header.packInto(p, maskSensitiveInfo);
+ /* indicate the end of the header section */
p->append("\r\n", 2);
}
diff --git a/src/HttpRequest.h b/src/HttpRequest.h
index 2256a55..2ada8e5 100644
--- a/src/HttpRequest.h
+++ b/src/HttpRequest.h
@@ -206,7 +206,7 @@ public:
void swapOut(StoreEntry * e);
- void pack(Packable * p) const;
+ void pack(Packable * p, bool maskSensitiveInfo = false) const;
static void httpRequestPack(void *obj, Packable *p);
diff --git a/src/cf.data.pre b/src/cf.data.pre
index 20a7338..d1f3317 100644
--- a/src/cf.data.pre
+++ b/src/cf.data.pre
@@ -8941,12 +8941,18 @@ NAME: email_err_data
COMMENT: on|off
TYPE: onoff
LOC: Config.onoff.emailErrData
-DEFAULT: on
+DEFAULT: off
DOC_START
If enabled, information about the occurred error will be
included in the mailto links of the ERR pages (if %W is set)
so that the email body contains the data.
Syntax is <A HREF="mailto:%w%W">%w</A>
+
+ SECURITY WARNING:
+ Request headers and other included facts may contain
+ sensitive information about transaction history, the
+ Squid instance, and its environment which would be
+ unavailable to error recipients otherwise.
DOC_END
NAME: deny_info
diff --git a/src/client_side_reply.cc b/src/client_side_reply.cc
index 6818d76..860edfc 100644
--- a/src/client_side_reply.cc
+++ b/src/client_side_reply.cc
@@ -94,7 +94,7 @@ clientReplyContext::clientReplyContext(ClientHttpRequest *clientContext) :
void
clientReplyContext::setReplyToError(
err_type err, Http::StatusCode status, char const *uri,
- const ConnStateData *conn, HttpRequest *failedrequest, const char *unparsedrequest,
+ const ConnStateData *conn, HttpRequest *failedrequest, const char *,
#if USE_AUTH
Auth::UserRequest::Pointer auth_user_request
#else
@@ -104,9 +104,6 @@ clientReplyContext::setReplyToError(
{
auto errstate = clientBuildError(err, status, uri, conn, failedrequest, http->al);
- if (unparsedrequest)
- errstate->request_hdrs = xstrdup(unparsedrequest);
-
#if USE_AUTH
errstate->auth_user_request = auth_user_request;
#endif
@@ -995,11 +992,14 @@ clientReplyContext::traceReply()
triggerInitialStoreRead();
http->storeEntry()->releaseRequest();
http->storeEntry()->buffer();
+ MemBuf content;
+ content.init();
+ http->request->pack(&content, true /* hide authorization data */);
const HttpReplyPointer rep(new HttpReply);
- rep->setHeaders(Http::scOkay, nullptr, "text/plain", http->request->prefixLen(), 0, squid_curtime);
+ rep->setHeaders(Http::scOkay, nullptr, "message/http", content.contentSize(), 0, squid_curtime);
+ rep->body.set(SBuf(content.buf, content.size));
http->storeEntry()->replaceHttpReply(rep);
- http->request->swapOut(http->storeEntry());
- http->storeEntry()->complete();
+ http->storeEntry()->completeSuccessfully("traceReply() stored the entire response");
}
#define SENDING_BODY 0
diff --git a/src/errorpage.cc b/src/errorpage.cc
index 0b7e5b8..31566dc 100644
--- a/src/errorpage.cc
+++ b/src/errorpage.cc
@@ -792,7 +792,6 @@ ErrorState::~ErrorState()
{
safe_free(redirect_url);
safe_free(url);
- safe_free(request_hdrs);
wordlistDestroy(&ftp.server_msg);
safe_free(ftp.request);
safe_free(ftp.reply);
@@ -850,7 +849,10 @@ ErrorState::Dump(MemBuf * mb)
SQUIDSBUFPRINT(request->url.path()),
AnyP::ProtocolType_str[request->http_ver.protocol],
request->http_ver.major, request->http_ver.minor);
- request->header.packInto(&str);
+ MemBuf r;
+ r.init();
+ request->pack(&r, true /* hide authorization data */);
+ str.append(r.content(), r.contentSize());
}
str.append("\r\n", 2);
@@ -1112,18 +1114,10 @@ ErrorState::compileLegacyCode(Build &build)
p = "[no request]";
break;
}
- if (request) {
- mb.appendf(SQUIDSBUFPH " " SQUIDSBUFPH " %s/%d.%d\n",
- SQUIDSBUFPRINT(request->method.image()),
- SQUIDSBUFPRINT(request->url.path()),
- AnyP::ProtocolType_str[request->http_ver.protocol],
- request->http_ver.major, request->http_ver.minor);
- request->header.packInto(&mb, true); //hide authorization data
- } else if (request_hdrs) {
- p = request_hdrs;
- } else {
+ else if (request)
+ request->pack(&mb, true /* hide authorization data */);
+ else
p = "[no request]";
- }
break;
case 's':
diff --git a/src/errorpage.h b/src/errorpage.h
index 8d23857..0dc10d7 100644
--- a/src/errorpage.h
+++ b/src/errorpage.h
@@ -194,7 +194,6 @@ public:
MemBuf *listing = nullptr;
} ftp;
- char *request_hdrs = nullptr;
char *err_msg = nullptr; /* Preformatted error message from the cache */
AccessLogEntryPointer ale; ///< transaction details (or nil)
diff --git a/src/tests/stub_HttpRequest.cc b/src/tests/stub_HttpRequest.cc
index 495597d..48a0f1c 100644
--- a/src/tests/stub_HttpRequest.cc
+++ b/src/tests/stub_HttpRequest.cc
@@ -45,7 +45,7 @@ bool HttpRequest::expectingBody(const HttpRequestMethod &, int64_t &) const STUB
bool HttpRequest::bodyNibbled() const STUB_RETVAL(false)
int HttpRequest::prefixLen() const STUB_RETVAL(0)
void HttpRequest::swapOut(StoreEntry *) STUB
-void HttpRequest::pack(Packable *) const STUB
+void HttpRequest::pack(Packable *, bool) const STUB
void HttpRequest::httpRequestPack(void *, Packable *) STUB
HttpRequest * HttpRequest::FromUrl(const SBuf &, const MasterXaction::Pointer &, const HttpRequestMethod &) STUB_RETVAL(nullptr)
HttpRequest * HttpRequest::FromUrlXXX(const char *, const MasterXaction::Pointer &, const HttpRequestMethod &) STUB_RETVAL(nullptr)

View File

@ -0,0 +1,179 @@
commit 19fe29039da63063ad078fbeab59b1043de8cdb6
Author: Tomas Korbar <tkorbar@redhat.com>
Date: Fri Mar 27 11:42:00 2026 +0100
Fix CVE-2026-32748
diff --git a/src/ICP.h b/src/ICP.h
index cb13c1c..e7f3a18 100644
--- a/src/ICP.h
+++ b/src/ICP.h
@@ -90,10 +90,7 @@ extern Comm::ConnectionPointer icpOutgoingConn;
extern Ip::Address theIcpPublicHostID;
/// \ingroup ServerProtocolICPAPI
-HttpRequest* icpGetRequest(char *url, int reqnum, int fd, Ip::Address &from);
-
-/// \ingroup ServerProtocolICPAPI
-bool icpAccessAllowed(Ip::Address &from, HttpRequest * icp_request);
+HttpRequestPointer icpGetRequest(const char *url, int reqnum, int fd, const Ip::Address &from);
/// \ingroup ServerProtocolICPAPI
void icpCreateAndSend(icp_opcode, int flags, char const *url, int reqnum, int pad, int fd, const Ip::Address &from, AccessLogEntryPointer);
@@ -102,7 +99,7 @@ void icpCreateAndSend(icp_opcode, int flags, char const *url, int reqnum, int pa
icp_opcode icpGetCommonOpcode();
/// \ingroup ServerProtocolICPAPI
-void icpDenyAccess(Ip::Address &from, char *url, int reqnum, int fd);
+void icpDenyAccess(const Ip::Address &from, const char *url, int reqnum, int fd);
/// \ingroup ServerProtocolICPAPI
PF icpHandleUdp;
diff --git a/src/icp_v2.cc b/src/icp_v2.cc
index 1b7a0f3..c6f9a7d 100644
--- a/src/icp_v2.cc
+++ b/src/icp_v2.cc
@@ -425,7 +425,7 @@ icpCreateAndSend(icp_opcode opcode, int flags, char const *url, int reqnum, int
}
void
-icpDenyAccess(Ip::Address &from, char *url, int reqnum, int fd)
+icpDenyAccess(const Ip::Address &from, const char * const url, const int reqnum, const int fd)
{
debugs(12, 2, "icpDenyAccess: Access Denied for " << from << " by " << AclMatchedName << ".");
@@ -440,8 +440,9 @@ icpDenyAccess(Ip::Address &from, char *url, int reqnum, int fd)
}
}
-bool
-icpAccessAllowed(Ip::Address &from, HttpRequest * icp_request)
+/// icpGetRequest() helper that determines whether squid.conf allows the given ICP query
+static bool
+icpAccessAllowed(const Ip::Address &from, HttpRequest * icp_request)
{
/* absent any explicit rules, we deny all */
if (!Config.accessList.icp)
@@ -453,8 +454,8 @@ icpAccessAllowed(Ip::Address &from, HttpRequest * icp_request)
return checklist.fastCheck().allowed();
}
-HttpRequest *
-icpGetRequest(char *url, int reqnum, int fd, Ip::Address &from)
+HttpRequest::Pointer
+icpGetRequest(const char *url, int reqnum, int fd, const Ip::Address &from)
{
if (strpbrk(url, w_space)) {
icpCreateAndSend(ICP_ERR, 0, rfc1738_escape(url), reqnum, 0, fd, from, nullptr);
@@ -462,12 +463,17 @@ icpGetRequest(char *url, int reqnum, int fd, Ip::Address &from)
}
const auto mx = MasterXaction::MakePortless<XactionInitiator::initIcp>();
- auto *result = HttpRequest::FromUrlXXX(url, mx);
- if (!result)
- icpCreateAndSend(ICP_ERR, 0, url, reqnum, 0, fd, from, nullptr);
+ if (const HttpRequest::Pointer request = HttpRequest::FromUrlXXX(url, mx)) {
+ if (!icpAccessAllowed(from, request.getRaw())) {
+ icpDenyAccess(from, url, reqnum, fd);
+ return nullptr;
+ }
- return result;
+ return request;
+ }
+ icpCreateAndSend(ICP_ERR, 0, url, reqnum, 0, fd, from, nullptr);
+ return nullptr;
}
static void
@@ -478,18 +484,11 @@ doV2Query(int fd, Ip::Address &from, char *buf, icp_common_t header)
uint32_t flags = 0;
/* We have a valid packet */
char *url = buf + sizeof(icp_common_t) + sizeof(uint32_t);
- HttpRequest *icp_request = icpGetRequest(url, header.reqnum, fd, from);
+ const auto icp_request = icpGetRequest(url, header.reqnum, fd, from);
if (!icp_request)
return;
- HTTPMSGLOCK(icp_request);
-
- if (!icpAccessAllowed(from, icp_request)) {
- icpDenyAccess(from, url, header.reqnum, fd);
- HTTPMSGUNLOCK(icp_request);
- return;
- }
#if USE_ICMP
if (header.flags & ICP_FLAG_SRC_RTT) {
rtt = netdbHostRtt(icp_request->url.host());
@@ -502,7 +501,7 @@ doV2Query(int fd, Ip::Address &from, char *buf, icp_common_t header)
#endif /* USE_ICMP */
/* The peer is allowed to use this cache */
- ICP2State state(header, icp_request);
+ ICP2State state(header, icp_request.getRaw());
state.fd = fd;
state.from = from;
state.url = xstrdup(url);
@@ -531,8 +530,6 @@ doV2Query(int fd, Ip::Address &from, char *buf, icp_common_t header)
}
icpCreateAndSend(codeToSend, flags, url, header.reqnum, src_rtt, fd, from, state.al);
-
- HTTPMSGUNLOCK(icp_request);
}
void
diff --git a/src/icp_v3.cc b/src/icp_v3.cc
index 290fdae..9cceb9a 100644
--- a/src/icp_v3.cc
+++ b/src/icp_v3.cc
@@ -36,19 +36,13 @@ doV3Query(int fd, Ip::Address &from, char *buf, icp_common_t header)
{
/* We have a valid packet */
char *url = buf + sizeof(icp_common_t) + sizeof(uint32_t);
- HttpRequest *icp_request = icpGetRequest(url, header.reqnum, fd, from);
+ const auto icp_request = icpGetRequest(url, header.reqnum, fd, from);
if (!icp_request)
return;
- if (!icpAccessAllowed(from, icp_request)) {
- icpDenyAccess (from, url, header.reqnum, fd);
- delete icp_request;
- return;
- }
-
/* The peer is allowed to use this cache */
- ICP3State state(header, icp_request);
+ ICP3State state(header, icp_request.getRaw());
state.fd = fd;
state.from = from;
state.url = xstrdup(url);
diff --git a/src/tests/stub_icp.cc b/src/tests/stub_icp.cc
index 95c523b..e4f0b7d 100644
--- a/src/tests/stub_icp.cc
+++ b/src/tests/stub_icp.cc
@@ -9,6 +9,7 @@
#include "squid.h"
#include "AccessLogEntry.h"
#include "comm/Connection.h"
+#include "HttpRequest.h"
#include "ICP.h"
#define STUB_API "icp_*.cc"
@@ -29,11 +30,10 @@ Comm::ConnectionPointer icpIncomingConn;
Comm::ConnectionPointer icpOutgoingConn;
Ip::Address theIcpPublicHostID;
-HttpRequest* icpGetRequest(char *, int, int, Ip::Address &) STUB_RETVAL(nullptr)
-bool icpAccessAllowed(Ip::Address &, HttpRequest *) STUB_RETVAL(false)
+HttpRequest::Pointer icpGetRequest(const char *, int, int, const Ip::Address &) STUB_RETVAL(nullptr)
void icpCreateAndSend(icp_opcode, int, char const *, int, int, int, const Ip::Address &, AccessLogEntryPointer) STUB
icp_opcode icpGetCommonOpcode() STUB_RETVAL(ICP_INVALID)
-void icpDenyAccess(Ip::Address &, char *, int, int) STUB
+void icpDenyAccess(const Ip::Address &, const char *, int, int) STUB
void icpHandleIcpV3(int, Ip::Address &, char *, int) STUB
void icpConnectionShutdown(void) STUB
int icpSetCacheKey(const cache_key *) STUB_RETVAL(0)

View File

@ -0,0 +1,12 @@
diff --git a/src/icp_v2.cc b/src/icp_v2.cc
index 2a4ced3bfab..25f7b71d25e 100644
--- a/src/icp_v2.cc
+++ b/src/icp_v2.cc
@@ -461,7 +461,6 @@ HttpRequest *
icpGetRequest(char *url, int reqnum, int fd, Ip::Address &from)
{
if (strpbrk(url, w_space)) {
- url = rfc1738_escape(url);
icpCreateAndSend(ICP_ERR, 0, rfc1738_escape(url), reqnum, 0, fd, from, nullptr);
return nullptr;
}

View File

@ -0,0 +1,223 @@
diff --git a/src/CachePeer.cc b/src/CachePeer.cc
index 1a5ea04..b67ac3f 100644
--- a/src/CachePeer.cc
+++ b/src/CachePeer.cc
@@ -68,20 +68,11 @@ CachePeer::noteSuccess()
}
}
-void
-CachePeer::noteFailure(const Http::StatusCode code)
-{
- if (Http::Is4xx(code))
- return; // this failure is not our fault
-
- countFailure();
-}
-
// TODO: Require callers to detail failures instead of using one (and often
// misleading!) "connection failed" phrase for all of them.
/// noteFailure() helper for handling failures attributed to this peer
void
-CachePeer::countFailure()
+CachePeer::noteFailure()
{
stats.last_connect_failure = squid_curtime;
if (tcp_up > 0)
diff --git a/src/CachePeer.h b/src/CachePeer.h
index 6c4e4fc..180c76d 100644
--- a/src/CachePeer.h
+++ b/src/CachePeer.h
@@ -38,9 +38,8 @@ public:
/// reacts to a successful establishment of a connection to this cache_peer
void noteSuccess();
- /// reacts to a failure on a connection to this cache_peer
- /// \param code a received response status code, if any
- void noteFailure(Http::StatusCode code);
+ /// reacts to a failed attempt to establish a connection to this cache_peer
+ void noteFailure();
/// (re)configure cache_peer name=value
void rename(const char *);
@@ -238,14 +237,13 @@ NoteOutgoingConnectionSuccess(CachePeer * const peer)
peer->noteSuccess();
}
-/// reacts to a failure on a connection to an origin server or cache_peer
+/// reacts to a failed attempt to establish a connection to an origin server or cache_peer
/// \param peer nil if the connection is to an origin server
-/// \param code a received response status code, if any
inline void
-NoteOutgoingConnectionFailure(CachePeer * const peer, const Http::StatusCode code)
+NoteOutgoingConnectionFailure(CachePeer * const peer)
{
if (peer)
- peer->noteFailure(code);
+ peer->noteFailure();
}
/// identify the given cache peer in cache.log messages and such
diff --git a/src/HappyConnOpener.cc b/src/HappyConnOpener.cc
index 9812160..a2d3052 100644
--- a/src/HappyConnOpener.cc
+++ b/src/HappyConnOpener.cc
@@ -638,7 +638,7 @@ HappyConnOpener::handleConnOpenerAnswer(Attempt &attempt, const CommConnectCbPar
lastError = makeError(ERR_CONNECT_FAIL);
lastError->xerrno = params.xerrno;
- NoteOutgoingConnectionFailure(params.conn->getPeer(), lastError->httpStatus);
+ NoteOutgoingConnectionFailure(params.conn->getPeer());
if (spareWaiting)
updateSpareWaitAfterPrimeFailure();
diff --git a/src/PeerPoolMgr.cc b/src/PeerPoolMgr.cc
index 7becaf6..448bfed 100644
--- a/src/PeerPoolMgr.cc
+++ b/src/PeerPoolMgr.cc
@@ -86,7 +86,7 @@ PeerPoolMgr::handleOpenedConnection(const CommConnectCbParams &params)
}
if (params.flag != Comm::OK) {
- NoteOutgoingConnectionFailure(peer, Http::scNone);
+ NoteOutgoingConnectionFailure(peer);
checkpoint("conn opening failure"); // may retry
return;
}
diff --git a/src/clients/HttpTunneler.cc b/src/clients/HttpTunneler.cc
index f31ce52..4840866 100644
--- a/src/clients/HttpTunneler.cc
+++ b/src/clients/HttpTunneler.cc
@@ -90,7 +90,7 @@ Http::Tunneler::handleConnectionClosure(const CommCloseCbParams &)
{
closer = nullptr;
if (connection) {
- countFailingConnection(nullptr);
+ countFailingConnection();
connection->noteClosure();
connection = nullptr;
}
@@ -355,7 +355,7 @@ Http::Tunneler::bailWith(ErrorState *error)
if (const auto failingConnection = connection) {
// TODO: Reuse to-peer connections after a CONNECT error response.
- countFailingConnection(error);
+ countFailingConnection();
disconnect();
failingConnection->close();
}
@@ -374,10 +374,12 @@ Http::Tunneler::sendSuccess()
}
void
-Http::Tunneler::countFailingConnection(const ErrorState * const error)
+Http::Tunneler::countFailingConnection()
{
assert(connection);
- NoteOutgoingConnectionFailure(connection->getPeer(), error ? error->httpStatus : Http::scNone);
+ // No NoteOutgoingConnectionFailure(connection->getPeer()) call here because
+ // we do not blame cache_peer for CONNECT failures (on top of a successfully
+ // established connection to that cache_peer).
if (noteFwdPconnUse && connection->isOpen())
fwdPconnPool->noteUses(fd_table[connection->fd].pconn.uses);
}
diff --git a/src/clients/HttpTunneler.h b/src/clients/HttpTunneler.h
index eb8dcee..d5090d3 100644
--- a/src/clients/HttpTunneler.h
+++ b/src/clients/HttpTunneler.h
@@ -80,7 +80,7 @@ private:
void disconnect();
/// updates connection usage history before the connection is closed
- void countFailingConnection(const ErrorState *);
+ void countFailingConnection();
AsyncCall::Pointer writer; ///< called when the request has been written
AsyncCall::Pointer reader; ///< called when the response should be read
diff --git a/src/neighbors.cc b/src/neighbors.cc
index cf03505..628b801 100644
--- a/src/neighbors.cc
+++ b/src/neighbors.cc
@@ -1311,7 +1311,7 @@ peerProbeConnectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int
if (status == Comm::OK)
p->noteSuccess();
else
- p->noteFailure(Http::scNone);
+ p->noteFailure();
-- p->testing_now;
conn->close();
diff --git a/src/security/BlindPeerConnector.cc b/src/security/BlindPeerConnector.cc
index 7372df9..b884127 100644
--- a/src/security/BlindPeerConnector.cc
+++ b/src/security/BlindPeerConnector.cc
@@ -76,7 +76,7 @@ Security::BlindPeerConnector::noteNegotiationDone(ErrorState *error)
// based on TCP results, SSL results, or both. And the code is probably not
// consistent in this aspect across tunnelling and forwarding modules.
if (peer && peer->secure.encryptTransport)
- peer->noteFailure(error->httpStatus);
+ peer->noteFailure();
return;
}
diff --git a/src/security/PeerConnector.cc b/src/security/PeerConnector.cc
index f122294..96c2f0e 100644
--- a/src/security/PeerConnector.cc
+++ b/src/security/PeerConnector.cc
@@ -115,7 +115,7 @@ Security::PeerConnector::commCloseHandler(const CommCloseCbParams &params)
err->detailError(d);
if (serverConn) {
- countFailingConnection(err);
+ countFailingConnection();
serverConn->noteClosure();
serverConn = nullptr;
}
@@ -507,7 +507,7 @@ Security::PeerConnector::bail(ErrorState *error)
answer().error = error;
if (const auto failingConnection = serverConn) {
- countFailingConnection(error);
+ countFailingConnection();
disconnect();
failingConnection->close();
}
@@ -525,10 +525,10 @@ Security::PeerConnector::sendSuccess()
}
void
-Security::PeerConnector::countFailingConnection(const ErrorState * const error)
+Security::PeerConnector::countFailingConnection()
{
assert(serverConn);
- NoteOutgoingConnectionFailure(serverConn->getPeer(), error ? error->httpStatus : Http::scNone);
+ NoteOutgoingConnectionFailure(serverConn->getPeer());
// TODO: Calling PconnPool::noteUses() should not be our responsibility.
if (noteFwdPconnUse && serverConn->isOpen())
fwdPconnPool->noteUses(fd_table[serverConn->fd].pconn.uses);
diff --git a/src/security/PeerConnector.h b/src/security/PeerConnector.h
index 3c7c01b..b00f385 100644
--- a/src/security/PeerConnector.h
+++ b/src/security/PeerConnector.h
@@ -150,7 +150,7 @@ protected:
void disconnect();
/// updates connection usage history before the connection is closed
- void countFailingConnection(const ErrorState *);
+ void countFailingConnection();
/// If called the certificates validator will not used
void bypassCertValidator() {useCertValidator_ = false;}
diff --git a/src/tests/stub_libsecurity.cc b/src/tests/stub_libsecurity.cc
index 3b6eb79..8e14ac1 100644
--- a/src/tests/stub_libsecurity.cc
+++ b/src/tests/stub_libsecurity.cc
@@ -97,7 +97,7 @@ void PeerConnector::bail(ErrorState *) STUB
void PeerConnector::sendSuccess() STUB
void PeerConnector::callBack() STUB
void PeerConnector::disconnect() STUB
-void PeerConnector::countFailingConnection(const ErrorState *) STUB
+void PeerConnector::countFailingConnection() STUB
void PeerConnector::recordNegotiationDetails() STUB
EncryptorAnswer &PeerConnector::answer() STUB_RETREF(EncryptorAnswer)
}

View File

@ -0,0 +1,342 @@
From e0b1163f19ad7f63e92e6ea9ee94d51d5eebeacf Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 27 Nov 2025 10:51:27 +0100
Subject: [PATCH] Bug 5352: Do not get stuck when RESPMOD is slower than
read(2) (#1777)
... RESPMOD BodyPipe buffer becomes full ...
maybeMakeSpaceAvailable: will not read up to 0
The AsyncCall Client::noteDelayAwareReadChance constructed
... RESPMOD consumes some buffered virgin body data ...
entering BodyProducer::noteMoreBodySpaceAvailable
leaving BodyProducer::noteMoreBodySpaceAvailable
... read_timeout seconds later ...
http.cc(148) httpTimeout
FwdState.cc(471) fail: ERR_READ_TIMEOUT "Gateway Timeout"
When RESPMOD does not empty its adaptation BodyPipe buffer fast enough,
readReply() may eventually fill that buffer and call delayRead(),
anticipating a noteDelayAwareReadChance() callback from Store or Server
delay pools. That callback never happens if Store and Server are not
getting any data -- they do not even start working until RESPMOD service
starts releasing adapted/echoed response back to Squid! Meanwhile, our
flags.do_next_read (cleared by readReply() caller) remains false.
When/if RESPMOD service eventually frees some BodyPipe buffer space,
triggering noteMoreBodySpaceAvailable() notification, nothing changes
because maybeReadVirginBody() quits when flags.do_next_read is false.
noteMoreBodySpaceAvailable() could not just make flags.do_next_read true
because that flag may be false for a variety of other/permanent reasons.
Instead, we replaced that one-size-fits-all flag with more specific
checks so that reading can resume if it is safe to resume it. This
change addresses a couple of flag-related XXXs.
The bug was introduced in 2023 commit 50c5af88. Prior to that change,
delayRead() was not called when RESPMOD BodyPipe buffer became full
because maybeMakeSpaceAvailable() returned false in that case, blocking
maybeReadVirginBody() from triggering readReply() via Comm::Read(). We
missed flags.do_next_read dependency and that Store-specific delayRead()
cannot be used to wait for adaptation buffer space to become available.
XXX: To reduce risks, this change duplicates a part of
calcBufferSpaceToReserve() logic. Removing that duplication requires
significant (and risky) refactoring of several related methods.
---
src/adaptation/icap/ModXact.cc | 8 +---
src/clients/Client.cc | 3 ++
src/clients/Client.h | 3 ++
src/clients/FtpClient.cc | 2 +
src/http.cc | 70 ++++++++++++++++++++++------------
src/http.h | 3 ++
src/http/StateFlags.h | 1 -
7 files changed, 59 insertions(+), 31 deletions(-)
diff --git a/src/adaptation/icap/ModXact.cc b/src/adaptation/icap/ModXact.cc
index d2f9529..9331215 100644
--- a/src/adaptation/icap/ModXact.cc
+++ b/src/adaptation/icap/ModXact.cc
@@ -435,14 +435,10 @@ void Adaptation::Icap::ModXact::virginConsume()
BodyPipe &bp = *virgin.body_pipe;
const bool wantToPostpone = isRepeatable || canStartBypass || protectGroupBypass;
- // Why > 2? HttpState does not use the last bytes in the buffer
- // because Client::delayRead() is arguably broken. See
- // HttpStateData::maybeReadVirginBody for more details.
- if (wantToPostpone && bp.buf().spaceSize() > 2) {
+ if (wantToPostpone && bp.buf().potentialSpaceSize() > 0) {
// Postponing may increase memory footprint and slow the HTTP side
// down. Not postponing may increase the number of ICAP errors
- // if the ICAP service fails. We may also use "potential" space to
- // postpone more aggressively. Should the trade-off be configurable?
+ // if the ICAP service fails. Should the trade-off be configurable?
debugs(93, 8, "postponing consumption from " << bp.status());
return;
}
diff --git a/src/clients/Client.cc b/src/clients/Client.cc
index cbf5693..7de6274 100644
--- a/src/clients/Client.cc
+++ b/src/clients/Client.cc
@@ -1028,6 +1028,9 @@ Client::adjustBodyBytesRead(const int64_t delta)
void
Client::delayRead()
{
+ Assure(!waitingForDelayAwareReadChance);
+ waitingForDelayAwareReadChance = true;
+
using DeferredReadDialer = NullaryMemFunT<Client>;
AsyncCall::Pointer call = asyncCall(11, 5, "Client::noteDelayAwareReadChance",
DeferredReadDialer(this, &Client::noteDelayAwareReadChance));
diff --git a/src/clients/Client.h b/src/clients/Client.h
index f60d8ce..6a53315 100644
--- a/src/clients/Client.h
+++ b/src/clients/Client.h
@@ -194,6 +194,9 @@ protected:
#endif
bool receivedWholeRequestBody = false; ///< handleRequestBodyProductionEnded called
+ /// whether we are waiting for MemObject::delayRead() to call us back
+ bool waitingForDelayAwareReadChance = false;
+
/// whether we should not be talking to FwdState; XXX: clear fwd instead
/// points to a string literal which is used only for debugging
const char *doneWithFwd = nullptr;
diff --git a/src/clients/FtpClient.cc b/src/clients/FtpClient.cc
index 3630766..6f9d8fa 100644
--- a/src/clients/FtpClient.cc
+++ b/src/clients/FtpClient.cc
@@ -907,6 +907,8 @@ Ftp::Client::dataConnection() const
void
Ftp::Client::noteDelayAwareReadChance()
{
+ // TODO: Merge with HttpStateData::noteDelayAwareReadChance()
+ waitingForDelayAwareReadChance = false;
data.read_pending = false;
maybeReadVirginBody();
}
diff --git a/src/http.cc b/src/http.cc
index 4916c15..e3f9b4c 100644
--- a/src/http.cc
+++ b/src/http.cc
@@ -1168,17 +1168,15 @@ HttpStateData::persistentConnStatus() const
void
HttpStateData::noteDelayAwareReadChance()
{
- flags.do_next_read = true;
+ waitingForDelayAwareReadChance = false;
maybeReadVirginBody();
}
void
HttpStateData::readReply(const CommIoCbParams &io)
{
- Must(!flags.do_next_read); // XXX: should have been set false by mayReadVirginBody()
- flags.do_next_read = false;
-
debugs(11, 5, io.conn);
+ waitingForCommRead = false;
// Bail out early on Comm::ERR_CLOSING - close handlers will tidy up for us
if (io.flag == Comm::ERR_CLOSING) {
@@ -1212,7 +1210,27 @@ HttpStateData::readReply(const CommIoCbParams &io)
const auto readSizeWanted = readSizeMax ? entry->bytesWanted(Range<size_t>(0, readSizeMax)) : 0;
if (readSizeWanted <= 0) {
- delayRead();
+ // XXX: If we avoid Comm::ReadNow(), we should not Comm::Read() again
+ // when the wait is over. We should go straight to readReply() instead.
+
+#if USE_ADAPTATION
+ // XXX: We are duplicating Client::calcBufferSpaceToReserve() logic.
+ // XXX: Some other delayRead() cases may lack kickReads() guarantees.
+ // TODO: Refactor maybeMakeSpaceAvailable() to properly treat each
+ // no-read case instead of calling delayRead() for the remaining cases.
+
+ if (responseBodyBuffer) {
+ debugs(11, 5, "avoid delayRead() to give adaptation a chance to drain overflow buffer: " << responseBodyBuffer->contentSize());
+ return; // wait for Client::noteMoreBodySpaceAvailable()
+ }
+
+ if (virginBodyDestination && !virginBodyDestination->buf().hasPotentialSpace()) {
+ debugs(11, 5, "avoid delayRead() to give adaptation a chance to drain body pipe buffer: " << virginBodyDestination->buf().contentSize());
+ return; // wait for Client::noteMoreBodySpaceAvailable()
+ }
+#endif
+
+ delayRead(); /// wait for Client::noteDelayAwareReadChance()
return;
}
@@ -1223,7 +1241,6 @@ HttpStateData::readReply(const CommIoCbParams &io)
case Comm::INPROGRESS:
if (inBuf.isEmpty())
debugs(33, 2, io.conn << ": no data to process, " << xstrerr(rd.xerrno));
- flags.do_next_read = true;
maybeReadVirginBody();
return;
@@ -1253,7 +1270,6 @@ HttpStateData::readReply(const CommIoCbParams &io)
case Comm::ENDFILE: // close detected by 0-byte read
eof = 1;
- flags.do_next_read = false;
/* Continue to process previously read data */
break;
@@ -1264,7 +1280,6 @@ HttpStateData::readReply(const CommIoCbParams &io)
const auto err = new ErrorState(ERR_READ_ERROR, Http::scBadGateway, fwd->request, fwd->al);
err->xerrno = rd.xerrno;
fwd->fail(err);
- flags.do_next_read = false;
closeServer();
mustStop("HttpStateData::readReply");
return;
@@ -1318,7 +1333,6 @@ HttpStateData::continueAfterParsingHeader()
if (!flags.headers_parsed && !eof) {
debugs(11, 9, "needs more at " << inBuf.length());
- flags.do_next_read = true;
/** \retval false If we have not finished parsing the headers and may get more data.
* Schedules more reads to retrieve the missing data.
*/
@@ -1369,7 +1383,6 @@ HttpStateData::continueAfterParsingHeader()
assert(error != ERR_NONE);
entry->reset();
fwd->fail(new ErrorState(error, Http::scBadGateway, fwd->request, fwd->al));
- flags.do_next_read = false;
closeServer();
mustStop("HttpStateData::continueAfterParsingHeader");
return false; // quit on error
@@ -1455,7 +1468,6 @@ HttpStateData::decodeAndWriteReplyBody()
addVirginReplyBody(decodedData.content(), decodedData.contentSize());
if (doneParsing) {
lastChunk = 1;
- flags.do_next_read = false;
markParsedVirginReplyAsWhole("http parsed last-chunk");
} else if (eof) {
markPrematureReplyBodyEofFailure();
@@ -1479,7 +1491,6 @@ void
HttpStateData::processReplyBody()
{
if (!flags.headers_parsed) {
- flags.do_next_read = true;
maybeReadVirginBody();
return;
}
@@ -1499,7 +1510,6 @@ HttpStateData::processReplyBody()
if (entry->isAccepting()) {
if (flags.chunked) {
if (!decodeAndWriteReplyBody()) {
- flags.do_next_read = false;
serverComplete();
return;
}
@@ -1525,8 +1535,6 @@ HttpStateData::processReplyBody()
} else {
commSetConnTimeout(serverConnection, Config.Timeout.read, nil);
}
-
- flags.do_next_read = true;
}
break;
@@ -1536,7 +1544,6 @@ HttpStateData::processReplyBody()
// TODO: Remove serverConnectionSaved but preserve exception safety.
commUnsetConnTimeout(serverConnection);
- flags.do_next_read = false;
comm_remove_close_handler(serverConnection->fd, closeHandler);
closeHandler = nullptr;
@@ -1596,29 +1603,45 @@ HttpStateData::mayReadVirginReplyBody() const
void
HttpStateData::maybeReadVirginBody()
{
- // too late to read
- if (!Comm::IsConnOpen(serverConnection) || fd_table[serverConnection->fd].closing())
+ if (!Comm::IsConnOpen(serverConnection) || fd_table[serverConnection->fd].closing()) {
+ debugs(11, 3, "no, peer connection gone");
return;
+ }
+
+ if (eof) {
+ // tolerate hypothetical calls between Comm::ENDFILE and closeServer()
+ debugs(11, 3, "no, saw EOF");
+ return;
+ }
+
+ if (lastChunk) {
+ // tolerate hypothetical calls between setting lastChunk and clearing serverConnection
+ debugs(11, 3, "no, saw last-chunk");
+ return;
+ }
if (!canBufferMoreReplyBytes()) {
abortTransaction("more response bytes required, but the read buffer is full and cannot be drained");
return;
}
- // XXX: get rid of the do_next_read flag
- // check for the proper reasons preventing read(2)
- if (!flags.do_next_read)
+ if (waitingForDelayAwareReadChance) {
+ debugs(11, 5, "no, still waiting for noteDelayAwareReadChance()");
return;
+ }
- flags.do_next_read = false;
+ if (waitingForCommRead) {
+ debugs(11, 3, "no, already waiting for readReply()");
+ return;
+ }
- // must not already be waiting for read(2) ...
assert(!Comm::MonitorsRead(serverConnection->fd));
// wait for read(2) to be possible.
typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
AsyncCall::Pointer call = JobCallback(11, 5, Dialer, this, HttpStateData::readReply);
Comm::Read(serverConnection, call);
+ waitingForCommRead = true;
}
/// Desired inBuf capacity based on various capacity preferences/limits:
@@ -2406,7 +2429,6 @@ HttpStateData::sendRequest()
AsyncCall::Pointer timeoutCall = JobCallback(11, 5,
TimeoutDialer, this, HttpStateData::httpTimeout);
commSetConnTimeout(serverConnection, Config.Timeout.lifetime, timeoutCall);
- flags.do_next_read = true;
maybeReadVirginBody();
if (request->body_pipe != nullptr) {
diff --git a/src/http.h b/src/http.h
index cb4ae06..a15f103 100644
--- a/src/http.h
+++ b/src/http.h
@@ -152,6 +152,9 @@ private:
/// positive when we read more than we wanted
int64_t payloadTruncated = 0;
+ /// whether we are waiting for our Comm::Read() handler to be called
+ bool waitingForCommRead = false;
+
/// Whether we received a Date header older than that of a matching
/// cached response.
bool sawDateGoBack = false;
diff --git a/src/http/StateFlags.h b/src/http/StateFlags.h
index ae3ca99..6d26b3e 100644
--- a/src/http/StateFlags.h
+++ b/src/http/StateFlags.h
@@ -58,7 +58,6 @@ public:
bool keepalive_broken = false;
bool abuse_detected = false;
bool request_sent = false;
- bool do_next_read = false;
bool chunked = false; ///< reading a chunked response; TODO: rename
bool chunked_request = false; ///< writing a chunked request
bool sentLastChunk = false; ///< do not try to write last-chunk again
--
2.44.0

View File

@ -0,0 +1,367 @@
From 8d0ee420a4d91ac7fd97316338f1e28b4b060cbf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= <luhliari@redhat.com>
Date: Thu, 10 Oct 2024 19:26:27 +0200
Subject: [PATCH 1/6] Ignore whitespace chars after chunk-size
Previously (before #1498 change), squid was accepting TE-chunked replies
with whitespaces after chunk-size and missing chunk-ext data. After
the #1498 change, such replies were rejected.
It turned out that replies with such whitespace chars are pretty
common and other webservers which can act as forward proxies (e.g.
nginx, httpd...) are accepting them.
This change will allow to proxy chunked responses from origin server,
which had whitespaces in between chunk-size and CRLF.
---
src/http/one/TeChunkedParser.cc | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index 9cce10fdc91..04753395e16 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,6 +125,7 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
// Code becomes much simpler when incremental parsing functions throw on
// bad or insufficient input, like in the code below. TODO: Expand up.
try {
+ tok.skipAll(CharacterSet::WSP); // Some servers send SP/TAB after chunk-size
parseChunkExtensions(tok); // a possibly empty chunk-ext list
tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
buf_ = tok.remaining();
From 9c8d35f899035fa06021ab3fe6919f892c2f0c6b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= <luhliari@redhat.com>
Date: Fri, 11 Oct 2024 02:06:31 +0200
Subject: [PATCH 2/6] Added new argument to Http::One::ParseBws()
Depending on new wsp_only argument in ParseBws() it will be decided
which set of whitespaces characters will be parsed. If wsp_only is set
to true, only SP and HTAB chars will be parsed.
Also optimized number of ParseBws calls.
---
src/http/one/Parser.cc | 4 ++--
src/http/one/Parser.h | 3 ++-
src/http/one/TeChunkedParser.cc | 13 +++++++++----
src/http/one/TeChunkedParser.h | 2 +-
4 files changed, 14 insertions(+), 8 deletions(-)
diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
index b1908316a0b..01d7e3bc0e8 100644
--- a/src/http/one/Parser.cc
+++ b/src/http/one/Parser.cc
@@ -273,9 +273,9 @@ Http::One::ErrorLevel()
// BWS = *( SP / HTAB ) ; WhitespaceCharacters() may relax this RFC 7230 rule
void
-Http::One::ParseBws(Parser::Tokenizer &tok)
+Http::One::ParseBws(Parser::Tokenizer &tok, const bool wsp_only)
{
- const auto count = tok.skipAll(Parser::WhitespaceCharacters());
+ const auto count = tok.skipAll(wsp_only ? CharacterSet::WSP : Parser::WhitespaceCharacters());
if (tok.atEnd())
throw InsufficientInput(); // even if count is positive
diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
index d9a0ac8c273..08200371cd6 100644
--- a/src/http/one/Parser.h
+++ b/src/http/one/Parser.h
@@ -163,8 +163,9 @@ class Parser : public RefCountable
};
/// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
+/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimeter chars
/// \throws InsufficientInput when the end of BWS cannot be confirmed
-void ParseBws(Parser::Tokenizer &);
+void ParseBws(Parser::Tokenizer &, const bool wsp_only = false);
/// the right debugs() level for logging HTTP violation messages
int ErrorLevel();
diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index 04753395e16..41e1e5ddaea 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,8 +125,11 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
// Code becomes much simpler when incremental parsing functions throw on
// bad or insufficient input, like in the code below. TODO: Expand up.
try {
- tok.skipAll(CharacterSet::WSP); // Some servers send SP/TAB after chunk-size
- parseChunkExtensions(tok); // a possibly empty chunk-ext list
+ // A possibly empty chunk-ext list. If no chunk-ext has been found,
+ // try to skip trailing BWS, because some servers send "chunk-size BWS CRLF".
+ if (!parseChunkExtensions(tok))
+ ParseBws(tok, true);
+
tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
buf_ = tok.remaining();
parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
@@ -140,20 +143,22 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
/// Parses the chunk-ext list (RFC 9112 section 7.1.1:
/// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
-void
+bool
Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
{
+ bool foundChunkExt = false;
do {
auto tok = callerTok;
ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
if (!tok.skip(';'))
- return; // reached the end of extensions (if any)
+ return foundChunkExt; // reached the end of extensions (if any)
parseOneChunkExtension(tok);
buf_ = tok.remaining(); // got one extension
callerTok = tok;
+ foundChunkExt = true;
} while (true);
}
diff --git a/src/http/one/TeChunkedParser.h b/src/http/one/TeChunkedParser.h
index 02eacd1bb89..8c5d4bb4cba 100644
--- a/src/http/one/TeChunkedParser.h
+++ b/src/http/one/TeChunkedParser.h
@@ -71,7 +71,7 @@ class TeChunkedParser : public Http1::Parser
private:
bool parseChunkSize(Tokenizer &tok);
bool parseChunkMetadataSuffix(Tokenizer &);
- void parseChunkExtensions(Tokenizer &);
+ bool parseChunkExtensions(Tokenizer &);
void parseOneChunkExtension(Tokenizer &);
bool parseChunkBody(Tokenizer &tok);
bool parseChunkEnd(Tokenizer &tok);
From 81e67f97f9c386bdd0bb4a5e182395c46adb70ad Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= <luhliari@redhat.com>
Date: Fri, 11 Oct 2024 02:44:33 +0200
Subject: [PATCH 3/6] Fix typo in Parser.h
---
src/http/one/Parser.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
index 08200371cd6..3ef4c5f7752 100644
--- a/src/http/one/Parser.h
+++ b/src/http/one/Parser.h
@@ -163,7 +163,7 @@ class Parser : public RefCountable
};
/// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
-/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimeter chars
+/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimiter chars
/// \throws InsufficientInput when the end of BWS cannot be confirmed
void ParseBws(Parser::Tokenizer &, const bool wsp_only = false);
From a0d4fe1794e605f8299a5c118c758a807453f016 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 10 Oct 2024 22:39:42 -0400
Subject: [PATCH 4/6] Bug 5449 is a regression of Bug 4492!
Both bugs deal with "chunk-size SP+ CRLF" use cases. Bug 4492 had _two_
spaces after chunk-size, which answers one of the PR review questions:
Should we skip just one space? No, we should not.
The lines moved around in many commits, but I believe this regression
was introduced in commit 951013d0 because that commit stopped consuming
partially parsed chunk-ext sequences. That consumption was wrong, but it
had a positive side effect -- fixing Bug 4492...
---
src/http/one/TeChunkedParser.cc | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index 41e1e5ddaea..aa4a840fdcf 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,10 +125,10 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
// Code becomes much simpler when incremental parsing functions throw on
// bad or insufficient input, like in the code below. TODO: Expand up.
try {
- // A possibly empty chunk-ext list. If no chunk-ext has been found,
- // try to skip trailing BWS, because some servers send "chunk-size BWS CRLF".
- if (!parseChunkExtensions(tok))
- ParseBws(tok, true);
+ // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
+ ParseBws(tok, true);
+
+ parseChunkExtensions(tok);
tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
buf_ = tok.remaining();
@@ -150,7 +150,7 @@ Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
do {
auto tok = callerTok;
- ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
+ ParseBws(tok);
if (!tok.skip(';'))
return foundChunkExt; // reached the end of extensions (if any)
From f837f5ff61301a17008f16ce1fb793c2abf19786 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 10 Oct 2024 23:06:42 -0400
Subject: [PATCH 5/6] fixup: Fewer conditionals/ifs and more explicit spelling
... to draw code reader attention when something unusual is going on.
---
src/http/one/Parser.cc | 22 ++++++++++++++++++----
src/http/one/Parser.h | 10 ++++++++--
src/http/one/TeChunkedParser.cc | 14 ++++++--------
src/http/one/TeChunkedParser.h | 2 +-
4 files changed, 33 insertions(+), 15 deletions(-)
diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
index 01d7e3bc0e8..d3937e5e96b 100644
--- a/src/http/one/Parser.cc
+++ b/src/http/one/Parser.cc
@@ -271,11 +271,12 @@ Http::One::ErrorLevel()
return Config.onoff.relaxed_header_parser < 0 ? DBG_IMPORTANT : 5;
}
-// BWS = *( SP / HTAB ) ; WhitespaceCharacters() may relax this RFC 7230 rule
-void
-Http::One::ParseBws(Parser::Tokenizer &tok, const bool wsp_only)
+/// common part of ParseBws() and ParseStrctBws()
+namespace Http::One {
+static void
+ParseBws_(Parser::Tokenizer &tok, const CharacterSet &bwsChars)
{
- const auto count = tok.skipAll(wsp_only ? CharacterSet::WSP : Parser::WhitespaceCharacters());
+ const auto count = tok.skipAll(bwsChars);
if (tok.atEnd())
throw InsufficientInput(); // even if count is positive
@@ -290,4 +291,17 @@ Http::One::ParseBws(Parser::Tokenizer &tok, const bool wsp_only)
// success: no more BWS characters expected
}
+} // namespace Http::One
+
+void
+Http::One::ParseBws(Parser::Tokenizer &tok)
+{
+ ParseBws_(tok, CharacterSet::WSP);
+}
+
+void
+Http::One::ParseStrictBws(Parser::Tokenizer &tok)
+{
+ ParseBws_(tok, Parser::WhitespaceCharacters());
+}
diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
index 3ef4c5f7752..49e399de546 100644
--- a/src/http/one/Parser.h
+++ b/src/http/one/Parser.h
@@ -163,9 +163,15 @@ class Parser : public RefCountable
};
/// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
-/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimiter chars
/// \throws InsufficientInput when the end of BWS cannot be confirmed
-void ParseBws(Parser::Tokenizer &, const bool wsp_only = false);
+/// \sa WhitespaceCharacters() for the definition of BWS characters
+/// \sa ParseStrictBws() that avoids WhitespaceCharacters() uncertainties
+void ParseBws(Parser::Tokenizer &);
+
+/// Like ParseBws() but only skips CharacterSet::WSP characters. This variation
+/// must be used if the next element may start with CR or any other character
+/// from RelaxedDelimiterCharacters().
+void ParseStrictBws(Parser::Tokenizer &);
/// the right debugs() level for logging HTTP violation messages
int ErrorLevel();
diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index aa4a840fdcf..859471b8c77 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,11 +125,11 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
// Code becomes much simpler when incremental parsing functions throw on
// bad or insufficient input, like in the code below. TODO: Expand up.
try {
- // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
- ParseBws(tok, true);
-
- parseChunkExtensions(tok);
+ // Bug 4492: IBM_HTTP_Server sends SP after chunk-size.
+ // No ParseBws() here because it may consume CR required further below.
+ ParseStrictBws(tok);
+ parseChunkExtensions(tok); // a possibly empty chunk-ext list
tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
buf_ = tok.remaining();
parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
@@ -143,22 +143,20 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
/// Parses the chunk-ext list (RFC 9112 section 7.1.1:
/// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
-bool
+void
Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
{
- bool foundChunkExt = false;
do {
auto tok = callerTok;
ParseBws(tok);
if (!tok.skip(';'))
- return foundChunkExt; // reached the end of extensions (if any)
+ return; // reached the end of extensions (if any)
parseOneChunkExtension(tok);
buf_ = tok.remaining(); // got one extension
callerTok = tok;
- foundChunkExt = true;
} while (true);
}
diff --git a/src/http/one/TeChunkedParser.h b/src/http/one/TeChunkedParser.h
index 8c5d4bb4cba..02eacd1bb89 100644
--- a/src/http/one/TeChunkedParser.h
+++ b/src/http/one/TeChunkedParser.h
@@ -71,7 +71,7 @@ class TeChunkedParser : public Http1::Parser
private:
bool parseChunkSize(Tokenizer &tok);
bool parseChunkMetadataSuffix(Tokenizer &);
- bool parseChunkExtensions(Tokenizer &);
+ void parseChunkExtensions(Tokenizer &);
void parseOneChunkExtension(Tokenizer &);
bool parseChunkBody(Tokenizer &tok);
bool parseChunkEnd(Tokenizer &tok);
From f79936a234e722adb2dd08f31cf6019d81ee712c Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 10 Oct 2024 23:31:08 -0400
Subject: [PATCH 6/6] fixup: Deadly typo
---
src/http/one/Parser.cc | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
index d3937e5e96b..7403a9163a2 100644
--- a/src/http/one/Parser.cc
+++ b/src/http/one/Parser.cc
@@ -296,12 +296,12 @@ ParseBws_(Parser::Tokenizer &tok, const CharacterSet &bwsChars)
void
Http::One::ParseBws(Parser::Tokenizer &tok)
{
- ParseBws_(tok, CharacterSet::WSP);
+ ParseBws_(tok, Parser::WhitespaceCharacters());
}
void
Http::One::ParseStrictBws(Parser::Tokenizer &tok)
{
- ParseBws_(tok, Parser::WhitespaceCharacters());
+ ParseBws_(tok, CharacterSet::WSP);
}

View File

@ -0,0 +1,117 @@
From 4d6dd3ddba5e850a42c86d8233735165a371c31c Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Sun, 1 Sep 2024 00:39:34 +0000
Subject: [PATCH] Bug 5405: Large uploads fill request buffer and die (#1887)
maybeMakeSpaceAvailable: request buffer full
ReadNow: ... size 0, retval 0, errno 0
terminateAll: 1/1 after ERR_CLIENT_GONE/WITH_CLIENT
%Ss=TCP_MISS_ABORTED
This bug is triggered by a combination of the following two conditions:
* HTTP client upload fills Squid request buffer faster than it is
drained by an origin server, cache_peer, or REQMOD service. The buffer
accumulates 576 KB (default 512 KB client_request_buffer_max_size + 64
KB internal "pipe" buffer).
* The affected server or service consumes a few bytes after the critical
accumulation is reached. In other words, the bug cannot be triggered
if nothing is consumed after the first condition above is met.
Comm::ReadNow() must not be called with a full buffer: Related
FD_READ_METHOD() code cannot distinguish "received EOF" from "had no
buffer space" outcomes. Server::readSomeData() tried to prevent such
calls, but the corresponding check had two problems:
* The check had an unsigned integer underflow bug[^1] that made it
ineffective when inBuf length exceeded Config.maxRequestBufferSize.
That length could exceed the limit due to reconfiguration and when
inBuf space size first grew outside of maybeMakeSpaceAvailable()
protections (e.g., during an inBuf.c_str() call) and then got filled
with newly read data. That growth started happening after 2020 commit
1dfbca06 optimized SBuf::cow() to merge leading and trailing space.
Prior to that commit, Bug 5405 could probably only affect Squid
reconfigurations that lower client_request_buffer_max_size.
* The check was separated from the ReadNow() call it was meant to
protect. While ConnStateData was waiting for the socket to become
ready for reading, various asynchronous events could alter inBuf or
Config.maxRequestBufferSize.
This change fixes both problems.
This change also fixes Squid Bug 5214.
[^1]: That underflow bug was probably introduced in 2015 commit 4d1376d7
while trying to emulate the original "do not read less than two bytes"
ConnStateData::In::maybeMakeSpaceAvailable() condition. That condition
itself looks like a leftover from manual zero-terminated input buffer
days that ended with 2014 commit e7287625. It is now removed.
---
diff --git a/src/servers/Server.cc b/src/servers/Server.cc
index 70fd10b..dd20619 100644
--- a/src/servers/Server.cc
+++ b/src/servers/Server.cc
@@ -83,16 +83,25 @@ Server::maybeMakeSpaceAvailable()
debugs(33, 4, "request buffer full: client_request_buffer_max_size=" << Config.maxRequestBufferSize);
}
+bool
+Server::mayBufferMoreRequestBytes() const
+{
+ // TODO: Account for bodyPipe buffering as well.
+ if (inBuf.length() >= Config.maxRequestBufferSize) {
+ debugs(33, 4, "no: " << inBuf.length() << '-' << Config.maxRequestBufferSize << '=' << (inBuf.length() - Config.maxRequestBufferSize));
+ return false;
+ }
+ debugs(33, 7, "yes: " << Config.maxRequestBufferSize << '-' << inBuf.length() << '=' << (Config.maxRequestBufferSize - inBuf.length()));
+ return true;
+}
+
void
Server::readSomeData()
{
if (reading())
return;
- debugs(33, 4, clientConnection << ": reading request...");
-
- // we can only read if there is more than 1 byte of space free
- if (Config.maxRequestBufferSize - inBuf.length() < 2)
+ if (!mayBufferMoreRequestBytes())
return;
typedef CommCbMemFunT<Server, CommIoCbParams> Dialer;
@@ -123,7 +132,16 @@ Server::doClientRead(const CommIoCbParams &io)
* Plus, it breaks our lame *HalfClosed() detection
*/
+ // mayBufferMoreRequestBytes() was true during readSomeData(), but variables
+ // like Config.maxRequestBufferSize may have changed since that check
+ if (!mayBufferMoreRequestBytes()) {
+ // XXX: If we avoid Comm::ReadNow(), we should not Comm::Read() again
+ // when the wait is over; resume these doClientRead() checks instead.
+ return; // wait for noteMoreBodySpaceAvailable() or a similar inBuf draining event
+ }
maybeMakeSpaceAvailable();
+ Assure(inBuf.spaceSize());
+
CommIoCbParams rd(this); // will be expanded with ReadNow results
rd.conn = io.conn;
switch (Comm::ReadNow(rd, inBuf)) {
diff --git a/src/servers/Server.h b/src/servers/Server.h
index ef105f5..6e549b3 100644
--- a/src/servers/Server.h
+++ b/src/servers/Server.h
@@ -119,6 +119,9 @@ protected:
/// abort any pending transactions and prevent new ones (by closing)
virtual void terminateAll(const Error &, const LogTagsErrors &) = 0;
+ /// whether client_request_buffer_max_size allows inBuf.length() increase
+ bool mayBufferMoreRequestBytes() const;
+
void doClientRead(const CommIoCbParams &io);
void clientWriteDone(const CommIoCbParams &io);

View File

@ -0,0 +1,59 @@
diff --git a/src/ssl/gadgets.cc b/src/ssl/gadgets.cc
index 1f8ac9d..3f54e3d 100644
--- a/src/ssl/gadgets.cc
+++ b/src/ssl/gadgets.cc
@@ -13,6 +13,42 @@
#include "security/Io.h"
#include "ssl/gadgets.h"
+/// whether to supply a digest algorithm name when calling X509_sign() with the given key
+static bool
+signWithDigest(const Security::PrivateKeyPointer &key) {
+ Assure(key); // TODO: Add and use Security::PrivateKey (here and in caller).
+ const auto pkey = key.get();
+
+ // OpenSSL does not define a maximum name size, but does terminate longer
+ // names without returning an error to the caller. Many similar callers in
+ // OpenSSL sources use 80-byte buffers.
+ char defaultDigestName[80] = "";
+ const auto nameGetterResult = EVP_PKEY_get_default_digest_name(pkey, defaultDigestName, sizeof(defaultDigestName));
+ debugs(83, 3, "nameGetterResult=" << nameGetterResult << " defaultDigestName=" << defaultDigestName);
+ if (nameGetterResult <= 0) {
+ debugs(83, 3, "ERROR: EVP_PKEY_get_default_digest_name() failure: " << Ssl::ReportAndForgetErrors);
+ // Backward compatibility: On error, assume digest should be used.
+ // TODO: Return false for -2 nameGetterResult as it "indicates the
+ // operation is not supported by the public key algorithm"?
+ return true;
+ }
+
+ // The name "UNDEF" signifies that a digest must (for return value 2) or may
+ // (for return value 1) be left unspecified.
+ if (nameGetterResult == 2 && strcmp(defaultDigestName, "UNDEF") == 0)
+ return false;
+
+ // Defined mandatory algorithms and "may be left unspecified" cases mentioned above.
+ return true;
+}
+
+/// OpenSSL X509_sign() wrapper
+static auto
+Sign(Security::Certificate &cert, const Security::PrivateKeyPointer &key, const EVP_MD &availableDigest) {
+ const auto digestOrNil = signWithDigest(key) ? &availableDigest : nullptr;
+ return X509_sign(&cert, key.get(), digestOrNil);
+}
+
void
Ssl::ForgetErrors()
{
@@ -618,9 +654,9 @@ static bool generateFakeSslCertificate(Security::CertPointer & certToStore, Secu
assert(hash);
/*Now sign the request */
if (properties.signAlgorithm != Ssl::algSignSelf && properties.signWithPkey.get())
- ret = X509_sign(cert.get(), properties.signWithPkey.get(), hash);
+ ret = Sign(*cert, properties.signWithPkey, *hash);
else //else sign with self key (self signed request)
- ret = X509_sign(cert.get(), pkey.get(), hash);
+ ret = Sign(*cert, pkey, *hash);
if (!ret)
return false;

View File

@ -2,7 +2,7 @@
Name: squid
Version: 6.10
Release: 1%{?dist}
Release: 12%{?dist}
Summary: The Squid proxy caching server
Epoch: 7
# See CREDITS for breakdown of non GPLv2+ code
@ -38,6 +38,25 @@ Patch203: squid-6.1-perlpath.patch
Patch204: squid-6.1-symlink-lang-err.patch
# Upstream PR: https://github.com/squid-cache/squid/pull/1442
Patch205: squid-6.1-crash-half-closed.patch
# Upstream PR: https://github.com/squid-cache/squid/pull/1914
Patch206: squid-6.10-ignore-wsp-after-chunk-size.patch
# https://bugs.squid-cache.org/show_bug.cgi?id=5214
Patch207: squid-6.10-large-upload-buffer-dies.patch
# Upstream commit: https://github.com/squid-cache/squid/commit/2e7dea3cedd3ef2f071dee82867c4147f17376dd
# https://issues.redhat.com/browse/RHEL-86817
Patch208: squid-6.10-cache-peer-connect-errors.patch
# https://issues.redhat.com/browse/RHEL-107994
Patch209: squid-6.10-provider-keys-digest.patch
# https://issues.redhat.com/browse/RHEL-129457
Patch210: squid-6.10-dont-stuck-respmod.patch
# Security patches
# https://bugzilla.redhat.com/show_bug.cgi?id=2404736
Patch500: squid-6.10-CVE-2025-62168.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2451574
Patch501: squid-6.10-CVE-2026-33526.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2451577
Patch502: squid-6.10-CVE-2026-32748.patch
# cache_swap.sh
Requires: bash gawk
@ -55,8 +74,6 @@ BuildRequires: openssl-devel
BuildRequires: krb5-devel
# time_quota requires TrivialDB
BuildRequires: libtdb-devel
# ESI support requires Expat & libxml2
BuildRequires: expat-devel libxml2-devel
# TPROXY requires libcap, and also increases security somewhat
BuildRequires: libcap-devel
# eCAP support
@ -139,7 +156,7 @@ sed -i 's|@SYSCONFDIR@/squid.conf.documented|%{_pkgdocdir}/squid.conf.documented
--enable-storeio="aufs,diskd,ufs,rock" \
--enable-diskio \
--enable-wccpv2 \
--enable-esi \
--disable-esi \
--enable-ecap \
--with-aio \
--with-default-user="squid" \
@ -324,6 +341,43 @@ fi
%changelog
* Wed Apr 22 2026 Luboš Uhliarik <luhliari@redhat.com> - 7:6.10-12
- Resolves: RHEL-160670 - squid: Squid: Denial of Service via heap
Use-After-Free vulnerability in ICP handling (CVE-2026-33526)
- Resolves: RHEL-160671 - squid: Squid: Denial of Service via crafted
ICP traffic (CVE-2026-32748)
* Thu Nov 27 2025 Luboš Uhliarik <luhliari@redhat.com> - 7:6.10-10
- Resolves: RHEL-129457 - "ICAP_ERR_OTHER/408" occurs in icap.log when
downloading a file on RHEL9
* Mon Nov 10 2025 Luboš Uhliarik <luhliari@redhat.com> - 7:6.10-9
- Resolves: RHEL-122481 - squid: Squid vulnerable to information disclosure via
authentication credential leakage in error handling (CVE-2025-62168)
* Fri Sep 12 2025 Luboš Uhliarik <luhliari@redhat.com> - 7:6.10-8
- Resolves: RHEL-107994 - squid does not work with post-quantum crypto
* Thu Apr 10 2025 Luboš Uhliarik <luhliari@redhat.com> - 7:6.10-6
- Resolves: RHEL-86817 - ”TCP connection to XX.XX.XX.XX/XXXX failed” message is
output frequently on RHEL10
* Thu Nov 07 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:6.10-5
- Disable ESI support
- Resolves: RHEL-65069 - CVE-2024-45802 squid: Denial of Service processing ESI
response content
* Tue Oct 29 2024 Troy Dawson <tdawson@redhat.com> - 7:6.10-4
- Bump release for October 2024 mass rebuild:
Resolves: RHEL-64018
* Wed Oct 23 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:6.10-3
- Resolves: RHEL-64426 - TCP_MISS_ABORTED/100 errors when uploading
* Mon Oct 14 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:6.10-2
- Resolves: RHEL-62323 - (Regression) Transfer-encoding:chunked data is not sent
to the client in its complementary
* Mon Jul 01 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:6.10-1
- new version 6.10
- Resolves: RHEL-45048 - squid: Out-of-bounds write error may lead to Denial of