diff --git a/squid-5.5-crash-notedestinationsend.patch b/squid-5.5-crash-notedestinationsend.patch
new file mode 100644
index 0000000..51fa24c
--- /dev/null
+++ b/squid-5.5-crash-notedestinationsend.patch
@@ -0,0 +1,244 @@
+From 4319f4ce66e6421e5928a96b10752537f27b5388 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?=
+Date: Thu, 2 Oct 2025 14:40:07 +0200
+Subject: [PATCH] Bug 5186: noteDestinationsEnd check failed: transportWait
+ (squid-cache#985) When the "no more destinations to try" notification comes
+ after the last forwarding/tunneling attempt has failed, the
+ !destinationsFound block does not run (because destinations were found), the
+ usingDestination() block does not run (because we are done with that
+ last/failed destination), but transportWait is false (for the same reason).
+
+Also applied Bug 5090 (master/v6 commit 15bde30) FwdState protections to
+tunnel.cc code. Tunnels may continue writing to the client while the
+to-server connection is closing or closed, so TunnelStateData can be
+potentially exposed to bug 5090 "no server connection but still
+transporting" concerns. TunnelStateData never _retries_ successfully
+established tunnels and, hence, can (and now does) stop receiving spare
+destinations after committedToServer becomes true, but future tunnels
+may start reforwarding in many cases, and most of the code does not need
+to know about this (temporary?) simplification.
+
+Also re-unified and polished related FwdState and TunnelStateData code,
+including fixing lying source code comments and debug messages.
+---
+ src/FwdState.cc | 18 +++++++-----
+ src/tunnel.cc   | 73 ++++++++++++++++++++++++++++++++-----------------
+ 2 files changed, 59 insertions(+), 32 deletions(-)
+
+diff --git a/src/FwdState.cc b/src/FwdState.cc
+index 254b4d5..1185ddf 100644
+--- a/src/FwdState.cc
++++ b/src/FwdState.cc
+@@ -640,7 +640,6 @@ FwdState::noteDestination(Comm::ConnectionPointer path)
+     if (transporting())
+         return; // and continue to receive destinations for backup
+ 
+-    // This is the first path candidate we have seen. Use it.
+     useDestinations();
+ }
+ 
+@@ -656,12 +655,8 @@ FwdState::noteDestinationsEnd(ErrorState *selectionError)
+             Must(!err); // if we tried to connect, then path selection succeeded
+             fail(selectionError);
+         }
+-        else if (err)
+-            debugs(17, 3, "Will abort forwarding because all found paths have failed.");
+-        else
+-            debugs(17, 3, "Will abort forwarding because path selection found no paths.");
+ 
+-        useDestinations(); // will detect and handle the lack of paths
++        stopAndDestroy("path selection found no paths");
+         return;
+     }
+     // else continue to use one of the previously noted destinations;
+@@ -674,7 +669,16 @@ FwdState::noteDestinationsEnd(ErrorState *selectionError)
+         return; // and continue to wait for FwdState::noteConnection() callback
+     }
+ 
+-    Must(transporting()); // or we would be stuck with nothing to do or wait for
++    if (transporting()) {
++        // We are already using a previously opened connection (but were also
++        // receiving more destinations in case we need to re-forward.
++ debugs(17, 7, "keep transporting"); ++ return; ++ } ++ ++ // destinationsFound, but none of them worked, and we were waiting for more ++ assert(err); ++ stopAndDestroy("all found paths have failed"); + } + + /// makes sure connection opener knows that the destinations have changed +diff --git a/src/tunnel.cc b/src/tunnel.cc +index 2d37d2e..b8d2035 100644 +--- a/src/tunnel.cc ++++ b/src/tunnel.cc +@@ -97,6 +97,10 @@ public: + return (server.conn != NULL && server.conn->getPeer() ? server.conn->getPeer()->host : request->url.host()); + }; + ++ /// store the given to-server connection; prohibit retries and do not look ++ /// for any other destinations ++ void commitToServer(const Comm::ConnectionPointer &); ++ + /// Whether the client sent a CONNECT request to us. + bool clientExpectsConnectResponse() const { + // If we are forcing a tunnel after receiving a client CONNECT, then we +@@ -186,6 +190,10 @@ public: + /// whether another destination may be still attempted if the TCP connection + /// was unexpectedly closed + bool retriable; ++ ++ /// whether the decision to tunnel to a particular destination was final ++ bool committedToServer; ++ + // TODO: remove after fixing deferred reads in TunnelStateData::copyRead() + CodeContext::Pointer codeContext; ///< our creator context + +@@ -263,9 +271,8 @@ private: + + /// \returns whether the request should be retried (nil) or the description why it should not + const char *checkRetry(); +- /// whether the successfully selected path destination or the established +- /// server connection is still in use +- bool usingDestination() const; ++ ++ bool transporting() const; + + /// details of the "last tunneling attempt" failure (if it failed) + ErrorState *savedError = nullptr; +@@ -401,6 +408,7 @@ TunnelStateData::TunnelStateData(ClientHttpRequest *clientRequest) : + destinations(new ResolvedPeers()), + destinationsFound(false), + retriable(true), ++ committedToServer(false), + codeContext(CodeContext::Current()) + { + debugs(26, 3, "TunnelStateData constructed this=" << this); +@@ -1041,8 +1049,7 @@ void + TunnelStateData::notePeerReadyToShovel(const Comm::ConnectionPointer &conn) + { + assert(!client.dirty); +- retriable = false; +- server.initConnection(conn, tunnelServerClosed, "tunnelServerClosed", this); ++ commitToServer(conn); + + if (!clientExpectsConnectResponse()) + tunnelStartShoveling(this); // ssl-bumped connection, be quiet +@@ -1057,6 +1064,15 @@ TunnelStateData::notePeerReadyToShovel(const Comm::ConnectionPointer &conn) + } + } + ++void ++TunnelStateData::commitToServer(const Comm::ConnectionPointer &conn) ++{ ++ committedToServer = true; ++ retriable = false; // may already be false ++ PeerSelectionInitiator::subscribed = false; // may already be false ++ server.initConnection(conn, tunnelServerClosed, "tunnelServerClosed", this); ++} ++ + static void + tunnelErrorComplete(int fd/*const Comm::ConnectionPointer &*/, void *data, size_t) + { +@@ -1284,18 +1300,15 @@ TunnelStateData::noteDestination(Comm::ConnectionPointer path) + + destinations->addPath(path); + +- if (usingDestination()) { +- // We are already using a previously opened connection but also +- // receiving destinations in case we need to re-forward. 
+-        Must(!transportWait);
+-        return;
+-    }
+-
+     if (transportWait) {
++        assert(!transporting());
+         notifyConnOpener();
+         return; // and continue to wait for tunnelConnectDone() callback
+     }
+ 
++    if (transporting())
++        return; // and continue to receive destinations for backup
++
+     startConnecting();
+ }
+ 
+@@ -1311,8 +1324,9 @@ TunnelStateData::noteDestinationsEnd(ErrorState *selectionError)
+         if (selectionError)
+             return sendError(selectionError, "path selection has failed");
+ 
++        // TODO: Merge with FwdState and remove this likely unnecessary check.
+         if (savedError)
+-            return sendError(savedError, "all found paths have failed");
++            return sendError(savedError, "path selection found no paths (with an impossible early error)");
+ 
+         return sendError(new ErrorState(ERR_CANNOT_FORWARD, Http::scInternalServerError, request.getRaw(), al),
+                          "path selection found no paths");
+@@ -1321,21 +1335,32 @@ TunnelStateData::noteDestinationsEnd(ErrorState *selectionError)
+     // if all of them fail, tunneling as whole will fail
+     Must(!selectionError); // finding at least one path means selection succeeded
+ 
+-    if (usingDestination()) {
+-        // We are already using a previously opened connection but also
+-        // receiving destinations in case we need to re-forward.
+-        Must(!transportWait);
++    if (transportWait) {
++        assert(!transporting());
++        notifyConnOpener();
++        return; // and continue to wait for the noteConnection() callback
++    }
++
++    if (transporting()) {
++        // We are already using a previously opened connection (but were also
++        // receiving more destinations in case we need to re-forward.
++        debugs(17, 7, "keep transporting");
+         return;
+     }
+ 
+-    Must(transportWait); // or we would be stuck with nothing to do or wait for
+-    notifyConnOpener();
++    // destinationsFound, but none of them worked, and we were waiting for more
++    assert(savedError);
++    // XXX: Honor clientExpectsConnectResponse() before replying.
++    sendError(savedError, "all found paths have failed");
+ }
+ 
++/// Whether a tunneling attempt to some selected destination X is in progress
++/// (after successfully opening/reusing a transport connection to X).
++/// \sa transportWait
+ bool
+-TunnelStateData::usingDestination() const
++TunnelStateData::transporting() const
+ {
+-    return encryptionWait || peerWait || Comm::IsConnOpen(server.conn);
++    return encryptionWait || peerWait || committedToServer;
+ }
+ 
+ /// remembers an error to be used if there will be no more connection attempts
+@@ -1394,7 +1419,7 @@ TunnelStateData::startConnecting()
+     request->hier.startPeerClock();
+ 
+     assert(!destinations->empty());
+-    assert(!usingDestination());
++    assert(!transporting());
+     AsyncCall::Pointer callback = asyncCall(17, 5, "TunnelStateData::noteConnection", HappyConnOpener::CbDialer(&TunnelStateData::noteConnection, this));
+     const auto cs = new HappyConnOpener(destinations, callback, request, startTime, 0, al);
+     cs->setHost(request->url.host());
+@@ -1489,12 +1514,10 @@ switchToTunnel(HttpRequest *request, const Comm::ConnectionPointer &clientConn,
+     debugs(26, 3, request->method << " " << context->http->uri << " " << request->http_ver);
+ 
+     TunnelStateData *tunnelState = new TunnelStateData(context->http);
+-    tunnelState->retriable = false;
++    tunnelState->commitToServer(srvConn);
+ 
+     request->hier.resetPeerNotes(srvConn, tunnelState->getHost());
+ 
+-    tunnelState->server.initConnection(srvConn, tunnelServerClosed, "tunnelServerClosed", tunnelState);
+-
+ #if USE_DELAY_POOLS
+     /* no point using the delayIsNoDelay stuff since tunnel is nice and simple */
+     if (!srvConn->getPeer() || !srvConn->getPeer()->options.no_delay)
+-- 
+2.44.0
+
diff --git a/squid.spec b/squid.spec
index 58de82a..87970c8 100644
--- a/squid.spec
+++ b/squid.spec
@@ -2,7 +2,7 @@
 
 Name: squid
 Version: 5.5
-Release: 21%{?dist}
+Release: 22%{?dist}
 Summary: The Squid proxy caching server
 Epoch: 7
 # See CREDITS for breakdown of non GPLv2+ code
@@ -56,6 +56,8 @@ Patch212: squid-5.5-fatal-read-data-from-mem.patch
 Patch213: squid-5.5-cache-peer-connect-errors.patch
 # https://issues.redhat.com/browse/RHEL-50261
 Patch214: squid-5.5-store-client-leak-fix.patch
+# https://issues.redhat.com/browse/RHEL-77084
+Patch215: squid-5.5-crash-notedestinationsend.patch
 
 # Security patches
 # https://bugzilla.redhat.com/show_bug.cgi?id=2100721
@@ -166,6 +168,7 @@ lookup program (dnsserver), a program for retrieving FTP data
 %patch210 -p1 -b .ipv6-crash
 %patch211 -p1 -b .large-upload-buffer-dies
 %patch213 -p1 -b .cache-peer-connect-errors
+%patch215 -p1 -b .notedestinationsend
 
 %patch501 -p1 -b .CVE-2021-46784
 %patch502 -p1 -b .CVE-2022-41318
@@ -414,6 +417,9 @@ fi
 
 
 %changelog
+* Thu Oct 02 2025 Luboš Uhliarik - 7:5.5-22
+- Resolves: RHEL-77084 - squid crashes with noteDestinationsEnd check failed
+
 * Wed Jul 16 2025 Luboš Uhliarik - 7:5.5-21
 - Resolves: RHEL-50261 - Squid memory usage increases until OOM.
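
Reviewer note (not part of the patch or the spec): the sketch below is only an
illustrative, self-contained model of the decision order the patched
noteDestinationsEnd() follows. The struct, its fields, and the main() harness
are simplified stand-ins for Squid's FwdState/TunnelStateData, not real Squid
code.

// Illustrative sketch only -- simplified stand-ins, not Squid's real classes.
#include <cassert>
#include <iostream>
#include <string>

struct Forwarder {
    bool destinationsFound = false; // at least one path candidate was received
    bool transportWait = false;     // a connection attempt is still in flight
    bool encryptionWait = false;    // TLS handshake with the server in progress
    bool peerWait = false;          // waiting for the cache_peer's CONNECT reply
    bool committedToServer = false; // final destination chosen; no more retries
    bool savedError = false;        // the last attempt failed and kept its error

    // Post-patch meaning of "transporting": commitment/handshake state only,
    // independent of whether the server socket happens to still be open.
    bool transporting() const {
        return encryptionWait || peerWait || committedToServer;
    }

    std::string noteDestinationsEnd() const {
        if (!destinationsFound)
            return "abort: path selection found no paths";
        if (transportWait)
            return "wait: tell the connection opener no more paths are coming";
        if (transporting())
            return "keep transporting";
        // destinations were found, none of them worked, and we were waiting for more
        assert(savedError);
        return "abort: all found paths have failed";
    }
};

int main() {
    Forwarder f;
    f.destinationsFound = true;
    f.savedError = true; // the last (and only) attempt already failed
    // Before the patch this state tripped "check failed: transportWait";
    // with the reordered checks it aborts cleanly instead of asserting.
    std::cout << f.noteDestinationsEnd() << "\n";
}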