import UBI squid-5.5-22.el9_7.1

eabdullin 2025-11-11 16:13:55 +00:00
parent 0a1ce507cc
commit 16c407b0ea
4 changed files with 869 additions and 6 deletions

squid-5.5-CVE-2025-62168.patch

@@ -0,0 +1,173 @@
diff --git a/src/HttpRequest.cc b/src/HttpRequest.cc
index 1b4e337..c4ef027 100644
--- a/src/HttpRequest.cc
+++ b/src/HttpRequest.cc
@@ -341,7 +341,7 @@ HttpRequest::swapOut(StoreEntry * e)
/* packs request-line and headers, appends <crlf> terminator */
void
-HttpRequest::pack(Packable * p) const
+HttpRequest::pack(Packable * const p, const bool maskSensitiveInfo) const
{
assert(p);
/* pack request-line */
@@ -349,8 +349,8 @@ HttpRequest::pack(Packable * p) const
SQUIDSBUFPRINT(method.image()), SQUIDSBUFPRINT(url.path()),
http_ver.major, http_ver.minor);
/* headers */
- header.packInto(p);
- /* trailer */
+ header.packInto(p, maskSensitiveInfo);
+ /* indicate the end of the header section */
p->append("\r\n", 2);
}
diff --git a/src/HttpRequest.h b/src/HttpRequest.h
index 9ac47d5..540ac6b 100644
--- a/src/HttpRequest.h
+++ b/src/HttpRequest.h
@@ -206,7 +206,7 @@ public:
void swapOut(StoreEntry * e);
- void pack(Packable * p) const;
+ void pack(Packable * p, bool maskSensitiveInfo = false) const;
static void httpRequestPack(void *obj, Packable *p);
diff --git a/src/cf.data.pre b/src/cf.data.pre
index 61a66f1..5fd9f25 100644
--- a/src/cf.data.pre
+++ b/src/cf.data.pre
@@ -8714,12 +8714,18 @@ NAME: email_err_data
COMMENT: on|off
TYPE: onoff
LOC: Config.onoff.emailErrData
-DEFAULT: on
+DEFAULT: off
DOC_START
If enabled, information about the occurred error will be
included in the mailto links of the ERR pages (if %W is set)
so that the email body contains the data.
Syntax is <A HREF="mailto:%w%W">%w</A>
+
+ SECURITY WARNING:
+ Request headers and other included facts may contain
+ sensitive information about transaction history, the
+ Squid instance, and its environment which would be
+ unavailable to error recipients otherwise.
DOC_END
NAME: deny_info
diff --git a/src/client_side_reply.cc b/src/client_side_reply.cc
index 27746c6..d10a829 100644
--- a/src/client_side_reply.cc
+++ b/src/client_side_reply.cc
@@ -100,7 +100,7 @@ clientReplyContext::clientReplyContext(ClientHttpRequest *clientContext) :
void
clientReplyContext::setReplyToError(
err_type err, Http::StatusCode status, const HttpRequestMethod& method, char const *uri,
- Ip::Address &addr, HttpRequest * failedrequest, const char *unparsedrequest,
+ Ip::Address &addr, HttpRequest * failedrequest, const char *,
#if USE_AUTH
Auth::UserRequest::Pointer auth_user_request
#else
@@ -110,9 +110,6 @@ clientReplyContext::setReplyToError(
{
auto errstate = clientBuildError(err, status, uri, addr, failedrequest, http->al);
- if (unparsedrequest)
- errstate->request_hdrs = xstrdup(unparsedrequest);
-
#if USE_AUTH
errstate->auth_user_request = auth_user_request;
#endif
@@ -1091,11 +1088,15 @@ clientReplyContext::traceReply()
triggerInitialStoreRead();
http->storeEntry()->releaseRequest();
http->storeEntry()->buffer();
+ MemBuf content;
+ content.init();
+ http->request->pack(&content, true /* hide authorization data */);
const HttpReplyPointer rep(new HttpReply);
rep->setHeaders(Http::scOkay, NULL, "text/plain", http->request->prefixLen(), 0, squid_curtime);
+ rep->setHeaders(Http::scOkay, NULL, "message/http", content.contentSize(), 0, squid_curtime);
+ rep->body.set(SBuf(content.buf, content.size));
http->storeEntry()->replaceHttpReply(rep);
- http->request->swapOut(http->storeEntry());
- http->storeEntry()->complete();
+ http->storeEntry()->completeSuccessfully("traceReply() stored the entire response");
}
#define SENDING_BODY 0
diff --git a/src/errorpage.cc b/src/errorpage.cc
index 6fbedbe..9df3864 100644
--- a/src/errorpage.cc
+++ b/src/errorpage.cc
@@ -790,7 +790,6 @@ ErrorState::~ErrorState()
{
safe_free(redirect_url);
safe_free(url);
- safe_free(request_hdrs);
wordlistDestroy(&ftp.server_msg);
safe_free(ftp.request);
safe_free(ftp.reply);
@@ -848,7 +847,10 @@ ErrorState::Dump(MemBuf * mb)
SQUIDSBUFPRINT(request->url.path()),
AnyP::ProtocolType_str[request->http_ver.protocol],
request->http_ver.major, request->http_ver.minor);
- request->header.packInto(&str);
+ MemBuf r;
+ r.init();
+ request->pack(&r, true /* hide authorization data */);
+ str.append(r.content(), r.contentSize());
}
str.append("\r\n", 2);
@@ -1109,18 +1111,10 @@ ErrorState::compileLegacyCode(Build &build)
p = "[no request]";
break;
}
- if (request) {
- mb.appendf(SQUIDSBUFPH " " SQUIDSBUFPH " %s/%d.%d\n",
- SQUIDSBUFPRINT(request->method.image()),
- SQUIDSBUFPRINT(request->url.path()),
- AnyP::ProtocolType_str[request->http_ver.protocol],
- request->http_ver.major, request->http_ver.minor);
- request->header.packInto(&mb, true); //hide authorization data
- } else if (request_hdrs) {
- p = request_hdrs;
- } else {
+ else if (request)
+ request->pack(&mb, true /* hide authorization data */);
+ else
p = "[no request]";
- }
break;
case 's':
diff --git a/src/errorpage.h b/src/errorpage.h
index 4d4c955..e98424b 100644
--- a/src/errorpage.h
+++ b/src/errorpage.h
@@ -192,7 +192,6 @@ public:
MemBuf *listing = nullptr;
} ftp;
- char *request_hdrs = nullptr;
char *err_msg = nullptr; /* Preformatted error message from the cache */
AccessLogEntryPointer ale; ///< transaction details (or nil)
diff --git a/src/tests/stub_HttpRequest.cc b/src/tests/stub_HttpRequest.cc
index 3fdbcca..a6b3551 100644
--- a/src/tests/stub_HttpRequest.cc
+++ b/src/tests/stub_HttpRequest.cc
@@ -45,7 +45,7 @@ bool HttpRequest::expectingBody(const HttpRequestMethod &, int64_t &) const STUB
bool HttpRequest::bodyNibbled() const STUB_RETVAL(false)
int HttpRequest::prefixLen() const STUB_RETVAL(0)
void HttpRequest::swapOut(StoreEntry *) STUB
-void HttpRequest::pack(Packable *) const STUB
+void HttpRequest::pack(Packable *, bool) const STUB
void HttpRequest::httpRequestPack(void *, Packable *) STUB
HttpRequest * HttpRequest::FromUrl(const SBuf &, const MasterXaction::Pointer &, const HttpRequestMethod &) STUB_RETVAL(nullptr)
HttpRequest * HttpRequest::FromUrlXXX(const char *, const MasterXaction::Pointer &, const HttpRequestMethod &) STUB_RETVAL(nullptr)
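
Every caller touched by this patch follows the same pattern: pack the request into a temporary MemBuf with masking enabled, then reuse the buffered copy. A minimal sketch of that pattern, assuming Squid's internal HttpRequest and MemBuf types exactly as used in the hunks above (the helper name packMaskedRequest is illustrative, not part of the patch):

#include "squid.h"
#include "HttpRequest.h"
#include "MemBuf.h"

// illustrative helper (not in the patch): build a printable copy of a request
// with credentials masked, mirroring the ErrorState::Dump() and traceReply()
// call sites above
static void
packMaskedRequest(const HttpRequest &request, MemBuf &out)
{
    out.init();
    request.pack(&out, true /* hide authorization data */);
    // out.content() and out.contentSize() now hold the request line and
    // headers with sensitive header values replaced
}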

squid-5.5-cache-peer-connect-errors.patch

@@ -0,0 +1,426 @@
diff --git a/src/CachePeer.cc b/src/CachePeer.cc
index e6f4b0d..2a43cb4 100644
--- a/src/CachePeer.cc
+++ b/src/CachePeer.cc
@@ -10,6 +10,7 @@
#include "acl/Gadgets.h"
#include "CachePeer.h"
#include "defines.h"
+#include "neighbors.h"
#include "NeighborTypeDomainList.h"
#include "pconn.h"
#include "PeerPoolMgr.h"
@@ -46,3 +47,43 @@ CachePeer::~CachePeer()
xfree(domain);
}
+void
+CachePeer::noteSuccess()
+{
+ if (!tcp_up) {
+ debugs(15, 2, "connection to " << this->host << "/" << this->http_port << " succeeded");
+ tcp_up = connect_fail_limit; // NP: so peerAlive() works properly.
+ peerAlive(this);
+ } else {
+ tcp_up = connect_fail_limit;
+ }
+}
+
+// TODO: Require callers to detail failures instead of using one (and often
+// misleading!) "connection failed" phrase for all of them.
+/// noteFailure() helper for handling failures attributed to this peer
+void
+CachePeer::noteFailure()
+{
+ stats.last_connect_failure = squid_curtime;
+ if (tcp_up > 0)
+ --tcp_up;
+
+ const auto consideredAliveByAdmin = (stats.logged_state == PEER_ALIVE);
+ const auto level = consideredAliveByAdmin ? DBG_IMPORTANT : 2;
+ debugs(15, level, "ERROR: Connection to " << this->host << "/" << this->http_port << " failed");
+
+ if (consideredAliveByAdmin) {
+ if (!tcp_up) {
+ debugs(15, DBG_IMPORTANT, "Detected DEAD " << neighborTypeStr(this) << ": " << name);
+ stats.logged_state = PEER_DEAD;
+ } else {
+ debugs(15, 2, "additional failures needed to mark this cache_peer DEAD: " << tcp_up);
+ }
+ } else {
+ assert(!tcp_up);
+ debugs(15, 2, "cache_peer " << this->host << "/" << this->http_port << " is still DEAD");
+ }
+}
+
+
diff --git a/src/CachePeer.h b/src/CachePeer.h
index edeb048..be753b9 100644
--- a/src/CachePeer.h
+++ b/src/CachePeer.h
@@ -12,6 +12,7 @@
#include "acl/forward.h"
#include "base/CbcPointer.h"
#include "enums.h"
+#include "http/StatusCode.h"
#include "icp_opcode.h"
#include "ip/Address.h"
#include "security/PeerOptions.h"
@@ -32,6 +33,12 @@ public:
CachePeer() = default;
~CachePeer();
+ /// reacts to a successful establishment of a connection to this cache_peer
+ void noteSuccess();
+
+ /// reacts to a failed attempt to establish a connection to this cache_peer
+ void noteFailure();
+
u_int index = 0;
char *name = nullptr;
char *host = nullptr;
@@ -190,7 +197,28 @@ public:
int front_end_https = 0; ///< 0 - off, 1 - on, 2 - auto
int connection_auth = 2; ///< 0 - off, 1 - on, 2 - auto
+
+private:
+ void countFailure();
};
+/// reacts to a successful establishment of a connection to an origin server or cache_peer
+/// \param peer nil if Squid established a connection to an origin server
+inline void
+NoteOutgoingConnectionSuccess(CachePeer * const peer)
+{
+ if (peer)
+ peer->noteSuccess();
+}
+
+/// reacts to a failed attempt to establish a connection to an origin server or cache_peer
+/// \param peer nil if the connection is to an origin server
+inline void
+NoteOutgoingConnectionFailure(CachePeer * const peer)
+{
+ if (peer)
+ peer->noteFailure();
+}
+
#endif /* SQUID_CACHEPEER_H_ */
diff --git a/src/FwdState.cc b/src/FwdState.cc
index c089f82..254b4d5 100644
--- a/src/FwdState.cc
+++ b/src/FwdState.cc
@@ -1083,8 +1083,7 @@ FwdState::successfullyConnectedToPeer(const Comm::ConnectionPointer &conn)
CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
ConnStateData::notePeerConnection, serverConnection());
- if (serverConnection()->getPeer())
- peerConnectSucceded(serverConnection()->getPeer());
+ NoteOutgoingConnectionSuccess(serverConnection()->getPeer());
dispatch();
}
diff --git a/src/FwdState.h b/src/FwdState.h
index 0bcfafd..dea29d5 100644
--- a/src/FwdState.h
+++ b/src/FwdState.h
@@ -30,7 +30,6 @@
class AccessLogEntry;
typedef RefCount<AccessLogEntry> AccessLogEntryPointer;
-class ErrorState;
class HttpRequest;
class PconnPool;
class ResolvedPeers;
diff --git a/src/HappyConnOpener.cc b/src/HappyConnOpener.cc
index 6d83ff1..009e3f7 100644
--- a/src/HappyConnOpener.cc
+++ b/src/HappyConnOpener.cc
@@ -617,8 +617,6 @@ HappyConnOpener::handleConnOpenerAnswer(Attempt &attempt, const CommConnectCbPar
}
debugs(17, 8, what << " failed: " << params.conn);
- if (const auto peer = params.conn->getPeer())
- peerConnectFailed(peer);
// remember the last failure (we forward it if we cannot connect anywhere)
lastFailedConnection = handledPath;
@@ -627,6 +625,8 @@ HappyConnOpener::handleConnOpenerAnswer(Attempt &attempt, const CommConnectCbPar
lastError = makeError(ERR_CONNECT_FAIL);
lastError->xerrno = params.xerrno;
+ NoteOutgoingConnectionFailure(params.conn->getPeer());
+
if (spareWaiting)
updateSpareWaitAfterPrimeFailure();
diff --git a/src/PeerPoolMgr.cc b/src/PeerPoolMgr.cc
index 7423dd6..bd73cb5 100644
--- a/src/PeerPoolMgr.cc
+++ b/src/PeerPoolMgr.cc
@@ -99,7 +99,7 @@ PeerPoolMgr::handleOpenedConnection(const CommConnectCbParams &params)
}
if (params.flag != Comm::OK) {
- peerConnectFailed(peer);
+ NoteOutgoingConnectionFailure(peer);
checkpoint("conn opening failure"); // may retry
return;
}
@@ -148,7 +148,7 @@ PeerPoolMgr::handleSecuredPeer(Security::EncryptorAnswer &answer)
assert(!answer.tunneled);
if (answer.error.get()) {
assert(!answer.conn);
- // PeerConnector calls peerConnectFailed() for us;
+ // PeerConnector calls NoteOutgoingConnectionFailure() for us
checkpoint("conn securing failure"); // may retry
return;
}
diff --git a/src/clients/HttpTunneler.cc b/src/clients/HttpTunneler.cc
index 67a52e6..84c090a 100644
--- a/src/clients/HttpTunneler.cc
+++ b/src/clients/HttpTunneler.cc
@@ -389,8 +389,9 @@ void
Http::Tunneler::countFailingConnection()
{
assert(connection);
- if (const auto p = connection->getPeer())
- peerConnectFailed(p);
+ // No NoteOutgoingConnectionFailure(connection->getPeer()) call here because
+ // we do not blame cache_peer for CONNECT failures (on top of a successfully
+ // established connection to that cache_peer).
if (noteFwdPconnUse && connection->isOpen())
fwdPconnPool->noteUses(fd_table[connection->fd].pconn.uses);
}
diff --git a/src/error/forward.h b/src/error/forward.h
index c105a66..478aac0 100644
--- a/src/error/forward.h
+++ b/src/error/forward.h
@@ -90,6 +90,7 @@ typedef enum {
class Error;
class ErrorDetail;
+class ErrorState;
typedef RefCount<ErrorDetail> ErrorDetailPointer;
diff --git a/src/http/StatusCode.h b/src/http/StatusCode.h
index d9f3620..c093017 100644
--- a/src/http/StatusCode.h
+++ b/src/http/StatusCode.h
@@ -90,6 +90,8 @@ typedef enum {
const char *StatusCodeString(const Http::StatusCode status);
/// whether this is an informational 1xx response status code
inline bool Is1xx(const int sc) { return scContinue <= sc && sc < scOkay; }
+/// whether this is a client error 4xx response status code
+inline bool Is4xx(const int sc) { return scBadRequest <= sc && sc < scInternalServerError; }
/// whether this response status code prohibits sending Content-Length
inline bool ProhibitsContentLength(const StatusCode sc) { return sc == scNoContent || Is1xx(sc); }
diff --git a/src/neighbors.cc b/src/neighbors.cc
index a68d53d..b36b1d0 100644
--- a/src/neighbors.cc
+++ b/src/neighbors.cc
@@ -452,9 +452,6 @@ peerClearRR()
}
}
-/**
- * Perform all actions when a CachePeer is detected revived.
- */
void
peerAlive(CachePeer *p)
{
@@ -468,6 +465,10 @@ peerAlive(CachePeer *p)
p->stats.last_reply = squid_curtime;
p->stats.probe_start = 0;
+
+ // TODO: Remove or explain how we could detect an alive peer without IP addresses
+ if (!p->n_addresses)
+ ipcache_nbgethostbyname(p->host, peerDNSConfigure, p);
}
CachePeer *
@@ -1275,45 +1276,6 @@ peerRefreshDNS(void *data)
eventAddIsh("peerRefreshDNS", peerRefreshDNS, NULL, 3600.0, 1);
}
-static void
-peerConnectFailedSilent(CachePeer * p)
-{
- p->stats.last_connect_failure = squid_curtime;
-
- if (!p->tcp_up) {
- debugs(15, 2, "TCP connection to " << p->host << "/" << p->http_port <<
- " dead");
- return;
- }
-
- -- p->tcp_up;
-
- if (!p->tcp_up) {
- debugs(15, DBG_IMPORTANT, "Detected DEAD " << neighborTypeStr(p) << ": " << p->name);
- p->stats.logged_state = PEER_DEAD;
- }
-}
-
-void
-peerConnectFailed(CachePeer *p)
-{
- debugs(15, DBG_IMPORTANT, "TCP connection to " << p->host << "/" << p->http_port << " failed");
- peerConnectFailedSilent(p);
-}
-
-void
-peerConnectSucceded(CachePeer * p)
-{
- if (!p->tcp_up) {
- debugs(15, 2, "TCP connection to " << p->host << "/" << p->http_port << " succeeded");
- p->tcp_up = p->connect_fail_limit; // NP: so peerAlive(p) works properly.
- peerAlive(p);
- if (!p->n_addresses)
- ipcache_nbgethostbyname(p->host, peerDNSConfigure, p);
- } else
- p->tcp_up = p->connect_fail_limit;
-}
-
/// whether new TCP probes are currently banned
static bool
peerProbeIsBusy(const CachePeer *p)
@@ -1365,11 +1327,10 @@ peerProbeConnectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int
{
CachePeer *p = (CachePeer*)data;
- if (status == Comm::OK) {
- peerConnectSucceded(p);
- } else {
- peerConnectFailedSilent(p);
- }
+ if (status == Comm::OK)
+ p->noteSuccess();
+ else
+ p->noteFailure();
-- p->testing_now;
conn->close();
diff --git a/src/neighbors.h b/src/neighbors.h
index 95f0928..870943d 100644
--- a/src/neighbors.h
+++ b/src/neighbors.h
@@ -49,6 +49,12 @@ CachePeer *getRoundRobinParent(PeerSelector*);
CachePeer *getWeightedRoundRobinParent(PeerSelector*);
void peerClearRRStart(void);
void peerClearRR(void);
+
+// TODO: Move, together with its many dependencies and callers, into CachePeer.
+/// Updates protocol-agnostic CachePeer state after an indication of a
+/// successful contact with the given cache_peer.
+void peerAlive(CachePeer *);
+
lookup_t peerDigestLookup(CachePeer * p, PeerSelector *);
CachePeer *neighborsDigestSelect(PeerSelector *);
void peerNoteDigestLookup(HttpRequest * request, CachePeer * p, lookup_t lookup);
@@ -56,8 +62,6 @@ void peerNoteDigestGone(CachePeer * p);
int neighborUp(const CachePeer * e);
const char *neighborTypeStr(const CachePeer * e);
peer_t neighborType(const CachePeer *, const AnyP::Uri &);
-void peerConnectFailed(CachePeer *);
-void peerConnectSucceded(CachePeer *);
void dump_peer_options(StoreEntry *, CachePeer *);
int peerHTTPOkay(const CachePeer *, PeerSelector *);
diff --git a/src/security/BlindPeerConnector.cc b/src/security/BlindPeerConnector.cc
index d0d1c4a..8c12843 100644
--- a/src/security/BlindPeerConnector.cc
+++ b/src/security/BlindPeerConnector.cc
@@ -70,13 +70,13 @@ Security::BlindPeerConnector::noteNegotiationDone(ErrorState *error)
if (error) {
debugs(83, 5, "error=" << (void*)error);
- // XXX: forward.cc calls peerConnectSucceeded() after an OK TCP connect but
- // we call peerConnectFailed() if SSL failed afterwards. Is that OK?
- // It is not clear whether we should call peerConnectSucceeded/Failed()
+ // XXX: FwdState calls NoteOutgoingConnectionSuccess() after an OK TCP connect, but
+ // we call noteFailure() if SSL failed afterwards. Is that OK?
+ // It is not clear whether we should call noteSuccess()/noteFailure()/etc.
// based on TCP results, SSL results, or both. And the code is probably not
// consistent in this aspect across tunnelling and forwarding modules.
if (peer && peer->secure.encryptTransport)
- peerConnectFailed(peer);
+ peer->noteFailure();
return;
}
diff --git a/src/security/BlindPeerConnector.h b/src/security/BlindPeerConnector.h
index 37e2fc5..3553859 100644
--- a/src/security/BlindPeerConnector.h
+++ b/src/security/BlindPeerConnector.h
@@ -42,8 +42,8 @@ public:
/// Return the configured TLS context object
virtual Security::ContextPointer getTlsContext();
- /// On error calls peerConnectFailed().
- /// On success store the used TLS session for later use.
+ /// On success, stores the used TLS session for later use.
+ /// On error, informs the peer.
virtual void noteNegotiationDone(ErrorState *);
};
diff --git a/src/security/PeerConnector.cc b/src/security/PeerConnector.cc
index 494edab..0b179bb 100644
--- a/src/security/PeerConnector.cc
+++ b/src/security/PeerConnector.cc
@@ -10,6 +10,7 @@
#include "squid.h"
#include "acl/FilledChecklist.h"
+#include "CachePeer.h"
#include "comm/Loops.h"
#include "comm/Read.h"
#include "Downloader.h"
@@ -92,15 +93,17 @@ Security::PeerConnector::commCloseHandler(const CommCloseCbParams &params)
debugs(83, 5, "FD " << params.fd << ", Security::PeerConnector=" << params.data);
closeHandler = nullptr;
+
+ const auto err = new ErrorState(ERR_SECURE_CONNECT_FAIL, Http::scServiceUnavailable, request.getRaw(), al);
+ static const auto d = MakeNamedErrorDetail("TLS_CONNECT_CLOSE");
+ err->detailError(d);
+
if (serverConn) {
countFailingConnection();
serverConn->noteClosure();
serverConn = nullptr;
}
- const auto err = new ErrorState(ERR_SECURE_CONNECT_FAIL, Http::scServiceUnavailable, request.getRaw(), al);
- static const auto d = MakeNamedErrorDetail("TLS_CONNECT_CLOSE");
- err->detailError(d);
bail(err);
}
@@ -510,8 +513,7 @@ void
Security::PeerConnector::countFailingConnection()
{
assert(serverConn);
- if (const auto p = serverConn->getPeer())
- peerConnectFailed(p);
+ NoteOutgoingConnectionFailure(serverConn->getPeer());
// TODO: Calling PconnPool::noteUses() should not be our responsibility.
if (noteFwdPconnUse && serverConn->isOpen())
fwdPconnPool->noteUses(fd_table[serverConn->fd].pconn.uses);
diff --git a/src/security/PeerConnector.h b/src/security/PeerConnector.h
index 10700d7..a913b25 100644
--- a/src/security/PeerConnector.h
+++ b/src/security/PeerConnector.h
@@ -24,7 +24,6 @@
#include <iosfwd>
#include <queue>
-class ErrorState;
class Downloader;
class AccessLogEntry;
typedef RefCount<AccessLogEntry> AccessLogEntryPointer;
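
With peerConnectFailed() and peerConnectSucceded() removed, connection outcomes are reported through CachePeer::noteSuccess()/noteFailure(), or through the nil-tolerant wrappers added to CachePeer.h when the destination may be an origin server. A rough sketch of what a call site looks like after this change (reportConnectOutcome is illustrative, not part of the patch):

#include "squid.h"
#include "CachePeer.h"

// illustrative only: report the outcome of a connect attempt; peer is nil when
// the connection goes to an origin server rather than a cache_peer
static void
reportConnectOutcome(CachePeer * const peer, const bool connected)
{
    if (connected)
        NoteOutgoingConnectionSuccess(peer); // resets tcp_up and may revive a DEAD peer
    else
        NoteOutgoingConnectionFailure(peer); // decrements tcp_up and may mark the peer DEAD
}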

squid-5.5-crash-notedestinationsend.patch

@@ -0,0 +1,244 @@
From 4319f4ce66e6421e5928a96b10752537f27b5388 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= <luhliari@redhat.com>
Date: Thu, 2 Oct 2025 14:40:07 +0200
Subject: [PATCH] Bug 5186: noteDestinationsEnd check failed: transportWait
 (squid-cache#985)

When the "no more destinations to try" notification comes after the last
forwarding/tunneling attempt has failed, the !destinationsFound block does
not run (because destinations were found), the usingDestination() block
does not run (because we are done with that last/failed destination), but
transportWait is false (for the same reason).

Also applied Bug 5090 (master/v6 commit 15bde30) FwdState protections to
tunnel.cc code. Tunnels may continue writing to the client while the
to-server connection is closing or closed, so TunnelStateData can be
potentially exposed to bug 5090 "no server connection but still
transporting" concerns. TunnelStateData never _retries_ successfully
established tunnels and, hence, can (and now does) stop receiving spare
destinations after committedToServer becomes true, but future tunnels
may start reforwarding in many cases, and most of the code does not need
to know about this (temporary?) simplification.

Also re-unified and polished related FwdState and TunnelStateData code,
including fixing lying source code comments and debug messages.
---
src/FwdState.cc | 18 +++++++-----
src/tunnel.cc | 73 ++++++++++++++++++++++++++++++++-----------------
2 files changed, 59 insertions(+), 32 deletions(-)
diff --git a/src/FwdState.cc b/src/FwdState.cc
index 254b4d5..1185ddf 100644
--- a/src/FwdState.cc
+++ b/src/FwdState.cc
@@ -640,7 +640,6 @@ FwdState::noteDestination(Comm::ConnectionPointer path)
if (transporting())
return; // and continue to receive destinations for backup
- // This is the first path candidate we have seen. Use it.
useDestinations();
}
@@ -656,12 +655,8 @@ FwdState::noteDestinationsEnd(ErrorState *selectionError)
Must(!err); // if we tried to connect, then path selection succeeded
fail(selectionError);
}
- else if (err)
- debugs(17, 3, "Will abort forwarding because all found paths have failed.");
- else
- debugs(17, 3, "Will abort forwarding because path selection found no paths.");
- useDestinations(); // will detect and handle the lack of paths
+ stopAndDestroy("path selection found no paths");
return;
}
// else continue to use one of the previously noted destinations;
@@ -674,7 +669,16 @@ FwdState::noteDestinationsEnd(ErrorState *selectionError)
return; // and continue to wait for FwdState::noteConnection() callback
}
- Must(transporting()); // or we would be stuck with nothing to do or wait for
+ if (transporting()) {
+ // We are already using a previously opened connection (but were also
+ // receiving more destinations in case we need to re-forward).
+ debugs(17, 7, "keep transporting");
+ return;
+ }
+
+ // destinationsFound, but none of them worked, and we were waiting for more
+ assert(err);
+ stopAndDestroy("all found paths have failed");
}
/// makes sure connection opener knows that the destinations have changed
diff --git a/src/tunnel.cc b/src/tunnel.cc
index 2d37d2e..b8d2035 100644
--- a/src/tunnel.cc
+++ b/src/tunnel.cc
@@ -97,6 +97,10 @@ public:
return (server.conn != NULL && server.conn->getPeer() ? server.conn->getPeer()->host : request->url.host());
};
+ /// store the given to-server connection; prohibit retries and do not look
+ /// for any other destinations
+ void commitToServer(const Comm::ConnectionPointer &);
+
/// Whether the client sent a CONNECT request to us.
bool clientExpectsConnectResponse() const {
// If we are forcing a tunnel after receiving a client CONNECT, then we
@@ -186,6 +190,10 @@ public:
/// whether another destination may be still attempted if the TCP connection
/// was unexpectedly closed
bool retriable;
+
+ /// whether the decision to tunnel to a particular destination was final
+ bool committedToServer;
+
// TODO: remove after fixing deferred reads in TunnelStateData::copyRead()
CodeContext::Pointer codeContext; ///< our creator context
@@ -263,9 +271,8 @@ private:
/// \returns whether the request should be retried (nil) or the description why it should not
const char *checkRetry();
- /// whether the successfully selected path destination or the established
- /// server connection is still in use
- bool usingDestination() const;
+
+ bool transporting() const;
/// details of the "last tunneling attempt" failure (if it failed)
ErrorState *savedError = nullptr;
@@ -401,6 +408,7 @@ TunnelStateData::TunnelStateData(ClientHttpRequest *clientRequest) :
destinations(new ResolvedPeers()),
destinationsFound(false),
retriable(true),
+ committedToServer(false),
codeContext(CodeContext::Current())
{
debugs(26, 3, "TunnelStateData constructed this=" << this);
@@ -1041,8 +1049,7 @@ void
TunnelStateData::notePeerReadyToShovel(const Comm::ConnectionPointer &conn)
{
assert(!client.dirty);
- retriable = false;
- server.initConnection(conn, tunnelServerClosed, "tunnelServerClosed", this);
+ commitToServer(conn);
if (!clientExpectsConnectResponse())
tunnelStartShoveling(this); // ssl-bumped connection, be quiet
@@ -1057,6 +1064,15 @@ TunnelStateData::notePeerReadyToShovel(const Comm::ConnectionPointer &conn)
}
}
+void
+TunnelStateData::commitToServer(const Comm::ConnectionPointer &conn)
+{
+ committedToServer = true;
+ retriable = false; // may already be false
+ PeerSelectionInitiator::subscribed = false; // may already be false
+ server.initConnection(conn, tunnelServerClosed, "tunnelServerClosed", this);
+}
+
static void
tunnelErrorComplete(int fd/*const Comm::ConnectionPointer &*/, void *data, size_t)
{
@@ -1284,18 +1300,15 @@ TunnelStateData::noteDestination(Comm::ConnectionPointer path)
destinations->addPath(path);
- if (usingDestination()) {
- // We are already using a previously opened connection but also
- // receiving destinations in case we need to re-forward.
- Must(!transportWait);
- return;
- }
-
if (transportWait) {
+ assert(!transporting());
notifyConnOpener();
return; // and continue to wait for tunnelConnectDone() callback
}
+ if (transporting())
+ return; // and continue to receive destinations for backup
+
startConnecting();
}
@@ -1311,8 +1324,9 @@ TunnelStateData::noteDestinationsEnd(ErrorState *selectionError)
if (selectionError)
return sendError(selectionError, "path selection has failed");
+ // TODO: Merge with FwdState and remove this likely unnecessary check.
if (savedError)
- return sendError(savedError, "all found paths have failed");
+ return sendError(savedError, "path selection found no paths (with an impossible early error)");
return sendError(new ErrorState(ERR_CANNOT_FORWARD, Http::scInternalServerError, request.getRaw(), al),
"path selection found no paths");
@@ -1321,21 +1335,32 @@ TunnelStateData::noteDestinationsEnd(ErrorState *selectionError)
// if all of them fail, tunneling as whole will fail
Must(!selectionError); // finding at least one path means selection succeeded
- if (usingDestination()) {
- // We are already using a previously opened connection but also
- // receiving destinations in case we need to re-forward.
- Must(!transportWait);
+ if (transportWait) {
+ assert(!transporting());
+ notifyConnOpener();
+ return; // and continue to wait for the noteConnection() callback
+ }
+
+ if (transporting()) {
+ // We are already using a previously opened connection (but were also
+ // receiving more destinations in case we need to re-forward).
+ debugs(17, 7, "keep transporting");
return;
}
- Must(transportWait); // or we would be stuck with nothing to do or wait for
- notifyConnOpener();
+ // destinationsFound, but none of them worked, and we were waiting for more
+ assert(savedError);
+ // XXX: Honor clientExpectsConnectResponse() before replying.
+ sendError(savedError, "all found paths have failed");
}
+/// Whether a tunneling attempt to some selected destination X is in progress
+/// (after successfully opening/reusing a transport connection to X).
+/// \sa transportWait
bool
-TunnelStateData::usingDestination() const
+TunnelStateData::transporting() const
{
- return encryptionWait || peerWait || Comm::IsConnOpen(server.conn);
+ return encryptionWait || peerWait || committedToServer;
}
/// remembers an error to be used if there will be no more connection attempts
@@ -1394,7 +1419,7 @@ TunnelStateData::startConnecting()
request->hier.startPeerClock();
assert(!destinations->empty());
- assert(!usingDestination());
+ assert(!transporting());
AsyncCall::Pointer callback = asyncCall(17, 5, "TunnelStateData::noteConnection", HappyConnOpener::CbDialer<TunnelStateData>(&TunnelStateData::noteConnection, this));
const auto cs = new HappyConnOpener(destinations, callback, request, startTime, 0, al);
cs->setHost(request->url.host());
@@ -1489,12 +1514,10 @@ switchToTunnel(HttpRequest *request, const Comm::ConnectionPointer &clientConn,
debugs(26, 3, request->method << " " << context->http->uri << " " << request->http_ver);
TunnelStateData *tunnelState = new TunnelStateData(context->http);
- tunnelState->retriable = false;
+ tunnelState->commitToServer(srvConn);
request->hier.resetPeerNotes(srvConn, tunnelState->getHost());
- tunnelState->server.initConnection(srvConn, tunnelServerClosed, "tunnelServerClosed", tunnelState);
-
#if USE_DELAY_POOLS
/* no point using the delayIsNoDelay stuff since tunnel is nice and simple */
if (!srvConn->getPeer() || !srvConn->getPeer()->options.no_delay)
--
2.44.0
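
For readers tracing the control flow, the decision order that FwdState::noteDestinationsEnd() and TunnelStateData::noteDestinationsEnd() now share can be modelled in isolation roughly as follows (a standalone sketch with plain bools standing in for class state; not Squid code):

#include <cassert>
#include <string>

// standalone model of the shared noteDestinationsEnd() decision order;
// the four bools approximate FwdState/TunnelStateData state after the patch
static std::string
noteDestinationsEndOutcome(const bool destinationsFound, const bool transportWait,
                           const bool transporting, const bool haveSavedError)
{
    if (!destinationsFound)
        return "stop: path selection found no paths";
    if (transportWait)
        return "notify the connection opener and keep waiting for its callback";
    if (transporting)
        return "keep transporting over the already established connection";
    // destinations were found, none of them worked, and no more are coming
    assert(haveSavedError);
    return "reply with the saved error: all found paths have failed";
}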

squid.spec

@@ -2,7 +2,7 @@
Name: squid
Version: 5.5
-Release: 19%{?dist}.1
+Release: 22%{?dist}.1
Summary: The Squid proxy caching server
Epoch: 7
# See CREDITS for breakdown of non GPLv2+ code
@@ -52,8 +52,12 @@ Patch210: squid-5.5-ipv6-crash.patch
Patch211: squid-5.5-large-upload-buffer-dies.patch
# https://issues.redhat.com/browse/RHEL-57030
Patch212: squid-5.5-fatal-read-data-from-mem.patch
+# https://issues.redhat.com/browse/RHEL-77282
+Patch213: squid-5.5-cache-peer-connect-errors.patch
# https://issues.redhat.com/browse/RHEL-50261
-Patch213: squid-5.5-store-client-leak-fix.patch
+Patch214: squid-5.5-store-client-leak-fix.patch
+# https://issues.redhat.com/browse/RHEL-77084
+Patch215: squid-5.5-crash-notedestinationsend.patch
# Security patches
# https://bugzilla.redhat.com/show_bug.cgi?id=2100721
@@ -89,6 +93,8 @@ Patch515: squid-5.5-CVE-2024-23638.patch
# Regression caused by squid-5.5-CVE-2023-46846.patch
# Upstream PR: https://github.com/squid-cache/squid/pull/1914
Patch516: squid-5.5-ignore-wsp-after-chunk-size.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2404736
+Patch517: squid-5.5-CVE-2025-62168.patch
# cache_swap.sh
Requires: bash gawk
@@ -163,6 +169,8 @@ lookup program (dnsserver), a program for retrieving FTP data
%patch209 -p1 -b .halfclosed
%patch210 -p1 -b .ipv6-crash
%patch211 -p1 -b .large-upload-buffer-dies
+%patch213 -p1 -b .cache-peer-connect-errors
+%patch215 -p1 -b .notedestinationsend
%patch501 -p1 -b .CVE-2021-46784
%patch502 -p1 -b .CVE-2022-41318
@@ -180,11 +188,12 @@ lookup program (dnsserver), a program for retrieving FTP data
%patch514 -p1 -b .CVE-2024-37894
%patch515 -p1 -b .CVE-2024-23638
%patch516 -p1 -b .ignore-wsp-chunk-sz
+%patch517 -p1 -b .CVE-2025-62168
# patch506 follow-up
%patch212 -p1 -b .fatal-read-data-from-mem
-%patch213 -p1 -b .store-client-mem-leak
+%patch214 -p1 -b .store-client-mem-leak
# https://bugzilla.redhat.com/show_bug.cgi?id=1679526
# Patch in the vendor documentation and used different location for documentation
@@ -411,11 +420,22 @@ fi
%changelog
-* Mon Jul 21 2025 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-19.1
-- Resolves: RHEL-104154 - Squid memory usage increases until OOM.
+* Mon Oct 20 2025 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-22.1
+- Resolves: RHEL-122492 - squid: Squid vulnerable to information disclosure via
+authentication credential leakage in error handling (CVE-2025-62168)
+* Thu Oct 02 2025 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-22
+- Resolves: RHEL-77084 - squid crashes with noteDestinationsEnd check failed
+* Wed Jul 16 2025 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-21
+- Resolves: RHEL-50261 - Squid memory usage increases until OOM.
+* Thu Apr 10 2025 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-20
+- Resolves: RHEL-77282 - ”TCP connection to XX.XX.XX.XX/XXXX failed” message is
+output frequently on RHEL9
* Wed Mar 26 2025 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-19
- Resolves: RHEL-84694 - squid aborts trying to access memory
- Resolves: RHEL-57030 - squid aborts trying to access memory
* Mon Nov 18 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-18
- Resolves: RHEL-67869 - Remove gopher mention from spec file