import CS squid-5.5-21.el9

eabdullin 2025-09-15 12:49:36 +00:00
parent d45156e682
commit 66bd9589fe
4 changed files with 659 additions and 1 deletion

SOURCES/squid-5.5-cache-peer-connect-errors.patch

@@ -0,0 +1,426 @@
diff --git a/src/CachePeer.cc b/src/CachePeer.cc
index e6f4b0d..2a43cb4 100644
--- a/src/CachePeer.cc
+++ b/src/CachePeer.cc
@@ -10,6 +10,7 @@
#include "acl/Gadgets.h"
#include "CachePeer.h"
#include "defines.h"
+#include "neighbors.h"
#include "NeighborTypeDomainList.h"
#include "pconn.h"
#include "PeerPoolMgr.h"
@@ -46,3 +47,43 @@ CachePeer::~CachePeer()
xfree(domain);
}
+void
+CachePeer::noteSuccess()
+{
+ if (!tcp_up) {
+ debugs(15, 2, "connection to " << this->host << "/" << this->http_port << " succeeded");
+ tcp_up = connect_fail_limit; // NP: so peerAlive() works properly.
+ peerAlive(this);
+ } else {
+ tcp_up = connect_fail_limit;
+ }
+}
+
+// TODO: Require callers to detail failures instead of using one (and often
+// misleading!) "connection failed" phrase for all of them.
+/// noteFailure() helper for handling failures attributed to this peer
+void
+CachePeer::noteFailure()
+{
+ stats.last_connect_failure = squid_curtime;
+ if (tcp_up > 0)
+ --tcp_up;
+
+ const auto consideredAliveByAdmin = (stats.logged_state == PEER_ALIVE);
+ const auto level = consideredAliveByAdmin ? DBG_IMPORTANT : 2;
+ debugs(15, level, "ERROR: Connection to " << this->host << "/" << this->http_port << " failed");
+
+ if (consideredAliveByAdmin) {
+ if (!tcp_up) {
+ debugs(15, DBG_IMPORTANT, "Detected DEAD " << neighborTypeStr(this) << ": " << name);
+ stats.logged_state = PEER_DEAD;
+ } else {
+ debugs(15, 2, "additional failures needed to mark this cache_peer DEAD: " << tcp_up);
+ }
+ } else {
+ assert(!tcp_up);
+ debugs(15, 2, "cache_peer " << this->host << "/" << this->http_port << " is still DEAD");
+ }
+}
+
+
diff --git a/src/CachePeer.h b/src/CachePeer.h
index edeb048..be753b9 100644
--- a/src/CachePeer.h
+++ b/src/CachePeer.h
@@ -12,6 +12,7 @@
#include "acl/forward.h"
#include "base/CbcPointer.h"
#include "enums.h"
+#include "http/StatusCode.h"
#include "icp_opcode.h"
#include "ip/Address.h"
#include "security/PeerOptions.h"
@@ -32,6 +33,12 @@ public:
CachePeer() = default;
~CachePeer();
+ /// reacts to a successful establishment of a connection to this cache_peer
+ void noteSuccess();
+
+ /// reacts to a failed attempt to establish a connection to this cache_peer
+ void noteFailure();
+
u_int index = 0;
char *name = nullptr;
char *host = nullptr;
@@ -190,7 +197,28 @@ public:
int front_end_https = 0; ///< 0 - off, 1 - on, 2 - auto
int connection_auth = 2; ///< 0 - off, 1 - on, 2 - auto
+
+private:
+ void countFailure();
};
+/// reacts to a successful establishment of a connection to an origin server or cache_peer
+/// \param peer nil if Squid established a connection to an origin server
+inline void
+NoteOutgoingConnectionSuccess(CachePeer * const peer)
+{
+ if (peer)
+ peer->noteSuccess();
+}
+
+/// reacts to a failed attempt to establish a connection to an origin server or cache_peer
+/// \param peer nil if the connection is to an origin server
+inline void
+NoteOutgoingConnectionFailure(CachePeer * const peer)
+{
+ if (peer)
+ peer->noteFailure();
+}
+
#endif /* SQUID_CACHEPEER_H_ */
diff --git a/src/FwdState.cc b/src/FwdState.cc
index c089f82..254b4d5 100644
--- a/src/FwdState.cc
+++ b/src/FwdState.cc
@@ -1083,8 +1083,7 @@ FwdState::successfullyConnectedToPeer(const Comm::ConnectionPointer &conn)
CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
ConnStateData::notePeerConnection, serverConnection());
- if (serverConnection()->getPeer())
- peerConnectSucceded(serverConnection()->getPeer());
+ NoteOutgoingConnectionSuccess(serverConnection()->getPeer());
dispatch();
}
diff --git a/src/FwdState.h b/src/FwdState.h
index 0bcfafd..dea29d5 100644
--- a/src/FwdState.h
+++ b/src/FwdState.h
@@ -30,7 +30,6 @@
class AccessLogEntry;
typedef RefCount<AccessLogEntry> AccessLogEntryPointer;
-class ErrorState;
class HttpRequest;
class PconnPool;
class ResolvedPeers;
diff --git a/src/HappyConnOpener.cc b/src/HappyConnOpener.cc
index 6d83ff1..009e3f7 100644
--- a/src/HappyConnOpener.cc
+++ b/src/HappyConnOpener.cc
@@ -617,8 +617,6 @@ HappyConnOpener::handleConnOpenerAnswer(Attempt &attempt, const CommConnectCbPar
}
debugs(17, 8, what << " failed: " << params.conn);
- if (const auto peer = params.conn->getPeer())
- peerConnectFailed(peer);
// remember the last failure (we forward it if we cannot connect anywhere)
lastFailedConnection = handledPath;
@@ -627,6 +625,8 @@ HappyConnOpener::handleConnOpenerAnswer(Attempt &attempt, const CommConnectCbPar
lastError = makeError(ERR_CONNECT_FAIL);
lastError->xerrno = params.xerrno;
+ NoteOutgoingConnectionFailure(params.conn->getPeer());
+
if (spareWaiting)
updateSpareWaitAfterPrimeFailure();
diff --git a/src/PeerPoolMgr.cc b/src/PeerPoolMgr.cc
index 7423dd6..bd73cb5 100644
--- a/src/PeerPoolMgr.cc
+++ b/src/PeerPoolMgr.cc
@@ -99,7 +99,7 @@ PeerPoolMgr::handleOpenedConnection(const CommConnectCbParams &params)
}
if (params.flag != Comm::OK) {
- peerConnectFailed(peer);
+ NoteOutgoingConnectionFailure(peer);
checkpoint("conn opening failure"); // may retry
return;
}
@@ -148,7 +148,7 @@ PeerPoolMgr::handleSecuredPeer(Security::EncryptorAnswer &answer)
assert(!answer.tunneled);
if (answer.error.get()) {
assert(!answer.conn);
- // PeerConnector calls peerConnectFailed() for us;
+ // PeerConnector calls NoteOutgoingConnectionFailure() for us
checkpoint("conn securing failure"); // may retry
return;
}
diff --git a/src/clients/HttpTunneler.cc b/src/clients/HttpTunneler.cc
index 67a52e6..84c090a 100644
--- a/src/clients/HttpTunneler.cc
+++ b/src/clients/HttpTunneler.cc
@@ -389,8 +389,9 @@ void
Http::Tunneler::countFailingConnection()
{
assert(connection);
- if (const auto p = connection->getPeer())
- peerConnectFailed(p);
+ // No NoteOutgoingConnectionFailure(connection->getPeer()) call here because
+ // we do not blame cache_peer for CONNECT failures (on top of a successfully
+ // established connection to that cache_peer).
if (noteFwdPconnUse && connection->isOpen())
fwdPconnPool->noteUses(fd_table[connection->fd].pconn.uses);
}
diff --git a/src/error/forward.h b/src/error/forward.h
index c105a66..478aac0 100644
--- a/src/error/forward.h
+++ b/src/error/forward.h
@@ -90,6 +90,7 @@ typedef enum {
class Error;
class ErrorDetail;
+class ErrorState;
typedef RefCount<ErrorDetail> ErrorDetailPointer;
diff --git a/src/http/StatusCode.h b/src/http/StatusCode.h
index d9f3620..c093017 100644
--- a/src/http/StatusCode.h
+++ b/src/http/StatusCode.h
@@ -90,6 +90,8 @@ typedef enum {
const char *StatusCodeString(const Http::StatusCode status);
/// whether this is an informational 1xx response status code
inline bool Is1xx(const int sc) { return scContinue <= sc && sc < scOkay; }
+/// whether this is a client error 4xx response status code
+inline bool Is4xx(const int sc) { return scBadRequest <= sc && sc < scInternalServerError; }
/// whether this response status code prohibits sending Content-Length
inline bool ProhibitsContentLength(const StatusCode sc) { return sc == scNoContent || Is1xx(sc); }
diff --git a/src/neighbors.cc b/src/neighbors.cc
index a68d53d..b36b1d0 100644
--- a/src/neighbors.cc
+++ b/src/neighbors.cc
@@ -452,9 +452,6 @@ peerClearRR()
}
}
-/**
- * Perform all actions when a CachePeer is detected revived.
- */
void
peerAlive(CachePeer *p)
{
@@ -468,6 +465,10 @@ peerAlive(CachePeer *p)
p->stats.last_reply = squid_curtime;
p->stats.probe_start = 0;
+
+ // TODO: Remove or explain how we could detect an alive peer without IP addresses
+ if (!p->n_addresses)
+ ipcache_nbgethostbyname(p->host, peerDNSConfigure, p);
}
CachePeer *
@@ -1275,45 +1276,6 @@ peerRefreshDNS(void *data)
eventAddIsh("peerRefreshDNS", peerRefreshDNS, NULL, 3600.0, 1);
}
-static void
-peerConnectFailedSilent(CachePeer * p)
-{
- p->stats.last_connect_failure = squid_curtime;
-
- if (!p->tcp_up) {
- debugs(15, 2, "TCP connection to " << p->host << "/" << p->http_port <<
- " dead");
- return;
- }
-
- -- p->tcp_up;
-
- if (!p->tcp_up) {
- debugs(15, DBG_IMPORTANT, "Detected DEAD " << neighborTypeStr(p) << ": " << p->name);
- p->stats.logged_state = PEER_DEAD;
- }
-}
-
-void
-peerConnectFailed(CachePeer *p)
-{
- debugs(15, DBG_IMPORTANT, "TCP connection to " << p->host << "/" << p->http_port << " failed");
- peerConnectFailedSilent(p);
-}
-
-void
-peerConnectSucceded(CachePeer * p)
-{
- if (!p->tcp_up) {
- debugs(15, 2, "TCP connection to " << p->host << "/" << p->http_port << " succeeded");
- p->tcp_up = p->connect_fail_limit; // NP: so peerAlive(p) works properly.
- peerAlive(p);
- if (!p->n_addresses)
- ipcache_nbgethostbyname(p->host, peerDNSConfigure, p);
- } else
- p->tcp_up = p->connect_fail_limit;
-}
-
/// whether new TCP probes are currently banned
static bool
peerProbeIsBusy(const CachePeer *p)
@@ -1365,11 +1327,10 @@ peerProbeConnectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int
{
CachePeer *p = (CachePeer*)data;
- if (status == Comm::OK) {
- peerConnectSucceded(p);
- } else {
- peerConnectFailedSilent(p);
- }
+ if (status == Comm::OK)
+ p->noteSuccess();
+ else
+ p->noteFailure();
-- p->testing_now;
conn->close();
diff --git a/src/neighbors.h b/src/neighbors.h
index 95f0928..870943d 100644
--- a/src/neighbors.h
+++ b/src/neighbors.h
@@ -49,6 +49,12 @@ CachePeer *getRoundRobinParent(PeerSelector*);
CachePeer *getWeightedRoundRobinParent(PeerSelector*);
void peerClearRRStart(void);
void peerClearRR(void);
+
+// TODO: Move, together with its many dependencies and callers, into CachePeer.
+/// Updates protocol-agnostic CachePeer state after an indication of a
+/// successful contact with the given cache_peer.
+void peerAlive(CachePeer *);
+
lookup_t peerDigestLookup(CachePeer * p, PeerSelector *);
CachePeer *neighborsDigestSelect(PeerSelector *);
void peerNoteDigestLookup(HttpRequest * request, CachePeer * p, lookup_t lookup);
@@ -56,8 +62,6 @@ void peerNoteDigestGone(CachePeer * p);
int neighborUp(const CachePeer * e);
const char *neighborTypeStr(const CachePeer * e);
peer_t neighborType(const CachePeer *, const AnyP::Uri &);
-void peerConnectFailed(CachePeer *);
-void peerConnectSucceded(CachePeer *);
void dump_peer_options(StoreEntry *, CachePeer *);
int peerHTTPOkay(const CachePeer *, PeerSelector *);
diff --git a/src/security/BlindPeerConnector.cc b/src/security/BlindPeerConnector.cc
index d0d1c4a..8c12843 100644
--- a/src/security/BlindPeerConnector.cc
+++ b/src/security/BlindPeerConnector.cc
@@ -70,13 +70,13 @@ Security::BlindPeerConnector::noteNegotiationDone(ErrorState *error)
if (error) {
debugs(83, 5, "error=" << (void*)error);
- // XXX: forward.cc calls peerConnectSucceeded() after an OK TCP connect but
- // we call peerConnectFailed() if SSL failed afterwards. Is that OK?
- // It is not clear whether we should call peerConnectSucceeded/Failed()
+ // XXX: FwdState calls NoteOutgoingConnectionSuccess() after an OK TCP connect, but
+ // we call noteFailure() if SSL failed afterwards. Is that OK?
+ // It is not clear whether we should call noteSuccess()/noteFailure()/etc.
// based on TCP results, SSL results, or both. And the code is probably not
// consistent in this aspect across tunnelling and forwarding modules.
if (peer && peer->secure.encryptTransport)
- peerConnectFailed(peer);
+ peer->noteFailure();
return;
}
diff --git a/src/security/BlindPeerConnector.h b/src/security/BlindPeerConnector.h
index 37e2fc5..3553859 100644
--- a/src/security/BlindPeerConnector.h
+++ b/src/security/BlindPeerConnector.h
@@ -42,8 +42,8 @@ public:
/// Return the configured TLS context object
virtual Security::ContextPointer getTlsContext();
- /// On error calls peerConnectFailed().
- /// On success store the used TLS session for later use.
+ /// On success, stores the used TLS session for later use.
+ /// On error, informs the peer.
virtual void noteNegotiationDone(ErrorState *);
};
diff --git a/src/security/PeerConnector.cc b/src/security/PeerConnector.cc
index 494edab..0b179bb 100644
--- a/src/security/PeerConnector.cc
+++ b/src/security/PeerConnector.cc
@@ -10,6 +10,7 @@
#include "squid.h"
#include "acl/FilledChecklist.h"
+#include "CachePeer.h"
#include "comm/Loops.h"
#include "comm/Read.h"
#include "Downloader.h"
@@ -92,15 +93,17 @@ Security::PeerConnector::commCloseHandler(const CommCloseCbParams &params)
debugs(83, 5, "FD " << params.fd << ", Security::PeerConnector=" << params.data);
closeHandler = nullptr;
+
+ const auto err = new ErrorState(ERR_SECURE_CONNECT_FAIL, Http::scServiceUnavailable, request.getRaw(), al);
+ static const auto d = MakeNamedErrorDetail("TLS_CONNECT_CLOSE");
+ err->detailError(d);
+
if (serverConn) {
countFailingConnection();
serverConn->noteClosure();
serverConn = nullptr;
}
- const auto err = new ErrorState(ERR_SECURE_CONNECT_FAIL, Http::scServiceUnavailable, request.getRaw(), al);
- static const auto d = MakeNamedErrorDetail("TLS_CONNECT_CLOSE");
- err->detailError(d);
bail(err);
}
@@ -510,8 +513,7 @@ void
Security::PeerConnector::countFailingConnection()
{
assert(serverConn);
- if (const auto p = serverConn->getPeer())
- peerConnectFailed(p);
+ NoteOutgoingConnectionFailure(serverConn->getPeer());
// TODO: Calling PconnPool::noteUses() should not be our responsibility.
if (noteFwdPconnUse && serverConn->isOpen())
fwdPconnPool->noteUses(fd_table[serverConn->fd].pconn.uses);
diff --git a/src/security/PeerConnector.h b/src/security/PeerConnector.h
index 10700d7..a913b25 100644
--- a/src/security/PeerConnector.h
+++ b/src/security/PeerConnector.h
@@ -24,7 +24,6 @@
#include <iosfwd>
#include <queue>
-class ErrorState;
class Downloader;
class AccessLogEntry;
typedef RefCount<AccessLogEntry> AccessLogEntryPointer;

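Taken together, the hunks above (squid-5.5-cache-peer-connect-errors.patch, per the spec file below) turn the free functions peerConnectSucceded()/peerConnectFailed() into CachePeer::noteSuccess()/noteFailure() and tie failure-log verbosity to the admin-visible peer state: a failed connection is reported at DBG_IMPORTANT only while stats.logged_state is still PEER_ALIVE, which is what silences the frequent "TCP connection to XX.XX.XX.XX/XXXX failed" messages of RHEL-77282 once a peer has already been marked DEAD. Below is a minimal, self-contained sketch of that tcp_up accounting; PeerModel, its host string, and the plain stdout logging are illustrative stand-ins, not the actual Squid classes:

#include <iostream>
#include <string>

// Hypothetical model of the patched CachePeer failure accounting
// (the real code lives in src/CachePeer.cc above).
class PeerModel {
public:
    PeerModel(std::string host, int connectFailLimit)
        : host_(std::move(host)), connectFailLimit_(connectFailLimit), tcpUp_(connectFailLimit) {}

    // CachePeer::noteSuccess(): any success restores the full failure budget
    // and revives a dead peer.
    void noteSuccess() {
        if (!tcpUp_) {
            std::cout << "revived: " << host_ << "\n"; // peerAlive() in Squid
            loggedAlive_ = true;
        }
        tcpUp_ = connectFailLimit_;
    }

    // CachePeer::noteFailure(): loud only while the admin still sees the peer
    // as alive; once DEAD has been logged, further failures are debug-level noise.
    void noteFailure() {
        if (tcpUp_ > 0)
            --tcpUp_;
        if (loggedAlive_) {
            std::cout << "ERROR: connection to " << host_ << " failed\n";
            if (!tcpUp_) {
                std::cout << "Detected DEAD peer: " << host_ << "\n";
                loggedAlive_ = false;
            }
        }
        // else: logged_state is already PEER_DEAD; Squid reports at level 2 only
    }

private:
    std::string host_;
    int connectFailLimit_;    // cache_peer connect-fail-limit option
    int tcpUp_;               // failures left before declaring the peer DEAD
    bool loggedAlive_ = true; // models stats.logged_state == PEER_ALIVE
};

int main() {
    PeerModel peer("peer.example.com", 3);
    for (int i = 0; i < 5; ++i)
        peer.noteFailure(); // important-level for the first 3 failures only
    peer.noteSuccess();     // revives the peer and resets the budget
}

In the patched code, a success that revives a dead peer also goes through peerAlive(), which this patch additionally makes re-resolve the peer's addresses via ipcache_nbgethostbyname() when none are known.
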
SOURCES/squid-5.5-fatal-read-data-from-mem.patch

@@ -0,0 +1,48 @@
From 6c29ec591b1c777fc9a66f810f0ce5bc5076bc40 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Tue, 14 Nov 2023 18:40:37 +0000
Subject: [PATCH] Bug 5317: FATAL attempt to read data from memory (#1579)
FATAL: Squid has attempted to read data ... that is not present.
Recent commit 122a6e3 attempted to deliver in-memory response body bytes
to a Store-reading client that requested (at least) response headers.
That optimization relied on the old canReadFromMemory() logic, but that
logic results in false positives when the checked read offset falls into
a gap between stored headers and the first body byte of a Content-Range.
In that case, a false positive leads to a readFromMemory() call and a
FATAL mem_hdr::copy() error.
This workaround disables the above optimization without fixing
canReadFromMemory(). We believe that a readFromMemory() call that comes
right after response headers are delivered to the Store-reading client
will not suffer from the same problem because the client will supply the
read offset of the first body byte, eliminating the false positive.
---
src/store_client.cc | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/src/store_client.cc b/src/store_client.cc
index 13ead49..d8ae028 100644
--- a/src/store_client.cc
+++ b/src/store_client.cc
@@ -415,8 +415,9 @@ store_client::doCopy(StoreEntry *anEntry)
return; // failure
}
- // send any immediately available body bytes even if we also sendHttpHeaders
- if (canReadFromMemory()) {
+ // Send any immediately available body bytes unless we sendHttpHeaders.
+ // TODO: Send those body bytes when we sendHttpHeaders as well.
+ if (!sendHttpHeaders && canReadFromMemory()) {
readFromMemory();
noteNews(); // will sendHttpHeaders (if needed) as well
flags.store_copying = false;
@@ -502,6 +503,7 @@ store_client::canReadFromMemory() const
{
const auto &mem = entry->mem();
const auto memReadOffset = nextHttpReadOffset();
+ // XXX: This (lo <= offset < end) logic does not support Content-Range gaps.
return mem.inmem_lo <= memReadOffset && memReadOffset < mem.endOffset() &&
parsingBuffer->spaceSize();
}

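The hunk above only narrows when readFromMemory() may run; canReadFromMemory() keeps its false positive, hence the XXX comment the patch adds. The sketch below models why a pure bracket check of the form (lo <= offset < end) misfires on a sparse in-memory object; SparseObject and its methods are hypothetical illustrations, not Squid's mem_hdr API:

#include <cassert>
#include <iterator>
#include <map>

// Hypothetical sparse object: byte ranges keyed by start offset. A 206
// (Content-Range) reply can leave a gap between the stored response headers
// and the first stored body byte.
struct SparseObject {
    std::map<long, long> nodes; // start offset -> byte count actually stored

    long lo() const { return nodes.begin()->first; }
    long end() const {
        const auto last = std::prev(nodes.end());
        return last->first + last->second;
    }

    // The unfixed canReadFromMemory() shape: brackets the offset but never
    // checks that any bytes are actually stored at it.
    bool canReadNaive(long off) const { return lo() <= off && off < end(); }

    // A gap-aware check: the offset must land inside some stored node.
    bool canReadActual(long off) const {
        for (const auto &n : nodes)
            if (n.first <= off && off < n.first + n.second)
                return true;
        return false;
    }
};

int main() {
    SparseObject obj;
    obj.nodes[0] = 300;    // stored response headers: [0, 300)
    obj.nodes[1000] = 100; // first Content-Range body bytes: [1000, 1100)

    const long offsetInGap = 300; // first read offset after the headers
    assert(obj.canReadNaive(offsetInGap));   // false positive -> FATAL mem_hdr::copy()
    assert(!obj.canReadActual(offsetInGap)); // nothing is stored in [300, 1000)
}

Per the commit message, the read request issued right after the headers are delivered supplies the offset of the first body byte, so the workaround sidesteps the gap without fixing the range check itself.
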
SOURCES/squid-5.5-store-client-leak-fix.patch

@@ -0,0 +1,163 @@
diff --git a/src/StoreClient.h b/src/StoreClient.h
index 484b413..b523cc0 100644
--- a/src/StoreClient.h
+++ b/src/StoreClient.h
@@ -203,7 +203,7 @@ private:
public:
struct Callback {
- Callback ():callback_handler(NULL), callback_data(NULL) {}
+ Callback() = default;
Callback (STCB *, void *);
@@ -212,8 +212,8 @@ public:
/// delivery to the STCB callback_handler.
bool pending() const;
- STCB *callback_handler;
- void *callback_data;
+ STCB *callback_handler = nullptr; ///< where to deliver the answer
+ CallbackData cbData; ///< the first STCB callback parameter
CodeContextPointer codeContext; ///< Store client context
/// a scheduled asynchronous finishCallback() call (or nil)
diff --git a/src/store_client.cc b/src/store_client.cc
index d8ae028..e7144b4 100644
--- a/src/store_client.cc
+++ b/src/store_client.cc
@@ -188,14 +188,12 @@ store_client::finishCallback()
++answers;
STCB *temphandler = _callback.callback_handler;
- void *cbdata = _callback.callback_data;
- _callback = Callback(NULL, NULL);
+ const auto cbdata = _callback.cbData.validDone();
+ _callback = Callback();
copyInto.data = NULL;
- if (cbdataReferenceValid(cbdata))
+ if (cbdata)
temphandler(cbdata, result);
-
- cbdataReferenceDone(cbdata);
}
store_client::store_client(StoreEntry *e) :
@@ -255,7 +253,7 @@ store_client::copy(StoreEntry * anEntry,
#endif
assert(!_callback.pending());
- _callback = Callback (callback_fn, cbdataReference(data));
+ _callback = Callback(callback_fn, data);
copyInto.data = copyRequest.data;
copyInto.length = copyRequest.length;
copyInto.offset = copyRequest.offset;
@@ -1129,7 +1127,7 @@ store_client::dumpStats(MemBuf * output, int clientNumber) const
if (_callback.pending())
return;
- output->appendf("\tClient #%d, %p\n", clientNumber, _callback.callback_data);
+ output->appendf("\tClient #%d, %p\n", clientNumber, this);
output->appendf("\t\tcopy_offset: %" PRId64 "\n", copyInto.offset);
output->appendf("\t\tcopy_size: %" PRIuSIZE "\n", copyInto.length);
output->append("\t\tflags:", 8);
@@ -1154,7 +1152,7 @@ store_client::Callback::pending() const
store_client::Callback::Callback(STCB *function, void *data):
callback_handler(function),
- callback_data(data),
+ cbData(data),
codeContext(CodeContext::Current())
{
}
diff --git a/src/tunnel.cc b/src/tunnel.cc
index 4fc5abd..2d37d2e 100644
--- a/src/tunnel.cc
+++ b/src/tunnel.cc
@@ -273,6 +273,7 @@ private:
/// resumes operations after the (possibly failed) HTTP CONNECT exchange
void tunnelEstablishmentDone(Http::TunnelerAnswer &answer);
+ void finishWritingAndDelete(Connection &);
void deleteThis();
void cancelStep(const char *reason);
@@ -319,7 +320,9 @@ TunnelStateData::serverClosed()
{
server.noteClosure();
- retryOrBail(__FUNCTION__);
+ request->hier.stopPeerClock(false);
+
+ finishWritingAndDelete(client);
}
/// TunnelStateData::clientClosed() wrapper
@@ -334,12 +337,48 @@ void
TunnelStateData::clientClosed()
{
client.noteClosure();
+ finishWritingAndDelete(server);
+}
+/// Gracefully handles non-retriable connection closure. If necessary, either
+/// starts closing the given connection or waits for its pending write to
+/// finish. Otherwise, immediately destroys the tunnel object.
+/// \prec The other Connection is not open.
+void
+TunnelStateData::finishWritingAndDelete(Connection &remainingConnection)
+{
if (noConnections())
return deleteThis();
- if (!server.writer)
- server.conn->close();
+ // XXX: The code below should precede the noConnections() check above. When
+ // there is no writer, we should trigger an immediate noConnections()
+ // outcome instead of waiting for an asynchronous call to our own closure
+ // callback (that will call this method again). We should not move this code
+ // until a noConnections() outcome guarantees a nil writer because such a
+ move will unnecessarily delay deleteThis().
+
+ if (remainingConnection.writer) {
+ debugs(26, 5, "waiting to finish writing to " << remainingConnection.conn);
+ // the write completion callback must close its remainingConnection
+ // after noticing that the other connection is gone
+ return;
+ }
+
+ // XXX: Stop abusing connection closure callback for terminating tunneling
+ // in cases like this, where our code knows that tunneling must end. The
+ // closure callback should be dedicated to handling rare connection closures
+ // originated _outside_ of TunnelStateData (e.g., during shutdown). In all
+ // other cases, our own close()-calling code must detail the
+ // closure-triggering error (if any) _and_ clear all callbacks: Our code
+ // does not need to be (asynchronously) notified of the closure that it
+ // itself has initiated! Until that (significant) refactoring,
+ // serverClosed() and clientClosed() callbacks will continue to mishandle
+ // those rare closures as regular ones, and access.log records will continue
+ // to lack some tunneling error indicators/details.
+ //
+ // This asynchronous close() leads to another finishWritingAndDelete() call
+ // but with true noConnections() that finally triggers deleteThis().
+ remainingConnection.conn->close();
}
/// destroys the tunnel (after performing potentially-throwing cleanup)
@@ -451,14 +490,7 @@ TunnelStateData::retryOrBail(const char *context)
return sendError(savedError, bailDescription ? bailDescription : context);
*status_ptr = savedError->httpStatus;
- if (noConnections())
- return deleteThis();
-
- // This is a "Comm::IsConnOpen(client.conn) but !canSendError" case.
- // Closing the connection (after finishing writing) is the best we can do.
- if (!client.writer)
- client.conn->close();
- // else writeClientDone() must notice a closed server and close the client
+ finishWritingAndDelete(client);
}
int

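The third patch (squid-5.5-store-client-leak-fix.patch, RHEL-50261 in the spec below) replaces store_client's raw callback_data pointer, which relied on manually paired cbdataReference()/cbdataReferenceDone() calls and leaked a reference whenever _callback was replaced without the Done() half, with a CallbackData member that releases its reference on destruction; it also reroutes tunnel.cc closure handling through the new finishWritingAndDelete(). The sketch below shows the ownership idea using std::weak_ptr as a rough stand-in for cbdata validity checking; CallbackSketch, ClientJob, and this STCB are illustrative, not the actual CallbackData API:

#include <iostream>
#include <memory>

struct ClientJob { int id = 42; };

using STCB = void (*)(ClientJob *, int result);

class CallbackSketch {
public:
    CallbackSketch() = default;
    CallbackSketch(STCB handler, const std::shared_ptr<ClientJob> &data)
        : handler_(handler), data_(data) {}

    bool pending() const { return handler_ != nullptr; }

    // Mirrors finishCallback(): deliver only while the recipient still exists,
    // then clear; clearing drops the reference automatically (the leak fix),
    // instead of relying on the caller to remember cbdataReferenceDone().
    void finish(int result) {
        if (auto job = data_.lock()) // "cbdata is still valid"
            handler_(job.get(), result);
        *this = CallbackSketch();
    }

private:
    STCB handler_ = nullptr;
    std::weak_ptr<ClientJob> data_; // no manual Done() call to forget
};

int main() {
    auto job = std::make_shared<ClientJob>();
    CallbackSketch cb(+[](ClientJob *j, int r) { std::cout << j->id << " got " << r << "\n"; }, job);
    cb.finish(0); // delivered: the recipient is alive

    CallbackSketch stale(+[](ClientJob *, int) { std::cout << "never runs\n"; }, job);
    job.reset();     // recipient destroyed before the answer arrives
    stale.finish(1); // skipped safely; nothing leaks
}
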
SPECS/squid.spec

@@ -2,7 +2,7 @@
Name: squid
Version: 5.5
-Release: 18%{?dist}
+Release: 21%{?dist}
Summary: The Squid proxy caching server
Epoch: 7
# See CREDITS for breakdown of non GPLv2+ code
@@ -50,6 +50,12 @@ Patch209: squid-5.5-halfclosed.patch
Patch210: squid-5.5-ipv6-crash.patch
# https://issues.redhat.com/browse/RHEL-12356
Patch211: squid-5.5-large-upload-buffer-dies.patch
+# https://issues.redhat.com/browse/RHEL-57030
+Patch212: squid-5.5-fatal-read-data-from-mem.patch
+# https://issues.redhat.com/browse/RHEL-77282
+Patch213: squid-5.5-cache-peer-connect-errors.patch
+# https://issues.redhat.com/browse/RHEL-50261
+Patch214: squid-5.5-store-client-leak-fix.patch
# Security patches
# https://bugzilla.redhat.com/show_bug.cgi?id=2100721
@@ -159,6 +165,7 @@ lookup program (dnsserver), a program for retrieving FTP data
%patch209 -p1 -b .halfclosed
%patch210 -p1 -b .ipv6-crash
%patch211 -p1 -b .large-upload-buffer-dies
+%patch213 -p1 -b .cache-peer-connect-errors
%patch501 -p1 -b .CVE-2021-46784
%patch502 -p1 -b .CVE-2022-41318
@@ -177,6 +184,10 @@ lookup program (dnsserver), a program for retrieving FTP data
%patch515 -p1 -b .CVE-2024-23638
%patch516 -p1 -b .ignore-wsp-chunk-sz
+# patch506 follow-up
+%patch212 -p1 -b .fatal-read-data-from-mem
+%patch214 -p1 -b .store-client-mem-leak
# https://bugzilla.redhat.com/show_bug.cgi?id=1679526
# Patch in the vendor documentation and used different location for documentation
@@ -403,6 +414,16 @@ fi
%changelog
+* Wed Jul 16 2025 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-21
+- Resolves: RHEL-50261 - Squid memory usage increases until OOM.
+* Thu Apr 10 2025 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-20
+- Resolves: RHEL-77282 - "TCP connection to XX.XX.XX.XX/XXXX failed" message is
+  output frequently on RHEL9
+* Wed Mar 26 2025 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-19
+- Resolves: RHEL-57030 - squid aborts trying to access memory
* Mon Nov 18 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-18
- Resolves: RHEL-67869 - Remove gopher mention from spec file