From 48497485e934a79fc92a46caa1e1c38eb284cf51 Mon Sep 17 00:00:00 2001 From: eabdullin Date: Tue, 5 Nov 2024 09:07:53 +0000 Subject: [PATCH] import from CS git squid-4.15-10.el8.1 --- .gitignore | 1 + .squid.metadata | 1 + SOURCES/cache_swap.sh | 16 + SOURCES/perl-requires-squid.sh | 3 + SOURCES/squid-4.11-active-ftp.patch | 127 + SOURCES/squid-4.11-config.patch | 27 + SOURCES/squid-4.11-convert-ipv4.patch | 143 + SOURCES/squid-4.11-include-guards.patch | 41 + SOURCES/squid-4.11-large-acl.patch | 178 + SOURCES/squid-4.11-location.patch | 33 + SOURCES/squid-4.11-perlpath.patch | 10 + SOURCES/squid-4.15-CVE-2021-28116.patch | 424 ++ SOURCES/squid-4.15-CVE-2021-46784.patch | 129 + SOURCES/squid-4.15-CVE-2022-41318.patch | 38 + SOURCES/squid-4.15-CVE-2023-46724.patch | 24 + SOURCES/squid-4.15-CVE-2023-46728.patch | 1673 +++++++ SOURCES/squid-4.15-CVE-2023-46846.patch | 1281 +++++ SOURCES/squid-4.15-CVE-2023-46847.patch | 23 + SOURCES/squid-4.15-CVE-2023-49285.patch | 30 + SOURCES/squid-4.15-CVE-2023-49286.patch | 62 + SOURCES/squid-4.15-CVE-2023-50269.patch | 50 + SOURCES/squid-4.15-CVE-2023-5824.patch | 4352 +++++++++++++++++ SOURCES/squid-4.15-CVE-2024-25111.patch | 193 + SOURCES/squid-4.15-CVE-2024-25617.patch | 105 + .../squid-4.15-ftp-filename-extraction.patch | 32 + SOURCES/squid-4.15-halfclosed.patch | 163 + ...uid-4.15-ignore-wsp-after-chunk-size.patch | 367 ++ .../squid-4.15-ip-bind-address-no-port.patch | 156 + SOURCES/squid-4.15.tar.xz.asc | 25 + SOURCES/squid.logrotate | 16 + SOURCES/squid.nm | 7 + SOURCES/squid.pam | 3 + SOURCES/squid.service | 18 + SOURCES/squid.sysconfig | 9 + SPECS/squid.spec | 1798 +++++++ 35 files changed, 11558 insertions(+) create mode 100644 .gitignore create mode 100644 .squid.metadata create mode 100644 SOURCES/cache_swap.sh create mode 100755 SOURCES/perl-requires-squid.sh create mode 100644 SOURCES/squid-4.11-active-ftp.patch create mode 100644 SOURCES/squid-4.11-config.patch create mode 100644 SOURCES/squid-4.11-convert-ipv4.patch create mode 100644 SOURCES/squid-4.11-include-guards.patch create mode 100644 SOURCES/squid-4.11-large-acl.patch create mode 100644 SOURCES/squid-4.11-location.patch create mode 100644 SOURCES/squid-4.11-perlpath.patch create mode 100644 SOURCES/squid-4.15-CVE-2021-28116.patch create mode 100644 SOURCES/squid-4.15-CVE-2021-46784.patch create mode 100644 SOURCES/squid-4.15-CVE-2022-41318.patch create mode 100644 SOURCES/squid-4.15-CVE-2023-46724.patch create mode 100644 SOURCES/squid-4.15-CVE-2023-46728.patch create mode 100644 SOURCES/squid-4.15-CVE-2023-46846.patch create mode 100644 SOURCES/squid-4.15-CVE-2023-46847.patch create mode 100644 SOURCES/squid-4.15-CVE-2023-49285.patch create mode 100644 SOURCES/squid-4.15-CVE-2023-49286.patch create mode 100644 SOURCES/squid-4.15-CVE-2023-50269.patch create mode 100644 SOURCES/squid-4.15-CVE-2023-5824.patch create mode 100644 SOURCES/squid-4.15-CVE-2024-25111.patch create mode 100644 SOURCES/squid-4.15-CVE-2024-25617.patch create mode 100644 SOURCES/squid-4.15-ftp-filename-extraction.patch create mode 100644 SOURCES/squid-4.15-halfclosed.patch create mode 100644 SOURCES/squid-4.15-ignore-wsp-after-chunk-size.patch create mode 100644 SOURCES/squid-4.15-ip-bind-address-no-port.patch create mode 100644 SOURCES/squid-4.15.tar.xz.asc create mode 100644 SOURCES/squid.logrotate create mode 100755 SOURCES/squid.nm create mode 100644 SOURCES/squid.pam create mode 100644 SOURCES/squid.service create mode 100644 SOURCES/squid.sysconfig create mode 100644 SPECS/squid.spec diff --git 
a/.gitignore b/.gitignore new file mode 100644 index 0000000..c378471 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +SOURCES/squid-4.15.tar.xz \ No newline at end of file diff --git a/.squid.metadata b/.squid.metadata new file mode 100644 index 0000000..30ac1f5 --- /dev/null +++ b/.squid.metadata @@ -0,0 +1 @@ +60bda34ba39657e2d870c8c1d2acece8a69c3075 SOURCES/squid-4.15.tar.xz diff --git a/SOURCES/cache_swap.sh b/SOURCES/cache_swap.sh new file mode 100644 index 0000000..5e94072 --- /dev/null +++ b/SOURCES/cache_swap.sh @@ -0,0 +1,16 @@ +#!/bin/bash +if [ -f /etc/sysconfig/squid ]; then + . /etc/sysconfig/squid +fi + +SQUID_CONF=${SQUID_CONF:-"/etc/squid/squid.conf"} + +CACHE_SWAP=`sed -e 's/#.*//g' $SQUID_CONF | \ + grep cache_dir | awk '{ print $3 }'` + +for adir in $CACHE_SWAP; do + if [ ! -d $adir/00 ]; then + echo -n "init_cache_dir $adir... " + squid -N -z -F -f $SQUID_CONF >> /var/log/squid/squid.out 2>&1 + fi +done diff --git a/SOURCES/perl-requires-squid.sh b/SOURCES/perl-requires-squid.sh new file mode 100755 index 0000000..029e7b9 --- /dev/null +++ b/SOURCES/perl-requires-squid.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +/usr/lib/rpm/perl.req $* | grep -v "Authen::Smb" diff --git a/SOURCES/squid-4.11-active-ftp.patch b/SOURCES/squid-4.11-active-ftp.patch new file mode 100644 index 0000000..00a9b56 --- /dev/null +++ b/SOURCES/squid-4.11-active-ftp.patch @@ -0,0 +1,127 @@ +diff --git a/src/clients/FtpClient.cc b/src/clients/FtpClient.cc +index b665bcf..d287e55 100644 +--- a/src/clients/FtpClient.cc ++++ b/src/clients/FtpClient.cc +@@ -778,7 +778,8 @@ Ftp::Client::connectDataChannel() + bool + Ftp::Client::openListenSocket() + { +- return false; ++ debugs(9, 3, HERE); ++ return false; + } + + /// creates a data channel Comm close callback +diff --git a/src/clients/FtpClient.h b/src/clients/FtpClient.h +index a76a5a0..218d696 100644 +--- a/src/clients/FtpClient.h ++++ b/src/clients/FtpClient.h +@@ -118,7 +118,7 @@ public: + bool sendPort(); + bool sendPassive(); + void connectDataChannel(); +- bool openListenSocket(); ++ virtual bool openListenSocket(); + void switchTimeoutToDataChannel(); + + CtrlChannel ctrl; ///< FTP control channel state +diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc +index 411bce9..31d3e36 100644 +--- a/src/clients/FtpGateway.cc ++++ b/src/clients/FtpGateway.cc +@@ -87,6 +87,13 @@ struct GatewayFlags { + class Gateway; + typedef void (StateMethod)(Ftp::Gateway *); + ++} // namespace FTP ++ ++static void ftpOpenListenSocket(Ftp::Gateway * ftpState, int fallback); ++ ++namespace Ftp ++{ ++ + /// FTP Gateway: An FTP client that takes an HTTP request with an ftp:// URI, + /// converts it into one or more FTP commands, and then + /// converts one or more FTP responses into the final HTTP response. +@@ -137,7 +144,11 @@ public: + + /// create a data channel acceptor and start listening. + void listenForDataChannel(const Comm::ConnectionPointer &conn); +- ++ virtual bool openListenSocket() { ++ debugs(9, 3, HERE); ++ ftpOpenListenSocket(this, 0); ++ return Comm::IsConnOpen(data.conn); ++ } + int checkAuth(const HttpHeader * req_hdr); + void checkUrlpath(); + void buildTitleUrl(); +@@ -1787,6 +1798,7 @@ ftpOpenListenSocket(Ftp::Gateway * ftpState, int fallback) + } + + ftpState->listenForDataChannel(temp); ++ ftpState->data.listenConn = temp; + } + + static void +@@ -1822,13 +1834,19 @@ ftpSendPORT(Ftp::Gateway * ftpState) + // pull out the internal IP address bytes to send in PORT command... 
+ // source them from the listen_conn->local + ++ struct sockaddr_in addr; ++ socklen_t addrlen = sizeof(addr); ++ getsockname(ftpState->data.listenConn->fd, (struct sockaddr *) &addr, &addrlen); ++ unsigned char port_high = ntohs(addr.sin_port) >> 8; ++ unsigned char port_low = ntohs(addr.sin_port) & 0xff; ++ + struct addrinfo *AI = NULL; + ftpState->data.listenConn->local.getAddrInfo(AI, AF_INET); + unsigned char *addrptr = (unsigned char *) &((struct sockaddr_in*)AI->ai_addr)->sin_addr; +- unsigned char *portptr = (unsigned char *) &((struct sockaddr_in*)AI->ai_addr)->sin_port; ++ // unsigned char *portptr = (unsigned char *) &((struct sockaddr_in*)AI->ai_addr)->sin_port; + snprintf(cbuf, CTRL_BUFLEN, "PORT %d,%d,%d,%d,%d,%d\r\n", + addrptr[0], addrptr[1], addrptr[2], addrptr[3], +- portptr[0], portptr[1]); ++ port_high, port_low); + ftpState->writeCommand(cbuf); + ftpState->state = Ftp::Client::SENT_PORT; + +@@ -1881,14 +1899,27 @@ ftpSendEPRT(Ftp::Gateway * ftpState) + return; + } + ++ ++ unsigned int port; ++ struct sockaddr_storage addr; ++ socklen_t addrlen = sizeof(addr); ++ getsockname(ftpState->data.listenConn->fd, (struct sockaddr *) &addr, &addrlen); ++ if (addr.ss_family == AF_INET) { ++ struct sockaddr_in *addr4 = (struct sockaddr_in*) &addr; ++ port = ntohs( addr4->sin_port ); ++ } else { ++ struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) &addr; ++ port = ntohs( addr6->sin6_port ); ++ } ++ + char buf[MAX_IPSTRLEN]; + + /* RFC 2428 defines EPRT as IPv6 equivalent to IPv4 PORT command. */ + /* Which can be used by EITHER protocol. */ +- snprintf(cbuf, CTRL_BUFLEN, "EPRT |%d|%s|%d|\r\n", ++ snprintf(cbuf, CTRL_BUFLEN, "EPRT |%d|%s|%u|\r\n", + ( ftpState->data.listenConn->local.isIPv6() ? 2 : 1 ), + ftpState->data.listenConn->local.toStr(buf,MAX_IPSTRLEN), +- ftpState->data.listenConn->local.port() ); ++ port); + + ftpState->writeCommand(cbuf); + ftpState->state = Ftp::Client::SENT_EPRT; +@@ -1907,7 +1938,7 @@ ftpReadEPRT(Ftp::Gateway * ftpState) + ftpSendPORT(ftpState); + return; + } +- ++ ftpState->ctrl.message = NULL; + ftpRestOrList(ftpState); + } + diff --git a/SOURCES/squid-4.11-config.patch b/SOURCES/squid-4.11-config.patch new file mode 100644 index 0000000..9b0e2d8 --- /dev/null +++ b/SOURCES/squid-4.11-config.patch @@ -0,0 +1,27 @@ +diff --git a/src/cf.data.pre b/src/cf.data.pre +index 26ef576..30d5509 100644 +--- a/src/cf.data.pre ++++ b/src/cf.data.pre +@@ -5006,7 +5006,7 @@ DOC_END + + NAME: logfile_rotate + TYPE: int +-DEFAULT: 10 ++DEFAULT: 0 + LOC: Config.Log.rotateNumber + DOC_START + Specifies the default number of logfile rotations to make when you +@@ -6857,11 +6857,11 @@ COMMENT_END + + NAME: cache_mgr + TYPE: string +-DEFAULT: webmaster ++DEFAULT: root + LOC: Config.adminEmail + DOC_START + Email-address of local cache manager who will receive +- mail if the cache dies. The default is "webmaster". ++ mail if the cache dies. The default is "root". + DOC_END + + NAME: mail_from diff --git a/SOURCES/squid-4.11-convert-ipv4.patch b/SOURCES/squid-4.11-convert-ipv4.patch new file mode 100644 index 0000000..5198f35 --- /dev/null +++ b/SOURCES/squid-4.11-convert-ipv4.patch @@ -0,0 +1,143 @@ +From 771908d313ee9c255adfb5e4fdba4d6797c18409 Mon Sep 17 00:00:00 2001 +From: Amos Jeffries +Date: Thu, 7 Mar 2019 13:50:38 +0000 +Subject: [PATCH] Bug 4928: Cannot convert non-IPv4 to IPv4 (#379) + +... when reaching client_ip_max_connections + +The client_ip_max_connections limit is checked before the TCP dst-IP is located for the newly received TCP connection. 
This leaves Squid unable to fetch the NFMARK or similar +details later on (they do not exist for [::]). + +Move client_ip_max_connections test later in the TCP accept process to ensure dst-IP is known when the error is produced. +--- + src/comm/TcpAcceptor.cc | 82 ++++++++++++++++++++--------------------- + 1 file changed, 39 insertions(+), 43 deletions(-) + +diff --git a/src/comm/TcpAcceptor.cc b/src/comm/TcpAcceptor.cc +index d4b576d..936aa30 100644 +--- a/src/comm/TcpAcceptor.cc ++++ b/src/comm/TcpAcceptor.cc +@@ -282,7 +282,16 @@ Comm::TcpAcceptor::acceptOne() + ConnectionPointer newConnDetails = new Connection(); + const Comm::Flag flag = oldAccept(newConnDetails); + +- if (flag == Comm::COMM_ERROR) { ++ /* Check for errors */ ++ if (!newConnDetails->isOpen()) { ++ ++ if (flag == Comm::NOMESSAGE) { ++ /* register interest again */ ++ debugs(5, 5, HERE << "try later: " << conn << " handler Subscription: " << theCallSub); ++ SetSelect(conn->fd, COMM_SELECT_READ, doAccept, this, 0); ++ return; ++ } ++ + // A non-recoverable error; notify the caller */ + debugs(5, 5, HERE << "non-recoverable error:" << status() << " handler Subscription: " << theCallSub); + if (intendedForUserConnections()) +@@ -292,16 +301,12 @@ Comm::TcpAcceptor::acceptOne() + return; + } + +- if (flag == Comm::NOMESSAGE) { +- /* register interest again */ +- debugs(5, 5, "try later: " << conn << " handler Subscription: " << theCallSub); +- } else { +- debugs(5, 5, "Listener: " << conn << +- " accepted new connection " << newConnDetails << +- " handler Subscription: " << theCallSub); +- notify(flag, newConnDetails); +- } ++ newConnDetails->nfmark = Ip::Qos::getNfmarkFromConnection(newConnDetails, Ip::Qos::dirAccepted); + ++ debugs(5, 5, HERE << "Listener: " << conn << ++ " accepted new connection " << newConnDetails << ++ " handler Subscription: " << theCallSub); ++ notify(flag, newConnDetails); + SetSelect(conn->fd, COMM_SELECT_READ, doAccept, this, 0); + } + +@@ -341,8 +346,8 @@ Comm::TcpAcceptor::notify(const Comm::Flag flag, const Comm::ConnectionPointer & + * + * \retval Comm::OK success. details parameter filled. + * \retval Comm::NOMESSAGE attempted accept() but nothing useful came in. +- * Or this client has too many connections already. + * \retval Comm::COMM_ERROR an outright failure occurred. ++ * Or this client has too many connections already. 
+ */ + Comm::Flag + Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details) +@@ -383,6 +388,15 @@ Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details) + + details->remote = *gai; + ++ if ( Config.client_ip_max_connections >= 0) { ++ if (clientdbEstablished(details->remote, 0) > Config.client_ip_max_connections) { ++ debugs(50, DBG_IMPORTANT, "WARNING: " << details->remote << " attempting more than " << Config.client_ip_max_connections << " connections."); ++ Ip::Address::FreeAddr(gai); ++ PROF_stop(comm_accept); ++ return Comm::COMM_ERROR; ++ } ++ } ++ + // lookup the local-end details of this new connection + Ip::Address::InitAddr(gai); + details->local.setEmpty(); +@@ -396,6 +410,23 @@ Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details) + details->local = *gai; + Ip::Address::FreeAddr(gai); + ++ /* fdstat update */ ++ fdd_table[sock].close_file = NULL; ++ fdd_table[sock].close_line = 0; ++ ++ fde *F = &fd_table[sock]; ++ details->remote.toStr(F->ipaddr,MAX_IPSTRLEN); ++ F->remote_port = details->remote.port(); ++ F->local_addr = details->local; ++ F->sock_family = details->local.isIPv6()?AF_INET6:AF_INET; ++ ++ // set socket flags ++ commSetCloseOnExec(sock); ++ commSetNonBlocking(sock); ++ ++ /* IFF the socket is (tproxy) transparent, pass the flag down to allow spoofing */ ++ F->flags.transparent = fd_table[conn->fd].flags.transparent; // XXX: can we remove this line yet? ++ + // Perform NAT or TPROXY operations to retrieve the real client/dest IP addresses + if (conn->flags&(COMM_TRANSPARENT|COMM_INTERCEPTION) && !Ip::Interceptor.Lookup(details, conn)) { + debugs(50, DBG_IMPORTANT, "ERROR: NAT/TPROXY lookup failed to locate original IPs on " << details); +@@ -414,33 +445,6 @@ Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details) + } + #endif + +- details->nfmark = Ip::Qos::getNfmarkFromConnection(details, Ip::Qos::dirAccepted); +- +- if (Config.client_ip_max_connections >= 0) { +- if (clientdbEstablished(details->remote, 0) > Config.client_ip_max_connections) { +- debugs(50, DBG_IMPORTANT, "WARNING: " << details->remote << " attempting more than " << Config.client_ip_max_connections << " connections."); +- PROF_stop(comm_accept); +- return Comm::NOMESSAGE; +- } +- } +- +- /* fdstat update */ +- fdd_table[sock].close_file = NULL; +- fdd_table[sock].close_line = 0; +- +- fde *F = &fd_table[sock]; +- details->remote.toStr(F->ipaddr,MAX_IPSTRLEN); +- F->remote_port = details->remote.port(); +- F->local_addr = details->local; +- F->sock_family = details->local.isIPv6()?AF_INET6:AF_INET; +- +- // set socket flags +- commSetCloseOnExec(sock); +- commSetNonBlocking(sock); +- +- /* IFF the socket is (tproxy) transparent, pass the flag down to allow spoofing */ +- F->flags.transparent = fd_table[conn->fd].flags.transparent; // XXX: can we remove this line yet? +- + PROF_stop(comm_accept); + return Comm::OK; + } diff --git a/SOURCES/squid-4.11-include-guards.patch b/SOURCES/squid-4.11-include-guards.patch new file mode 100644 index 0000000..fb96c57 --- /dev/null +++ b/SOURCES/squid-4.11-include-guards.patch @@ -0,0 +1,41 @@ +diff --git a/compat/os/linux.h b/compat/os/linux.h +index 0ff05c6..d51389b 100644 +--- a/compat/os/linux.h ++++ b/compat/os/linux.h +@@ -44,6 +44,36 @@ + #include + #endif + ++/* ++ * Netfilter header madness. (see Bug 4323) ++ * ++ * Netfilter have a history of defining their own versions of network protocol ++ * primitives without sufficient protection against the POSIX defines which are ++ * aways present in Linux. 
++ *
++ * netinet/in.h must be included before any other sys header in order to properly
++ * activate include guards in <linux/libc-compat.h> the kernel maintainers added
++ * to workaround it.
++ */
++#if HAVE_NETINET_IN_H
++#include <netinet/in.h>
++#endif
++
++/*
++ * Netfilter header madness. (see Bug 4323)
++ *
++ * Netfilter have a history of defining their own versions of network protocol
++ * primitives without sufficient protection against the POSIX defines which are
++ * aways present in Linux.
++ *
++ * netinet/in.h must be included before any other sys header in order to properly
++ * activate include guards in <linux/libc-compat.h> the kernel maintainers added
++ * to workaround it.
++ */
++#if HAVE_NETINET_IN_H
++#include <netinet/in.h>
++#endif
++
+ /*
+  * sys/capability.h is only needed in Linux apparently.
+  *
diff --git a/SOURCES/squid-4.11-large-acl.patch b/SOURCES/squid-4.11-large-acl.patch
new file mode 100644
index 0000000..8aacf38
--- /dev/null
+++ b/SOURCES/squid-4.11-large-acl.patch
@@ -0,0 +1,178 @@
+diff --git a/src/acl/RegexData.cc b/src/acl/RegexData.cc
+index 01a4c12..b5c1679 100644
+--- a/src/acl/RegexData.cc
++++ b/src/acl/RegexData.cc
+@@ -22,6 +22,7 @@
+ #include "ConfigParser.h"
+ #include "Debug.h"
+ #include "sbuf/List.h"
++#include "sbuf/Algorithms.h"
+ 
+ ACLRegexData::~ACLRegexData()
+ {
+@@ -129,6 +130,18 @@ compileRE(std::list<RegexPattern> &curlist, const char * RE, int flags)
+     return true;
+ }
+ 
++static bool
++compileRE(std::list<RegexPattern> &curlist, const SBufList &RE, int flags)
++{
++    if (RE.empty())
++        return curlist.empty(); // XXX: old code did this. It looks wrong.
++    SBuf regexp;
++    static const SBuf openparen("("), closeparen(")"), separator(")|(");
++    JoinContainerIntoSBuf(regexp, RE.begin(), RE.end(), separator, openparen,
++                          closeparen);
++    return compileRE(curlist, regexp.c_str(), flags);
++}
++
+ /** Compose and compile one large RE from a set of (small) REs.
+  * The ultimate goal is to have only one RE per ACL so that match() is
+  * called only once per ACL.
+@@ -137,16 +150,11 @@ static int
+ compileOptimisedREs(std::list<RegexPattern> &curlist, const SBufList &sl)
+ {
+     std::list<RegexPattern> newlist;
+-    int numREs = 0;
++    SBufList accumulatedRE;
++    int numREs = 0, reSize = 0;
+     int flags = REG_EXTENDED | REG_NOSUB;
+-    int largeREindex = 0;
+-    char largeRE[BUFSIZ];
+-    *largeRE = 0;
+ 
+     for (const SBuf & configurationLineWord : sl) {
+-        int RElen;
+-        RElen = configurationLineWord.length();
+-
+         static const SBuf minus_i("-i");
+         static const SBuf plus_i("+i");
+         if (configurationLineWord == minus_i) {
+@@ -155,10 +163,11 @@ compileOptimisedREs(std::list<RegexPattern> &curlist, const SBufList &sl)
+                 debugs(28, 2, "optimisation of -i ... -i" );
+             } else {
+                 debugs(28, 2, "-i" );
+-                if (!compileRE(newlist, largeRE, flags))
++                if (!compileRE(newlist, accumulatedRE, flags))
+                     return 0;
+                 flags |= REG_ICASE;
+-                largeRE[largeREindex=0] = '\0';
++                accumulatedRE.clear();
++                reSize = 0;
+             }
+         } else if (configurationLineWord == plus_i) {
+             if ((flags & REG_ICASE) == 0) {
+@@ -166,37 +175,34 @@ compileOptimisedREs(std::list<RegexPattern> &curlist, const SBufList &sl)
+                 debugs(28, 2, "optimisation of +i ... +i");
+             } else {
+                 debugs(28, 2, "+i");
+-                if (!compileRE(newlist, largeRE, flags))
++                if (!compileRE(newlist, accumulatedRE, flags))
+                     return 0;
+                 flags &= ~REG_ICASE;
+-                largeRE[largeREindex=0] = '\0';
++                accumulatedRE.clear();
++                reSize = 0;
+             }
+-        } else if (RElen + largeREindex + 3 < BUFSIZ-1) {
++        } else if (reSize < 1024) {
+             debugs(28, 2, "adding RE '" << configurationLineWord << "'");
+-            if (largeREindex > 0) {
+-                largeRE[largeREindex] = '|';
+-                ++largeREindex;
+-            }
+-            largeRE[largeREindex] = '(';
+-            ++largeREindex;
+-            configurationLineWord.copy(largeRE+largeREindex, BUFSIZ-largeREindex);
+-            largeREindex += configurationLineWord.length();
+-            largeRE[largeREindex] = ')';
+-            ++largeREindex;
+-            largeRE[largeREindex] = '\0';
++            accumulatedRE.push_back(configurationLineWord);
+             ++numREs;
++            reSize += configurationLineWord.length();
+         } else {
+             debugs(28, 2, "buffer full, generating new optimised RE..." );
++            accumulatedRE.push_back(configurationLineWord);
+-            if (!compileRE(newlist, largeRE, flags))
++            if (!compileRE(newlist, accumulatedRE, flags))
+                 return 0;
+-            largeRE[largeREindex=0] = '\0';
++            accumulatedRE.clear();
++            reSize = 0;
+             continue;    /* do the loop again to add the RE to largeRE */
+         }
+     }
+ 
+-    if (!compileRE(newlist, largeRE, flags))
++    if (!compileRE(newlist, accumulatedRE, flags))
+         return 0;
+ 
++    accumulatedRE.clear();
++    reSize = 0;
++
+     /* all was successful, so put the new list at the tail */
+     curlist.splice(curlist.end(), newlist);
+ 
+diff --git a/src/sbuf/Algorithms.h b/src/sbuf/Algorithms.h
+index 21ee889..338e9c0 100644
+--- a/src/sbuf/Algorithms.h
++++ b/src/sbuf/Algorithms.h
+@@ -81,6 +81,57 @@ SBufContainerJoin(const Container &items, const SBuf& separator)
+     return rv;
+ }
+ 
++/** Join container of SBufs and append to supplied target
++ *
++ * append to the target SBuf all elements in the [begin,end) range from
++ * an iterable container, prefixed by prefix, separated by separator and
++ * followed by suffix. Prefix and suffix are added also in case of empty
++ * iterable
++ *
++ * \return the modified dest
++ */
++template <class ContainerIterator>
++SBuf&
++JoinContainerIntoSBuf(SBuf &dest, const ContainerIterator &begin,
++                      const ContainerIterator &end, const SBuf& separator,
++                      const SBuf& prefix = SBuf(), const SBuf& suffix = SBuf())
++{
++    if (begin == end) {
++        dest.append(prefix).append(suffix);
++        return dest;
++    }
++
++    // optimization: pre-calculate needed storage
++    const SBuf::size_type totalContainerSize =
++        std::accumulate(begin, end, 0, SBufAddLength(separator)) +
++        dest.length() + prefix.length() + suffix.length();
++    SBufReservationRequirements req;
++    req.minSpace = totalContainerSize;
++    dest.reserve(req);
++
++    auto i = begin;
++    dest.append(prefix);
++    dest.append(*i);
++    ++i;
++    for (; i != end; ++i)
++        dest.append(separator).append(*i);
++    dest.append(suffix);
++    return dest;
++}
++
++
++/// convenience wrapper of JoinContainerIntoSBuf with no caller-supplied SBuf
++template <class ContainerIterator>
++SBuf
++JoinContainerToSBuf(const ContainerIterator &begin,
++                    const ContainerIterator &end, const SBuf& separator,
++                    const SBuf& prefix = SBuf(), const SBuf& suffix = SBuf())
++{
++    SBuf rv;
++    return JoinContainerIntoSBuf(rv, begin, end, separator, prefix, suffix);
++}
++
++
+ namespace std {
+ /// default hash functor to support std::unordered_map
+ template <>
diff --git a/SOURCES/squid-4.11-location.patch b/SOURCES/squid-4.11-location.patch
new file mode 100644
index 0000000..e33e9a0
--- /dev/null
+++ b/SOURCES/squid-4.11-location.patch
@@ -0,0 +1,33 @@
+diff --git a/QUICKSTART b/QUICKSTART
+index e5299b4..a243437 100644
+--- a/QUICKSTART
++++ b/QUICKSTART
+@@ -10,10 +10,9 @@ After you retrieved, compiled and installed the Squid software (see
+ INSTALL in the same directory), you have to configure the squid.conf
+ file. This is the list of the values you *need* to change, because no
+ sensible defaults could be defined. Do not touch the other variables
+-for now. We assume you have installed Squid in the default location:
+-/usr/local/squid
++for now.
+ 
+-Uncomment and edit the following lines in /usr/local/squid/etc/squid.conf:
++Uncomment and edit the following lines in /etc/squid/squid.conf:
+ 
+ ==============================================================================
+ 
+@@ -80,12 +79,12 @@ After editing squid.conf to your liking, run Squid from the command
+ line TWICE:
+ 
+ To create any disk cache_dir configured:
+- % /usr/local/squid/sbin/squid -z
++ % /usr/sbin/squid -z
+ 
+ To start squid:
+- % /usr/local/squid/sbin/squid
++ % /usr/sbin/squid
+ 
+-Check in the cache.log (/usr/local/squid/var/logs/cache.log) that
++Check in the cache.log (/var/log/squid/cache.log) that
+ everything is all right.
+ + Once Squid created all its files (it can take several minutes on some diff --git a/SOURCES/squid-4.11-perlpath.patch b/SOURCES/squid-4.11-perlpath.patch new file mode 100644 index 0000000..9e7fbbc --- /dev/null +++ b/SOURCES/squid-4.11-perlpath.patch @@ -0,0 +1,10 @@ +diff --git a/contrib/url-normalizer.pl b/contrib/url-normalizer.pl +index 90ac6a4..8dbed90 100755 +--- a/contrib/url-normalizer.pl ++++ b/contrib/url-normalizer.pl +@@ -1,4 +1,4 @@ +-#!/usr/local/bin/perl -Tw ++#!/usr/bin/perl -Tw + # + # * Copyright (C) 1996-2021 The Squid Software Foundation and contributors + # * diff --git a/SOURCES/squid-4.15-CVE-2021-28116.patch b/SOURCES/squid-4.15-CVE-2021-28116.patch new file mode 100644 index 0000000..116a520 --- /dev/null +++ b/SOURCES/squid-4.15-CVE-2021-28116.patch @@ -0,0 +1,424 @@ +commit b003a0da7865caa25b5d1e70c79329b32409b02a (HEAD -> refs/heads/v4, refs/remotes/origin/v4) +Author: Amos Jeffries +Date: 2021-09-24 21:53:11 +0000 + + WCCP: Validate packets better (#899) + + Update WCCP to support exception based error handling for + parsing and processing we are moving Squid to for protocol + handling. + + Update the main WCCPv2 parsing checks to throw meaningful + exceptions when detected. + +diff --git a/src/wccp2.cc b/src/wccp2.cc +index ee592449c..6ef469e91 100644 +--- a/src/wccp2.cc ++++ b/src/wccp2.cc +@@ -1108,6 +1108,59 @@ wccp2ConnectionClose(void) + * Functions for handling the requests. + */ + ++/// Checks that the given area section ends inside the given (whole) area. ++/// \param error the message to throw when the section does not fit ++static void ++CheckSectionLength(const void *sectionStart, const size_t sectionLength, const void *wholeStart, const size_t wholeSize, const char *error) ++{ ++ assert(sectionStart); ++ assert(wholeStart); ++ ++ const auto wholeEnd = static_cast(wholeStart) + wholeSize; ++ assert(sectionStart >= wholeStart && "we never go backwards"); ++ assert(sectionStart <= wholeEnd && "we never go beyond our whole (but zero-sized fields are OK)"); ++ static_assert(sizeof(wccp2_i_see_you_t) <= PTRDIFF_MAX, "paranoid: no UB when subtracting in-whole pointers"); ++ // subtraction safe due to the three assertions above ++ const auto remainderDiff = wholeEnd - static_cast(sectionStart); ++ ++ // casting safe due to the assertions above (and size_t definition) ++ assert(remainderDiff >= 0); ++ const auto remainderSize = static_cast(remainderDiff); ++ ++ if (sectionLength <= remainderSize) ++ return; ++ ++ throw TextException(error, Here()); ++} ++ ++/// Checks that the area contains at least dataLength bytes after the header. ++/// The size of the field header itself is not included in dataLength. ++/// \returns the total field size -- the field header and field data combined ++template ++static size_t ++CheckFieldDataLength(const FieldHeader *header, const size_t dataLength, const void *areaStart, const size_t areaSize, const char *error) ++{ ++ assert(header); ++ const auto dataStart = reinterpret_cast(header) + sizeof(header); ++ CheckSectionLength(dataStart, dataLength, areaStart, areaSize, error); ++ return sizeof(header) + dataLength; // no overflow after CheckSectionLength() ++} ++ ++/// Positions the given field at a given start within a given packet area. ++/// The Field type determines the correct field size (used for bounds checking). 
++/// \param field the field pointer the function should set ++/// \param areaStart the start of a packet (sub)structure containing the field ++/// \param areaSize the size of the packet (sub)structure starting at areaStart ++/// \param fieldStart the start of a field within the given area ++/// \param error the message to throw when the field does not fit the area ++template ++static void ++SetField(Field *&field, const void *fieldStart, const void *areaStart, const size_t areaSize, const char *error) ++{ ++ CheckSectionLength(fieldStart, sizeof(Field), areaStart, areaSize, error); ++ field = static_cast(const_cast(fieldStart)); ++} ++ + /* + * Accept the UDP packet + */ +@@ -1124,8 +1177,6 @@ wccp2HandleUdp(int sock, void *) + + /* These structs form the parts of the packet */ + +- struct wccp2_item_header_t *header = NULL; +- + struct wccp2_security_none_t *security_info = NULL; + + struct wccp2_service_info_t *service_info = NULL; +@@ -1141,14 +1192,13 @@ wccp2HandleUdp(int sock, void *) + struct wccp2_cache_identity_info_t *cache_identity = NULL; + + struct wccp2_capability_info_header_t *router_capability_header = NULL; ++ char *router_capability_data_start = nullptr; + + struct wccp2_capability_element_t *router_capability_element; + + struct sockaddr_in from; + + struct in_addr cache_address; +- int len, found; +- short int data_length, offset; + uint32_t tmp; + char *ptr; + int num_caches; +@@ -1161,20 +1211,18 @@ wccp2HandleUdp(int sock, void *) + Ip::Address from_tmp; + from_tmp.setIPv4(); + +- len = comm_udp_recvfrom(sock, +- &wccp2_i_see_you, +- WCCP_RESPONSE_SIZE, +- 0, +- from_tmp); ++ const auto lenOrError = comm_udp_recvfrom(sock, &wccp2_i_see_you, WCCP_RESPONSE_SIZE, 0, from_tmp); + +- if (len < 0) ++ if (lenOrError < 0) + return; ++ const auto len = static_cast(lenOrError); + +- if (ntohs(wccp2_i_see_you.version) != WCCP2_VERSION) +- return; +- +- if (ntohl(wccp2_i_see_you.type) != WCCP2_I_SEE_YOU) +- return; ++ try { ++ // TODO: Remove wccp2_i_see_you.data and use a buffer to read messages. 
++ const auto message_header_size = sizeof(wccp2_i_see_you) - sizeof(wccp2_i_see_you.data); ++ Must2(len >= message_header_size, "incomplete WCCP message header"); ++ Must2(ntohs(wccp2_i_see_you.version) == WCCP2_VERSION, "WCCP version unsupported"); ++ Must2(ntohl(wccp2_i_see_you.type) == WCCP2_I_SEE_YOU, "WCCP packet type unsupported"); + + /* FIXME INET6 : drop conversion boundary */ + from_tmp.getSockAddr(from); +@@ -1182,73 +1230,60 @@ wccp2HandleUdp(int sock, void *) + debugs(80, 3, "Incoming WCCPv2 I_SEE_YOU length " << ntohs(wccp2_i_see_you.length) << "."); + + /* Record the total data length */ +- data_length = ntohs(wccp2_i_see_you.length); ++ const auto data_length = ntohs(wccp2_i_see_you.length); ++ Must2(data_length <= len - message_header_size, ++ "malformed packet claiming it's bigger than received data"); + +- offset = 0; +- +- if (data_length > len) { +- debugs(80, DBG_IMPORTANT, "ERROR: Malformed WCCPv2 packet claiming it's bigger than received data"); +- return; +- } ++ size_t offset = 0; + + /* Go through the data structure */ +- while (data_length > offset) { ++ while (offset + sizeof(struct wccp2_item_header_t) <= data_length) { + + char *data = wccp2_i_see_you.data; + +- header = (struct wccp2_item_header_t *) &data[offset]; ++ const auto itemHeader = reinterpret_cast(&data[offset]); ++ const auto itemSize = CheckFieldDataLength(itemHeader, ntohs(itemHeader->length), ++ data, data_length, "truncated record"); ++ // XXX: Check "The specified length must be a multiple of 4 octets" ++ // requirement to avoid unaligned memory reads after the first item. + +- switch (ntohs(header->type)) { ++ switch (ntohs(itemHeader->type)) { + + case WCCP2_SECURITY_INFO: +- +- if (security_info != NULL) { +- debugs(80, DBG_IMPORTANT, "Duplicate security definition"); +- return; +- } +- +- security_info = (struct wccp2_security_none_t *) &wccp2_i_see_you.data[offset]; ++ Must2(!security_info, "duplicate security definition"); ++ SetField(security_info, itemHeader, itemHeader, itemSize, ++ "security definition truncated"); + break; + + case WCCP2_SERVICE_INFO: +- +- if (service_info != NULL) { +- debugs(80, DBG_IMPORTANT, "Duplicate service_info definition"); +- return; +- } +- +- service_info = (struct wccp2_service_info_t *) &wccp2_i_see_you.data[offset]; ++ Must2(!service_info, "duplicate service_info definition"); ++ SetField(service_info, itemHeader, itemHeader, itemSize, ++ "service_info definition truncated"); + break; + + case WCCP2_ROUTER_ID_INFO: +- +- if (router_identity_info != NULL) { +- debugs(80, DBG_IMPORTANT, "Duplicate router_identity_info definition"); +- return; +- } +- +- router_identity_info = (struct router_identity_info_t *) &wccp2_i_see_you.data[offset]; ++ Must2(!router_identity_info, "duplicate router_identity_info definition"); ++ SetField(router_identity_info, itemHeader, itemHeader, itemSize, ++ "router_identity_info definition truncated"); + break; + + case WCCP2_RTR_VIEW_INFO: +- +- if (router_view_header != NULL) { +- debugs(80, DBG_IMPORTANT, "Duplicate router_view definition"); +- return; +- } +- +- router_view_header = (struct router_view_t *) &wccp2_i_see_you.data[offset]; ++ Must2(!router_view_header, "duplicate router_view definition"); ++ SetField(router_view_header, itemHeader, itemHeader, itemSize, ++ "router_view definition truncated"); + break; + +- case WCCP2_CAPABILITY_INFO: +- +- if (router_capability_header != NULL) { +- debugs(80, DBG_IMPORTANT, "Duplicate router_capability definition"); +- return; +- } ++ case WCCP2_CAPABILITY_INFO: { ++ 
Must2(!router_capability_header, "duplicate router_capability definition"); ++ SetField(router_capability_header, itemHeader, itemHeader, itemSize, ++ "router_capability definition truncated"); + +- router_capability_header = (struct wccp2_capability_info_header_t *) &wccp2_i_see_you.data[offset]; ++ CheckFieldDataLength(router_capability_header, ntohs(router_capability_header->capability_info_length), ++ itemHeader, itemSize, "capability info truncated"); ++ router_capability_data_start = reinterpret_cast(router_capability_header) + ++ sizeof(*router_capability_header); + break; ++ } + + /* Nothing to do for the types below */ + +@@ -1257,22 +1292,17 @@ wccp2HandleUdp(int sock, void *) + break; + + default: +- debugs(80, DBG_IMPORTANT, "Unknown record type in WCCPv2 Packet (" << ntohs(header->type) << ")."); ++ debugs(80, DBG_IMPORTANT, "Unknown record type in WCCPv2 Packet (" << ntohs(itemHeader->type) << ")."); + } + +- offset += sizeof(struct wccp2_item_header_t); +- offset += ntohs(header->length); +- +- if (offset > data_length) { +- debugs(80, DBG_IMPORTANT, "Error: WCCPv2 packet tried to tell us there is data beyond the end of the packet"); +- return; +- } ++ offset += itemSize; ++ assert(offset <= data_length && "CheckFieldDataLength(itemHeader...) established that"); + } + +- if ((security_info == NULL) || (service_info == NULL) || (router_identity_info == NULL) || (router_view_header == NULL)) { +- debugs(80, DBG_IMPORTANT, "Incomplete WCCPv2 Packet"); +- return; +- } ++ Must2(security_info, "packet missing security definition"); ++ Must2(service_info, "packet missing service_info definition"); ++ Must2(router_identity_info, "packet missing router_identity_info definition"); ++ Must2(router_view_header, "packet missing router_view definition"); + + debugs(80, 5, "Complete packet received"); + +@@ -1308,10 +1338,7 @@ wccp2HandleUdp(int sock, void *) + break; + } + +- if (router_list_ptr->next == NULL) { +- debugs(80, DBG_IMPORTANT, "WCCPv2 Packet received from unknown router"); +- return; +- } ++ Must2(router_list_ptr->next, "packet received from unknown router"); + + /* Set the router id */ + router_list_ptr->info->router_address = router_identity_info->router_id_element.router_address; +@@ -1331,11 +1358,20 @@ wccp2HandleUdp(int sock, void *) + } + } else { + +- char *end = ((char *) router_capability_header) + sizeof(*router_capability_header) + ntohs(router_capability_header->capability_info_length) - sizeof(struct wccp2_capability_info_header_t); +- +- router_capability_element = (struct wccp2_capability_element_t *) (((char *) router_capability_header) + sizeof(*router_capability_header)); +- +- while ((char *) router_capability_element <= end) { ++ const auto router_capability_data_length = ntohs(router_capability_header->capability_info_length); ++ assert(router_capability_data_start); ++ const auto router_capability_data_end = router_capability_data_start + ++ router_capability_data_length; ++ for (auto router_capability_data_current = router_capability_data_start; ++ router_capability_data_current < router_capability_data_end;) { ++ ++ SetField(router_capability_element, router_capability_data_current, ++ router_capability_data_start, router_capability_data_length, ++ "capability element header truncated"); ++ const auto elementSize = CheckFieldDataLength( ++ router_capability_element, ntohs(router_capability_element->capability_length), ++ router_capability_data_start, router_capability_data_length, ++ "capability element truncated"); + + switch 
(ntohs(router_capability_element->capability_type)) {
+@@ -1377,7 +1413,7 @@ wccp2HandleUdp(int sock, void *)
+                 debugs(80, DBG_IMPORTANT, "Unknown capability type in WCCPv2 Packet (" << ntohs(router_capability_element->capability_type) << ").");
+             }
+ 
+-            router_capability_element = (struct wccp2_capability_element_t *) (((char *) router_capability_element) + sizeof(struct wccp2_item_header_t) + ntohs(router_capability_element->capability_length));
++            router_capability_data_current += elementSize;
+         }
+     }
+ 
+@@ -1396,23 +1432,34 @@ wccp2HandleUdp(int sock, void *)
+     num_caches = 0;
+ 
+     /* Check to see if we're the master cache and update the cache list */
+-    found = 0;
++    bool found = false;
+     service_list_ptr->lowest_ip = 1;
+     cache_list_ptr = &router_list_ptr->cache_list_head;
+ 
+     /* to find the list of caches, we start at the end of the router view header */
+ 
+     ptr = (char *) (router_view_header) + sizeof(struct router_view_t);
++    const auto router_view_size = sizeof(struct router_view_t) +
++        ntohs(router_view_header->header.length);
+ 
+     /* Then we read the number of routers */
+-    memcpy(&tmp, ptr, sizeof(tmp));
++    const uint32_t *routerCountRaw = nullptr;
++    SetField(routerCountRaw, ptr, router_view_header, router_view_size,
++             "malformed packet (truncated router view info w/o number of routers)");
+ 
+     /* skip the number plus all the ip's */
+-
+-    ptr += sizeof(tmp) + (ntohl(tmp) * sizeof(struct in_addr));
++    ptr += sizeof(*routerCountRaw);
++    const auto ipCount = ntohl(*routerCountRaw);
++    const auto ipsSize = ipCount * sizeof(struct in_addr); // we check for unsigned overflow below
++    Must2(ipsSize / sizeof(struct in_addr) == ipCount, "huge IP address count");
++    CheckSectionLength(ptr, ipsSize, router_view_header, router_view_size, "invalid IP address count");
++    ptr += ipsSize;
+ 
+     /* Then read the number of caches */
+-    memcpy(&tmp, ptr, sizeof(tmp));
++    const uint32_t *cacheCountRaw = nullptr;
++    SetField(cacheCountRaw, ptr, router_view_header, router_view_size,
++             "malformed packet (truncated router view info w/o cache count)");
++    memcpy(&tmp, cacheCountRaw, sizeof(tmp)); // TODO: Replace tmp with cacheCount
+     ptr += sizeof(tmp);
+ 
+     if (ntohl(tmp) != 0) {
+@@ -1426,7 +1473,8 @@ wccp2HandleUdp(int sock, void *)
+ 
+             case WCCP2_ASSIGNMENT_METHOD_HASH:
+ 
+-                cache_identity = (struct wccp2_cache_identity_info_t *) ptr;
++                SetField(cache_identity, ptr, router_view_header, router_view_size,
++                         "malformed packet (truncated router view info cache w/o assignment hash)");
+ 
+                 ptr += sizeof(struct wccp2_cache_identity_info_t);
+ 
+@@ -1437,13 +1485,15 @@ wccp2HandleUdp(int sock, void *)
+ 
+             case WCCP2_ASSIGNMENT_METHOD_MASK:
+ 
+-                cache_mask_info = (struct cache_mask_info_t *) ptr;
++                SetField(cache_mask_info, ptr, router_view_header, router_view_size,
++                         "malformed packet (truncated router view info cache w/o assignment mask)");
+ 
+                 /* The mask assignment has an undocumented variable length entry here */
+ 
+                 if (ntohl(cache_mask_info->num1) == 3) {
+ 
+-                    cache_mask_identity = (struct wccp2_cache_mask_identity_info_t *) ptr;
++                    SetField(cache_mask_identity, ptr, router_view_header, router_view_size,
++                             "malformed packet (truncated router view info cache w/o assignment mask identity)");
+ 
+                     ptr += sizeof(struct wccp2_cache_mask_identity_info_t);
+ 
+@@ -1474,10 +1524,7 @@ wccp2HandleUdp(int sock, void *)
+             debugs (80, 5, "checking cache list: (" << std::hex << cache_address.s_addr << ":" << router_list_ptr->local_ip.s_addr << ")");
+ 
+             /* Check to see if it's the master, or us */
+-
+-            if
(cache_address.s_addr == router_list_ptr->local_ip.s_addr) { +- found = 1; +- } ++ found = found || (cache_address.s_addr == router_list_ptr->local_ip.s_addr); + + if (cache_address.s_addr < router_list_ptr->local_ip.s_addr) { + service_list_ptr->lowest_ip = 0; +@@ -1494,7 +1541,7 @@ wccp2HandleUdp(int sock, void *) + cache_list_ptr->next = NULL; + + service_list_ptr->lowest_ip = 1; +- found = 1; ++ found = true; + num_caches = 1; + } + +@@ -1502,7 +1549,7 @@ wccp2HandleUdp(int sock, void *) + + router_list_ptr->num_caches = htonl(num_caches); + +- if ((found == 1) && (service_list_ptr->lowest_ip == 1)) { ++ if (found && (service_list_ptr->lowest_ip == 1)) { + if (ntohl(router_view_header->change_number) != router_list_ptr->member_change) { + debugs(80, 4, "Change detected - queueing up new assignment"); + router_list_ptr->member_change = ntohl(router_view_header->change_number); +@@ -1515,6 +1562,10 @@ wccp2HandleUdp(int sock, void *) + eventDelete(wccp2AssignBuckets, NULL); + debugs(80, 5, "I am not the lowest ip cache - not assigning buckets"); + } ++ ++ } catch (...) { ++ debugs(80, DBG_IMPORTANT, "ERROR: Ignoring WCCPv2 message: " << CurrentException); ++ } + } + + static void diff --git a/SOURCES/squid-4.15-CVE-2021-46784.patch b/SOURCES/squid-4.15-CVE-2021-46784.patch new file mode 100644 index 0000000..3c3da59 --- /dev/null +++ b/SOURCES/squid-4.15-CVE-2021-46784.patch @@ -0,0 +1,129 @@ +From 780c4ea1b4c9d2fb41f6962aa6ed73ae57f74b2b Mon Sep 17 00:00:00 2001 +From: Joshua Rogers +Date: Mon, 18 Apr 2022 13:42:36 +0000 +Subject: [PATCH] Improve handling of Gopher responses (#1022) + +--- + src/gopher.cc | 45 ++++++++++++++++++++------------------------- + 1 file changed, 20 insertions(+), 25 deletions(-) + +diff --git a/src/gopher.cc b/src/gopher.cc +index 169b0e18299..6187da18bcd 100644 +--- a/src/gopher.cc ++++ b/src/gopher.cc +@@ -371,7 +371,6 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len) + char *lpos = NULL; + char *tline = NULL; + LOCAL_ARRAY(char, line, TEMP_BUF_SIZE); +- LOCAL_ARRAY(char, tmpbuf, TEMP_BUF_SIZE); + char *name = NULL; + char *selector = NULL; + char *host = NULL; +@@ -381,7 +380,6 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len) + char gtype; + StoreEntry *entry = NULL; + +- memset(tmpbuf, '\0', TEMP_BUF_SIZE); + memset(line, '\0', TEMP_BUF_SIZE); + + entry = gopherState->entry; +@@ -416,7 +414,7 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len) + return; + } + +- String outbuf; ++ SBuf outbuf; + + if (!gopherState->HTML_header_added) { + if (gopherState->conversion == GopherStateData::HTML_CSO_RESULT) +@@ -583,34 +581,34 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len) + break; + } + +- memset(tmpbuf, '\0', TEMP_BUF_SIZE); +- + if ((gtype == GOPHER_TELNET) || (gtype == GOPHER_3270)) { + if (strlen(escaped_selector) != 0) +- snprintf(tmpbuf, TEMP_BUF_SIZE, " %s\n", +- icon_url, escaped_selector, rfc1738_escape_part(host), +- *port ? ":" : "", port, html_quote(name)); ++ outbuf.appendf(" %s\n", ++ icon_url, escaped_selector, rfc1738_escape_part(host), ++ *port ? ":" : "", port, html_quote(name)); + else +- snprintf(tmpbuf, TEMP_BUF_SIZE, " %s\n", +- icon_url, rfc1738_escape_part(host), *port ? ":" : "", +- port, html_quote(name)); ++ outbuf.appendf(" %s\n", ++ icon_url, rfc1738_escape_part(host), *port ? 
":" : "", ++ port, html_quote(name)); + + } else if (gtype == GOPHER_INFO) { +- snprintf(tmpbuf, TEMP_BUF_SIZE, "\t%s\n", html_quote(name)); ++ outbuf.appendf("\t%s\n", html_quote(name)); + } else { + if (strncmp(selector, "GET /", 5) == 0) { + /* WWW link */ +- snprintf(tmpbuf, TEMP_BUF_SIZE, " %s\n", +- icon_url, host, rfc1738_escape_unescaped(selector + 5), html_quote(name)); ++ outbuf.appendf(" %s\n", ++ icon_url, host, rfc1738_escape_unescaped(selector + 5), html_quote(name)); ++ } else if (gtype == GOPHER_WWW) { ++ outbuf.appendf(" %s\n", ++ icon_url, rfc1738_escape_unescaped(selector), html_quote(name)); + } else { + /* Standard link */ +- snprintf(tmpbuf, TEMP_BUF_SIZE, " %s\n", +- icon_url, host, gtype, escaped_selector, html_quote(name)); ++ outbuf.appendf(" %s\n", ++ icon_url, host, gtype, escaped_selector, html_quote(name)); + } + } + + safe_free(escaped_selector); +- outbuf.append(tmpbuf); + } else { + memset(line, '\0', TEMP_BUF_SIZE); + continue; +@@ -643,13 +641,12 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len) + break; + + if (gopherState->cso_recno != recno) { +- snprintf(tmpbuf, TEMP_BUF_SIZE, "

Record# %d
%s

\n
", recno, html_quote(result));
++                    outbuf.appendf("

Record# %d
%s

\n
", recno, html_quote(result));
+                     gopherState->cso_recno = recno;
+                 } else {
+-                    snprintf(tmpbuf, TEMP_BUF_SIZE, "%s\n", html_quote(result));
++                    outbuf.appendf("%s\n", html_quote(result));
+                 }
+ 
+-                outbuf.append(tmpbuf);
+                 break;
+             } else {
+                 int code;
+@@ -677,8 +674,7 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
+ 
+                 case 502: { /* Too Many Matches */
+                     /* Print the message the server returns */
+-                    snprintf(tmpbuf, TEMP_BUF_SIZE, "</PRE><HR noshade size=\"1px\"><H2>%s</H2>\n<PRE>", html_quote(result));
+-                    outbuf.append(tmpbuf);
++                    outbuf.appendf("</PRE><HR noshade size=\"1px\"><H2>%s</H2>\n<PRE>", html_quote(result));
+                     break;
+                 }
+ 
+@@ -694,13 +690,12 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
+ 
+     }               /* while loop */
+ 
+-    if (outbuf.size() > 0) {
+-        entry->append(outbuf.rawBuf(), outbuf.size());
++    if (outbuf.length() > 0) {
++        entry->append(outbuf.rawContent(), outbuf.length());
+         /* now let start sending stuff to client */
+         entry->flush();
+     }
+ 
+-    outbuf.clean();
+     return;
+ }
+ 
\ No newline at end of file
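
The fix above is at heart a buffer-handling change: every HTML fragment used to be formatted with snprintf() into a fixed TEMP_BUF_SIZE scratch array and then copied into the output, so oversized Gopher menu entries could be silently truncated. With SBuf::appendf() the output buffer grows as needed and the intermediate copy disappears. The same pattern can be sketched standalone, here with std::string and a hypothetical appendf() helper (an illustration of the idea, not Squid's actual SBuf API):

    // Sketch: grow-on-demand formatted append, in the spirit of SBuf::appendf().
    // appendf() below is a made-up helper built on vsnprintf(); not Squid code.
    #include <cstdarg>
    #include <cstdio>
    #include <string>

    static void appendf(std::string &out, const char *fmt, ...)
    {
        va_list ap, ap2;
        va_start(ap, fmt);
        va_copy(ap2, ap);
        const int need = vsnprintf(nullptr, 0, fmt, ap); // measure first
        va_end(ap);
        if (need > 0) {
            const size_t old = out.size();
            out.resize(old + need + 1);               // make room, incl. the NUL
            vsnprintf(&out[old], need + 1, fmt, ap2); // format in place
            out.resize(old + need);                   // drop the trailing NUL
        }
        va_end(ap2);
    }

    int main()
    {
        std::string outbuf;
        // Unlike snprintf() into a fixed array, each append grows the buffer,
        // so a long selector or menu name cannot be silently cut short.
        appendf(outbuf, "<A HREF=\"gopher://%s/%c%s\">%s</A>\n",
                "example.net", '1', "/selector", "a very long menu entry");
        return outbuf.empty() ? 1 : 0;
    }
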
diff --git a/SOURCES/squid-4.15-CVE-2022-41318.patch b/SOURCES/squid-4.15-CVE-2022-41318.patch
new file mode 100644
index 0000000..cb303ad
--- /dev/null
+++ b/SOURCES/squid-4.15-CVE-2022-41318.patch
@@ -0,0 +1,38 @@
+commit 4031c6c2b004190fdffbc19dab7cd0305a2025b7 (refs/remotes/origin/v4, refs/remotes/github/v4, refs/heads/v4)
+Author: Amos Jeffries 
+Date:   2022-08-09 23:34:54 +0000
+
+    Bug 3193 pt2: NTLM decoder truncating strings (#1114)
+    
+    The initial bug fix overlooked large 'offset' causing integer
+    wrap to extract a too-short length string.
+    
+    Improve debugs and checks sequence to clarify cases and ensure
+    that all are handled correctly.
+
+diff --git a/lib/ntlmauth/ntlmauth.cc b/lib/ntlmauth/ntlmauth.cc
+index 5d9637290..f00fd51f8 100644
+--- a/lib/ntlmauth/ntlmauth.cc
++++ b/lib/ntlmauth/ntlmauth.cc
+@@ -107,10 +107,19 @@ ntlm_fetch_string(const ntlmhdr *packet, const int32_t packet_size, const strhdr
+     int32_t o = le32toh(str->offset);
+     // debug("ntlm_fetch_string(plength=%d,l=%d,o=%d)\n",packet_size,l,o);
+ 
+-    if (l < 0 || l > NTLM_MAX_FIELD_LENGTH || o + l > packet_size || o == 0) {
+-        debug("ntlm_fetch_string: insane data (pkt-sz: %d, fetch len: %d, offset: %d)\n", packet_size,l,o);
++    if (l < 0 || l > NTLM_MAX_FIELD_LENGTH) {
++        debug("ntlm_fetch_string: insane string length (pkt-sz: %d, fetch len: %d, offset: %d)\n", packet_size,l,o);
+         return rv;
+     }
++    else if (o <= 0 || o > packet_size) {
++        debug("ntlm_fetch_string: insane string offset (pkt-sz: %d, fetch len: %d, offset: %d)\n", packet_size,l,o);
++        return rv;
++    }
++    else if (l > packet_size - o) {
++        debug("ntlm_fetch_string: truncated string data (pkt-sz: %d, fetch len: %d, offset: %d)\n", packet_size,l,o);
++        return rv;
++    }
++
+     rv.str = (char *)packet + o;
+     rv.l = 0;
+     if ((flags & NTLM_NEGOTIATE_ASCII) == 0) {
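
The reordering matters because the old test computed o + l before anything was known about o: a large attacker-chosen offset could wrap the signed addition and slip past the single combined comparison. The new sequence validates the offset range first and only then compares the length against the bytes that remain, so no arithmetic can overflow. A reduced sketch of that idiom (a plain standalone function, not the ntlmauth API):

    // Sketch: overflow-safe "does [off, off+len) fit in the packet?" test.
    #include <cstdint>
    #include <cstdio>

    static bool fieldFits(int32_t len, int32_t off, int32_t packetSize)
    {
        if (len < 0)
            return false;               // insane length
        if (off <= 0 || off > packetSize)
            return false;               // insane offset
        return len <= packetSize - off; // subtraction now cannot wrap
    }

    int main()
    {
        // 0x7ffffff0 + 16 would wrap a naive "off + len > size" check;
        // the offset test rejects it before any arithmetic happens.
        printf("%d\n", fieldFits(16, 0x7ffffff0, 64)); // 0
        printf("%d\n", fieldFits(16, 32, 64));         // 1
        return 0;
    }
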
diff --git a/SOURCES/squid-4.15-CVE-2023-46724.patch b/SOURCES/squid-4.15-CVE-2023-46724.patch
new file mode 100644
index 0000000..58b8651
--- /dev/null
+++ b/SOURCES/squid-4.15-CVE-2023-46724.patch
@@ -0,0 +1,24 @@
+diff --git a/src/anyp/Uri.cc b/src/anyp/Uri.cc
+index 20b9bf1..81ebb18 100644
+--- a/src/anyp/Uri.cc
++++ b/src/anyp/Uri.cc
+@@ -173,6 +173,10 @@ urlInitialize(void)
+     assert(0 == matchDomainName("*.foo.com", ".foo.com", mdnHonorWildcards));
+     assert(0 != matchDomainName("*.foo.com", "foo.com", mdnHonorWildcards));
+ 
++    assert(0 != matchDomainName("foo.com", ""));
++    assert(0 != matchDomainName("foo.com", "", mdnHonorWildcards));
++    assert(0 != matchDomainName("foo.com", "", mdnRejectSubsubDomains));
++
+     /* more cases? */
+ }
+ 
+@@ -756,6 +760,8 @@ matchDomainName(const char *h, const char *d, MatchDomainNameFlags flags)
+         return -1;
+ 
+     dl = strlen(d);
++    if (dl == 0)
++        return 1;
+ 
+     /*
+      * Start at the ends of the two strings and work towards the
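
The guard matters because every string trivially ends with the empty suffix, so an empty domain falling through to the end-to-end comparison walk would degenerate toward matching any host. Returning 1 (the function's mismatch value, as the new asserts spell out) cuts that case off early. A reduced sketch of the semantics (a hypothetical helper; Squid's real matchDomainName() also handles wildcards and subdomain flags):

    // Sketch: the empty-domain guard from the patch above, in a reduced
    // suffix-match model. matchDomainSketch() is illustrative, not Squid code.
    #include <cassert>
    #include <cstring>
    #include <strings.h>

    static int matchDomainSketch(const char *host, const char *domain)
    {
        if (*host == '\0')
            return -1;
        const size_t dl = strlen(domain);
        if (dl == 0)
            return 1;          // new guard: an empty domain never matches
        const size_t hl = strlen(host);
        if (hl < dl)
            return 1;
        // 0 == match, mirroring the convention in the asserts above
        return strcasecmp(host + (hl - dl), domain) == 0 ? 0 : 1;
    }

    int main()
    {
        assert(0 != matchDomainSketch("foo.com", ""));       // guarded case
        assert(0 == matchDomainSketch("foo.com", "foo.com"));
        return 0;
    }
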
diff --git a/SOURCES/squid-4.15-CVE-2023-46728.patch b/SOURCES/squid-4.15-CVE-2023-46728.patch
new file mode 100644
index 0000000..980f372
--- /dev/null
+++ b/SOURCES/squid-4.15-CVE-2023-46728.patch
@@ -0,0 +1,1673 @@
+commit 0cf1b78cacfdb278107ae352022ced143635b528
+Author: Luboš Uhliarik 
+Date:   Wed Dec 6 20:04:56 2023 +0100
+
+    Remove gopher support
+
+diff --git a/doc/debug-sections.txt b/doc/debug-sections.txt
+index 8b8b25f..50bd122 100644
+--- a/doc/debug-sections.txt
++++ b/doc/debug-sections.txt
+@@ -27,7 +27,6 @@ section 06    Disk I/O Routines
+ section 07    Multicast
+ section 08    Swap File Bitmap
+ section 09    File Transfer Protocol (FTP)
+-section 10    Gopher
+ section 11    Hypertext Transfer Protocol (HTTP)
+ section 12    Internet Cache Protocol (ICP)
+ section 13    High Level Memory Pool Management
+diff --git a/errors/en/ERR_UNSUP_REQ b/errors/en/ERR_UNSUP_REQ
+index 352399d..e208043 100644
+--- a/errors/en/ERR_UNSUP_REQ
++++ b/errors/en/ERR_UNSUP_REQ
+@@ -24,7 +24,7 @@ body
+ <p><b>Unsupported Request Method and Protocol</b></p>
+ </blockquote>
+ 
+-<p>Squid does not support all request methods for all access protocols. For example, you can not POST a Gopher request.</p>
++<p>Squid does not support all request methods for all access protocols.</p>
+ 
+ <p>Your cache administrator is <a href="mailto:%w%%W">%w</a>.</p>
+ 
+diff --git a/errors/errorpage.css b/errors/errorpage.css
+index 38ba434..facee93 100644
+--- a/errors/errorpage.css
++++ b/errors/errorpage.css
+@@ -73,7 +73,7 @@ p {
+ pre {
+ }
+ 
+-/* special event: FTP / Gopher directory listing */
++/* special event: FTP directory listing */
+ #dirmsg {
+     font-family: courier, monospace;
+     color: black;
+diff --git a/errors/templates/ERR_UNSUP_REQ b/errors/templates/ERR_UNSUP_REQ
+index e880392..196887d 100644
+--- a/errors/templates/ERR_UNSUP_REQ
++++ b/errors/templates/ERR_UNSUP_REQ
+@@ -24,7 +24,7 @@ body
+ <p><b>Unsupported Request Method and Protocol</b></p>
+ </blockquote>
+ 
+-<p>Squid does not support all request methods for all access protocols. For example, you can not POST a Gopher request.</p>
++<p>Squid does not support all request methods for all access protocols.</p>
+ 
+ <p>Your cache administrator is <a href="mailto:%w%%W">%w</a>.</p>
+ 
+diff --git a/src/FwdState.cc b/src/FwdState.cc +index 41a1679..5363572 100644 +--- a/src/FwdState.cc ++++ b/src/FwdState.cc +@@ -28,7 +28,6 @@ + #include "fde.h" + #include "FwdState.h" + #include "globals.h" +-#include "gopher.h" + #include "hier_code.h" + #include "http.h" + #include "http/Stream.h" +@@ -1007,10 +1006,6 @@ FwdState::dispatch() + httpStart(this); + break; + +- case AnyP::PROTO_GOPHER: +- gopherStart(this); +- break; +- + case AnyP::PROTO_FTP: + if (request->flags.ftpNative) + Ftp::StartRelay(this); +diff --git a/src/HttpMsg.h b/src/HttpMsg.h +index 2bf799f..06ef081 100644 +--- a/src/HttpMsg.h ++++ b/src/HttpMsg.h +@@ -38,7 +38,6 @@ public: + srcFtp = 1 << (16 + 1), ///< ftp_port or FTP server + srcIcap = 1 << (16 + 2), ///< traditional ICAP service without encryption + srcEcap = 1 << (16 + 3), ///< eCAP service that uses insecure libraries/daemons +- srcGopher = 1 << (16 + 14), ///< Gopher server + srcWhois = 1 << (16 + 15), ///< Whois server + srcUnsafe = 0xFFFF0000, ///< Unsafe sources mask + srcSafe = 0x0000FFFF ///< Safe sources mask +diff --git a/src/HttpRequest.cc b/src/HttpRequest.cc +index 0c11f5a..38b9307 100644 +--- a/src/HttpRequest.cc ++++ b/src/HttpRequest.cc +@@ -18,7 +18,6 @@ + #include "Downloader.h" + #include "err_detail_type.h" + #include "globals.h" +-#include "gopher.h" + #include "http.h" + #include "http/one/RequestParser.h" + #include "http/Stream.h" +@@ -556,11 +555,6 @@ HttpRequest::maybeCacheable() + return false; + break; + +- case AnyP::PROTO_GOPHER: +- if (!gopherCachable(this)) +- return false; +- break; +- + case AnyP::PROTO_CACHE_OBJECT: + return false; + +diff --git a/src/IoStats.h b/src/IoStats.h +index e04deef..0b69d41 100644 +--- a/src/IoStats.h ++++ b/src/IoStats.h +@@ -22,7 +22,7 @@ public: + int writes; + int write_hist[histSize]; + } +- Http, Ftp, Gopher; ++ Http, Ftp; + }; + + #endif /* SQUID_IOSTATS_H_ */ +diff --git a/src/Makefile.am b/src/Makefile.am +index 7189757..cbce754 100644 +--- a/src/Makefile.am ++++ b/src/Makefile.am +@@ -306,8 +306,6 @@ squid_SOURCES = \ + FwdState.h \ + Generic.h \ + globals.h \ +- gopher.h \ +- gopher.cc \ + helper.cc \ + helper.h \ + hier_code.h \ +@@ -1260,8 +1258,6 @@ tests_testCacheManager_SOURCES = \ + fqdncache.cc \ + FwdState.cc \ + FwdState.h \ +- gopher.h \ +- gopher.cc \ + hier_code.h \ + helper.cc \ + $(HTCPSOURCE) \ +@@ -1679,8 +1675,6 @@ tests_testEvent_SOURCES = \ + fqdncache.cc \ + FwdState.cc \ + FwdState.h \ +- gopher.h \ +- gopher.cc \ + helper.cc \ + hier_code.h \ + $(HTCPSOURCE) \ +@@ -1915,8 +1909,6 @@ tests_testEventLoop_SOURCES = \ + fqdncache.cc \ + FwdState.cc \ + FwdState.h \ +- gopher.h \ +- gopher.cc \ + helper.cc \ + hier_code.h \ + $(HTCPSOURCE) \ +@@ -2146,8 +2138,6 @@ tests_test_http_range_SOURCES = \ + fqdncache.cc \ + FwdState.cc \ + FwdState.h \ +- gopher.h \ +- gopher.cc \ + helper.cc \ + hier_code.h \ + $(HTCPSOURCE) \ +@@ -2462,8 +2452,6 @@ tests_testHttpRequest_SOURCES = \ + fqdncache.cc \ + FwdState.cc \ + FwdState.h \ +- gopher.h \ +- gopher.cc \ + helper.cc \ + hier_code.h \ + $(HTCPSOURCE) \ +@@ -3308,8 +3296,6 @@ tests_testURL_SOURCES = \ + fqdncache.cc \ + FwdState.cc \ + FwdState.h \ +- gopher.h \ +- gopher.cc \ + helper.cc \ + hier_code.h \ + $(HTCPSOURCE) \ +diff --git a/src/Makefile.in b/src/Makefile.in +index 53ac16d..d46f49c 100644 +--- a/src/Makefile.in ++++ b/src/Makefile.in +@@ -263,7 +263,7 @@ am__squid_SOURCES_DIST = AclRegs.cc AuthReg.cc AccessLogEntry.cc \ + ExternalACL.h ExternalACLEntry.cc ExternalACLEntry.h \ + FadingCounter.h 
FadingCounter.cc fatal.h fatal.cc fd.h fd.cc \ + fde.cc fde.h FileMap.h filemap.cc fqdncache.h fqdncache.cc \ +- FwdState.cc FwdState.h Generic.h globals.h gopher.h gopher.cc \ ++ FwdState.cc FwdState.h Generic.h globals.h \ + helper.cc helper.h hier_code.h HierarchyLogEntry.h htcp.cc \ + htcp.h http.cc http.h HttpHeaderFieldStat.h HttpHdrCc.h \ + HttpHdrCc.cc HttpHdrCc.cci HttpHdrRange.cc HttpHdrSc.cc \ +@@ -352,7 +352,7 @@ am_squid_OBJECTS = $(am__objects_1) AccessLogEntry.$(OBJEXT) \ + EventLoop.$(OBJEXT) external_acl.$(OBJEXT) \ + ExternalACLEntry.$(OBJEXT) FadingCounter.$(OBJEXT) \ + fatal.$(OBJEXT) fd.$(OBJEXT) fde.$(OBJEXT) filemap.$(OBJEXT) \ +- fqdncache.$(OBJEXT) FwdState.$(OBJEXT) gopher.$(OBJEXT) \ ++ fqdncache.$(OBJEXT) FwdState.$(OBJEXT) \ + helper.$(OBJEXT) $(am__objects_5) http.$(OBJEXT) \ + HttpHdrCc.$(OBJEXT) HttpHdrRange.$(OBJEXT) HttpHdrSc.$(OBJEXT) \ + HttpHdrScTarget.$(OBJEXT) HttpHdrContRange.$(OBJEXT) \ +@@ -539,7 +539,7 @@ am__tests_testCacheManager_SOURCES_DIST = AccessLogEntry.cc debug.cc \ + tests/stub_ETag.cc event.cc external_acl.cc \ + ExternalACLEntry.cc fatal.h tests/stub_fatal.cc fd.h fd.cc \ + fde.cc FileMap.h filemap.cc fqdncache.h fqdncache.cc \ +- FwdState.cc FwdState.h gopher.h gopher.cc hier_code.h \ ++ FwdState.cc FwdState.h hier_code.h \ + helper.cc htcp.cc htcp.h http.cc HttpBody.h HttpBody.cc \ + HttpHeader.h HttpHeader.cc HttpHeaderFieldInfo.h \ + HttpHeaderTools.h HttpHeaderTools.cc HttpHeaderFieldStat.h \ +@@ -594,7 +594,7 @@ am_tests_testCacheManager_OBJECTS = AccessLogEntry.$(OBJEXT) \ + event.$(OBJEXT) external_acl.$(OBJEXT) \ + ExternalACLEntry.$(OBJEXT) tests/stub_fatal.$(OBJEXT) \ + fd.$(OBJEXT) fde.$(OBJEXT) filemap.$(OBJEXT) \ +- fqdncache.$(OBJEXT) FwdState.$(OBJEXT) gopher.$(OBJEXT) \ ++ fqdncache.$(OBJEXT) FwdState.$(OBJEXT) \ + helper.$(OBJEXT) $(am__objects_5) http.$(OBJEXT) \ + HttpBody.$(OBJEXT) HttpHeader.$(OBJEXT) \ + HttpHeaderTools.$(OBJEXT) HttpHdrCc.$(OBJEXT) \ +@@ -838,7 +838,7 @@ am__tests_testEvent_SOURCES_DIST = AccessLogEntry.cc BodyPipe.cc \ + EventLoop.h EventLoop.cc external_acl.cc ExternalACLEntry.cc \ + FadingCounter.cc fatal.h tests/stub_fatal.cc fd.h fd.cc fde.cc \ + FileMap.h filemap.cc fqdncache.h fqdncache.cc FwdState.cc \ +- FwdState.h gopher.h gopher.cc helper.cc hier_code.h htcp.cc \ ++ FwdState.h helper.cc hier_code.h htcp.cc \ + htcp.h http.cc HttpBody.h HttpBody.cc \ + tests/stub_HttpControlMsg.cc HttpHeader.h HttpHeader.cc \ + HttpHeaderFieldInfo.h HttpHeaderTools.h HttpHeaderTools.cc \ +@@ -891,7 +891,7 @@ am_tests_testEvent_OBJECTS = AccessLogEntry.$(OBJEXT) \ + external_acl.$(OBJEXT) ExternalACLEntry.$(OBJEXT) \ + FadingCounter.$(OBJEXT) tests/stub_fatal.$(OBJEXT) \ + fd.$(OBJEXT) fde.$(OBJEXT) filemap.$(OBJEXT) \ +- fqdncache.$(OBJEXT) FwdState.$(OBJEXT) gopher.$(OBJEXT) \ ++ fqdncache.$(OBJEXT) FwdState.$(OBJEXT) \ + helper.$(OBJEXT) $(am__objects_5) http.$(OBJEXT) \ + HttpBody.$(OBJEXT) tests/stub_HttpControlMsg.$(OBJEXT) \ + HttpHeader.$(OBJEXT) HttpHeaderTools.$(OBJEXT) \ +@@ -975,8 +975,8 @@ am__tests_testEventLoop_SOURCES_DIST = AccessLogEntry.cc BodyPipe.cc \ + tests/stub_ETag.cc EventLoop.h EventLoop.cc event.cc \ + external_acl.cc ExternalACLEntry.cc FadingCounter.cc fatal.h \ + tests/stub_fatal.cc fd.h fd.cc fde.cc FileMap.h filemap.cc \ +- fqdncache.h fqdncache.cc FwdState.cc FwdState.h gopher.h \ +- gopher.cc helper.cc hier_code.h htcp.cc htcp.h http.cc \ ++ fqdncache.h fqdncache.cc FwdState.cc FwdState.h \ ++ helper.cc hier_code.h htcp.cc htcp.h http.cc \ + HttpBody.h HttpBody.cc 
+ 	HttpBody.h HttpBody.cc tests/stub_HttpControlMsg.cc \
+ 	HttpHeader.h HttpHeader.cc HttpHeaderFieldInfo.h \
+ 	HttpHeaderTools.h HttpHeaderTools.cc HttpHeaderFieldStat.h \
+@@ -1029,7 +1029,7 @@ am_tests_testEventLoop_OBJECTS = AccessLogEntry.$(OBJEXT) \
+ 	external_acl.$(OBJEXT) ExternalACLEntry.$(OBJEXT) \
+ 	FadingCounter.$(OBJEXT) tests/stub_fatal.$(OBJEXT) \
+ 	fd.$(OBJEXT) fde.$(OBJEXT) filemap.$(OBJEXT) \
+-	fqdncache.$(OBJEXT) FwdState.$(OBJEXT) gopher.$(OBJEXT) \
++	fqdncache.$(OBJEXT) FwdState.$(OBJEXT) \
+ 	helper.$(OBJEXT) $(am__objects_5) http.$(OBJEXT) \
+ 	HttpBody.$(OBJEXT) tests/stub_HttpControlMsg.$(OBJEXT) \
+ 	HttpHeader.$(OBJEXT) HttpHeaderTools.$(OBJEXT) \
+@@ -1187,7 +1187,7 @@ am__tests_testHttpRequest_SOURCES_DIST = AccessLogEntry.cc \
+ 	fs_io.cc dlink.h dlink.cc dns_internal.cc errorpage.cc \
+ 	tests/stub_ETag.cc external_acl.cc ExternalACLEntry.cc fatal.h \
+ 	tests/stub_fatal.cc fd.h fd.cc fde.cc fqdncache.h fqdncache.cc \
+-	FwdState.cc FwdState.h gopher.h gopher.cc helper.cc \
++	FwdState.cc FwdState.h helper.cc \
+ 	hier_code.h htcp.cc htcp.h http.cc HttpBody.h HttpBody.cc \
+ 	tests/stub_HttpControlMsg.cc HttpHeader.h HttpHeader.cc \
+ 	HttpHeaderFieldInfo.h HttpHeaderTools.h HttpHeaderTools.cc \
+@@ -1243,7 +1243,7 @@ am_tests_testHttpRequest_OBJECTS = AccessLogEntry.$(OBJEXT) \
+ 	$(am__objects_4) errorpage.$(OBJEXT) tests/stub_ETag.$(OBJEXT) \
+ 	external_acl.$(OBJEXT) ExternalACLEntry.$(OBJEXT) \
+ 	tests/stub_fatal.$(OBJEXT) fd.$(OBJEXT) fde.$(OBJEXT) \
+-	fqdncache.$(OBJEXT) FwdState.$(OBJEXT) gopher.$(OBJEXT) \
++	fqdncache.$(OBJEXT) FwdState.$(OBJEXT) \
+ 	helper.$(OBJEXT) $(am__objects_5) http.$(OBJEXT) \
+ 	HttpBody.$(OBJEXT) tests/stub_HttpControlMsg.$(OBJEXT) \
+ 	HttpHeader.$(OBJEXT) HttpHeaderTools.$(OBJEXT) \
+@@ -1670,8 +1670,8 @@ am__tests_testURL_SOURCES_DIST = AccessLogEntry.cc BodyPipe.cc \
+ 	fs_io.cc dlink.h dlink.cc dns_internal.cc errorpage.cc ETag.cc \
+ 	event.cc external_acl.cc ExternalACLEntry.cc fatal.h \
+ 	tests/stub_fatal.cc fd.h fd.cc fde.cc FileMap.h filemap.cc \
+-	fqdncache.h fqdncache.cc FwdState.cc FwdState.h gopher.h \
+-	gopher.cc helper.cc hier_code.h htcp.cc htcp.h http.cc \
++	fqdncache.h fqdncache.cc FwdState.cc FwdState.h \
++	helper.cc hier_code.h htcp.cc htcp.h http.cc \
+ 	HttpBody.h HttpBody.cc tests/stub_HttpControlMsg.cc \
+ 	HttpHeaderFieldStat.h HttpHdrCc.h HttpHdrCc.cc HttpHdrCc.cci \
+ 	HttpHdrContRange.cc HttpHdrRange.cc HttpHdrSc.cc \
+@@ -1725,7 +1725,7 @@ am_tests_testURL_OBJECTS = AccessLogEntry.$(OBJEXT) BodyPipe.$(OBJEXT) \
+ 	event.$(OBJEXT) external_acl.$(OBJEXT) \
+ 	ExternalACLEntry.$(OBJEXT) tests/stub_fatal.$(OBJEXT) \
+ 	fd.$(OBJEXT) fde.$(OBJEXT) filemap.$(OBJEXT) \
+-	fqdncache.$(OBJEXT) FwdState.$(OBJEXT) gopher.$(OBJEXT) \
++	fqdncache.$(OBJEXT) FwdState.$(OBJEXT) \
+ 	helper.$(OBJEXT) $(am__objects_5) http.$(OBJEXT) \
+ 	HttpBody.$(OBJEXT) tests/stub_HttpControlMsg.$(OBJEXT) \
+ 	HttpHdrCc.$(OBJEXT) HttpHdrContRange.$(OBJEXT) \
+@@ -1925,8 +1925,8 @@ am__tests_test_http_range_SOURCES_DIST = AccessLogEntry.cc BodyPipe.cc \
+ 	dns_internal.cc errorpage.cc tests/stub_ETag.cc event.cc \
+ 	FadingCounter.cc fatal.h tests/stub_libauth.cc \
+ 	tests/stub_fatal.cc fd.h fd.cc fde.cc FileMap.h filemap.cc \
+-	fqdncache.h fqdncache.cc FwdState.cc FwdState.h gopher.h \
+-	gopher.cc helper.cc hier_code.h htcp.cc htcp.h http.cc \
++	fqdncache.h fqdncache.cc FwdState.cc FwdState.h \
++	helper.cc hier_code.h htcp.cc htcp.h http.cc \
+ 	HttpBody.h HttpBody.cc tests/stub_HttpControlMsg.cc \
+ 	HttpHeaderFieldStat.h HttpHdrCc.h HttpHdrCc.cc HttpHdrCc.cci \
+ 	HttpHdrContRange.cc HttpHdrRange.cc HttpHdrSc.cc \
+@@ -1979,7 +1979,7 @@ am_tests_test_http_range_OBJECTS = AccessLogEntry.$(OBJEXT) \
+ 	FadingCounter.$(OBJEXT) tests/stub_libauth.$(OBJEXT) \
+ 	tests/stub_fatal.$(OBJEXT) fd.$(OBJEXT) fde.$(OBJEXT) \
+ 	filemap.$(OBJEXT) fqdncache.$(OBJEXT) FwdState.$(OBJEXT) \
+-	gopher.$(OBJEXT) helper.$(OBJEXT) $(am__objects_5) \
++	helper.$(OBJEXT) $(am__objects_5) \
+ 	http.$(OBJEXT) HttpBody.$(OBJEXT) \
+ 	tests/stub_HttpControlMsg.$(OBJEXT) HttpHdrCc.$(OBJEXT) \
+ 	HttpHdrContRange.$(OBJEXT) HttpHdrRange.$(OBJEXT) \
+@@ -2131,7 +2131,7 @@ am__depfiles_remade = ./$(DEPDIR)/AccessLogEntry.Po \
+ 	./$(DEPDIR)/external_acl.Po ./$(DEPDIR)/fatal.Po \
+ 	./$(DEPDIR)/fd.Po ./$(DEPDIR)/fde.Po ./$(DEPDIR)/filemap.Po \
+ 	./$(DEPDIR)/fqdncache.Po ./$(DEPDIR)/fs_io.Po \
+-	./$(DEPDIR)/globals.Po ./$(DEPDIR)/gopher.Po \
++	./$(DEPDIR)/globals.Po \
+ 	./$(DEPDIR)/helper.Po ./$(DEPDIR)/hier_code.Po \
+ 	./$(DEPDIR)/htcp.Po ./$(DEPDIR)/http.Po \
+ 	./$(DEPDIR)/icp_opcode.Po ./$(DEPDIR)/icp_v2.Po \
+@@ -3046,7 +3046,7 @@ squid_SOURCES = $(ACL_REGISTRATION_SOURCES) AccessLogEntry.cc \
+ 	ExternalACL.h ExternalACLEntry.cc ExternalACLEntry.h \
+ 	FadingCounter.h FadingCounter.cc fatal.h fatal.cc fd.h fd.cc \
+ 	fde.cc fde.h FileMap.h filemap.cc fqdncache.h fqdncache.cc \
+-	FwdState.cc FwdState.h Generic.h globals.h gopher.h gopher.cc \
++	FwdState.cc FwdState.h Generic.h globals.h \
+ 	helper.cc helper.h hier_code.h HierarchyLogEntry.h \
+ 	$(HTCPSOURCE) http.cc http.h HttpHeaderFieldStat.h HttpHdrCc.h \
+ 	HttpHdrCc.cc HttpHdrCc.cci HttpHdrRange.cc HttpHdrSc.cc \
+@@ -3711,8 +3711,6 @@ tests_testCacheManager_SOURCES = \
+ 	fqdncache.cc \
+ 	FwdState.cc \
+ 	FwdState.h \
+-	gopher.h \
+-	gopher.cc \
+ 	hier_code.h \
+ 	helper.cc \
+ 	$(HTCPSOURCE) \
+@@ -4137,8 +4135,6 @@ tests_testEvent_SOURCES = \
+ 	fqdncache.cc \
+ 	FwdState.cc \
+ 	FwdState.h \
+-	gopher.h \
+-	gopher.cc \
+ 	helper.cc \
+ 	hier_code.h \
+ 	$(HTCPSOURCE) \
+@@ -4374,8 +4370,6 @@ tests_testEventLoop_SOURCES = \
+ 	fqdncache.cc \
+ 	FwdState.cc \
+ 	FwdState.h \
+-	gopher.h \
+-	gopher.cc \
+ 	helper.cc \
+ 	hier_code.h \
+ 	$(HTCPSOURCE) \
+@@ -4607,8 +4601,6 @@ tests_test_http_range_SOURCES = \
+ 	fqdncache.cc \
+ 	FwdState.cc \
+ 	FwdState.h \
+-	gopher.h \
+-	gopher.cc \
+ 	helper.cc \
+ 	hier_code.h \
+ 	$(HTCPSOURCE) \
+@@ -4927,8 +4919,6 @@ tests_testHttpRequest_SOURCES = \
+ 	fqdncache.cc \
+ 	FwdState.cc \
+ 	FwdState.h \
+-	gopher.h \
+-	gopher.cc \
+ 	helper.cc \
+ 	hier_code.h \
+ 	$(HTCPSOURCE) \
+@@ -5780,8 +5770,6 @@ tests_testURL_SOURCES = \
+ 	fqdncache.cc \
+ 	FwdState.cc \
+ 	FwdState.h \
+-	gopher.h \
+-	gopher.cc \
+ 	helper.cc \
+ 	hier_code.h \
+ 	$(HTCPSOURCE) \
+@@ -6826,7 +6814,6 @@ distclean-compile:
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fqdncache.Po@am__quote@ # am--include-marker
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fs_io.Po@am__quote@ # am--include-marker
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/globals.Po@am__quote@ # am--include-marker
+-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gopher.Po@am__quote@ # am--include-marker
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/helper.Po@am__quote@ # am--include-marker
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hier_code.Po@am__quote@ # am--include-marker
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/htcp.Po@am__quote@ # am--include-marker
+@@ -7808,7 +7795,6 @@ distclean: distclean-recursive
+ 	-rm -f ./$(DEPDIR)/fqdncache.Po
+ 	-rm -f ./$(DEPDIR)/fs_io.Po
+ 	-rm -f ./$(DEPDIR)/globals.Po
+-	-rm -f ./$(DEPDIR)/gopher.Po
+ 	-rm -f ./$(DEPDIR)/helper.Po
+ 	-rm -f ./$(DEPDIR)/hier_code.Po
+ 	-rm -f ./$(DEPDIR)/htcp.Po
+@@ -8133,7 +8119,6 @@ maintainer-clean: maintainer-clean-recursive
+ 	-rm -f ./$(DEPDIR)/fqdncache.Po
+ 	-rm -f ./$(DEPDIR)/fs_io.Po
+ 	-rm -f ./$(DEPDIR)/globals.Po
+-	-rm -f ./$(DEPDIR)/gopher.Po
+ 	-rm -f ./$(DEPDIR)/helper.Po
+ 	-rm -f ./$(DEPDIR)/hier_code.Po
+ 	-rm -f ./$(DEPDIR)/htcp.Po
+diff --git a/src/adaptation/ecap/Host.cc b/src/adaptation/ecap/Host.cc
+index 33fbb5a..5f17f86 100644
+--- a/src/adaptation/ecap/Host.cc
++++ b/src/adaptation/ecap/Host.cc
+@@ -49,7 +49,6 @@ Adaptation::Ecap::Host::Host()
+     libecap::protocolHttp.assignHostId(AnyP::PROTO_HTTP);
+     libecap::protocolHttps.assignHostId(AnyP::PROTO_HTTPS);
+     libecap::protocolFtp.assignHostId(AnyP::PROTO_FTP);
+-    libecap::protocolGopher.assignHostId(AnyP::PROTO_GOPHER);
+     libecap::protocolWais.assignHostId(AnyP::PROTO_WAIS);
+     libecap::protocolUrn.assignHostId(AnyP::PROTO_URN);
+     libecap::protocolWhois.assignHostId(AnyP::PROTO_WHOIS);
+diff --git a/src/adaptation/ecap/MessageRep.cc b/src/adaptation/ecap/MessageRep.cc
+index a2779e7..94595b6 100644
+--- a/src/adaptation/ecap/MessageRep.cc
++++ b/src/adaptation/ecap/MessageRep.cc
+@@ -140,8 +140,6 @@ Adaptation::Ecap::FirstLineRep::protocol() const
+         return libecap::protocolHttps;
+     case AnyP::PROTO_FTP:
+         return libecap::protocolFtp;
+-    case AnyP::PROTO_GOPHER:
+-        return libecap::protocolGopher;
+     case AnyP::PROTO_WAIS:
+         return libecap::protocolWais;
+     case AnyP::PROTO_WHOIS:
+diff --git a/src/anyp/ProtocolType.h b/src/anyp/ProtocolType.h
+index 66f7bc2..ef3ab25 100644
+--- a/src/anyp/ProtocolType.h
++++ b/src/anyp/ProtocolType.h
+@@ -27,7 +27,6 @@ typedef enum {
+     PROTO_HTTPS,
+     PROTO_COAP,
+     PROTO_COAPS,
+-    PROTO_GOPHER,
+     PROTO_WAIS,
+     PROTO_CACHE_OBJECT,
+     PROTO_ICP,
+diff --git a/src/anyp/Uri.cc b/src/anyp/Uri.cc
+index ced3181..b0b60cf 100644
+--- a/src/anyp/Uri.cc
++++ b/src/anyp/Uri.cc
+@@ -885,8 +885,6 @@ urlCheckRequest(const HttpRequest * r)
+         if (r->method == Http::METHOD_PUT)
+             rc = 1;
+ 
+-    case AnyP::PROTO_GOPHER:
+-
+     case AnyP::PROTO_WAIS:
+ 
+     case AnyP::PROTO_WHOIS:
+diff --git a/src/anyp/UriScheme.cc b/src/anyp/UriScheme.cc
+index bac5435..f96c73f 100644
+--- a/src/anyp/UriScheme.cc
++++ b/src/anyp/UriScheme.cc
+@@ -87,9 +87,6 @@ AnyP::UriScheme::defaultPort() const
+         // Assuming IANA policy of allocating same port for base and TLS protocol versions will occur.
+         return 5683;
+ 
+-    case AnyP::PROTO_GOPHER:
+-        return 70;
+-
+     case AnyP::PROTO_WAIS:
+         return 210;
+ 
+diff --git a/src/cf.data.pre b/src/cf.data.pre
+index b5519b2..bc2ddcd 100644
+--- a/src/cf.data.pre
++++ b/src/cf.data.pre
+@@ -1513,7 +1513,6 @@ acl SSL_ports port 443
+ acl Safe_ports port 80		# http
+ acl Safe_ports port 21		# ftp
+ acl Safe_ports port 443		# https
+-acl Safe_ports port 70		# gopher
+ acl Safe_ports port 210		# wais
+ acl Safe_ports port 1025-65535	# unregistered ports
+ acl Safe_ports port 280		# http-mgmt
+@@ -4563,7 +4562,7 @@ DOC_START
+ 	[http::]url.getScheme() == AnyP::PROTO_HTTP)
+ 	    return method.respMaybeCacheable();
+ 
+-	if (request->url.getScheme() == AnyP::PROTO_GOPHER)
+-	    return gopherCachable(request);
+-
+ 	if (request->url.getScheme() == AnyP::PROTO_CACHE_OBJECT)
+ 	    return 0;
+ 
+diff --git a/src/err_type.h b/src/err_type.h
+index 742fc5a..dbb4527 100644
+--- a/src/err_type.h
++++ b/src/err_type.h
+@@ -65,7 +65,7 @@ typedef enum {
+     ERR_GATEWAY_FAILURE,
+ 
+     /* Special Cases */
+-    ERR_DIR_LISTING,        /* Display of remote directory (FTP, Gopher) */
++    ERR_DIR_LISTING,        /* Display of remote directory (FTP) */
+     ERR_SQUID_SIGNATURE,    /* not really an error */
+     ERR_SHUTTING_DOWN,
+     ERR_PROTOCOL_UNKNOWN,
+diff --git a/src/gopher.cc b/src/gopher.cc
+deleted file mode 100644
+index 6187da1..0000000
+--- a/src/gopher.cc
++++ /dev/null
+@@ -1,977 +0,0 @@
+-/*
+- * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
+- *
+- * Squid software is distributed under GPLv2+ license and includes
+- * contributions from numerous individuals and organizations.
+- * Please see the COPYING and CONTRIBUTORS files for details.
+- */
+-
+-/* DEBUG: section 10    Gopher */
+-
+-#include "squid.h"
+-#include "comm.h"
+-#include "comm/Read.h"
+-#include "comm/Write.h"
+-#include "errorpage.h"
+-#include "fd.h"
+-#include "FwdState.h"
+-#include "globals.h"
+-#include "html_quote.h"
+-#include "HttpReply.h"
+-#include "HttpRequest.h"
+-#include "MemBuf.h"
+-#include "mime.h"
+-#include "parser/Tokenizer.h"
+-#include "rfc1738.h"
+-#include "SquidConfig.h"
+-#include "SquidTime.h"
+-#include "StatCounters.h"
+-#include "Store.h"
+-#include "tools.h"
+-
+-#if USE_DELAY_POOLS
+-#include "DelayPools.h"
+-#include "MemObject.h"
+-#endif
+-
+-/* gopher type code from rfc. Anawat. */
+-#define GOPHER_FILE         '0'
+-#define GOPHER_DIRECTORY    '1'
+-#define GOPHER_CSO          '2'
+-#define GOPHER_ERROR        '3'
+-#define GOPHER_MACBINHEX    '4'
+-#define GOPHER_DOSBIN       '5'
+-#define GOPHER_UUENCODED    '6'
+-#define GOPHER_INDEX        '7'
+-#define GOPHER_TELNET       '8'
+-#define GOPHER_BIN          '9'
+-#define GOPHER_REDUNT       '+'
+-#define GOPHER_3270         'T'
+-#define GOPHER_GIF          'g'
+-#define GOPHER_IMAGE        'I'
+-
+-#define GOPHER_HTML         'h'
+-#define GOPHER_INFO         'i'
+-
+-/// W3 address
+-#define GOPHER_WWW          'w'
+-#define GOPHER_SOUND        's'
+-
+-#define GOPHER_PLUS_IMAGE   ':'
+-#define GOPHER_PLUS_MOVIE   ';'
+-#define GOPHER_PLUS_SOUND   '<'
+-
+-#define GOPHER_PORT         70
+-
+-#define TAB                 '\t'
+-
+-// TODO CODE: should this be a protocol-specific thing?
+-#define TEMP_BUF_SIZE       4096
+-
+-#define MAX_CSO_RESULT      1024
+-
+-/**
+- * Gopher Gateway Internals
+- *
+- * Gopher is somewhat complex and gross because it must convert from
+- * the Gopher protocol to HTTP.
+- */
+-class GopherStateData
+-{
+-    CBDATA_CLASS(GopherStateData);
+-
+-public:
+-    GopherStateData(FwdState *aFwd) :
+-        entry(aFwd->entry),
+-        conversion(NORMAL),
+-        HTML_header_added(0),
+-        HTML_pre(0),
+-        type_id(GOPHER_FILE /* '0' */),
+-        cso_recno(0),
+-        len(0),
+-        buf(NULL),
+-        fwd(aFwd)
+-    {
+-        *request = 0;
+-        buf = (char *)memAllocate(MEM_4K_BUF);
+-        entry->lock("gopherState");
+-        *replybuf = 0;
+-    }
+-    ~GopherStateData() {if(buf) swanSong();}
+-
+-    /* AsyncJob API emulated */
+-    void deleteThis(const char *aReason);
+-    void swanSong();
+-
+-public:
+-    StoreEntry *entry;
+-    enum {
+-        NORMAL,
+-        HTML_DIR,
+-        HTML_INDEX_RESULT,
+-        HTML_CSO_RESULT,
+-        HTML_INDEX_PAGE,
+-        HTML_CSO_PAGE
+-    } conversion;
+-    int HTML_header_added;
+-    int HTML_pre;
+-    char type_id;
+-    char request[MAX_URL];
+-    int cso_recno;
+-    int len;
+-    char *buf;      /* pts to a 4k page */
+-    Comm::ConnectionPointer serverConn;
+-    FwdState::Pointer fwd;
+-    HttpReply::Pointer reply_;
+-    char replybuf[BUFSIZ];
+-};
+-
+-CBDATA_CLASS_INIT(GopherStateData);
+-
+-static CLCB gopherStateFree;
+-static void gopherMimeCreate(GopherStateData *);
+-static void gopher_request_parse(const HttpRequest * req,
+-                                 char *type_id,
+-                                 char *request);
+-static void gopherEndHTML(GopherStateData *);
+-static void gopherToHTML(GopherStateData *, char *inbuf, int len);
+-static CTCB gopherTimeout;
+-static IOCB gopherReadReply;
+-static IOCB gopherSendComplete;
+-static PF gopherSendRequest;
+-
+-static char def_gopher_bin[] = "www/unknown";
+-
+-static char def_gopher_text[] = "text/plain";
+-
+-static void
+-gopherStateFree(const CommCloseCbParams &params)
+-{
+-    GopherStateData *gopherState = (GopherStateData *)params.data;
+-
+-    if (gopherState == NULL)
+-        return;
+-
+-    gopherState->deleteThis("gopherStateFree");
+-}
+-
+-void
+-GopherStateData::deleteThis(const char *)
+-{
+-    swanSong();
+-    delete this;
+-}
+-
+-void
+-GopherStateData::swanSong()
+-{
+-    if (entry)
+-        entry->unlock("gopherState");
+-
+-    if (buf) {
+-        memFree(buf, MEM_4K_BUF);
+-        buf = nullptr;
+-    }
+-}
+-
+-/**
+- * Create MIME Header for Gopher Data
+- */
+-static void
+-gopherMimeCreate(GopherStateData * gopherState)
+-{
+-    StoreEntry *entry = gopherState->entry;
+-    const char *mime_type = NULL;
+-    const char *mime_enc = NULL;
+-
+-    switch (gopherState->type_id) {
+-
+-    case GOPHER_DIRECTORY:
+-
+-    case GOPHER_INDEX:
+-
+-    case GOPHER_HTML:
+-
+-    case GOPHER_WWW:
+-
+-    case GOPHER_CSO:
+-        mime_type = "text/html";
+-        break;
+-
+-    case GOPHER_GIF:
+-
+-    case GOPHER_IMAGE:
+-
+-    case GOPHER_PLUS_IMAGE:
+-        mime_type = "image/gif";
+-        break;
+-
+-    case GOPHER_SOUND:
+-
+-    case GOPHER_PLUS_SOUND:
+-        mime_type = "audio/basic";
+-        break;
+-
+-    case GOPHER_PLUS_MOVIE:
+-        mime_type = "video/mpeg";
+-        break;
+-
+-    case GOPHER_MACBINHEX:
+-
+-    case GOPHER_DOSBIN:
+-
+-    case GOPHER_UUENCODED:
+-
+-    case GOPHER_BIN:
+-        /* Rightnow We have no idea what it is. */
+-        mime_enc = mimeGetContentEncoding(gopherState->request);
+-        mime_type = mimeGetContentType(gopherState->request);
+-        if (!mime_type)
+-            mime_type = def_gopher_bin;
+-        break;
+-
+-    case GOPHER_FILE:
+-
+-    default:
+-        mime_enc = mimeGetContentEncoding(gopherState->request);
+-        mime_type = mimeGetContentType(gopherState->request);
+-        if (!mime_type)
+-            mime_type = def_gopher_text;
+-        break;
+-    }
+-
+-    assert(entry->isEmpty());
+-
+-    HttpReply *reply = new HttpReply;
+-    entry->buffer();
+-    reply->setHeaders(Http::scOkay, "Gatewaying", mime_type, -1, -1, -2);
+-    if (mime_enc)
+-        reply->header.putStr(Http::HdrType::CONTENT_ENCODING, mime_enc);
+-
+-    entry->replaceHttpReply(reply);
+-    gopherState->reply_ = reply;
+-}
+-
+-/**
+- * Parse a gopher request into components. By Anawat.
+- */
+-static void
+-gopher_request_parse(const HttpRequest * req, char *type_id, char *request)
+-{
+-    ::Parser::Tokenizer tok(req->url.path());
+-
+-    if (request)
+-        *request = 0;
+-
+-    tok.skip('/'); // ignore failures? path could be ab-empty
+-
+-    if (tok.atEnd()) {
+-        *type_id = GOPHER_DIRECTORY;
+-        return;
+-    }
+-
+-    static const CharacterSet anyByte("UTF-8",0x00, 0xFF);
+-
+-    SBuf typeId;
+-    (void)tok.prefix(typeId, anyByte, 1); // never fails since !atEnd()
+-    *type_id = typeId[0];
+-
+-    if (request) {
+-        SBufToCstring(request, tok.remaining().substr(0, MAX_URL-1));
+-        /* convert %xx to char */
+-        rfc1738_unescape(request);
+-    }
+-}
+-
+-/**
+- * Parse the request to determine whether it is cachable.
+- *
+- * \param req    Request data.
+- * \retval 0     Not cachable.
+- * \retval 1     Cachable.
+- */
+-int
+-gopherCachable(const HttpRequest * req)
+-{
+-    int cachable = 1;
+-    char type_id;
+-    /* parse to see type */
+-    gopher_request_parse(req,
+-                         &type_id,
+-                         NULL);
+-
+-    switch (type_id) {
+-
+-    case GOPHER_INDEX:
+-
+-    case GOPHER_CSO:
+-
+-    case GOPHER_TELNET:
+-
+-    case GOPHER_3270:
+-        cachable = 0;
+-        break;
+-
+-    default:
+-        cachable = 1;
+-    }
+-
+-    return cachable;
+-}
+-
+-static void
+-gopherHTMLHeader(StoreEntry * e, const char *title, const char *substring)
+-{
+-    storeAppendPrintf(e, "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">\n");
+-    storeAppendPrintf(e, "<HTML><HEAD><TITLE>");
+-    storeAppendPrintf(e, title, substring);
+-    storeAppendPrintf(e, "</TITLE>");
+-    storeAppendPrintf(e, "<STYLE type=\"text/css\"><!--BODY{background-color:#ffffff;font-family:verdana,sans-serif}--></STYLE>\n");
+-    storeAppendPrintf(e, "</HEAD><BODY><H1>");
+-    storeAppendPrintf(e, title, substring);
+-    storeAppendPrintf(e, "</H1>\n");
+-}
+-
+-static void
+-gopherHTMLFooter(StoreEntry * e)
+-{
+-    storeAppendPrintf(e, "<HR noshade size=\"1px\">\n");
+-    storeAppendPrintf(e, "<ADDRESS>\n");
+-    storeAppendPrintf(e, "Generated %s by %s (%s)\n",
+-                      mkrfc1123(squid_curtime),
+-                      getMyHostname(),
+-                      visible_appname_string);
+-    storeAppendPrintf(e, "</ADDRESS></BODY></HTML>\n");
+-}
+-
+-static void
+-gopherEndHTML(GopherStateData * gopherState)
+-{
+-    StoreEntry *e = gopherState->entry;
+-
+-    if (!gopherState->HTML_header_added) {
+-        gopherHTMLHeader(e, "Server Return Nothing", NULL);
+-        storeAppendPrintf(e, "<P>The Gopher query resulted in a blank response</P>");
+-    } else if (gopherState->HTML_pre) {
+-        storeAppendPrintf(e, "</PRE>\n");
+-    }
+-
+-    gopherHTMLFooter(e);
+-}
+-
+-/**
+- * Convert Gopher to HTML.
+- *
+- * Borrow part of code from libwww2 came with Mosaic distribution.
+- */
+-static void
+-gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
+-{
+-    char *pos = inbuf;
+-    char *lpos = NULL;
+-    char *tline = NULL;
+-    LOCAL_ARRAY(char, line, TEMP_BUF_SIZE);
+-    char *name = NULL;
+-    char *selector = NULL;
+-    char *host = NULL;
+-    char *port = NULL;
+-    char *escaped_selector = NULL;
+-    const char *icon_url = NULL;
+-    char gtype;
+-    StoreEntry *entry = NULL;
+-
+-    memset(line, '\0', TEMP_BUF_SIZE);
+-
+-    entry = gopherState->entry;
+-
+-    if (gopherState->conversion == GopherStateData::HTML_INDEX_PAGE) {
+-        char *html_url = html_quote(entry->url());
+-        gopherHTMLHeader(entry, "Gopher Index %s", html_url);
+-        storeAppendPrintf(entry,
+-                          "<p>This is a searchable Gopher index. Use the search\n"
+-                          "function of your browser to enter search terms.\n"
+-                          "<ISINDEX>\n");
+-        gopherHTMLFooter(entry);
+-        /* now let start sending stuff to client */
+-        entry->flush();
+-        gopherState->HTML_header_added = 1;
+-
+-        return;
+-    }
+-
+-    if (gopherState->conversion == GopherStateData::HTML_CSO_PAGE) {
+-        char *html_url = html_quote(entry->url());
+-        gopherHTMLHeader(entry, "CSO Search of %s", html_url);
+-        storeAppendPrintf(entry,
+-                          "<P>A CSO database usually contains a phonebook or\n"
+-                          "directory. Use the search function of your browser to enter\n"
+-                          "search terms.</P><ISINDEX>\n");
+-        gopherHTMLFooter(entry);
+-        /* now let start sending stuff to client */
+-        entry->flush();
+-        gopherState->HTML_header_added = 1;
+-
+-        return;
+-    }
+-
+-    SBuf outbuf;
+-
+-    if (!gopherState->HTML_header_added) {
+-        if (gopherState->conversion == GopherStateData::HTML_CSO_RESULT)
+-            gopherHTMLHeader(entry, "CSO Search Result", NULL);
+-        else
+-            gopherHTMLHeader(entry, "Gopher Menu", NULL);
+-
+-        outbuf.append ("<PRE>");
+-        gopherState->HTML_header_added = 1;
+-
+-        gopherState->HTML_pre = 1;
+-    }
+-
+-    while (pos < inbuf + len) {
+-        int llen;
+-        int left = len - (pos - inbuf);
+-        lpos = (char *)memchr(pos, '\n', left);
+-        if (lpos) {
+-            ++lpos;             /* Next line is after \n */
+-            llen = lpos - pos;
+-        } else {
+-            llen = left;
+-        }
+-        if (gopherState->len + llen >= TEMP_BUF_SIZE) {
+-            debugs(10, DBG_IMPORTANT, "GopherHTML: Buffer overflow. Lost some data on URL: " << entry->url()  );
+-            llen = TEMP_BUF_SIZE - gopherState->len - 1;
+-        }
+-        if (!lpos) {
+-            /* there is no complete line in inbuf */
+-            /* copy it to temp buffer */
+-            /* note: llen is adjusted above */
+-            memcpy(gopherState->buf + gopherState->len, pos, llen);
+-            gopherState->len += llen;
+-            break;
+-        }
+-        if (gopherState->len != 0) {
+-            /* there is something left from last tx. */
+-            memcpy(line, gopherState->buf, gopherState->len);
+-            memcpy(line + gopherState->len, pos, llen);
+-            llen += gopherState->len;
+-            gopherState->len = 0;
+-        } else {
+-            memcpy(line, pos, llen);
+-        }
+-        line[llen + 1] = '\0';
+-        /* move input to next line */
+-        pos = lpos;
+-
+-        /* at this point. We should have one line in buffer to process */
+-
+-        if (*line == '.') {
+-            /* skip it */
+-            memset(line, '\0', TEMP_BUF_SIZE);
+-            continue;
+-        }
+-
+-        switch (gopherState->conversion) {
+-
+-        case GopherStateData::HTML_INDEX_RESULT:
+-
+-        case GopherStateData::HTML_DIR: {
+-            tline = line;
+-            gtype = *tline;
+-            ++tline;
+-            name = tline;
+-            selector = strchr(tline, TAB);
+-
+-            if (selector) {
+-                *selector = '\0';
+-                ++selector;
+-                host = strchr(selector, TAB);
+-
+-                if (host) {
+-                    *host = '\0';
+-                    ++host;
+-                    port = strchr(host, TAB);
+-
+-                    if (port) {
+-                        char *junk;
+-                        port[0] = ':';
+-                        junk = strchr(host, TAB);
+-
+-                        if (junk)
+-                            *junk++ = 0;    /* Chop port */
+-                        else {
+-                            junk = strchr(host, '\r');
+-
+-                            if (junk)
+-                                *junk++ = 0;    /* Chop port */
+-                            else {
+-                                junk = strchr(host, '\n');
+-
+-                                if (junk)
+-                                    *junk++ = 0;    /* Chop port */
+-                            }
+-                        }
+-
+-                        if ((port[1] == '0') && (!port[2]))
+-                            port[0] = 0;    /* 0 means none */
+-                    }
+-
+-                    /* escape a selector here */
+-                    escaped_selector = xstrdup(rfc1738_escape_part(selector));
+-
+-                    switch (gtype) {
+-
+-                    case GOPHER_DIRECTORY:
+-                        icon_url = mimeGetIconURL("internal-menu");
+-                        break;
+-
+-                    case GOPHER_HTML:
+-
+-                    case GOPHER_FILE:
+-                        icon_url = mimeGetIconURL("internal-text");
+-                        break;
+-
+-                    case GOPHER_INDEX:
+-
+-                    case GOPHER_CSO:
+-                        icon_url = mimeGetIconURL("internal-index");
+-                        break;
+-
+-                    case GOPHER_IMAGE:
+-
+-                    case GOPHER_GIF:
+-
+-                    case GOPHER_PLUS_IMAGE:
+-                        icon_url = mimeGetIconURL("internal-image");
+-                        break;
+-
+-                    case GOPHER_SOUND:
+-
+-                    case GOPHER_PLUS_SOUND:
+-                        icon_url = mimeGetIconURL("internal-sound");
+-                        break;
+-
+-                    case GOPHER_PLUS_MOVIE:
+-                        icon_url = mimeGetIconURL("internal-movie");
+-                        break;
+-
+-                    case GOPHER_TELNET:
+-
+-                    case GOPHER_3270:
+-                        icon_url = mimeGetIconURL("internal-telnet");
+-                        break;
+-
+-                    case GOPHER_BIN:
+-
+-                    case GOPHER_MACBINHEX:
+-
+-                    case GOPHER_DOSBIN:
+-
+-                    case GOPHER_UUENCODED:
+-                        icon_url = mimeGetIconURL("internal-binary");
+-                        break;
+-
+-                    case GOPHER_INFO:
+-                        icon_url = NULL;
+-                        break;
+-
+-                    default:
+-                        icon_url = mimeGetIconURL("internal-unknown");
+-                        break;
+-                    }
+-
+-                    if ((gtype == GOPHER_TELNET) || (gtype == GOPHER_3270)) {
+-                        if (strlen(escaped_selector) != 0)
+-                            outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s@%s%s%s/\">%s</A>\n",
+-                                           icon_url, escaped_selector, rfc1738_escape_part(host),
+-                                           *port ? ":" : "", port, html_quote(name));
+-                        else
+-                            outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s%s%s/\">%s</A>\n",
+-                                           icon_url, rfc1738_escape_part(host), *port ? ":" : "",
+-                                           port, html_quote(name));
+-
+-                    } else if (gtype == GOPHER_INFO) {
+-                        outbuf.appendf("\t%s\n", html_quote(name));
+-                    } else {
+-                        if (strncmp(selector, "GET /", 5) == 0) {
+-                            /* WWW link */
+-                            outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"http://%s/%s\">%s</A>\n",
+-                                           icon_url, host, rfc1738_escape_unescaped(selector + 5), html_quote(name));
+-                        } else if (gtype == GOPHER_WWW) {
+-                            outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"%s\">%s</A>\n",
+-                                           icon_url, rfc1738_escape_unescaped(selector), html_quote(name));
+-                        } else {
+-                            /* Standard link */
+-                            outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"gopher://%s/%c%s\">%s</A>\n",
+-                                           icon_url, host, gtype, escaped_selector, html_quote(name));
+-                        }
+-                    }
+-
+-                    safe_free(escaped_selector);
+-                } else {
+-                    memset(line, '\0', TEMP_BUF_SIZE);
+-                    continue;
+-                }
+-            } else {
+-                memset(line, '\0', TEMP_BUF_SIZE);
+-                continue;
+-            }
+-
+-            break;
+-            }           /* HTML_DIR, HTML_INDEX_RESULT */
+-
+-        case GopherStateData::HTML_CSO_RESULT: {
+-            if (line[0] == '-') {
+-                int code, recno;
+-                char *s_code, *s_recno, *result;
+-
+-                s_code = strtok(line + 1, ":\n");
+-                s_recno = strtok(NULL, ":\n");
+-                result = strtok(NULL, "\n");
+-
+-                if (!result)
+-                    break;
+-
+-                code = atoi(s_code);
+-
+-                recno = atoi(s_recno);
+-
+-                if (code != 200)
+-                    break;
+-
+-                if (gopherState->cso_recno != recno) {
+-                    outbuf.appendf("</PRE><HR noshade size=\"1px\"><H2>Record# %d<br><i>%s</i></H2>\n<PRE>", recno, html_quote(result));
+-                    gopherState->cso_recno = recno;
+-                } else {
+-                    outbuf.appendf("%s\n", html_quote(result));
+-                }
+-
+-                break;
+-            } else {
+-                int code;
+-                char *s_code, *result;
+-
+-                s_code = strtok(line, ":");
+-                result = strtok(NULL, "\n");
+-
+-                if (!result)
+-                    break;
+-
+-                code = atoi(s_code);
+-
+-                switch (code) {
+-
+-                case 200: {
+-                    /* OK */
+-                    /* Do nothing here */
+-                    break;
+-                }
+-
+-                case 102:   /* Number of matches */
+-
+-                case 501:   /* No Match */
+-
+-                case 502: { /* Too Many Matches */
+-                    /* Print the message the server returns */
+-                    outbuf.appendf("</PRE><HR noshade size=\"1px\"><H2>%s</H2>\n<PRE>", html_quote(result));
+-                    break;
+-                }
+-
+-                }
+-            }
+-
+-            }           /* HTML_CSO_RESULT */
+-
+-        default:
+-            break;      /* do nothing */
+-
+-        }           /* switch */
+-
+-    }               /* while loop */
+-
+-    if (outbuf.length() > 0) {
+-        entry->append(outbuf.rawContent(), outbuf.length());
+-        /* now let start sending stuff to client */
+-        entry->flush();
+-    }
+-
+-    return;
+-}
+-
+-static void
+-gopherTimeout(const CommTimeoutCbParams &io)
+-{
+-    GopherStateData *gopherState = static_cast<GopherStateData *>(io.data);
+-    debugs(10, 4, HERE << io.conn << ": '" << gopherState->entry->url() << "'" );
+-
+-    gopherState->fwd->fail(new ErrorState(ERR_READ_TIMEOUT, Http::scGatewayTimeout, gopherState->fwd->request));
+-
+-    if (Comm::IsConnOpen(io.conn))
+-        io.conn->close();
+-}
+-
+-/**
+- * This will be called when data is ready to be read from fd.
+- * Read until error or connection closed.
+- */
+-static void
+-gopherReadReply(const Comm::ConnectionPointer &conn, char *buf, size_t len, Comm::Flag flag, int xerrno, void *data)
+-{
+-    GopherStateData *gopherState = (GopherStateData *)data;
+-    StoreEntry *entry = gopherState->entry;
+-    int clen;
+-    int bin;
+-    size_t read_sz = BUFSIZ;
+-#if USE_DELAY_POOLS
+-    DelayId delayId = entry->mem_obj->mostBytesAllowed();
+-#endif
+-
+-    /* Bail out early on Comm::ERR_CLOSING - close handlers will tidy up for us */
+-
+-    if (flag == Comm::ERR_CLOSING) {
+-        return;
+-    }
+-
+-    assert(buf == gopherState->replybuf);
+-
+-    // XXX: Should update delayId, statCounter, etc. before bailing
+-    if (!entry->isAccepting()) {
+-        debugs(10, 3, "terminating due to bad " << *entry);
+-        // TODO: Do not abuse connection for triggering cleanup.
+-        gopherState->serverConn->close();
+-        return;
+-    }
+-
+-#if USE_DELAY_POOLS
+-    read_sz = delayId.bytesWanted(1, read_sz);
+-#endif
+-
+-    /* leave one space for \0 in gopherToHTML */
+-
+-    if (flag == Comm::OK && len > 0) {
+-#if USE_DELAY_POOLS
+-        delayId.bytesIn(len);
+-#endif
+-
+-        statCounter.server.all.kbytes_in += len;
+-        statCounter.server.other.kbytes_in += len;
+-    }
+-
+-    debugs(10, 5, HERE << conn << " read len=" << len);
+-
+-    if (flag == Comm::OK && len > 0) {
+-        AsyncCall::Pointer nil;
+-        commSetConnTimeout(conn, Config.Timeout.read, nil);
+-        ++IOStats.Gopher.reads;
+-
+-        for (clen = len - 1, bin = 0; clen; ++bin)
+-            clen >>= 1;
+-
+-        ++IOStats.Gopher.read_hist[bin];
+-
+-        HttpRequest *req = gopherState->fwd->request;
+-        if (req->hier.bodyBytesRead < 0) {
+-            req->hier.bodyBytesRead = 0;
+-            // first bytes read, update Reply flags:
+-            gopherState->reply_->sources |= HttpMsg::srcGopher;
+-        }
+-
+-        req->hier.bodyBytesRead += len;
+-    }
+-
+-    if (flag != Comm::OK) {
+-        debugs(50, DBG_IMPORTANT, MYNAME << "error reading: " << xstrerr(xerrno));
+-
+-        if (ignoreErrno(xerrno)) {
+-            AsyncCall::Pointer call = commCbCall(5,4, "gopherReadReply",
+-                                                 CommIoCbPtrFun(gopherReadReply, gopherState));
+-            comm_read(conn, buf, read_sz, call);
+-        } else {
+-            ErrorState *err = new ErrorState(ERR_READ_ERROR, Http::scInternalServerError, gopherState->fwd->request);
+-            err->xerrno = xerrno;
+-            gopherState->fwd->fail(err);
+-            gopherState->serverConn->close();
+-        }
+-    } else if (len == 0 && entry->isEmpty()) {
+-        gopherState->fwd->fail(new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, gopherState->fwd->request));
+-        gopherState->serverConn->close();
+-    } else if (len == 0) {
+-        /* Connection closed; retrieval done. */
+-        /* flush the rest of data in temp buf if there is one. */
+-
+-        if (gopherState->conversion != GopherStateData::NORMAL)
+-            gopherEndHTML(gopherState);
+-
+-        entry->timestampsSet();
+-        entry->flush();
+-        gopherState->fwd->complete();
+-        gopherState->serverConn->close();
+-    } else {
+-        if (gopherState->conversion != GopherStateData::NORMAL) {
+-            gopherToHTML(gopherState, buf, len);
+-        } else {
+-            entry->append(buf, len);
+-        }
+-        AsyncCall::Pointer call = commCbCall(5,4, "gopherReadReply",
+-                                             CommIoCbPtrFun(gopherReadReply, gopherState));
+-        comm_read(conn, buf, read_sz, call);
+-    }
+-}
+-
+-/**
+- * This will be called when request write is complete. Schedule read of reply.
+- */
+-static void
+-gopherSendComplete(const Comm::ConnectionPointer &conn, char *, size_t size, Comm::Flag errflag, int xerrno, void *data)
+-{
+-    GopherStateData *gopherState = (GopherStateData *) data;
+-    StoreEntry *entry = gopherState->entry;
+-    debugs(10, 5, HERE << conn << " size: " << size << " errflag: " << errflag);
+-
+-    if (size > 0) {
+-        fd_bytes(conn->fd, size, FD_WRITE);
+-        statCounter.server.all.kbytes_out += size;
+-        statCounter.server.other.kbytes_out += size;
+-    }
+-
+-    if (!entry->isAccepting()) {
+-        debugs(10, 3, "terminating due to bad " << *entry);
+-        // TODO: Do not abuse connection for triggering cleanup.
+-        gopherState->serverConn->close();
+-        return;
+-    }
+-
+-    if (errflag) {
+-        ErrorState *err;
+-        err = new ErrorState(ERR_WRITE_ERROR, Http::scServiceUnavailable, gopherState->fwd->request);
+-        err->xerrno = xerrno;
+-        err->port = gopherState->fwd->request->url.port();
+-        err->url = xstrdup(entry->url());
+-        gopherState->fwd->fail(err);
+-        gopherState->serverConn->close();
+-        return;
+-    }
+-
+-    /*
+-     * OK. We successfully reach remote site.  Start MIME typing
+-     * stuff.  Do it anyway even though request is not HTML type.
+-     */
+-    entry->buffer();
+-
+-    gopherMimeCreate(gopherState);
+-
+-    switch (gopherState->type_id) {
+-
+-    case GOPHER_DIRECTORY:
+-        /* we got to convert it first */
+-        gopherState->conversion = GopherStateData::HTML_DIR;
+-        gopherState->HTML_header_added = 0;
+-        break;
+-
+-    case GOPHER_INDEX:
+-        /* we got to convert it first */
+-        gopherState->conversion = GopherStateData::HTML_INDEX_RESULT;
+-        gopherState->HTML_header_added = 0;
+-        break;
+-
+-    case GOPHER_CSO:
+-        /* we got to convert it first */
+-        gopherState->conversion = GopherStateData::HTML_CSO_RESULT;
+-        gopherState->cso_recno = 0;
+-        gopherState->HTML_header_added = 0;
+-        break;
+-
+-    default:
+-        gopherState->conversion = GopherStateData::NORMAL;
+-        entry->flush();
+-    }
+-
+-    /* Schedule read reply. */
+-    AsyncCall::Pointer call =  commCbCall(5,5, "gopherReadReply",
+-                                          CommIoCbPtrFun(gopherReadReply, gopherState));
+-    entry->delayAwareRead(conn, gopherState->replybuf, BUFSIZ, call);
+-}
+-
+-/**
+- * This will be called when connect completes. Write request.
+- */
+-static void
+-gopherSendRequest(int, void *data)
+-{
+-    GopherStateData *gopherState = (GopherStateData *)data;
+-    MemBuf mb;
+-    mb.init();
+-
+-    if (gopherState->type_id == GOPHER_CSO) {
+-        const char *t = strchr(gopherState->request, '?');
+-
+-        if (t)
+-            ++t;        /* skip the ? */
+-        else
+-            t = "";
+-
+-        mb.appendf("query %s\r\nquit", t);
+-    } else {
+-        if (gopherState->type_id == GOPHER_INDEX) {
+-            if (char *t = strchr(gopherState->request, '?'))
+-                *t = '\t';
+-        }
+-        mb.append(gopherState->request, strlen(gopherState->request));
+-    }
+-    mb.append("\r\n", 2);
+-
+-    debugs(10, 5, gopherState->serverConn);
+-    AsyncCall::Pointer call = commCbCall(5,5, "gopherSendComplete",
+-                                         CommIoCbPtrFun(gopherSendComplete, gopherState));
+-    Comm::Write(gopherState->serverConn, &mb, call);
+-
+-    if (!gopherState->entry->makePublic())
+-        gopherState->entry->makePrivate(true);
+-}
+-
+-void
+-gopherStart(FwdState * fwd)
+-{
+-    GopherStateData *gopherState = new GopherStateData(fwd);
+-
+-    debugs(10, 3, gopherState->entry->url());
+-
+-    ++ statCounter.server.all.requests;
+-
+-    ++ statCounter.server.other.requests;
+-
+-    /* Parse url. */
+-    gopher_request_parse(fwd->request,
+-                         &gopherState->type_id, gopherState->request);
+-
+-    comm_add_close_handler(fwd->serverConnection()->fd, gopherStateFree, gopherState);
+-
+-    if (((gopherState->type_id == GOPHER_INDEX) || (gopherState->type_id == GOPHER_CSO))
+-            && (strchr(gopherState->request, '?') == NULL)) {
+-        /* Index URL without query word */
+-        /* We have to generate search page back to client. No need for connection */
+-        gopherMimeCreate(gopherState);
+-
+-        if (gopherState->type_id == GOPHER_INDEX) {
+-            gopherState->conversion = GopherStateData::HTML_INDEX_PAGE;
+-        } else {
+-            if (gopherState->type_id == GOPHER_CSO) {
+-                gopherState->conversion = GopherStateData::HTML_CSO_PAGE;
+-            } else {
+-                gopherState->conversion = GopherStateData::HTML_INDEX_PAGE;
+-            }
+-        }
+-
+-        gopherToHTML(gopherState, (char *) NULL, 0);
+-        fwd->complete();
+-        return;
+-    }
+-
+-    gopherState->serverConn = fwd->serverConnection();
+-    gopherSendRequest(fwd->serverConnection()->fd, gopherState);
+-    AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "gopherTimeout",
+-                                     CommTimeoutCbPtrFun(gopherTimeout, gopherState));
+-    commSetConnTimeout(fwd->serverConnection(), Config.Timeout.read, timeoutCall);
+-}
+-
+diff --git a/src/gopher.h b/src/gopher.h
+deleted file mode 100644
+index 1d73bac..0000000
+--- a/src/gopher.h
++++ /dev/null
+@@ -1,29 +0,0 @@
+-/*
+- * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
+- *
+- * Squid software is distributed under GPLv2+ license and includes
+- * contributions from numerous individuals and organizations.
+- * Please see the COPYING and CONTRIBUTORS files for details.
+- */
+-
+-/* DEBUG: section 10    Gopher */
+-
+-#ifndef SQUID_GOPHER_H_
+-#define SQUID_GOPHER_H_
+-
+-class FwdState;
+-class HttpRequest;
+-
+-/**
+- \defgroup ServerProtocolGopherAPI Server-Side Gopher API
+- \ingroup ServerProtocol
+- */
+-
+-/// \ingroup ServerProtocolGopherAPI
+-void gopherStart(FwdState *);
+-
+-/// \ingroup ServerProtocolGopherAPI
+-int gopherCachable(const HttpRequest *);
+-
+-#endif /* SQUID_GOPHER_H_ */
+-
+diff --git a/src/mgr/IoAction.cc b/src/mgr/IoAction.cc
+index 149f2c4..e48a2e0 100644
+--- a/src/mgr/IoAction.cc
++++ b/src/mgr/IoAction.cc
+@@ -35,9 +35,6 @@ Mgr::IoActionData::operator += (const IoActionData& stats)
+     ftp_reads += stats.ftp_reads;
+     for (int i = 0; i < IoStats::histSize; ++i)
+         ftp_read_hist[i] += stats.ftp_read_hist[i];
+-    gopher_reads += stats.gopher_reads;
+-    for (int i = 0; i < IoStats::histSize; ++i)
+-        gopher_read_hist[i] += stats.gopher_read_hist[i];
+ 
+     return *this;
+ }
+diff --git a/src/mgr/IoAction.h b/src/mgr/IoAction.h
+index 32de089..f11ade7 100644
+--- a/src/mgr/IoAction.h
++++ b/src/mgr/IoAction.h
+@@ -27,10 +27,8 @@ public:
+ public:
+     double http_reads;
+     double ftp_reads;
+-    double gopher_reads;
+     double http_read_hist[IoStats::histSize];
+     double ftp_read_hist[IoStats::histSize];
+-    double gopher_read_hist[IoStats::histSize];
+ };
+ 
+ /// implement aggregated 'io' action
+diff --git a/src/squid.8.in b/src/squid.8.in
+index 11135c3..bfffd91 100644
+--- a/src/squid.8.in
++++ b/src/squid.8.in
+@@ -25,7 +25,7 @@ command\-line
+ .PP
+ .B squid
+ is a high\-performance proxy caching server for web clients,
+-supporting FTP, gopher, ICAP, ICP, HTCP and HTTP data objects.
++supporting FTP, ICAP, ICP, HTCP and HTTP data objects.
+ Unlike traditional caching software, 
+ Squid handles all requests in a single, non-blocking process.
+ .PP
+diff --git a/src/stat.cc b/src/stat.cc
+index 8a59be4..4ed2c57 100644
+--- a/src/stat.cc
++++ b/src/stat.cc
+@@ -206,12 +206,6 @@ GetIoStats(Mgr::IoActionData& stats)
+     for (i = 0; i < IoStats::histSize; ++i) {
+         stats.ftp_read_hist[i] = IOStats.Ftp.read_hist[i];
+     }
+-
+-    stats.gopher_reads = IOStats.Gopher.reads;
+-
+-    for (i = 0; i < IoStats::histSize; ++i) {
+-        stats.gopher_read_hist[i] = IOStats.Gopher.read_hist[i];
+-    }
+ }
+ 
+ void
+@@ -244,19 +238,6 @@ DumpIoStats(Mgr::IoActionData& stats, StoreEntry* sentry)
+                           Math::doublePercent(stats.ftp_read_hist[i], stats.ftp_reads));
+     }
+ 
+-    storeAppendPrintf(sentry, "\n");
+-    storeAppendPrintf(sentry, "Gopher I/O\n");
+-    storeAppendPrintf(sentry, "number of reads: %.0f\n", stats.gopher_reads);
+-    storeAppendPrintf(sentry, "Read Histogram:\n");
+-
+-    for (i = 0; i < IoStats::histSize; ++i) {
+-        storeAppendPrintf(sentry, "%5d-%5d: %9.0f %2.0f%%\n",
+-                          i ? (1 << (i - 1)) + 1 : 1,
+-                          1 << i,
+-                          stats.gopher_read_hist[i],
+-                          Math::doublePercent(stats.gopher_read_hist[i], stats.gopher_reads));
+-    }
+-
+     storeAppendPrintf(sentry, "\n");
+ }
+ 
+diff --git a/test-suite/squidconf/regressions-3.4.0.1 b/test-suite/squidconf/regressions-3.4.0.1
+index 41a441b..85f0a64 100644
+--- a/test-suite/squidconf/regressions-3.4.0.1
++++ b/test-suite/squidconf/regressions-3.4.0.1
+@@ -44,6 +44,5 @@ refresh_pattern -i \.(gif|png|jpg|jpeg|ico)$ 40320 75% 86400
+ refresh_pattern -i \.(iso|avi|wav|mp3|mpeg|swf|flv|x-flv)$ 1440 40% 40320
+ 
+ refresh_pattern ^ftp:           1440    20%     10080
+-refresh_pattern ^gopher:        1440    0%      1440
+ refresh_pattern -i (/cgi-bin/|\?)       0       0%      0
+ refresh_pattern .       0       20%     4320
diff --git a/SOURCES/squid-4.15-CVE-2023-46846.patch b/SOURCES/squid-4.15-CVE-2023-46846.patch
new file mode 100644
index 0000000..5738703
--- /dev/null
+++ b/SOURCES/squid-4.15-CVE-2023-46846.patch
@@ -0,0 +1,1281 @@
+diff --git a/src/adaptation/icap/ModXact.cc b/src/adaptation/icap/ModXact.cc
+index 2db0a68..370f077 100644
+--- a/src/adaptation/icap/ModXact.cc
++++ b/src/adaptation/icap/ModXact.cc
+@@ -25,12 +25,13 @@
+ #include "comm.h"
+ #include "comm/Connection.h"
+ #include "err_detail_type.h"
+-#include "http/one/TeChunkedParser.h"
+ #include "HttpHeaderTools.h"
+ #include "HttpMsg.h"
+ #include "HttpReply.h"
+ #include "HttpRequest.h"
+ #include "MasterXaction.h"
++#include "parser/Tokenizer.h"
++#include "sbuf/Stream.h"
+ #include "SquidTime.h"
+ 
+ // flow and terminology:
+@@ -44,6 +45,8 @@ CBDATA_NAMESPACED_CLASS_INIT(Adaptation::Icap, ModXactLauncher);
+ 
+ static const size_t TheBackupLimit = BodyPipe::MaxCapacity;
+ 
++const SBuf Adaptation::Icap::ChunkExtensionValueParser::UseOriginalBodyName("use-original-body");
++
+ Adaptation::Icap::ModXact::State::State()
+ {
+     memset(this, 0, sizeof(*this));
+@@ -1108,6 +1111,7 @@ void Adaptation::Icap::ModXact::decideOnParsingBody()
+         state.parsing = State::psBody;
+         replyHttpBodySize = 0;
+         bodyParser = new Http1::TeChunkedParser;
++        bodyParser->parseExtensionValuesWith(&extensionParser);
+         makeAdaptedBodyPipe("adapted response from the ICAP server");
+         Must(state.sending == State::sendingAdapted);
+     } else {
+@@ -1142,9 +1146,8 @@ void Adaptation::Icap::ModXact::parseBody()
+     }
+ 
+     if (parsed) {
+-        if (state.readyForUob && bodyParser->useOriginBody >= 0) {
+-            prepPartialBodyEchoing(
+-                static_cast<uint64_t>(bodyParser->useOriginBody));
++        if (state.readyForUob && extensionParser.sawUseOriginalBody()) {
++            prepPartialBodyEchoing(extensionParser.useOriginalBody());
+             stopParsing();
+             return;
+         }
+@@ -2014,3 +2017,14 @@ void Adaptation::Icap::ModXactLauncher::updateHistory(bool doStart)
+     }
+ }
+ 
++void
++Adaptation::Icap::ChunkExtensionValueParser::parse(Tokenizer &tok, const SBuf &extName)
++{
++    if (extName == UseOriginalBodyName) {
++        useOriginalBody_ = tok.udec64("use-original-body");
++        assert(useOriginalBody_ >= 0);
++    } else {
++        Ignore(tok, extName);
++    }
++}
++
+diff --git a/src/adaptation/icap/ModXact.h b/src/adaptation/icap/ModXact.h
+index f7afa69..fb4dec0 100644
+--- a/src/adaptation/icap/ModXact.h
++++ b/src/adaptation/icap/ModXact.h
+@@ -15,6 +15,7 @@
+ #include "adaptation/icap/Xaction.h"
+ #include "BodyPipe.h"
+ #include "http/one/forward.h"
++#include "http/one/TeChunkedParser.h"
+ 
+ /*
+  * ICAPModXact implements ICAP REQMOD and RESPMOD transaction using
+@@ -105,6 +106,23 @@ private:
+     enum State { stDisabled, stWriting, stIeof, stDone } theState;
+ };
+ 
++/// handles ICAP-specific chunk extensions supported by Squid
++class ChunkExtensionValueParser: public Http1::ChunkExtensionValueParser
++{
++public:
++    /* Http1::ChunkExtensionValueParser API */
++    virtual void parse(Tokenizer &tok, const SBuf &extName) override;
++
++    bool sawUseOriginalBody() const { return useOriginalBody_ >= 0; }
++    uint64_t useOriginalBody() const { assert(sawUseOriginalBody()); return static_cast<uint64_t>(useOriginalBody_); }
++
++private:
++    static const SBuf UseOriginalBodyName;
++
++    /// the value of the parsed use-original-body chunk extension (or -1)
++    int64_t useOriginalBody_ = -1;
++};
++
+ class ModXact: public Xaction, public BodyProducer, public BodyConsumer
+ {
+     CBDATA_CLASS(ModXact);
+@@ -270,6 +288,8 @@ private:
+ 
+     int adaptHistoryId; ///< adaptation history slot reservation
+ 
++    ChunkExtensionValueParser extensionParser;
++
+     class State
+     {
+ 
+diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
+index 0c86733..affe0b1 100644
+--- a/src/http/one/Parser.cc
++++ b/src/http/one/Parser.cc
+@@ -7,10 +7,11 @@
+  */
+ 
+ #include "squid.h"
++#include "base/CharacterSet.h"
+ #include "Debug.h"
+ #include "http/one/Parser.h"
+-#include "http/one/Tokenizer.h"
+ #include "mime_header.h"
++#include "parser/Tokenizer.h"
+ #include "SquidConfig.h"
+ 
+ /// RFC 7230 section 2.6 - 7 magic octets
+@@ -61,20 +62,19 @@ Http::One::Parser::DelimiterCharacters()
+            RelaxedDelimiterCharacters() : CharacterSet::SP;
+ }
+ 
+-bool
+-Http::One::Parser::skipLineTerminator(Http1::Tokenizer &tok) const
++void
++Http::One::Parser::skipLineTerminator(Tokenizer &tok) const
+ {
+     if (tok.skip(Http1::CrLf()))
+-        return true;
++        return;
+ 
+     if (Config.onoff.relaxed_header_parser && tok.skipOne(CharacterSet::LF))
+-        return true;
++        return;
+ 
+     if (tok.atEnd() || (tok.remaining().length() == 1 && tok.remaining().at(0) == '\r'))
+-        return false; // need more data
++        throw InsufficientInput();
+ 
+     throw TexcHere("garbage instead of CRLF line terminator");
+-    return false; // unreachable, but make naive compilers happy
+ }
+ 
+ /// all characters except the LF line terminator
+@@ -102,7 +102,7 @@ LineCharacters()
+ void
+ Http::One::Parser::cleanMimePrefix()
+ {
+-    Http1::Tokenizer tok(mimeHeaderBlock_);
++    Tokenizer tok(mimeHeaderBlock_);
+     while (tok.skipOne(RelaxedDelimiterCharacters())) {
+         (void)tok.skipAll(LineCharacters()); // optional line content
+         // LF terminator is required.
+@@ -137,7 +137,7 @@ Http::One::Parser::cleanMimePrefix()
+ void
+ Http::One::Parser::unfoldMime()
+ {
+-    Http1::Tokenizer tok(mimeHeaderBlock_);
++    Tokenizer tok(mimeHeaderBlock_);
+     const auto szLimit = mimeHeaderBlock_.length();
+     mimeHeaderBlock_.clear();
+     // prevent the mime sender being able to make append() realloc/grow multiple times.
+@@ -228,7 +228,7 @@ Http::One::Parser::getHostHeaderField()
+     debugs(25, 5, "looking for " << name);
+ 
+     // while we can find more LF in the SBuf
+-    Http1::Tokenizer tok(mimeHeaderBlock_);
++    Tokenizer tok(mimeHeaderBlock_);
+     SBuf p;
+ 
+     while (tok.prefix(p, LineCharacters())) {
+@@ -250,7 +250,7 @@ Http::One::Parser::getHostHeaderField()
+         p.consume(namelen + 1);
+ 
+         // TODO: optimize SBuf::trim to take CharacterSet directly
+-        Http1::Tokenizer t(p);
++        Tokenizer t(p);
+         t.skipAll(CharacterSet::WSP);
+         p = t.remaining();
+ 
+@@ -278,10 +278,15 @@ Http::One::ErrorLevel()
+ }
+ 
+ // BWS = *( SP / HTAB ) ; WhitespaceCharacters() may relax this RFC 7230 rule
+-bool
+-Http::One::ParseBws(Tokenizer &tok)
++void
++Http::One::ParseBws(Parser::Tokenizer &tok)
+ {
+-    if (const auto count = tok.skipAll(Parser::WhitespaceCharacters())) {
++    const auto count = tok.skipAll(Parser::WhitespaceCharacters());
++
++    if (tok.atEnd())
++        throw InsufficientInput(); // even if count is positive
++
++    if (count) {
+         // Generating BWS is a MUST-level violation so warn about it as needed.
+         debugs(33, ErrorLevel(), "found " << count << " BWS octets");
+         // RFC 7230 says we MUST parse BWS, so we fall through even if
+@@ -289,6 +294,6 @@ Http::One::ParseBws(Tokenizer &tok)
+     }
+     // else we successfully "parsed" an empty BWS sequence
+ 
+-    return true;
++    // success: no more BWS characters expected
+ }
+ 
+diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
+index 58a5cae..40e281b 100644
+--- a/src/http/one/Parser.h
++++ b/src/http/one/Parser.h
+@@ -12,6 +12,7 @@
+ #include "anyp/ProtocolVersion.h"
+ #include "http/one/forward.h"
+ #include "http/StatusCode.h"
++#include "parser/forward.h"
+ #include "sbuf/SBuf.h"
+ 
+ namespace Http {
+@@ -40,6 +41,7 @@ class Parser : public RefCountable
+ {
+ public:
+     typedef SBuf::size_type size_type;
++    typedef ::Parser::Tokenizer Tokenizer;
+ 
+     Parser() : parseStatusCode(Http::scNone), parsingStage_(HTTP_PARSE_NONE), hackExpectsMime_(false) {}
+     virtual ~Parser() {}
+@@ -118,11 +120,11 @@ protected:
+      * detect and skip the CRLF or (if tolerant) LF line terminator
+      * consume from the tokenizer.
+      *
+-     * throws if non-terminator is detected.
++     * \throws exception on bad or InsufficientInput.
+      * \retval true only if line terminator found.
+      * \retval false incomplete or missing line terminator, need more data.
+      */
+-    bool skipLineTerminator(Http1::Tokenizer &tok) const;
++    void skipLineTerminator(Tokenizer &) const;
+ 
+     /**
+      * Scan to find the mime headers block for current message.
+@@ -159,8 +161,8 @@ private:
+ };
+ 
+ /// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
+-/// \returns true (always; unlike all the skip*() functions)
+-bool ParseBws(Tokenizer &tok);
++/// \throws InsufficientInput when the end of BWS cannot be confirmed
++void ParseBws(Parser::Tokenizer &);
+ 
+ /// the right debugs() level for logging HTTP violation messages
+ int ErrorLevel();
+diff --git a/src/http/one/RequestParser.cc b/src/http/one/RequestParser.cc
+index a325f7d..0f13c92 100644
+--- a/src/http/one/RequestParser.cc
++++ b/src/http/one/RequestParser.cc
+@@ -9,8 +9,8 @@
+ #include "squid.h"
+ #include "Debug.h"
+ #include "http/one/RequestParser.h"
+-#include "http/one/Tokenizer.h"
+ #include "http/ProtocolVersion.h"
++#include "parser/Tokenizer.h"
+ #include "profiler/Profiler.h"
+ #include "SquidConfig.h"
+ 
+@@ -64,7 +64,7 @@ Http::One::RequestParser::skipGarbageLines()
+  *  RFC 7230 section 2.6, 3.1 and 3.5
+  */
+ bool
+-Http::One::RequestParser::parseMethodField(Http1::Tokenizer &tok)
++Http::One::RequestParser::parseMethodField(Tokenizer &tok)
+ {
+     // method field is a sequence of TCHAR.
+     // Limit to 32 characters to prevent overly long sequences of non-HTTP
+@@ -145,7 +145,7 @@ Http::One::RequestParser::RequestTargetCharacters()
+ }
+ 
+ bool
+-Http::One::RequestParser::parseUriField(Http1::Tokenizer &tok)
++Http::One::RequestParser::parseUriField(Tokenizer &tok)
+ {
+     /* Arbitrary 64KB URI upper length limit.
+      *
+@@ -178,7 +178,7 @@ Http::One::RequestParser::parseUriField(Http1::Tokenizer &tok)
+ }
+ 
+ bool
+-Http::One::RequestParser::parseHttpVersionField(Http1::Tokenizer &tok)
++Http::One::RequestParser::parseHttpVersionField(Tokenizer &tok)
+ {
+     static const SBuf http1p0("HTTP/1.0");
+     static const SBuf http1p1("HTTP/1.1");
+@@ -253,7 +253,7 @@ Http::One::RequestParser::skipDelimiter(const size_t count, const char *where)
+ 
+ /// Parse CRs at the end of request-line, just before the terminating LF.
+ bool
+-Http::One::RequestParser::skipTrailingCrs(Http1::Tokenizer &tok)
++Http::One::RequestParser::skipTrailingCrs(Tokenizer &tok)
+ {
+     if (Config.onoff.relaxed_header_parser) {
+         (void)tok.skipAllTrailing(CharacterSet::CR); // optional; multiple OK
+@@ -289,12 +289,12 @@ Http::One::RequestParser::parseRequestFirstLine()
+     // Earlier, skipGarbageLines() took care of any leading LFs (if allowed).
+     // Now, the request line has to end at the first LF.
+     static const CharacterSet lineChars = CharacterSet::LF.complement("notLF");
+-    ::Parser::Tokenizer lineTok(buf_);
++    Tokenizer lineTok(buf_);
+     if (!lineTok.prefix(line, lineChars) || !lineTok.skip('\n')) {
+         if (buf_.length() >= Config.maxRequestHeaderSize) {
+             /* who should we blame for our failure to parse this line? */
+ 
+-            Http1::Tokenizer methodTok(buf_);
++            Tokenizer methodTok(buf_);
+             if (!parseMethodField(methodTok))
+                 return -1; // blame a bad method (or its delimiter)
+ 
+@@ -308,7 +308,7 @@ Http::One::RequestParser::parseRequestFirstLine()
+         return 0;
+     }
+ 
+-    Http1::Tokenizer tok(line);
++    Tokenizer tok(line);
+ 
+     if (!parseMethodField(tok))
+         return -1;
+diff --git a/src/http/one/RequestParser.h b/src/http/one/RequestParser.h
+index 7086548..26697cd 100644
+--- a/src/http/one/RequestParser.h
++++ b/src/http/one/RequestParser.h
+@@ -54,11 +54,11 @@ private:
+     bool doParse(const SBuf &aBuf);
+ 
+     /* all these return false and set parseStatusCode on parsing failures */
+-    bool parseMethodField(Http1::Tokenizer &);
+-    bool parseUriField(Http1::Tokenizer &);
+-    bool parseHttpVersionField(Http1::Tokenizer &);
++    bool parseMethodField(Tokenizer &);
++    bool parseUriField(Tokenizer &);
++    bool parseHttpVersionField(Tokenizer &);
+     bool skipDelimiter(const size_t count, const char *where);
+-    bool skipTrailingCrs(Http1::Tokenizer &tok);
++    bool skipTrailingCrs(Tokenizer &tok);
+ 
+     bool http0() const {return !msgProtocol_.major;}
+     static const CharacterSet &RequestTargetCharacters();
+diff --git a/src/http/one/ResponseParser.cc b/src/http/one/ResponseParser.cc
+index 24af849..65baf09 100644
+--- a/src/http/one/ResponseParser.cc
++++ b/src/http/one/ResponseParser.cc
+@@ -9,8 +9,8 @@
+ #include "squid.h"
+ #include "Debug.h"
+ #include "http/one/ResponseParser.h"
+-#include "http/one/Tokenizer.h"
+ #include "http/ProtocolVersion.h"
++#include "parser/Tokenizer.h"
+ #include "profiler/Profiler.h"
+ #include "SquidConfig.h"
+ 
+@@ -47,7 +47,7 @@ Http::One::ResponseParser::firstLineSize() const
+ // NP: we found the protocol version and consumed it already.
+ // just need the status code and reason phrase
+ int
+-Http::One::ResponseParser::parseResponseStatusAndReason(Http1::Tokenizer &tok, const CharacterSet &WspDelim)
++Http::One::ResponseParser::parseResponseStatusAndReason(Tokenizer &tok, const CharacterSet &WspDelim)
+ {
+     if (!completedStatus_) {
+         debugs(74, 9, "seek status-code in: " << tok.remaining().substr(0,10) << "...");
+@@ -87,14 +87,13 @@ Http::One::ResponseParser::parseResponseStatusAndReason(Http1::Tokenizer &tok, c
+     static const CharacterSet phraseChars = CharacterSet::WSP + CharacterSet::VCHAR + CharacterSet::OBSTEXT;
+     (void)tok.prefix(reasonPhrase_, phraseChars); // optional, no error if missing
+     try {
+-        if (skipLineTerminator(tok)) {
+-            debugs(74, DBG_DATA, "parse remaining buf={length=" << tok.remaining().length() << ", data='" << tok.remaining() << "'}");
+-            buf_ = tok.remaining(); // resume checkpoint
+-            return 1;
+-        }
++        skipLineTerminator(tok);
++        buf_ = tok.remaining(); // resume checkpoint
++        debugs(74, DBG_DATA, Raw("leftovers", buf_.rawContent(), buf_.length()));
++        return 1;
++    } catch (const InsufficientInput &) {
+         reasonPhrase_.clear();
+         return 0; // need more to be sure we have it all
+-
+     } catch (const std::exception &ex) {
+         debugs(74, 6, "invalid status-line: " << ex.what());
+     }
+@@ -119,7 +118,7 @@ Http::One::ResponseParser::parseResponseStatusAndReason(Http1::Tokenizer &tok, c
+ int
+ Http::One::ResponseParser::parseResponseFirstLine()
+ {
+-    Http1::Tokenizer tok(buf_);
++    Tokenizer tok(buf_);
+ 
+     const CharacterSet &WspDelim = DelimiterCharacters();
+ 
+diff --git a/src/http/one/ResponseParser.h b/src/http/one/ResponseParser.h
+index 15db4a0..cf13b4d 100644
+--- a/src/http/one/ResponseParser.h
++++ b/src/http/one/ResponseParser.h
+@@ -43,7 +43,7 @@ public:
+ 
+ private:
+     int parseResponseFirstLine();
+-    int parseResponseStatusAndReason(Http1::Tokenizer&, const CharacterSet &);
++    int parseResponseStatusAndReason(Tokenizer&, const CharacterSet &);
+ 
+     /// magic prefix for identifying ICY response messages
+     static const SBuf IcyMagic;
+diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
+index 754086e..6d2f8ea 100644
+--- a/src/http/one/TeChunkedParser.cc
++++ b/src/http/one/TeChunkedParser.cc
+@@ -13,10 +13,13 @@
+ #include "http/one/Tokenizer.h"
+ #include "http/ProtocolVersion.h"
+ #include "MemBuf.h"
++#include "parser/Tokenizer.h"
+ #include "Parsing.h"
++#include "sbuf/Stream.h"
+ #include "SquidConfig.h"
+ 
+-Http::One::TeChunkedParser::TeChunkedParser()
++Http::One::TeChunkedParser::TeChunkedParser():
++    customExtensionValueParser(nullptr)
+ {
+     // chunked encoding only exists in HTTP/1.1
+     Http1::Parser::msgProtocol_ = Http::ProtocolVersion(1,1);
+@@ -31,7 +34,11 @@ Http::One::TeChunkedParser::clear()
+     buf_.clear();
+     theChunkSize = theLeftBodySize = 0;
+     theOut = NULL;
+-    useOriginBody = -1;
++    // XXX: We do not reset customExtensionValueParser here. Based on the
++    // clear() API description, we must, but it makes little sense and could
++    // break method callers if they appear because some of them may forget to
++    // reset customExtensionValueParser. TODO: Remove Http1::Parser as our
++    // parent class and this unnecessary method with it.
+ }
+ 
+ bool
+@@ -49,14 +56,14 @@ Http::One::TeChunkedParser::parse(const SBuf &aBuf)
+     if (parsingStage_ == Http1::HTTP_PARSE_NONE)
+         parsingStage_ = Http1::HTTP_PARSE_CHUNK_SZ;
+ 
+-    Http1::Tokenizer tok(buf_);
++    Tokenizer tok(buf_);
+ 
+     // loop for as many chunks as we can
+     // use do-while instead of while so that we can incrementally
+     // restart in the middle of a chunk/frame
+     do {
+ 
+-        if (parsingStage_ == Http1::HTTP_PARSE_CHUNK_EXT && !parseChunkExtension(tok, theChunkSize))
++        if (parsingStage_ == Http1::HTTP_PARSE_CHUNK_EXT && !parseChunkMetadataSuffix(tok))
+             return false;
+ 
+         if (parsingStage_ == Http1::HTTP_PARSE_CHUNK && !parseChunkBody(tok))
+@@ -80,7 +87,7 @@ Http::One::TeChunkedParser::needsMoreSpace() const
+ 
+ /// RFC 7230 section 4.1 chunk-size
+ bool
+-Http::One::TeChunkedParser::parseChunkSize(Http1::Tokenizer &tok)
++Http::One::TeChunkedParser::parseChunkSize(Tokenizer &tok)
+ {
+     Must(theChunkSize <= 0); // Should(), really
+ 
+@@ -104,66 +111,75 @@ Http::One::TeChunkedParser::parseChunkSize(Http1::Tokenizer &tok)
+     return false; // should not be reachable
+ }
+ 
+-/**
+- * Parses chunk metadata suffix, looking for interesting extensions and/or
+- * getting to the line terminator. RFC 7230 section 4.1.1 and its Errata #4667:
+- *
+- *   chunk-ext = *( BWS  ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
+- *   chunk-ext-name = token
+- *   chunk-ext-val  = token / quoted-string
+- *
+- * ICAP 'use-original-body=N' extension is supported.
+- */
++/// Parses "[chunk-ext] CRLF" from RFC 7230 section 4.1.1:
++///   chunk = chunk-size [ chunk-ext ] CRLF chunk-data CRLF
++///   last-chunk = 1*"0" [ chunk-ext ] CRLF
+ bool
+-Http::One::TeChunkedParser::parseChunkExtension(Http1::Tokenizer &tok, bool skipKnown)
++Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
+ {
+-    SBuf ext;
+-    SBuf value;
+-    while (
+-        ParseBws(tok) && // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
+-        tok.skip(';') &&
+-        ParseBws(tok) && // Bug 4492: ICAP servers send SP before chunk-ext-name
+-        tok.prefix(ext, CharacterSet::TCHAR)) { // chunk-ext-name
+-
+-        // whole value part is optional. if no '=' expect next chunk-ext
+-        if (ParseBws(tok) && tok.skip('=') && ParseBws(tok)) {
+-
+-            if (!skipKnown) {
+-                if (ext.cmp("use-original-body",17) == 0 && tok.int64(useOriginBody, 10)) {
+-                    debugs(94, 3, "Found chunk extension " << ext << "=" << useOriginBody);
+-                    buf_ = tok.remaining(); // parse checkpoint
+-                    continue;
+-                }
+-            }
+-
+-            debugs(94, 5, "skipping unknown chunk extension " << ext);
+-
+-            // unknown might have a value token or quoted-string
+-            if (tok.quotedStringOrToken(value) && !tok.atEnd()) {
+-                buf_ = tok.remaining(); // parse checkpoint
+-                continue;
+-            }
+-
+-            // otherwise need more data OR corrupt syntax
+-            break;
+-        }
+-
+-        if (!tok.atEnd())
+-            buf_ = tok.remaining(); // parse checkpoint (unless there might be more token name)
+-    }
+-
+-    if (skipLineTerminator(tok)) {
+-        buf_ = tok.remaining(); // checkpoint
+-        // non-0 chunk means data, 0-size means optional Trailer follows
++    // Code becomes much simpler when incremental parsing functions throw on
++    // bad or insufficient input, like in the code below. TODO: Expand up.
++    try {
++        parseChunkExtensions(tok); // a possibly empty chunk-ext list
++        skipLineTerminator(tok);
++        buf_ = tok.remaining();
+         parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
+         return true;
++    } catch (const InsufficientInput &) {
++        tok.reset(buf_); // backtrack to the last commit point
++        return false;
+     }
++    // other exceptions bubble up to kill message parsing
++}
+ 
+-    return false;
++/// Parses the chunk-ext list (RFC 7230 section 4.1.1 and its Errata #4667):
++/// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
++void
++Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &tok)
++{
++    do {
++        ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
++
++        if (!tok.skip(';'))
++            return; // reached the end of extensions (if any)
++
++        parseOneChunkExtension(tok);
++        buf_ = tok.remaining(); // got one extension
++    } while (true);
++}
++
++void
++Http::One::ChunkExtensionValueParser::Ignore(Tokenizer &tok, const SBuf &extName)
++{
++    const auto ignoredValue = tokenOrQuotedString(tok);
++    debugs(94, 5, extName << " with value " << ignoredValue);
++}
++
++/// Parses a single chunk-ext list element:
++/// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
++void
++Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &tok)
++{
++    ParseBws(tok); // Bug 4492: ICAP servers send SP before chunk-ext-name
++
++    const auto extName = tok.prefix("chunk-ext-name", CharacterSet::TCHAR);
++
++    ParseBws(tok);
++
++    if (!tok.skip('='))
++        return; // parsed a valueless chunk-ext
++
++    ParseBws(tok);
++
++    // optimization: the only currently supported extension needs last-chunk
++    if (!theChunkSize && customExtensionValueParser)
++        customExtensionValueParser->parse(tok, extName);
++    else
++        ChunkExtensionValueParser::Ignore(tok, extName);
+ }
+ 
+ bool
+-Http::One::TeChunkedParser::parseChunkBody(Http1::Tokenizer &tok)
++Http::One::TeChunkedParser::parseChunkBody(Tokenizer &tok)
+ {
+     if (theLeftBodySize > 0) {
+         buf_ = tok.remaining(); // sync buffers before buf_ use
+@@ -188,17 +204,20 @@ Http::One::TeChunkedParser::parseChunkBody(Http1::Tokenizer &tok)
+ }
+ 
+ bool
+-Http::One::TeChunkedParser::parseChunkEnd(Http1::Tokenizer &tok)
++Http::One::TeChunkedParser::parseChunkEnd(Tokenizer &tok)
+ {
+     Must(theLeftBodySize == 0); // Should(), really
+ 
+-    if (skipLineTerminator(tok)) {
++    try {
++        skipLineTerminator(tok);
+         buf_ = tok.remaining(); // parse checkpoint
+         theChunkSize = 0; // done with the current chunk
+         parsingStage_ = Http1::HTTP_PARSE_CHUNK_SZ;
+         return true;
+     }
+-
+-    return false;
++    catch (const InsufficientInput &) {
++        return false;
++    }
++    // other exceptions bubble up to kill message parsing
+ }
+ 
+diff --git a/src/http/one/TeChunkedParser.h b/src/http/one/TeChunkedParser.h
+index 1b0319e..2ca8988 100644
+--- a/src/http/one/TeChunkedParser.h
++++ b/src/http/one/TeChunkedParser.h
+@@ -18,6 +18,26 @@ namespace Http
+ namespace One
+ {
+ 
++using ::Parser::InsufficientInput;
++
++// TODO: Move this class into http/one/ChunkExtensionValueParser.*
++/// A customizable parser of a single chunk extension value (chunk-ext-val).
++/// From RFC 7230 section 4.1.1 and its Errata #4667:
++/// chunk-ext = *( BWS  ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
++/// chunk-ext-name = token
++/// chunk-ext-val  = token / quoted-string
++class ChunkExtensionValueParser
++{
++public:
++    typedef ::Parser::Tokenizer Tokenizer;
++
++    /// extracts and ignores the value of a named extension
++    static void Ignore(Tokenizer &tok, const SBuf &extName);
++
++    /// extracts and then interprets (or ignores) the extension value
++    virtual void parse(Tokenizer &tok, const SBuf &extName) = 0;
++};
++
+ /**
+  * An incremental parser for chunked transfer coding
+  * defined in RFC 7230 section 4.1.
+@@ -25,7 +45,7 @@ namespace One
+  *
+  * The parser shovels content bytes from the raw
+  * input buffer into the content output buffer, both caller-supplied.
+- * Ignores chunk extensions except for ICAP's ieof.
++ * Chunk extensions like use-original-body are handled via parseExtensionValuesWith().
+  * Trailers are available via mimeHeader() if wanted.
+  */
+ class TeChunkedParser : public Http1::Parser
+@@ -37,6 +57,10 @@ public:
+     /// set the buffer to be used to store decoded chunk data
+     void setPayloadBuffer(MemBuf *parsedContent) {theOut = parsedContent;}
+ 
++    /// Instead of ignoring all chunk extension values, give the supplied
++    /// parser a chance to handle them. Only applied to last-chunk (for now).
++    void parseExtensionValuesWith(ChunkExtensionValueParser *parser) { customExtensionValueParser = parser; }
++
+     bool needsMoreSpace() const;
+ 
+     /* Http1::Parser API */
+@@ -45,17 +69,20 @@ public:
+     virtual Parser::size_type firstLineSize() const {return 0;} // has no meaning with multiple chunks
+ 
+ private:
+-    bool parseChunkSize(Http1::Tokenizer &tok);
+-    bool parseChunkExtension(Http1::Tokenizer &tok, bool skipKnown);
+-    bool parseChunkBody(Http1::Tokenizer &tok);
+-    bool parseChunkEnd(Http1::Tokenizer &tok);
++    bool parseChunkSize(Tokenizer &tok);
++    bool parseChunkMetadataSuffix(Tokenizer &);
++    void parseChunkExtensions(Tokenizer &);
++    void parseOneChunkExtension(Tokenizer &);
++    bool parseChunkBody(Tokenizer &tok);
++    bool parseChunkEnd(Tokenizer &tok);
+ 
+     MemBuf *theOut;
+     uint64_t theChunkSize;
+     uint64_t theLeftBodySize;
+ 
+-public:
+-    int64_t useOriginBody;
++    /// An optional plugin for parsing and interpreting custom chunk-ext-val.
++    /// This "visitor" object is owned by our creator.
++    ChunkExtensionValueParser *customExtensionValueParser;
+ };
+ 
+ } // namespace One
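
Editor's sketch: the ChunkExtensionValueParser hook above replaces the old hard-wired useOriginBody field with a visitor supplied by whoever creates the TeChunkedParser. The following minimal standalone sketch shows how a caller might implement and plug in such a visitor; the Tokenizer/string stand-ins and the IcapExtensionParser name are invented for illustration and are not Squid's actual classes.

    // Standalone sketch of the visitor idiom above; simplified stand-ins
    // for Squid's SBuf/Tokenizer types, not the real API.
    #include <cstdint>
    #include <iostream>
    #include <string>

    struct Tokenizer { std::string buf; }; // stand-in for ::Parser::Tokenizer

    class ChunkExtensionValueParser {
    public:
        virtual ~ChunkExtensionValueParser() = default;
        /// extracts and then interprets (or ignores) the extension value
        virtual void parse(Tokenizer &tok, const std::string &extName) = 0;
    };

    /// hypothetical ICAP-side visitor that recognizes use-original-body=N
    class IcapExtensionParser : public ChunkExtensionValueParser {
    public:
        void parse(Tokenizer &tok, const std::string &extName) override {
            if (extName == "use-original-body")
                useOriginalBody_ = std::stoll(tok.buf); // simplified value extraction
            // all other extensions are simply ignored
        }
        int64_t useOriginalBody_ = -1;
    };

    int main() {
        IcapExtensionParser visitor;
        Tokenizer tok{"10"};
        visitor.parse(tok, "use-original-body"); // as TeChunkedParser would
        std::cout << "use-original-body = " << visitor.useOriginalBody_ << "\n";
    }
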
+diff --git a/src/http/one/Tokenizer.cc b/src/http/one/Tokenizer.cc
+index 804b8e1..3a6bef3 100644
+--- a/src/http/one/Tokenizer.cc
++++ b/src/http/one/Tokenizer.cc
+@@ -8,35 +8,18 @@
+ 
+ #include "squid.h"
+ #include "Debug.h"
++#include "http/one/Parser.h"
+ #include "http/one/Tokenizer.h"
+-
+-bool
+-Http::One::Tokenizer::quotedString(SBuf &returnedToken, const bool http1p0)
+-{
+-    checkpoint();
+-
+-    if (!skip('"'))
+-        return false;
+-
+-    return qdText(returnedToken, http1p0);
+-}
+-
+-bool
+-Http::One::Tokenizer::quotedStringOrToken(SBuf &returnedToken, const bool http1p0)
++#include "parser/Tokenizer.h"
++#include "sbuf/Stream.h"
++
++/// Extracts quoted-string after the caller removes the initial '"'.
++/// \param http1p0 whether to prohibit \-escaped characters in quoted strings
++/// \throws InsufficientInput when input can be a token _prefix_
++/// \returns extracted quoted string (without quotes and with chars unescaped)
++static SBuf
++parseQuotedStringSuffix(Parser::Tokenizer &tok, const bool http1p0)
+ {
+-    checkpoint();
+-
+-    if (!skip('"'))
+-        return prefix(returnedToken, CharacterSet::TCHAR);
+-
+-    return qdText(returnedToken, http1p0);
+-}
+-
+-bool
+-Http::One::Tokenizer::qdText(SBuf &returnedToken, const bool http1p0)
+-{
+-    // the initial DQUOTE has been skipped by the caller
+-
+     /*
+      * RFC 1945 - defines qdtext:
+      *   inclusive of LWS (which includes CR and LF)
+@@ -61,12 +44,17 @@ Http::One::Tokenizer::qdText(SBuf &returnedToken, const bool http1p0)
+     // best we can do is a conditional reference since http1p0 value may change per-client
+     const CharacterSet &tokenChars = (http1p0 ? qdtext1p0 : qdtext1p1);
+ 
+-    for (;;) {
+-        SBuf::size_type prefixLen = buf().findFirstNotOf(tokenChars);
+-        returnedToken.append(consume(prefixLen));
++    SBuf parsedToken;
++
++    while (!tok.atEnd()) {
++        SBuf qdText;
++        if (tok.prefix(qdText, tokenChars))
++            parsedToken.append(qdText);
++
++        if (!http1p0 && tok.skip('\\')) { // HTTP/1.1 allows quoted-pair, HTTP/1.0 does not
++            if (tok.atEnd())
++                break;
+ 
+-        // HTTP/1.1 allows quoted-pair, HTTP/1.0 does not
+-        if (!http1p0 && skip('\\')) {
+             /* RFC 7230 section 3.2.6
+              *
+              * The backslash octet ("\") can be used as a single-octet quoting
+@@ -78,32 +66,42 @@ Http::One::Tokenizer::qdText(SBuf &returnedToken, const bool http1p0)
+              */
+             static const CharacterSet qPairChars = CharacterSet::HTAB + CharacterSet::SP + CharacterSet::VCHAR + CharacterSet::OBSTEXT;
+             SBuf escaped;
+-            if (!prefix(escaped, qPairChars, 1)) {
+-                returnedToken.clear();
+-                restoreLastCheckpoint();
+-                return false;
+-            }
+-            returnedToken.append(escaped);
++            if (!tok.prefix(escaped, qPairChars, 1))
++                throw TexcHere("invalid escaped character in quoted-pair");
++
++            parsedToken.append(escaped);
+             continue;
++        }
+ 
+-        } else if (skip('"')) {
+-            break; // done
++        if (tok.skip('"'))
++            return parsedToken; // may be empty
+ 
+-        } else if (atEnd()) {
+-            // need more data
+-            returnedToken.clear();
+-            restoreLastCheckpoint();
+-            return false;
+-        }
++        if (tok.atEnd())
++            break;
+ 
+-        // else, we have an error
+-        debugs(24, 8, "invalid bytes for set " << tokenChars.name);
+-        returnedToken.clear();
+-        restoreLastCheckpoint();
+-        return false;
++        throw TexcHere(ToSBuf("invalid bytes for set ", tokenChars.name));
+     }
+ 
+-    // found the whole string
+-    return true;
++    throw Http::One::InsufficientInput();
++}
++
++SBuf
++Http::One::tokenOrQuotedString(Parser::Tokenizer &tok, const bool http1p0)
++{
++    if (tok.skip('"'))
++        return parseQuotedStringSuffix(tok, http1p0);
++
++    if (tok.atEnd())
++        throw InsufficientInput();
++
++    SBuf parsedToken;
++    if (!tok.prefix(parsedToken, CharacterSet::TCHAR))
++        throw TexcHere("invalid input while expecting an HTTP token");
++
++    if (tok.atEnd())
++        throw InsufficientInput();
++
++    // got the complete token
++    return parsedToken;
+ }
+ 
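
Editor's sketch: the rewritten tokenOrQuotedString() above reports possibly-incomplete input by throwing InsufficientInput instead of returning false and rolling back a checkpoint. A rough standalone approximation of its contract, using std::string in place of SBuf and Parser::Tokenizer and a deliberately crude token character set, might look like this:

    #include <cctype>
    #include <iostream>
    #include <stdexcept>
    #include <string>

    struct InsufficientInput {}; // thrown when more bytes could change the result

    // Returns the parsed word and advances `pos`; throws InsufficientInput on
    // an unterminated token or quoted-string, like the function above.
    std::string tokenOrQuotedString(const std::string &in, size_t &pos) {
        if (pos < in.size() && in[pos] == '"') {           // quoted-string
            std::string out;
            for (auto i = pos + 1; i < in.size(); ++i) {
                if (in[i] == '\\' && i + 1 < in.size()) {  // HTTP/1.1 quoted-pair
                    out += in[++i];
                } else if (in[i] == '"') {                 // terminating DQUOTE
                    pos = i + 1;
                    return out;
                } else {
                    out += in[i];
                }
            }
            throw InsufficientInput();                     // no closing quote yet
        }
        auto end = pos;
        while (end < in.size() && (std::isalnum(static_cast<unsigned char>(in[end])) || in[end] == '-'))
            ++end;                                         // crude TCHAR subset
        if (end == pos)
            throw std::runtime_error("expected an HTTP token");
        if (end == in.size())
            throw InsufficientInput();                     // token may continue
        auto out = in.substr(pos, end - pos);
        pos = end;
        return out;
    }

    int main() {
        size_t pos = 0;
        std::cout << tokenOrQuotedString("\"a\\\"b\" ", pos) << "\n"; // prints: a"b
    }
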
+diff --git a/src/http/one/Tokenizer.h b/src/http/one/Tokenizer.h
+index 658875f..2d40574 100644
+--- a/src/http/one/Tokenizer.h
++++ b/src/http/one/Tokenizer.h
+@@ -9,68 +9,47 @@
+ #ifndef SQUID_SRC_HTTP_ONE_TOKENIZER_H
+ #define SQUID_SRC_HTTP_ONE_TOKENIZER_H
+ 
+-#include "parser/Tokenizer.h"
++#include "parser/forward.h"
++#include "sbuf/forward.h"
+ 
+ namespace Http {
+ namespace One {
+ 
+ /**
+- * Lexical processor extended to tokenize HTTP/1.x syntax.
++ * Extracts either an HTTP/1 token or quoted-string while dealing with
++ * possibly incomplete input typical for incremental text parsers.
++ * Unescapes escaped characters in HTTP/1.1 quoted strings.
+  *
+- * \see ::Parser::Tokenizer for more detail
++ * \param http1p0 whether to prohibit \-escaped characters in quoted strings
++ * \throws InsufficientInput as appropriate, including on unterminated tokens
++ * \returns extracted token or quoted string (without quotes)
++ *
++ * Governed by:
++ *  - RFC 1945 section 2.1
++ *  "
++ *    A string of text is parsed as a single word if it is quoted using
++ *    double-quote marks.
++ *
++ *        quoted-string  = ( <"> *(qdtext) <"> )
++ *
++ *        qdtext         = <any CHAR except <"> and CTLs,
++ *                         but including LWS>
++ *
++ *    Single-character quoting using the backslash ("\") character is not
++ *    permitted in HTTP/1.0.
++ *  "
++ *
++ *  - RFC 7230 section 3.2.6
++ *  "
++ *    A string of text is parsed as a single value if it is quoted using
++ *    double-quote marks.
++ *
++ *    quoted-string  = DQUOTE *( qdtext / quoted-pair ) DQUOTE
++ *    qdtext         = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text
++ *    obs-text       = %x80-FF
++ *  "
+  */
+-class Tokenizer : public ::Parser::Tokenizer
+-{
+-public:
+-    Tokenizer(SBuf &s) : ::Parser::Tokenizer(s), savedStats_(0) {}
+-
+-    /**
+-     * Attempt to parse a quoted-string lexical construct.
+-     *
+-     * Governed by:
+-     *  - RFC 1945 section 2.1
+-     *  "
+-     *    A string of text is parsed as a single word if it is quoted using
+-     *    double-quote marks.
+-     *
+-     *        quoted-string  = ( <"> *(qdtext) <"> )
+-     *
+-     *        qdtext         = <any CHAR except <"> and CTLs,
+-     *                         but including LWS>
+-     *
+-     *    Single-character quoting using the backslash ("\") character is not
+-     *    permitted in HTTP/1.0.
+-     *  "
+-     *
+-     *  - RFC 7230 section 3.2.6
+-     *  "
+-     *    A string of text is parsed as a single value if it is quoted using
+-     *    double-quote marks.
+-     *
+-     *    quoted-string  = DQUOTE *( qdtext / quoted-pair ) DQUOTE
+-     *    qdtext         = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text
+-     *    obs-text       = %x80-FF
+-     *  "
+-     *
+-     * \param escaped HTTP/1.0 does not permit \-escaped characters
+-     */
+-    bool quotedString(SBuf &value, const bool http1p0 = false);
+-
+-    /**
+-     * Attempt to parse a (token / quoted-string ) lexical construct.
+-     */
+-    bool quotedStringOrToken(SBuf &value, const bool http1p0 = false);
+-
+-private:
+-    /// parse the internal component of a quote-string, and terminal DQUOTE
+-    bool qdText(SBuf &value, const bool http1p0);
+-
+-    void checkpoint() { savedCheckpoint_ = buf(); savedStats_ = parsedSize(); }
+-    void restoreLastCheckpoint() { undoParse(savedCheckpoint_, savedStats_); }
+-
+-    SBuf savedCheckpoint_;
+-    SBuf::size_type savedStats_;
+-};
++SBuf tokenOrQuotedString(Parser::Tokenizer &tok, const bool http1p0 = false);
+ 
+ } // namespace One
+ } // namespace Http
+diff --git a/src/http/one/forward.h b/src/http/one/forward.h
+index c90dc34..2b4ad28 100644
+--- a/src/http/one/forward.h
++++ b/src/http/one/forward.h
+@@ -10,6 +10,7 @@
+ #define SQUID_SRC_HTTP_ONE_FORWARD_H
+ 
+ #include "base/RefCount.h"
++#include "parser/forward.h"
+ #include "sbuf/forward.h"
+ 
+ namespace Http {
+typedef RefCount<ResponseParser> ResponseParserPointer;
+ /// CRLF textual representation
+ const SBuf &CrLf();
+ 
++using ::Parser::InsufficientInput;
++
+ } // namespace One
+ } // namespace Http
+ 
+diff --git a/src/parser/BinaryTokenizer.h b/src/parser/BinaryTokenizer.h
+index acebd4d..24042d4 100644
+--- a/src/parser/BinaryTokenizer.h
++++ b/src/parser/BinaryTokenizer.h
+@@ -9,6 +9,7 @@
+ #ifndef SQUID_SRC_PARSER_BINARYTOKENIZER_H
+ #define SQUID_SRC_PARSER_BINARYTOKENIZER_H
+ 
++#include "parser/forward.h"
+ #include "sbuf/SBuf.h"
+ 
+ namespace Parser
+@@ -44,7 +45,7 @@ public:
+ class BinaryTokenizer
+ {
+ public:
+-    class InsufficientInput {}; // thrown when a method runs out of data
++    typedef ::Parser::InsufficientInput InsufficientInput;
+     typedef uint64_t size_type; // enough for the largest supported offset
+ 
+     BinaryTokenizer();
+diff --git a/src/parser/Makefile.am b/src/parser/Makefile.am
+index af2b759..0daa5a8 100644
+--- a/src/parser/Makefile.am
++++ b/src/parser/Makefile.am
+@@ -13,6 +13,7 @@ noinst_LTLIBRARIES = libparser.la
+ libparser_la_SOURCES = \
+ 	BinaryTokenizer.h \
+ 	BinaryTokenizer.cc \
++	forward.h \
+ 	Tokenizer.h \
+ 	Tokenizer.cc
+ 
+diff --git a/src/parser/Tokenizer.cc b/src/parser/Tokenizer.cc
+index 7e73e04..68f4aec 100644
+--- a/src/parser/Tokenizer.cc
++++ b/src/parser/Tokenizer.cc
+@@ -10,7 +10,9 @@
+ 
+ #include "squid.h"
+ #include "Debug.h"
++#include "parser/forward.h"
+ #include "parser/Tokenizer.h"
++#include "sbuf/Stream.h"
+ 
+ #include <cerrno>
+ #if HAVE_CTYPE_H
+@@ -96,6 +98,23 @@ Parser::Tokenizer::prefix(SBuf &returnedToken, const CharacterSet &tokenChars, c
+     return true;
+ }
+ 
++SBuf
++Parser::Tokenizer::prefix(const char *description, const CharacterSet &tokenChars, const SBuf::size_type limit)
++{
++    if (atEnd())
++        throw InsufficientInput();
++
++    SBuf result;
++
++    if (!prefix(result, tokenChars, limit))
++        throw TexcHere(ToSBuf("cannot parse ", description));
++
++    if (atEnd())
++        throw InsufficientInput();
++
++    return result;
++}
++
+ bool
+ Parser::Tokenizer::suffix(SBuf &returnedToken, const CharacterSet &tokenChars, const SBuf::size_type limit)
+ {
+@@ -283,3 +302,24 @@ Parser::Tokenizer::int64(int64_t & result, int base, bool allowSign, const SBuf:
+     return success(s - range.rawContent());
+ }
+ 
++int64_t
++Parser::Tokenizer::udec64(const char *description, const SBuf::size_type limit)
++{
++    if (atEnd())
++        throw InsufficientInput();
++
++    int64_t result = 0;
++
++    // Since we only support unsigned decimals, a parsing failure with a
++    // non-empty input always implies invalid/malformed input (or a buggy
++    // limit=0 caller). TODO: Support signed and non-decimal integers by
++    // refactoring int64() to detect insufficient input.
++    if (!int64(result, 10, false, limit))
++        throw TexcHere(ToSBuf("cannot parse ", description));
++
++    if (atEnd())
++        throw InsufficientInput(); // more digits may be coming
++
++    return result;
++}
++
+diff --git a/src/parser/Tokenizer.h b/src/parser/Tokenizer.h
+index 54414be..03a8388 100644
+--- a/src/parser/Tokenizer.h
++++ b/src/parser/Tokenizer.h
+@@ -143,6 +143,19 @@ public:
+      */
+     bool int64(int64_t &result, int base = 0, bool allowSign = true, SBuf::size_type limit = SBuf::npos);
+ 
++    /*
++     * The methods below mimic their counterparts documented above, but they
++     * throw on errors, including InsufficientInput. The field description
++     * parameter is used for error reporting and debugging.
++     */
++
++    /// prefix() wrapper but throws InsufficientInput if input contains
++    /// nothing but the prefix (i.e. if the prefix is not "terminated")
++    SBuf prefix(const char *description, const CharacterSet &tokenChars, SBuf::size_type limit = SBuf::npos);
++
++    /// int64() wrapper but limited to unsigned decimal integers (for now)
++    int64_t udec64(const char *description, SBuf::size_type limit = SBuf::npos);
++
+ protected:
+     SBuf consume(const SBuf::size_type n);
+     SBuf::size_type success(const SBuf::size_type n);
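
Editor's sketch: the throwing wrappers above encode a subtle rule of incremental parsing: even a successfully parsed integer cannot be trusted while it touches the end of the buffer, because more digits may still arrive. A standalone sketch of that udec64()-style check (std::string stand-in, invented names; note std::stoull is laxer than Squid's parser about signs and whitespace):

    #include <cstdint>
    #include <iostream>
    #include <stdexcept>
    #include <string>

    struct InsufficientInput {};

    // Parses an unsigned decimal at the start of buf[pos...], udec64()-style.
    uint64_t udec64(const std::string &buf, size_t &pos) {
        if (pos >= buf.size())
            throw InsufficientInput();
        size_t idx = 0;
        unsigned long long v = 0;
        try {
            v = std::stoull(buf.substr(pos), &idx, 10);
        } catch (const std::exception &) {
            throw std::runtime_error("cannot parse unsigned decimal");
        }
        if (pos + idx == buf.size())
            throw InsufficientInput(); // more digits may be coming
        pos += idx;
        return v;
    }

    int main() {
        size_t pos = 0;
        try { udec64("123", pos); }                  // inconclusive: ends in digits
        catch (const InsufficientInput &) { std::cout << "need more data\n"; }
        std::cout << udec64("123\r\n", pos) << "\n"; // 123, safely terminated
    }
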
+diff --git a/src/parser/forward.h b/src/parser/forward.h
+new file mode 100644
+index 0000000..5a95b7a
+--- /dev/null
++++ b/src/parser/forward.h
+@@ -0,0 +1,22 @@
++/*
++ * Copyright (C) 1996-2019 The Squid Software Foundation and contributors
++ *
++ * Squid software is distributed under GPLv2+ license and includes
++ * contributions from numerous individuals and organizations.
++ * Please see the COPYING and CONTRIBUTORS files for details.
++ */
++
++#ifndef SQUID_PARSER_FORWARD_H
++#define SQUID_PARSER_FORWARD_H
++
++namespace Parser {
++class Tokenizer;
++class BinaryTokenizer;
++
++// TODO: Move this declaration (to parser/Elements.h) if we need more like it.
++/// thrown by modern "incremental" parsers when they need more data
++class InsufficientInput {};
++} // namespace Parser
++
++#endif /* SQUID_PARSER_FORWARD_H */
++
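
Editor's sketch: taken together, parser/forward.h and the callers above establish a simple protocol: incremental parsers throw Parser::InsufficientInput when the input may merely be incomplete, and the caller rewinds to its last commit point and waits for more bytes. A compact standalone sketch of that commit/backtrack loop (simplified stand-ins, not Squid's classes):

    #include <iostream>
    #include <string>

    struct InsufficientInput {};

    struct Parser {
        std::string buf_;           // unparsed input (the commit point)

        bool parseOneRecord() {
            std::string tok = buf_; // cheap "tokenizer" copy, like `auto tok = callerTok`
            try {
                const auto nl = tok.find('\n');
                if (nl == std::string::npos)
                    throw InsufficientInput(); // record may be incomplete
                std::cout << "parsed: " << tok.substr(0, nl) << "\n";
                buf_ = tok.substr(nl + 1);     // commit only after full success
                return true;
            } catch (const InsufficientInput &) {
                return false;                  // buf_ still holds the checkpoint
            }
        }
    };

    int main() {
        Parser p;
        p.buf_ = "first\nsec";
        while (p.parseOneRecord()) {}
        p.buf_ += "ond\n";                     // more data arrives
        while (p.parseOneRecord()) {}
    }
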
+diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
+index affe0b1..05591fe 100644
+--- a/src/http/one/Parser.cc
++++ b/src/http/one/Parser.cc
+@@ -65,16 +65,10 @@ Http::One::Parser::DelimiterCharacters()
+ void
+ Http::One::Parser::skipLineTerminator(Tokenizer &tok) const
+ {
+-    if (tok.skip(Http1::CrLf()))
+-        return;
+-
+     if (Config.onoff.relaxed_header_parser && tok.skipOne(CharacterSet::LF))
+         return;
+ 
+-    if (tok.atEnd() || (tok.remaining().length() == 1 && tok.remaining().at(0) == '\r'))
+-        throw InsufficientInput();
+-
+-    throw TexcHere("garbage instead of CRLF line terminator");
++    tok.skipRequired("line-terminating CRLF", Http1::CrLf());
+ }
+ 
+ /// all characters except the LF line terminator
+diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
+index 40e281b..9a2a4ad 100644
+--- a/src/http/one/Parser.h
++++ b/src/http/one/Parser.h
+@@ -120,9 +120,7 @@ protected:
+      * detect and skip the CRLF or (if tolerant) LF line terminator
+      * consume from the tokenizer.
+      *
+-     * \throws exception on bad or InsuffientInput.
+-     * \retval true only if line terminator found.
+-     * \retval false incomplete or missing line terminator, need more data.
++     * \throws exception on bad or InsufficientInput
+      */
+     void skipLineTerminator(Tokenizer &) const;
+ 
+diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
+index 6d2f8ea..3bff6c7 100644
+--- a/src/http/one/TeChunkedParser.cc
++++ b/src/http/one/TeChunkedParser.cc
+@@ -91,6 +91,11 @@ Http::One::TeChunkedParser::parseChunkSize(Tokenizer &tok)
+ {
+     Must(theChunkSize <= 0); // Should(), really
+ 
++    static const SBuf bannedHexPrefixLower("0x");
++    static const SBuf bannedHexPrefixUpper("0X");
++    if (tok.skip(bannedHexPrefixLower) || tok.skip(bannedHexPrefixUpper))
++        throw TextException("chunk starts with 0x", Here());
++
+     int64_t size = -1;
+     if (tok.int64(size, 16, false) && !tok.atEnd()) {
+         if (size < 0)
+@@ -121,7 +126,7 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
+     // bad or insufficient input, like in the code below. TODO: Expand up.
+     try {
+         parseChunkExtensions(tok); // a possibly empty chunk-ext list
+-        skipLineTerminator(tok);
++        tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
+         buf_ = tok.remaining();
+         parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
+         return true;
+@@ -132,12 +137,14 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
+     // other exceptions bubble up to kill message parsing
+ }
+ 
+-/// Parses the chunk-ext list (RFC 7230 section 4.1.1 and its Errata #4667):
++/// Parses the chunk-ext list (RFC 9112 section 7.1.1):
+ /// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
+ void
+-Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &tok)
++Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
+ {
+     do {
++        auto tok = callerTok;
++
+         ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
+ 
+         if (!tok.skip(';'))
+@@ -145,6 +152,7 @@ Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &tok)
+ 
+         parseOneChunkExtension(tok);
+         buf_ = tok.remaining(); // got one extension
++        callerTok = tok;
+     } while (true);
+ }
+ 
+@@ -158,11 +166,14 @@ Http::One::ChunkExtensionValueParser::Ignore(Tokenizer &tok, const SBuf &extName
+ /// Parses a single chunk-ext list element:
+ /// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
+ void
+-Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &tok)
++Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &callerTok)
+ {
++    auto tok = callerTok;
++
+     ParseBws(tok); // Bug 4492: ICAP servers send SP before chunk-ext-name
+ 
+     const auto extName = tok.prefix("chunk-ext-name", CharacterSet::TCHAR);
++    callerTok = tok; // in case we determine that this is a valueless chunk-ext
+ 
+     ParseBws(tok);
+ 
+@@ -176,6 +187,8 @@ Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &tok)
+         customExtensionValueParser->parse(tok, extName);
+     else
+         ChunkExtensionValueParser::Ignore(tok, extName);
++
++    callerTok = tok;
+ }
+ 
+ bool
+@@ -209,7 +222,7 @@ Http::One::TeChunkedParser::parseChunkEnd(Tokenizer &tok)
+     Must(theLeftBodySize == 0); // Should(), really
+ 
+     try {
+-        skipLineTerminator(tok);
++        tok.skipRequired("chunk CRLF", Http1::CrLf());
+         buf_ = tok.remaining(); // parse checkpoint
+         theChunkSize = 0; // done with the current chunk
+         parsingStage_ = Http1::HTTP_PARSE_CHUNK_SZ;
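
Editor's sketch: the "0x" ban and the CRLF-only terminator above both close parser-differential holes: a lenient hex parser that accepts "0x1A" or stray whitespace may disagree with an upstream server about where a chunk ends, enabling request smuggling. A standalone illustration of the stricter rule (std::stoull, like the old lenient path, would otherwise happily accept an 0x prefix):

    #include <cctype>
    #include <cstdint>
    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Parses a bare "chunk-size CRLF" line (no chunk extensions).
    uint64_t parseStrictChunkSize(const std::string &line) {
        if (line.empty() || !std::isxdigit(static_cast<unsigned char>(line[0])))
            throw std::runtime_error("chunk-size must start with a hex digit");
        // std::stoull(..., 16) accepts an "0x" prefix, much like the lenient
        // parsing this patch removes -- so ban that prefix explicitly.
        if (line.rfind("0x", 0) == 0 || line.rfind("0X", 0) == 0)
            throw std::runtime_error("chunk starts with 0x");
        size_t idx = 0;
        const uint64_t size = std::stoull(line, &idx, 16);
        if (line.compare(idx, std::string::npos, "\r\n") != 0)
            throw std::runtime_error("expected CRLF right after chunk-size");
        return size;
    }

    int main() {
        std::cout << parseStrictChunkSize("1a\r\n") << "\n"; // 26
        try { parseStrictChunkSize("0x1a\r\n"); }
        catch (const std::exception &ex) { std::cout << ex.what() << "\n"; }
    }
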
+diff --git a/src/parser/Tokenizer.cc b/src/parser/Tokenizer.cc
+index 68f4aec..8516869 100644
+--- a/src/parser/Tokenizer.cc
++++ b/src/parser/Tokenizer.cc
+@@ -147,6 +147,18 @@ Parser::Tokenizer::skipAll(const CharacterSet &tokenChars)
+     return success(prefixLen);
+ }
+ 
++void
++Parser::Tokenizer::skipRequired(const char *description, const SBuf &tokenToSkip)
++{
++    if (skip(tokenToSkip) || tokenToSkip.isEmpty())
++        return;
++
++    if (tokenToSkip.startsWith(buf_))
++        throw InsufficientInput();
++
++    throw TextException(ToSBuf("cannot skip ", description), Here());
++}
++
+ bool
+ Parser::Tokenizer::skipOne(const CharacterSet &chars)
+ {
+diff --git a/src/parser/Tokenizer.h b/src/parser/Tokenizer.h
+index 03a8388..78ab9e7 100644
+--- a/src/parser/Tokenizer.h
++++ b/src/parser/Tokenizer.h
+@@ -115,6 +115,13 @@ public:
+      */
+     SBuf::size_type skipAll(const CharacterSet &discardables);
+ 
++    /** skips a given character sequence (string);
++     * does nothing if the sequence is empty
++     *
++     * \throws exception on mismatching prefix or InsufficientInput
++     */
++    void skipRequired(const char *description, const SBuf &tokenToSkip);
++
+     /** Removes a single trailing character from the set.
+      *
+      * \return whether a character was removed
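
Editor's sketch: skipRequired() above distinguishes three outcomes: the token is present (consume it), the buffered bytes are a proper prefix of the token (throw InsufficientInput and retry with more data), or anything else (hard parse error). A standalone sketch of that contract with std::string in place of SBuf:

    #include <iostream>
    #include <stdexcept>
    #include <string>

    struct InsufficientInput {};

    // Consumes `tokenToSkip` at the start of `buf`, or reports why it cannot.
    void skipRequired(std::string &buf, const std::string &tokenToSkip) {
        if (buf.rfind(tokenToSkip, 0) == 0) {   // full match: consume it
            buf.erase(0, tokenToSkip.size());
            return;
        }
        if (tokenToSkip.rfind(buf, 0) == 0)     // buf is a prefix of the token
            throw InsufficientInput();          // need more bytes to decide
        throw std::runtime_error("cannot skip required token");
    }

    int main() {
        std::string buf = "\r";                 // half of a CRLF so far
        try { skipRequired(buf, "\r\n"); }
        catch (const InsufficientInput &) { std::cout << "need more data\n"; }
        buf += "\nrest";
        skipRequired(buf, "\r\n");
        std::cout << "left: " << buf << "\n";   // prints: left: rest
    }
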
diff --git a/SOURCES/squid-4.15-CVE-2023-46847.patch b/SOURCES/squid-4.15-CVE-2023-46847.patch
new file mode 100644
index 0000000..c268517
--- /dev/null
+++ b/SOURCES/squid-4.15-CVE-2023-46847.patch
@@ -0,0 +1,23 @@
+diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc
+index 6a9736f..0a883fa 100644
+--- a/src/auth/digest/Config.cc
++++ b/src/auth/digest/Config.cc
+@@ -847,11 +847,15 @@ Auth::Digest::Config::decode(char const *proxy_auth, const char *aRequestRealm)
+             break;
+ 
+         case DIGEST_NC:
+-            if (value.size() != 8) {
++            if (value.size() == 8) {
++                // for historical reasons, the nc value MUST be exactly 8 bytes
++                static_assert(sizeof(digest_request->nc) == 8 + 1, "bad nc buffer size");
++                xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
++                debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
++            } else {
+                 debugs(29, 9, "Invalid nc '" << value << "' in '" << temp << "'");
++                digest_request->nc[0] = 0;
+             }
+-            xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
+-            debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
+             break;
+ 
+         case DIGEST_CNONCE:
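
Editor's sketch: the hunk above fixes CVE-2023-46847 by validating the nonce-count length before copying into the fixed-size buffer, instead of copying first and complaining afterwards. A minimal standalone rendering of the validate-then-copy pattern (not Squid's actual digest structures):

    #include <cstring>
    #include <iostream>

    struct DigestRequest {
        char nc[8 + 1]; // 8 hex digits + NUL, as the protocol requires
    };

    void setNonceCount(DigestRequest &req, const char *value, size_t len) {
        static_assert(sizeof(req.nc) == 8 + 1, "bad nc buffer size");
        if (len == 8) {
            memcpy(req.nc, value, 8);
            req.nc[8] = '\0';
        } else {
            req.nc[0] = '\0'; // reject and clear instead of overflowing
        }
    }

    int main() {
        DigestRequest req;
        setNonceCount(req, "00000001", 8);
        std::cout << "nc='" << req.nc << "'\n";
        setNonceCount(req, "1", 1); // invalid length is safely ignored
        std::cout << "nc='" << req.nc << "'\n";
    }
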
diff --git a/SOURCES/squid-4.15-CVE-2023-49285.patch b/SOURCES/squid-4.15-CVE-2023-49285.patch
new file mode 100644
index 0000000..f6351e4
--- /dev/null
+++ b/SOURCES/squid-4.15-CVE-2023-49285.patch
@@ -0,0 +1,30 @@
+commit 77b3fb4df0f126784d5fd4967c28ed40eb8d521b
+Author: Alex Rousskov 
+Date:   Wed Oct 25 19:41:45 2023 +0000
+
+    RFC 1123: Fix date parsing (#1538)
+    
+    The bug was discovered and detailed by Joshua Rogers at
+    https://megamansec.github.io/Squid-Security-Audit/datetime-overflow.html
+    where it was filed as "1-Byte Buffer OverRead in RFC 1123 date/time
+    Handling".
+
+diff --git a/lib/rfc1123.c b/lib/rfc1123.c
+index e5bf9a4d7..cb484cc00 100644
+--- a/lib/rfc1123.c
++++ b/lib/rfc1123.c
+@@ -50,7 +50,13 @@ make_month(const char *s)
+     char month[3];
+ 
+     month[0] = xtoupper(*s);
++    if (!month[0])
++        return -1; // protects *(s + 1) below
++
+     month[1] = xtolower(*(s + 1));
++    if (!month[1])
++        return -1; // protects *(s + 2) below
++
+     month[2] = xtolower(*(s + 2));
+ 
+     for (i = 0; i < 12; i++)
+
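
Editor's sketch: the two added checks stop make_month() from reading past a short string such as "N" at the end of a header. A standalone simplification of the fixed logic follows; it lower-cases all three bytes and compares against lowercase month names, unlike the original's xtoupper on the first byte, so treat it as illustrative only.

    #include <cctype>
    #include <cstdio>

    static const char *Months[12] = {
        "jan", "feb", "mar", "apr", "may", "jun",
        "jul", "aug", "sep", "oct", "nov", "dec"
    };

    int makeMonth(const char *s) {
        char month[3];
        month[0] = static_cast<char>(tolower(static_cast<unsigned char>(s[0])));
        if (!month[0])
            return -1; // protects s[1] below
        month[1] = static_cast<char>(tolower(static_cast<unsigned char>(s[1])));
        if (!month[1])
            return -1; // protects s[2] below
        month[2] = static_cast<char>(tolower(static_cast<unsigned char>(s[2])));
        for (int i = 0; i < 12; ++i)
            if (month[0] == Months[i][0] && month[1] == Months[i][1] && month[2] == Months[i][2])
                return i;
        return -1;
    }

    int main() {
        printf("%d %d\n", makeMonth("Nov"), makeMonth("N")); // 10 -1
    }
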
diff --git a/SOURCES/squid-4.15-CVE-2023-49286.patch b/SOURCES/squid-4.15-CVE-2023-49286.patch
new file mode 100644
index 0000000..28f5beb
--- /dev/null
+++ b/SOURCES/squid-4.15-CVE-2023-49286.patch
@@ -0,0 +1,62 @@
+diff --git a/src/ipc.cc b/src/ipc.cc
+index 42e11e6..a68e623 100644
+--- a/src/ipc.cc
++++ b/src/ipc.cc
+@@ -19,6 +19,11 @@
+ #include "SquidConfig.h"
+ #include "SquidIpc.h"
+ #include "tools.h"
++#include <cstdlib>
++
++#if HAVE_UNISTD_H
++#include <unistd.h>
++#endif
+ 
+ static const char *hello_string = "hi there\n";
+ #ifndef HELLO_BUF_SZ
+@@ -365,6 +370,22 @@ ipcCreate(int type, const char *prog, const char *const args[], const char *name
+     }
+ 
+     PutEnvironment();
++
++    // A dup(2) wrapper that reports and exits the process on errors. The
++    // exiting logic is only suitable for this child process context.
++    const auto dupOrExit = [prog,name](const int oldFd) {
++        const auto newFd = dup(oldFd);
++        if (newFd < 0) {
++            const auto savedErrno = errno;
++            debugs(54, DBG_CRITICAL, "ERROR: Helper process initialization failure: " << name <<
++                   Debug::Extra << "helper (CHILD) PID: " << getpid() <<
++                   Debug::Extra << "helper program name: " << prog <<
++                   Debug::Extra << "dup(2) system call error for FD " << oldFd << ": " << xstrerr(savedErrno));
++            _exit(EXIT_FAILURE);
++        }
++        return newFd;
++    };
++
+     /*
+      * This double-dup stuff avoids problems when one of
+      *  crfd, cwfd, or debug_log are in the rage 0-2.
+@@ -372,17 +393,16 @@ ipcCreate(int type, const char *prog, const char *const args[], const char *name
+ 
+     do {
+         /* First make sure 0-2 is occupied by something. Gets cleaned up later */
+-        x = dup(crfd);
+-        assert(x > -1);
+-    } while (x < 3 && x > -1);
++        x = dupOrExit(crfd);
++    } while (x < 3);
+ 
+     close(x);
+ 
+-    t1 = dup(crfd);
++    t1 = dupOrExit(crfd);
+ 
+-    t2 = dup(cwfd);
++    t2 = dupOrExit(cwfd);
+ 
+-    t3 = dup(fileno(debug_log));
++    t3 = dupOrExit(fileno(debug_log));
+ 
+     assert(t1 > 2 && t2 > 2 && t3 > 2);
+ 
diff --git a/SOURCES/squid-4.15-CVE-2023-50269.patch b/SOURCES/squid-4.15-CVE-2023-50269.patch
new file mode 100644
index 0000000..06ea82c
--- /dev/null
+++ b/SOURCES/squid-4.15-CVE-2023-50269.patch
@@ -0,0 +1,50 @@
+diff --git a/src/ClientRequestContext.h b/src/ClientRequestContext.h
+index fe2edf6..47aa935 100644
+--- a/src/ClientRequestContext.h
++++ b/src/ClientRequestContext.h
+@@ -81,6 +81,10 @@ public:
+ #endif
+     ErrorState *error; ///< saved error page for centralized/delayed processing
+     bool readNextRequest; ///< whether Squid should read after error handling
++
++#if FOLLOW_X_FORWARDED_FOR
++    size_t currentXffHopNumber = 0; ///< number of X-Forwarded-For header values processed so far
++#endif
+ };
+ 
+ #endif /* SQUID_CLIENTREQUESTCONTEXT_H */
+diff --git a/src/client_side_request.cc b/src/client_side_request.cc
+index 1c6ff62..b758f6f 100644
+--- a/src/client_side_request.cc
++++ b/src/client_side_request.cc
+@@ -78,6 +78,11 @@
+ static const char *const crlf = "\r\n";
+ 
+ #if FOLLOW_X_FORWARDED_FOR
++
++#if !defined(SQUID_X_FORWARDED_FOR_HOP_MAX)
++#define SQUID_X_FORWARDED_FOR_HOP_MAX 64
++#endif
++
+ static void clientFollowXForwardedForCheck(allow_t answer, void *data);
+ #endif /* FOLLOW_X_FORWARDED_FOR */
+ 
+@@ -485,8 +490,16 @@ clientFollowXForwardedForCheck(allow_t answer, void *data)
+                 /* override the default src_addr tested if we have to go deeper than one level into XFF */
+                 Filled(calloutContext->acl_checklist)->src_addr = request->indirect_client_addr;
+             }
+-            calloutContext->acl_checklist->nonBlockingCheck(clientFollowXForwardedForCheck, data);
+-            return;
++            if (++calloutContext->currentXffHopNumber < SQUID_X_FORWARDED_FOR_HOP_MAX) {
++                calloutContext->acl_checklist->nonBlockingCheck(clientFollowXForwardedForCheck, data);
++                return;
++            }
++            const auto headerName = Http::HeaderLookupTable.lookup(Http::HdrType::X_FORWARDED_FOR).name;
++            debugs(28, DBG_CRITICAL, "ERROR: Ignoring trailing " << headerName << " addresses" <<
++                   Debug::Extra << "addresses allowed by follow_x_forwarded_for: " << calloutContext->currentXffHopNumber <<
++                   Debug::Extra << "last/accepted address: " << request->indirect_client_addr <<
++                   Debug::Extra << "ignored trailing addresses: " << request->x_forwarded_for_iterator);
++            // fall through to resume clientAccessCheck() processing
+         }
+     }
+ 
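
Editor's sketch: SQUID_X_FORWARDED_FOR_HOP_MAX bounds how many X-Forwarded-For hops Squid will evaluate, so a request carrying thousands of addresses can no longer drive unbounded recursion through the ACL callback. A standalone, synchronous simplification of the capped right-to-left walk (whitespace trimming and the per-hop follow_x_forwarded_for ACL check omitted):

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    const size_t XffHopMax = 64; // mirrors SQUID_X_FORWARDED_FOR_HOP_MAX

    // Walks a comma-separated XFF value right-to-left, up to XffHopMax hops.
    std::string indirectClientAddr(const std::string &xff, const std::string &peer) {
        std::vector<std::string> hops;
        std::stringstream ss(xff);
        for (std::string hop; std::getline(ss, hop, ',');)
            hops.push_back(hop);
        std::string client = peer;
        size_t hopNumber = 0;
        for (auto it = hops.rbegin(); it != hops.rend(); ++it) {
            if (++hopNumber > XffHopMax) {
                std::cerr << "ignoring " << (hops.rend() - it) << " trailing XFF addresses\n";
                break;
            }
            client = *it; // in Squid, each hop must also pass the ACL check
        }
        return client;
    }

    int main() {
        std::cout << indirectClientAddr("10.0.0.1, 192.168.1.5", "203.0.113.7") << "\n";
    }
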
diff --git a/SOURCES/squid-4.15-CVE-2023-5824.patch b/SOURCES/squid-4.15-CVE-2023-5824.patch
new file mode 100644
index 0000000..4395c71
--- /dev/null
+++ b/SOURCES/squid-4.15-CVE-2023-5824.patch
@@ -0,0 +1,4352 @@
+commit bf9a9ec5329bde6acc26797d1fa7a7a165fec01f
+Author: Tomas Korbar 
+Date:   Tue Nov 21 13:21:43 2023 +0100
+
+    Fix CVE-2023-5824 (#1335) (#1561) (#1562)
+    Supply ALE with HttpReply before checking http_reply_access (#398)
+    Replace adjustable base reply - downstream change necessary for
+    backport
+
+diff --git a/src/AccessLogEntry.cc b/src/AccessLogEntry.cc
+index 1956c9b..4f1e73e 100644
+--- a/src/AccessLogEntry.cc
++++ b/src/AccessLogEntry.cc
+@@ -10,6 +10,7 @@
+ #include "AccessLogEntry.h"
+ #include "HttpReply.h"
+ #include "HttpRequest.h"
++#include "MemBuf.h"
+ #include "SquidConfig.h"
+ 
+ #if USE_OPENSSL
+@@ -89,6 +90,8 @@ AccessLogEntry::getExtUser() const
+     return nullptr;
+ }
+ 
++AccessLogEntry::AccessLogEntry() {}
++
+ AccessLogEntry::~AccessLogEntry()
+ {
+     safe_free(headers.request);
+@@ -97,14 +100,11 @@ AccessLogEntry::~AccessLogEntry()
+     safe_free(adapt.last_meta);
+ #endif
+ 
+-    safe_free(headers.reply);
+-
+     safe_free(headers.adapted_request);
+     HTTPMSGUNLOCK(adapted_request);
+ 
+     safe_free(lastAclName);
+ 
+-    HTTPMSGUNLOCK(reply);
+     HTTPMSGUNLOCK(request);
+ #if ICAP_CLIENT
+     HTTPMSGUNLOCK(icap.reply);
+@@ -124,3 +124,10 @@ AccessLogEntry::effectiveVirginUrl() const
+     return nullptr;
+ }
+ 
++void
++AccessLogEntry::packReplyHeaders(MemBuf &mb) const
++{
++    if (reply)
++        reply->packHeadersUsingFastPacker(mb);
++}
++
+diff --git a/src/AccessLogEntry.h b/src/AccessLogEntry.h
+index 1f29e61..f1d2ecc 100644
+--- a/src/AccessLogEntry.h
++++ b/src/AccessLogEntry.h
+@@ -40,13 +40,7 @@ class AccessLogEntry: public RefCountable
+ public:
+     typedef RefCount<AccessLogEntry> Pointer;
+ 
+-    AccessLogEntry() :
+-        url(nullptr),
+-        lastAclName(nullptr),
+-        reply(nullptr),
+-        request(nullptr),
+-        adapted_request(nullptr)
+-    {}
++    AccessLogEntry();
+     ~AccessLogEntry();
+ 
+     /// Fetch the client IP log string into the given buffer.
+@@ -63,6 +57,9 @@ public:
+     /// Fetch the transaction method string (ICP opcode, HTCP opcode or HTTP method)
+     SBuf getLogMethod() const;
+ 
++    /// dump all reply headers (for sending or risky logging)
++    void packReplyHeaders(MemBuf &mb) const;
++
+     SBuf url;
+ 
+     /// TCP/IP level details about the client connection
+@@ -187,14 +184,12 @@ public:
+ 
+     public:
+         Headers() : request(NULL),
+-            adapted_request(NULL),
+-            reply(NULL) {}
++            adapted_request(NULL)
++            {}
+ 
+         char *request; //< virgin HTTP request headers
+ 
+         char *adapted_request; //< HTTP request headers after adaptation and redirection
+-
+-        char *reply;
+     } headers;
+ 
+ #if USE_ADAPTATION
+@@ -212,13 +207,13 @@ public:
+     } adapt;
+ #endif
+ 
+-    const char *lastAclName; ///< string for external_acl_type %ACL format code
++    const char *lastAclName = nullptr; ///< string for external_acl_type %ACL format code
+     SBuf lastAclData; ///< string for external_acl_type %DATA format code
+ 
+     HierarchyLogEntry hier;
+-    HttpReply *reply;
+-    HttpRequest *request; //< virgin HTTP request
+-    HttpRequest *adapted_request; //< HTTP request after adaptation and redirection
++    HttpReplyPointer reply;
++    HttpRequest *request = nullptr; //< virgin HTTP request
++    HttpRequest *adapted_request = nullptr; //< HTTP request after adaptation and redirection
+ 
+     /// key:value pairs set by squid.conf note directive and
+     /// key=value pairs returned from URL rewrite/redirect helper
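
Editor's sketch: replacing the raw `HttpReply *reply` with `HttpReplyPointer` swaps manual HTTPMSGLOCK/HTTPMSGUNLOCK bookkeeping for intrusive reference counting, so the log entry can no longer leak or double-free the reply. A generic standalone sketch of the intrusive-refcount idea (not Squid's actual RefCount implementation):

    #include <iostream>
    #include <utility>

    // Minimal intrusive refcounting, in the spirit of RefCountable/RefCount.
    class RefCountable {
    public:
        virtual ~RefCountable() = default;
        void lock() const { ++count_; }
        bool unlock() const { return --count_ == 0; } // true: caller must delete
    private:
        mutable int count_ = 0;
    };

    template <class T>
    class RefCount {
    public:
        RefCount(T *p = nullptr) : p_(p) { if (p_) p_->lock(); }
        RefCount(const RefCount &o) : RefCount(o.p_) {}
        ~RefCount() { if (p_ && p_->unlock()) delete p_; }
        RefCount &operator=(RefCount o) { std::swap(p_, o.p_); return *this; }
        T *getRaw() const { return p_; }
        explicit operator bool() const { return p_; }
    private:
        T *p_;
    };

    struct HttpReplyish : RefCountable { int status = 200; };

    int main() {
        RefCount<HttpReplyish> a(new HttpReplyish);
        RefCount<HttpReplyish> b = a;   // shared; no HTTPMSGLOCK boilerplate
        std::cout << b.getRaw()->status << "\n";
    }   // last owner deletes automatically
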
+diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc
+index 8dcc7e3..21206a9 100644
+--- a/src/HttpHeader.cc
++++ b/src/HttpHeader.cc
+@@ -9,6 +9,7 @@
+ /* DEBUG: section 55    HTTP Header */
+ 
+ #include "squid.h"
++#include "base/Assure.h"
+ #include "base/EnumIterator.h"
+ #include "base64.h"
+ #include "globals.h"
+diff --git a/src/HttpHeaderTools.cc b/src/HttpHeaderTools.cc
+index f1e45a4..1337b8d 100644
+--- a/src/HttpHeaderTools.cc
++++ b/src/HttpHeaderTools.cc
+@@ -479,7 +479,7 @@ httpHdrAdd(HttpHeader *heads, HttpRequest *request, const AccessLogEntryPointer
+ 
+     checklist.al = al;
+     if (al && al->reply) {
+-        checklist.reply = al->reply;
++        checklist.reply = al->reply.getRaw();
+         HTTPMSGLOCK(checklist.reply);
+     }
+ 
+diff --git a/src/HttpReply.cc b/src/HttpReply.cc
+index 6feb262..e74960b 100644
+--- a/src/HttpReply.cc
++++ b/src/HttpReply.cc
+@@ -20,7 +20,9 @@
+ #include "HttpReply.h"
+ #include "HttpRequest.h"
+ #include "MemBuf.h"
++#include "sbuf/Stream.h"
+ #include "SquidConfig.h"
++#include "SquidMath.h"
+ #include "SquidTime.h"
+ #include "Store.h"
+ #include "StrList.h"
+@@ -524,6 +526,38 @@ HttpReply::expectedBodyTooLarge(HttpRequest& request)
+     return expectedSize > bodySizeMax;
+ }
+ 
++size_t
++HttpReply::parseTerminatedPrefix(const char * const terminatedBuf, const size_t bufSize)
++{
++    auto error = Http::scNone;
++    const bool eof = false; // TODO: Remove after removing atEnd from HttpHeader::parse()
++    if (parse(terminatedBuf, bufSize, eof, &error)) {
++        debugs(58, 7, "success after accumulating " << bufSize << " bytes and parsing " << hdr_sz);
++        Assure(pstate == psParsed);
++        Assure(hdr_sz > 0);
++        Assure(!Less(bufSize, hdr_sz)); // cannot parse more bytes than we have
++        return hdr_sz; // success
++    }
++
++    Assure(pstate != psParsed);
++    hdr_sz = 0;
++
++    if (error) {
++        throw TextException(ToSBuf("failed to parse HTTP headers",
++                                   Debug::Extra, "parser error code: ", error,
++                                   Debug::Extra, "accumulated unparsed bytes: ", bufSize,
++                                   Debug::Extra, "reply_header_max_size: ", Config.maxReplyHeaderSize),
++                            Here());
++    }
++
++    debugs(58, 3, "need more bytes after accumulating " << bufSize << " out of " << Config.maxReplyHeaderSize);
++
++    // the parse() call above enforces Config.maxReplyHeaderSize limit
++    // XXX: Make this a strict comparison after fixing Http::Message::parse() enforcement
++    Assure(bufSize <= Config.maxReplyHeaderSize);
++    return 0; // parsed nothing, need more data
++}
++
+ void
+ HttpReply::calcMaxBodySize(HttpRequest& request) const
+ {
+diff --git a/src/HttpReply.h b/src/HttpReply.h
+index 6c90e20..4301cfd 100644
+--- a/src/HttpReply.h
++++ b/src/HttpReply.h
+@@ -121,6 +121,13 @@ public:
+     /// \returns false if any information is missing
+     bool olderThan(const HttpReply *them) const;
+ 
++    /// Parses response status line and headers at the start of the given
++    /// NUL-terminated buffer of the given size. Respects reply_header_max_size.
++    /// Assures pstate becomes Http::Message::psParsed on (and only on) success.
++    /// \returns the number of bytes in a successfully parsed prefix (or zero)
++    /// \retval 0 implies that more data is needed to parse the response prefix
++    size_t parseTerminatedPrefix(const char *, size_t);
++
+ private:
+     /** initialize */
+     void init();
+diff --git a/src/MemObject.cc b/src/MemObject.cc
+index df7791f..650d3fd 100644
+--- a/src/MemObject.cc
++++ b/src/MemObject.cc
+struct LowestMemReader : public unary_function<store_client, void> {
+     LowestMemReader(int64_t seed):current(seed) {}
+ 
+     void operator() (store_client const &x) {
+-        if (x.memReaderHasLowerOffset(current))
+-            current = x.copyInto.offset;
++        if (x.getType() == STORE_MEM_CLIENT)
++            current = std::min(current, x.discardableHttpEnd());
+     }
+ 
+     int64_t current;
+@@ -369,6 +369,12 @@ MemObject::policyLowestOffsetToKeep(bool swap) const
+      */
+     int64_t lowest_offset = lowestMemReaderOffset();
+ 
++    // XXX: Remove the last (Config.onoff.memory_cache_first-based) condition
++    // and update keepForLocalMemoryCache() accordingly. The caller wants to
++    // remove all local memory that is safe to remove. Honoring caching
++    // preferences is its responsibility. Our responsibility is safety. The
++    // situation was different when ff4b33f added that condition -- there was no
++    // keepInLocalMemory/keepForLocalMemoryCache() call guard back then.
+     if (endOffset() < lowest_offset ||
+             endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
+             (swap && !Config.onoff.memory_cache_first))
+@@ -492,7 +498,7 @@ MemObject::mostBytesAllowed() const
+ 
+ #endif
+ 
+-        j = sc->delayId.bytesWanted(0, sc->copyInto.length);
++        j = sc->bytesWanted();
+ 
+         if (j > jmax) {
+             jmax = j;
+diff --git a/src/MemObject.h b/src/MemObject.h
+index 711966d..9f4add0 100644
+--- a/src/MemObject.h
++++ b/src/MemObject.h
+@@ -56,9 +56,23 @@ public:
+ 
+     void write(const StoreIOBuffer &buf);
+     void unlinkRequest();
++
++    /// HTTP response before 304 (Not Modified) updates
++    /// starts "empty"; modified via replaceBaseReply() or adjustableBaseReply()
++    HttpReply &baseReply() const { return *_reply; }
++
+     HttpReply const *getReply() const;
+     void replaceHttpReply(HttpReply *newrep);
+     void stat (MemBuf * mb) const;
++
++    /// The offset of the last memory-stored HTTP response byte plus one.
++    /// * HTTP response headers (if any) are stored at offset zero.
++    /// * HTTP response body byte[n] usually has offset (hdr_sz + n), where
++    ///   hdr_sz is the size of stored HTTP response headers (zero if none); and
++    ///   n is the corresponding byte offset in the whole resource body.
++    ///   However, some 206 (Partial Content) response bodies are stored (and
++    ///   retrieved) as regular 200 response bodies, disregarding offsets of
++    ///   their body parts. \sa HttpStateData::decideIfWeDoRanges().
+     int64_t endOffset () const;
+     void markEndOfReplyHeaders(); ///< sets _reply->hdr_sz to endOffset()
+     /// negative if unknown; otherwise, expected object_sz, expected endOffset
+diff --git a/src/MemStore.cc b/src/MemStore.cc
+index a4a6ab2..6762c4f 100644
+--- a/src/MemStore.cc
++++ b/src/MemStore.cc
+@@ -17,6 +17,8 @@
+ #include "MemObject.h"
+ #include "MemStore.h"
+ #include "mime_header.h"
++#include "sbuf/SBuf.h"
++#include "sbuf/Stream.h"
+ #include "SquidConfig.h"
+ #include "SquidMath.h"
+ #include "StoreStats.h"
+@@ -316,19 +318,25 @@ MemStore::get(const cache_key *key)
+     // create a brand new store entry and initialize it with stored info
+     StoreEntry *e = new StoreEntry();
+ 
+-    // XXX: We do not know the URLs yet, only the key, but we need to parse and
+-    // store the response for the Root().find() callers to be happy because they
+-    // expect IN_MEMORY entries to already have the response headers and body.
+-    e->createMemObject();
+-
+-    anchorEntry(*e, index, *slot);
+-
+-    const bool copied = copyFromShm(*e, index, *slot);
+-
+-    if (copied)
+-        return e;
++    try {
++        // XXX: We do not know the URLs yet, only the key, but we need to parse and
++        // store the response for the Root().find() callers to be happy because they
++        // expect IN_MEMORY entries to already have the response headers and body.
++        e->createMemObject();
++
++        anchorEntry(*e, index, *slot);
++
++        // TODO: make copyFromShm() throw on all failures, simplifying this code
++        if (copyFromShm(*e, index, *slot))
++            return e;
++        debugs(20, 3, "failed for " << *e);
++    } catch (...) {
++        // see store_client::parseHttpHeadersFromDisk() for problems this may log
++        debugs(20, DBG_IMPORTANT, "ERROR: Cannot load a cache hit from shared memory" <<
++               Debug::Extra << "exception: " << CurrentException <<
++               Debug::Extra << "cache_mem entry: " << *e);
++    }
+ 
+-    debugs(20, 3, "failed for " << *e);
+     map->freeEntry(index); // do not let others into the same trap
+     destroyStoreEntry(static_cast(e));
+     return NULL;
+@@ -473,6 +481,8 @@ MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnc
+     Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
+     bool wasEof = anchor.complete() && sid < 0;
+     int64_t sliceOffset = 0;
++
++    SBuf httpHeaderParsingBuffer;
+     while (sid >= 0) {
+         const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
+         // slice state may change during copying; take snapshots now
+@@ -495,10 +505,18 @@ MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnc
+             const StoreIOBuffer sliceBuf(wasSize - prefixSize,
+                                          e.mem_obj->endOffset(),
+                                          page + prefixSize);
+-            if (!copyFromShmSlice(e, sliceBuf, wasEof))
+-                return false;
++
++            copyFromShmSlice(e, sliceBuf);
+             debugs(20, 8, "entry " << index << " copied slice " << sid <<
+                    " from " << extra.page << '+' << prefixSize);
++
++            // parse headers if needed; they might span multiple slices!
++            auto &reply = e.mem().baseReply();
++            if (reply.pstate != psParsed) {
++                httpHeaderParsingBuffer.append(sliceBuf.data, sliceBuf.length);
++                if (reply.parseTerminatedPrefix(httpHeaderParsingBuffer.c_str(), httpHeaderParsingBuffer.length()))
++                    httpHeaderParsingBuffer = SBuf(); // we do not need these bytes anymore
++            }
+         }
+         // else skip a [possibly incomplete] slice that we copied earlier
+ 
+@@ -524,6 +542,9 @@ MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnc
+     debugs(20, 5, "mem-loaded all " << e.mem_obj->endOffset() << '/' <<
+            anchor.basics.swap_file_sz << " bytes of " << e);
+ 
++    if (e.mem().baseReply().pstate != psParsed)
++        throw TextException(ToSBuf("truncated mem-cached headers; accumulated: ", httpHeaderParsingBuffer.length()), Here());
++
+     // from StoreEntry::complete()
+     e.mem_obj->object_sz = e.mem_obj->endOffset();
+     e.store_status = STORE_OK;
+@@ -539,32 +560,11 @@ MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnc
+ }
+ 
+ /// imports one shared memory slice into local memory
+-bool
+-MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
++void
++MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf)
+ {
+     debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);
+ 
+-    // from store_client::readBody()
+-    // parse headers if needed; they might span multiple slices!
+-    HttpReply *rep = (HttpReply *)e.getReply();
+-    if (rep->pstate < psParsed) {
+-        // XXX: have to copy because httpMsgParseStep() requires 0-termination
+-        MemBuf mb;
+-        mb.init(buf.length+1, buf.length+1);
+-        mb.append(buf.data, buf.length);
+-        mb.terminate();
+-        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
+-        if (result > 0) {
+-            assert(rep->pstate == psParsed);
+-        } else if (result < 0) {
+-            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
+-            return false;
+-        } else { // more slices are needed
+-            assert(!eof);
+-        }
+-    }
+-    debugs(20, 7, "rep pstate: " << rep->pstate);
+-
+     // local memory stores both headers and body so copy regardless of pstate
+     const int64_t offBefore = e.mem_obj->endOffset();
+     assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
+@@ -572,7 +572,6 @@ MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
+     // expect to write the entire buf because StoreEntry::write() never fails
+     assert(offAfter >= 0 && offBefore <= offAfter &&
+            static_cast(offAfter - offBefore) == buf.length);
+-    return true;
+ }
+ 
+ /// whether we should cache the entry
+diff --git a/src/MemStore.h b/src/MemStore.h
+index 516da3c..31a2015 100644
+--- a/src/MemStore.h
++++ b/src/MemStore.h
+@@ -76,7 +76,7 @@ protected:
+     void copyToShm(StoreEntry &e);
+     void copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor, Ipc::StoreMap::Slice &slice);
+     bool copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor);
+-    bool copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof);
++    void copyFromShmSlice(StoreEntry &, const StoreIOBuffer &);
+ 
+     void updateHeadersOrThrow(Ipc::StoreMapUpdate &update);
+ 
+diff --git a/src/SquidMath.h b/src/SquidMath.h
+index c70acd1..bfca0cc 100644
+--- a/src/SquidMath.h
++++ b/src/SquidMath.h
+@@ -9,6 +9,11 @@
+ #ifndef _SQUID_SRC_SQUIDMATH_H
+ #define _SQUID_SRC_SQUIDMATH_H
+ 
++#include <limits>
++#include <type_traits>
++
++// TODO: Move to src/base/Math.h and drop the Math namespace
++
+ /* Math functions we define locally for Squid */
+ namespace Math
+ {
+@@ -21,5 +26,165 @@ double doubleAverage(const double, const double, int, const int);
+ 
+ } // namespace Math
+ 
++// If Sum() performance becomes important, consider using GCC and clang
++// built-ins like __builtin_add_overflow() instead of manual overflow checks.
++
++/// detects a pair of unsigned types
++/// reduces code duplication in declarations further below
++template <typename T, typename U>
++using AllUnsigned = typename std::conditional<
++                    std::is_unsigned<T>::value && std::is_unsigned<U>::value,
++                    std::true_type,
++                    std::false_type
++                    >::type;
++
++// TODO: Replace with std::cmp_less() after migrating to C++20.
++/// whether integer a is less than integer b, with correct overflow handling
++template <typename A, typename B>
++constexpr bool
++Less(const A a, const B b) {
++    // The casts below make standard C++ integer conversions explicit. They
++    // quell compiler warnings about signed/unsigned comparison. The first two
++    // lines exclude different-sign a and b, making the casts/comparison safe.
++    using AB = typename std::common_type<A, B>::type;
++    return
++        (a >= 0 && b < 0) ? false :
++        (a < 0 && b >= 0) ? true :
++        /* (a >= 0) == (b >= 0) */ static_cast<AB>(a) < static_cast<AB>(b);
++}
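Less() exists because a bare `<` applies the usual arithmetic conversions, silently turning a negative signed operand into a huge unsigned value. A minimal sketch, assuming this header is included (values illustrative):

    #include <cassert>

    int main()
    {
        const int a = -1;
        const unsigned b = 1u;
        assert(!(a < b));   // surprising: -1 converts to UINT_MAX here
        assert(Less(a, b)); // correct: a negative is less than any unsigned
        return 0;
    }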
++
++/// ensure that T is supported by NaturalSum() and friends
++template <typename T>
++constexpr void
++AssertNaturalType()
++{
++    static_assert(std::numeric_limits<T>::is_bounded, "std::numeric_limits<T>::max() is meaningful");
++    static_assert(std::numeric_limits<T>::is_exact, "no silent loss of precision");
++    static_assert(!std::is_enum<T>::value, "no silent creation of non-enumerated values");
++}
++
++// TODO: Investigate whether this optimization can be expanded to [signed] types
++// A and B when std::numeric_limits<AB>::is_modulo is true.
++/// This IncreaseSumInternal() overload is optimized for speed.
++/// \returns a non-overflowing sum of the two unsigned arguments (or nothing)
++/// \prec both argument types are unsigned
++template <typename S, typename A, typename B, std::enable_if_t<AllUnsigned<A, B>::value, int> = 0>
++std::pair<S, bool>
++IncreaseSumInternal(const A a, const B b) {
++    // paranoid: AllUnsigned<A, B> precondition established that already
++    static_assert(std::is_unsigned<A>::value, "AllUnsigned dispatch worked for A");
++    static_assert(std::is_unsigned<B>::value, "AllUnsigned dispatch worked for B");
++
++    AssertNaturalType<S>();
++    AssertNaturalType<A>();
++    AssertNaturalType<B>();
++
++    // we should only be called by IncreaseSum(); it forces integer promotion
++    static_assert(std::is_same::value, "a will not be promoted");
++    static_assert(std::is_same::value, "b will not be promoted");
++    // and without integer promotions, a sum of unsigned integers is unsigned
++    static_assert(std::is_unsigned::value, "a+b is unsigned");
++
++    // with integer promotions ruled out, a or b can only undergo integer
++    // conversion to the higher rank type (A or B, we do not know which)
++    using AB = typename std::common_type<A, B>::type;
++    static_assert(std::is_same<AB, A>::value || std::is_same<AB, B>::value, "no unexpected conversions");
++    static_assert(std::is_same<AB, decltype(a + b)>::value, "lossless assignment");
++    const AB sum = a + b;
++
++    static_assert(std::numeric_limits<AB>::is_modulo, "we can detect overflows");
++    // 1. modulo math: overflowed sum is smaller than any of its operands
++    // 2. the sum may overflow S (i.e. the return base type)
++    // We do not need Less() here because we compare promoted unsigned types.
++    return (sum >= a && sum <= std::numeric_limits<S>::max()) ?
++           std::make_pair(sum, true) : std::make_pair(S(), false);
++}
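The heart of this overload is the modulo-math test: for unsigned operands, an overflowed sum wraps around and therefore compares smaller than either operand. A standalone sketch of the same detection (the real code performs the sum in the promoted common type, where unsigned arithmetic is modulo by definition):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint8_t a = 200, b = 100;
        const uint8_t sum = static_cast<uint8_t>(a + b); // 300 wraps to 44
        assert(sum < a); // the wrap is detectable by comparing to an operand
        return 0;
    }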
++
++/// This IncreaseSumInternal() overload supports a larger variety of types.
++/// \returns a non-overflowing sum of the two arguments (or nothing)
++/// \returns nothing if at least one of the arguments is negative
++/// \prec at least one of the argument types is signed
++template <typename S, typename A, typename B, std::enable_if_t<!AllUnsigned<A, B>::value, int> = 0>
++std::pair<S, bool> constexpr
++IncreaseSumInternal(const A a, const B b) {
++    AssertNaturalType<S>();
++    AssertNaturalType<A>();
++    AssertNaturalType<B>();
++
++    // we should only be called by IncreaseSum() that does integer promotion
++    static_assert(std::is_same<A, decltype(+a)>::value, "a will not be promoted");
++    static_assert(std::is_same<B, decltype(+b)>::value, "b will not be promoted");
++
++    return
++        // We could support a non-under/overflowing sum of negative numbers, but
++        // our callers use negative values specially (e.g., for do-not-use or
++        // do-not-limit settings) and are not supposed to do math with them.
++        (a < 0 || b < 0) ? std::make_pair(S(), false) :
++        // To avoid undefined behavior of signed overflow, we must not compute
++        // the raw a+b sum if it may overflow. When A is not B, a or b undergoes
++        // (safe for non-negatives) integer conversion in these expressions, so
++        // we do not know the resulting a+b type AB and its maximum. We must
++        // also detect subsequent casting-to-S overflows.
++        // Overflow condition: (a + b > maxAB) or (a + b > maxS).
++        // A is an integer promotion of S, so maxS <= maxA <= maxAB.
++        // Since maxS <= maxAB, it is sufficient to just check: a + b > maxS,
++        // which is the same as the overflow-safe condition here: maxS - a < b.
++        // Finally, (maxS - a) cannot overflow because a is not negative and
++        // cannot underflow because a is a promotion of s: 0 <= a <= maxS.
++        Less(std::numeric_limits<S>::max() - a, b) ? std::make_pair(S(), false) :
++        std::make_pair(S(a + b), true);
++}
++
++/// argument pack expansion termination for IncreaseSum()
++template <typename S, typename T>
++std::pair<S, bool>
++IncreaseSum(const S s, const T t)
++{
++    // Force (always safe) integer promotions now, to give std::enable_if_t<>
++    // promoted types instead of entering IncreaseSumInternal<AllUnsigned>(s,t)
++    // but getting a _signed_ promoted value of s or t in s + t.
++    return IncreaseSumInternal<S>(+s, +t);
++}
++
++/// \returns a non-overflowing sum of the arguments (or nothing)
++template <typename S, typename T, typename... Args>
++std::pair<S, bool>
++IncreaseSum(const S sum, const T t, const Args... args) {
++    const auto head = IncreaseSum(sum, t);
++    if (head.second) {
++        return IncreaseSum(head.first, args...);
++    } else {
++        // std::optional() triggers bogus -Wmaybe-uninitialized warnings in GCC v10.3
++        return std::make_pair(S(), false);
++    }
++}
++
++/// \returns an exact, non-overflowing sum of the arguments (or nothing)
++template <typename SummationType, typename... Args>
++std::pair<SummationType, bool>
++NaturalSum(const Args... args) {
++    return IncreaseSum<SummationType>(0, args...);
++}
++
++/// Safely resets the given variable to NaturalSum() of the given arguments.
++/// If the sum overflows, resets to variable's maximum possible value.
++/// \returns the new variable value (like an assignment operator would)
++template <typename S, typename... Args>
++S
++SetToNaturalSumOrMax(S &var, const Args... args)
++{
++    const auto sum = NaturalSum<S>(args...);
++    var = sum.second ? sum.first : std::numeric_limits<S>::max();
++    return var;
++}
++
++/// converts a given non-negative integer into an integer of a given type
++/// without loss of information or undefined behavior
++template <typename Result, typename Source>
++Result
++NaturalCast(const Source s)
++{
++    const auto result = NaturalSum<Result>(s);
++    assert(result.second); // by contract, s fits into Result without loss
++    return result.first;
++}
++
+ #endif /* _SQUID_SRC_SQUIDMATH_H */
+ 
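Taken together, these helpers form a pair-based stand-in for the std::optional API used by later Squid releases: `.second` flags whether `.first` holds a valid sum. A usage sketch under that assumption, with this header included (values illustrative):

    #include <cstdint>
    #include <iostream>

    int main()
    {
        uint32_t total = 0;
        SetToNaturalSumOrMax(total, 4000000000u, 500000000u); // overflows uint32_t
        std::cout << total << '\n'; // prints 4294967295, the clamped maximum

        const auto sum = NaturalSum<uint16_t>(40000, 30000);
        if (!sum.second)
            std::cout << "70000 does not fit into uint16_t\n";
        return 0;
    }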
+diff --git a/src/Store.h b/src/Store.h
+index 3eb6b84..2475fe0 100644
+--- a/src/Store.h
++++ b/src/Store.h
+@@ -49,6 +49,9 @@ public:
+     StoreEntry();
+     virtual ~StoreEntry();
+ 
++    MemObject &mem() { assert(mem_obj); return *mem_obj; }
++    const MemObject &mem() const { assert(mem_obj); return *mem_obj; }
++
+     virtual HttpReply const *getReply() const;
+     virtual void write (StoreIOBuffer);
+ 
+diff --git a/src/StoreClient.h b/src/StoreClient.h
+index 65472d8..942f9fc 100644
+--- a/src/StoreClient.h
++++ b/src/StoreClient.h
+@@ -9,11 +9,13 @@
+ #ifndef SQUID_STORECLIENT_H
+ #define SQUID_STORECLIENT_H
+ 
++#include "base/AsyncCall.h"
+ #include "dlink.h"
++#include "store/ParsingBuffer.h"
+ #include "StoreIOBuffer.h"
+ #include "StoreIOState.h"
+ 
+-typedef void STCB(void *, StoreIOBuffer);   /* store callback */
++using STCB = void (void *, StoreIOBuffer);   /* store callback */
+ 
+ class StoreEntry;
+ 
+@@ -39,17 +41,34 @@ class store_client
+ public:
+     store_client(StoreEntry *);
+     ~store_client();
+-    bool memReaderHasLowerOffset(int64_t) const;
++
++    /// the client will not use HTTP response bytes with lower offsets (if any)
++    auto discardableHttpEnd() const { return discardableHttpEnd_; }
++
+     int getType() const;
+-    void fail();
+-    void callback(ssize_t len, bool error = false);
++
++    /// React to the end of reading the response from disk. There will be no
++    /// more readHeader() and readBody() callbacks for the current storeRead()
++    /// swapin after this notification.
++    void noteSwapInDone(bool error);
++
+     void doCopy (StoreEntry *e);
+     void readHeader(const char *buf, ssize_t len);
+     void readBody(const char *buf, ssize_t len);
++
++    /// Request StoreIOBuffer-described response data via an asynchronous STCB
++    /// callback. At most one outstanding request is allowed per store_client.
+     void copy(StoreEntry *, StoreIOBuffer, STCB *, void *);
++
+     void dumpStats(MemBuf * output, int clientNumber) const;
+ 
+-    int64_t cmp_offset;
++    // TODO: When STCB gets a dedicated Answer type, move this info there.
++    /// Whether the last successful storeClientCopy() answer was known to
++    /// contain the last body bytes of the HTTP response
++    /// \retval true requesting bytes at higher offsets is futile
++    /// \sa STCB
++    bool atEof() const { return atEof_; }
++
+ #if STORE_CLIENT_LIST_DEBUG
+ 
+     void *owner;
+@@ -59,33 +78,86 @@ public:
+     StoreIOState::Pointer swapin_sio;
+ 
+     struct {
++        /// whether we are expecting a response to be swapped in from disk
++        /// (i.e. whether async storeRead() is currently in progress)
++        // TODO: a better name reflecting the 'in' scope of the flag
+         bool disk_io_pending;
++
++        /// whether the store_client::doCopy()-initiated STCB sequence is
++        /// currently in progress
+         bool store_copying;
+-        bool copy_event_pending;
+     } flags;
+ 
+ #if USE_DELAY_POOLS
+     DelayId delayId;
++
++    /// The maximum number of bytes the Store client can read/copy next without
++    /// overflowing its buffer and without violating delay pool limits. Store
++    /// I/O is not rate-limited, but we assume that the same number of bytes may
++    /// be read from the Squid-to-server connection that may be rate-limited.
++    int bytesWanted() const;
++
+     void setDelayId(DelayId delay_id);
+ #endif
+ 
+     dlink_node node;
+-    /* Below here is private - do no alter outside storeClient calls */
+-    StoreIOBuffer copyInto;
+ 
+ private:
+-    bool moreToSend() const;
++    bool moreToRead() const;
++    bool canReadFromMemory() const;
++    bool answeredOnce() const { return answers >= 1; }
++    bool sendingHttpHeaders() const;
++    int64_t nextHttpReadOffset() const;
+ 
+     void fileRead();
+     void scheduleDiskRead();
+-    void scheduleMemRead();
++    void readFromMemory();
+     void scheduleRead();
+     bool startSwapin();
+     bool unpackHeader(char const *buf, ssize_t len);
++    void handleBodyFromDisk();
++    void maybeWriteFromDiskToMemory(const StoreIOBuffer &);
++
++    bool parseHttpHeadersFromDisk();
++    bool tryParsingHttpHeaders();
++    void skipHttpHeadersFromDisk();
++
++    void fail();
++    void callback(ssize_t);
++    void noteCopiedBytes(size_t);
++    void noteNews();
++    void finishCallback();
++    static void FinishCallback(store_client *);
+ 
+     int type;
+     bool object_ok;
+ 
++    /// \copydoc atEof()
++    bool atEof_;
++
++    /// Storage and metadata associated with the current copy() request. Ought
++    /// to be ignored when not answering a copy() request.
++    /// * copyInto.offset is the requested HTTP response body offset;
++    /// * copyInto.data is the client-owned, client-provided result buffer;
++    /// * copyInto.length is the size of the .data result buffer;
++    /// * copyInto.flags are unused by this class.
++    StoreIOBuffer copyInto;
++
++    // TODO: Convert to uint64_t after fixing mem_hdr::endOffset() and friends.
++    /// \copydoc discardableHttpEnd()
++    int64_t discardableHttpEnd_ = 0;
++
++    /// the total number of finishCallback() calls
++    uint64_t answers;
++
++    /// Accumulates raw bytes read from Store while answering the current copy()
++    /// request. Buffer contents depends on the source and parsing stage; it may
++    /// hold (parts of) swap metadata, HTTP response headers, and/or HTTP
++    /// response body bytes.
++    std::pair<Store::ParsingBuffer, bool> parsingBuffer = std::make_pair(Store::ParsingBuffer(), false);
++
++    StoreIOBuffer lastDiskRead; ///< buffer used for the last storeRead() call
++
+     /* Until we finish stuffing code into store_client */
+ 
+ public:
+@@ -97,6 +169,7 @@ public:
+         bool pending() const;
+         STCB *callback_handler;
+         void *callback_data;
++        AsyncCall::Pointer notifier;
+     } _callback;
+ };
+ 
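The reworked contract allows one outstanding copy() request per store_client, answered by a single asynchronous STCB callback; end-of-response is now signalled via atEof() rather than a zero-length read. A sketch of a conforming caller (handler and variable names are illustrative):

    // matches `using STCB = void (void *, StoreIOBuffer)` declared above
    static void myHandleReply(void *data, StoreIOBuffer result)
    {
        if (result.flags.error)
            return; // this copy() failed; do not request more bytes
        // consume result.length bytes at result.offset; if more bytes are
        // expected, issue the next request with an advanced offset, e.g.:
        // storeClientCopy(sc, entry, nextBuffer, myHandleReply, data);
    }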
+diff --git a/src/StoreIOBuffer.h b/src/StoreIOBuffer.h
+index 009aafe..ad1c491 100644
+--- a/src/StoreIOBuffer.h
++++ b/src/StoreIOBuffer.h
+@@ -43,6 +43,9 @@ public:
+         return Range(offset, offset + length);
+     }
+ 
++    /// convenience method for changing the offset of a being-configured buffer
++    StoreIOBuffer &positionAt(const int64_t newOffset) { offset = newOffset; return *this; }
++
+     void dump() const {
+         if (fwrite(data, length, 1, stderr)) {}
+         if (fwrite("\n", 1, 1, stderr)) {}
+diff --git a/src/acl/Asn.cc b/src/acl/Asn.cc
+index 94ec862..07353d6 100644
+--- a/src/acl/Asn.cc
++++ b/src/acl/Asn.cc
+@@ -16,20 +16,22 @@
+ #include "acl/DestinationIp.h"
+ #include "acl/SourceAsn.h"
+ #include "acl/Strategised.h"
++#include "base/CharacterSet.h"
+ #include "FwdState.h"
+ #include "HttpReply.h"
+ #include "HttpRequest.h"
+ #include "ipcache.h"
+ #include "MasterXaction.h"
+ #include "mgr/Registration.h"
++#include "parser/Tokenizer.h"
+ #include "radix.h"
+ #include "RequestFlags.h"
++#include "sbuf/SBuf.h"
+ #include "SquidConfig.h"
+ #include "Store.h"
+ #include "StoreClient.h"
+ 
+ #define WHOIS_PORT 43
+-#define AS_REQBUF_SZ    4096
+ 
+ /* BEGIN of definitions for radix tree entries */
+ 
+@@ -70,33 +72,18 @@ class ASState
+     CBDATA_CLASS(ASState);
+ 
+ public:
+-    ASState();
++    ASState() = default;
+     ~ASState();
+ 
+     StoreEntry *entry;
+     store_client *sc;
+     HttpRequest::Pointer request;
+     int as_number;
+-    int64_t offset;
+-    int reqofs;
+-    char reqbuf[AS_REQBUF_SZ];
+-    bool dataRead;
++    Store::ParsingBuffer parsingBuffer;
+ };
+ 
+ CBDATA_CLASS_INIT(ASState);
+ 
+-ASState::ASState() :
+-    entry(NULL),
+-    sc(NULL),
+-    request(NULL),
+-    as_number(0),
+-    offset(0),
+-    reqofs(0),
+-    dataRead(false)
+-{
+-    memset(reqbuf, 0, AS_REQBUF_SZ);
+-}
+-
+ ASState::~ASState()
+ {
+     debugs(53, 3, entry->url());
+@@ -112,7 +99,7 @@ struct rtentry_t {
+     m_ADDR e_mask;
+ };
+ 
+-static int asnAddNet(char *, int);
++static int asnAddNet(const SBuf &, int);
+ 
+ static void asnCacheStart(int as);
+ 
+@@ -256,8 +243,7 @@ asnCacheStart(int as)
+     }
+ 
+     asState->entry = e;
+-    StoreIOBuffer readBuffer (AS_REQBUF_SZ, asState->offset, asState->reqbuf);
+-    storeClientCopy(asState->sc, e, readBuffer, asHandleReply, asState);
++    storeClientCopy(asState->sc, e, asState->parsingBuffer.makeInitialSpace(), asHandleReply, asState);
+ }
+ 
+ static void
+@@ -265,13 +251,8 @@ asHandleReply(void *data, StoreIOBuffer result)
+ {
+     ASState *asState = (ASState *)data;
+     StoreEntry *e = asState->entry;
+-    char *s;
+-    char *t;
+-    char *buf = asState->reqbuf;
+-    int leftoversz = -1;
+ 
+-    debugs(53, 3, "asHandleReply: Called with size=" << (unsigned int)result.length);
+-    debugs(53, 3, "asHandleReply: buffer='" << buf << "'");
++    debugs(53, 3, result << " for " << asState->as_number << " with " << *e);
+ 
+     /* First figure out whether we should abort the request */
+ 
+@@ -280,11 +261,7 @@ asHandleReply(void *data, StoreIOBuffer result)
+         return;
+     }
+ 
+-    if (result.length == 0 && asState->dataRead) {
+-        debugs(53, 3, "asHandleReply: Done: " << e->url());
+-        delete asState;
+-        return;
+-    } else if (result.flags.error) {
++    if (result.flags.error) {
+         debugs(53, DBG_IMPORTANT, "asHandleReply: Called with Error set and size=" << (unsigned int) result.length);
+         delete asState;
+         return;
+@@ -294,117 +271,85 @@ asHandleReply(void *data, StoreIOBuffer result)
+         return;
+     }
+ 
+-    /*
+-     * Next, attempt to parse our request
+-     * Remembering that the actual buffer size is retsize + reqofs!
+-     */
+-    s = buf;
++    asState->parsingBuffer.appended(result.data, result.length);
++    Parser::Tokenizer tok(SBuf(asState->parsingBuffer.content().data, asState->parsingBuffer.contentSize()));
++    SBuf address;
++    // Word delimiters in WHOIS ASN replies. RFC 3912 mentions SP, CR, and LF.
++    // Others are added to mimic an earlier isspace()-based implementation.
++    static const auto WhoisSpaces = CharacterSet("ASCII_spaces", " \f\r\n\t\v");
++    while (tok.token(address, WhoisSpaces)) {
++        (void)asnAddNet(address, asState->as_number);
++    }
++    asState->parsingBuffer.consume(tok.parsedSize());
++    const auto leftoverBytes = asState->parsingBuffer.contentSize();
+ 
+-    while ((size_t)(s - buf) < result.length + asState->reqofs && *s != '\0') {
+-        while (*s && xisspace(*s))
+-            ++s;
++    if (asState->sc->atEof()) {
++        if (leftoverBytes)
++            debugs(53, 2, "WHOIS: Discarding the last " << leftoverBytes << " received bytes of a truncated AS response");
++        delete asState;
++        return;
++    }
+ 
+-        for (t = s; *t; ++t) {
+-            if (xisspace(*t))
+-                break;
+-        }
+ 
+-        if (*t == '\0') {
+-            /* oof, word should continue on next block */
+-            break;
+-        }
++    const auto remainingSpace = asState->parsingBuffer.space().positionAt(result.offset + result.length);
+ 
+-        *t = '\0';
+-        debugs(53, 3, "asHandleReply: AS# " << s << " (" << asState->as_number << ")");
+-        asnAddNet(s, asState->as_number);
+-        s = t + 1;
+-        asState->dataRead = true;
++    if (!remainingSpace.length) {
++        Assure(leftoverBytes);
++        debugs(53, DBG_IMPORTANT, "WARNING: Ignoring the tail of a WHOIS AS response" <<
++               " with an unparsable section of " << leftoverBytes <<
++               " bytes ending at offset " << remainingSpace.offset);
++        delete asState;
++        return;
+     }
+ 
+-    /*
+-     * Next, grab the end of the 'valid data' in the buffer, and figure
+-     * out how much data is left in our buffer, which we need to keep
+-     * around for the next request
+-     */
+-    leftoversz = (asState->reqofs + result.length) - (s - buf);
+-
+-    assert(leftoversz >= 0);
+-
+-    /*
+-     * Next, copy the left over data, from s to s + leftoversz to the
+-     * beginning of the buffer
+-     */
+-    memmove(buf, s, leftoversz);
+-
+-    /*
+-     * Next, update our offset and reqofs, and kick off a copy if required
+-     */
+-    asState->offset += result.length;
+-
+-    asState->reqofs = leftoversz;
+-
+-    debugs(53, 3, "asState->offset = " << asState->offset);
+-
+-    if (e->store_status == STORE_PENDING) {
+-        debugs(53, 3, "asHandleReply: store_status == STORE_PENDING: " << e->url()  );
+-        StoreIOBuffer tempBuffer (AS_REQBUF_SZ - asState->reqofs,
+-                                  asState->offset,
+-                                  asState->reqbuf + asState->reqofs);
+-        storeClientCopy(asState->sc,
+-                        e,
+-                        tempBuffer,
+-                        asHandleReply,
+-                        asState);
+-    } else {
+-        StoreIOBuffer tempBuffer;
+-        debugs(53, 3, "asHandleReply: store complete, but data received " << e->url()  );
+-        tempBuffer.offset = asState->offset;
+-        tempBuffer.length = AS_REQBUF_SZ - asState->reqofs;
+-        tempBuffer.data = asState->reqbuf + asState->reqofs;
+-        storeClientCopy(asState->sc,
+-                        e,
+-                        tempBuffer,
+-                        asHandleReply,
+-                        asState);
+-    }
++    const decltype(StoreIOBuffer::offset) stillReasonableOffset = 100000; // an arbitrary limit in bytes
++    if (remainingSpace.offset > stillReasonableOffset) {
++        // stop suspicious accumulation of parsed addresses and/or work
++        debugs(53, DBG_IMPORTANT, "WARNING: Ignoring the tail of a suspiciously large WHOIS AS response" <<
++               " exceeding " << stillReasonableOffset << " bytes");
++        delete asState;
++        return;
++    }
++
++    storeClientCopy(asState->sc, e, remainingSpace, asHandleReply, asState);
+ }
+ 
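Note that tok.token() only returns words followed by a delimiter, so tok.parsedSize() excludes a trailing partial word, which consume() then keeps buffered for the next read. A sketch with an illustrative reply fragment (WhoisSpaces as defined above):

    Parser::Tokenizer tok(SBuf("64512 64513 645")); // "645" cut off mid-word
    SBuf address;
    while (tok.token(address, WhoisSpaces)) {
        // sees "64512" and "64513"; the partial "645" is not returned
    }
    // tok.parsedSize() covers only the complete words and their delimiters,
    // so the leftover "645" survives until more bytes arrive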
+ /**
+  * add a network (addr, mask) to the radix tree, with matching AS number
+  */
+ static int
+-asnAddNet(char *as_string, int as_number)
++asnAddNet(const SBuf &addressAndMask, const int as_number)
+ {
+     struct squid_radix_node *rn;
+     CbDataList<int> **Tail = NULL;
+     CbDataList<int> *q = NULL;
+     as_info *asinfo = NULL;
+ 
+-    Ip::Address mask;
+-    Ip::Address addr;
+-    char *t;
+-    int bitl;
+-
+-    t = strchr(as_string, '/');
+-
+-    if (t == NULL) {
++    static const CharacterSet NonSlashSet = CharacterSet("slash", "/").complement("non-slash");
++    Parser::Tokenizer tok(addressAndMask);
++    SBuf addressToken;
++    if (!(tok.prefix(addressToken, NonSlashSet) && tok.skip('/'))) {
+         debugs(53, 3, "asnAddNet: failed, invalid response from whois server.");
+         return 0;
+     }
+ 
+-    *t = '\0';
+-    addr = as_string;
+-    bitl = atoi(t + 1);
+-
+-    if (bitl < 0)
+-        bitl = 0;
++    const Ip::Address addr = addressToken.c_str();
+ 
+     // INET6 TODO : find a better way of identifying the base IPA family for mask than this.
+-    t = strchr(as_string, '.');
++    const auto addrFamily = (addressToken.find('.') != SBuf::npos) ? AF_INET : AF_INET6;
+ 
+     // generate Netbits Format Mask
++    Ip::Address mask;
+     mask.setNoAddr();
+-    mask.applyMask(bitl, (t!=NULL?AF_INET:AF_INET6) );
++    int64_t bitl = 0;
++    if (tok.int64(bitl, 10, false))
++        mask.applyMask(bitl, addrFamily);
+ 
+     debugs(53, 3, "asnAddNet: called for " << addr << "/" << mask );
+ 
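The same Tokenizer also replaces the old strchr()/atoi() splitting of "addr/len" words, so a malformed word now fails the parse cleanly instead of being partially consumed. Sketch (input illustrative, NonSlashSet as defined above):

    Parser::Tokenizer tok(SBuf("192.0.2.0/24"));
    SBuf addressToken;
    int64_t bitl = 0;
    if (tok.prefix(addressToken, NonSlashSet) && tok.skip('/') &&
            tok.int64(bitl, 10, false)) {
        // addressToken == "192.0.2.0", bitl == 24
    }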
+diff --git a/src/acl/FilledChecklist.cc b/src/acl/FilledChecklist.cc
+index 9826c24..33eeb67 100644
+--- a/src/acl/FilledChecklist.cc
++++ b/src/acl/FilledChecklist.cc
+@@ -116,7 +116,6 @@ ACLFilledChecklist::verifyAle() const
+     if (reply && !al->reply) {
+         showDebugWarning("HttpReply object");
+         al->reply = reply;
+-        HTTPMSGLOCK(al->reply);
+     }
+ 
+ #if USE_IDENT
+diff --git a/src/adaptation/icap/ModXact.cc b/src/adaptation/icap/ModXact.cc
+index 370f077..2bcc917 100644
+--- a/src/adaptation/icap/ModXact.cc
++++ b/src/adaptation/icap/ModXact.cc
+@@ -1292,11 +1292,8 @@ void Adaptation::Icap::ModXact::finalizeLogInfo()
+     al.adapted_request = adapted_request_;
+     HTTPMSGLOCK(al.adapted_request);
+ 
+-    if (adapted_reply_) {
+-        al.reply = adapted_reply_;
+-        HTTPMSGLOCK(al.reply);
+-    } else
+-        al.reply = NULL;
++    // XXX: This reply (and other ALE members!) may have been needed earlier.
++    al.reply = adapted_reply_;
+ 
+     if (h->rfc931.size())
+         al.cache.rfc931 = h->rfc931.termedBuf();
+@@ -1331,12 +1328,6 @@ void Adaptation::Icap::ModXact::finalizeLogInfo()
+         if (replyHttpBodySize >= 0)
+             al.cache.highOffset = replyHttpBodySize;
+         //don't set al.cache.objectSize because it hasn't exist yet
+-
+-        MemBuf mb;
+-        mb.init();
+-        adapted_reply_->header.packInto(&mb);
+-        al.headers.reply = xstrdup(mb.buf);
+-        mb.clean();
+     }
+     prepareLogWithRequestDetails(adapted_request_, alep);
+     Xaction::finalizeLogInfo();
+diff --git a/src/adaptation/icap/icap_log.cc b/src/adaptation/icap/icap_log.cc
+index ecc4baf..6bb5a6d 100644
+--- a/src/adaptation/icap/icap_log.cc
++++ b/src/adaptation/icap/icap_log.cc
+@@ -62,7 +62,7 @@ void icapLogLog(AccessLogEntry::Pointer &al)
+     if (IcapLogfileStatus == LOG_ENABLE) {
+         ACLFilledChecklist checklist(NULL, al->adapted_request, NULL);
+         if (al->reply) {
+-            checklist.reply = al->reply;
++            checklist.reply = al->reply.getRaw();
+             HTTPMSGLOCK(checklist.reply);
+         }
+         accessLogLogTo(Config.Log.icaplogs, al, &checklist);
+diff --git a/src/base/Assure.cc b/src/base/Assure.cc
+new file mode 100644
+index 0000000..cb69fc5
+--- /dev/null
++++ b/src/base/Assure.cc
+@@ -0,0 +1,24 @@
++/*
++ * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
++ *
++ * Squid software is distributed under GPLv2+ license and includes
++ * contributions from numerous individuals and organizations.
++ * Please see the COPYING and CONTRIBUTORS files for details.
++ */
++
++#include "squid.h"
++#include "base/Assure.h"
++#include "base/TextException.h"
++#include "sbuf/Stream.h"
++
++[[ noreturn ]] void
++ReportAndThrow_(const int debugLevel, const char *description, const SourceLocation &location)
++{
++    const TextException ex(description, location);
++    const auto label = debugLevel <= DBG_IMPORTANT ? "ERROR: Squid BUG: " : "";
++    // TODO: Consider also printing the number of BUGs reported so far. It would
++    // require GC, but we could even print the number of same-location reports.
++    debugs(0, debugLevel, label << ex);
++    throw ex;
++}
++
+diff --git a/src/base/Assure.h b/src/base/Assure.h
+new file mode 100644
+index 0000000..bb571d2
+--- /dev/null
++++ b/src/base/Assure.h
+@@ -0,0 +1,52 @@
++/*
++ * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
++ *
++ * Squid software is distributed under GPLv2+ license and includes
++ * contributions from numerous individuals and organizations.
++ * Please see the COPYING and CONTRIBUTORS files for details.
++ */
++
++#ifndef SQUID_SRC_BASE_ASSURE_H
++#define SQUID_SRC_BASE_ASSURE_H
++
++#include "base/Here.h"
++
++/// Reports the description (at the given debugging level) and throws
++/// the corresponding exception. Reduces compiled code size of Assure() and
++/// Must() callers. Do not call directly; use Assure() instead.
++/// \param description explains the condition (i.e. what MUST happen)
++[[ noreturn ]] void ReportAndThrow_(int debugLevel, const char *description, const SourceLocation &);
++
++/// Calls ReportAndThrow() if needed. Reduces caller code duplication.
++/// Do not call directly; use Assure() instead.
++/// \param description c-string explaining the condition (i.e. what MUST happen)
++#define Assure_(debugLevel, condition, description, location) \
++    while (!(condition)) \
++        ReportAndThrow_((debugLevel), (description), (location))
++
++#if !defined(NDEBUG)
++
++/// Like assert() but throws an exception instead of aborting the process. Use
++/// this macro to detect code logic mistakes (i.e. bugs) where aborting the
++/// current AsyncJob or a similar task is unlikely to jeopardize Squid service
++/// integrity. For example, this macro is _not_ appropriate for detecting bugs
++/// that indicate a dangerous global state corruption which may go unnoticed by
++/// other jobs after the current job or task is aborted.
++#define Assure(condition) \
++        Assure2((condition), #condition)
++
++/// Like Assure() but allows the caller to customize the exception message.
++/// \param description string literal describing the condition (i.e. what MUST happen)
++#define Assure2(condition, description) \
++        Assure_(0, (condition), ("assurance failed: " description), Here())
++
++#else
++
++/* do-nothing implementations for NDEBUG builds */
++#define Assure(condition) ((void)0)
++#define Assure2(condition, description) ((void)0)
++
++#endif /* NDEBUG */
++
++#endif /* SQUID_SRC_BASE_ASSURE_H */
++
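Unlike assert(), a failed Assure() throws a TextException, so only the current job or transaction unwinds while the process keeps serving traffic. A usage sketch (the surrounding function is illustrative):

    void processSlice(const size_t sliceSize, const size_t spaceLeft)
    {
        Assure(sliceSize > 0); // throws "assurance failed: sliceSize > 0"
        Assure2(sliceSize <= spaceLeft, "slice fits into remaining space");
        // safe to copy sliceSize bytes now
    }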
+diff --git a/src/base/Makefile.am b/src/base/Makefile.am
+index 9b0f4cf..d5f4c01 100644
+--- a/src/base/Makefile.am
++++ b/src/base/Makefile.am
+@@ -11,6 +11,8 @@ include $(top_srcdir)/src/TestHeaders.am
+ noinst_LTLIBRARIES = libbase.la
+ 
+ libbase_la_SOURCES = \
++	Assure.cc \
++	Assure.h \
+ 	AsyncCall.cc \
+ 	AsyncCall.h \
+ 	AsyncCallQueue.cc \
+diff --git a/src/base/Makefile.in b/src/base/Makefile.in
+index 90a4f5b..6a83aa4 100644
+--- a/src/base/Makefile.in
++++ b/src/base/Makefile.in
+@@ -163,7 +163,7 @@ CONFIG_CLEAN_FILES =
+ CONFIG_CLEAN_VPATH_FILES =
+ LTLIBRARIES = $(noinst_LTLIBRARIES)
+ libbase_la_LIBADD =
+-am_libbase_la_OBJECTS = AsyncCall.lo AsyncCallQueue.lo AsyncJob.lo \
++am_libbase_la_OBJECTS = Assure.lo AsyncCall.lo AsyncCallQueue.lo AsyncJob.lo \
+ 	CharacterSet.lo File.lo Here.lo RegexPattern.lo \
+ 	RunnersRegistry.lo TextException.lo
+ libbase_la_OBJECTS = $(am_libbase_la_OBJECTS)
+@@ -186,7 +186,7 @@ am__v_at_1 =
+ DEFAULT_INCLUDES = 
+ depcomp = $(SHELL) $(top_srcdir)/cfgaux/depcomp
+ am__maybe_remake_depfiles = depfiles
+-am__depfiles_remade = ./$(DEPDIR)/AsyncCall.Plo \
++am__depfiles_remade = ./$(DEPDIR)/Assure.Plo ./$(DEPDIR)/AsyncCall.Plo \
+ 	./$(DEPDIR)/AsyncCallQueue.Plo ./$(DEPDIR)/AsyncJob.Plo \
+ 	./$(DEPDIR)/CharacterSet.Plo ./$(DEPDIR)/File.Plo \
+ 	./$(DEPDIR)/Here.Plo ./$(DEPDIR)/RegexPattern.Plo \
+@@ -729,6 +729,8 @@ COMPAT_LIB = $(top_builddir)/compat/libcompatsquid.la $(LIBPROFILER)
+ subst_perlshell = sed -e 's,[@]PERL[@],$(PERL),g' <$(srcdir)/$@.pl.in >$@ || ($(RM) -f $@ ; exit 1)
+ noinst_LTLIBRARIES = libbase.la
+ libbase_la_SOURCES = \
++	Assure.cc \
++	Assure.h \
+ 	AsyncCall.cc \
+ 	AsyncCall.h \
+ 	AsyncCallQueue.cc \
+@@ -827,6 +829,7 @@ mostlyclean-compile:
+ distclean-compile:
+ 	-rm -f *.tab.c
+ 
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Assure.Plo@am__quote@ # am--include-marker
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/AsyncCall.Plo@am__quote@ # am--include-marker
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/AsyncCallQueue.Plo@am__quote@ # am--include-marker
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/AsyncJob.Plo@am__quote@ # am--include-marker
+@@ -1167,7 +1170,8 @@ clean-am: clean-checkPROGRAMS clean-generic clean-libtool \
+ 	clean-noinstLTLIBRARIES mostlyclean-am
+ 
+ distclean: distclean-am
+-		-rm -f ./$(DEPDIR)/AsyncCall.Plo
++		-rm -f ./$(DEPDIR)/Assure.Plo
++	-rm -f ./$(DEPDIR)/AsyncCall.Plo
+ 	-rm -f ./$(DEPDIR)/AsyncCallQueue.Plo
+ 	-rm -f ./$(DEPDIR)/AsyncJob.Plo
+ 	-rm -f ./$(DEPDIR)/CharacterSet.Plo
+@@ -1221,7 +1225,8 @@ install-ps-am:
+ installcheck-am:
+ 
+ maintainer-clean: maintainer-clean-am
+-		-rm -f ./$(DEPDIR)/AsyncCall.Plo
++		-rm -f ./$(DEPDIR)/Assure.Plo
++	-rm -f ./$(DEPDIR)/AsyncCall.Plo
+ 	-rm -f ./$(DEPDIR)/AsyncCallQueue.Plo
+ 	-rm -f ./$(DEPDIR)/AsyncJob.Plo
+ 	-rm -f ./$(DEPDIR)/CharacterSet.Plo
+diff --git a/src/base/TextException.cc b/src/base/TextException.cc
+index 5cfeb26..f895ae9 100644
+--- a/src/base/TextException.cc
++++ b/src/base/TextException.cc
+@@ -58,6 +58,13 @@ TextException::what() const throw()
+     return result.what();
+ }
+ 
++std::ostream &
++operator <<(std::ostream &os, const TextException &ex)
++{
++    ex.print(os);
++    return os;
++}
++
+ std::ostream &
+ CurrentException(std::ostream &os)
+ {
+diff --git a/src/base/TextException.h b/src/base/TextException.h
+index 6a79536..1f9ca11 100644
+--- a/src/base/TextException.h
++++ b/src/base/TextException.h
+@@ -9,6 +9,7 @@
+ #ifndef SQUID__TEXTEXCEPTION_H
+ #define SQUID__TEXTEXCEPTION_H
+ 
++#include "base/Assure.h"
+ #include "base/Here.h"
+ 
+ #include <exception>
+@@ -51,11 +52,12 @@ public:
+ /// prints active (i.e., thrown but not yet handled) exception
+ std::ostream &CurrentException(std::ostream &);
+ 
++/// efficiently prints TextException
++std::ostream &operator <<(std::ostream &, const TextException &);
++
+ /// legacy convenience macro; it is not difficult to type Here() now
+ #define TexcHere(msg) TextException((msg), Here())
+ 
+-/// Like assert() but throws an exception instead of aborting the process
+-/// and allows the caller to specify a custom exception message.
+ #define Must2(condition, message) \
+     do { \
+         if (!(condition)) { \
+@@ -65,8 +67,13 @@ std::ostream &CurrentException(std::ostream &);
+         } \
+     } while (/*CONSTCOND*/ false)
+ 
++/// Like assert() but throws an exception instead of aborting the process
++/// and allows the caller to specify a custom exception message.
++#define Must3(condition, description, location) \
++    Assure_(3, (condition), ("check failed: " description), (location))
++
+ /// Like assert() but throws an exception instead of aborting the process.
+-#define Must(condition) Must2((condition), "check failed: " #condition)
++#define Must(condition) Must3((condition), #condition, Here())
+ 
+ /// Reports and swallows all exceptions to prevent compiler warnings and runtime
+ /// errors related to throwing class destructors. Should be used for most dtors.
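With this change, Must() routes through the same Assure_() machinery at debugging level 3: a failed check logs quietly and throws, instead of expanding the heavier Must2() do/while block at every call site. A sketch of the observable behavior (caller illustrative):

    void setBodyOffset(const int64_t offset)
    {
        // on failure: logs at level 3 and throws
        // TextException("check failed: offset >= 0")
        Must(offset >= 0);
    }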
+diff --git a/src/clientStream.cc b/src/clientStream.cc
+index 04d89c0..bd5dd09 100644
+--- a/src/clientStream.cc
++++ b/src/clientStream.cc
+@@ -154,8 +154,7 @@ clientStreamCallback(clientStreamNode * thisObject, ClientHttpRequest * http,
+     assert(thisObject && http && thisObject->node.next);
+     next = thisObject->next();
+ 
+-    debugs(87, 3, "clientStreamCallback: Calling " << next->callback << " with cbdata " <<
+-           next->data.getRaw() << " from node " << thisObject);
++    debugs(87, 3, thisObject << " gives " << next->data << ' ' << replyBuffer);
+     next->callback(next, http, rep, replyBuffer);
+ }
+ 
+diff --git a/src/client_side.cc b/src/client_side.cc
+index ab393e4..c46a845 100644
+--- a/src/client_side.cc
++++ b/src/client_side.cc
+@@ -429,7 +429,7 @@ ClientHttpRequest::logRequest()
+         // The al->notes and request->notes must point to the same object.
+         (void)SyncNotes(*al, *request);
+         for (auto i = Config.notes.begin(); i != Config.notes.end(); ++i) {
+-            if (const char *value = (*i)->match(request, al->reply, al)) {
++            if (const char *value = (*i)->match(request, al->reply.getRaw(), al)) {
+                 NotePairs &notes = SyncNotes(*al, *request);
+                 notes.add((*i)->key.termedBuf(), value);
+                 debugs(33, 3, (*i)->key.termedBuf() << " " << value);
+@@ -439,7 +439,7 @@ ClientHttpRequest::logRequest()
+ 
+     ACLFilledChecklist checklist(NULL, request, NULL);
+     if (al->reply) {
+-        checklist.reply = al->reply;
++        checklist.reply = al->reply.getRaw();
+         HTTPMSGLOCK(checklist.reply);
+     }
+ 
+@@ -457,7 +457,7 @@ ClientHttpRequest::logRequest()
+         ACLFilledChecklist statsCheck(Config.accessList.stats_collection, request, NULL);
+         statsCheck.al = al;
+         if (al->reply) {
+-            statsCheck.reply = al->reply;
++            statsCheck.reply = al->reply.getRaw();
+             HTTPMSGLOCK(statsCheck.reply);
+         }
+         updatePerformanceCounters = statsCheck.fastCheck().allowed();
+@@ -3844,6 +3844,11 @@ ConnStateData::finishDechunkingRequest(bool withSuccess)
+ void
+ ConnStateData::sendControlMsg(HttpControlMsg msg)
+ {
++    if (const auto context = pipeline.front()) {
++        if (context->http)
++            context->http->al->reply = msg.reply;
++    }
++
+     if (!isOpen()) {
+         debugs(33, 3, HERE << "ignoring 1xx due to earlier closure");
+         return;
+diff --git a/src/client_side_reply.cc b/src/client_side_reply.cc
+index c919af4..fea5ecb 100644
+--- a/src/client_side_reply.cc
++++ b/src/client_side_reply.cc
+@@ -34,6 +34,7 @@
+ #include "RequestFlags.h"
+ #include "SquidConfig.h"
+ #include "SquidTime.h"
++#include "SquidMath.h"
+ #include "Store.h"
+ #include "StrList.h"
+ #include "tools.h"
+@@ -76,11 +77,7 @@ clientReplyContext::clientReplyContext(ClientHttpRequest *clientContext) :
+     purgeStatus(Http::scNone),
+     lookingforstore(0),
+     http(cbdataReference(clientContext)),
+-    headers_sz(0),
+     sc(NULL),
+-    old_reqsize(0),
+-    reqsize(0),
+-    reqofs(0),
+ #if USE_CACHE_DIGESTS
+     lookup_type(NULL),
+ #endif
+@@ -166,8 +163,6 @@ void clientReplyContext::setReplyToStoreEntry(StoreEntry *entry, const char *rea
+ #if USE_DELAY_POOLS
+     sc->setDelayId(DelayId::DelayClient(http));
+ #endif
+-    reqofs = 0;
+-    reqsize = 0;
+     if (http->request)
+         http->request->ignoreRange(reason);
+     flags.storelogiccomplete = 1;
+@@ -206,13 +201,10 @@ clientReplyContext::saveState()
+     old_sc = sc;
+     old_lastmod = http->request->lastmod;
+     old_etag = http->request->etag;
+-    old_reqsize = reqsize;
+-    tempBuffer.offset = reqofs;
++
+     /* Prevent accessing the now saved entries */
+     http->storeEntry(NULL);
+     sc = NULL;
+-    reqsize = 0;
+-    reqofs = 0;
+ }
+ 
+ void
+@@ -223,8 +215,6 @@ clientReplyContext::restoreState()
+     removeClientStoreReference(&sc, http);
+     http->storeEntry(old_entry);
+     sc = old_sc;
+-    reqsize = old_reqsize;
+-    reqofs = tempBuffer.offset;
+     http->request->lastmod = old_lastmod;
+     http->request->etag = old_etag;
+     /* Prevent accessed the old saved entries */
+@@ -232,7 +222,7 @@ clientReplyContext::restoreState()
+     old_sc = NULL;
+     old_lastmod = -1;
+     old_etag.clean();
+-    old_reqsize = 0;
++
+     tempBuffer.offset = 0;
+ }
+ 
+@@ -250,18 +240,27 @@ clientReplyContext::getNextNode() const
+     return (clientStreamNode *)ourNode->node.next->data;
+ }
+ 
+-/* This function is wrong - the client parameters don't include the
+- * header offset
+- */
++/// Request HTTP response headers from Store, to be sent to the given recipient.
++/// That recipient also gets zero, some, or all HTTP response body bytes (into
++/// next()->readBuffer).
+ void
+-clientReplyContext::triggerInitialStoreRead()
++clientReplyContext::triggerInitialStoreRead(STCB recipient)
+ {
+-    /* when confident, 0 becomes reqofs, and then this factors into
+-     * startSendProcess
+-     */
+-    assert(reqofs == 0);
++    Assure(recipient != HandleIMSReply);
++    lastStreamBufferedBytes = StoreIOBuffer(); // storeClientCopy(next()->readBuffer) invalidates
+     StoreIOBuffer localTempBuffer (next()->readBuffer.length, 0, next()->readBuffer.data);
+-    storeClientCopy(sc, http->storeEntry(), localTempBuffer, SendMoreData, this);
++    ::storeClientCopy(sc, http->storeEntry(), localTempBuffer, recipient, this);
++}
++
++/// Request HTTP response body bytes from Store into next()->readBuffer. This
++/// method requests body bytes at readerBuffer.offset and, hence, it should only
++/// be called after we triggerInitialStoreRead() and get the requested HTTP
++/// response headers (using zero offset).
++void
++clientReplyContext::requestMoreBodyFromStore()
++{
++    lastStreamBufferedBytes = StoreIOBuffer(); // storeClientCopy(next()->readBuffer) invalidates
++    ::storeClientCopy(sc, http->storeEntry(), next()->readBuffer, SendMoreData, this);
+ }
+ 
+ /* there is an expired entry in the store.
+@@ -358,30 +357,23 @@ clientReplyContext::processExpired()
+     {
+         /* start counting the length from 0 */
+         StoreIOBuffer localTempBuffer(HTTP_REQBUF_SZ, 0, tempbuf);
+-        storeClientCopy(sc, entry, localTempBuffer, HandleIMSReply, this);
++        // keep lastStreamBufferedBytes: tempbuf is not a Client Stream buffer
++        ::storeClientCopy(sc, entry, localTempBuffer, HandleIMSReply, this);
+     }
+ }
+ 
+ void
+-clientReplyContext::sendClientUpstreamResponse()
++clientReplyContext::sendClientUpstreamResponse(const StoreIOBuffer &upstreamResponse)
+ {
+-    StoreIOBuffer tempresult;
+     removeStoreReference(&old_sc, &old_entry);
+ 
+     if (collapsedRevalidation)
+         http->storeEntry()->clearPublicKeyScope();
+ 
+     /* here the data to send is the data we just received */
+-    tempBuffer.offset = 0;
+-    old_reqsize = 0;
+-    /* sendMoreData tracks the offset as well.
+-     * Force it back to zero */
+-    reqofs = 0;
+     assert(!EBIT_TEST(http->storeEntry()->flags, ENTRY_ABORTED));
+-    /* TODO: provide sendMoreData with the ready parsed reply */
+-    tempresult.length = reqsize;
+-    tempresult.data = tempbuf;
+-    sendMoreData(tempresult);
++
++    sendMoreData(upstreamResponse);
+ }
+ 
+ void
+@@ -398,11 +390,9 @@ clientReplyContext::sendClientOldEntry()
+     restoreState();
+     /* here the data to send is in the next nodes buffers already */
+     assert(!EBIT_TEST(http->storeEntry()->flags, ENTRY_ABORTED));
+-    /* sendMoreData tracks the offset as well.
+-     * Force it back to zero */
+-    reqofs = 0;
+-    StoreIOBuffer tempresult (reqsize, reqofs, next()->readBuffer.data);
+-    sendMoreData(tempresult);
++    Assure(matchesStreamBodyBuffer(lastStreamBufferedBytes));
++    Assure(!lastStreamBufferedBytes.offset);
++    sendMoreData(lastStreamBufferedBytes);
+ }
+ 
+ /* This is the workhorse of the HandleIMSReply callback.
+@@ -416,11 +406,11 @@ clientReplyContext::handleIMSReply(StoreIOBuffer result)
+     if (deleting)
+         return;
+ 
+-    debugs(88, 3, http->storeEntry()->url() << ", " << (long unsigned) result.length << " bytes");
+-
+     if (http->storeEntry() == NULL)
+         return;
+ 
++    debugs(88, 3, http->storeEntry()->url() << " got " << result);
++
+     if (result.flags.error && !EBIT_TEST(http->storeEntry()->flags, ENTRY_ABORTED))
+         return;
+ 
+@@ -433,9 +423,6 @@ clientReplyContext::handleIMSReply(StoreIOBuffer result)
+         return;
+     }
+ 
+-    /* update size of the request */
+-    reqsize = result.length + reqofs;
+-
+     const Http::StatusCode status = http->storeEntry()->getReply()->sline.status();
+ 
+     // request to origin was aborted
+@@ -460,7 +447,7 @@ clientReplyContext::handleIMSReply(StoreIOBuffer result)
+         if (http->request->flags.ims && !old_entry->modifiedSince(http->request->ims, http->request->imslen)) {
+             // forward the 304 from origin
+             debugs(88, 3, "origin replied 304, revalidating existing entry and forwarding 304 to client");
+-            sendClientUpstreamResponse();
++            sendClientUpstreamResponse(result);
+         } else {
+             // send existing entry, it's still valid
+             debugs(88, 3, "origin replied 304, revalidating existing entry and sending " <<
+@@ -484,7 +471,7 @@ clientReplyContext::handleIMSReply(StoreIOBuffer result)
+             http->logType = LOG_TCP_REFRESH_MODIFIED;
+             debugs(88, 3, "origin replied " << status <<
+                    ", replacing existing entry and forwarding to client");
+-            sendClientUpstreamResponse();
++            sendClientUpstreamResponse(result);
+         }
+     }
+ 
+@@ -493,7 +480,7 @@ clientReplyContext::handleIMSReply(StoreIOBuffer result)
+         http->logType = LOG_TCP_REFRESH_FAIL_ERR;
+         debugs(88, 3, "origin replied with error " << status <<
+                ", forwarding to client due to fail_on_validation_err");
+-        sendClientUpstreamResponse();
++        sendClientUpstreamResponse(result);
+     } else {
+         // ignore and let client have old entry
+         http->logType = LOG_TCP_REFRESH_FAIL_OLD;
+@@ -506,13 +493,7 @@ clientReplyContext::handleIMSReply(StoreIOBuffer result)
+ SQUIDCEXTERN CSR clientGetMoreData;
+ SQUIDCEXTERN CSD clientReplyDetach;
+ 
+-/**
+- * clientReplyContext::cacheHit Should only be called until the HTTP reply headers
+- * have been parsed.  Normally this should be a single call, but
+- * it might take more than one.  As soon as we have the headers,
+- * we hand off to clientSendMoreData, processExpired, or
+- * processMiss.
+- */
++/// \copydoc clientReplyContext::cacheHit()
+ void
+ clientReplyContext::CacheHit(void *data, StoreIOBuffer result)
+ {
+@@ -520,11 +501,11 @@ clientReplyContext::CacheHit(void *data, StoreIOBuffer result)
+     context->cacheHit(result);
+ }
+ 
+-/**
+- * Process a possible cache HIT.
+- */
++/// Processes HTTP response headers received from Store on a suspected cache hit
++/// path. May be called several times (e.g., a Vary marker object hit followed
++/// by the corresponding variant hit).
+ void
+-clientReplyContext::cacheHit(StoreIOBuffer result)
++clientReplyContext::cacheHit(const StoreIOBuffer result)
+ {
+     /** Ignore if the HIT object is being deleted. */
+     if (deleting) {
+@@ -536,7 +517,7 @@ clientReplyContext::cacheHit(StoreIOBuffer result)
+ 
+     HttpRequest *r = http->request;
+ 
+-    debugs(88, 3, "clientCacheHit: " << http->uri << ", " << result.length << " bytes");
++    debugs(88, 3, http->uri << " got " << result);
+ 
+     if (http->storeEntry() == NULL) {
+         debugs(88, 3, "clientCacheHit: request aborted");
+@@ -560,20 +541,7 @@ clientReplyContext::cacheHit(StoreIOBuffer result)
+         return;
+     }
+ 
+-    if (result.length == 0) {
+-        debugs(88, 5, "store IO buffer has no content. MISS");
+-        /* the store couldn't get enough data from the file for us to id the
+-         * object
+-         */
+-        /* treat as a miss */
+-        http->logType = LOG_TCP_MISS;
+-        processMiss();
+-        return;
+-    }
+-
+     assert(!EBIT_TEST(e->flags, ENTRY_ABORTED));
+-    /* update size of the request */
+-    reqsize = result.length + reqofs;
+ 
+     /*
+      * Got the headers, now grok them
+@@ -587,6 +555,8 @@ clientReplyContext::cacheHit(StoreIOBuffer result)
+         return;
+     }
+ 
++    noteStreamBufferredBytes(result);
++
+     switch (varyEvaluateMatch(e, r)) {
+ 
+     case VARY_NONE:
+@@ -687,7 +657,7 @@ clientReplyContext::cacheHit(StoreIOBuffer result)
+         return;
+     } else if (r->conditional()) {
+         debugs(88, 5, "conditional HIT");
+-        if (processConditional(result))
++        if (processConditional())
+             return;
+     }
+ 
+@@ -806,7 +776,7 @@ clientReplyContext::processOnlyIfCachedMiss()
+ 
+ /// process conditional request from client
+ bool
+-clientReplyContext::processConditional(StoreIOBuffer &result)
++clientReplyContext::processConditional()
+ {
+     StoreEntry *const e = http->storeEntry();
+ 
+@@ -984,16 +954,7 @@ clientReplyContext::purgeFoundObject(StoreEntry *entry)
+ 
+     http->logType = LOG_TCP_HIT;
+ 
+-    reqofs = 0;
+-
+-    localTempBuffer.offset = http->out.offset;
+-
+-    localTempBuffer.length = next()->readBuffer.length;
+-
+-    localTempBuffer.data = next()->readBuffer.data;
+-
+-    storeClientCopy(sc, http->storeEntry(),
+-                    localTempBuffer, CacheHit, this);
++    triggerInitialStoreRead(CacheHit);
+ }
+ 
+ void
+@@ -1111,16 +1072,10 @@ clientReplyContext::purgeDoPurgeHead(StoreEntry *newEntry)
+ }
+ 
+ void
+-clientReplyContext::traceReply(clientStreamNode * node)
++clientReplyContext::traceReply()
+ {
+-    clientStreamNode *nextNode = (clientStreamNode *)node->node.next->data;
+-    StoreIOBuffer localTempBuffer;
+     createStoreEntry(http->request->method, RequestFlags());
+-    localTempBuffer.offset = nextNode->readBuffer.offset + headers_sz;
+-    localTempBuffer.length = nextNode->readBuffer.length;
+-    localTempBuffer.data = nextNode->readBuffer.data;
+-    storeClientCopy(sc, http->storeEntry(),
+-                    localTempBuffer, SendMoreData, this);
++    triggerInitialStoreRead();
+     http->storeEntry()->releaseRequest();
+     http->storeEntry()->buffer();
+     HttpReply *rep = new HttpReply;
+@@ -1169,16 +1124,16 @@ int
+ clientReplyContext::storeOKTransferDone() const
+ {
+     assert(http->storeEntry()->objectLen() >= 0);
++    const auto headers_sz = http->storeEntry()->mem().baseReply().hdr_sz;
+     assert(http->storeEntry()->objectLen() >= headers_sz);
+-    if (http->out.offset >= http->storeEntry()->objectLen() - headers_sz) {
+-        debugs(88,3,HERE << "storeOKTransferDone " <<
+-               " out.offset=" << http->out.offset <<
+-               " objectLen()=" << http->storeEntry()->objectLen() <<
+-               " headers_sz=" << headers_sz);
+-        return 1;
+-    }
+ 
+-    return 0;
++    const auto done = http->out.offset >= http->storeEntry()->objectLen() - headers_sz;
++    const auto debugLevel = done ? 3 : 5;
++    debugs(88, debugLevel, done <<
++           " out.offset=" << http->out.offset <<
++           " objectLen()=" << http->storeEntry()->objectLen() <<
++           " headers_sz=" << headers_sz);
++    return done ? 1 : 0;
+ }
+ 
+ int
+@@ -1190,10 +1145,9 @@ clientReplyContext::storeNotOKTransferDone() const
+     MemObject *mem = http->storeEntry()->mem_obj;
+     assert(mem != NULL);
+     assert(http->request != NULL);
+-    /* mem->reply was wrong because it uses the UPSTREAM header length!!! */
+-    HttpReply const *curReply = mem->getReply();
++    const auto expectedBodySize = mem->baseReply().content_length;
+ 
+-    if (headers_sz == 0)
++    if (mem->baseReply().pstate != psParsed)
+         /* haven't found end of headers yet */
+         return 0;
+ 
+@@ -1202,19 +1156,14 @@ clientReplyContext::storeNotOKTransferDone() const
+      * If we are sending a body and we don't have a content-length,
+      * then we must wait for the object to become STORE_OK.
+      */
+-    if (curReply->content_length < 0)
+-        return 0;
+-
+-    uint64_t expectedLength = curReply->content_length + http->out.headers_sz;
+-
+-    if (http->out.size < expectedLength)
++    if (expectedBodySize < 0)
+         return 0;
+-    else {
+-        debugs(88,3,HERE << "storeNotOKTransferDone " <<
+-               " out.size=" << http->out.size <<
+-               " expectedLength=" << expectedLength);
+-        return 1;
+-    }
++    const auto done = http->out.offset >= expectedBodySize;
++    const auto debugLevel = done ? 3 : 5;
++    debugs(88, debugLevel, done <<
++           " out.offset=" << http->out.offset <<
++           " expectedBodySize=" << expectedBodySize);
++    return done ? 1 : 0;
+ }
+ 
+ /* A write has completed, what is the next status based on the
+@@ -1632,6 +1581,8 @@ clientReplyContext::cloneReply()
+     reply = http->storeEntry()->getReply()->clone();
+     HTTPMSGLOCK(reply);
+ 
++    http->al->reply = reply;
++
+     if (reply->sline.protocol == AnyP::PROTO_HTTP) {
+         /* RFC 2616 requires us to advertise our version (but only on real HTTP traffic) */
+         reply->sline.version = Http::ProtocolVersion();
+@@ -1778,20 +1729,12 @@ clientGetMoreData(clientStreamNode * aNode, ClientHttpRequest * http)
+     assert (context);
+     assert(context->http == http);
+ 
+-    clientStreamNode *next = ( clientStreamNode *)aNode->node.next->data;
+-
+     if (!context->ourNode)
+         context->ourNode = aNode;
+ 
+     /* no cbdatareference, this is only used once, and safely */
+     if (context->flags.storelogiccomplete) {
+-        StoreIOBuffer tempBuffer;
+-        tempBuffer.offset = next->readBuffer.offset + context->headers_sz;
+-        tempBuffer.length = next->readBuffer.length;
+-        tempBuffer.data = next->readBuffer.data;
+-
+-        storeClientCopy(context->sc, http->storeEntry(),
+-                        tempBuffer, clientReplyContext::SendMoreData, context);
++        context->requestMoreBodyFromStore();
+         return;
+     }
+ 
+@@ -1804,7 +1747,7 @@ clientGetMoreData(clientStreamNode * aNode, ClientHttpRequest * http)
+ 
+     if (context->http->request->method == Http::METHOD_TRACE) {
+         if (context->http->request->header.getInt64(Http::HdrType::MAX_FORWARDS) == 0) {
+-            context->traceReply(aNode);
++            context->traceReply();
+             return;
+         }
+ 
+@@ -1834,7 +1777,6 @@ clientReplyContext::doGetMoreData()
+ #endif
+ 
+         assert(http->logType.oldType == LOG_TCP_HIT);
+-        reqofs = 0;
+         /* guarantee nothing has been sent yet! */
+         assert(http->out.size == 0);
+         assert(http->out.offset == 0);
+@@ -1849,10 +1791,7 @@ clientReplyContext::doGetMoreData()
+             }
+         }
+ 
+-        localTempBuffer.offset = reqofs;
+-        localTempBuffer.length = getNextNode()->readBuffer.length;
+-        localTempBuffer.data = getNextNode()->readBuffer.data;
+-        storeClientCopy(sc, http->storeEntry(), localTempBuffer, CacheHit, this);
++        triggerInitialStoreRead(CacheHit);
+     } else {
+         /* MISS CASE, http->logType is already set! */
+         processMiss();
+@@ -1887,12 +1826,11 @@ clientReplyContext::makeThisHead()
+ }
+ 
+ bool
+-clientReplyContext::errorInStream(StoreIOBuffer const &result, size_t const &sizeToProcess)const
++clientReplyContext::errorInStream(const StoreIOBuffer &result) const
+ {
+     return /* aborted request */
+         (http->storeEntry() && EBIT_TEST(http->storeEntry()->flags, ENTRY_ABORTED)) ||
+-        /* Upstream read error */ (result.flags.error) ||
+-        /* Upstream EOF */ (sizeToProcess == 0);
++        /* Upstream read error */ (result.flags.error);
+ }
+ 
+ void
+@@ -1913,24 +1851,17 @@ clientReplyContext::sendStreamError(StoreIOBuffer const &result)
+ }
+ 
+ void
+-clientReplyContext::pushStreamData(StoreIOBuffer const &result, char *source)
++clientReplyContext::pushStreamData(const StoreIOBuffer &result)
+ {
+-    StoreIOBuffer localTempBuffer;
+-
+     if (result.length == 0) {
+         debugs(88, 5, "clientReplyContext::pushStreamData: marking request as complete due to 0 length store result");
+         flags.complete = 1;
+     }
+ 
+-    assert(result.offset - headers_sz == next()->readBuffer.offset);
+-    localTempBuffer.offset = result.offset - headers_sz;
+-    localTempBuffer.length = result.length;
+-
+-    if (localTempBuffer.length)
+-        localTempBuffer.data = source;
++    assert(!result.length || result.offset == next()->readBuffer.offset);
+ 
+     clientStreamCallback((clientStreamNode*)http->client_stream.head->data, http, NULL,
+-                         localTempBuffer);
++                         result);
+ }
+ 
+ clientStreamNode *
+@@ -2022,7 +1953,6 @@ clientReplyContext::processReplyAccess ()
+     if (http->logType.oldType == LOG_TCP_DENIED ||
+             http->logType.oldType == LOG_TCP_DENIED_REPLY ||
+             alwaysAllowResponse(reply->sline.status())) {
+-        headers_sz = reply->hdr_sz;
+         processReplyAccessResult(ACCESS_ALLOWED);
+         return;
+     }
+@@ -2033,8 +1963,6 @@ clientReplyContext::processReplyAccess ()
+         return;
+     }
+ 
+-    headers_sz = reply->hdr_sz;
+-
+     /** check for absent access controls (permit by default) */
+     if (!Config.accessList.reply) {
+         processReplyAccessResult(ACCESS_ALLOWED);
+@@ -2091,11 +2019,9 @@ clientReplyContext::processReplyAccessResult(const allow_t &accessAllowed)
+     /* Ok, the reply is allowed, */
+     http->loggingEntry(http->storeEntry());
+ 
+-    ssize_t body_size = reqofs - reply->hdr_sz;
+-    if (body_size < 0) {
+-        reqofs = reply->hdr_sz;
+-        body_size = 0;
+-    }
++    Assure(matchesStreamBodyBuffer(lastStreamBufferedBytes));
++    Assure(!lastStreamBufferedBytes.offset);
++    auto body_size = lastStreamBufferedBytes.length; // may be zero
+ 
+     debugs(88, 3, "clientReplyContext::sendMoreData: Appending " <<
+            (int) body_size << " bytes after " << reply->hdr_sz <<
+@@ -2123,19 +2049,27 @@ clientReplyContext::processReplyAccessResult(const allow_t &accessAllowed)
+     assert (!flags.headersSent);
+     flags.headersSent = true;
+ 
++    // next()->readBuffer.offset may be positive for Range requests, but our
++    // localTempBuffer initialization code assumes that next()->readBuffer.data
++    // points to the response body at offset 0 because the first
++    // storeClientCopy() request always has offset 0 (i.e. our first Store
++    // request ignores next()->readBuffer.offset).
++    //
++    // XXX: We cannot fully check that assumption: readBuffer.offset field is
++    // often out of sync with the buffer content, and if some buggy code updates
++    // the buffer while we were waiting for the processReplyAccessResult()
++    // callback, we may not notice.
++
+     StoreIOBuffer localTempBuffer;
+-    char *buf = next()->readBuffer.data;
+-    char *body_buf = buf + reply->hdr_sz;
++    const auto body_buf = next()->readBuffer.data;
+ 
+     //Server side may disable ranges under some circumstances.
+ 
+     if ((!http->request->range))
+         next()->readBuffer.offset = 0;
+ 
+-    body_buf -= next()->readBuffer.offset;
+-
+-    if (next()->readBuffer.offset != 0) {
+-        if (next()->readBuffer.offset > body_size) {
++    if (next()->readBuffer.offset > 0) {
++        if (Less(body_size, next()->readBuffer.offset)) {
+             /* Can't use any of the body we received. send nothing */
+             localTempBuffer.length = 0;
+             localTempBuffer.data = NULL;
+@@ -2148,7 +2082,6 @@ clientReplyContext::processReplyAccessResult(const allow_t &accessAllowed)
+         localTempBuffer.data = body_buf;
+     }
+ 
+-    /* TODO??: move the data in the buffer back by the request header size */
+     clientStreamCallback((clientStreamNode *)http->client_stream.head->data,
+                          http, reply, localTempBuffer);
+ 
+@@ -2161,6 +2094,8 @@ clientReplyContext::sendMoreData (StoreIOBuffer result)
+     if (deleting)
+         return;
+ 
++    debugs(88, 5, http->uri << " got " << result);
++
+     StoreEntry *entry = http->storeEntry();
+ 
+     if (ConnStateData * conn = http->getConn()) {
+@@ -2173,7 +2108,9 @@ clientReplyContext::sendMoreData (StoreIOBuffer result)
+             return;
+         }
+ 
+-        if (reqofs==0 && !http->logType.isTcpHit()) {
++        if (!flags.headersSent && !http->logType.isTcpHit()) {
++            // We get here twice if processReplyAccessResult() calls startError().
++            // TODO: Revise when we check/change QoS markings to reduce syscalls.
+             if (Ip::Qos::TheConfig.isHitTosActive()) {
+                 Ip::Qos::doTosLocalMiss(conn->clientConnection, http->request->hier.code);
+             }
+@@ -2187,21 +2124,9 @@ clientReplyContext::sendMoreData (StoreIOBuffer result)
+                " out.offset=" << http->out.offset);
+     }
+ 
+-    char *buf = next()->readBuffer.data;
+-
+-    if (buf != result.data) {
+-        /* we've got to copy some data */
+-        assert(result.length <= next()->readBuffer.length);
+-        memcpy(buf, result.data, result.length);
+-    }
+-
+     /* We've got the final data to start pushing... */
+     flags.storelogiccomplete = 1;
+ 
+-    reqofs += result.length;
+-
+-    assert(reqofs <= HTTP_REQBUF_SZ || flags.headersSent);
+-
+     assert(http->request != NULL);
+ 
+     /* ESI TODO: remove this assert once everything is stable */
+@@ -2210,20 +2135,25 @@ clientReplyContext::sendMoreData (StoreIOBuffer result)
+ 
+     makeThisHead();
+ 
+-    debugs(88, 5, "clientReplyContext::sendMoreData: " << http->uri << ", " <<
+-           reqofs << " bytes (" << result.length <<
+-           " new bytes)");
+-
+-    /* update size of the request */
+-    reqsize = reqofs;
+-
+-    if (errorInStream(result, reqofs)) {
++    if (errorInStream(result)) {
+         sendStreamError(result);
+         return;
+     }
+ 
++    if (!matchesStreamBodyBuffer(result)) {
++        // Subsequent processing expects response body bytes to be at the start
++        // of our Client Stream buffer. When given something else (e.g., bytes
++        // in our tempbuf), we copy and adjust to meet those expectations.
++        const auto &ourClientStreamsBuffer = next()->readBuffer;
++        assert(result.length <= ourClientStreamsBuffer.length);
++        memcpy(ourClientStreamsBuffer.data, result.data, result.length);
++        result.data = ourClientStreamsBuffer.data;
++    }
++
++    noteStreamBufferredBytes(result);
++
+     if (flags.headersSent) {
+-        pushStreamData (result, buf);
++        pushStreamData(result);
+         return;
+     }
+ 
+@@ -2234,23 +2164,38 @@ clientReplyContext::sendMoreData (StoreIOBuffer result)
+         sc->setDelayId(DelayId::DelayClient(http,reply));
+ #endif
+ 
+-    /* handle headers */
++    holdingBuffer = result;
++    processReplyAccess();
++    return;
++}
++
++/// Whether the given body area describes the start of our Client Stream buffer.
++/// An empty area does.
++bool
++clientReplyContext::matchesStreamBodyBuffer(const StoreIOBuffer &their) const
++{
++    // the answer is undefined for errors; they are not really "body buffers"
++    Assure(!their.flags.error);
+ 
+-    if (Config.onoff.log_mime_hdrs) {
+-        size_t k;
++    if (!their.length)
++        return true; // an empty body area always matches our body area
+ 
+-        if ((k = headersEnd(buf, reqofs))) {
+-            safe_free(http->al->headers.reply);
+-            http->al->headers.reply = (char *)xcalloc(k + 1, 1);
+-            xstrncpy(http->al->headers.reply, buf, k);
+-        }
++    if (their.data != next()->readBuffer.data) {
++        debugs(88, 7, "no: " << their << " vs. " << next()->readBuffer);
++        return false;
+     }
+ 
+-    holdingBuffer = result;
+-    processReplyAccess();
+-    return;
++    return true;
++}
++
++void
++clientReplyContext::noteStreamBufferredBytes(const StoreIOBuffer &result)
++{
++    Assure(matchesStreamBodyBuffer(result));
++    lastStreamBufferedBytes = result; // may be unchanged and/or zero-length
+ }
+ 
++
+ /* Using this breaks the client layering just a little!
+  */
+ void
+@@ -2289,13 +2234,6 @@ clientReplyContext::createStoreEntry(const HttpRequestMethod& m, RequestFlags re
+     sc->setDelayId(DelayId::DelayClient(http));
+ #endif
+ 
+-    reqofs = 0;
+-
+-    reqsize = 0;
+-
+-    /* I don't think this is actually needed! -- adrian */
+-    /* http->reqbuf = http->norm_reqbuf; */
+-    //    assert(http->reqbuf == http->norm_reqbuf);
+     /* The next line is illegal because we don't know if the client stream
+      * buffers have been set up
+      */
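+
The sendMoreData() hunk above normalizes incoming Store bytes so that later stages can assume the response body starts at the head of the Client Stream buffer. The standalone sketch below reproduces that copy-and-adjust step with simplified stand-ins for StoreIOBuffer; Buffer, Result, and normalize() are invented names for illustration, not Squid API.

#include <cassert>
#include <cstddef>
#include <cstring>
#include <iostream>

struct Buffer { char *data; size_t length; };        // stands in for the stream buffer
struct Result { const char *data; size_t length; };  // stands in for a Store answer

// mirrors matchesStreamBodyBuffer(): an empty area always matches
bool matchesStreamBuffer(const Result &r, const Buffer &stream)
{
    return r.length == 0 || r.data == stream.data;
}

// mirrors the copy-and-adjust step in sendMoreData(): afterwards the
// bytes are guaranteed to start at stream.data
void normalize(Result &r, Buffer &stream)
{
    if (!matchesStreamBuffer(r, stream)) {
        assert(r.length <= stream.length); // the bytes must fit, as asserted above
        memcpy(stream.data, r.data, r.length);
        r.data = stream.data;
    }
}

int main()
{
    char streamMem[16] = {};
    Buffer stream{streamMem, sizeof(streamMem)};
    const char temp[] = "body";  // bytes arrived in a temporary buffer
    Result r{temp, 4};
    normalize(r, stream);
    assert(r.data == stream.data && memcmp(stream.data, "body", 4) == 0);
    std::cout << "bytes now start at the stream buffer\n";
}
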
+diff --git a/src/client_side_reply.h b/src/client_side_reply.h
+index dddab1a..bf705a4 100644
+--- a/src/client_side_reply.h
++++ b/src/client_side_reply.h
+@@ -39,7 +39,6 @@ public:
+     void purgeFoundGet(StoreEntry *newEntry);
+     void purgeFoundHead(StoreEntry *newEntry);
+     void purgeFoundObject(StoreEntry *entry);
+-    void sendClientUpstreamResponse();
+     void purgeDoPurgeGet(StoreEntry *entry);
+     void purgeDoPurgeHead(StoreEntry *entry);
+     void doGetMoreData();
+@@ -67,7 +66,7 @@ public:
+     void processExpired();
+     clientStream_status_t replyStatus();
+     void processMiss();
+-    void traceReply(clientStreamNode * node);
++    void traceReply();
+     const char *storeId() const { return (http->store_id.size() > 0 ? http->store_id.termedBuf() : http->uri); }
+ 
+     Http::StatusCode purgeStatus;
+@@ -77,13 +76,14 @@ public:
+     virtual void created (StoreEntry *newEntry);
+ 
+     ClientHttpRequest *http;
+-    int headers_sz;
+     store_client *sc;       /* The store_client we're using */
+     StoreIOBuffer tempBuffer;   /* For use in validating requests via IMS */
+     int old_reqsize;        /* ... again, for the buffer */
+-    size_t reqsize;
+-    size_t reqofs;
+-    char tempbuf[HTTP_REQBUF_SZ];   ///< a temporary buffer if we need working storage
++    /// Buffer dedicated to receiving storeClientCopy() responses to generated
++    /// revalidation requests. These requests cannot use next()->readBuffer
++    /// because the latter keeps the contents of the stale HTTP response during
++    /// revalidation. sendClientOldEntry() uses that content.
++    char tempbuf[HTTP_REQBUF_SZ];
+ #if USE_CACHE_DIGESTS
+ 
+     const char *lookup_type;    /* temporary hack: storeGet() result: HIT/MISS/NONE */
+@@ -101,9 +101,10 @@ public:
+ private:
+     clientStreamNode *getNextNode() const;
+     void makeThisHead();
+-    bool errorInStream(StoreIOBuffer const &result, size_t const &sizeToProcess)const ;
++    bool errorInStream(const StoreIOBuffer &result) const;
++    bool matchesStreamBodyBuffer(const StoreIOBuffer &) const;
+     void sendStreamError(StoreIOBuffer const &result);
+-    void pushStreamData(StoreIOBuffer const &result, char *source);
++    void pushStreamData(const StoreIOBuffer &);
+     clientStreamNode * next() const;
+     StoreIOBuffer holdingBuffer;
+     HttpReply *reply;
+@@ -115,11 +116,13 @@ private:
+     bool alwaysAllowResponse(Http::StatusCode sline) const;
+     int checkTransferDone();
+     void processOnlyIfCachedMiss();
+-    bool processConditional(StoreIOBuffer &result);
++    bool processConditional();
++    void noteStreamBufferredBytes(const StoreIOBuffer &);
+     void cacheHit(StoreIOBuffer result);
+     void handleIMSReply(StoreIOBuffer result);
+     void sendMoreData(StoreIOBuffer result);
+-    void triggerInitialStoreRead();
++    void triggerInitialStoreRead(STCB = SendMoreData);
++    void requestMoreBodyFromStore();
+     void sendClientOldEntry();
+     void purgeAllCached();
+     void forgetHit();
+@@ -129,6 +132,13 @@ private:
+     void sendPreconditionFailedError();
+     void sendNotModified();
+     void sendNotModifiedOrPreconditionFailedError();
++    void sendClientUpstreamResponse(const StoreIOBuffer &upstreamResponse);
++
++    /// Reduces a chance of an accidental direct storeClientCopy() call that
++    /// (should but) forgets to invalidate our lastStreamBufferedBytes. This
++    /// function is not defined; decltype() syntax prohibits "= delete", but
++    /// function usage will trigger deprecation warnings and linking errors.
++    static decltype(::storeClientCopy) storeClientCopy [[deprecated]];
+ 
+     StoreEntry *old_entry;
+     /* ... for entry to be validated */
+@@ -145,6 +155,12 @@ private:
+     } CollapsedRevalidation;
+ 
+     CollapsedRevalidation collapsedRevalidation;
++
++    /// HTTP response body bytes stored in our Client Stream buffer (if any)
++    StoreIOBuffer lastStreamBufferedBytes;
++
++    // TODO: Remove after moving the meat of this function into a method.
++    friend CSR clientGetMoreData;
+ };
+ 
+ #endif /* SQUID_CLIENTSIDEREPLY_H */
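+
The storeClientCopy member declared above relies on a small C++ trick worth spelling out: an undefined, [[deprecated]] static member that shadows a global function makes accidental unqualified calls from inside the class fail loudly. A minimal self-contained illustration, with invented names (riskyGlobal, Guarded):

#include <iostream>

void riskyGlobal(int) { std::cout << "global called\n"; }

class Guarded
{
public:
    // an explicit, intentional call still works via the :: qualifier
    void safePath() { ::riskyGlobal(1); }

    // an unqualified call would pick the shadow below instead:
    // void buggyPath() { riskyGlobal(2); } // deprecation warning + link error

private:
    // never defined; decltype() copies the global function's signature
    static decltype(::riskyGlobal) riskyGlobal [[deprecated]];
};

int main()
{
    Guarded g;
    g.safePath();
}
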
+diff --git a/src/client_side_request.cc b/src/client_side_request.cc
+index ab08fd2..92da530 100644
+--- a/src/client_side_request.cc
++++ b/src/client_side_request.cc
+@@ -2045,6 +2045,8 @@ ClientHttpRequest::handleAdaptedHeader(HttpMsg *msg)
+         storeEntry()->replaceHttpReply(new_rep);
+         storeEntry()->timestampsSet();
+ 
++        al->reply = new_rep;
++
+         if (!adaptedBodySource) // no body
+             storeEntry()->complete();
+         clientGetMoreData(node, this);
+diff --git a/src/clients/Client.cc b/src/clients/Client.cc
+index f5defbb..cada70e 100644
+--- a/src/clients/Client.cc
++++ b/src/clients/Client.cc
+@@ -136,6 +136,8 @@ Client::setVirginReply(HttpReply *rep)
+     assert(rep);
+     theVirginReply = rep;
+     HTTPMSGLOCK(theVirginReply);
++    if (fwd->al)
++        fwd->al->reply = theVirginReply;
+     return theVirginReply;
+ }
+ 
+@@ -155,6 +157,8 @@ Client::setFinalReply(HttpReply *rep)
+     assert(rep);
+     theFinalReply = rep;
+     HTTPMSGLOCK(theFinalReply);
++    if (fwd->al)
++        fwd->al->reply = theFinalReply;
+ 
+     // give entry the reply because haveParsedReplyHeaders() expects it there
+     entry->replaceHttpReply(theFinalReply, false); // but do not write yet
+@@ -550,6 +554,7 @@ Client::blockCaching()
+         ACLFilledChecklist ch(acl, originalRequest(), NULL);
+        ch.reply = const_cast<HttpReply*>(entry->getReply()); // ACLFilledChecklist API bug
+         HTTPMSGLOCK(ch.reply);
++        ch.al = fwd->al;
+         if (!ch.fastCheck().allowed()) { // when in doubt, block
+             debugs(20, 3, "store_miss prohibits caching");
+             return true;
+diff --git a/src/enums.h b/src/enums.h
+index 4a860d8..262d62c 100644
+--- a/src/enums.h
++++ b/src/enums.h
+@@ -203,7 +203,6 @@ enum {
+ typedef enum {
+     DIGEST_READ_NONE,
+     DIGEST_READ_REPLY,
+-    DIGEST_READ_HEADERS,
+     DIGEST_READ_CBLOCK,
+     DIGEST_READ_MASK,
+     DIGEST_READ_DONE
+diff --git a/src/format/Format.cc b/src/format/Format.cc
+index 3b6a44b..689bdf9 100644
+--- a/src/format/Format.cc
++++ b/src/format/Format.cc
+@@ -330,7 +330,7 @@ log_quoted_string(const char *str, char *out)
+ static const HttpMsg *
+ actualReplyHeader(const AccessLogEntry::Pointer &al)
+ {
+-    const HttpMsg *msg = al->reply;
++    const HttpMsg *msg = al->reply.getRaw();
+ #if ICAP_CLIENT
+     // al->icap.reqMethod is methodNone in access.log context
+     if (!msg && al->icap.reqMethod == Adaptation::methodReqmod)
+@@ -853,24 +853,35 @@ Format::Format::assemble(MemBuf &mb, const AccessLogEntry::Pointer &al, int logS
+             } else
+ #endif
+             {
++                // just headers without start-line and CRLF
++                // XXX: reconcile with 'headers.request'
+                 quote = 1;
+             }
+             break;
+ 
+         case LFT_ADAPTED_REQUEST_ALL_HEADERS:
++            // just headers without start-line and CRLF
++            // XXX: reconcile with 'headers.adapted_request'
+             quote = 1;
+             break;
+ 
+-        case LFT_REPLY_ALL_HEADERS:
+-            out = al->headers.reply;
++        case LFT_REPLY_ALL_HEADERS: {
++            MemBuf allHeaders;
++            allHeaders.init();
++            // status-line + headers + CRLF
++            // XXX: reconcile with '>h' and '>ha'
++            al->packReplyHeaders(allHeaders);
++            sb.assign(allHeaders.content(), allHeaders.contentSize());
++            out = sb.c_str();
+ #if ICAP_CLIENT
+             if (!out && al->icap.reqMethod == Adaptation::methodReqmod)
+                 out = al->headers.adapted_request;
+ #endif
+             quote = 1;
+-            break;
++        }
++        break;
+ 
+         case LFT_USER_NAME:
+ #if USE_AUTH
+diff --git a/src/http.cc b/src/http.cc
+index 017e492..877172d 100644
+--- a/src/http.cc
++++ b/src/http.cc
+@@ -775,6 +775,9 @@ HttpStateData::processReplyHeader()
+ void
+ HttpStateData::handle1xx(HttpReply *reply)
+ {
++    if (fwd->al)
++        fwd->al->reply = reply;
++
+     HttpReply::Pointer msg(reply); // will destroy reply if unused
+ 
+     // one 1xx at a time: we must not be called while waiting for previous 1xx
+diff --git a/src/icmp/net_db.cc b/src/icmp/net_db.cc
+index 7dc42a2..52595f6 100644
+--- a/src/icmp/net_db.cc
++++ b/src/icmp/net_db.cc
+@@ -33,6 +33,7 @@
+ #include "mgr/Registration.h"
+ #include "mime_header.h"
+ #include "neighbors.h"
++#include "sbuf/SBuf.h"
+ #include "SquidConfig.h"
+ #include "SquidTime.h"
+ #include "Store.h"
+@@ -49,8 +50,6 @@
+ #include "ipcache.h"
+ #include "StoreClient.h"
+ 
+-#define NETDB_REQBUF_SZ 4096
+-
+ typedef enum {
+     STATE_NONE,
+     STATE_HEADER,
+@@ -67,12 +66,8 @@ public:
+         e(NULL),
+         sc(NULL),
+         r(theReq),
+-        used(0),
+-        buf_sz(NETDB_REQBUF_SZ),
+-        buf_ofs(0),
+         connstate(STATE_HEADER)
+     {
+-        *buf = 0;
+ 
+         assert(NULL != r);
+         HTTPMSGLOCK(r);
+@@ -92,10 +87,10 @@ public:
+     StoreEntry *e;
+     store_client *sc;
+     HttpRequest *r;
+-    int64_t used;
+-    size_t buf_sz;
+-    char buf[NETDB_REQBUF_SZ];
+-    int buf_ofs;
++
++    /// for receiving a NetDB reply body from Store and interpreting it
++    Store::ParsingBuffer parsingBuffer;
++
+     netdb_conn_state_t connstate;
+ };
+ 
+@@ -698,24 +693,19 @@ netdbExchangeHandleReply(void *data, StoreIOBuffer receivedData)
+     Ip::Address addr;
+ 
+     netdbExchangeState *ex = (netdbExchangeState *)data;
+-    int rec_sz = 0;
+-    int o;
+ 
+     struct in_addr line_addr;
+     double rtt;
+     double hops;
+-    char *p;
+     int j;
+     HttpReply const *rep;
+-    size_t hdr_sz;
+     int nused = 0;
+-    int size;
+-    int oldbufofs = ex->buf_ofs;
+ 
+-    rec_sz = 0;
++    size_t rec_sz = 0; // received record size (TODO: make const)
+     rec_sz += 1 + sizeof(struct in_addr);
+     rec_sz += 1 + sizeof(int);
+     rec_sz += 1 + sizeof(int);
++    Assure(rec_sz <= ex->parsingBuffer.capacity());
+     debugs(38, 3, "netdbExchangeHandleReply: " << receivedData.length << " read bytes");
+ 
+     if (!cbdataReferenceValid(ex->p)) {
+@@ -726,64 +716,29 @@ netdbExchangeHandleReply(void *data, StoreIOBuffer receivedData)
+ 
+     debugs(38, 3, "netdbExchangeHandleReply: for '" << ex->p->host << ":" << ex->p->http_port << "'");
+ 
+-    if (receivedData.length == 0 && !receivedData.flags.error) {
++    if (receivedData.flags.error) {
+         debugs(38, 3, "netdbExchangeHandleReply: Done");
+         delete ex;
+         return;
+     }
+ 
+-    p = ex->buf;
+-
+-    /* Get the size of the buffer now */
+-    size = ex->buf_ofs + receivedData.length;
+-    debugs(38, 3, "netdbExchangeHandleReply: " << size << " bytes buf");
+-
+-    /* Check if we're still doing headers */
+-
+     if (ex->connstate == STATE_HEADER) {
+-
+-        ex->buf_ofs += receivedData.length;
+-
+-        /* skip reply headers */
+-
+-        if ((hdr_sz = headersEnd(p, ex->buf_ofs))) {
+-            debugs(38, 5, "netdbExchangeHandleReply: hdr_sz = " << hdr_sz);
+-            rep = ex->e->getReply();
+-            assert(rep->sline.status() != Http::scNone);
+-            debugs(38, 3, "netdbExchangeHandleReply: reply status " << rep->sline.status());
+-
+-            if (rep->sline.status() != Http::scOkay) {
+-                delete ex;
+-                return;
+-            }
+-
+-            assert((size_t)ex->buf_ofs >= hdr_sz);
+-
+-            /*
+-             * Now, point p to the part of the buffer where the data
+-             * starts, and update the size accordingly
+-             */
+-            assert(ex->used == 0);
+-            ex->used = hdr_sz;
+-            size = ex->buf_ofs - hdr_sz;
+-            p += hdr_sz;
+-
+-            /* Finally, set the conn state mode to STATE_BODY */
+-            ex->connstate = STATE_BODY;
+-        } else {
+-            StoreIOBuffer tempBuffer;
+-            tempBuffer.offset = ex->buf_ofs;
+-            tempBuffer.length = ex->buf_sz - ex->buf_ofs;
+-            tempBuffer.data = ex->buf + ex->buf_ofs;
+-            /* Have more headers .. */
+-            storeClientCopy(ex->sc, ex->e, tempBuffer,
+-                            netdbExchangeHandleReply, ex);
++        const auto scode = ex->e->mem().baseReply().sline.status();
++        assert(scode != Http::scNone);
++        debugs(38, 3, "reply status " << scode);
++        if (scode != Http::scOkay) {
++            delete ex;
+             return;
+-        }
++        }
++        ex->connstate = STATE_BODY;
+     }
+ 
+     assert(ex->connstate == STATE_BODY);
+ 
++    ex->parsingBuffer.appended(receivedData.data, receivedData.length);
++    auto p = ex->parsingBuffer.c_str(); // current parsing position
++    auto size = ex->parsingBuffer.contentSize(); // bytes we still need to parse
++
+     /* If we get here, we have some body to parse .. */
+     debugs(38, 5, "netdbExchangeHandleReply: start parsing loop, size = " << size);
+ 
+@@ -792,6 +747,7 @@ netdbExchangeHandleReply(void *data, StoreIOBuffer receivedData)
+         addr.setAnyAddr();
+         hops = rtt = 0.0;
+ 
++        size_t o; // current record parsing offset
+         for (o = 0; o < rec_sz;) {
+             switch ((int) *(p + o)) {
+ 
+@@ -829,8 +785,6 @@ netdbExchangeHandleReply(void *data, StoreIOBuffer receivedData)
+ 
+         assert(o == rec_sz);
+ 
+-        ex->used += rec_sz;
+-
+         size -= rec_sz;
+ 
+         p += rec_sz;
+@@ -838,32 +792,8 @@ netdbExchangeHandleReply(void *data, StoreIOBuffer receivedData)
+         ++nused;
+     }
+ 
+-    /*
+-     * Copy anything that is left over to the beginning of the buffer,
+-     * and adjust buf_ofs accordingly
+-     */
+-
+-    /*
+-     * Evilly, size refers to the buf size left now,
+-     * ex->buf_ofs is the original buffer size, so just copy that
+-     * much data over
+-     */
+-    memmove(ex->buf, ex->buf + (ex->buf_ofs - size), size);
+-
+-    ex->buf_ofs = size;
+-
+-    /*
+-     * And don't re-copy the remaining data ..
+-     */
+-    ex->used += size;
+-
+-    /*
+-     * Now the tricky bit - size _included_ the leftover bit from the _last_
+-     * storeClientCopy. We don't want to include that, or our offset will be wrong.
+-     * So, don't count the size of the leftover buffer we began with.
+-     * This can _disappear_ when we're not tracking offsets ..
+-     */
+-    ex->used -= oldbufofs;
++    const auto parsedSize = ex->parsingBuffer.contentSize() - size;
++    ex->parsingBuffer.consume(parsedSize);
+ 
+     debugs(38, 3, "netdbExchangeHandleReply: size left over in this buffer: " << size << " bytes");
+ 
+@@ -871,20 +801,26 @@ netdbExchangeHandleReply(void *data, StoreIOBuffer receivedData)
+            " entries, (x " << rec_sz << " bytes) == " << nused * rec_sz <<
+            " bytes total");
+ 
+-    debugs(38, 3, "netdbExchangeHandleReply: used " << ex->used);
+-
+     if (EBIT_TEST(ex->e->flags, ENTRY_ABORTED)) {
+         debugs(38, 3, "netdbExchangeHandleReply: ENTRY_ABORTED");
+         delete ex;
+-    } else if (ex->e->store_status == STORE_PENDING) {
+-        StoreIOBuffer tempBuffer;
+-        tempBuffer.offset = ex->used;
+-        tempBuffer.length = ex->buf_sz - ex->buf_ofs;
+-        tempBuffer.data = ex->buf + ex->buf_ofs;
+-        debugs(38, 3, "netdbExchangeHandleReply: EOF not received");
+-        storeClientCopy(ex->sc, ex->e, tempBuffer,
+-                        netdbExchangeHandleReply, ex);
++        return;
+     }
++
++    if (ex->sc->atEof()) {
++        if (const auto leftoverBytes = ex->parsingBuffer.contentSize())
++            debugs(38, 2, "discarding a partially received record due to Store EOF: " << leftoverBytes);
++        delete ex;
++        return;
++    }
++
++    // TODO: To protect us from a broken peer sending an "infinite" stream of
++    // new addresses, limit the cumulative number of received bytes or records?
++
++    const auto remainingSpace = ex->parsingBuffer.space().positionAt(receivedData.offset + receivedData.length);
++    // rec_sz is at most buffer capacity, and we consume all fully loaded records
++    Assure(remainingSpace.length);
++    storeClientCopy(ex->sc, ex->e, remainingSpace, netdbExchangeHandleReply, ex);
+ }
+ 
+ #endif /* USE_ICMP */
+@@ -1296,14 +1232,9 @@ netdbExchangeStart(void *data)
+     ex->e = storeCreateEntry(uri, uri, RequestFlags(), Http::METHOD_GET);
+     assert(NULL != ex->e);
+ 
+-    StoreIOBuffer tempBuffer;
+-    tempBuffer.length = ex->buf_sz;
+-    tempBuffer.data = ex->buf;
+-
+     ex->sc = storeClientListAdd(ex->e, ex);
++    storeClientCopy(ex->sc, ex->e, ex->parsingBuffer.makeInitialSpace(), netdbExchangeHandleReply, ex);
+ 
+-    storeClientCopy(ex->sc, ex->e, tempBuffer,
+-                    netdbExchangeHandleReply, ex);
+     ex->r->flags.loopDetected = true;   /* cheat! -- force direct */
+ 
+     // XXX: send as Proxy-Authenticate instead
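+
The netdbExchangeHandleReply() rework above replaces the manual buf_ofs/memmove() bookkeeping with an append-parse-consume cycle. The sketch below shows that cycle in isolation; the 4-byte record size and the std::string accumulator are illustrative stand-ins for NetDB's real record layout and Store::ParsingBuffer.

#include <cstddef>
#include <iostream>
#include <string>

constexpr std::size_t recSz = 4; // invented fixed record size

// parse all complete records; keep any partial tail for the next read
void handleRead(std::string &buf, const char *data, std::size_t len)
{
    buf.append(data, len);                      // ParsingBuffer::appended()
    std::size_t parsed = 0;
    while (buf.size() - parsed >= recSz) {
        std::cout << "record: " << buf.substr(parsed, recSz) << '\n';
        parsed += recSz;
    }
    buf.erase(0, parsed);                       // ParsingBuffer::consume()
}

int main()
{
    std::string buf;
    handleRead(buf, "abcde", 5); // one full record plus one leftover byte
    handleRead(buf, "fgh", 3);   // leftover + new bytes complete record two
    std::cout << "leftover bytes awaiting more data: " << buf.size() << '\n';
}
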
+diff --git a/src/internal.cc b/src/internal.cc
+index 81d5175..3a04ce0 100644
+--- a/src/internal.cc
++++ b/src/internal.cc
+@@ -9,6 +9,7 @@
+ /* DEBUG: section 76    Internal Squid Object handling */
+ 
+ #include "squid.h"
++#include "base/Assure.h"
+ #include "CacheManager.h"
+ #include "comm/Connection.h"
+ #include "errorpage.h"
+diff --git a/src/log/FormatHttpdCombined.cc b/src/log/FormatHttpdCombined.cc
+index 6639e88..70ea336 100644
+--- a/src/log/FormatHttpdCombined.cc
++++ b/src/log/FormatHttpdCombined.cc
+@@ -69,7 +69,10 @@ Log::Format::HttpdCombined(const AccessLogEntry::Pointer &al, Logfile * logfile)
+ 
+     if (Config.onoff.log_mime_hdrs) {
+         char *ereq = ::Format::QuoteMimeBlob(al->headers.request);
+-        char *erep = ::Format::QuoteMimeBlob(al->headers.reply);
++        MemBuf mb;
++        mb.init();
++        al->packReplyHeaders(mb);
++        auto erep = ::Format::QuoteMimeBlob(mb.content());
+         logfilePrintf(logfile, " [%s] [%s]\n", ereq, erep);
+         safe_free(ereq);
+         safe_free(erep);
+diff --git a/src/log/FormatHttpdCommon.cc b/src/log/FormatHttpdCommon.cc
+index 1613d0e..9e933a0 100644
+--- a/src/log/FormatHttpdCommon.cc
++++ b/src/log/FormatHttpdCommon.cc
+@@ -54,7 +54,10 @@ Log::Format::HttpdCommon(const AccessLogEntry::Pointer &al, Logfile * logfile)
+ 
+     if (Config.onoff.log_mime_hdrs) {
+         char *ereq = ::Format::QuoteMimeBlob(al->headers.request);
+-        char *erep = ::Format::QuoteMimeBlob(al->headers.reply);
++        MemBuf mb;
++        mb.init();
++        al->packReplyHeaders(mb);
++        auto erep = ::Format::QuoteMimeBlob(mb.content());
+         logfilePrintf(logfile, " [%s] [%s]\n", ereq, erep);
+         safe_free(ereq);
+         safe_free(erep);
+diff --git a/src/log/FormatSquidNative.cc b/src/log/FormatSquidNative.cc
+index 0ab97e4..23076b2 100644
+--- a/src/log/FormatSquidNative.cc
++++ b/src/log/FormatSquidNative.cc
+@@ -71,7 +71,10 @@ Log::Format::SquidNative(const AccessLogEntry::Pointer &al, Logfile * logfile)
+ 
+     if (Config.onoff.log_mime_hdrs) {
+         char *ereq = ::Format::QuoteMimeBlob(al->headers.request);
+-        char *erep = ::Format::QuoteMimeBlob(al->headers.reply);
++        MemBuf mb;
++        mb.init();
++        al->packReplyHeaders(mb);
++        auto erep = ::Format::QuoteMimeBlob(mb.content());
+         logfilePrintf(logfile, " [%s] [%s]\n", ereq, erep);
+         safe_free(ereq);
+         safe_free(erep);
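+
The same three-line change appears in all three formatters above: instead of reading a pre-rendered al->headers.reply string, each formatter packs the reply headers into a fresh MemBuf on demand. A reduced sketch of that pattern, where MiniReply, pack(), and quoteMimeBlob() are invented stand-ins for HttpReply, AccessLogEntry::packReplyHeaders(), and Format::QuoteMimeBlob():

#include <iostream>
#include <sstream>
#include <string>

struct MiniReply {
    int status = 200;
    std::string headers = "Content-Type: text/html\r\n";
    void pack(std::ostringstream &os) const {
        os << "HTTP/1.1 " << status << " OK\r\n" << headers << "\r\n";
    }
};

// crude stand-in: escape CR/LF so the headers fit on one log line
std::string quoteMimeBlob(const std::string &s)
{
    std::string out;
    for (const char c : s) {
        if (c == '\r') out += "\\r";
        else if (c == '\n') out += "\\n";
        else out += c;
    }
    return out;
}

int main()
{
    MiniReply reply;
    std::ostringstream mb;  // plays the role of MemBuf after init()
    reply.pack(mb);         // plays the role of al->packReplyHeaders(mb)
    std::cout << "[" << quoteMimeBlob(mb.str()) << "]\n";
}
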
+diff --git a/src/peer_digest.cc b/src/peer_digest.cc
+index 7b6314d..8a66277 100644
+--- a/src/peer_digest.cc
++++ b/src/peer_digest.cc
+@@ -39,7 +39,6 @@ static EVH peerDigestCheck;
+ static void peerDigestRequest(PeerDigest * pd);
+ static STCB peerDigestHandleReply;
+ static int peerDigestFetchReply(void *, char *, ssize_t);
+-int peerDigestSwapInHeaders(void *, char *, ssize_t);
+ int peerDigestSwapInCBlock(void *, char *, ssize_t);
+ int peerDigestSwapInMask(void *, char *, ssize_t);
+ static int peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name);
+@@ -374,6 +373,9 @@ peerDigestRequest(PeerDigest * pd)
+     fetch->sc = storeClientListAdd(e, fetch);
+     /* set lastmod to trigger IMS request if possible */
+ 
++    // TODO: Also check for fetch->pd->cd presence as a precondition for sending
++    // IMS requests because peerDigestFetchReply() does not accept 304 responses
++    // without an in-memory cache digest.
+     if (old_e)
+         e->lastModified(old_e->lastModified());
+ 
+@@ -408,11 +410,16 @@ peerDigestHandleReply(void *data, StoreIOBuffer receivedData)
+     digest_read_state_t prevstate;
+     int newsize;
+ 
+-    assert(fetch->pd && receivedData.data);
++    if (receivedData.flags.error) {
++        peerDigestFetchAbort(fetch, fetch->buf, "failure loading digest reply from Store");
++        return;
++    }
++
++    assert(fetch->pd);
+     /* The existing code assumes that the received pointer is
+      * where we asked the data to be put
+      */
+-    assert(fetch->buf + fetch->bufofs == receivedData.data);
++    assert(!receivedData.data || fetch->buf + fetch->bufofs == receivedData.data);
+ 
+     /* Update the buffer size */
+     fetch->bufofs += receivedData.length;
+@@ -444,10 +451,6 @@ peerDigestHandleReply(void *data, StoreIOBuffer receivedData)
+             retsize = peerDigestFetchReply(fetch, fetch->buf, fetch->bufofs);
+             break;
+ 
+-        case DIGEST_READ_HEADERS:
+-            retsize = peerDigestSwapInHeaders(fetch, fetch->buf, fetch->bufofs);
+-            break;
+-
+         case DIGEST_READ_CBLOCK:
+             retsize = peerDigestSwapInCBlock(fetch, fetch->buf, fetch->bufofs);
+             break;
+@@ -487,7 +490,7 @@ peerDigestHandleReply(void *data, StoreIOBuffer receivedData)
+     // checking at the beginning of this function. However, in this case, we would have to require
+     // that the parser does not regard EOF as a special condition (it is true now but may change
+     // in the future).
+-    if (!receivedData.length) { // EOF
++    if (fetch->sc->atEof()) {
+         peerDigestFetchAbort(fetch, fetch->buf, "premature end of digest reply");
+         return;
+     }
+@@ -506,19 +509,12 @@ peerDigestHandleReply(void *data, StoreIOBuffer receivedData)
+     }
+ }
+ 
+-/* wait for full http headers to be received then parse them */
+-/*
+- * This routine handles parsing the reply line.
+- * If the reply line indicates an OK, the same data is thrown
+- * to SwapInHeaders(). If the reply line is a NOT_MODIFIED,
+- * we simply stop parsing.
+- */
++/// handle HTTP response headers in the initial storeClientCopy() response
+ static int
+ peerDigestFetchReply(void *data, char *buf, ssize_t size)
+ {
+     DigestFetchState *fetch = (DigestFetchState *)data;
+     PeerDigest *pd = fetch->pd;
+-    size_t hdr_size;
+     assert(pd && buf);
+     assert(!fetch->offset);
+ 
+@@ -527,7 +523,7 @@ peerDigestFetchReply(void *data, char *buf, ssize_t size)
+     if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestFetchReply"))
+         return -1;
+ 
+-    if ((hdr_size = headersEnd(buf, size))) {
++    {
+         HttpReply const *reply = fetch->entry->getReply();
+         assert(reply);
+         assert(reply->sline.status() != Http::scNone);
+@@ -563,6 +559,15 @@ peerDigestFetchReply(void *data, char *buf, ssize_t size)
+             /* preserve request -- we need its size to update counters */
+             /* requestUnlink(r); */
+             /* fetch->entry->mem_obj->request = NULL; */
++
++            if (!fetch->pd->cd) {
++                peerDigestFetchAbort(fetch, buf, "304 without the old in-memory digest");
++                return -1;
++            }
++
++            // stay with the old in-memory digest
++            peerDigestFetchStop(fetch, buf, "Not modified");
++            fetch->state = DIGEST_READ_DONE;
+         } else if (status == Http::scOkay) {
+             /* get rid of old entry if any */
+ 
+@@ -573,67 +578,12 @@ peerDigestFetchReply(void *data, char *buf, ssize_t size)
+                 fetch->old_entry->unlock("peerDigestFetchReply 200");
+                 fetch->old_entry = NULL;
+             }
++            fetch->state = DIGEST_READ_CBLOCK;
+         } else {
+             /* some kind of a bug */
+             peerDigestFetchAbort(fetch, buf, reply->sline.reason());
+             return -1;      /* XXX -1 will abort stuff in ReadReply! */
+         }
+-
+-        /* must have a ready-to-use store entry if we got here */
+-        /* can we stay with the old in-memory digest? */
+-        if (status == Http::scNotModified && fetch->pd->cd) {
+-            peerDigestFetchStop(fetch, buf, "Not modified");
+-            fetch->state = DIGEST_READ_DONE;
+-        } else {
+-            fetch->state = DIGEST_READ_HEADERS;
+-        }
+-    } else {
+-        /* need more data, do we have space? */
+-
+-        if (size >= SM_PAGE_SIZE)
+-            peerDigestFetchAbort(fetch, buf, "reply header too big");
+-    }
+-
+-    /* We don't want to actually ack that we've handled anything,
+-     * otherwise SwapInHeaders() won't get the reply line .. */
+-    return 0;
+-}
+-
+-/* fetch headers from disk, pass on to SwapInCBlock */
+-int
+-peerDigestSwapInHeaders(void *data, char *buf, ssize_t size)
+-{
+-    DigestFetchState *fetch = (DigestFetchState *)data;
+-    size_t hdr_size;
+-
+-    assert(fetch->state == DIGEST_READ_HEADERS);
+-
+-    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInHeaders"))
+-        return -1;
+-
+-    assert(!fetch->offset);
+-
+-    if ((hdr_size = headersEnd(buf, size))) {
+-        assert(fetch->entry->getReply());
+-        assert(fetch->entry->getReply()->sline.status() != Http::scNone);
+-
+-        if (fetch->entry->getReply()->sline.status() != Http::scOkay) {
+-            debugs(72, DBG_IMPORTANT, "peerDigestSwapInHeaders: " << fetch->pd->host <<
+-                   " status " << fetch->entry->getReply()->sline.status() <<
+-                   " got cached!");
+-
+-            peerDigestFetchAbort(fetch, buf, "internal status error");
+-            return -1;
+-        }
+-
+-        fetch->state = DIGEST_READ_CBLOCK;
+-        return hdr_size;    /* Say how much data we read */
+-    }
+-
+-    /* need more data, do we have space? */
+-    if (size >= SM_PAGE_SIZE) {
+-        peerDigestFetchAbort(fetch, buf, "stored header too big");
+-        return -1;
+     }
+ 
+     return 0;       /* We need to read more to parse .. */
+@@ -755,7 +705,7 @@ peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const
+     }
+ 
+     /* continue checking (maybe-successful eof case) */
+-    if (!reason && !size) {
++    if (!reason && !size && fetch->state != DIGEST_READ_REPLY) {
+         if (!pd->cd)
+             reason = "null digest?!";
+         else if (fetch->mask_offset != pd->cd->mask_size)
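+
With DIGEST_READ_HEADERS removed above, peerDigestFetchReply() decides the next state in one step: a 304 keeps the old digest (or aborts when there is none), and a 200 proceeds straight to the cblock. A reduced model of that decision, with an invented enum and driver in place of digest_read_state_t and the fetch machinery:

#include <iostream>
#include <stdexcept>

enum class DigestState { Reply, CBlock, Mask, Done };

// models the single-step decision peerDigestFetchReply() now makes
DigestState afterReplyHeaders(const int status, const bool haveOldDigest)
{
    if (status == 304) {
        if (!haveOldDigest)
            throw std::runtime_error("304 without the old in-memory digest");
        return DigestState::Done; // stay with the old in-memory digest
    }
    if (status == 200)
        return DigestState::CBlock; // parse the digest cblock next
    throw std::runtime_error("unexpected digest reply status");
}

int main()
{
    std::cout << int(afterReplyHeaders(200, false)) << '\n'; // 1: CBlock
    std::cout << int(afterReplyHeaders(304, true)) << '\n';  // 3: Done
}
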
+diff --git a/src/servers/FtpServer.cc b/src/servers/FtpServer.cc
+index fab26cf..d3faa8d 100644
+--- a/src/servers/FtpServer.cc
++++ b/src/servers/FtpServer.cc
+@@ -777,12 +777,6 @@ Ftp::Server::handleReply(HttpReply *reply, StoreIOBuffer data)
+     Http::StreamPointer context = pipeline.front();
+     assert(context != nullptr);
+ 
+-    if (context->http && context->http->al != NULL &&
+-            !context->http->al->reply && reply) {
+-        context->http->al->reply = reply;
+-        HTTPMSGLOCK(context->http->al->reply);
+-    }
+-
+     static ReplyHandler handlers[] = {
+         NULL, // fssBegin
+         NULL, // fssConnected
+diff --git a/src/servers/Http1Server.cc b/src/servers/Http1Server.cc
+index 7514779..e76fb3e 100644
+--- a/src/servers/Http1Server.cc
++++ b/src/servers/Http1Server.cc
+@@ -310,9 +310,6 @@ Http::One::Server::handleReply(HttpReply *rep, StoreIOBuffer receivedData)
+     }
+ 
+     assert(rep);
+-    HTTPMSGUNLOCK(http->al->reply);
+-    http->al->reply = rep;
+-    HTTPMSGLOCK(http->al->reply);
+     context->sendStartOfMessage(rep, receivedData);
+ }
+ 
+diff --git a/src/stmem.cc b/src/stmem.cc
+index d117c15..b627005 100644
+--- a/src/stmem.cc
++++ b/src/stmem.cc
+@@ -95,8 +95,6 @@ mem_hdr::freeDataUpto(int64_t target_offset)
+             break;
+     }
+ 
+-    assert (lowestOffset () <= target_offset);
+-
+     return lowestOffset ();
+ }
+ 
+diff --git a/src/store.cc b/src/store.cc
+index 1948447..b4c7f82 100644
+--- a/src/store.cc
++++ b/src/store.cc
+@@ -273,6 +273,8 @@ StoreEntry::storeClientType() const
+ 
+     assert(mem_obj);
+ 
++    debugs(20, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
++
+     if (mem_obj->inmem_lo)
+         return STORE_DISK_CLIENT;
+ 
+@@ -300,6 +302,7 @@ StoreEntry::storeClientType() const
+                 return STORE_MEM_CLIENT;
+             }
+         }
++        debugs(20, 7, "STORE_OK STORE_DISK_CLIENT");
+         return STORE_DISK_CLIENT;
+     }
+ 
+@@ -319,10 +322,18 @@ StoreEntry::storeClientType() const
+     if (swap_status == SWAPOUT_NONE)
+         return STORE_MEM_CLIENT;
+ 
++    // TODO: The above "must make this a mem client" logic contradicts "Slight
++    // weirdness" logic in store_client::doCopy() that converts hits to misses
++    // on startSwapin() failures. We should probably attempt to open a swapin
++    // file _here_ instead (and avoid STORE_DISK_CLIENT designation for clients
++    // that fail to do so). That would also address a similar problem with Rock
++    // store that does not yet support swapin during SWAPOUT_WRITING.
++
+     /*
+      * otherwise, make subsequent clients read from disk so they
+      * can not delay the first, and vice-versa.
+      */
++    debugs(20, 7, "STORE_PENDING STORE_DISK_CLIENT");
+     return STORE_DISK_CLIENT;
+ }
+ 
+diff --git a/src/store/Makefile.am b/src/store/Makefile.am
+index be177d8..ccfc2dd 100644
+--- a/src/store/Makefile.am
++++ b/src/store/Makefile.am
+@@ -23,4 +23,6 @@ libstore_la_SOURCES= \
+ 	forward.h \
+ 	LocalSearch.cc \
+ 	LocalSearch.h \
++	ParsingBuffer.cc \
++	ParsingBuffer.h \
+ 	Storage.h
+diff --git a/src/store/Makefile.in b/src/store/Makefile.in
+index bb4387d..1959c99 100644
+--- a/src/store/Makefile.in
++++ b/src/store/Makefile.in
+@@ -163,7 +163,7 @@ CONFIG_CLEAN_FILES =
+ CONFIG_CLEAN_VPATH_FILES =
+ LTLIBRARIES = $(noinst_LTLIBRARIES)
+ libstore_la_LIBADD =
+-am_libstore_la_OBJECTS = Controller.lo Disk.lo Disks.lo LocalSearch.lo
++am_libstore_la_OBJECTS = Controller.lo Disk.lo Disks.lo LocalSearch.lo ParsingBuffer.lo
+ libstore_la_OBJECTS = $(am_libstore_la_OBJECTS)
+ AM_V_lt = $(am__v_lt_@AM_V@)
+ am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+@@ -185,7 +185,7 @@ DEFAULT_INCLUDES =
+ depcomp = $(SHELL) $(top_srcdir)/cfgaux/depcomp
+ am__maybe_remake_depfiles = depfiles
+ am__depfiles_remade = ./$(DEPDIR)/Controller.Plo ./$(DEPDIR)/Disk.Plo \
+-	./$(DEPDIR)/Disks.Plo ./$(DEPDIR)/LocalSearch.Plo
++	./$(DEPDIR)/Disks.Plo ./$(DEPDIR)/LocalSearch.Plo ./$(DEPDIR)/ParsingBuffer.Plo
+ am__mv = mv -f
+ CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ 	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+@@ -776,6 +776,8 @@ libstore_la_SOURCES = \
+ 	forward.h \
+ 	LocalSearch.cc \
+ 	LocalSearch.h \
++	ParsingBuffer.cc \
++	ParsingBuffer.h \
+ 	Storage.h
+ 
+ all: all-recursive
+@@ -846,6 +848,7 @@ distclean-compile:
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Disk.Plo@am__quote@ # am--include-marker
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Disks.Plo@am__quote@ # am--include-marker
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LocalSearch.Plo@am__quote@ # am--include-marker
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ParsingBuffer.Plo@am__quote@ # am--include-marker
+ 
+ $(am__depfiles_remade):
+ 	@$(MKDIR_P) $(@D)
+@@ -1254,6 +1257,7 @@ distclean: distclean-recursive
+ 	-rm -f ./$(DEPDIR)/Disk.Plo
+ 	-rm -f ./$(DEPDIR)/Disks.Plo
+ 	-rm -f ./$(DEPDIR)/LocalSearch.Plo
++	-rm -f ./$(DEPDIR)/ParsingBuffer.Plo
+ 	-rm -f Makefile
+ distclean-am: clean-am distclean-compile distclean-generic \
+ 	distclean-tags
+@@ -1303,6 +1307,7 @@ maintainer-clean: maintainer-clean-recursive
+ 	-rm -f ./$(DEPDIR)/Disk.Plo
+ 	-rm -f ./$(DEPDIR)/Disks.Plo
+ 	-rm -f ./$(DEPDIR)/LocalSearch.Plo
++	-rm -f ./$(DEPDIR)/ParsingBuffer.Plo
+ 	-rm -f Makefile
+ maintainer-clean-am: distclean-am maintainer-clean-generic
+ 
+diff --git a/src/store/ParsingBuffer.cc b/src/store/ParsingBuffer.cc
+new file mode 100644
+index 0000000..ca6be72
+--- /dev/null
++++ b/src/store/ParsingBuffer.cc
+@@ -0,0 +1,199 @@
++/*
++ * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
++ *
++ * Squid software is distributed under GPLv2+ license and includes
++ * contributions from numerous individuals and organizations.
++ * Please see the COPYING and CONTRIBUTORS files for details.
++ */
++
++#include "squid.h"
++#include "sbuf/Stream.h"
++#include "SquidMath.h"
++#include "store/ParsingBuffer.h"
++
++#include <cstring>
++
++// Several Store::ParsingBuffer() methods use assert() because the corresponding
++// failure means there is a good chance that somebody have already read from (or
++// written to) the wrong memory location. Since this buffer is used for storing
++// HTTP response bytes, such failures may corrupt traffic. No Assure() handling
++// code can safely recover from such failures.
++
++Store::ParsingBuffer::ParsingBuffer(StoreIOBuffer &initialSpace):
++    readerSuppliedMemory_(initialSpace)
++{
++}
++
++/// a read-only content start (or nil for some zero-size buffers)
++const char *
++Store::ParsingBuffer::memory() const
++{
++    return extraMemory_.second ? extraMemory_.first.rawContent() : readerSuppliedMemory_.data;
++}
++
++size_t
++Store::ParsingBuffer::capacity() const
++{
++    return extraMemory_.second ? (extraMemory_.first.length() + extraMemory_.first.spaceSize()) : readerSuppliedMemory_.length;
++}
++
++size_t
++Store::ParsingBuffer::contentSize() const
++{
++    return extraMemory_.second ? extraMemory_.first.length() : readerSuppliedMemoryContentSize_;
++}
++
++void
++Store::ParsingBuffer::appended(const char * const newBytes, const size_t newByteCount)
++{
++    // a positive newByteCount guarantees that, after the first assertion below
++    // succeeds, the second assertion will not increment a nil memory() pointer
++    if (!newByteCount)
++        return;
++
++    // this check order guarantees that memory() is not nil in the second assertion
++    assert(newByteCount <= spaceSize()); // the new bytes end in our space
++    assert(memory() + contentSize() == newBytes); // the new bytes start in our space
++    // and now we know that newBytes is not nil either
++
++    if (extraMemory_.second)
++        extraMemory_.first.rawAppendFinish(newBytes, newByteCount);
++    else
++        readerSuppliedMemoryContentSize_ = IncreaseSum(readerSuppliedMemoryContentSize_, newByteCount).first;
++
++    assert(contentSize() <= capacity()); // paranoid
++}
++
++void
++Store::ParsingBuffer::consume(const size_t parsedBytes)
++{
++    Assure(contentSize() >= parsedBytes); // more conservative than extraMemory_->consume()
++    if (extraMemory_.second) {
++        extraMemory_.first.consume(parsedBytes);
++    } else {
++        readerSuppliedMemoryContentSize_ -= parsedBytes;
++        if (parsedBytes && readerSuppliedMemoryContentSize_)
++            memmove(readerSuppliedMemory_.data, memory() + parsedBytes, readerSuppliedMemoryContentSize_);
++    }
++}
++
++StoreIOBuffer
++Store::ParsingBuffer::space()
++{
++    const auto size = spaceSize();
++    const auto start = extraMemory_.second ?
++                       extraMemory_.first.rawAppendStart(size) :
++                       (readerSuppliedMemory_.data + readerSuppliedMemoryContentSize_);
++    return StoreIOBuffer(spaceSize(), 0, start);
++}
++
++StoreIOBuffer
++Store::ParsingBuffer::makeSpace(const size_t pageSize)
++{
++    growSpace(pageSize);
++    auto result = space();
++    Assure(result.length >= pageSize);
++    result.length = pageSize;
++    return result;
++}
++
++StoreIOBuffer
++Store::ParsingBuffer::content() const
++{
++    // This const_cast is a StoreIOBuffer API limitation: That class does not
++    // support a "constant content view", even though it is used as such a view.
++    return StoreIOBuffer(contentSize(), 0, const_cast<char*>(memory()));
++}
++
++/// makes sure we have the requested number of free space bytes, allocating more memory if needed
++void
++Store::ParsingBuffer::growSpace(const size_t minimumSpaceSize)
++{
++    const auto capacityIncreaseAttempt = IncreaseSum(contentSize(), minimumSpaceSize);
++    if (!capacityIncreaseAttempt.second)
++        throw TextException(ToSBuf("no support for a single memory block of ", contentSize(), '+', minimumSpaceSize, " bytes"), Here());
++    const auto newCapacity = capacityIncreaseAttempt.first;
++
++    if (newCapacity <= capacity())
++        return; // already have enough space; no reallocation is needed
++
++    debugs(90, 7, "growing to provide " << minimumSpaceSize << " in " << *this);
++
++    if (extraMemory_.second) {
++        extraMemory_.first.reserveCapacity(newCapacity);
++    } else {
++        SBuf newStorage;
++        newStorage.reserveCapacity(newCapacity);
++        newStorage.append(readerSuppliedMemory_.data, readerSuppliedMemoryContentSize_);
++        extraMemory_.first = std::move(newStorage);
++        extraMemory_.second = true;
++    }
++    Assure(spaceSize() >= minimumSpaceSize);
++}
++
++SBuf
++Store::ParsingBuffer::toSBuf() const
++{
++    return extraMemory_.second ? extraMemory_.first : SBuf(content().data, content().length);
++}
++
++size_t
++Store::ParsingBuffer::spaceSize() const
++{
++    if (extraMemory_.second)
++        return extraMemory_.first.spaceSize();
++
++    assert(readerSuppliedMemoryContentSize_ <= readerSuppliedMemory_.length);
++    return readerSuppliedMemory_.length - readerSuppliedMemoryContentSize_;
++}
++
++/// 0-terminates stored byte sequence, allocating more memory if needed, but
++/// without increasing the number of stored content bytes
++void
++Store::ParsingBuffer::terminate()
++{
++    *makeSpace(1).data = 0;
++}
++
++StoreIOBuffer
++Store::ParsingBuffer::packBack()
++{
++    const auto bytesToPack = contentSize();
++    // until our callers no longer have to work around legacy code expectations
++    Assure(bytesToPack);
++
++    // if we accumulated more bytes at some point, any extra metadata should
++    // have been consume()d by now, allowing readerSuppliedMemory_.data reuse
++    Assure(bytesToPack <= readerSuppliedMemory_.length);
++
++    auto result = readerSuppliedMemory_;
++    result.length = bytesToPack;
++    Assure(result.data);
++
++    if (!extraMemory_.second) {
++        // no accumulated bytes copying because they are in readerSuppliedMemory_
++        debugs(90, 7, "quickly exporting " << result.length << " bytes via " << readerSuppliedMemory_);
++    } else {
++        debugs(90, 7, "slowly exporting " << result.length << " bytes from " << extraMemory_.first.id << " back into " << readerSuppliedMemory_);
++        memmove(result.data, extraMemory_.first.rawContent(), result.length);
++    }
++
++    return result;
++}
++
++void
++Store::ParsingBuffer::print(std::ostream &os) const
++{
++    os << "size=" << contentSize();
++
++    if (extraMemory_.second) {
++        os << " capacity=" << capacity();
++        os << " extra=" << extraMemory_.first.id;
++    }
++
++    // report readerSuppliedMemory_ (if any) even if we are no longer using it
++    // for content storage; it affects packBack() and related parsing logic
++    if (readerSuppliedMemory_.length)
++        os << ' ' << readerSuppliedMemory_;
++}
++
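+
growSpace() above refuses to grow past what IncreaseSum() can represent. The sketch below shows the same overflow-checked addition on its own; checkedAdd() is an assumed stand-in for Squid's IncreaseSum(), which likewise returns a (value, success) pair in this backport.

#include <cstddef>
#include <iostream>
#include <limits>
#include <utility>

// returns {sum, true}, or {0, false} if a + b would overflow size_t
std::pair<std::size_t, bool> checkedAdd(const std::size_t a, const std::size_t b)
{
    if (a > std::numeric_limits<std::size_t>::max() - b)
        return {0, false};
    return {a + b, true};
}

int main()
{
    const auto grown = checkedAdd(100, 4096);
    std::cout << "new capacity: " << grown.first << '\n';

    const auto bad = checkedAdd(std::numeric_limits<std::size_t>::max(), 1);
    if (!bad.second)
        std::cout << "overflow detected; growSpace() would throw instead\n";
}
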
+diff --git a/src/store/ParsingBuffer.h b/src/store/ParsingBuffer.h
+new file mode 100644
+index 0000000..b473ac6
+--- /dev/null
++++ b/src/store/ParsingBuffer.h
+@@ -0,0 +1,128 @@
++/*
++ * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
++ *
++ * Squid software is distributed under GPLv2+ license and includes
++ * contributions from numerous individuals and organizations.
++ * Please see the COPYING and CONTRIBUTORS files for details.
++ */
++
++#ifndef SQUID_SRC_STORE_PARSINGBUFFER_H
++#define SQUID_SRC_STORE_PARSINGBUFFER_H
++
++#include "sbuf/SBuf.h"
++#include "StoreIOBuffer.h"
++
++#include <utility>
++
++namespace Store
++{
++
++/// A continuous buffer for efficient accumulation and NUL-termination of
++/// Store-read bytes. The buffer accommodates two kinds of Store readers:
++///
++/// * Readers that do not have any external buffer to worry about but need to
++///   accumulate, terminate, and/or consume buffered content read by Store.
++///   These readers use the default constructor and then allocate the initial
++///   buffer space for their first read (if any).
++///
++/// * Readers that supply their StoreIOBuffer at construction time. That buffer
++///   is enough to handle the majority of use cases. However, the supplied
++///   StoreIOBuffer capacity may be exceeded when parsing requires accumulating
++///   multiple Store read results and/or NUL-termination of a full buffer.
++///
++/// This buffer seamlessly grows as needed, reducing memory over-allocation and,
++/// in case of StoreIOBuffer-seeded construction, memory copies.
++class ParsingBuffer
++{
++public:
++    /// creates buffer without any space or content
++    ParsingBuffer() = default;
++
++    /// seeds this buffer with the caller-supplied buffer space
++    explicit ParsingBuffer(StoreIOBuffer &);
++
++    /// a NUL-terminated version of content(); same lifetime as content()
++    const char *c_str() { terminate(); return memory(); }
++
++    /// export content() into SBuf, avoiding content copying when possible
++    SBuf toSBuf() const;
++
++    /// the total number of append()ed bytes that were not consume()d
++    size_t contentSize() const;
++
++    /// the number of bytes in the space() buffer
++    size_t spaceSize() const;
++
++    /// the maximum number of bytes we can store without allocating more space
++    size_t capacity() const;
++
++    /// Stored append()ed bytes that have not been consume()d. The returned
++    /// buffer offset is set to zero; the caller is responsible for adjusting
++    /// the offset if needed (TODO: Add/return a no-offset Mem::View instead).
++    /// The returned buffer is invalidated by calling a non-constant method or
++    /// by changing the StoreIOBuffer contents given to our constructor.
++    StoreIOBuffer content() const;
++
++    /// A (possibly empty) buffer for reading the next byte(s). The returned
++    /// buffer offset is set to zero; the caller is responsible for adjusting
++    /// the offset if needed (TODO: Add/return a no-offset Mem::Area instead).
++    /// The returned buffer is invalidated by calling a non-constant method or
++    /// by changing the StoreIOBuffer contents given to our constructor.
++    StoreIOBuffer space();
++
++    /// A buffer for reading the exact number of next byte(s). The method may
++    /// allocate new memory and copy previously appended() bytes as needed.
++    /// \param pageSize the exact number of bytes the caller wants to read
++    /// \returns space() after any necessary allocations
++    StoreIOBuffer makeSpace(size_t pageSize);
++
++    /// A buffer suitable for the first storeClientCopy() call. The method may
++    /// allocate new memory and copy previously appended() bytes as needed.
++    /// \returns space() after any necessary allocations
++    /// \deprecated New clients should call makeSpace() with client-specific
++    /// pageSize instead of this one-size-fits-all legacy method.
++    StoreIOBuffer makeInitialSpace() { return makeSpace(4096); }
++
++    /// remember the new bytes received into the previously provided space()
++    void appended(const char *, size_t);
++
++    /// get rid of previously appended() prefix of a given size
++    void consume(size_t);
++
++    /// Returns stored content, reusing the StoreIOBuffer given at the
++    /// construction time. Copying is avoided if we did not allocate extra
++    /// memory since construction. Not meant for default-constructed buffers.
++    /// \pre positive contentSize() (\sa store_client::finishCallback())
++    StoreIOBuffer packBack();
++
++    /// summarizes object state (for debugging)
++    void print(std::ostream &) const;
++
++private:
++    const char *memory() const;
++    void terminate();
++    void growSpace(size_t);
++
++private:
++    /// externally allocated buffer we were seeded with (or a zero-size one)
++    StoreIOBuffer readerSuppliedMemory_;
++
++    /// append()ed to readerSuppliedMemory_ bytes that were not consume()d
++    size_t readerSuppliedMemoryContentSize_ = 0;
++
++    /// our internal buffer that takes over readerSuppliedMemory_ when the
++    /// latter becomes full and more memory is needed
++    std::pair<SBuf, bool> extraMemory_ = std::make_pair(SBuf(), false);
++};
++
++inline std::ostream &
++operator <<(std::ostream &os, const ParsingBuffer &b)
++{
++    b.print(os);
++    return os;
++}
++
++} // namespace Store
++
++#endif /* SQUID_SRC_STORE_PARSINGBUFFER_H */
++
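+
The header above defines the space()/appended()/consume() reading cycle that storeClientCopy() callers now follow. A self-contained approximation of that loop, with readSome() standing in for storeClientCopy() and std::string for the growing buffer:

#include <algorithm>
#include <cstddef>
#include <cstring>
#include <iostream>
#include <string>

// stand-in for storeClientCopy(): copies up to len bytes of a canned
// response into out, starting at offset; returns the number copied
std::size_t readSome(const std::size_t offset, char *out, const std::size_t len)
{
    static const std::string response = "HELLO-NETDB-PAYLOAD";
    if (offset >= response.size())
        return 0; // EOF
    const auto n = std::min(len, response.size() - offset);
    memcpy(out, response.data() + offset, n);
    return n;
}

int main()
{
    std::string accumulated; // plays the role of ParsingBuffer content
    char space[8];           // reader-supplied memory, as in ParsingBuffer(StoreIOBuffer &)
    std::size_t offset = 0;
    while (const auto n = readSome(offset, space, sizeof(space))) {
        accumulated.append(space, n); // appended()
        offset += n;
    }
    std::cout << "got " << accumulated.size() << " bytes: " << accumulated << '\n';
}
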
+diff --git a/src/store/forward.h b/src/store/forward.h
+index 1422a85..db5ee1c 100644
+--- a/src/store/forward.h
++++ b/src/store/forward.h
+@@ -46,6 +46,7 @@ class Disks;
+ class Disk;
+ class DiskConfig;
+ class EntryGuard;
++class ParsingBuffer;
+ 
+ typedef ::StoreEntry Entry;
+ typedef ::MemStore Memory;
+diff --git a/src/store_client.cc b/src/store_client.cc
+index 1b54f04..a5f2440 100644
+--- a/src/store_client.cc
++++ b/src/store_client.cc
+@@ -9,6 +9,7 @@
+ /* DEBUG: section 90    Storage Manager Client-Side Interface */
+ 
+ #include "squid.h"
++#include "base/AsyncCbdataCalls.h"
+ #include "event.h"
+ #include "globals.h"
+ #include "HttpReply.h"
+@@ -16,8 +17,10 @@
+ #include "MemBuf.h"
+ #include "MemObject.h"
+ #include "mime_header.h"
++#include "sbuf/Stream.h"
+ #include "profiler/Profiler.h"
+ #include "SquidConfig.h"
++#include "SquidMath.h"
+ #include "StatCounters.h"
+ #include "Store.h"
+ #include "store_swapin.h"
+@@ -39,17 +42,10 @@
+ static StoreIOState::STRCB storeClientReadBody;
+ static StoreIOState::STRCB storeClientReadHeader;
+ static void storeClientCopy2(StoreEntry * e, store_client * sc);
+-static EVH storeClientCopyEvent;
+ static bool CheckQuickAbortIsReasonable(StoreEntry * entry);
+ 
+ CBDATA_CLASS_INIT(store_client);
+ 
+-bool
+-store_client::memReaderHasLowerOffset(int64_t anOffset) const
+-{
+-    return getType() == STORE_MEM_CLIENT && copyInto.offset < anOffset;
+-}
+-
+ int
+ store_client::getType() const
+ {
+@@ -105,25 +101,35 @@ storeClientListAdd(StoreEntry * e, void *data)
+ }
+ 
+ void
+-store_client::callback(ssize_t sz, bool error)
++store_client::FinishCallback(store_client * const sc)
+ {
+-    size_t bSz = 0;
++    sc->finishCallback();
++}
+ 
+-    if (sz >= 0 && !error)
+-        bSz = sz;
++void
++store_client::finishCallback()
++{
++    Assure(_callback.callback_handler);
++    Assure(_callback.notifier);
+ 
+-    StoreIOBuffer result(bSz, 0 ,copyInto.data);
++    // XXX: Some legacy code relies on zero-length buffers having nil data
++    // pointers. Some other legacy code expects "correct" result.offset even
++    // when there is no body to return. Accommodate all those expectations.
++    auto result = StoreIOBuffer(0, copyInto.offset, nullptr);
++    if (object_ok && parsingBuffer.second && parsingBuffer.first.contentSize())
++        result = parsingBuffer.first.packBack();
++    result.flags.error = object_ok ? 0 : 1;
+ 
+-    if (sz < 0 || error)
+-        result.flags.error = 1;
++    // no HTTP headers and no body bytes (but not because there was no space)
++    atEof_ = !sendingHttpHeaders() && !result.length && copyInto.length;
++
++    parsingBuffer.second = false;
++    ++answers;
+ 
+-    result.offset = cmp_offset;
+-    assert(_callback.pending());
+-    cmp_offset = copyInto.offset + bSz;
+     STCB *temphandler = _callback.callback_handler;
+     void *cbdata = _callback.callback_data;
+-    _callback = Callback(NULL, NULL);
+-    copyInto.data = NULL;
++    _callback = Callback(nullptr, nullptr);
++    copyInto.data = nullptr;
+ 
+     if (cbdataReferenceValid(cbdata))
+         temphandler(cbdata, result);
+@@ -131,32 +137,18 @@ store_client::callback(ssize_t sz, bool error)
+     cbdataReferenceDone(cbdata);
+ }
+ 
+-static void
+-storeClientCopyEvent(void *data)
+-{
+-    store_client *sc = (store_client *)data;
+-    debugs(90, 3, "storeClientCopyEvent: Running");
+-    assert (sc->flags.copy_event_pending);
+-    sc->flags.copy_event_pending = false;
+-
+-    if (!sc->_callback.pending())
+-        return;
+-
+-    storeClientCopy2(sc->entry, sc);
+-}
+-
+ store_client::store_client(StoreEntry *e) :
+-    cmp_offset(0),
+ #if STORE_CLIENT_LIST_DEBUG
+     owner(cbdataReference(data)),
+ #endif
+     entry(e),
+     type(e->storeClientType()),
+-    object_ok(true)
++    object_ok(true),
++    atEof_(false),
++    answers(0)
+ {
+     flags.disk_io_pending = false;
+     flags.store_copying = false;
+-    flags.copy_event_pending = false;
+     ++ entry->refcount;
+ 
+     if (getType() == STORE_DISK_CLIENT) {
+@@ -202,16 +194,33 @@ store_client::copy(StoreEntry * anEntry,
+ #endif
+ 
+     assert(!_callback.pending());
+-#if ONLYCONTIGUOUSREQUESTS
+-
+-    assert(cmp_offset == copyRequest.offset);
+-#endif
+-    /* range requests will skip into the body */
+-    cmp_offset = copyRequest.offset;
+     _callback = Callback (callback_fn, cbdataReference(data));
+     copyInto.data = copyRequest.data;
+     copyInto.length = copyRequest.length;
+     copyInto.offset = copyRequest.offset;
++    Assure(copyInto.offset >= 0);
++
++    if (!copyInto.length) {
++        // During the first storeClientCopy() call, a zero-size buffer means
++        // that we will have to drop any HTTP response body bytes we read (with
++        // the HTTP headers from disk). After that, it means we cannot return
++        // anything to the caller at all.
++        debugs(90, 2, "WARNING: zero-size storeClientCopy() buffer: " << copyInto);
++        // keep going; moreToRead() should prevent any from-Store reading
++    }
++
++    // Our nextHttpReadOffset() expects the first copy() call to have zero
++    // offset. More complex code could handle a positive first offset, but it
++    // would only be useful when reading responses from memory: We would not
++    // _delay_ the response (to read the requested HTTP body bytes from disk)
++    // when we already can respond with HTTP headers.
++    Assure(!copyInto.offset || answeredOnce());
++
++    parsingBuffer.first = Store::ParsingBuffer(copyInto);
++    parsingBuffer.second = true;
++
++    discardableHttpEnd_ = nextHttpReadOffset();
++    debugs(90, 7, "discardableHttpEnd_=" << discardableHttpEnd_);
+ 
+     static bool copying (false);
+     assert (!copying);
+@@ -239,50 +248,41 @@ store_client::copy(StoreEntry * anEntry,
+     // Add no code here. This object may no longer exist.
+ }
+ 
+-/// Whether there is (or will be) more entry data for us.
++/// Whether Store has (or possibly will have) more entry data for us.
+ bool
+-store_client::moreToSend() const
++store_client::moreToRead() const
+ {
++    if (!copyInto.length)
++        return false; // the client supplied a zero-size buffer
++
+     if (entry->store_status == STORE_PENDING)
+         return true; // there may be more coming
+ 
+     /* STORE_OK, including aborted entries: no more data is coming */
+ 
+-    const int64_t len = entry->objectLen();
++    if (canReadFromMemory())
++        return true; // memory has the first byte wanted by the client
+ 
+-    // If we do not know the entry length, then we have to open the swap file.
+-    const bool canSwapIn = entry->hasDisk();
+-    if (len < 0)
+-        return canSwapIn;
++    if (!entry->hasDisk())
++        return false; // cannot read anything from disk either
+ 
+-    if (copyInto.offset >= len)
+-        return false; // sent everything there is
++    if (entry->objectLen() >= 0 && copyInto.offset >= entry->contentLen())
++        return false; // the disk cannot have byte(s) wanted by the client
+ 
+-    if (canSwapIn)
+-        return true; // if we lack prefix, we can swap it in
+-
+-    // If we cannot swap in, make sure we have what we want in RAM. Otherwise,
+-    // scheduleRead calls scheduleDiskRead which asserts without a swap file.
+-    const MemObject *mem = entry->mem_obj;
+-    return mem &&
+-           mem->inmem_lo <= copyInto.offset && copyInto.offset < mem->endOffset();
++    // we cannot be sure until we swap in metadata and learn contentLen(),
++    // but the disk may have the byte(s) wanted by the client
++    return true;
+ }
+ 
+ static void
+ storeClientCopy2(StoreEntry * e, store_client * sc)
+ {
+     /* reentrancy not allowed  - note this could lead to
+-     * dropped events
++     * dropped notifications about response data availability
+      */
+ 
+-    if (sc->flags.copy_event_pending) {
+-        return;
+-    }
+-
+     if (sc->flags.store_copying) {
+-        sc->flags.copy_event_pending = true;
+-        debugs(90, 3, "storeClientCopy2: Queueing storeClientCopyEvent()");
+-        eventAdd("storeClientCopyEvent", storeClientCopyEvent, sc, 0.0, 0);
++        debugs(90, 3, "prevented recursive copying for " << *e);
+         return;
+     }
+ 
+@@ -295,39 +295,44 @@ storeClientCopy2(StoreEntry * e, store_client * sc)
+      * if the peer aborts, we want to give the client(s)
+      * everything we got before the abort condition occurred.
+      */
+-    /* Warning: doCopy may indirectly free itself in callbacks,
+-     * hence the lock to keep it active for the duration of
+-     * this function
+-     * XXX: Locking does not prevent calling sc destructor (it only prevents
+-     * freeing sc memory) so sc may become invalid from C++ p.o.v.
+-     */
+-    CbcPointer<store_client> tmpLock = sc;
+-    assert (!sc->flags.store_copying);
+     sc->doCopy(e);
+-    assert(!sc->flags.store_copying);
++}
++
++/// Whether our answer, if sent right now, will announce the availability of
++/// HTTP response headers (to the STCB callback) for the first time.
++bool
++store_client::sendingHttpHeaders() const
++{
++    return !answeredOnce() && entry->mem().baseReply().hdr_sz > 0;
+ }
+ 
+ void
+ store_client::doCopy(StoreEntry *anEntry)
+ {
++    Assure(_callback.pending());
++    Assure(!flags.disk_io_pending);
++    Assure(!flags.store_copying);
++
+     assert (anEntry == entry);
+     flags.store_copying = true;
+     MemObject *mem = entry->mem_obj;
+ 
+-    debugs(33, 5, "store_client::doCopy: co: " <<
+-           copyInto.offset << ", hi: " <<
+-           mem->endOffset());
++    debugs(33, 5, this << " into " << copyInto <<
++           " hi: " << mem->endOffset() <<
++           " objectLen: " << entry->objectLen() <<
++           " past_answers: " << answers);
+ 
+-    if (!moreToSend()) {
++    const auto sendHttpHeaders = sendingHttpHeaders();
++
++    if (!sendHttpHeaders && !moreToRead()) {
+         /* There is no more to send! */
+         debugs(33, 3, HERE << "There is no more to send!");
+-        callback(0);
++        noteNews();
+         flags.store_copying = false;
+         return;
+     }
+ 
+-    /* Check that we actually have data */
+-    if (anEntry->store_status == STORE_PENDING && copyInto.offset >= mem->endOffset()) {
++    if (!sendHttpHeaders && anEntry->store_status == STORE_PENDING && nextHttpReadOffset() >= mem->endOffset()) {
+         debugs(90, 3, "store_client::doCopy: Waiting for more");
+         flags.store_copying = false;
+         return;
+@@ -349,7 +354,24 @@ store_client::doCopy(StoreEntry *anEntry)
+         if (!startSwapin())
+             return; // failure
+     }
+-    scheduleRead();
++
++    // send any immediately available body bytes even if we also sendHttpHeaders
++    if (canReadFromMemory()) {
++        readFromMemory();
++        noteNews(); // will sendHttpHeaders (if needed) as well
++        flags.store_copying = false;
++        return;
++    }
++
++    if (sendHttpHeaders) {
++        debugs(33, 5, "just send HTTP headers: " << mem->baseReply().hdr_sz);
++        noteNews();
++        flags.store_copying = false;
++        return;
++    }
++
++    // no information that the client needs is available immediately
++    scheduleDiskRead();
+ }
+ 
+ /// opens the swapin "file" if possible; otherwise, fail()s and returns false
+@@ -383,14 +405,13 @@ store_client::startSwapin()
+ }
+ 
+ void
+-store_client::scheduleRead()
++store_client::noteSwapInDone(const bool error)
+ {
+-    MemObject *mem = entry->mem_obj;
+-
+-    if (copyInto.offset >= mem->inmem_lo && copyInto.offset < mem->endOffset())
+-        scheduleMemRead();
++    Assure(_callback.pending());
++    if (error)
++        fail();
+     else
+-        scheduleDiskRead();
++        noteNews();
+ }
+ 
+ void
+@@ -415,15 +436,44 @@ store_client::scheduleDiskRead()
+     flags.store_copying = false;
+ }
+ 
++/// whether at least one byte wanted by the client is in memory
++bool
++store_client::canReadFromMemory() const
++{
++    const auto &mem = entry->mem();
++    const auto memReadOffset = nextHttpReadOffset();
++    return mem.inmem_lo <= memReadOffset && memReadOffset < mem.endOffset() &&
++           parsingBuffer.first.spaceSize();
++}
++
++/// The offset of the next stored HTTP response byte wanted by the client.
++int64_t
++store_client::nextHttpReadOffset() const
++{
++    Assure(parsingBuffer.second);
++    const auto &mem = entry->mem();
++    const auto hdr_sz = mem.baseReply().hdr_sz;
++    // Certain SMP cache manager transactions do not store HTTP headers in
++    // mem_hdr; they store just a kid-specific piece of the future report body.
++    // In such cases, hdr_sz ought to be zero. In all other (known) cases,
++    // mem_hdr contains HTTP response headers (positive hdr_sz if parsed)
++    // followed by HTTP response body. This code math accommodates all cases.
++    return NaturalSum<int64_t>(hdr_sz, copyInto.offset, parsingBuffer.first.contentSize()).first;
++}
++
++/// Copies at least some of the requested body bytes from MemObject memory,
++/// satisfying the copy() request.
++/// \pre canReadFromMemory() is true
+ void
+-store_client::scheduleMemRead()
++store_client::readFromMemory()
+ {
+-    /* What the client wants is in memory */
+-    /* Old style */
+-    debugs(90, 3, "store_client::doCopy: Copying normal from memory");
+-    size_t sz = entry->mem_obj->data_hdr.copy(copyInto);
+-    callback(sz);
+-    flags.store_copying = false;
++    Assure(parsingBuffer.second);
++    const auto readInto = parsingBuffer.first.space().positionAt(nextHttpReadOffset());
++
++    debugs(90, 3, "copying HTTP body bytes from memory into " << readInto);
++    const auto sz = entry->mem_obj->data_hdr.copy(readInto);
++    Assure(sz > 0); // our canReadFromMemory() precondition guarantees that
++    parsingBuffer.first.appended(readInto.data, sz);
+ }
+ 
+ void
+@@ -435,65 +485,150 @@ store_client::fileRead()
+     assert(!flags.disk_io_pending);
+     flags.disk_io_pending = true;
+ 
++    // mem->swap_hdr_sz is zero here during initial read(s)
++    const auto nextStoreReadOffset = NaturalSum<int64_t>(mem->swap_hdr_sz, nextHttpReadOffset()).first;
++
++    // XXX: If fileRead() is called when we do not yet know mem->swap_hdr_sz,
++    // then we must start reading from disk offset zero to learn it: we cannot
++    // compute correct HTTP response start offset on disk without it. However,
++    // late startSwapin() calls imply that the assertion below might fail.
++    Assure(mem->swap_hdr_sz > 0 || !nextStoreReadOffset);
++
++    // TODO: Remove this assertion. Introduced in 1998 commit 3157c72, it
++    // assumes that swapped out memory is freed unconditionally, but we no
++    // longer do that because trimMemory() path checks lowestMemReaderOffset().
++    // It is also misplaced: We are not swapping out anything here and should
++    // not care about any swapout invariants.
+     if (mem->swap_hdr_sz != 0)
+         if (entry->swappingOut())
+-            assert(mem->swapout.sio->offset() > copyInto.offset + (int64_t)mem->swap_hdr_sz);
++            assert(mem->swapout.sio->offset() > nextStoreReadOffset);
++
++    // XXX: We should let individual cache_dirs limit the read size instead, but
++    // we cannot do that without more fixes and research because:
++    // * larger reads corrupt responses when cache_dir uses SharedMemory::get();
++    // * we do not know how to find all I/O code that assumes this limit;
++    // * performance effects of larger disk reads may be negative somewhere.
++    const decltype(StoreIOBuffer::length) maxReadSize = SM_PAGE_SIZE;
++
++    Assure(parsingBuffer.second);
++    // also, do not read more than we can return (via a copyInto.length buffer)
++    const auto readSize = std::min(copyInto.length, maxReadSize);
++    lastDiskRead = parsingBuffer.first.makeSpace(readSize).positionAt(nextStoreReadOffset);
++    debugs(90, 5, "into " << lastDiskRead);
+ 
+     storeRead(swapin_sio,
+-              copyInto.data,
+-              copyInto.length,
+-              copyInto.offset + mem->swap_hdr_sz,
++              lastDiskRead.data,
++              lastDiskRead.length,
++              lastDiskRead.offset,
+               mem->swap_hdr_sz == 0 ? storeClientReadHeader
+               : storeClientReadBody,
+               this);
+ }
+ 
+ void
+-store_client::readBody(const char *, ssize_t len)
++store_client::readBody(const char * const buf, const ssize_t lastIoResult)
+ {
+-    int parsed_header = 0;
+-
+-    // Don't assert disk_io_pending here.. may be called by read_header
++    Assure(flags.disk_io_pending);
+     flags.disk_io_pending = false;
+     assert(_callback.pending());
+-    debugs(90, 3, "storeClientReadBody: len " << len << "");
++    Assure(parsingBuffer.second);
++    debugs(90, 3, "got " << lastIoResult << " using " << parsingBuffer.first);
+ 
+-    if (len < 0)
++    if (lastIoResult < 0)
+         return fail();
+ 
+-    if (copyInto.offset == 0 && len > 0 && entry->getReply()->sline.status() == Http::scNone) {
+-        /* Our structure ! */
+-        HttpReply *rep = (HttpReply *) entry->getReply(); // bypass const
++    if (!lastIoResult) {
++        if (answeredOnce())
++            return noteNews();
+ 
+-        if (!rep->parseCharBuf(copyInto.data, headersEnd(copyInto.data, len))) {
+-            debugs(90, DBG_CRITICAL, "Could not parse headers from on disk object");
+-        } else {
+-            parsed_header = 1;
+-        }
++        debugs(90, DBG_CRITICAL, "ERROR: Truncated HTTP headers in on-disk object");
++        return fail();
+     }
+ 
+-    const HttpReply *rep = entry->getReply();
+-    if (len > 0 && rep && entry->mem_obj->inmem_lo == 0 && entry->objectLen() <= (int64_t)Config.Store.maxInMemObjSize && Config.onoff.memory_cache_disk) {
+-        storeGetMemSpace(len);
+-        // The above may start to free our object so we need to check again
++    assert(lastDiskRead.data == buf);
++    lastDiskRead.length = lastIoResult;
++
++    parsingBuffer.first.appended(buf, lastIoResult);
++
++    // we know swap_hdr_sz by now and were reading beyond swap metadata because
++    // readHead() would have been called otherwise (to read swap metadata)
++    const auto swap_hdr_sz = entry->mem().swap_hdr_sz;
++    Assure(swap_hdr_sz > 0);
++    Assure(!Less(lastDiskRead.offset, swap_hdr_sz));
++
++    // Map lastDiskRead (i.e. the disk area we just read) to an HTTP reply part.
++    // The bytes are the same, but disk and HTTP offsets differ by swap_hdr_sz.
++    const auto httpOffset = lastDiskRead.offset - swap_hdr_sz;
++    const auto httpPart = StoreIOBuffer(lastDiskRead).positionAt(httpOffset);
++
++    maybeWriteFromDiskToMemory(httpPart);
++    handleBodyFromDisk();
++}
++
++/// de-serializes HTTP response (partially) read from disk storage
++void
++store_client::handleBodyFromDisk()
++{
++    // We cannot de-serialize on-disk HTTP response without MemObject because
++    // without MemObject::swap_hdr_sz we cannot know where that response starts.
++    Assure(entry->mem_obj);
++    Assure(entry->mem_obj->swap_hdr_sz > 0);
++
++    if (!answeredOnce()) {
++        // All on-disk responses have HTTP headers. First disk body read(s)
++        // include HTTP headers that we must parse (if needed) and skip.
++        const auto haveHttpHeaders = entry->mem_obj->baseReply().pstate == psParsed;
++        if (!haveHttpHeaders && !parseHttpHeadersFromDisk())
++            return;
++        skipHttpHeadersFromDisk();
++    }
++
++    noteNews();
++}
++
++/// Adds HTTP response data loaded from disk to the memory cache (if
++/// needed/possible). The given part may contain portions of HTTP response
++/// headers and/or HTTP response body.
++void
++store_client::maybeWriteFromDiskToMemory(const StoreIOBuffer &httpResponsePart)
++{
++    // XXX: Reject [memory-]uncachable/unshareable responses instead of assuming
++    // that an HTTP response should be written to MemObject's data_hdr (and that
++    // it may purge already cached entries) just because it "fits" and was
++    // loaded from disk. For example, this response may already be marked for
++    // release. The (complex) cachability decision(s) should be made outside
++    // (and obeyed by) this low-level code.
++    if (httpResponsePart.length && entry->mem_obj->inmem_lo == 0 && entry->objectLen() <= (int64_t)Config.Store.maxInMemObjSize && Config.onoff.memory_cache_disk) {
++        storeGetMemSpace(httpResponsePart.length);
++        // XXX: This "recheck" is not needed because storeGetMemSpace() cannot
++        // purge mem_hdr bytes of a locked entry, and we do lock ours. And
++        // inmem_lo offset itself should not be relevant to appending new bytes.
++        //
++        // recheck for the above call may purge entry's data from the memory cache
+         if (entry->mem_obj->inmem_lo == 0) {
+-            /* Copy read data back into memory.
+-             * copyInto.offset includes headers, which is what mem cache needs
+-             */
+-            int64_t mem_offset = entry->mem_obj->endOffset();
+-            if ((copyInto.offset == mem_offset) || (parsed_header && mem_offset == rep->hdr_sz)) {
+-                entry->mem_obj->write(StoreIOBuffer(len, copyInto.offset, copyInto.data));
+-            }
++            // XXX: This code assumes a non-shared memory cache.
++            if (httpResponsePart.offset == entry->mem_obj->endOffset())
++                entry->mem_obj->write(httpResponsePart);
+         }
+     }
+-
+-    callback(len);
+ }
+ 
+ void
+ store_client::fail()
+ {
++    debugs(90, 3, (object_ok ? "once" : "again"));
++    if (!object_ok)
++        return; // we failed earlier; nothing to do now
++
+     object_ok = false;
++
++    noteNews();
++}
++
++/// if necessary and possible, informs the Store reader about copy() result
++void
++store_client::noteNews()
++{
+     /* synchronous open failures callback from the store,
+      * before startSwapin detects the failure.
+      * TODO: fix this inconsistent behaviour - probably by
+@@ -501,8 +636,20 @@ store_client::fail()
+      * not synchronous
+      */
+ 
+-    if (_callback.pending())
+-        callback(0, true);
++    if (!_callback.callback_handler) {
++        debugs(90, 5, "client lost interest");
++        return;
++    }
++
++    if (_callback.notifier) {
++        debugs(90, 5, "earlier news is being delivered by " << _callback.notifier);
++        return;
++    }
++
++    _callback.notifier = asyncCall(90, 4, "store_client::FinishCallback", cbdataDialer(store_client::FinishCallback, this));
++    ScheduleCallHere(_callback.notifier);
++
++    Assure(!_callback.pending());
+ }
+ 
+ static void
+@@ -573,38 +720,22 @@ store_client::readHeader(char const *buf, ssize_t len)
+     if (!object_ok)
+         return;
+ 
++    Assure(parsingBuffer.second);
++    debugs(90, 3, "got " << len << " using " << parsingBuffer.first);
++
+     if (len < 0)
+         return fail();
+ 
++    Assure(!parsingBuffer.first.contentSize());
++    parsingBuffer.first.appended(buf, len);
+     if (!unpackHeader(buf, len)) {
+         fail();
+         return;
+     }
++    parsingBuffer.first.consume(mem->swap_hdr_sz);
+ 
+-    /*
+-     * If our last read got some data the client wants, then give
+-     * it to them, otherwise schedule another read.
+-     */
+-    size_t body_sz = len - mem->swap_hdr_sz;
+-
+-    if (copyInto.offset < static_cast<int64_t>(body_sz)) {
+-        /*
+-         * we have (part of) what they want
+-         */
+-        size_t copy_sz = min(copyInto.length, body_sz);
+-        debugs(90, 3, "storeClientReadHeader: copying " << copy_sz << " bytes of body");
+-        memmove(copyInto.data, copyInto.data + mem->swap_hdr_sz, copy_sz);
+-
+-        readBody(copyInto.data, copy_sz);
+-
+-        return;
+-    }
+-
+-    /*
+-     * we don't have what the client wants, but at least we now
+-     * know the swap header size.
+-     */
+-    fileRead();
++    maybeWriteFromDiskToMemory(parsingBuffer.first.content());
++    handleBodyFromDisk();
+ }
+ 
+ int
+@@ -673,10 +804,12 @@ storeUnregister(store_client * sc, StoreEntry * e, void *data)
+         ++statCounter.swap.ins;
+     }
+ 
+-    if (sc->_callback.pending()) {
+-        /* callback with ssize = -1 to indicate unexpected termination */
+-        debugs(90, 3, "store_client for " << *e << " has a callback");
+-        sc->fail();
++    if (sc->_callback.callback_handler || sc->_callback.notifier) {
++        debugs(90, 3, "forgetting store_client callback for " << *e);
++        // Do not notify: Callers want to stop copying and forget about this
++        // pending copy request. Some would mishandle a notification from here.
++        if (sc->_callback.notifier)
++            sc->_callback.notifier->cancel("storeUnregister");
+     }
+ 
+ #if STORE_CLIENT_LIST_DEBUG
+@@ -684,6 +817,8 @@ storeUnregister(store_client * sc, StoreEntry * e, void *data)
+ 
+ #endif
+ 
++    // XXX: We might be inside sc store_client method somewhere up the call
++    // stack. TODO: Convert store_client to AsyncJob to make destruction async.
+     delete sc;
+ 
+     assert(e->locked());
+@@ -740,6 +875,9 @@ StoreEntry::invokeHandlers()
+ 
+         if (sc->flags.disk_io_pending)
+             continue;
++        
++        if (sc->flags.store_copying)
++            continue;
+ 
+         storeClientCopy2(this, sc);
+     }
+@@ -847,6 +985,63 @@ CheckQuickAbortIsReasonable(StoreEntry * entry)
+     return true;
+ }
+ 
++/// parses HTTP header bytes loaded from disk
++/// \returns false if fail() or scheduleDiskRead() has been called and, hence,
++/// the caller should just quit without any further action
++bool
++store_client::parseHttpHeadersFromDisk()
++{
++    try {
++        return tryParsingHttpHeaders();
++    } catch (...) {
++        // XXX: Our parser enforces Config.maxReplyHeaderSize limit, but our
++        // packer does not. Since packing might increase header size, we may
++        // cache a header that we cannot parse and get here. Same for MemStore.
++        debugs(90, DBG_CRITICAL, "ERROR: Cannot parse on-disk HTTP headers" <<
++               Debug::Extra << "exception: " << CurrentException <<
++               Debug::Extra << "raw input size: " << parsingBuffer.first.contentSize() << " bytes" <<
++               Debug::Extra << "current buffer capacity: " << parsingBuffer.first.capacity() << " bytes");
++        fail();
++        return false;
++    }
++}
++
++/// parseHttpHeadersFromDisk() helper
++/// \copydoc parseHttpHeaders()
++bool
++store_client::tryParsingHttpHeaders()
++{
++    Assure(parsingBuffer.second);
++    Assure(!copyInto.offset); // otherwise, parsingBuffer cannot have HTTP response headers
++    auto &adjustableReply = entry->mem().baseReply();
++    if (adjustableReply.parseTerminatedPrefix(parsingBuffer.first.c_str(), parsingBuffer.first.contentSize()))
++        return true;
++
++    // TODO: Optimize by checking memory as well. For simplicity sake, we
++    // continue on the disk-reading path, but readFromMemory() can give us the
++    // missing header bytes immediately if a concurrent request put those bytes
++    // into memory while we were waiting for our disk response.
++    scheduleDiskRead();
++    return false;
++}
++
++/// skips HTTP header bytes previously loaded from disk
++void
++store_client::skipHttpHeadersFromDisk()
++{
++    const auto hdr_sz = entry->mem_obj->baseReply().hdr_sz;
++    Assure(hdr_sz > 0); // all on-disk responses have HTTP headers
++    if (Less(parsingBuffer.first.contentSize(), hdr_sz)) {
++        debugs(90, 5, "discovered " << hdr_sz << "-byte HTTP headers in memory after reading some of them from disk: " << parsingBuffer.first);
++        parsingBuffer.first.consume(parsingBuffer.first.contentSize()); // skip loaded HTTP header prefix
++    } else {
++        parsingBuffer.first.consume(hdr_sz); // skip loaded HTTP headers
++        const auto httpBodyBytesAfterHeader = parsingBuffer.first.contentSize(); // may be zero
++        Assure(httpBodyBytesAfterHeader <= copyInto.length);
++        debugs(90, 5, "read HTTP body prefix: " << httpBodyBytesAfterHeader);
++    }
++}
++
+ void
+ store_client::dumpStats(MemBuf * output, int clientNumber) const
+ {
+@@ -864,8 +1059,8 @@ store_client::dumpStats(MemBuf * output, int clientNumber) const
+     if (flags.store_copying)
+         output->append(" store_copying", 14);
+ 
+-    if (flags.copy_event_pending)
+-        output->append(" copy_event_pending", 19);
++    if (_callback.notifier)
++        output->append(" notifying", 10);
+ 
+     output->append("\n",1);
+ }
+@@ -873,12 +1068,19 @@ store_client::dumpStats(MemBuf * output, int clientNumber) const
+ bool
+ store_client::Callback::pending() const
+ {
+-    return callback_handler && callback_data;
++    return callback_handler && !notifier;
+ }
+ 
+ store_client::Callback::Callback(STCB *function, void *data) : callback_handler(function), callback_data (data) {}
+ 
+ #if USE_DELAY_POOLS
++int
++store_client::bytesWanted() const
++{
++    // TODO: To avoid using stale copyInto, return zero if !_callback.pending()?
++    return delayId.bytesWanted(0, copyInto.length);
++}
++
+ void
+ store_client::setDelayId(DelayId delay_id)
+ {
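The offset bookkeeping introduced above is easy to verify with plain arithmetic. The following standalone sketch (illustrative only; all numbers are made up) mirrors how nextHttpReadOffset() and fileRead() relate HTTP response offsets to disk offsets via swap_hdr_sz:

// Illustrative only: all values below are made up for the demo.
#include <cstdint>
#include <iostream>

int main()
{
    const int64_t hdr_sz = 355;       // parsed HTTP response header size
    const int64_t copyIntoOffset = 0; // client wants the body from the start
    const int64_t buffered = 100;     // bytes already in parsingBuffer
    const int64_t swap_hdr_sz = 88;   // swap metadata preceding the response on disk

    // nextHttpReadOffset(): NaturalSum(hdr_sz, copyInto.offset, contentSize())
    const int64_t nextHttpReadOffset = hdr_sz + copyIntoOffset + buffered;
    // fileRead(): NaturalSum(mem->swap_hdr_sz, nextHttpReadOffset())
    const int64_t nextStoreReadOffset = swap_hdr_sz + nextHttpReadOffset;

    std::cout << "disk offset " << nextStoreReadOffset
              << " holds HTTP offset " << nextHttpReadOffset << '\n';
    return 0;
}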
+diff --git a/src/store_swapin.cc b/src/store_swapin.cc
+index a05d7e3..cd32e94 100644
+--- a/src/store_swapin.cc
++++ b/src/store_swapin.cc
+@@ -56,7 +56,7 @@ storeSwapInFileClosed(void *data, int errflag, StoreIOState::Pointer)
+ 
+     if (sc->_callback.pending()) {
+         assert (errflag <= 0);
+-        sc->callback(0, errflag ? true : false);
++        sc->noteSwapInDone(errflag);
+     }
+ 
+     ++statCounter.swap.ins;
+diff --git a/src/tests/stub_HttpReply.cc b/src/tests/stub_HttpReply.cc
+index 8ca7f9e..5cde8e6 100644
+--- a/src/tests/stub_HttpReply.cc
++++ b/src/tests/stub_HttpReply.cc
+@@ -25,6 +25,7 @@ void httpBodyPackInto(const HttpBody *, Packable *) STUB
+ bool HttpReply::sanityCheckStartLine(const char *buf, const size_t hdr_len, Http::StatusCode *error) STUB_RETVAL(false)
+ int HttpReply::httpMsgParseError() STUB_RETVAL(0)
+ bool HttpReply::expectingBody(const HttpRequestMethod&, int64_t&) const STUB_RETVAL(false)
++size_t HttpReply::parseTerminatedPrefix(const char *, size_t) STUB_RETVAL(0)
+ bool HttpReply::parseFirstLine(const char *start, const char *end) STUB_RETVAL(false)
+ void HttpReply::hdrCacheInit() STUB
+ HttpReply * HttpReply::clone() const STUB_RETVAL(NULL)
+diff --git a/src/tests/stub_store_client.cc b/src/tests/stub_store_client.cc
+index 2a13874..debe24e 100644
+--- a/src/tests/stub_store_client.cc
++++ b/src/tests/stub_store_client.cc
+@@ -34,7 +34,12 @@ void storeLogOpen(void) STUB
+ void storeDigestInit(void) STUB
+ void storeRebuildStart(void) STUB
+ void storeReplSetup(void) STUB
+-bool store_client::memReaderHasLowerOffset(int64_t anOffset) const STUB_RETVAL(false)
+ void store_client::dumpStats(MemBuf * output, int clientNumber) const STUB
+ int store_client::getType() const STUB_RETVAL(0)
++void store_client::noteSwapInDone(bool) STUB
++#if USE_DELAY_POOLS
++int store_client::bytesWanted() const STUB_RETVAL(0)
++#endif
++
++
+ 
+diff --git a/src/urn.cc b/src/urn.cc
+index 74453e1..6efdec1 100644
+--- a/src/urn.cc
++++ b/src/urn.cc
+@@ -26,8 +26,6 @@
+ #include "tools.h"
+ #include "urn.h"
+ 
+-#define URN_REQBUF_SZ   4096
+-
+ class UrnState : public StoreClient
+ {
+     CBDATA_CLASS(UrnState);
+@@ -45,8 +43,8 @@ public:
+     HttpRequest::Pointer request;
+     HttpRequest::Pointer urlres_r;
+ 
+-    char reqbuf[URN_REQBUF_SZ] = { '\0' };
+-    int reqofs = 0;
++    /// for receiving a URN resolver reply body from Store and interpreting it
++    Store::ParsingBuffer parsingBuffer;
+ 
+ private:
+     char *urlres;
+@@ -63,7 +61,7 @@ typedef struct {
+ } url_entry;
+ 
+ static STCB urnHandleReply;
+-static url_entry *urnParseReply(const char *inbuf, const HttpRequestMethod&);
++static url_entry *urnParseReply(const SBuf &, const HttpRequestMethod &);
+ static const char *const crlf = "\r\n";
+ 
+ CBDATA_CLASS_INIT(UrnState);
+@@ -183,13 +181,8 @@ UrnState::created(StoreEntry *newEntry)
+         sc = storeClientListAdd(urlres_e, this);
+     }
+ 
+-    reqofs = 0;
+-    StoreIOBuffer tempBuffer;
+-    tempBuffer.offset = reqofs;
+-    tempBuffer.length = URN_REQBUF_SZ;
+-    tempBuffer.data = reqbuf;
+     storeClientCopy(sc, urlres_e,
+-                    tempBuffer,
++                    parsingBuffer.makeInitialSpace(),
+                     urnHandleReply,
+                     this);
+ }
+@@ -224,9 +217,6 @@ urnHandleReply(void *data, StoreIOBuffer result)
+    UrnState *urnState = static_cast<UrnState *>(data);
+     StoreEntry *e = urnState->entry;
+     StoreEntry *urlres_e = urnState->urlres_e;
+-    char *s = NULL;
+-    size_t k;
+-    HttpReply *rep;
+     url_entry *urls;
+     url_entry *u;
+     url_entry *min_u;
+@@ -234,10 +224,7 @@ urnHandleReply(void *data, StoreIOBuffer result)
+     ErrorState *err;
+     int i;
+     int urlcnt = 0;
+-    char *buf = urnState->reqbuf;
+-    StoreIOBuffer tempBuffer;
+-
+-    debugs(52, 3, "urnHandleReply: Called with size=" << result.length << ".");
++    debugs(52, 3, result << " with " << *e);
+ 
+     if (EBIT_TEST(urlres_e->flags, ENTRY_ABORTED) || result.flags.error) {
+         delete urnState;
+@@ -250,59 +237,39 @@ urnHandleReply(void *data, StoreIOBuffer result)
+         return;
+     }
+ 
+-    /* Update reqofs to point to where in the buffer we'd be */
+-    urnState->reqofs += result.length;
+-
+-    /* Handle reqofs being bigger than normal */
+-    if (urnState->reqofs >= URN_REQBUF_SZ) {
+-        delete urnState;
+-        return;
+-    }
++    urnState->parsingBuffer.appended(result.data, result.length);
+ 
+     /* If we haven't received the entire object (urn), copy more */
+-    if (urlres_e->store_status == STORE_PENDING) {
+-        Must(result.length > 0); // zero length ought to imply STORE_OK
+-        tempBuffer.offset = urnState->reqofs;
+-        tempBuffer.length = URN_REQBUF_SZ - urnState->reqofs;
+-        tempBuffer.data = urnState->reqbuf + urnState->reqofs;
++    if (!urnState->sc->atEof()) {
++        const auto bufferedBytes = urnState->parsingBuffer.contentSize();
++        const auto remainingSpace = urnState->parsingBuffer.space().positionAt(bufferedBytes);
++
++        if (!remainingSpace.length) {
++            debugs(52, 3, "ran out of buffer space after " << bufferedBytes << " bytes");
++            // TODO: Here and in other error cases, send ERR_URN_RESOLVE to client.
++            delete urnState;
++            return;
++        }
++
+         storeClientCopy(urnState->sc, urlres_e,
+-                        tempBuffer,
++                        remainingSpace,
+                         urnHandleReply,
+                         urnState);
+         return;
+     }
+ 
+-    /* we know its STORE_OK */
+-    k = headersEnd(buf, urnState->reqofs);
+-
+-    if (0 == k) {
+-        debugs(52, DBG_IMPORTANT, "urnHandleReply: didn't find end-of-headers for " << e->url()  );
+-        delete urnState;
+-        return;
+-    }
+-
+-    s = buf + k;
+-    assert(urlres_e->getReply());
+-    rep = new HttpReply;
+-    rep->parseCharBuf(buf, k);
+-    debugs(52, 3, "reply exists, code=" << rep->sline.status() << ".");
+-
+-    if (rep->sline.status() != Http::scOkay) {
++    const auto &peerReply = urlres_e->mem().baseReply();
++    debugs(52, 3, "got reply, code=" << peerReply.sline.status());
++    if (peerReply.sline.status() != Http::scOkay) {
+         debugs(52, 3, "urnHandleReply: failed.");
+         err = new ErrorState(ERR_URN_RESOLVE, Http::scNotFound, urnState->request.getRaw());
+         err->url = xstrdup(e->url());
+         errorAppendEntry(e, err);
+-        delete rep;
+         delete urnState;
+         return;
+     }
+ 
+-    delete rep;
+-
+-    while (xisspace(*s))
+-        ++s;
+-
+-    urls = urnParseReply(s, urnState->request->method);
++    urls = urnParseReply(urnState->parsingBuffer.toSBuf(), urnState->request->method);
+ 
+     if (!urls) {     /* unknown URN error */
+         debugs(52, 3, "urnTranslateDone: unknown URN " << e->url());
+@@ -350,7 +317,7 @@ urnHandleReply(void *data, StoreIOBuffer result)
+         "Generated by %s@%s\n"
+         "\n",
+         APP_FULLNAME, getMyHostname());
+-    rep = new HttpReply;
++    const auto rep = new HttpReply;
+     rep->setHeaders(Http::scFound, NULL, "text/html", mb->contentSize(), 0, squid_curtime);
+ 
+     if (min_u) {
+@@ -372,9 +339,8 @@ urnHandleReply(void *data, StoreIOBuffer result)
+ }
+ 
+ static url_entry *
+-urnParseReply(const char *inbuf, const HttpRequestMethod& m)
++urnParseReply(const SBuf &inBuf, const HttpRequestMethod &m)
+ {
+-    char *buf = xstrdup(inbuf);
+     char *token;
+     url_entry *list;
+     url_entry *old;
+@@ -383,6 +349,13 @@ urnParseReply(const char *inbuf, const HttpRequestMethod& m)
+     debugs(52, 3, "urnParseReply");
+     list = (url_entry *)xcalloc(n + 1, sizeof(*list));
+ 
++    // XXX: Switch to tokenizer-based parsing.
++    const auto allocated = SBufToCstring(inBuf);
++
++    auto buf = allocated;
++    while (xisspace(*buf))
++        ++buf;
++
+     for (token = strtok(buf, crlf); token; token = strtok(NULL, crlf)) {
+         debugs(52, 3, "urnParseReply: got '" << token << "'");
+ 
+@@ -418,7 +391,7 @@ urnParseReply(const char *inbuf, const HttpRequestMethod& m)
+     }
+ 
+     debugs(52, 3, "urnParseReply: Found " << i << " URLs");
+-    xfree(buf);
++    xfree(allocated);
+     return list;
+ }
+ 
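The rewritten urnHandleReply() follows an accumulate-and-refill pattern with Store::ParsingBuffer: append what arrived, and unless the stream is at EOF, request more data into the remaining space. A minimal self-contained sketch of that pattern (illustrative only; Accumulator and BufferCapacity are stand-ins, not Squid types):

// Illustrative only: Accumulator stands in for Store::ParsingBuffer.
#include <cstddef>
#include <cstring>
#include <iostream>
#include <vector>

static const std::size_t BufferCapacity = 4096; // stand-in buffer limit

struct Accumulator {
    std::vector<char> buf; // bytes buffered across storeClientCopy() callbacks
    std::size_t contentSize() const { return buf.size(); }
    std::size_t spaceLeft() const { return BufferCapacity - buf.size(); }
    void appended(const char *data, std::size_t len) { buf.insert(buf.end(), data, data + len); }
};

int main()
{
    Accumulator parsingBuffer;
    const char *arrivals[] = { "http://example.com/a\r\n", "http://example.com/b\r\n" };
    for (const char *result : arrivals) {
        const std::size_t len = std::strlen(result);
        if (len > parsingBuffer.spaceLeft()) {
            // like the patched code: give up when the buffer is exhausted
            std::cerr << "ran out of buffer space after " << parsingBuffer.contentSize() << " bytes\n";
            return 1;
        }
        parsingBuffer.appended(result, len);
        // the real code would call storeClientCopy() again into the remaining
        // space unless sc->atEof() says the reply is complete
    }
    std::cout << "buffered " << parsingBuffer.contentSize() << " bytes for parsing\n";
    return 0;
}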
diff --git a/SOURCES/squid-4.15-CVE-2024-25111.patch b/SOURCES/squid-4.15-CVE-2024-25111.patch
new file mode 100644
index 0000000..e8ea010
--- /dev/null
+++ b/SOURCES/squid-4.15-CVE-2024-25111.patch
@@ -0,0 +1,193 @@
+diff --git a/src/http.cc b/src/http.cc
+index b006300..023e411 100644
+--- a/src/http.cc
++++ b/src/http.cc
+@@ -52,6 +52,7 @@
+ #include "rfc1738.h"
+ #include "SquidConfig.h"
+ #include "SquidTime.h"
++#include "SquidMath.h"
+ #include "StatCounters.h"
+ #include "Store.h"
+ #include "StrList.h"
+@@ -1150,18 +1151,26 @@ HttpStateData::readReply(const CommIoCbParams &io)
+      * Plus, it breaks our lame *HalfClosed() detection
+      */
+ 
+-    Must(maybeMakeSpaceAvailable(true));
+-    CommIoCbParams rd(this); // will be expanded with ReadNow results
+-    rd.conn = io.conn;
+-    rd.size = entry->bytesWanted(Range<size_t>(0, inBuf.spaceSize()));
++    size_t moreDataPermission = 0;
++    if ((!canBufferMoreReplyBytes(&moreDataPermission) || !moreDataPermission)) {
++        abortTransaction("ready to read required data, but the read buffer is full and cannot be drained");
++        return;
++    }
++
++    const auto readSizeMax = maybeMakeSpaceAvailable(moreDataPermission);
++    // TODO: Move this logic inside maybeMakeSpaceAvailable():
++    const auto readSizeWanted = readSizeMax ? entry->bytesWanted(Range<size_t>(0, readSizeMax)) : 0;
+ 
+-    if (rd.size <= 0) {
++    if (readSizeWanted <= 0) {
+         assert(entry->mem_obj);
+         AsyncCall::Pointer nilCall;
+         entry->mem_obj->delayRead(DeferredRead(readDelayed, this, CommRead(io.conn, NULL, 0, nilCall)));
+         return;
+     }
+ 
++    CommIoCbParams rd(this); // will be expanded with ReadNow results
++    rd.conn = io.conn;
++    rd.size = readSizeWanted;
+     switch (Comm::ReadNow(rd, inBuf)) {
+     case Comm::INPROGRESS:
+         if (inBuf.isEmpty())
+@@ -1520,8 +1529,11 @@ HttpStateData::maybeReadVirginBody()
+     if (!Comm::IsConnOpen(serverConnection) || fd_table[serverConnection->fd].closing())
+         return;
+ 
+-    if (!maybeMakeSpaceAvailable(false))
++    size_t moreDataPermission = 0;
++    if ((!canBufferMoreReplyBytes(&moreDataPermission)) || !moreDataPermission) {
++        abortTransaction("more response bytes required, but the read buffer is full and cannot be drained");
+         return;
++    }
+ 
+     // XXX: get rid of the do_next_read flag
+     // check for the proper reasons preventing read(2)
+@@ -1539,40 +1551,79 @@ HttpStateData::maybeReadVirginBody()
+     Comm::Read(serverConnection, call);
+ }
+ 
++/// Desired inBuf capacity based on various capacity preferences/limits:
++/// * a smaller buffer may not hold enough for look-ahead header/body parsers;
++/// * a smaller buffer may result in inefficient tiny network reads;
++/// * a bigger buffer may waste memory;
++/// * a bigger buffer may exceed SBuf storage capabilities (SBuf::maxSize);
++size_t
++HttpStateData::calcReadBufferCapacityLimit() const
++{
++    if (!flags.headers_parsed)
++        return Config.maxReplyHeaderSize;
++
++    // XXX: Our inBuf is not used to maintain the read-ahead gap, and using
++    // Config.readAheadGap like this creates huge read buffers for large
++    // read_ahead_gap values. TODO: Switch to using tcp_recv_bufsize as the
++    // primary read buffer capacity factor.
++    //
++    // TODO: Cannot reuse throwing NaturalCast() here. Consider removing
++    // .value() dereference in NaturalCast() or add/use NaturalCastOrMax().
++    const auto configurationPreferences = NaturalSum<size_t>(Config.readAheadGap).second ? NaturalSum<size_t>(Config.readAheadGap).first : SBuf::maxSize;
++
++    // TODO: Honor TeChunkedParser look-ahead and trailer parsing requirements
++    // (when explicit configurationPreferences are set too low).
++
++    return std::min(configurationPreferences, SBuf::maxSize);
++}
++
++/// The maximum number of virgin reply bytes we may buffer before we violate
++/// the currently configured response buffering limits.
++/// \retval false means that no more virgin response bytes can be read
++/// \retval true with *maxReadSize == 0 means that more virgin response bytes may be read later
++/// \retval true with *maxReadSize > 0 means that many bytes can be read now (subject to other constraints)
+ bool
+-HttpStateData::maybeMakeSpaceAvailable(bool doGrow)
++HttpStateData::canBufferMoreReplyBytes(size_t *maxReadSize) const
+ {
+-    // how much we are allowed to buffer
+-    const int limitBuffer = (flags.headers_parsed ? Config.readAheadGap : Config.maxReplyHeaderSize);
+-
+-    if (limitBuffer < 0 || inBuf.length() >= (SBuf::size_type)limitBuffer) {
+-        // when buffer is at or over limit already
+-        debugs(11, 7, "will not read up to " << limitBuffer << ". buffer has (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
+-        debugs(11, DBG_DATA, "buffer has {" << inBuf << "}");
+-        // Process next response from buffer
+-        processReply();
+-        return false;
++#if USE_ADAPTATION
++    // If we do not check this now, we may say the final "no" prematurely below
++    // because inBuf.length() will decrease as adaptation drains buffered bytes.
++    if (responseBodyBuffer) {
++        debugs(11, 3, "yes, but waiting for adaptation to drain read buffer");
++        *maxReadSize = 0; // yes, we may be able to buffer more (but later)
++        return true;
++    }
++#endif
++
++    const auto maxCapacity = calcReadBufferCapacityLimit();
++    if (inBuf.length() >= maxCapacity) {
++        debugs(11, 3, "no, due to a full buffer: " << inBuf.length() << '/' << inBuf.spaceSize() << "; limit: " << maxCapacity);
++        return false; // no, configuration prohibits buffering more
+     }
+ 
++    *maxReadSize = (maxCapacity - inBuf.length()); // positive
++    debugs(11, 7, "yes, may read up to " << *maxReadSize << " into " << inBuf.length() << '/' << inBuf.spaceSize());
++    return true; // yes, can read up to this many bytes (subject to other constraints)
++}
++
++/// prepare read buffer for reading
++/// \return the maximum number of bytes the caller should attempt to read
++/// \retval 0 means that the caller should delay reading
++size_t
++HttpStateData::maybeMakeSpaceAvailable(const size_t maxReadSize)
++{
+     // how much we want to read
+-    const size_t read_size = calcBufferSpaceToReserve(inBuf.spaceSize(), (limitBuffer - inBuf.length()));
++    const size_t read_size = calcBufferSpaceToReserve(inBuf.spaceSize(), maxReadSize);
+ 
+-    if (!read_size) {
++    if (read_size < 2) {
+         debugs(11, 7, "will not read up to " << read_size << " into buffer (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
+-        return false;
++        return 0;
+     }
+ 
+-    // just report whether we could grow or not, do not actually do it
+-    if (doGrow)
+-        return (read_size >= 2);
+-
+     // we may need to grow the buffer
+     inBuf.reserveSpace(read_size);
+-    debugs(11, 8, (!flags.do_next_read ? "will not" : "may") <<
+-           " read up to " << read_size << " bytes info buf(" << inBuf.length() << "/" << inBuf.spaceSize() <<
+-           ") from " << serverConnection);
+-
+-    return (inBuf.spaceSize() >= 2); // only read if there is 1+ bytes of space available
++    debugs(11, 7, "may read up to " << read_size << " bytes into buffer (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
++    return read_size;
+ }
+ 
+ /// called after writing the very last request byte (body, last-chunk, etc)
+diff --git a/src/http.h b/src/http.h
+index 8965b77..007d2e6 100644
+--- a/src/http.h
++++ b/src/http.h
+@@ -15,6 +15,8 @@
+ #include "http/StateFlags.h"
+ #include "sbuf/SBuf.h"
+ 
++#include 
++
+ class FwdState;
+ class HttpHeader;
+ 
+@@ -107,16 +109,9 @@ private:
+ 
+     void abortTransaction(const char *reason) { abortAll(reason); } // abnormal termination
+ 
+-    /**
+-     * determine if read buffer can have space made available
+-     * for a read.
+-     *
+-     * \param grow  whether to actually expand the buffer
+-     *
+-     * \return whether the buffer can be grown to provide space
+-     *         regardless of whether the grow actually happened.
+-     */
+-    bool maybeMakeSpaceAvailable(bool grow);
++    size_t calcReadBufferCapacityLimit() const;
++    bool canBufferMoreReplyBytes(size_t *maxReadSize) const;
++    size_t maybeMakeSpaceAvailable(size_t maxReadSize);
+ 
+     // consuming request body
+     virtual void handleMoreRequestBodyAvailable();
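The patched read path separates the buffering-permission question from space reservation: the caller first asks whether more reply bytes may be buffered at all, then reserves space and reads at most the permitted amount. A rough standalone model of that flow (illustrative only; simplified stand-ins for the HttpStateData methods, made-up numbers):

// Illustrative only: simplified stand-ins for the HttpStateData methods.
#include <algorithm>
#include <cstddef>
#include <iostream>

static const std::size_t maxCapacity = 64 * 1024; // calcReadBufferCapacityLimit() stand-in

// like canBufferMoreReplyBytes(): false = never; true and *maxReadSize == 0 = later
static bool canBufferMoreReplyBytes(std::size_t buffered, std::size_t *maxReadSize)
{
    if (buffered >= maxCapacity)
        return false; // configuration prohibits buffering more
    *maxReadSize = maxCapacity - buffered; // positive
    return true;
}

// like maybeMakeSpaceAvailable(): returning 0 tells the caller to delay reading
static std::size_t maybeMakeSpaceAvailable(std::size_t maxReadSize)
{
    const std::size_t readSize = std::min<std::size_t>(maxReadSize, 16 * 1024);
    return readSize < 2 ? 0 : readSize;
}

int main()
{
    const std::size_t buffered = 60 * 1024; // pretend inBuf.length()
    std::size_t permission = 0;
    if (!canBufferMoreReplyBytes(buffered, &permission) || !permission) {
        std::cout << "abort: read buffer is full and cannot be drained\n";
        return 1;
    }
    std::cout << "may read up to " << maybeMakeSpaceAvailable(permission) << " bytes\n"; // 4096
    return 0;
}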
diff --git a/SOURCES/squid-4.15-CVE-2024-25617.patch b/SOURCES/squid-4.15-CVE-2024-25617.patch
new file mode 100644
index 0000000..86e391a
--- /dev/null
+++ b/SOURCES/squid-4.15-CVE-2024-25617.patch
@@ -0,0 +1,105 @@
+diff --git a/src/SquidString.h b/src/SquidString.h
+index a791885..b9aef38 100644
+--- a/src/SquidString.h
++++ b/src/SquidString.h
+@@ -114,7 +114,16 @@ private:
+ 
+     size_type len_;  /* current length  */
+ 
+-    static const size_type SizeMax_ = 65535; ///< 64K limit protects some fixed-size buffers
++    /// An earlier 64KB limit was meant to protect some fixed-size buffers, but
++    /// (a) we do not know where those buffers are (or whether they still exist)
++    /// (b) too many String users unknowingly exceeded that limit and asserted.
++    /// We are now using a larger limit to reduce the number of (b) cases,
++    /// especially cases where "compact" lists of items grow 50% in size when we
++    /// convert them to canonical form. The new limit is selected to withstand
++    /// concatenation and ~50% expansion of two HTTP headers limited by default
++    /// request_header_max_size and reply_header_max_size settings.
++    static const size_type SizeMax_ = 3*64*1024 - 1;
++
+     /// returns true after increasing the first argument by extra if the sum does not exceed SizeMax_
+     static bool SafeAdd(size_type &base, size_type extra) { if (extra <= SizeMax_ && base <= SizeMax_ - extra) { base += extra; return true; } return false; }
+ 
+diff --git a/src/cache_cf.cc b/src/cache_cf.cc
+index a9c1b7e..46f07bb 100644
+--- a/src/cache_cf.cc
++++ b/src/cache_cf.cc
+@@ -935,6 +935,18 @@ configDoConfigure(void)
+                (uint32_t)Config.maxRequestBufferSize, (uint32_t)Config.maxRequestHeaderSize);
+     }
+ 
++    // Warn about the dangers of exceeding String limits when manipulating HTTP
++    // headers. Technically, we do not concatenate _requests_, so we could relax
++    // their check, but we keep the two checks the same for simplicity sake.
++    const auto safeRawHeaderValueSizeMax = (String::SizeMaxXXX()+1)/3;
++    // TODO: static_assert(safeRawHeaderValueSizeMax >= 64*1024); // no WARNINGs for default settings
++    if (Config.maxRequestHeaderSize > safeRawHeaderValueSizeMax)
++        debugs(3, DBG_CRITICAL, "WARNING: Increasing request_header_max_size beyond " << safeRawHeaderValueSizeMax <<
++               " bytes makes Squid more vulnerable to denial-of-service attacks; configured value: " << Config.maxRequestHeaderSize << " bytes");
++    if (Config.maxReplyHeaderSize > safeRawHeaderValueSizeMax)
++        debugs(3, DBG_CRITICAL, "WARNING: Increasing reply_header_max_size beyond " << safeRawHeaderValueSizeMax <<
++               " bytes makes Squid more vulnerable to denial-of-service attacks; configured value: " << Config.maxReplyHeaderSize << " bytes");
++
+     /*
+      * Disable client side request pipelining if client_persistent_connections OFF.
+      * Waste of resources queueing any pipelined requests when the first will close the connection.
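With the defaults, the warning threshold above never triggers: (SizeMax_ + 1)/3 works out to exactly the 64 KB default header limits. A trivial check (illustrative arithmetic only):

// Illustrative arithmetic only.
#include <iostream>

int main()
{
    const long SizeMax_ = 3 * 64 * 1024 - 1;                   // 196607, the new String limit
    const long safeRawHeaderValueSizeMax = (SizeMax_ + 1) / 3; // 65536
    std::cout << safeRawHeaderValueSizeMax << " bytes == the 64 KB defaults\n";
    return 0;
}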
+diff --git a/src/cf.data.pre b/src/cf.data.pre
+index bc2ddcd..d55b870 100644
+--- a/src/cf.data.pre
++++ b/src/cf.data.pre
+@@ -6196,11 +6196,14 @@ TYPE: b_size_t
+ DEFAULT: 64 KB
+ LOC: Config.maxRequestHeaderSize
+ DOC_START
+-	This specifies the maximum size for HTTP headers in a request.
+-	Request headers are usually relatively small (about 512 bytes).
+-	Placing a limit on the request header size will catch certain
+-	bugs (for example with persistent connections) and possibly
+-	buffer-overflow or denial-of-service attacks.
+	This directive limits the header size of a received HTTP request
++	(including request-line). Increasing this limit beyond its 64 KB default
++	exposes certain old Squid code to various denial-of-service attacks. This
++	limit also applies to received FTP commands.
++
+	This limit has no direct effect on Squid memory consumption.
++
++	Squid does not check this limit when sending requests.
+ DOC_END
+ 
+ NAME: reply_header_max_size
+@@ -6209,11 +6212,14 @@ TYPE: b_size_t
+ DEFAULT: 64 KB
+ LOC: Config.maxReplyHeaderSize
+ DOC_START
+-	This specifies the maximum size for HTTP headers in a reply.
+-	Reply headers are usually relatively small (about 512 bytes).
+-	Placing a limit on the reply header size will catch certain
+-	bugs (for example with persistent connections) and possibly
+-	buffer-overflow or denial-of-service attacks.
+	This directive limits the header size of a received HTTP response
++	(including status-line). Increasing this limit beyond its 64 KB default
++	exposes certain old Squid code to various denial-of-service attacks. This
++	limit also applies to FTP command responses.
++
++	Squid also checks this limit when loading hit responses from disk cache.
++
++	Squid does not check this limit when sending responses.
+ DOC_END
+ 
+ NAME: request_body_max_size
+diff --git a/src/http.cc b/src/http.cc
+index 877172d..b006300 100644
+--- a/src/http.cc
++++ b/src/http.cc
+@@ -1820,8 +1820,9 @@ HttpStateData::httpBuildRequestHeader(HttpRequest * request,
+ 
+         String strFwd = hdr_in->getList(Http::HdrType::X_FORWARDED_FOR);
+ 
+-        // if we cannot double strFwd size, then it grew past 50% of the limit
+-        if (!strFwd.canGrowBy(strFwd.size())) {
++        // Detect unreasonably long header values. And paranoidly check String
++        // limits: a String ought to accommodate two reasonable-length values.
++        if (strFwd.size() > 32*1024 || !strFwd.canGrowBy(strFwd.size())) {
+             // There is probably a forwarding loop with Via detection disabled.
+             // If we do nothing, String will assert on overflow soon.
+             // TODO: Terminate all transactions with huge XFF?
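The rejection condition above combines a hard 32 KB cap with the pre-existing doubling check. A standalone sketch of that logic (illustrative only; SafeAdd mirrors the String helper shown earlier in this patch, and all values are made up):

// Illustrative only: values are made up; SafeAdd mirrors the String helper.
#include <iostream>

static const unsigned long SizeMax_ = 3UL * 64 * 1024 - 1;

static bool SafeAdd(unsigned long &base, unsigned long extra)
{
    if (extra <= SizeMax_ && base <= SizeMax_ - extra) { base += extra; return true; }
    return false;
}

int main()
{
    const unsigned long strFwdSize = 40 * 1024; // pretend strFwd.size()
    unsigned long doubled = strFwdSize;
    const bool canGrow = SafeAdd(doubled, strFwdSize);      // like strFwd.canGrowBy(strFwd.size())
    const bool reject = strFwdSize > 32 * 1024 || !canGrow; // the patched condition
    std::cout << (reject ? "drop oversized X-Forwarded-For\n" : "append X-Forwarded-For\n");
    return 0;
}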
diff --git a/SOURCES/squid-4.15-ftp-filename-extraction.patch b/SOURCES/squid-4.15-ftp-filename-extraction.patch
new file mode 100644
index 0000000..cf1aeb3
--- /dev/null
+++ b/SOURCES/squid-4.15-ftp-filename-extraction.patch
@@ -0,0 +1,32 @@
+diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc
+index da9867f..e992638 100644
+--- a/src/clients/FtpGateway.cc
++++ b/src/clients/FtpGateway.cc
+@@ -1084,16 +1084,17 @@ Ftp::Gateway::checkAuth(const HttpHeader * req_hdr)
+ void
+ Ftp::Gateway::checkUrlpath()
+ {
+-    static SBuf str_type_eq("type=");
+-    auto t = request->url.path().rfind(';');
+-
+-    if (t != SBuf::npos) {
+-        auto filenameEnd = t-1;
+-        if (request->url.path().substr(++t).cmp(str_type_eq, str_type_eq.length()) == 0) {
+-            t += str_type_eq.length();
+-            typecode = (char)xtoupper(request->url.path()[t]);
+-            request->url.path(request->url.path().substr(0,filenameEnd));
+-        }
++    // If typecode was specified, extract it and leave just the filename in
++    // url.path. Tolerate trailing garbage or missing typecode value. Roughly:
++    // [filename] ;type=[typecode char] [trailing garbage]
++    static const SBuf middle(";type=");
++    const auto typeSpecStart = request->url.path().find(middle);
++    if (typeSpecStart != SBuf::npos) {
++        const auto fullPath = request->url.path();
++        const auto typecodePos = typeSpecStart + middle.length();
++        typecode = (typecodePos < fullPath.length()) ?
++            static_cast<char>(xtoupper(fullPath[typecodePos])) : '\0';
++        request->url.path(fullPath.substr(0, typeSpecStart));
+     }
+ 
+     int l = request->url.path().length();
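The new extraction tolerates trailing garbage and a missing typecode value. A self-contained rendition using std::string (illustrative only; the patch itself operates on Squid's SBuf):

// Illustrative only: std::string used instead of Squid's SBuf.
#include <cctype>
#include <iostream>
#include <string>

int main()
{
    std::string path = "/pub/file.txt;type=a;garbage";
    const std::string middle = ";type=";
    char typecode = '\0';
    const std::string::size_type typeSpecStart = path.find(middle);
    if (typeSpecStart != std::string::npos) {
        const std::string::size_type typecodePos = typeSpecStart + middle.length();
        typecode = (typecodePos < path.length()) ?
                   static_cast<char>(std::toupper(static_cast<unsigned char>(path[typecodePos]))) : '\0';
        path = path.substr(0, typeSpecStart); // drop ";type=..." and any trailing garbage
    }
    std::cout << "path=" << path << " typecode=" << typecode << '\n'; // path=/pub/file.txt typecode=A
    return 0;
}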
diff --git a/SOURCES/squid-4.15-halfclosed.patch b/SOURCES/squid-4.15-halfclosed.patch
new file mode 100644
index 0000000..6a9fc59
--- /dev/null
+++ b/SOURCES/squid-4.15-halfclosed.patch
@@ -0,0 +1,163 @@
+diff --git a/src/client_side.cc b/src/client_side.cc
+index f57f3f7..ab393e4 100644
+--- a/src/client_side.cc
++++ b/src/client_side.cc
+@@ -906,7 +906,7 @@ ConnStateData::kick()
+      * We are done with the response, and we are either still receiving request
+      * body (early response!) or have already stopped receiving anything.
+      *
+-     * If we are still receiving, then clientParseRequest() below will fail.
++     * If we are still receiving, then parseRequests() below will fail.
+      * (XXX: but then we will call readNextRequest() which may succeed and
+      * execute a smuggled request as we are not done with the current request).
+      *
+@@ -926,28 +926,12 @@ ConnStateData::kick()
+      * Attempt to parse a request from the request buffer.
+      * If we've been fed a pipelined request it may already
+      * be in our read buffer.
+-     *
+-     \par
+-     * This needs to fall through - if we're unlucky and parse the _last_ request
+-     * from our read buffer we may never re-register for another client read.
+      */
+ 
+-    if (clientParseRequests()) {
+-        debugs(33, 3, clientConnection << ": parsed next request from buffer");
+-    }
++    parseRequests();
+ 
+-    /** \par
+-     * Either we need to kick-start another read or, if we have
+-     * a half-closed connection, kill it after the last request.
+-     * This saves waiting for half-closed connections to finished being
+-     * half-closed _AND_ then, sometimes, spending "Timeout" time in
+-     * the keepalive "Waiting for next request" state.
+-     */
+-    if (commIsHalfClosed(clientConnection->fd) && pipeline.empty()) {
+-        debugs(33, 3, "half-closed client with no pending requests, closing");
+-        clientConnection->close();
++    if (!isOpen())
+         return;
+-    }
+ 
+     /** \par
+      * At this point we either have a parsed request (which we've
+@@ -2058,16 +2042,11 @@ ConnStateData::receivedFirstByte()
+     commSetConnTimeout(clientConnection, Config.Timeout.request, timeoutCall);
+ }
+ 
+-/**
+- * Attempt to parse one or more requests from the input buffer.
+- * Returns true after completing parsing of at least one request [header]. That
+- * includes cases where parsing ended with an error (e.g., a huge request).
+- */
+-bool
+-ConnStateData::clientParseRequests()
++/// Attempt to parse one or more requests from the input buffer.
++/// May close the connection.
++void
++ConnStateData::parseRequests()
+ {
+-    bool parsed_req = false;
+-
+     debugs(33, 5, HERE << clientConnection << ": attempting to parse");
+ 
+     // Loop while we have read bytes that are not needed for producing the body
+@@ -2116,8 +2095,6 @@ ConnStateData::clientParseRequests()
+ 
+             processParsedRequest(context);
+ 
+-            parsed_req = true; // XXX: do we really need to parse everything right NOW ?
+-
+             if (context->mayUseConnection()) {
+                 debugs(33, 3, HERE << "Not parsing new requests, as this request may need the connection");
+                 break;
+@@ -2130,8 +2107,19 @@ ConnStateData::clientParseRequests()
+         }
+     }
+ 
+-    /* XXX where to 'finish' the parsing pass? */
+-    return parsed_req;
++    debugs(33, 7, "buffered leftovers: " << inBuf.length());
++
++    if (isOpen() && commIsHalfClosed(clientConnection->fd)) {
++        if (pipeline.empty()) {
++            // we processed what we could parse, and no more data is coming
++            debugs(33, 5, "closing half-closed without parsed requests: " << clientConnection);
++            clientConnection->close();
++        } else {
++            // we parsed what we could, and no more data is coming
++            debugs(33, 5, "monitoring half-closed while processing parsed requests: " << clientConnection);
++            flags.readMore = false; // may already be false
++        }
++    }
+ }
+ 
+ void
+@@ -2148,23 +2136,7 @@ ConnStateData::afterClientRead()
+     if (pipeline.empty())
+         fd_note(clientConnection->fd, "Reading next request");
+ 
+-    if (!clientParseRequests()) {
+-        if (!isOpen())
+-            return;
+-        /*
+-         * If the client here is half closed and we failed
+-         * to parse a request, close the connection.
+-         * The above check with connFinishedWithConn() only
+-         * succeeds _if_ the buffer is empty which it won't
+-         * be if we have an incomplete request.
+-         * XXX: This duplicates ConnStateData::kick
+-         */
+-        if (pipeline.empty() && commIsHalfClosed(clientConnection->fd)) {
+-            debugs(33, 5, clientConnection << ": half-closed connection, no completed request parsed, connection closing.");
+-            clientConnection->close();
+-            return;
+-        }
+-    }
++    parseRequests();
+ 
+     if (!isOpen())
+         return;
+@@ -3945,7 +3917,7 @@ ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic)
+     startPinnedConnectionMonitoring();
+ 
+     if (pipeline.empty())
+-        kick(); // in case clientParseRequests() was blocked by a busy pic.connection
++        kick(); // in case parseRequests() was blocked by a busy pic.connection
+ }
+ 
+ /// Forward future client requests using the given server connection.
+diff --git a/src/client_side.h b/src/client_side.h
+index 9fe8463..dfb4d8e 100644
+--- a/src/client_side.h
++++ b/src/client_side.h
+@@ -85,7 +85,6 @@ public:
+     virtual void doneWithControlMsg();
+ 
+     /// Traffic parsing
+-    bool clientParseRequests();
+     void readNextRequest();
+ 
+     /// try to make progress on a transaction or read more I/O
+@@ -373,6 +372,7 @@ private:
+     virtual bool connFinishedWithConn(int size);
+     virtual void checkLogging();
+ 
++    void parseRequests();
+     void clientAfterReadingRequests();
+     bool concurrentRequestQueueFilled() const;
+ 
+diff --git a/src/tests/stub_client_side.cc b/src/tests/stub_client_side.cc
+index d7efb0f..655ed83 100644
+--- a/src/tests/stub_client_side.cc
++++ b/src/tests/stub_client_side.cc
+@@ -14,7 +14,7 @@
+ #include "tests/STUB.h"
+ 
+ #include "client_side.h"
+-bool ConnStateData::clientParseRequests() STUB_RETVAL(false)
++void ConnStateData::parseRequests() STUB
+ void ConnStateData::readNextRequest() STUB
+ bool ConnStateData::isOpen() const STUB_RETVAL(false)
+ void ConnStateData::kick() STUB
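After a parsing pass, parseRequests() now applies a small decision table to half-closed connections. Sketched standalone (illustrative only):

// Illustrative only: the two half-closed outcomes in parseRequests().
#include <iostream>

static void afterParsingPass(const bool halfClosed, const bool pipelineEmpty)
{
    if (!halfClosed)
        return; // keep reading as usual
    if (pipelineEmpty)
        std::cout << "closing half-closed without parsed requests\n";      // close()
    else
        std::cout << "monitoring half-closed while processing requests\n"; // readMore = false
}

int main()
{
    afterParsingPass(true, true);  // no pending work: close immediately
    afterParsingPass(true, false); // finish parsed requests, read no more
    return 0;
}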
diff --git a/SOURCES/squid-4.15-ignore-wsp-after-chunk-size.patch b/SOURCES/squid-4.15-ignore-wsp-after-chunk-size.patch
new file mode 100644
index 0000000..ea4025f
--- /dev/null
+++ b/SOURCES/squid-4.15-ignore-wsp-after-chunk-size.patch
@@ -0,0 +1,367 @@
+From 8d0ee420a4d91ac7fd97316338f1e28b4b060cbf Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= <luhliari@redhat.com>
+Date: Thu, 10 Oct 2024 19:26:27 +0200
+Subject: [PATCH 1/6] Ignore whitespace chars after chunk-size
+
+Previously (before the #1498 change), squid accepted TE-chunked replies
+with whitespace after the chunk-size and missing chunk-ext data. After
+the #1498 change, such replies were rejected.
+
+It turned out that replies with such whitespace chars are pretty
+common, and other webservers that can act as forward proxies (e.g.
+nginx, httpd...) accept them.
+
+This change allows Squid to proxy chunked responses from an origin
+server that puts whitespace between the chunk-size and the CRLF.
+---
+ src/http/one/TeChunkedParser.cc | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
+index 9cce10fdc91..04753395e16 100644
+--- a/src/http/one/TeChunkedParser.cc
++++ b/src/http/one/TeChunkedParser.cc
+@@ -125,6 +125,7 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
+     // Code becomes much simpler when incremental parsing functions throw on
+     // bad or insufficient input, like in the code below. TODO: Expand up.
+     try {
++        tok.skipAll(CharacterSet::WSP); // Some servers send SP/TAB after chunk-size
+         parseChunkExtensions(tok); // a possibly empty chunk-ext list
+         tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
+         buf_ = tok.remaining();
+
+From 9c8d35f899035fa06021ab3fe6919f892c2f0c6b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= <luhliari@redhat.com>
+Date: Fri, 11 Oct 2024 02:06:31 +0200
+Subject: [PATCH 2/6] Added new argument to Http::One::ParseBws()
+
+The new wsp_only argument to ParseBws() decides which set of whitespace
+characters is skipped. If wsp_only is set to true, only SP and HTAB
+chars are skipped.
+
+Also optimized the number of ParseBws() calls.
+---
+ src/http/one/Parser.cc          |  4 ++--
+ src/http/one/Parser.h           |  3 ++-
+ src/http/one/TeChunkedParser.cc | 13 +++++++++----
+ src/http/one/TeChunkedParser.h  |  2 +-
+ 4 files changed, 14 insertions(+), 8 deletions(-)
+
+diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
+index b1908316a0b..01d7e3bc0e8 100644
+--- a/src/http/one/Parser.cc
++++ b/src/http/one/Parser.cc
+@@ -273,9 +273,9 @@ Http::One::ErrorLevel()
+ 
+ // BWS = *( SP / HTAB ) ; WhitespaceCharacters() may relax this RFC 7230 rule
+ void
+-Http::One::ParseBws(Parser::Tokenizer &tok)
++Http::One::ParseBws(Parser::Tokenizer &tok, const bool wsp_only)
+ {
+-    const auto count = tok.skipAll(Parser::WhitespaceCharacters());
++    const auto count = tok.skipAll(wsp_only ? CharacterSet::WSP : Parser::WhitespaceCharacters());
+ 
+     if (tok.atEnd())
+         throw InsufficientInput(); // even if count is positive
+diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
+index d9a0ac8c273..08200371cd6 100644
+--- a/src/http/one/Parser.h
++++ b/src/http/one/Parser.h
+@@ -163,8 +163,9 @@ class Parser : public RefCountable
+ };
+ 
+ /// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
++/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimeter chars
+ /// \throws InsufficientInput when the end of BWS cannot be confirmed
+-void ParseBws(Parser::Tokenizer &);
++void ParseBws(Parser::Tokenizer &, const bool wsp_only = false);
+ 
+ /// the right debugs() level for logging HTTP violation messages
+ int ErrorLevel();
+diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
+index 04753395e16..41e1e5ddaea 100644
+--- a/src/http/one/TeChunkedParser.cc
++++ b/src/http/one/TeChunkedParser.cc
+@@ -125,8 +125,11 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
+     // Code becomes much simpler when incremental parsing functions throw on
+     // bad or insufficient input, like in the code below. TODO: Expand up.
+     try {
+-        tok.skipAll(CharacterSet::WSP); // Some servers send SP/TAB after chunk-size
+-        parseChunkExtensions(tok); // a possibly empty chunk-ext list
++        // A possibly empty chunk-ext list. If no chunk-ext has been found,
++        // try to skip trailing BWS, because some servers send "chunk-size BWS CRLF".
++        if (!parseChunkExtensions(tok))
++            ParseBws(tok, true);
++
+         tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
+         buf_ = tok.remaining();
+         parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
+@@ -140,20 +143,22 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
+ 
+ /// Parses the chunk-ext list (RFC 9112 section 7.1.1:
+ /// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
+-void
++bool
+ Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
+ {
++    bool foundChunkExt = false;
+     do {
+         auto tok = callerTok;
+ 
+         ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
+ 
+         if (!tok.skip(';'))
+-            return; // reached the end of extensions (if any)
++            return foundChunkExt; // reached the end of extensions (if any)
+ 
+         parseOneChunkExtension(tok);
+         buf_ = tok.remaining(); // got one extension
+         callerTok = tok;
++        foundChunkExt = true;
+     } while (true);
+ }
+ 
+diff --git a/src/http/one/TeChunkedParser.h b/src/http/one/TeChunkedParser.h
+index 02eacd1bb89..8c5d4bb4cba 100644
+--- a/src/http/one/TeChunkedParser.h
++++ b/src/http/one/TeChunkedParser.h
+@@ -71,7 +71,7 @@ class TeChunkedParser : public Http1::Parser
+ private:
+     bool parseChunkSize(Tokenizer &tok);
+     bool parseChunkMetadataSuffix(Tokenizer &);
+-    void parseChunkExtensions(Tokenizer &);
++    bool parseChunkExtensions(Tokenizer &);
+     void parseOneChunkExtension(Tokenizer &);
+     bool parseChunkBody(Tokenizer &tok);
+     bool parseChunkEnd(Tokenizer &tok);
+
+From 81e67f97f9c386bdd0bb4a5e182395c46adb70ad Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= 
+Date: Fri, 11 Oct 2024 02:44:33 +0200
+Subject: [PATCH 3/6] Fix typo in Parser.h
+
+---
+ src/http/one/Parser.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
+index 08200371cd6..3ef4c5f7752 100644
+--- a/src/http/one/Parser.h
++++ b/src/http/one/Parser.h
+@@ -163,7 +163,7 @@ class Parser : public RefCountable
+ };
+ 
+ /// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
+-/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimeter chars
++/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimiter chars
+ /// \throws InsufficientInput when the end of BWS cannot be confirmed
+ void ParseBws(Parser::Tokenizer &, const bool wsp_only = false);
+ 
+
+From a0d4fe1794e605f8299a5c118c758a807453f016 Mon Sep 17 00:00:00 2001
+From: Alex Rousskov 
+Date: Thu, 10 Oct 2024 22:39:42 -0400
+Subject: [PATCH 4/6] Bug 5449 is a regression of Bug 4492!
+
+Both bugs deal with "chunk-size SP+ CRLF" use cases. Bug 4492 had _two_
+spaces after chunk-size, which answers one of the PR review questions:
+Should we skip just one space? No, we should not.
+
+The lines moved around in many commits, but I believe this regression
+was introduced in commit 951013d0 because that commit stopped consuming
+partially parsed chunk-ext sequences. That consumption was wrong, but it
+had a positive side effect -- fixing Bug 4492...
+---
+ src/http/one/TeChunkedParser.cc | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
+index 41e1e5ddaea..aa4a840fdcf 100644
+--- a/src/http/one/TeChunkedParser.cc
++++ b/src/http/one/TeChunkedParser.cc
+@@ -125,10 +125,10 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
+     // Code becomes much simpler when incremental parsing functions throw on
+     // bad or insufficient input, like in the code below. TODO: Expand up.
+     try {
+-        // A possibly empty chunk-ext list. If no chunk-ext has been found,
+-        // try to skip trailing BWS, because some servers send "chunk-size BWS CRLF".
+-        if (!parseChunkExtensions(tok))
+-            ParseBws(tok, true);
++        // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
++        ParseBws(tok, true);
++
++        parseChunkExtensions(tok);
+ 
+         tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
+         buf_ = tok.remaining();
+@@ -150,7 +150,7 @@ Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
+     do {
+         auto tok = callerTok;
+ 
+-        ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
++        ParseBws(tok);
+ 
+         if (!tok.skip(';'))
+             return foundChunkExt; // reached the end of extensions (if any)
+
+From f837f5ff61301a17008f16ce1fb793c2abf19786 Mon Sep 17 00:00:00 2001
+From: Alex Rousskov 
+Date: Thu, 10 Oct 2024 23:06:42 -0400
+Subject: [PATCH 5/6] fixup: Fewer conditionals/ifs and more explicit spelling
+
+... to draw code reader attention when something unusual is going on.
+---
+ src/http/one/Parser.cc          | 22 ++++++++++++++++++----
+ src/http/one/Parser.h           | 10 ++++++++--
+ src/http/one/TeChunkedParser.cc | 14 ++++++--------
+ src/http/one/TeChunkedParser.h  |  2 +-
+ 4 files changed, 33 insertions(+), 15 deletions(-)
+
+diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
+index 01d7e3bc0e8..d3937e5e96b 100644
+--- a/src/http/one/Parser.cc
++++ b/src/http/one/Parser.cc
+@@ -271,11 +271,12 @@ Http::One::ErrorLevel()
+     return Config.onoff.relaxed_header_parser < 0 ? DBG_IMPORTANT : 5;
+ }
+ 
+-// BWS = *( SP / HTAB ) ; WhitespaceCharacters() may relax this RFC 7230 rule
+-void
+-Http::One::ParseBws(Parser::Tokenizer &tok, const bool wsp_only)
++/// common part of ParseBws() and ParseStrctBws()
++namespace Http::One {
++static void
++ParseBws_(Parser::Tokenizer &tok, const CharacterSet &bwsChars)
+ {
+-    const auto count = tok.skipAll(wsp_only ? CharacterSet::WSP : Parser::WhitespaceCharacters());
++    const auto count = tok.skipAll(bwsChars);
+ 
+     if (tok.atEnd())
+         throw InsufficientInput(); // even if count is positive
+@@ -290,4 +291,17 @@ Http::One::ParseBws(Parser::Tokenizer &tok, const bool wsp_only)
+ 
+     // success: no more BWS characters expected
+ }
++} // namespace Http::One
++
++void
++Http::One::ParseBws(Parser::Tokenizer &tok)
++{
++    ParseBws_(tok, CharacterSet::WSP);
++}
++
++void
++Http::One::ParseStrictBws(Parser::Tokenizer &tok)
++{
++    ParseBws_(tok, Parser::WhitespaceCharacters());
++}
+ 
+diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
+index 3ef4c5f7752..49e399de546 100644
+--- a/src/http/one/Parser.h
++++ b/src/http/one/Parser.h
+@@ -163,9 +163,15 @@ class Parser : public RefCountable
+ };
+ 
+ /// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
+-/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimiter chars
+ /// \throws InsufficientInput when the end of BWS cannot be confirmed
+-void ParseBws(Parser::Tokenizer &, const bool wsp_only = false);
++/// \sa WhitespaceCharacters() for the definition of BWS characters
++/// \sa ParseStrictBws() that avoids WhitespaceCharacters() uncertainties
++void ParseBws(Parser::Tokenizer &);
++
++/// Like ParseBws() but only skips CharacterSet::WSP characters. This variation
++/// must be used if the next element may start with CR or any other character
++/// from RelaxedDelimiterCharacters().
++void ParseStrictBws(Parser::Tokenizer &);
+ 
+ /// the right debugs() level for logging HTTP violation messages
+ int ErrorLevel();
+diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
+index aa4a840fdcf..859471b8c77 100644
+--- a/src/http/one/TeChunkedParser.cc
++++ b/src/http/one/TeChunkedParser.cc
+@@ -125,11 +125,11 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
+     // Code becomes much simpler when incremental parsing functions throw on
+     // bad or insufficient input, like in the code below. TODO: Expand up.
+     try {
+-        // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
+-        ParseBws(tok, true);
+-
+-        parseChunkExtensions(tok);
++        // Bug 4492: IBM_HTTP_Server sends SP after chunk-size.
++        // No ParseBws() here because it may consume CR required further below.
++        ParseStrictBws(tok);
+ 
++        parseChunkExtensions(tok); // a possibly empty chunk-ext list
+         tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
+         buf_ = tok.remaining();
+         parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
+@@ -143,22 +143,20 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
+ 
+ /// Parses the chunk-ext list (RFC 9112 section 7.1.1:
+ /// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
+-bool
++void
+ Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
+ {
+-    bool foundChunkExt = false;
+     do {
+         auto tok = callerTok;
+ 
+         ParseBws(tok);
+ 
+         if (!tok.skip(';'))
+-            return foundChunkExt; // reached the end of extensions (if any)
++            return; // reached the end of extensions (if any)
+ 
+         parseOneChunkExtension(tok);
+         buf_ = tok.remaining(); // got one extension
+         callerTok = tok;
+-        foundChunkExt = true;
+     } while (true);
+ }
+ 
+diff --git a/src/http/one/TeChunkedParser.h b/src/http/one/TeChunkedParser.h
+index 8c5d4bb4cba..02eacd1bb89 100644
+--- a/src/http/one/TeChunkedParser.h
++++ b/src/http/one/TeChunkedParser.h
+@@ -71,7 +71,7 @@ class TeChunkedParser : public Http1::Parser
+ private:
+     bool parseChunkSize(Tokenizer &tok);
+     bool parseChunkMetadataSuffix(Tokenizer &);
+-    bool parseChunkExtensions(Tokenizer &);
++    void parseChunkExtensions(Tokenizer &);
+     void parseOneChunkExtension(Tokenizer &);
+     bool parseChunkBody(Tokenizer &tok);
+     bool parseChunkEnd(Tokenizer &tok);
+
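+To see why the strict variant matters, consider the bytes left in the
+buffer after "chunk-size SP": " \r\n". A relaxed whitespace set that
+can include CR would consume the CR that skipRequired(CRLF) needs next,
+while the SP/HTAB-only set leaves the CRLF intact. A self-contained
+sketch (illustrative character sets, not Squid's CharacterSet class):
+
+    #include <cstdio>
+    #include <string>
+
+    // Skip every leading character of buf that appears in set;
+    // return how many characters were consumed.
+    static std::size_t skipAll(const std::string &buf, const std::string &set)
+    {
+        std::size_t pos = 0;
+        while (pos < buf.size() && set.find(buf[pos]) != std::string::npos)
+            ++pos;
+        return pos;
+    }
+
+    int main()
+    {
+        const std::string rest = " \r\n"; // remainder of "chunk-size SP CRLF"
+        // relaxed set (SP HTAB VT FF CR): eats the CR, so the CRLF check fails
+        std::printf("relaxed consumes %zu chars\n", skipAll(rest, " \t\v\f\r"));
+        // strict WSP set (SP HTAB): stops before CR, so the CRLF check succeeds
+        std::printf("strict consumes %zu chars\n", skipAll(rest, " \t"));
+        return 0;
+    }
+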
+From f79936a234e722adb2dd08f31cf6019d81ee712c Mon Sep 17 00:00:00 2001
+From: Alex Rousskov 
+Date: Thu, 10 Oct 2024 23:31:08 -0400
+Subject: [PATCH 6/6] fixup: Deadly typo
+
+---
+ src/http/one/Parser.cc | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
+index d3937e5e96b..7403a9163a2 100644
+--- a/src/http/one/Parser.cc
++++ b/src/http/one/Parser.cc
+@@ -296,12 +296,12 @@ ParseBws_(Parser::Tokenizer &tok, const CharacterSet &bwsChars)
+ void
+ Http::One::ParseBws(Parser::Tokenizer &tok)
+ {
+-    ParseBws_(tok, CharacterSet::WSP);
++    ParseBws_(tok, Parser::WhitespaceCharacters());
+ }
+ 
+ void
+ Http::One::ParseStrictBws(Parser::Tokenizer &tok)
+ {
+-    ParseBws_(tok, Parser::WhitespaceCharacters());
++    ParseBws_(tok, CharacterSet::WSP);
+ }
+ 
+
diff --git a/SOURCES/squid-4.15-ip-bind-address-no-port.patch b/SOURCES/squid-4.15-ip-bind-address-no-port.patch
new file mode 100644
index 0000000..85844ae
--- /dev/null
+++ b/SOURCES/squid-4.15-ip-bind-address-no-port.patch
@@ -0,0 +1,156 @@
+commit c08948c8b831a2ba73c676b48aa11ba1b58cc542
+Author: Tomas Korbar 
+Date:   Thu Dec 8 11:03:08 2022 +0100
+
+    Backport adding IP_BIND_ADDRESS_NO_PORT flag to outgoing connections
+
+diff --git a/src/comm.cc b/src/comm.cc
+index 0d5f34d..6811b54 100644
+--- a/src/comm.cc
++++ b/src/comm.cc
+@@ -58,6 +58,7 @@
+  */
+ 
+ static IOCB commHalfClosedReader;
++static int comm_openex(int sock_type, int proto, Ip::Address &, int flags, const char *note);
+ static void comm_init_opened(const Comm::ConnectionPointer &conn, const char *note, struct addrinfo *AI);
+ static int comm_apply_flags(int new_socket, Ip::Address &addr, int flags, struct addrinfo *AI);
+ 
+@@ -75,6 +76,7 @@ static EVH commHalfClosedCheck;
+ static void commPlanHalfClosedCheck();
+ 
+ static Comm::Flag commBind(int s, struct addrinfo &);
++static void commSetBindAddressNoPort(int);
+ static void commSetReuseAddr(int);
+ static void commSetNoLinger(int);
+ #ifdef TCP_NODELAY
+@@ -201,6 +203,22 @@ comm_local_port(int fd)
+     return F->local_addr.port();
+ }
+ 
++/// sets the IP_BIND_ADDRESS_NO_PORT socket option to optimize ephemeral port
++/// reuse by outgoing TCP connections that must bind(2) to a source IP address
++static void
++commSetBindAddressNoPort(const int fd)
++{
++#if defined(IP_BIND_ADDRESS_NO_PORT)
++    int flag = 1;
++    if (setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, reinterpret_cast<char*>(&flag), sizeof(flag)) < 0) {
++        const auto savedErrno = errno;
++        debugs(50, DBG_IMPORTANT, "ERROR: setsockopt(IP_BIND_ADDRESS_NO_PORT) failure: " << xstrerr(savedErrno));
++    }
++#else
++    (void)fd;
++#endif
++}
++
+ static Comm::Flag
+ commBind(int s, struct addrinfo &inaddr)
+ {
+@@ -227,6 +245,10 @@ comm_open(int sock_type,
+           int flags,
+           const char *note)
+ {
++    // assume zero-port callers do not need to know the assigned port right away
++    if (sock_type == SOCK_STREAM && addr.port() == 0 && ((flags & COMM_DOBIND) || !addr.isAnyAddr()))
++        flags |= COMM_DOBIND_PORT_LATER;
++
+     return comm_openex(sock_type, proto, addr, flags, note);
+ }
+ 
+@@ -328,7 +350,7 @@ comm_set_transparent(int fd)
+  * Create a socket. Default is blocking, stream (TCP) socket.  IO_TYPE
+  * is OR of flags specified in defines.h:COMM_*
+  */
+-int
++static int
+ comm_openex(int sock_type,
+             int proto,
+             Ip::Address &addr,
+@@ -476,6 +498,9 @@ comm_apply_flags(int new_socket,
+         if ( addr.isNoAddr() )
+             debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr << "!!");
+ 
++        if ((flags & COMM_DOBIND_PORT_LATER))
++            commSetBindAddressNoPort(new_socket);
++
+         if (commBind(new_socket, *AI) != Comm::OK) {
+             comm_close(new_socket);
+             return -1;
+diff --git a/src/comm.h b/src/comm.h
+index c963e1c..9ff201d 100644
+--- a/src/comm.h
++++ b/src/comm.h
+@@ -43,7 +43,6 @@ void comm_import_opened(const Comm::ConnectionPointer &, const char *note, struc
+ 
+ /**
+  * Open a port specially bound for listening or sending through a specific port.
+- * This is a wrapper providing IPv4/IPv6 failover around comm_openex().
+  * Please use for all listening sockets and bind() outbound sockets.
+  *
+  * It will open a socket bound for:
+@@ -59,7 +58,6 @@ void comm_import_opened(const Comm::ConnectionPointer &, const char *note, struc
+ int comm_open_listener(int sock_type, int proto, Ip::Address &addr, int flags, const char *note);
+ void comm_open_listener(int sock_type, int proto, Comm::ConnectionPointer &conn, const char *note);
+ 
+-int comm_openex(int, int, Ip::Address &, int, const char *);
+ unsigned short comm_local_port(int fd);
+ 
+ int comm_udp_sendto(int sock, const Ip::Address &to, const void *buf, int buflen);
+diff --git a/src/comm/ConnOpener.cc b/src/comm/ConnOpener.cc
+index 25a30e4..2082214 100644
+--- a/src/comm/ConnOpener.cc
++++ b/src/comm/ConnOpener.cc
+@@ -263,7 +263,7 @@ Comm::ConnOpener::createFd()
+     if (callback_ == NULL || callback_->canceled())
+         return false;
+ 
+-    temporaryFd_ = comm_openex(SOCK_STREAM, IPPROTO_TCP, conn_->local, conn_->flags, host_);
++    temporaryFd_ = comm_open(SOCK_STREAM, IPPROTO_TCP, conn_->local, conn_->flags, host_);
+     if (temporaryFd_ < 0) {
+         sendAnswer(Comm::ERR_CONNECT, 0, "Comm::ConnOpener::createFd");
+         return false;
+diff --git a/src/comm/Connection.h b/src/comm/Connection.h
+index 4f2f23a..1e32c22 100644
+--- a/src/comm/Connection.h
++++ b/src/comm/Connection.h
+@@ -47,6 +47,8 @@ namespace Comm
+ #define COMM_DOBIND             0x08  // requires a bind()
+ #define COMM_TRANSPARENT        0x10  // arrived via TPROXY
+ #define COMM_INTERCEPTION       0x20  // arrived via NAT
++/// Internal Comm optimization: Keep the source port unassigned until connect(2)
++#define COMM_DOBIND_PORT_LATER 0x100
+ 
+ /**
+  * Store data about the physical and logical attributes of a connection.
+diff --git a/src/ipc.cc b/src/ipc.cc
+index e1d48fc..e92a27f 100644
+--- a/src/ipc.cc
++++ b/src/ipc.cc
+@@ -95,12 +95,12 @@ ipcCreate(int type, const char *prog, const char *const args[], const char *name
+     } else void(0)
+ 
+     if (type == IPC_TCP_SOCKET) {
+-        crfd = cwfd = comm_open(SOCK_STREAM,
++        crfd = cwfd = comm_open_listener(SOCK_STREAM,
+                                 0,
+                                 local_addr,
+                                 COMM_NOCLOEXEC,
+                                 name);
+-        prfd = pwfd = comm_open(SOCK_STREAM,
++        prfd = pwfd = comm_open_listener(SOCK_STREAM,
+                                 0,          /* protocol */
+                                 local_addr,
+                                 0,          /* blocking */
+diff --git a/src/tests/stub_comm.cc b/src/tests/stub_comm.cc
+index 58f85e4..5381ab2 100644
+--- a/src/tests/stub_comm.cc
++++ b/src/tests/stub_comm.cc
+@@ -46,7 +46,6 @@ int comm_open_uds(int sock_type, int proto, struct sockaddr_un* addr, int flags)
+ void comm_import_opened(const Comm::ConnectionPointer &, const char *note, struct addrinfo *AI) STUB
+ int comm_open_listener(int sock_type, int proto, Ip::Address &addr, int flags, const char *note) STUB_RETVAL(-1)
+ void comm_open_listener(int sock_type, int proto, Comm::ConnectionPointer &conn, const char *note) STUB
+-int comm_openex(int, int, Ip::Address &, int, tos_t tos, nfmark_t nfmark, const char *) STUB_RETVAL(-1)
+ unsigned short comm_local_port(int fd) STUB_RETVAL(0)
+ int comm_udp_sendto(int sock, const Ip::Address &to, const void *buf, int buflen) STUB_RETVAL(-1)
+ void commCallCloseHandlers(int fd) STUB
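+
+The pattern this backport applies inside comm.cc can be shown on a bare
+socket. A minimal Linux-only sketch (the source address below is an
+example from the 192.0.2.0/24 documentation range):
+
+    #include <arpa/inet.h>
+    #include <netinet/in.h>
+    #include <sys/socket.h>
+    #include <unistd.h>
+    #include <cstdio>
+
+    int main()
+    {
+        const int fd = socket(AF_INET, SOCK_STREAM, 0);
+        if (fd < 0) { perror("socket"); return 1; }
+
+    #if defined(IP_BIND_ADDRESS_NO_PORT)
+        // Defer ephemeral port allocation from bind(2) to connect(2), so
+        // many outgoing sockets may share the bound source IP without
+        // exhausting local ports at bind time.
+        int flag = 1;
+        if (setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, &flag, sizeof(flag)) < 0)
+            perror("setsockopt(IP_BIND_ADDRESS_NO_PORT)"); // non-fatal, as in the patch
+    #endif
+
+        sockaddr_in src = {};
+        src.sin_family = AF_INET;
+        src.sin_port = htons(0); // port 0: let connect(2) pick it later
+        inet_pton(AF_INET, "192.0.2.10", &src.sin_addr);
+        if (bind(fd, reinterpret_cast<sockaddr *>(&src), sizeof(src)) < 0)
+            perror("bind");
+
+        // connect(fd, ...) would assign the source port here.
+        close(fd);
+        return 0;
+    }
+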
diff --git a/SOURCES/squid-4.15.tar.xz.asc b/SOURCES/squid-4.15.tar.xz.asc
new file mode 100644
index 0000000..7305eaa
--- /dev/null
+++ b/SOURCES/squid-4.15.tar.xz.asc
@@ -0,0 +1,25 @@
+File: squid-4.15.tar.xz
+Date: Mon 10 May 2021 10:50:22 UTC
+Size: 2454176
+MD5 : a593de9dc888dfeca4f1f7db2cd7d3b9
+SHA1: 60bda34ba39657e2d870c8c1d2acece8a69c3075
+Key : CD6DBF8EF3B17D3E 
+            B068 84ED B779 C89B 044E  64E3 CD6D BF8E F3B1 7D3E
+      keyring = http://www.squid-cache.org/pgp.asc
+      keyserver = pool.sks-keyservers.net
+-----BEGIN PGP SIGNATURE-----
+
+iQIzBAABCgAdFiEEsGiE7bd5yJsETmTjzW2/jvOxfT4FAmCZD/UACgkQzW2/jvOx
+fT6zZg/+N8JMIYpmVJ7jm4lF0Ub2kEHGTOrc+tnlA3LGnlMQuTm61+BYk58g0SKW
+96NbJ0cycW215Q34L+Y0tWuxEbIU01vIc3AA7rQd0LKy+fQU0OtBuhk5Vf4bKilW
+uHEVIQZs9HmY6bqC+kgtCf49tVZvR8FZYNuilg/68+i/pQdwaDDmVb+j2oF7w+y2
+dgkTFWtM5NTL6bqUVC0E7lLFPjzMefKfxkkpWFdV/VrAhU25jN24kpnjcfotQhdW
+LDFy5okduz3ljso9pBYJfLeMXM1FZPpceC91zj32x3tcUyrD3yIoXob58rEKvfe4
+RDXN4SuClsNe4UQ4oNoGIES9XtaYlOzPR1PlbqPUrdp1cDnhgLJ+1fkAixlMqCml
+wuI1VIKSEY+nvRzQzFHnXJK9otV8QwMF76AHaytO9y+X6JuZmu/CcV1pq61qY9qv
+t1/8z99wWSxpu17zthZgq64J225GF/hkBedaFlYoS5k5YUMDLPlRSCC0yPmb8JBF
+Cns5i/aq2PmOx2ZhQ2RQIF416J3HK8Galw8ytFOjnEcn4ux9yzKNjL38p4+PJJA0
+7GCMAqYYNjok3LSkGbiR7cPgbHnkqRfYbPFLMj4FtruoFlZ9L5MIU3oFvqA3ZR6l
+Az6LaKLsAYPUmukAOPUSIrqpKXZHc7hdBWkT+7RYA4qaoU+9oIo=
+=1Re1
+-----END PGP SIGNATURE-----
diff --git a/SOURCES/squid.logrotate b/SOURCES/squid.logrotate
new file mode 100644
index 0000000..4a0406f
--- /dev/null
+++ b/SOURCES/squid.logrotate
@@ -0,0 +1,16 @@
+/var/log/squid/*.log {
+    weekly
+    rotate 5
+    compress
+    notifempty
+    missingok
+    nocreate
+    sharedscripts
+    postrotate
+      # Asks squid to reopen its logs. (logfile_rotate 0 is set in squid.conf)
+      # errors redirected to make it silent if squid is not running
+      /usr/sbin/squid -k rotate 2>/dev/null
+      # Wait a little to allow Squid to catch up before the logs are compressed
+      sleep 1
+    endscript
+}
diff --git a/SOURCES/squid.nm b/SOURCES/squid.nm
new file mode 100755
index 0000000..5e40f76
--- /dev/null
+++ b/SOURCES/squid.nm
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+case "$2" in
+        up|down|vpn-up|vpn-down)
+                /bin/systemctl -q reload squid.service || :
+                ;;
+esac
diff --git a/SOURCES/squid.pam b/SOURCES/squid.pam
new file mode 100644
index 0000000..1d78594
--- /dev/null
+++ b/SOURCES/squid.pam
@@ -0,0 +1,3 @@
+#%PAM-1.0
+auth		include		password-auth
+account		include		password-auth
diff --git a/SOURCES/squid.service b/SOURCES/squid.service
new file mode 100644
index 0000000..6978032
--- /dev/null
+++ b/SOURCES/squid.service
@@ -0,0 +1,18 @@
+[Unit]
+Description=Squid caching proxy
+Documentation=man:squid(8)
+After=network.target network-online.target nss-lookup.target
+
+[Service]
+Type=notify
+LimitNOFILE=16384
+PIDFile=/run/squid.pid
+EnvironmentFile=/etc/sysconfig/squid
+ExecStartPre=/usr/libexec/squid/cache_swap.sh
+ExecStart=/usr/sbin/squid --foreground $SQUID_OPTS -f ${SQUID_CONF}
+ExecReload=/usr/bin/kill -HUP $MAINPID
+KillMode=mixed
+NotifyAccess=all
+
+[Install]
+WantedBy=multi-user.target
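+
+Type=notify means systemd waits for an explicit readiness message on
+$NOTIFY_SOCKET instead of guessing from fork behaviour, and
+NotifyAccess=all accepts that message from any process in the service's
+cgroup. A minimal sketch of the handshake using libsystemd (squid
+itself gained this integration with 4.11, per the %changelog below;
+build with -lsystemd):
+
+    #include <systemd/sd-daemon.h>
+
+    int main()
+    {
+        // ... parse squid.conf, open listening sockets, etc. ...
+        sd_notify(0, "READY=1");    // startup complete: unit becomes "active"
+        // ... serve requests ...
+        sd_notify(0, "STOPPING=1"); // optional: announce orderly shutdown
+        return 0;
+    }
+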
diff --git a/SOURCES/squid.sysconfig b/SOURCES/squid.sysconfig
new file mode 100644
index 0000000..3864bd8
--- /dev/null
+++ b/SOURCES/squid.sysconfig
@@ -0,0 +1,9 @@
+# default squid options
+SQUID_OPTS=""
+
+# Time to wait for Squid to shut down when asked. Should not be necessary
+# most of the time.
+SQUID_SHUTDOWN_TIMEOUT=100
+
+# default squid conf file
+SQUID_CONF="/etc/squid/squid.conf"
diff --git a/SPECS/squid.spec b/SPECS/squid.spec
new file mode 100644
index 0000000..66bd9f5
--- /dev/null
+++ b/SPECS/squid.spec
@@ -0,0 +1,1798 @@
+%define __perl_requires %{SOURCE98}
+
+Name:     squid
+Version:  4.15
+Release:  10%{?dist}.1
+Summary:  The Squid proxy caching server
+Epoch:    7
+# See CREDITS for breakdown of non GPLv2+ code
+License:  GPLv2+ and (LGPLv2+ and MIT and BSD and Public Domain)
+URL:      http://www.squid-cache.org
+Source0:  http://www.squid-cache.org/Versions/v4/squid-%{version}.tar.xz
+Source1:  http://www.squid-cache.org/Versions/v4/squid-%{version}.tar.xz.asc
+Source2:  squid.logrotate
+Source3:  squid.sysconfig
+Source4:  squid.pam
+Source5:  squid.nm
+Source6:  squid.service
+Source7:  cache_swap.sh
+
+Source98: perl-requires-squid.sh
+
+# Upstream patches
+
+# Backported patches
+# https://bugzilla.redhat.com/show_bug.cgi?id=2072988
+Patch101: squid-4.15-ip-bind-address-no-port.patch
+
+# Local patches
+# Applying upstream patches first makes it less likely that local patches
+# will break upstream ones.
+Patch201: squid-4.11-config.patch
+Patch202: squid-4.11-location.patch
+Patch203: squid-4.11-perlpath.patch
+Patch204: squid-4.11-include-guards.patch
+Patch205: squid-4.11-large-acl.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=980511
+Patch206: squid-4.11-active-ftp.patch
+Patch208: squid-4.11-convert-ipv4.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2006121
+Patch209: squid-4.15-ftp-filename-extraction.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2076717
+Patch210: squid-4.15-halfclosed.patch
+
+# Security fixes
+# https://bugzilla.redhat.com/show_bug.cgi?id=1941506
+Patch300: squid-4.15-CVE-2021-28116.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2100721
+Patch301: squid-4.15-CVE-2021-46784.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2129771
+Patch302: squid-4.15-CVE-2022-41318.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2245910
+# +backported: https://github.com/squid-cache/squid/commit/417da4006cf5c97d44e74431b816fc58fec9e270
+Patch303: squid-4.15-CVE-2023-46846.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2245916
+Patch304: squid-4.15-CVE-2023-46847.patch
+# https://issues.redhat.com/browse/RHEL-14792
+Patch305: squid-4.15-CVE-2023-5824.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2248521
+Patch306: squid-4.15-CVE-2023-46728.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2247567
+Patch307: squid-4.15-CVE-2023-46724.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2252926
+Patch308: squid-4.15-CVE-2023-49285.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2252923
+Patch309: squid-4.15-CVE-2023-49286.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2254663
+Patch310: squid-4.15-CVE-2023-50269.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2264309
+Patch311: squid-4.15-CVE-2024-25617.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2268366
+Patch312: squid-4.15-CVE-2024-25111.patch
+# Regression caused by squid-4.15-CVE-2023-46846.patch
+# Upstream PR: https://github.com/squid-cache/squid/pull/1914
+Patch313: squid-4.15-ignore-wsp-after-chunk-size.patch
+
+
+Requires: bash >= 2.0
+Requires(pre): shadow-utils
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+# squid_ldap_auth and other LDAP helpers require OpenLDAP
+BuildRequires: openldap-devel
+# squid_pam_auth requires PAM development libs
+BuildRequires: pam-devel
+# SSL support requires OpenSSL
+BuildRequires: openssl-devel
+# squid_kerb_auth requires Kerberos development libs
+BuildRequires: krb5-devel
+# time_quota requires DB
+BuildRequires: libdb-devel
+# ESI support requires Expat & libxml2
+BuildRequires: expat-devel libxml2-devel
+# TPROXY requires libcap, and also increases security somewhat
+BuildRequires: libcap-devel
+# eCAP support
+BuildRequires: libecap-devel
+# ip_user helper requires gcc-c++
+BuildRequires: gcc-c++
+BuildRequires: libtool libtool-ltdl-devel
+BuildRequires: perl-generators
+# For test suite
+BuildRequires: pkgconfig(cppunit)
+BuildRequires: autoconf
+# systemd notify
+BuildRequires: systemd-devel
+
+%description
+Squid is a high-performance proxy caching server for Web clients,
+supporting FTP, gopher, and HTTP data objects. Unlike traditional
+caching software, Squid handles all requests in a single,
+non-blocking, I/O-driven process. Squid keeps meta data and especially
+hot objects cached in RAM, caches DNS lookups, supports non-blocking
+DNS lookups, and implements negative caching of failed requests.
+
+Squid consists of a main server program squid, a Domain Name System
+lookup program (dnsserver), a program for retrieving FTP data
+(ftpget), and some management and client tools.
+
+%prep
+%setup -q
+
+# Upstream patches
+
+# Backported patches
+%patch101 -p1 -b .ip-bind-address-no-port
+
+# Local patches
+%patch201 -p1 -b .config
+%patch202 -p1 -b .location
+%patch203 -p1 -b .perlpath
+%patch204 -p1 -b .include-guards
+%patch205 -p1 -b .large_acl
+%patch206 -p1 -b .active-ftp
+%patch208 -p1 -b .convert-ipv4
+%patch209 -p1 -b .ftp-fn-extraction
+%patch210 -p1 -b .halfclosed
+
+# Security patches
+%patch300 -p1 -b .CVE-2021-28116
+%patch301 -p1 -b .CVE-2021-46784
+%patch302 -p1 -b .CVE-2022-41318
+%patch303 -p1 -b .CVE-2023-46846
+%patch304 -p1 -b .CVE-2023-46847
+%patch305 -p1 -b .CVE-2023-5824
+%patch306 -p1 -b .CVE-2023-46728
+%patch307 -p1 -b .CVE-2023-46724
+%patch308 -p1 -b .CVE-2023-49285
+%patch309 -p1 -b .CVE-2023-49286
+%patch310 -p1 -b .CVE-2023-50269
+%patch311 -p1 -b .CVE-2024-25617
+%patch312 -p1 -b .CVE-2024-25111
+%patch313 -p1 -b .ignore-wsp-chunk-sz
+
+# https://bugzilla.redhat.com/show_bug.cgi?id=1679526
+# Patch in the vendor documentation and use a different location for documentation
+sed -i 's|@SYSCONFDIR@/squid.conf.documented|%{_pkgdocdir}/squid.conf.documented|' src/squid.8.in
+
+%build
+# cppunit-config patch changes configure.ac
+autoconf
+
+# NIS helper has been removed because of the following bug
+# https://bugzilla.redhat.com/show_bug.cgi?id=1531540
+%configure \
+   --libexecdir=%{_libdir}/squid \
+   --datadir=%{_datadir}/squid \
+   --sysconfdir=%{_sysconfdir}/squid \
+   --with-logdir='%{_localstatedir}/log/squid' \
+   --with-pidfile='%{_localstatedir}/run/squid.pid' \
+   --disable-dependency-tracking \
+   --enable-eui \
+   --enable-follow-x-forwarded-for \
+   --enable-auth \
+   --enable-auth-basic="DB,fake,getpwnam,LDAP,NCSA,PAM,POP3,RADIUS,SASL,SMB,SMB_LM" \
+   --enable-auth-ntlm="SMB_LM,fake" \
+   --enable-auth-digest="file,LDAP" \
+   --enable-auth-negotiate="kerberos" \
+   --enable-external-acl-helpers="LDAP_group,time_quota,session,unix_group,wbinfo_group,kerberos_ldap_group" \
+   --enable-storeid-rewrite-helpers="file" \
+   --enable-cache-digests \
+   --enable-cachemgr-hostname=localhost \
+   --enable-delay-pools \
+   --enable-epoll \
+   --enable-icap-client \
+   --enable-ident-lookups \
+   %ifnarch %{power64} ia64 x86_64 s390x aarch64
+   --with-large-files \
+   %endif
+   --enable-linux-netfilter \
+   --enable-removal-policies="heap,lru" \
+   --enable-snmp \
+   --enable-ssl \
+   --enable-ssl-crtd \
+   --enable-storeio="aufs,diskd,ufs,rock" \
+   --enable-diskio \
+   --enable-wccpv2 \
+   --enable-esi \
+   --enable-ecap \
+   --with-aio \
+   --with-default-user="squid" \
+   --with-dl \
+   --with-openssl \
+   --with-pthreads \
+   --disable-arch-native \
+   --disable-security-cert-validators \
+   --with-swapdir=%{_localstatedir}/spool/squid
+
+%make_build
+
+%check
+make check
+
+%install
+%make_install
+
+echo "
+#
+# This is %{_sysconfdir}/httpd/conf.d/squid.conf
+#
+
+ScriptAlias /Squid/cgi-bin/cachemgr.cgi %{_libdir}/squid/cachemgr.cgi
+
+# Only allow access from localhost by default
+<Location /Squid/cgi-bin/cachemgr.cgi>
+ Require local
+ # Add additional allowed hosts as needed
+ # Require host example.com
+</Location>
+" > $RPM_BUILD_ROOT/squid.httpd.tmp
+
+
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/pam.d
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.d/
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/NetworkManager/dispatcher.d
+mkdir -p $RPM_BUILD_ROOT%{_unitdir}
+mkdir -p $RPM_BUILD_ROOT%{_libexecdir}/squid
+install -m 644 %{SOURCE2} $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/squid
+install -m 644 %{SOURCE3} $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig/squid
+install -m 644 %{SOURCE4} $RPM_BUILD_ROOT%{_sysconfdir}/pam.d/squid
+install -m 644 %{SOURCE6} $RPM_BUILD_ROOT%{_unitdir}
+install -m 755 %{SOURCE7} $RPM_BUILD_ROOT%{_libexecdir}/squid
+install -m 644 $RPM_BUILD_ROOT/squid.httpd.tmp $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.d/squid.conf
+install -m 644 %{SOURCE5} $RPM_BUILD_ROOT%{_sysconfdir}/NetworkManager/dispatcher.d/20-squid
+mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/log/squid
+mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/spool/squid
+mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/run/squid
+chmod 644 contrib/url-normalizer.pl contrib/user-agents.pl
+iconv -f ISO88591 -t UTF8 ChangeLog -o ChangeLog.tmp
+mv -f ChangeLog.tmp ChangeLog
+
+# install /usr/lib/tmpfiles.d/squid.conf
+mkdir -p ${RPM_BUILD_ROOT}%{_tmpfilesdir}
+cat > ${RPM_BUILD_ROOT}%{_tmpfilesdir}/squid.conf <<EOF
+d /run/squid 0755 squid squid
+EOF
+
+%pre
+if ! getent group squid >/dev/null 2>&1; then
+  /usr/sbin/groupadd -g 23 squid
+fi
+
+if ! getent passwd squid >/dev/null 2>&1 ; then
+  /usr/sbin/useradd -g 23 -u 23 -d /var/spool/squid -r -s /sbin/nologin squid >/dev/null 2>&1 || exit 1 
+fi
+
+for i in /var/log/squid /var/spool/squid ; do
+        if [ -d $i ] ; then
+                for adir in `find $i -maxdepth 0 \! -user squid`; do
+                        chown -R squid:squid $adir
+                done
+        fi
+done
+
+exit 0
+
+%pretrans -p <lua>
+-- Due to bug #447156
+paths = {"/usr/share/squid/errors/zh-cn", "/usr/share/squid/errors/zh-tw"}
+for key,path in ipairs(paths)
+do
+  st = posix.stat(path)
+  if st and st.type == "directory" then
+    status = os.rename(path, path .. ".rpmmoved")
+    if not status then
+      suffix = 0
+      while not status do
+        suffix = suffix + 1
+        status = os.rename(path .. ".rpmmoved", path .. ".rpmmoved." .. suffix)
+      end
+      os.rename(path, path .. ".rpmmoved")
+    end
+  end
+end
+
+%post
+%systemd_post squid.service
+
+%preun
+%systemd_preun squid.service
+
+%postun
+%systemd_postun_with_restart squid.service
+
+%triggerin -- samba-common
+if ! getent group wbpriv >/dev/null 2>&1 ; then
+  /usr/sbin/groupadd -g 88 wbpriv >/dev/null 2>&1 || :
+fi
+/usr/sbin/usermod -a -G wbpriv squid >/dev/null 2>&1 || \
+    chgrp squid /var/cache/samba/winbindd_privileged >/dev/null 2>&1 || :
+
+
+%changelog
+* Mon Oct 14 2024 Luboš Uhliarik  - 7:4.15-10.1
+- Resolves: RHEL-56024 - (Regression) Transfer-encoding:chunked data is not sent
+  to the client in its entirety
+
+* Tue Mar 19 2024 Luboš Uhliarik  - 7:4.15-10
+- Resolves: RHEL-28529 - squid:4/squid: Denial of Service in HTTP Chunked
+  Decoding (CVE-2024-25111)
+- Resolves: RHEL-26088 - squid:4/squid: denial of service in HTTP header
+  parser (CVE-2024-25617)
+
+* Fri Feb 02 2024 Luboš Uhliarik  - 7:4.15-9
+- Resolves: RHEL-19552 - squid:4/squid: denial of service in HTTP request 
+  parsing (CVE-2023-50269)
+
+* Fri Feb 02 2024 Luboš Uhliarik  - 7:4.15-8
+- Resolves: RHEL-18351 - squid:4/squid: Buffer over-read in the HTTP Message
+  processing feature (CVE-2023-49285)
+- Resolves: RHEL-18342 - squid:4/squid: Incorrect Check of Function Return
+  Value In Helper Process management (CVE-2023-49286)
+- Resolves: RHEL-18230 - squid:4/squid: Denial of Service in SSL Certificate
+  validation (CVE-2023-46724)
+- Resolves: RHEL-15911 - squid:4/squid: NULL pointer dereference in the gopher
+  protocol code (CVE-2023-46728)
+- Resolves: RHEL-18251 - squid crashes in assertion when a parent peer exists
+- Resolves: RHEL-14794 - squid: squid multiple issues in HTTP response caching
+  (CVE-2023-5824)
+- Resolves: RHEL-14803 - squid: squid: Denial of Service in HTTP Digest
+  Authentication (CVE-2023-46847)
+- Resolves: RHEL-14777 - squid: squid: Request/Response smuggling in HTTP/1.1
+  and ICAP (CVE-2023-46846)
+
+* Wed Aug 16 2023 Luboš Uhliarik  - 7:4.15-7
+- Resolves: #2076717 - Crash with half_closed_client on
+
+* Thu Dec 08 2022 Tomas Korbar  - 4.15-6
+- Resolves: #2072988 - [RFE] Add the "IP_BIND_ADDRESS_NO_PORT"
+  flag to sockets created for outgoing connections in the squid source code.
+
+* Wed Sep 28 2022 Luboš Uhliarik  - 7:4.15-5
+- Resolves: #2130260 - CVE-2022-41318 squid:4/squid: buffer-over-read in SSPI and SMB
+  authentication
+
+* Tue Jun 28 2022 Luboš Uhliarik  - 7:4.15-4
+- Resolves: #2100783 - CVE-2021-46784 squid:4/squid: DoS when processing gopher
+  server responses
+
+* Wed Feb 09 2022 Luboš Uhliarik  - 7:4.15-3
+- Resolves: #1941506 - CVE-2021-28116 squid:4/squid: out-of-bounds read in WCCP
+  protocol data may lead to information disclosure
+
+* Tue Jan 25 2022 Luboš Uhliarik  - 7:4.15-2
+- Resolves: #2006121 - SQUID shortens FTP Link wrong that contains a semi-colon
+  and as a result is not able to download zip file. (CODE 404 TO CLIENT)
+
+* Fri Jun 18 2021 Luboš Uhliarik  - 7:4.15-1
+- new version 4.15
+- Resolves: #1964384 - squid:4 rebase to 4.15
+
+* Wed Mar 31 2021 Lubos Uhliarik  - 7:4.11-5
+- Resolves: #1944261 - CVE-2020-25097 squid:4/squid: improper input validation
+  may allow a trusted client to perform HTTP Request Smuggling
+
+* Mon Oct 26 2020 Lubos Uhliarik  - 7:4.11-4
+- Resolves: #1890606 - Fix for CVE 2019-13345 breaks authentication in
+  cachemgr.cgi
+
+* Wed Aug 26 2020 Lubos Uhliarik  - 7:4.11-3
+- Resolves: #1871705 - CVE-2020-24606 squid: Improper Input Validation could
+  result in a DoS
+- Resolves: #1871702 - CVE-2020-15811 squid: HTTP Request Splitting could result
+  in cache poisoning
+- Resolves: #1871700 - CVE-2020-15810 squid: HTTP Request Smuggling could result
+  in cache poisoning
+
+* Thu Jul 02 2020 Lubos Uhliarik  - 7:4.11-2
+- Resolves: #1853130 - CVE-2020-15049 squid:4/squid: request smuggling and
+  poisoning attack against the HTTP cache
+- Resolves: #1853136 - CVE-2020-14058 squid:4/squid: DoS in TLS handshake
+
+* Thu May 07 2020 Lubos Uhliarik  - 7:4.11-1
+- new version 4.11
+- libsystemd integration
+- Resolves: #1829467 - squid:4 rebase
+- Resolves: #1828378 - CVE-2019-12521 squid:4/squid: off-by-one error in
+  addStackElement allows for a heap buffer overflow and a crash
+- Resolves: #1828377 - CVE-2019-12520 squid:4/squid: improper input validation
+  in request allows for proxy manipulation
+- Resolves: #1828375 - CVE-2019-12524 squid:4/squid: improper access restriction
+  in url_regex may lead to security bypass
+- Resolves: #1820664 - CVE-2019-18860 squid: mishandles HTML in the host
+  parameter to cachemgr.cgi which could result in squid behaving in unsecure way
+- Resolves: #1802514 - CVE-2020-8449 squid:4/squid: Improper input validation
+  issues in HTTP Request processing
+- Resolves: #1802513 - CVE-2020-8450 squid:4/squid: Buffer overflow in a Squid
+  acting as reverse-proxy
+- Resolves: #1802512 - CVE-2019-12528 squid:4/squid: Information Disclosure
+  issue in FTP Gateway
+- Resolves: #1771288 - CVE-2019-18678 squid:4/squid: HTTP Request Splitting
+  issue in HTTP message processing
+- Resolves: #1771283 - CVE-2019-18679 squid:4/squid: Information Disclosure
+  issue in HTTP Digest Authentication
+- Resolves: #1771280 - CVE-2019-18677 squid:4/squid: Cross-Site Request Forgery
+  issue in HTTP Request processing
+- Resolves: #1771275 - CVE-2019-12523 squid:4/squid: Improper input validation
+  in URI processor
+- Resolves: #1771272 - CVE-2019-18676 squid:4/squid: Buffer overflow in URI
+  processor
+- Resolves: #1771264 - CVE-2019-12526 squid:4/squid: Heap overflow issue in URN
+  processing
+- Resolves: #1738581 - CVE-2019-12529 squid: OOB read in Proxy-Authorization
+  header causes DoS
+
+* Tue Apr 28 2020 Lubos Uhliarik  - 7:4.4-9
+- Resolves: #1738583 - CVE-2019-12525 squid:4/squid: parsing of header
+  Proxy-Authentication leads to memory corruption
+- Resolves: #1828369 - CVE-2020-11945 squid: improper access restriction upon
+  Digest Authentication nonce replay could lead to remote code execution
+- Resolves: #1828370 - CVE-2019-12519 squid: improper check for new member in
+  ESIExpression::Evaluate allows for stack buffer overflow
+
+* Fri Aug 23 2019 Lubos Uhliarik  - 7:4.4-8
+- Resolves: # 1738485 - CVE-2019-12527 squid:4/squid: heap-based buffer overflow
+  in HttpHeader::getAuth
+
+* Wed Jul 31 2019 Lubos Uhliarik  - 7:4.4-7
+- Resolves: #1729436 - CVE-2019-13345 squid: XSS via user_name or auth parameter
+  in cachemgr.cgi
+
+* Fri Jun 21 2019 Lubos Uhliarik  - 7:4.4-6
+- Resolves: #1679526 - Missing detailed configuration file
+- Resolves: #1703117 - RHEL 7 to 8 fails with squid installed because dirs
+  changed to symlinks
+- Resolves: #1691741 - Squid cache_peer DNS lookup failed when not all lower
+  case
+- Resolves: #1683527 - "Reloading" message on a fresh reboot after enabling
+  squid
+
+* Tue Dec 11 2018 Lubos Uhliarik  - 7:4.4-4
+- Resolves: #1612524 - Man page scan results for squid 
+
+* Tue Dec 11 2018 Lubos Uhliarik  - 7:4.4-3
+- Resolves: #1642384 - squid doesn't work with active ftp
+
+* Tue Dec 11 2018 Lubos Uhliarik  - 7:4.4-2
+- Resolves: #1657847 - Unable to start Squid in Selinux Enforcing mode
+
+* Mon Dec 10 2018 Lubos Uhliarik  - 7:4.4-1
+- Resolves: #1656871 - squid rebase to 4.4
+- Resolves: #1645148 - CVE-2018-19131 squid: Cross-Site Scripting when
+  generating HTTPS response messages about TLS errors
+- Resolves: #1645156 - CVE-2018-19132 squid: Memory leak in SNMP query
+  rejection code
+
+* Mon Aug 06 2018 Lubos Uhliarik  - 7:4.2-1
+- new version 4.2
+- enable back strict error checking
+
+* Wed Aug 01 2018 Luboš Uhliarik  - 7:4.1-1
+- new version 4.1
+
+* Mon Jun 04 2018 Luboš Uhliarik  - 7:4.0.23-5
+- Resolves: #1585617 - Build against libdb only instead of libdb4
+- disabled strict checking for now (squid can not be built with GCC8)
+
+* Mon Apr 16 2018 Luboš Uhliarik  - 7:4.0.23-4
+- Resolves: #1566055 - module squid cannot be installed due to missing
+  perl(Crypt::OpenSSL::X509)
+
+* Fri Feb 09 2018 Fedora Release Engineering  - 7:4.0.23-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild
+
+* Tue Jan 23 2018 Luboš Uhliarik  - 7:4.0.23-2
+- Resolves: #1481195 - squid loses some REs when optimising ACLs
+
+* Tue Jan 23 2018 Luboš Uhliarik  - 7:4.0.23-1
+- new version 4.0.23
+
+* Sat Jan 20 2018 Björn Esser  - 7:4.0.22-2
+- Rebuilt for switch to libxcrypt
+
+* Wed Jan 17 2018 Luboš Uhliarik  - 7:4.0.22-1
+- new version 4.0.22
+- Removed NIS helper (#1531540)
+
+* Mon Aug 07 2017 Luboš Uhliarik  - 7:4.0.21-1
+- new version 4.0.21
+
+* Thu Aug 03 2017 Fedora Release Engineering  - 7:4.0.20-4
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild
+
+* Thu Jul 27 2017 Fedora Release Engineering  - 7:4.0.20-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild
+
+* Mon Jun 05 2017 Luboš Uhliarik  - 7:4.0.20-2
+- related: new version 4.0.20
+
+* Mon Jun 05 2017 Luboš Uhliarik  - 7:4.0.20-1
+- new version 4.0.20
+
+* Tue Apr 25 2017 Luboš Uhliarik  - 7:4.0.19-4
+- Related: #1445255 - Squid SMP Mode Fails
+
+* Tue Apr 25 2017 Luboš Uhliarik  - 7:4.0.19-3
+- Resolves: #1445255 - Squid SMP Mode Fails
+
+* Tue Apr 18 2017 Luboš Uhliarik  - 7:4.0.19-2
+- Resolves: #1442375 - squid helper squid_kerb_ldap not included in package
+
+* Mon Apr 03 2017 Luboš Uhliarik  - 7:4.0.19-1
+- new version 4.0.19
+
+* Thu Mar 30 2017 Luboš Uhliarik  - 7:4.0.18-1
+- new version 4.0.18
+
+* Sat Feb 11 2017 Fedora Release Engineering  - 7:4.0.17-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild
+
+* Mon Dec 19 2016 Luboš Uhliarik  - 7:4.0.17-1
+- new version 4.0.17
+
+* Mon Oct 31 2016 Luboš Uhliarik  - 7:4.0.16-1
+- new version 4.0.16
+
+* Mon Oct 10 2016 Luboš Uhliarik  - 7:4.0.15-1
+- new version 4.0.15
+
+* Mon Sep 12 2016 Luboš Uhliarik  - 7:4.0.14-1
+- new version 4.0.14
+
+* Tue Aug 09 2016 Luboš Uhliarik  - 7:4.0.13-1
+- new version 4.0.13
+
+* Mon Jul 11 2016 Henrik Nordstrom  - 7:4.0.11-1
+- new version 4.0.11
+
+* Wed May 18 2016 Luboš Uhliarik  - 7:3.5.19-2
+- Resolves: #1336993 - Mistyped command in dirname
+  /etc/NetworkManager/dispatcher.d/20-squid
+
+* Tue May 10 2016 Luboš Uhliarik  - 7:3.5.19-1
+- new version 3.5.19
+
+* Thu May 05 2016 Luboš Uhliarik  - 7:3.5.17-1
+- new version 3.5.17
+
+* Tue Apr 05 2016 Luboš Uhliarik  - 7:3.5.16-1
+- new version 3.5.16
+
+* Tue Mar 01 2016 Luboš Uhliarik  - 7:3.5.15-1
+- new version 3.5.15
+- Resolves: #1311585 - squid: Multiple Denial of Service issues in
+  HTTP Response processing
+- Resolves: #1312267 - squid: SQUID-2016_2 advisory, multiple DoS issues
+
+* Tue Mar 01 2016 Luboš Uhliarik  - 7:3.5.13-3
+- Resolves: #1308866 - CVE-2016-2390 squid: incorrect server error 
+  handling resulting in denial of service
+
+* Fri Feb 05 2016 Fedora Release Engineering  - 7:3.5.13-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild
+
+* Fri Jan 08 2016 Luboš Uhliarik  - 7:3.5.13-1
+- new version 3.5.13
+
+* Thu Dec 03 2015 Luboš Uhliarik  - 7:3.5.12-2
+- new version 3.5.12
+
+* Fri Sep 25 2015 Luboš Uhliarik  - 7:3.5.9-3
+- Resolves: #1231992
+
+* Fri Sep 25 2015 Luboš Uhliarik  - 7:3.5.9-2
+- Resolves: #1230501
+
+* Thu Sep 24 2015 Luboš Uhliarik  - 7:3.5.9-1
+- new version 3.5.9
+- added patch which fixes a problem with include guards
+
+* Fri Jun 19 2015 Fedora Release Engineering  - 7:3.5.3-5
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild
+
+* Sat May 02 2015 Kalev Lember  - 7:3.5.3-4
+- Rebuilt for GCC 5 C++11 ABI change
+
+* Tue Mar 31 2015 Pavel Šimerda  - 7:3.5.3-3
+- Fix build by removing eDirectory support
+
+* Tue Mar 31 2015 Pavel Šimerda  - 7:3.5.3-2
+- clean up defunct patches
+
+* Tue Mar 31 2015 Pavel Šimerda  - 7:3.5.3-1
+- new version 3.5.3
+
+* Mon Mar 23 2015 Pavel Šimerda  - 7:3.5.2-4
+- Resolves: #1145235, #1173488, #1176318 – revert a couple of recent changes
+
+* Sun Mar 15 2015 Henrik Nordstrom  - 7:3.5.2-3
+- Correct execmod build issue caused by libtool confusion on
+  required compiler flags
+
+* Sun Mar 15 2015 Henrik Nordstrom  - 7:3.5.2-2
+- Update to latest upstream version 3.5.2
+- Remove deprecated (and renamed) squid_msnt_auth basic auth helper. Only
+  performs LM authentication and is not considered useful in today's networks.
+
+* Wed Feb 25 2015 Henrik Nordstrom  - 7:3.4.12-1
+- Update to latest upstream version 3.4.12
+- bug #1173946: Disable -march=native compile time optimization, use Fedora defaults.
+
+* Tue Nov 18 2014 Henrik Nordstrom  - 7:3.4.9-3
+- Update to latest upstream version 3.4.9
+
+* Sun Oct 19 2014 Peter Robinson  7:3.4.7-3
+- Update ppc64 macro to cover little endian too
+
+* Thu Sep 11 2014 Michal Luscon  - 7:3.4.7-2
+- Fixed: CVE-2014-6270
+
+* Thu Aug 28 2014 Michal Luscon  - 7:3.4.7-1
+- Update to latest upstream version
+- Fixed: CVE-2014-3609
+
+* Thu Aug 21 2014 Kevin Fenzi  - 7:3.4.6-3
+- Rebuild for rpm bug 1131960
+
+* Mon Aug 18 2014 Fedora Release Engineering  - 7:3.4.6-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild
+
+* Wed Jul 2 2014 Michal Luscon  - 7:3.4.6-1
+- Update to latest upstream version 3.4.6
+
+* Fri Jun 13 2014 Michal Luscon  - 7:3.4.5-6
+- Fixed #855111: set unlimited start and shutdown timeout
+
+* Sun Jun 08 2014 Michal Luscon  - 7:3.4.5-5
+- Run squid under user and group squid
+
+* Sun Jun 08 2014 Fedora Release Engineering  - 7:3.4.5-4
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild
+
+* Tue May 27 2014 Michal Luscon  - 7:3.4.5-3
+- Remove sysvinit subpackage 
+- Enable rock store
+
+* Fri May 23 2014 Michal Luscon  - 7:3.4.5-2
+- Fixed #1099970: missing /var/run/squid folder
+- Reverted #1038160: breaks SMP mode
+
+* Tue May 6 2014 Michal Luscon  - 7:3.4.5-1
+- Update to latest upstream version 3.4.5
+
+* Fri Apr 25 2014 Michal Luscon  - 7:3.4.4.2-1
+- Update to latest upstream version 3.4.4.2
+
+* Thu Mar 13 2014 Pavel Šimerda  - 7:3.4.4-1
+- bump to 3.4.4
+
+* Tue Feb 04 2014 Henrik Nordstrom  - 7:3.4.3-1
+- Update to latest upstream bugfix version 3.4.3
+
+* Mon Jan 06 2014 Pavel Šimerda  - 7:3.4.1-2
+- Resolves: #1038160 - avoid running squid's own supervisor process
+
+* Mon Dec 30 2013 Michal Luscon  - 7:3.4.1-1
+- Rebase to latest stable upstream release 3.4.1
+- Fixed #1034306: fails to build for AArch64
+- Fixed: active ftp
+
+* Tue Dec 03 2013 Henrik Nordstrom  - 7:3.3.11-1
+- Update to latest upstream bugfix version 3.3.11
+
+* Fri Sep 13 2013 Michal Luscon  - 7:3.3.9-1
+- Update to latest upstream version 3.3.9
+- Fixed #976815: file descriptors are hard coded to 16384
+- Fixed: active ftp crashing
+- Fixed: offset of patches
+
+* Thu Aug 08 2013 Michal Luscon  - 7:3.3.8-3
+- Fixed #994814: enable time_quota helper
+
+* Sun Aug 04 2013 Fedora Release Engineering  - 7:3.3.8-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild
+
+* Mon Jul 22 2013 Michal Luscon  - 7:3.3.8-1
+- Update to latest upstream version 3.3.8
+- Fixed: CVE-2013-4123
+- Fixed: CVE-2013-4115
+
+* Wed Jul 17 2013 Petr Pisar  - 7:3.3.4-3
+- Perl 5.18 rebuild
+
+* Wed May  8 2013 Ville Skyttä  - 7:3.3.4-2
+- Fix basic auth and log daemon DB helper builds.
+- Use xz compressed tarball, fix source URLs.
+- Fix bogus dates in %%changelog.
+
+* Fri May 3 2013 Michal Luscon  - 7:3.3.4-1
+- Rebase to latest upstream version 3.3.4
+
+* Tue Apr 23 2013 Michal Luscon  - 7:3.2.9-3
+- Option '-k' is not stated in squidclient man
+- Remove pid from service file(#913262)
+
+* Fri Apr 19 2013 Michal Luscon  - 7:3.2.9-2
+- Enable full RELRO (-Wl,-z,relro -Wl,-z,now)
+
+* Tue Mar 19 2013 Michal Luscon  - 7:3.2.9-1
+- Update to latest upstream version 3.2.9
+- Fixed: CVE-2013-1839
+- Removed: makefile-patch (+make check)
+
+* Mon Mar 11 2013 Michal Luscon  - 7:3.2.8-3
+- Resolved: /usr move - squid service file
+
+* Sat Mar 09 2013 Michal Luscon  - 7:3.2.8-2
+- Resolved: #896127 - basic_ncsa_auth does not work
+
+* Fri Mar 08 2013 Michal Luscon  - 7:3.2.8-1
+- Update to latest upstream version 3.2.8
+- Fixed rawhide build issues (-make check)
+
+* Thu Feb 07 2013 Michal Luscon  - 7:3.2.7-1
+- Update to latest upstream version 3.2.7
+
+* Thu Jan 24 2013 Michal Luscon  - 7:3.2.5-2
+- CVE-2013-0189: Incomplete fix for the CVE-2012-5643
+
+* Mon Dec 17 2012 Michal Luscon  - 7:3.2.5-1
+- Update to latest upstream version 3.2.5
+
+* Mon Nov 05 2012 Michal Luscon  - 7:3.2.3-3
+- Resolved: #71483 - httpd 2.4 requires new configuration directives
+
+* Fri Oct 26 2012 Michal Luscon  - 7:3.2.3-2
+- Resolved: #854356 - squid.service use PIDFile
+- Resolved: #859393 - Improve cache_swap script
+- Resolved: #791129 - disk space warning
+- Resolved: #862252 - reload on VPN or network up/down
+- Resolved: #867531 - run test suite during build
+- Resolved: #832684 - missing after dependency nss-lookup.target
+- Removed obsolete configure options
+
+* Mon Oct 22 2012 Tomas Hozza  - 7:3.2.3-1
+- Update to latest upstream version 3.2.3
+
+* Tue Oct 16 2012 Tomas Hozza  - 7:3.2.2-1
+- Update to latest upstream version 3.2.2
+
+* Fri Oct 05 2012 Tomas Hozza  - 7:3.2.1-2
+- Introduced new systemd-rpm macros in squid spec file. (#850326)
+
+* Wed Aug 29 2012 Michal Luscon  - 7:3.2.1-1
+- Update to latest upstream 3.2.1
+
+* Sat Jul 21 2012 Fedora Release Engineering  - 7:3.2.0.16-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild
+
+* Mon Apr 02 2012 Henrik Nordstrom  - 7:3.2.0.16-2
+- Enable SSL CRTD for ssl bump
+
+* Wed Mar 07 2012 Henrik Nordstrom  - 7:3.2.0.16-1
+- Upstream 3.2.0.16 bugfix release
+
+* Tue Feb 28 2012 Fedora Release Engineering  - 7:3.2.0.15-2
+- Rebuilt for c++ ABI breakage
+
+* Mon Feb 06 2012 Henrik Nordstrom  - 7:3.2.0.15-1
+- Upstream 3.2.0.15 bugfix release
+
+* Wed Feb 01 2012 Henrik Nordstrom  - 7:3.2.0.14-7
+- update with upstreamed patch versions
+
+* Tue Jan 17 2012 Henrik Nordstrom  - 7:3.2.0.14-6
+- upstream gcc-4.7 patch
+- fix for bug #772483 running out of memory, mem_node growing out of bounds
+
+* Mon Jan 16 2012 Jiri Skala  - 7:3.2.0.14-5
+- fixes FTBFS due to gcc-4.7
+
+* Fri Jan 13 2012 Jiri Skala  - 7:3.2.0.14-4
+- fixes #772481 - Low number of open files for squid process
+- fixes FTBFS due to gcc4.7
+
+* Thu Jan 05 2012 Henrik Nordstrom  - 3.2.0.14-3
+- rebuild for gcc-4.7.0
+
+* Mon Dec 19 2011 Jiri Skala  - 7:3.2.0.14-2
+- fixes #768586 - Please enable eCAP support again
+
+* Wed Dec 14 2011 Jiri Skala  - 7:3.2.0.14-1
+- update to latest upstream 3.2.0.14
+
+* Mon Nov 07 2011 Jiri Skala  - 7:3.2.0.13-5
+- fixes #751679 - host_strict_verify setting inverted in squid.conf
+
+* Thu Nov 03 2011 Jiri Skala  - 7:3.2.0.13-4
+- fixes #750550 - Squid might depend on named
+
+* Wed Oct 26 2011 Jiri Skala  - 7:3.2.0.13-3
+- added upstream fix for #747125
+
+* Wed Oct 26 2011 Jiri Skala  - 7:3.2.0.13-2
+- fixes #747103 - squid does not start if /var/spool/squid is empty
+- fixes #747110 - squid does not start adding "memory_pools off"
+
+* Mon Oct 17 2011 Jiri Skala  - 7:3.2.0.13-1
+- update to latest upstream 3.2.0.13
+
+* Tue Sep 20 2011 Jiri Skala  - 7:3.2.0.12-1
+- update to latest upstream 3.2.0.12
+
+* Mon Aug 29 2011 Henrik Nordstrom  - 7:3.2.0.11-3
+- update to latest upstream 3.2.0.11
+
+* Sat Aug 27 2011 Henrik Nordstrom  - 7:3.2.0.10-3
+- Fix for SQUID-2011:3 Gopher vulnerability
+
+* Thu Aug 18 2011 Jiri Skala  - 7:3.2.0.10-2
+- rebuild for rpm
+
+* Mon Aug 01 2011 Jiri Skala  - 7:3.2.0.10-1
+- update to latest upstream 3.2.0.10
+
+* Mon Aug 01 2011 Jiri Skala  - 7:3.2.0.9-2
+- rebuild for libcap
+
+* Tue Jun 07 2011 Jiri Skala  - 7:3.2.0.9-1
+- upgrade to squid-3.2
+- fixes #720445 - Provide native systemd unit file
+- SysV initscript moved to subpackage
+- temporarily disabled eCAP
+
+* Wed May 18 2011 Jiri Skala  - 7:3.1.12-3
+- enabled eCAP support
+
+* Wed May 04 2011 Jiri Skala  - 7:3.1.12-2
+- applied corrections of unused patch (Ismail Dönmez)
+
+* Fri Apr 15 2011 Jiri Skala  - 7:3.1.12-1
+- Update to 3.1.12 upstream release
+
+* Thu Feb 10 2011 Jiri Skala  - 7:3.1.11-1
+- Update to 3.1.11 upstream release
+- fixes issue with unused variable after mass rebuild (gcc-4.6)
+
+* Wed Feb 09 2011 Fedora Release Engineering  - 7:3.1.10-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild
+
+* Thu Jan 06 2011 Jiri Skala  - 7:3.1.10-1
+- Update to 3.1.10 upstream release
+
+* Fri Nov 05 2010 Jiri Skala  - 7:3.1.9-5
+- rebuild for libxml2
+
+* Mon Nov 01 2010 Jiri Skala  - 7:3.1.9-4
+- fixes #647967 - build with -fPIE option back and dropped proper libltdl usage
+
+* Sat Oct 30 2010 Henrik Nordstrom  - 7:3.1.9-3
+- Bug #647967 - License clarification & spec-file cleanup
+
+* Mon Oct 25 2010 Henrik Nordstrom  7:3.1.9-2
+- Upstream 3.1.9 bugfix release
+
+* Wed Oct 13 2010 Jiri Skala  - 7:3.1.8-2
+- fixes #584161 - squid userid not added to wbpriv group
+
+* Sun Sep 05 2010 Henrik Nordstrom  - 7:3.1.8-1
+- Bug #630445: SQUID-2010:3 Denial of service issue
+
+* Tue Aug 24 2010 Henrik Nordstrom  - 7:3.1.7-1
+- Upstream 3.1.7 bugfix release
+
+* Fri Aug 20 2010 Henrik Nordstrom  - 7:3.1.6-1
+- Upstream 3.1.6 bugfix release
+- Build with system libtool-ltdl
+
+* Thu Jul 15 2010 Henrik Nordstrom  - 7:3.1.5-2
+- Upstream 3.1.5 bugfix release
+- Upstream patch for Bug #614665: Squid crashes with ident auth
+- Upstream patches for various memory leaks
+
+* Mon May 31 2010 Henrik Nordstrom  - 7:3.1.4-2
+- Correct case-insensitiveness in HTTP list header parsing
+
+* Sun May 30 2010 Henrik Nordstrom  - 7:3.1.4-1
+- Upstream 3.1.4 bugfix release, issues relating to IPv6, TPROXY, Memory
+  management, follow_x_forwarded_for, and stability fixes
+
+* Fri May 14 2010 Henrik Nordstrom  - 7:3.1.3-2
+- Fully fix #548903 - "comm_open: socket failure: (97) Address family not supported by protocol" if IPv6 disabled
+- Various IPv6 related issues fixed, making tcp_outgoing_address behave
+  as expected and no commResetFD warnings when using tproxy setups.
+
+* Sun May 02 2010 Henrik Nordstrom  - 7:3.1.3-1
+- Update to 3.1.3 Upstream bugfix release, fixing WCCPv1
+
+* Mon Apr 19 2010 Henrik Nordstrom  - 7:3.1.1-4
+- Bug #583489: Adjust logrotate script to changes in logrotate package.
+
+* Mon Apr 19 2010 Jiri Skala 
+- fixes #548903 - "comm_open: socket failure: (97) Address family not supported by protocol" if IPv6 disabled
+
+* Tue Mar 30 2010 Henrik Nordstrom  - 7:3.1.1-2
+- Update to 3.1.1 Squid bug #2827 crash with assertion failed:
+  FilledChecklist.cc:90: "conn() != NULL" under high load.
+
+* Mon Mar 15 2010 Henrik Nordstrom  - 7:3.1.0.18-1
+- Upgrade to 3.1.0.18 fixing Digest authentication and improved HTTP/1.1 support
+
+* Sun Feb 28 2010 Henrik Nordstrom  -  7:3.1.0.17-3
+- Bug 569120, fails to open unbound ipv4 listening sockets
+
+* Thu Feb 25 2010 Henrik Nordstrom  - 7:3.1.0.17-2
+- Upgrade to 3.1.0.17
+
+* Thu Feb 18 2010 Henrik Nordstrom  - 7:3.1.0.16-7
+- Workaround for Fedora-13 build failure
+
+* Sun Feb 14 2010 Henrik Nordstrom  - 7:3.1.0.16-6
+- Patch for Squid security advisory SQUID-2010:2, denial of service
+  issue in HTCP processing (CVE-2010-0639)
+
+* Sun Feb 07 2010 Henrik Nordstrom  - 7:3.1.0.16-5
+- Rebuild 3.1.0.16 with corrected upstream release.
+
+* Wed Feb 03 2010 Jiri Skala  - 7:3.1.0.16-4
+- spec file modified to be fedora packaging guidline compliant
+- little shifting lines in init script header due to rpmlint complaint
+- fixes assertion during start up
+
+* Mon Feb 01 2010 Henrik Nordstrom  7:3.1.0.16-3
+- Upgrade to 3.1.0.16 for DNS related DoS fix (Squid-2010:1)
+
+* Sat Jan 09 2010 Henrik Nordstrom  - 7:3.1.0.15-3
+- fixed #551302 - PROXY needs libcap; also increases security a little
+- merged relevant upstream bugfixes waiting for next 3.1 release
+
+* Mon Nov 23 2009 Henrik Nordstrom  - 7:3.1.0.15-2
+- Update to 3.1.0.15 with a number of bugfixes and a workaround for
+  ICEcast/SHOUTcast streams.
+
+* Mon Nov 23 2009 Jiri Skala  7:3.1.0.14-2
+- fixed #532930 Syntactic error in /etc/init.d/squid
+- fixed #528453 cannot initialize cache_dir with user specified config file
+
+* Sun Sep 27 2009 Henrik Nordstrom  - 7:3.1.0.14-1
+- Update to 3.1.0.14
+
+* Sat Sep 26 2009 Henrik Nordstrom  - 7:3.1.0.13-7
+- Include upstream patches fixing important operational issues
+- Enable ESI support now that it does not conflict with normal operation
+
+* Fri Sep 18 2009 Henrik Nordstrom  - 7:3.1.0.13-6
+- Rotate store.log if enabled
+
+* Wed Sep 16 2009 Tomas Mraz  - 7:3.1.0.13-5
+- Use password-auth common PAM configuration instead of system-auth
+
+* Tue Sep 15 2009 Jiri Skala  - 7:3.1.0.13-4
+- fixed #521596 - wrong return code of init script
+
+* Tue Sep 08 2009 Henrik Nordstrom  - 7:3.1.0.13-3
+- Enable squid_kerb_auth
+
+* Mon Sep 07 2009 Henrik Nordstrom  - 7:3.1.0.13-2
+- Cleaned up packaging to ease future maintenance
+
+* Fri Sep 04 2009 Henrik Nordstrom  - 7:3.1.0.13-1
+- Upgrade to next upstream release 3.1.0.13 with many new features
+  * IPv6 support
+  * NTLM-passthru
+  * Kerberos/Negotiate authentication scheme support
+  * Localized error pages based on browser language preferences
+  * Follow X-Forwarded-For capability
+  * and more..
+
+* Mon Aug 31 2009 Henrik Nordstrom  - 3.0.STABLE18-3
+- Bug #520445 silence logrotate when Squid is not running
+
+* Fri Aug 21 2009 Tomas Mraz  - 7:3.0.STABLE18-2
+- rebuilt with new openssl
+
+* Tue Aug 04 2009 Henrik Nordstrom  - 7:3.0.STABLE18-1
+- Update to 3.0.STABLE18
+
+* Sat Aug 01 2009 Henrik Nordstrom  - 7:3.0.STABLE17-3
+- Squid Bug #2728: regression: assertion failed: http.cc:705: "!eof"
+
+* Mon Jul 27 2009 Henrik Nordstrom  - 7:3.0.STABLE17-2
+- Bug #514014, update to 3.0.STABLE17 fixing the denial of service issues
+  mentioned in Squid security advisory SQUID-2009_2.
+
+* Sun Jul 26 2009 Fedora Release Engineering  - 7:3.0.STABLE16-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild
+
+* Wed Jul 01 2009 Jiri Skala  7:3.0.STABLE16-2
+- fixed patch parameter of bXXX patches
+
+* Mon Jun 29 2009 Henrik Nordstrom  - 7:3.0.STABLE16-1
+- Upgrade to 3.0.STABLE16
+
+* Sat May 23 2009 Henrik Nordstrom  - 7:3.0.STABLE15-2
+- Bug #453304 - Squid requires restart after Network Manager connection setup
+
+* Sat May 09 2009 Henrik Nordstrom  - 7:3.0.STABLE15-1
+- Upgrade to 3.0.STABLE15
+
+* Tue Apr 28 2009 Jiri Skala  - 7:3.0.STABLE14-3
+- fixed ambiguous condition in the init script (exit 4)
+
+* Mon Apr 20 2009 Henrik Nordstrom  - 7:3.0.STABLE14-2
+- Squid bug #2635: assertion failed: HttpHeader.cc:1196: "Headers[id].type == ftInt64"
+
+* Sun Apr 19 2009 Henrik Nordstrom  - 7:3.0.STABLE14-1
+- Upgrade to 3.0.STABLE14
+
+* Fri Mar 06 2009 Henrik Nordstrom  - 7:3.0.STABLE13-2
+- backported logfile.cc syslog parameters patch from 3.1 (b9443.patch)
+- GCC-4.4 workaround in src/wccp2.cc
+
+* Wed Feb 25 2009 Fedora Release Engineering  - 7:3.0.STABLE13-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_11_Mass_Rebuild
+
+* Thu Feb 5 2009 Jonathan Steffan  - 7:3.0.STABLE13-1
+- upgrade to latest upstream
+
+* Tue Jan 27 2009 Henrik Nordstrom  - 7:3.0.STABLE12-1
+- upgrade to latest upstream
+
+* Sun Jan 18 2009 Tomas Mraz  - 7:3.0.STABLE10-4
+- rebuild with new openssl
+
+* Fri Dec 19 2008 Henrik Nordstrom  - 7:3.0.STABLE10-3
+- actually include the upstream bugfixes in the build
+
+* Fri Dec 19 2008 Henrik Nordstrom  - 7:3.0.STABLE10-2
+- upstream bugfixes for cache corruption and access.log response size errors
+
+* Fri Oct 24 2008 Henrik Nordstrom  - 7:3.0.STABLE10-1
+- upgrade to latest upstream
+
+* Sun Oct 19 2008 Henrik Nordstrom  - 7:3.0.STABLE9-2
+- disable coss support, not officially supported in 3.0
+
+* Sun Oct 19 2008 Henrik Nordstrom  - 7:3.0.STABLE9-1
+- update to latest upstream
+
+* Thu Oct 09 2008 Henrik Nordstrom  - 7:3.0.STABLE7-4
+- change logrotate to move instead of copytruncate
+
+* Wed Oct 08 2008 Jiri Skala  - 7:3.0.STABLE7-3
+- fix #465052 - FTBFS squid-3.0.STABLE7-1.fc10
+
+* Thu Aug 14 2008 Jiri Skala  - 7:3.0.STABLE7-2
+- used ncsa_auth.8 from man-pages; the file will be removed there due to the conflict
+- fix #458593 noisy initscript
+- fix #463129 init script tests wrong conf file
+- fix #450352 - build.patch patches only generated files
+
+* Wed Jul 02 2008 Jiri Skala  - 7:3.0.STABLE7-1
+- update to latest upstream
+- fix #453214
+
+* Mon May 26 2008 Martin Nagy  - 7:3.0.STABLE6-2
+- fix bad allocation
+
+* Wed May 21 2008 Martin Nagy  - 7:3.0.STABLE6-1
+- upgrade to latest upstream
+- fix bad allocation
+
+* Fri May 09 2008 Martin Nagy  - 7:3.0.STABLE5-2
+- fix configure detection of netfilter kernel headers (#435499),
+  patch by aoliva@redhat.com
+- add support for negotiate authentication (#445337)
+
+* Fri May 02 2008 Martin Nagy  - 7:3.0.STABLE5-1
+- upgrade to latest upstream
+
+* Tue Apr 08 2008 Martin Nagy  - 7:3.0.STABLE4-1
+- upgrade to latest upstream
+
+* Thu Apr 03 2008 Martin Nagy  - 7:3.0.STABLE2-2
+- add %%{optflags} to make
+- remove warnings about unused return values
+
+* Thu Mar 13 2008 Martin Nagy  - 7:3.0.STABLE2-1
+- upgrade to latest upstream 3.0.STABLE2
+- check config file before starting (#428998)
+- whitespace unification of init script
+- some minor path changes in the QUICKSTART file
+- configure with the --with-filedescriptors=16384 option
+
+* Tue Feb 26 2008 Martin Nagy  - 7:3.0.STABLE1-3
+- change the cache_effective_group default back to none
+
+* Mon Feb 11 2008 Martin Nagy  - 7:3.0.STABLE1-2
+- rebuild for 4.3
+
+* Wed Jan 23 2008 Martin Nagy  - 7:3.0.STABLE1-1
+- upgrade to latest upstream 3.0.STABLE1
+
+* Tue Dec 04 2007 Martin Bacovsky  - 2.6.STABLE17-1
+- upgrade to latest upstream 2.6.STABLE17
+
+* Wed Oct 31 2007 Martin Bacovsky  - 7:2.6.STABLE16-3
+- arp-acl was enabled
+
+* Tue Sep 25 2007 Martin Bacovsky  - 7:2.6.STABLE16-2
+- our fd_config patch was replaced by upstream's version 
+- Source1 (FAQ.sgml) points to local source (upstream's moved to wiki)
+
+* Fri Sep 14 2007 Martin Bacovsky  - 7:2.6.STABLE16-1
+- upgrade to latest upstream 2.6.STABLE16
+
+* Wed Aug 29 2007 Fedora Release Engineering  - 7:2.6.STABLE14-2
+- Rebuild for selinux ppc32 issue.
+
+* Thu Jul 19 2007 Martin Bacovsky  - 7:2.6.STABLE14-1
+- update to latest upstream 2.6.STABLE14
+- resolves: #247064: Initscript Review
+
+* Tue Mar 27 2007 Martin Bacovsky  - 7:2.6.STABLE12-1
+- update to latest upstream 2.6.STABLE12
+- Resolves: #233913: squid: unowned directory
+
+* Mon Feb 19 2007 Martin Bacovsky  - 7:2.6.STABLE9-2
+- Resolves: #226431: Merge Review: squid
+
+* Mon Jan 29 2007 Martin Bacovsky  - 7:2.6.STABLE9-1
+- update to the latest upstream
+
+* Sun Jan 14 2007 Martin Stransky  - 7:2.6.STABLE7-1
+- update to the latest upstream
+
+* Tue Dec 12 2006 Martin Stransky  - 7:2.6.STABLE6-1
+- update to the latest upstream
+
+* Mon Nov  6 2006 Martin Stransky  - 7:2.6.STABLE5-1
+- update to the latest upstream
+
+* Thu Oct 26 2006 Martin Stransky  - 7:2.6.STABLE4-4
+- added fix for #205568 - marked cachemgr.conf as world readable
+
+* Wed Oct 25 2006 Martin Stransky  - 7:2.6.STABLE4-3
+- added fix for #183869 - squid can abort when getting status
+- added upstream fixes:
+    * Bug #1796: Assertion error HttpHeader.c:914: "str"
+    * Bug #1779: Delay pools fairness, correction to first patch
+    * Bug #1802: Crash on exit in certain conditions where cache.log is not writeable
+    * Bug #1779: Delay pools fairness when multiple connections compete for bandwidth
+    * Clarify the select/poll/kqueue/epoll configure --enable/disable options
+- reworked fd patch for STABLE4
+
+* Tue Oct 17 2006 Martin Stransky  - 7:2.6.STABLE4-2
+- upstream fixes:
+  * Accept 00:00-24:00 as a valid time specification (upstream BZ #1794)
+  * aioDone() could be called twice
+  * Squid reconfiguration (upstream BZ #1800)
+
+* Mon Oct 2 2006 Martin Stransky  - 7:2.6.STABLE4-1
+- new upstream
+- fixes from upstream bugzilla, items #1782, #1780, #1785, #1719, #1784, #1776
+
+* Tue Sep 5 2006 Martin Stransky  - 7:2.6.STABLE3-2
+- added upstream patches for ACL
+
+* Mon Aug 21 2006 Martin Stransky  - 7:2.6.STABLE3-1
+- the latest stable upstream
+
+* Thu Aug 10 2006 Karsten Hopp  7:2.6.STABLE2-3
+- added some requirements for pre/post install scripts
+
+* Fri Aug 04 2006 Martin Stransky  - 7:2.6.STABLE2-2
+- added patch for #198253 - squid: don't chgrp another pkg's
+  files/directory
+
+* Mon Jul 31 2006 Martin Stransky  - 7:2.6.STABLE2-1
+- the latest stable upstream
+- reworked fd config patch
+
+* Tue Jul 25 2006 Martin Stransky  - 7:2.6.STABLE1-3
+- the latest CVS upstream snapshot
+
+* Wed Jul 19 2006 Martin Stransky  - 7:2.6.STABLE1-2
+- the latest CVS snapshot
+
+* Tue Jul 18 2006 Martin Stransky  - 7:2.6.STABLE1-1
+- new upstream + the latest CVS snapshot from 2006/07/18
+- updated fd config patch
+- enabled epoll
+- fixed release format (#197405)
+- enabled WCCPv2 support (#198642)
+
+* Wed Jul 12 2006 Jesse Keating  - 7:2.5.STABLE14-2.1
+- rebuild
+
+* Thu Jun 8 2006 Martin Stransky  - 7:2.5.STABLE14-2
+- fix for squid BZ#1511 - assertion failed: HttpReply.c:105: "rep"
+
+* Tue May 30 2006 Martin Stransky  - 7:2.5.STABLE14-1
+- update to new upstream
+
+* Sun May 28 2006 Martin Stransky  - 7:2.5.STABLE13-5
+- fixed libbind patch (#193298)
+
+* Wed May 3  2006 Martin Stransky  - 7:2.5.STABLE13-4
+- added extra group check (#190544)
+
+* Wed Mar 29 2006 Martin Stransky  - 7:2.5.STABLE13-3
+- improved pre script (#187217) - added group switch
+
+* Thu Mar 23 2006 Martin Stransky  - 7:2.5.STABLE13-2
+- removed "--with-large-files" on 64bit arches
+
+* Mon Mar 13 2006 Martin Stransky  - 7:2.5.STABLE13-1
+- update to new upstream
+
+* Fri Feb 10 2006 Jesse Keating  - 7:2.5.STABLE12-5.1
+- bump again for double-long bug on ppc(64)
+
+* Tue Feb 07 2006 Martin Stransky  - 7:2.5.STABLE12-5
+- new upstream patches
+
+* Tue Feb 07 2006 Jesse Keating  - 7:2.5.STABLE12-4.1
+- rebuilt for new gcc4.1 snapshot and glibc changes
+
+* Wed Dec 28 2005  Martin Stransky  7:2.5.STABLE12-4
+- added follow-xff patch (#176055)
+- samba path fix (#176659)
+
+* Mon Dec 19 2005  Martin Stransky  7:2.5.STABLE12-3
+- fd-config.patch clean-up
+- SMB_BadFetch patch from upstream
+
+* Fri Dec 09 2005 Jesse Keating 
+- rebuilt
+
+* Mon Nov 28 2005  Martin Stransky  7:2.5.STABLE12-2
+- rewritten patch squid-2.5.STABLE10-64bit.patch; it works with
+  the "--with-large-files" option now
+- fix for #72896 - squid does not support > 1024 file descriptors;
+  added a new "--enable-fd-config" option for it.
+
+* Wed Nov 9 2005  Martin Stransky  7:2.5.STABLE12-1
+- update to STABLE12
+- setenv patch
+
+* Mon Oct 24 2005 Martin Stransky  7:2.5.STABLE11-6
+- fix for delay pool from upstream
+
+* Thu Oct 20 2005 Martin Stransky  7:2.5.STABLE11-5
+- fix for #171213 - CVE-2005-3258 Squid crash due to malformed FTP response
+- more fixes from upstream
+
+* Fri Oct 14 2005 Martin Stransky  7:2.5.STABLE11-4
+- enabled support for large files (#167503)
+
+* Thu Oct 13 2005 Tomas Mraz  7:2.5.STABLE11-3
+- use include instead of pam_stack in pam config
+
+* Thu Sep 29 2005 Martin Stransky  7:2.5.STABLE11-2
+- added patch for delay pools and some minor fixes
+
+* Fri Sep 23 2005 Martin Stransky  7:2.5.STABLE11-1
+- update to STABLE11
+
+* Mon Sep 5 2005 Martin Stransky  7:2.5.STABLE10-4
+- Three upstream patches for #167414
+- Spanish and Greek messages
+- patch for -D_FORTIFY_SOURCE=2 
+
+* Tue Aug 30 2005 Martin Stransky  7:2.5.STABLE10-3
+- removed "--enable-truncate" option (#165948)
+- added "--enable-cache-digests" option (#102134)
+- added "--enable-ident-lookups" option (#161640)
+- some clean up (#165949)
+
+* Fri Jul 15 2005 Martin Stransky  7:2.5.STABLE10-2
+- pam_auth and ncsa_auth have setuid (#162660)
+
+* Thu Jul 7 2005 Martin Stransky  7:2.5.STABLE10-1
+- new upstream version
+- enabled fakeauth utility (#154020)
+- enabled digest authentication scheme (#155882)
+- all error pages marked as config (#127836)
+- patch for 64bit statvfs interface (#153274)
+- added httpd config file for cachemgr.cgi (#112725)
+
+* Mon May 16 2005 Jay Fenlason  7:2.5.STABLE9-7
+- Upgrade the upstream -dns_query patch from -4 to -5
+
+* Wed May 11 2005 Jay Fenlason  7:2.5.STABLE9-6
+- More upstream patches, including a fix for
+  bz#157456 CAN-2005-1519 DNS lookups unreliable on untrusted networks
+
+* Tue Apr 26 2005 Jay Fenlason  7:2.5.STABLE9-5
+- more upstream patches, including a fix for
+  CVE-1999-0710 cachemgr malicious use
+
+* Fri Apr 22 2005 Jay Fenlason  7:2.5.STABLE9-4
+- More upstream patches, including the fixed 2GB patch.
+- include the -libbind patch, which prevents squid from using the optional
+  -lbind library, even if it's installed.
+
+* Tue Mar 15 2005 Jay Fenlason  7:2.5.STABLE9-2
+- New upstream version, with 14 upstream patches.
+
+* Wed Feb 16 2005 Jay Fenlason  7:2.5.STABLE8-2
+- new upstream version with 4 upstream patches.
+- Reorganize spec file to apply upstream patches first
+
+* Tue Feb 1 2005 Jay Fenlason  7:2.5.STABLE7-4
+- Include two more upstream patches for security vulns:
+  bz#146783 Correct handling of oversized reply headers
+  bz#146778 CAN-2005-0211 Buffer overflow in WCCP recvfrom() call
+
+* Tue Jan 25 2005 Jay Fenlason  7:2.5.STABLE7-3
+- Include more upstream patches, including two for security holes.
+
+* Tue Jan 18 2005 Jay Fenlason  7:2.5.STABLE7-2
+- Add a triggerin on samba-common to make /var/cache/samba/winbindd_privileged
+  accessible so that ntlm_auth will work.  It needs to be in this rpm,
+  because the Samba RPM can't assume the squid user exists.
+  Note that this will only work if the Samba RPM is recent enough to create
+  that directory at install time instead of at winbindd startup time.
+  That should be samba-common-3.0.0-15 or later.
+  This fixes bugzilla #103726
+- Clean up extra whitespace in this spec file.
+- Add additional upstream patches. (Now 18 upstream patches).
+- patch #112 closes CAN-2005-0096 and CAN-2005-0097, remote DOS security holes.
+- patch #113 closes CAN-2005-0094, a remote buffer-overflow DOS security hole.
+- patch #114 closes CAN-2005-0095, a remote DOS security hole.
+- Remove the -nonbl (replaced by #104) and -close (replaced by #111) patches, since
+  they're now fixed by upstream patches.
+
+* Mon Oct 25 2004 Jay Fenlason  7:2.5.STABLE7-1
+- new upstream version, with 3 upstream patches.
+  Updated the -build and -config patches
+- Include patch from Ulrich Drepper  to more
+  intelligently close all file descriptors.
+
+* Mon Oct 18 2004 Jay Fenlason  7:2.5.STABLE6-3
+- include patch from Ulrich Drepper  to stop
+  problems with O_NONBLOCK.  This closes #136049
+
+* Tue Oct 12 2004 Jay Fenlason  7:2.5.STABLE6-2
+- Include fix for CAN-2004-0918
+
+* Tue Sep 28 2004 Jay Fenlason  7:2.5.STABLE6-1
+- New upstream version, with 32 upstream patches.
+  This closes #133970, #133931, #131728, #128143, #126726
+
+- Change the permissions on /etc/squid/squid.conf to 640.  This closes
+  bugzilla #125007
+
+* Mon Jun 28 2004 Jay Fenlason  7:2.5.STABLE5-5
+- Merge current upstream patches.
+- Fix the -pipe patch to have the correct name of the winbind pipe.
+
+* Tue Jun 15 2004 Elliot Lee 
+- rebuilt
+
+* Mon Apr 5 2004 Jay Fenlason  7:2.5.STABLE5-2
+- Include the first 10 upstream patches
+- Add a patch for the correct location of the winbindd pipe.  This closes
+  bugzilla #107561
+- Remove the change to ssl_support.c from squid-2.5.STABLE3-build patch
+  This closes #117851
+- Include /etc/pam.d/squid .  This closes #113404
+- Include a patch to close #111254 (assignment in assert)
+- Change squid.init to put output messages in /var/log/squid/squid.out
+  This closes #104697
+- Only useradd the squid user if it doesn't already exist, and error out
+  if the useradd fails.  This closes #118718.
+
+* Tue Mar 2 2004 Jay Fenlason  7:2.5.STABLE5-1
+- New upstream version, obsoletes many patches.
+- Fix --datadir passed to configure.  Configure automatically adds /squid
+  so we shouldn't.
+- Remove the problematic triggerpostun trigger, since it's broken, and FC2
+  never shipped with that old version.
+- add %%{?_smp_mflags} to make line.
+
+* Tue Mar 02 2004 Elliot Lee 
+- rebuilt
+
+* Mon Feb 23 2004 Tim Waugh 
+- Use ':' instead of '.' as separator for chown.
+
+* Fri Feb 20 2004 Jay Fenlason  7:2.5.STABLE4-3
+- Clean up the spec file to work on 64-bit platforms (use %%{_libdir}
+  instead of /usr/lib, etc)
+- Make the release number in the changelog section agree with reality.
+- use -fPIE rather than -fpie.  s390 fails with just -fpie
+
+* Fri Feb 13 2004 Elliot Lee 
+- rebuilt
+
+* Thu Feb 5 2004 Jay Fenlason 
+- Incorporate many upstream patches
+- Include many spec file changes from D.Johnson 
+
+* Tue Sep 23 2003 Jay Fenlason  7:2.5.STABLE4-1
+- New upstream version.
+- Fix the Source: line in this spec file to point to the correct URL.
+- redo the -location patch to work with the new upstream version.
+
+* Mon Jun 30 2003 Jay Fenlason  7:2.5.STABLE3-0
+- Spec file change to enable the nul storage module. bugzilla #74654
+- Upgrade to 2.5.STABLE3 with current official patches.
+- Added --enable-auth="basic,ntlm": closes bugzilla #90145
+- Added --with-winbind-auth-challenge: closes bugzilla #78691
+- Added --enable-useragent-log and --enable-referer-log, closes
+  bugzilla #91884
+# - Changed configure line to enable pie
+# (Disabled due to broken compilers on ia64 build machines)
+#- Patched to increase the maximum number of file descriptors #72896
+#- (disabled for now--needs more testing)
+
+* Wed Jun 04 2003 Elliot Lee 
+- rebuilt
+
+* Wed Jan 22 2003 Tim Powers 
+- rebuilt
+
+* Wed Jan 15 2003 Bill Nottingham  7:2.5.STABLE1-1
+- update to 2.5.STABLE1
+
+* Wed Nov 27 2002 Tim Powers  7:2.4.STABLE7-5
+- remove unpackaged files from the buildroot
+
+* Tue Aug 27 2002 Nalin Dahyabhai  2.4.STABLE7-4
+- rebuild
+
+* Wed Jul 31 2002 Karsten Hopp 
+- don't raise an error if the config file is incomplete
+  set defaults instead (#69322, #70065)
+
+* Thu Jul 18 2002 Bill Nottingham  2.4.STABLE7-2
+- don't strip binaries
+
+* Mon Jul  8 2002 Bill Nottingham 
+- update to 2.4.STABLE7
+- fix restart (#53761)
+
+* Tue Jun 25 2002 Bill Nottingham 
+- add various upstream bugfix patches
+
+* Fri Jun 21 2002 Tim Powers 
+- automated rebuild
+
+* Thu May 23 2002 Tim Powers 
+- automated rebuild
+
+* Fri Mar 22 2002 Bill Nottingham 
+- 2.4.STABLE6
+- turn off carp
+
+* Mon Feb 18 2002 Bill Nottingham 
+- 2.4.STABLE3 + patches
+- turn off HTCP at request of maintainers
+- leave SNMP enabled in the build, but disabled in the default config
+
+* Fri Jan 25 2002 Tim Powers 
+- rebuild against new libssl
+
+* Wed Jan 09 2002 Tim Powers 
+- automated rebuild
+
+* Mon Jan 07 2002 Florian La Roche 
+- require linuxdoc-tools instead of sgml-tools
+
+* Tue Sep 25 2001 Bill Nottingham 
+- update to 2.4.STABLE2
+
+* Mon Sep 24 2001 Bill Nottingham 
+- add patch to fix FTP crash
+
+* Mon Aug  6 2001 Bill Nottingham 
+- fix uninstall (#50411)
+
+* Mon Jul 23 2001 Bill Nottingham 
+- add some buildprereqs (#49705)
+
+* Sun Jul 22 2001 Bill Nottingham 
+- update FAQ
+
+* Tue Jul 17 2001 Bill Nottingham 
+- own /etc/squid, /usr/lib/squid
+
+* Tue Jun 12 2001 Nalin Dahyabhai 
+- rebuild in new environment
+- s/Copyright:/License:/
+
+* Tue Apr 24 2001 Bill Nottingham 
+- update to 2.4.STABLE1 + patches
+- enable some more configure options (#24981)
+- oops, ship /etc/sysconfig/squid
+
+* Fri Mar  2 2001 Nalin Dahyabhai 
+- rebuild in new environment
+
+* Tue Feb  6 2001 Trond Eivind Glomsrød 
+- improve i18n
+- make the initscript use the standard OK/FAILED
+
+* Tue Jan 23 2001 Bill Nottingham 
+- change i18n mechanism
+
+* Fri Jan 19 2001 Bill Nottingham 
+- fix path references in QUICKSTART (#15114)
+- fix initscript translations (#24086)
+- fix shutdown logic (#24234), patch from 
+- add /etc/sysconfig/squid for daemon options & shutdown timeouts
+- three more bugfixes from the Squid people
+- update FAQ.sgml
+- build and ship auth modules (#23611)
+
+* Thu Jan 11 2001 Bill Nottingham 
+- initscripts translations
+
+* Mon Jan  8 2001 Bill Nottingham 
+- add patch to use mkstemp (greg@wirex.com)
+
+* Fri Dec 01 2000 Bill Nottingham 
+- rebuild because of broken fileutils
+
+* Sat Nov 11 2000 Bill Nottingham 
+- fix the acl matching cases (only need the second patch)
+
+* Tue Nov  7 2000 Bill Nottingham 
+- add two patches to fix domain ACLs
+- add 2 bugfix patches from the squid people
+
+* Fri Jul 28 2000 Bill Nottingham 
+- clean up init script; fix condrestart
+- update to STABLE4, more bugfixes
+- update FAQ
+
+* Tue Jul 18 2000 Nalin Dahyabhai 
+- fix syntax error in init script
+- finish adding condrestart support
+
+* Fri Jul 14 2000 Bill Nottingham 
+- move initscript back
+
+* Wed Jul 12 2000 Prospector 
+- automatic rebuild
+
+* Thu Jul  6 2000 Bill Nottingham 
+- prereq /etc/init.d
+- add bugfix patch
+- update FAQ
+
+* Thu Jun 29 2000 Bill Nottingham 
+- fix init script
+
+* Tue Jun 27 2000 Bill Nottingham 
+- don't prereq new initscripts
+
+* Mon Jun 26 2000 Bill Nottingham 
+- initscript munging
+
+* Sat Jun 10 2000 Bill Nottingham 
+- rebuild for exciting FHS stuff
+
+* Wed May 31 2000 Bill Nottingham 
+- fix init script again (#11699)
+- add --enable-delay-pools (#11695)
+- update to STABLE3
+- update FAQ
+
+* Fri Apr 28 2000 Bill Nottingham 
+- fix init script (#11087)
+
+* Fri Apr  7 2000 Bill Nottingham 
+- three more bugfix patches from the squid people
+- buildprereq jade, sgmltools
+
+* Sun Mar 26 2000 Florian La Roche 
+- make %%pre more portable
+
+* Thu Mar 16 2000 Bill Nottingham 
+- bugfix patches
+- fix dependency on /usr/local/bin/perl
+
+* Sat Mar  4 2000 Bill Nottingham 
+- 2.3.STABLE2
+
+* Mon Feb 14 2000 Bill Nottingham 
+- Yet More Bugfix Patches
+
+* Tue Feb  8 2000 Bill Nottingham 
+- add more bugfix patches
+- --enable-heap-replacement
+
+* Mon Jan 31 2000 Cristian Gafton 
+- rebuild to fix dependencies
+
+* Fri Jan 28 2000 Bill Nottingham 
+- grab some bugfix patches
+
+* Mon Jan 10 2000 Bill Nottingham 
+- 2.3.STABLE1 (whee, another serial number)
+
+* Tue Dec 21 1999 Bernhard Rosenkraenzer 
+- Fix compliance with ftp RFCs
+  (http://www.wu-ftpd.org/broken-clients.html)
+- Work around a bug in some versions of autoconf
+- BuildPrereq sgml-tools - we're using sgml2html
+
+* Mon Oct 18 1999 Bill Nottingham 
+- add a couple of bugfix patches
+
+* Wed Oct 13 1999 Bill Nottingham 
+- update to 2.2.STABLE5.
+- update FAQ, fix URLs.
+
+* Sat Sep 11 1999 Cristian Gafton 
+- transform restart in reload and add restart to the init script
+
+* Tue Aug 31 1999 Bill Nottingham 
+- add squid user as user 23.
+
+* Mon Aug 16 1999 Bill Nottingham 
+- initscript munging
+- fix conflict between logrotate & squid -k (#4562)
+
+* Wed Jul 28 1999 Bill Nottingham 
+- put cachemgr.cgi back in /usr/lib/squid
+
+* Wed Jul 14 1999 Bill Nottingham 
+- add webdav bugfix patch (#4027)
+
+* Mon Jul 12 1999 Bill Nottingham 
+- fix path to config in squid.init (confuses linuxconf)
+
+* Wed Jul  7 1999 Bill Nottingham 
+- 2.2.STABLE4
+
+* Wed Jun 9 1999 Dale Lovelace 
+- logrotate changes
+- fixed errors from find when /var/spool/squid or
+  /var/log/squid didn't exist
+
+* Thu May 20 1999 Bill Nottingham 
+- 2.2.STABLE3
+
+* Thu Apr 22 1999 Bill Nottingham 
+- update to 2.2.STABLE.2
+
+* Sun Apr 18 1999 Bill Nottingham 
+- update to 2.2.STABLE1
+
+* Thu Apr 15 1999 Bill Nottingham 
+- don't need to run groupdel on remove
+- fix useradd
+
+* Mon Apr 12 1999 Bill Nottingham 
+- fix effective_user (bug #2124)
+
+* Mon Apr  5 1999 Bill Nottingham 
+- strip binaries
+
+* Thu Apr  1 1999 Bill Nottingham 
+- duh. adduser does require a user name.
+- add a serial number
+
+* Tue Mar 30 1999 Bill Nottingham 
+- add an adduser in %%pre, too
+
+* Thu Mar 25 1999 Bill Nottingham 
+- oog. chkconfig must be in %%preun, not %%postun
+
+* Wed Mar 24 1999 Bill Nottingham 
+- switch to using group squid
+- turn off icmp (insecure)
+- update to 2.2.DEVEL3
+- build FAQ docs from source
+
+* Tue Mar 23 1999 Bill Nottingham 
+- logrotate changes
+
+* Sun Mar 21 1999 Cristian Gafton 
+- auto rebuild in the new build environment (release 4)
+
+* Wed Feb 10 1999 Bill Nottingham 
+- update to 2.2.PRE2
+
+* Wed Dec 30 1998 Bill Nottingham 
+- cache & log dirs shouldn't be world readable
+- remove preun script (leave logs & cache @ uninstall)
+
+* Tue Dec 29 1998 Bill Nottingham 
+- fix initscript to get cache_dir correct
+
+* Fri Dec 18 1998 Bill Nottingham 
+- update to 2.1.PATCH2
+- merge in some changes from RHCN version
+
+* Sat Oct 10 1998 Cristian Gafton 
+- strip binaries
+- version 1.1.22
+
+* Sun May 10 1998 Cristian Gafton 
+- don't make packages conflict with each other...
+
+* Sat May 02 1998 Cristian Gafton 
+- added a proxy auth patch from Alex deVries 
+- fixed initscripts
+
+* Thu Apr 09 1998 Cristian Gafton 
+- rebuilt for Manhattan
+
+* Fri Mar 20 1998 Cristian Gafton 
+- upgraded to 1.1.21/1.NOVM.21
+
+* Mon Mar 02 1998 Cristian Gafton 
+- updated the init script to use reconfigure option to restart squid instead
+  of shutdown/restart (both safer and quicker)
+
+* Sat Feb 07 1998 Cristian Gafton 
+- upgraded to 1.1.20
+- added the NOVM package and tried to reduce the mess in the spec file
+
+* Wed Jan 7 1998 Cristian Gafton 
+- first build against glibc
+- patched out the use of setresuid(), which is available only on kernels
+  2.1.44 and later
+