import from CS git squid-4.15-10.el8.1
commit 48497485e9
.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
SOURCES/squid-4.15.tar.xz

.squid.metadata (new file, 1 line)
@@ -0,0 +1 @@
60bda34ba39657e2d870c8c1d2acece8a69c3075 SOURCES/squid-4.15.tar.xz

SOURCES/cache_swap.sh (new file, 16 lines)
@@ -0,0 +1,16 @@
#!/bin/bash
if [ -f /etc/sysconfig/squid ]; then
    . /etc/sysconfig/squid
fi

SQUID_CONF=${SQUID_CONF:-"/etc/squid/squid.conf"}

CACHE_SWAP=`sed -e 's/#.*//g' $SQUID_CONF | \
    grep cache_dir | awk '{ print $3 }'`

for adir in $CACHE_SWAP; do
    if [ ! -d $adir/00 ]; then
        echo -n "init_cache_dir $adir... "
        squid -N -z -F -f $SQUID_CONF >> /var/log/squid/squid.out 2>&1
    fi
done

SOURCES/perl-requires-squid.sh (new executable file, 3 lines)
@@ -0,0 +1,3 @@
#!/bin/sh

/usr/lib/rpm/perl.req $* | grep -v "Authen::Smb"

SOURCES/squid-4.11-active-ftp.patch (new file, 127 lines)
@@ -0,0 +1,127 @@
diff --git a/src/clients/FtpClient.cc b/src/clients/FtpClient.cc
index b665bcf..d287e55 100644
--- a/src/clients/FtpClient.cc
+++ b/src/clients/FtpClient.cc
@@ -778,7 +778,8 @@ Ftp::Client::connectDataChannel()
bool
Ftp::Client::openListenSocket()
{
- return false;
+ debugs(9, 3, HERE);
+ return false;
}

/// creates a data channel Comm close callback
diff --git a/src/clients/FtpClient.h b/src/clients/FtpClient.h
index a76a5a0..218d696 100644
--- a/src/clients/FtpClient.h
+++ b/src/clients/FtpClient.h
@@ -118,7 +118,7 @@ public:
bool sendPort();
bool sendPassive();
void connectDataChannel();
- bool openListenSocket();
+ virtual bool openListenSocket();
void switchTimeoutToDataChannel();

CtrlChannel ctrl; ///< FTP control channel state
diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc
index 411bce9..31d3e36 100644
--- a/src/clients/FtpGateway.cc
+++ b/src/clients/FtpGateway.cc
@@ -87,6 +87,13 @@ struct GatewayFlags {
class Gateway;
typedef void (StateMethod)(Ftp::Gateway *);

+} // namespace FTP
+
+static void ftpOpenListenSocket(Ftp::Gateway * ftpState, int fallback);
+
+namespace Ftp
+{
+
/// FTP Gateway: An FTP client that takes an HTTP request with an ftp:// URI,
/// converts it into one or more FTP commands, and then
/// converts one or more FTP responses into the final HTTP response.
@@ -137,7 +144,11 @@ public:

/// create a data channel acceptor and start listening.
void listenForDataChannel(const Comm::ConnectionPointer &conn);
-
+ virtual bool openListenSocket() {
+ debugs(9, 3, HERE);
+ ftpOpenListenSocket(this, 0);
+ return Comm::IsConnOpen(data.conn);
+ }
int checkAuth(const HttpHeader * req_hdr);
void checkUrlpath();
void buildTitleUrl();
@@ -1787,6 +1798,7 @@ ftpOpenListenSocket(Ftp::Gateway * ftpState, int fallback)
}

ftpState->listenForDataChannel(temp);
+ ftpState->data.listenConn = temp;
}

static void
@@ -1822,13 +1834,19 @@ ftpSendPORT(Ftp::Gateway * ftpState)
// pull out the internal IP address bytes to send in PORT command...
// source them from the listen_conn->local

+ struct sockaddr_in addr;
+ socklen_t addrlen = sizeof(addr);
+ getsockname(ftpState->data.listenConn->fd, (struct sockaddr *) &addr, &addrlen);
+ unsigned char port_high = ntohs(addr.sin_port) >> 8;
+ unsigned char port_low = ntohs(addr.sin_port) & 0xff;
+
struct addrinfo *AI = NULL;
ftpState->data.listenConn->local.getAddrInfo(AI, AF_INET);
unsigned char *addrptr = (unsigned char *) &((struct sockaddr_in*)AI->ai_addr)->sin_addr;
- unsigned char *portptr = (unsigned char *) &((struct sockaddr_in*)AI->ai_addr)->sin_port;
+ // unsigned char *portptr = (unsigned char *) &((struct sockaddr_in*)AI->ai_addr)->sin_port;
snprintf(cbuf, CTRL_BUFLEN, "PORT %d,%d,%d,%d,%d,%d\r\n",
addrptr[0], addrptr[1], addrptr[2], addrptr[3],
- portptr[0], portptr[1]);
+ port_high, port_low);
ftpState->writeCommand(cbuf);
ftpState->state = Ftp::Client::SENT_PORT;

@@ -1881,14 +1899,27 @@ ftpSendEPRT(Ftp::Gateway * ftpState)
return;
}

+
+ unsigned int port;
+ struct sockaddr_storage addr;
+ socklen_t addrlen = sizeof(addr);
+ getsockname(ftpState->data.listenConn->fd, (struct sockaddr *) &addr, &addrlen);
+ if (addr.ss_family == AF_INET) {
+ struct sockaddr_in *addr4 = (struct sockaddr_in*) &addr;
+ port = ntohs( addr4->sin_port );
+ } else {
+ struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) &addr;
+ port = ntohs( addr6->sin6_port );
+ }
+
char buf[MAX_IPSTRLEN];

/* RFC 2428 defines EPRT as IPv6 equivalent to IPv4 PORT command. */
/* Which can be used by EITHER protocol. */
- snprintf(cbuf, CTRL_BUFLEN, "EPRT |%d|%s|%d|\r\n",
+ snprintf(cbuf, CTRL_BUFLEN, "EPRT |%d|%s|%u|\r\n",
( ftpState->data.listenConn->local.isIPv6() ? 2 : 1 ),
ftpState->data.listenConn->local.toStr(buf,MAX_IPSTRLEN),
- ftpState->data.listenConn->local.port() );
+ port);

ftpState->writeCommand(cbuf);
ftpState->state = Ftp::Client::SENT_EPRT;

@@ -1907,7 +1938,7 @@ ftpReadEPRT(Ftp::Gateway * ftpState)
ftpSendPORT(ftpState);
return;
}
-
+ ftpState->ctrl.message = NULL;
ftpRestOrList(ftpState);
}

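The heart of this patch is that PORT/EPRT must advertise the port the kernel actually bound for the listening data socket, read back with getsockname(2), rather than the value recorded in listenConn->local. A minimal standalone sketch of that lookup, with a hypothetical helper name (not Squid code):

    // Return the kernel-assigned port of a bound IPv4 socket, or 0 on error.
    static unsigned short boundPort(int listenFd)
    {
        struct sockaddr_in addr;
        socklen_t addrlen = sizeof(addr);
        if (getsockname(listenFd, (struct sockaddr *) &addr, &addrlen) != 0)
            return 0; // caller treats 0 as "port unknown"
        return ntohs(addr.sin_port); // convert to host byte order
    }
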
SOURCES/squid-4.11-config.patch (new file, 27 lines)
@@ -0,0 +1,27 @@
diff --git a/src/cf.data.pre b/src/cf.data.pre
index 26ef576..30d5509 100644
--- a/src/cf.data.pre
+++ b/src/cf.data.pre
@@ -5006,7 +5006,7 @@ DOC_END

NAME: logfile_rotate
TYPE: int
-DEFAULT: 10
+DEFAULT: 0
LOC: Config.Log.rotateNumber
DOC_START
Specifies the default number of logfile rotations to make when you
@@ -6857,11 +6857,11 @@ COMMENT_END

NAME: cache_mgr
TYPE: string
-DEFAULT: webmaster
+DEFAULT: root
LOC: Config.adminEmail
DOC_START
Email-address of local cache manager who will receive
- mail if the cache dies. The default is "webmaster".
+ mail if the cache dies. The default is "root".
DOC_END

NAME: mail_from

SOURCES/squid-4.11-convert-ipv4.patch (new file, 143 lines)
@@ -0,0 +1,143 @@
From 771908d313ee9c255adfb5e4fdba4d6797c18409 Mon Sep 17 00:00:00 2001
From: Amos Jeffries <yadij@users.noreply.github.com>
Date: Thu, 7 Mar 2019 13:50:38 +0000
Subject: [PATCH] Bug 4928: Cannot convert non-IPv4 to IPv4 (#379)

... when reaching client_ip_max_connections

The client_ip_max_connections limit is checked before the TCP dst-IP is located for the newly received TCP connection. This leaves Squid unable to fetch the NFMARK or similar
details later on (they do not exist for [::]).

Move client_ip_max_connections test later in the TCP accept process to ensure dst-IP is known when the error is produced.
---
src/comm/TcpAcceptor.cc | 82 ++++++++++++++++++++---------------------
1 file changed, 39 insertions(+), 43 deletions(-)

diff --git a/src/comm/TcpAcceptor.cc b/src/comm/TcpAcceptor.cc
index d4b576d..936aa30 100644
--- a/src/comm/TcpAcceptor.cc
+++ b/src/comm/TcpAcceptor.cc
@@ -282,7 +282,16 @@ Comm::TcpAcceptor::acceptOne()
ConnectionPointer newConnDetails = new Connection();
const Comm::Flag flag = oldAccept(newConnDetails);

- if (flag == Comm::COMM_ERROR) {
+ /* Check for errors */
+ if (!newConnDetails->isOpen()) {
+
+ if (flag == Comm::NOMESSAGE) {
+ /* register interest again */
+ debugs(5, 5, HERE << "try later: " << conn << " handler Subscription: " << theCallSub);
+ SetSelect(conn->fd, COMM_SELECT_READ, doAccept, this, 0);
+ return;
+ }
+
// A non-recoverable error; notify the caller */
debugs(5, 5, HERE << "non-recoverable error:" << status() << " handler Subscription: " << theCallSub);
if (intendedForUserConnections())
@@ -292,16 +301,12 @@ Comm::TcpAcceptor::acceptOne()
return;
}

- if (flag == Comm::NOMESSAGE) {
- /* register interest again */
- debugs(5, 5, "try later: " << conn << " handler Subscription: " << theCallSub);
- } else {
- debugs(5, 5, "Listener: " << conn <<
- " accepted new connection " << newConnDetails <<
- " handler Subscription: " << theCallSub);
- notify(flag, newConnDetails);
- }
+ newConnDetails->nfmark = Ip::Qos::getNfmarkFromConnection(newConnDetails, Ip::Qos::dirAccepted);

+ debugs(5, 5, HERE << "Listener: " << conn <<
+ " accepted new connection " << newConnDetails <<
+ " handler Subscription: " << theCallSub);
+ notify(flag, newConnDetails);
SetSelect(conn->fd, COMM_SELECT_READ, doAccept, this, 0);
}

@@ -341,8 +346,8 @@ Comm::TcpAcceptor::notify(const Comm::Flag flag, const Comm::ConnectionPointer &
*
* \retval Comm::OK success. details parameter filled.
* \retval Comm::NOMESSAGE attempted accept() but nothing useful came in.
- * Or this client has too many connections already.
* \retval Comm::COMM_ERROR an outright failure occurred.
+ * Or this client has too many connections already.
*/
Comm::Flag
Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)
@@ -383,6 +388,15 @@ Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)

details->remote = *gai;

+ if ( Config.client_ip_max_connections >= 0) {
+ if (clientdbEstablished(details->remote, 0) > Config.client_ip_max_connections) {
+ debugs(50, DBG_IMPORTANT, "WARNING: " << details->remote << " attempting more than " << Config.client_ip_max_connections << " connections.");
+ Ip::Address::FreeAddr(gai);
+ PROF_stop(comm_accept);
+ return Comm::COMM_ERROR;
+ }
+ }
+
// lookup the local-end details of this new connection
Ip::Address::InitAddr(gai);
details->local.setEmpty();
@@ -396,6 +410,23 @@ Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)
details->local = *gai;
Ip::Address::FreeAddr(gai);

+ /* fdstat update */
+ fdd_table[sock].close_file = NULL;
+ fdd_table[sock].close_line = 0;
+
+ fde *F = &fd_table[sock];
+ details->remote.toStr(F->ipaddr,MAX_IPSTRLEN);
+ F->remote_port = details->remote.port();
+ F->local_addr = details->local;
+ F->sock_family = details->local.isIPv6()?AF_INET6:AF_INET;
+
+ // set socket flags
+ commSetCloseOnExec(sock);
+ commSetNonBlocking(sock);
+
+ /* IFF the socket is (tproxy) transparent, pass the flag down to allow spoofing */
+ F->flags.transparent = fd_table[conn->fd].flags.transparent; // XXX: can we remove this line yet?
+
// Perform NAT or TPROXY operations to retrieve the real client/dest IP addresses
if (conn->flags&(COMM_TRANSPARENT|COMM_INTERCEPTION) && !Ip::Interceptor.Lookup(details, conn)) {
debugs(50, DBG_IMPORTANT, "ERROR: NAT/TPROXY lookup failed to locate original IPs on " << details);
@@ -414,33 +445,6 @@ Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)
}
#endif

- details->nfmark = Ip::Qos::getNfmarkFromConnection(details, Ip::Qos::dirAccepted);
-
- if (Config.client_ip_max_connections >= 0) {
- if (clientdbEstablished(details->remote, 0) > Config.client_ip_max_connections) {
- debugs(50, DBG_IMPORTANT, "WARNING: " << details->remote << " attempting more than " << Config.client_ip_max_connections << " connections.");
- PROF_stop(comm_accept);
- return Comm::NOMESSAGE;
- }
- }
-
- /* fdstat update */
- fdd_table[sock].close_file = NULL;
- fdd_table[sock].close_line = 0;
-
- fde *F = &fd_table[sock];
- details->remote.toStr(F->ipaddr,MAX_IPSTRLEN);
- F->remote_port = details->remote.port();
- F->local_addr = details->local;
- F->sock_family = details->local.isIPv6()?AF_INET6:AF_INET;
-
- // set socket flags
- commSetCloseOnExec(sock);
- commSetNonBlocking(sock);
-
- /* IFF the socket is (tproxy) transparent, pass the flag down to allow spoofing */
- F->flags.transparent = fd_table[conn->fd].flags.transparent; // XXX: can we remove this line yet?
-
PROF_stop(comm_accept);
return Comm::OK;
}

SOURCES/squid-4.11-include-guards.patch (new file, 41 lines)
@@ -0,0 +1,41 @@
diff --git a/compat/os/linux.h b/compat/os/linux.h
index 0ff05c6..d51389b 100644
--- a/compat/os/linux.h
+++ b/compat/os/linux.h
@@ -44,6 +44,36 @@
#include <netinet/in.h>
#endif

+/*
+ * Netfilter header madness. (see Bug 4323)
+ *
+ * Netfilter have a history of defining their own versions of network protocol
+ * primitives without sufficient protection against the POSIX defines which are
+ * aways present in Linux.
+ *
+ * netinet/in.h must be included before any other sys header in order to properly
+ * activate include guards in <linux/libc-compat.h> the kernel maintainers added
+ * to workaround it.
+ */
+#if HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
+/*
+ * Netfilter header madness. (see Bug 4323)
+ *
+ * Netfilter have a history of defining their own versions of network protocol
+ * primitives without sufficient protection against the POSIX defines which are
+ * aways present in Linux.
+ *
+ * netinet/in.h must be included before any other sys header in order to properly
+ * activate include guards in <linux/libc-compat.h> the kernel maintainers added
+ * to workaround it.
+ */
+#if HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
/*
* sys/capability.h is only needed in Linux apparently.
*

SOURCES/squid-4.11-large-acl.patch (new file, 178 lines)
@@ -0,0 +1,178 @@
diff --git a/src/acl/RegexData.cc b/src/acl/RegexData.cc
index 01a4c12..b5c1679 100644
--- a/src/acl/RegexData.cc
+++ b/src/acl/RegexData.cc
@@ -22,6 +22,7 @@
#include "ConfigParser.h"
#include "Debug.h"
#include "sbuf/List.h"
+#include "sbuf/Algorithms.h"

ACLRegexData::~ACLRegexData()
{
@@ -129,6 +130,18 @@ compileRE(std::list<RegexPattern> &curlist, const char * RE, int flags)
return true;
}

+static bool
+compileRE(std::list<RegexPattern> &curlist, const SBufList &RE, int flags)
+{
+ if (RE.empty())
+ return curlist.empty(); // XXX: old code did this. It looks wrong.
+ SBuf regexp;
+ static const SBuf openparen("("), closeparen(")"), separator(")|(");
+ JoinContainerIntoSBuf(regexp, RE.begin(), RE.end(), separator, openparen,
+ closeparen);
+ return compileRE(curlist, regexp.c_str(), flags);
+}
+
/** Compose and compile one large RE from a set of (small) REs.
* The ultimate goal is to have only one RE per ACL so that match() is
* called only once per ACL.
@@ -137,16 +150,11 @@ static int
compileOptimisedREs(std::list<RegexPattern> &curlist, const SBufList &sl)
{
std::list<RegexPattern> newlist;
- int numREs = 0;
+ SBufList accumulatedRE;
+ int numREs = 0, reSize = 0;
int flags = REG_EXTENDED | REG_NOSUB;
- int largeREindex = 0;
- char largeRE[BUFSIZ];
- *largeRE = 0;

for (const SBuf & configurationLineWord : sl) {
- int RElen;
- RElen = configurationLineWord.length();
-
static const SBuf minus_i("-i");
static const SBuf plus_i("+i");
if (configurationLineWord == minus_i) {
@@ -155,10 +163,11 @@ compileOptimisedREs(std::list<RegexPattern> &curlist, const SBufList &sl)
debugs(28, 2, "optimisation of -i ... -i" );
} else {
debugs(28, 2, "-i" );
- if (!compileRE(newlist, largeRE, flags))
+ if (!compileRE(newlist, accumulatedRE, flags))
return 0;
flags |= REG_ICASE;
- largeRE[largeREindex=0] = '\0';
+ accumulatedRE.clear();
+ reSize = 0;
}
} else if (configurationLineWord == plus_i) {
if ((flags & REG_ICASE) == 0) {
@@ -166,37 +175,34 @@ compileOptimisedREs(std::list<RegexPattern> &curlist, const SBufList &sl)
debugs(28, 2, "optimisation of +i ... +i");
} else {
debugs(28, 2, "+i");
- if (!compileRE(newlist, largeRE, flags))
+ if (!compileRE(newlist, accumulatedRE, flags))
return 0;
flags &= ~REG_ICASE;
- largeRE[largeREindex=0] = '\0';
+ accumulatedRE.clear();
+ reSize = 0;
}
- } else if (RElen + largeREindex + 3 < BUFSIZ-1) {
+ } else if (reSize < 1024) {
debugs(28, 2, "adding RE '" << configurationLineWord << "'");
- if (largeREindex > 0) {
- largeRE[largeREindex] = '|';
- ++largeREindex;
- }
- largeRE[largeREindex] = '(';
- ++largeREindex;
- configurationLineWord.copy(largeRE+largeREindex, BUFSIZ-largeREindex);
- largeREindex += configurationLineWord.length();
- largeRE[largeREindex] = ')';
- ++largeREindex;
- largeRE[largeREindex] = '\0';
+ accumulatedRE.push_back(configurationLineWord);
++numREs;
+ reSize += configurationLineWord.length();
} else {
debugs(28, 2, "buffer full, generating new optimised RE..." );
- if (!compileRE(newlist, largeRE, flags))
+ accumulatedRE.push_back(configurationLineWord);
+ if (!compileRE(newlist, accumulatedRE, flags))
return 0;
- largeRE[largeREindex=0] = '\0';
+ accumulatedRE.clear();
+ reSize = 0;
continue; /* do the loop again to add the RE to largeRE */
}
}

- if (!compileRE(newlist, largeRE, flags))
+ if (!compileRE(newlist, accumulatedRE, flags))
return 0;

+ accumulatedRE.clear();
+ reSize = 0;
+
/* all was successful, so put the new list at the tail */
curlist.splice(curlist.end(), newlist);

diff --git a/src/sbuf/Algorithms.h b/src/sbuf/Algorithms.h
index 21ee889..338e9c0 100644
--- a/src/sbuf/Algorithms.h
+++ b/src/sbuf/Algorithms.h
@@ -81,6 +81,57 @@ SBufContainerJoin(const Container &items, const SBuf& separator)
return rv;
}

+/** Join container of SBufs and append to supplied target
+ *
+ * append to the target SBuf all elements in the [begin,end) range from
+ * an iterable container, prefixed by prefix, separated by separator and
+ * followed by suffix. Prefix and suffix are added also in case of empty
+ * iterable
+ *
+ * \return the modified dest
+ */
+template <class ContainerIterator>
+SBuf&
+JoinContainerIntoSBuf(SBuf &dest, const ContainerIterator &begin,
+ const ContainerIterator &end, const SBuf& separator,
+ const SBuf& prefix = SBuf(), const SBuf& suffix = SBuf())
+{
+ if (begin == end) {
+ dest.append(prefix).append(suffix);
+ return dest;
+ }
+
+ // optimization: pre-calculate needed storage
+ const SBuf::size_type totalContainerSize =
+ std::accumulate(begin, end, 0, SBufAddLength(separator)) +
+ dest.length() + prefix.length() + suffix.length();
+ SBufReservationRequirements req;
+ req.minSpace = totalContainerSize;
+ dest.reserve(req);
+
+ auto i = begin;
+ dest.append(prefix);
+ dest.append(*i);
+ ++i;
+ for (; i != end; ++i)
+ dest.append(separator).append(*i);
+ dest.append(suffix);
+ return dest;
+}
+
+
+/// convenience wrapper of JoinContainerIntoSBuf with no caller-supplied SBuf
+template <class ContainerIterator>
+SBuf
+JoinContainerToSBuf(const ContainerIterator &begin,
+ const ContainerIterator &end, const SBuf& separator,
+ const SBuf& prefix = SBuf(), const SBuf& suffix = SBuf())
+{
+ SBuf rv;
+ return JoinContainerIntoSBuf(rv, begin, end, separator, prefix, suffix);
+}
+
+
namespace std {
/// default hash functor to support std::unordered_map<SBuf,*>
template <>

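The two templates above exist so that compileOptimisedREs() can batch many small ACL regexes into one "(a)|(b)" alternation and compile it once, instead of filling a fixed BUFSIZ buffer. A hedged usage sketch of JoinContainerIntoSBuf(), called exactly as the new compileRE() overload above calls it (assumes an SBufList, i.e. std::list<SBuf>):

    // Build "(alpha)|(beta)" from two accumulated regex words.
    SBufList patterns;
    patterns.push_back(SBuf("alpha"));
    patterns.push_back(SBuf("beta"));

    static const SBuf openparen("("), closeparen(")"), separator(")|(");
    SBuf joined;
    JoinContainerIntoSBuf(joined, patterns.begin(), patterns.end(),
                          separator, openparen, closeparen);
    // joined now holds: (alpha)|(beta)
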
SOURCES/squid-4.11-location.patch (new file, 33 lines)
@@ -0,0 +1,33 @@
diff --git a/QUICKSTART b/QUICKSTART
index e5299b4..a243437 100644
--- a/QUICKSTART
+++ b/QUICKSTART
@@ -10,10 +10,9 @@ After you retrieved, compiled and installed the Squid software (see
INSTALL in the same directory), you have to configure the squid.conf
file. This is the list of the values you *need* to change, because no
sensible defaults could be defined. Do not touch the other variables
-for now. We assume you have installed Squid in the default location:
-/usr/local/squid
+for now.

-Uncomment and edit the following lines in /usr/local/squid/etc/squid.conf:
+Uncomment and edit the following lines in /etc/squid/squid.conf:

==============================================================================

@@ -80,12 +79,12 @@ After editing squid.conf to your liking, run Squid from the command
line TWICE:

To create any disk cache_dir configured:
- % /usr/local/squid/sbin/squid -z
+ % /usr/sbin/squid -z

To start squid:
- % /usr/local/squid/sbin/squid
+ % /usr/sbin/squid

-Check in the cache.log (/usr/local/squid/var/logs/cache.log) that
+Check in the cache.log (/var/log/squid/cache.log) that
everything is all right.

Once Squid created all its files (it can take several minutes on some

SOURCES/squid-4.11-perlpath.patch (new file, 10 lines)
@@ -0,0 +1,10 @@
diff --git a/contrib/url-normalizer.pl b/contrib/url-normalizer.pl
index 90ac6a4..8dbed90 100755
--- a/contrib/url-normalizer.pl
+++ b/contrib/url-normalizer.pl
@@ -1,4 +1,4 @@
-#!/usr/local/bin/perl -Tw
+#!/usr/bin/perl -Tw
#
# * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
# *

SOURCES/squid-4.15-CVE-2021-28116.patch (new file, 424 lines)
@@ -0,0 +1,424 @@
commit b003a0da7865caa25b5d1e70c79329b32409b02a (HEAD -> refs/heads/v4, refs/remotes/origin/v4)
Author: Amos Jeffries <yadij@users.noreply.github.com>
Date: 2021-09-24 21:53:11 +0000

WCCP: Validate packets better (#899)

Update WCCP to support exception based error handling for
parsing and processing we are moving Squid to for protocol
handling.

Update the main WCCPv2 parsing checks to throw meaningful
exceptions when detected.

diff --git a/src/wccp2.cc b/src/wccp2.cc
index ee592449c..6ef469e91 100644
--- a/src/wccp2.cc
+++ b/src/wccp2.cc
@@ -1108,6 +1108,59 @@ wccp2ConnectionClose(void)
* Functions for handling the requests.
*/

+/// Checks that the given area section ends inside the given (whole) area.
+/// \param error the message to throw when the section does not fit
+static void
+CheckSectionLength(const void *sectionStart, const size_t sectionLength, const void *wholeStart, const size_t wholeSize, const char *error)
+{
+ assert(sectionStart);
+ assert(wholeStart);
+
+ const auto wholeEnd = static_cast<const char*>(wholeStart) + wholeSize;
+ assert(sectionStart >= wholeStart && "we never go backwards");
+ assert(sectionStart <= wholeEnd && "we never go beyond our whole (but zero-sized fields are OK)");
+ static_assert(sizeof(wccp2_i_see_you_t) <= PTRDIFF_MAX, "paranoid: no UB when subtracting in-whole pointers");
+ // subtraction safe due to the three assertions above
+ const auto remainderDiff = wholeEnd - static_cast<const char*>(sectionStart);
+
+ // casting safe due to the assertions above (and size_t definition)
+ assert(remainderDiff >= 0);
+ const auto remainderSize = static_cast<size_t>(remainderDiff);
+
+ if (sectionLength <= remainderSize)
+ return;
+
+ throw TextException(error, Here());
+}
+
+/// Checks that the area contains at least dataLength bytes after the header.
+/// The size of the field header itself is not included in dataLength.
+/// \returns the total field size -- the field header and field data combined
+template<class FieldHeader>
+static size_t
+CheckFieldDataLength(const FieldHeader *header, const size_t dataLength, const void *areaStart, const size_t areaSize, const char *error)
+{
+ assert(header);
+ const auto dataStart = reinterpret_cast<const char*>(header) + sizeof(header);
+ CheckSectionLength(dataStart, dataLength, areaStart, areaSize, error);
+ return sizeof(header) + dataLength; // no overflow after CheckSectionLength()
+}
+
+/// Positions the given field at a given start within a given packet area.
+/// The Field type determines the correct field size (used for bounds checking).
+/// \param field the field pointer the function should set
+/// \param areaStart the start of a packet (sub)structure containing the field
+/// \param areaSize the size of the packet (sub)structure starting at areaStart
+/// \param fieldStart the start of a field within the given area
+/// \param error the message to throw when the field does not fit the area
+template<class Field>
+static void
+SetField(Field *&field, const void *fieldStart, const void *areaStart, const size_t areaSize, const char *error)
+{
+ CheckSectionLength(fieldStart, sizeof(Field), areaStart, areaSize, error);
+ field = static_cast<Field*>(const_cast<void*>(fieldStart));
+}
+
/*
* Accept the UDP packet
*/
@@ -1124,8 +1177,6 @@ wccp2HandleUdp(int sock, void *)

/* These structs form the parts of the packet */

- struct wccp2_item_header_t *header = NULL;
-
struct wccp2_security_none_t *security_info = NULL;

struct wccp2_service_info_t *service_info = NULL;
@@ -1141,14 +1192,13 @@ wccp2HandleUdp(int sock, void *)
struct wccp2_cache_identity_info_t *cache_identity = NULL;

struct wccp2_capability_info_header_t *router_capability_header = NULL;
+ char *router_capability_data_start = nullptr;

struct wccp2_capability_element_t *router_capability_element;

struct sockaddr_in from;

struct in_addr cache_address;
- int len, found;
- short int data_length, offset;
uint32_t tmp;
char *ptr;
int num_caches;
@@ -1161,20 +1211,18 @@ wccp2HandleUdp(int sock, void *)
Ip::Address from_tmp;
from_tmp.setIPv4();

- len = comm_udp_recvfrom(sock,
- &wccp2_i_see_you,
- WCCP_RESPONSE_SIZE,
- 0,
- from_tmp);
+ const auto lenOrError = comm_udp_recvfrom(sock, &wccp2_i_see_you, WCCP_RESPONSE_SIZE, 0, from_tmp);

- if (len < 0)
+ if (lenOrError < 0)
return;
+ const auto len = static_cast<size_t>(lenOrError);

- if (ntohs(wccp2_i_see_you.version) != WCCP2_VERSION)
- return;
-
- if (ntohl(wccp2_i_see_you.type) != WCCP2_I_SEE_YOU)
- return;
+ try {
+ // TODO: Remove wccp2_i_see_you.data and use a buffer to read messages.
+ const auto message_header_size = sizeof(wccp2_i_see_you) - sizeof(wccp2_i_see_you.data);
+ Must2(len >= message_header_size, "incomplete WCCP message header");
+ Must2(ntohs(wccp2_i_see_you.version) == WCCP2_VERSION, "WCCP version unsupported");
+ Must2(ntohl(wccp2_i_see_you.type) == WCCP2_I_SEE_YOU, "WCCP packet type unsupported");

/* FIXME INET6 : drop conversion boundary */
from_tmp.getSockAddr(from);
@@ -1182,73 +1230,60 @@ wccp2HandleUdp(int sock, void *)
debugs(80, 3, "Incoming WCCPv2 I_SEE_YOU length " << ntohs(wccp2_i_see_you.length) << ".");

/* Record the total data length */
- data_length = ntohs(wccp2_i_see_you.length);
+ const auto data_length = ntohs(wccp2_i_see_you.length);
+ Must2(data_length <= len - message_header_size,
+ "malformed packet claiming it's bigger than received data");

- offset = 0;
-
- if (data_length > len) {
- debugs(80, DBG_IMPORTANT, "ERROR: Malformed WCCPv2 packet claiming it's bigger than received data");
- return;
- }
+ size_t offset = 0;

/* Go through the data structure */
- while (data_length > offset) {
+ while (offset + sizeof(struct wccp2_item_header_t) <= data_length) {

char *data = wccp2_i_see_you.data;

- header = (struct wccp2_item_header_t *) &data[offset];
+ const auto itemHeader = reinterpret_cast<const wccp2_item_header_t*>(&data[offset]);
+ const auto itemSize = CheckFieldDataLength(itemHeader, ntohs(itemHeader->length),
+ data, data_length, "truncated record");
+ // XXX: Check "The specified length must be a multiple of 4 octets"
+ // requirement to avoid unaligned memory reads after the first item.

- switch (ntohs(header->type)) {
+ switch (ntohs(itemHeader->type)) {

case WCCP2_SECURITY_INFO:
-
- if (security_info != NULL) {
- debugs(80, DBG_IMPORTANT, "Duplicate security definition");
- return;
- }
-
- security_info = (struct wccp2_security_none_t *) &wccp2_i_see_you.data[offset];
+ Must2(!security_info, "duplicate security definition");
+ SetField(security_info, itemHeader, itemHeader, itemSize,
+ "security definition truncated");
break;

case WCCP2_SERVICE_INFO:
-
- if (service_info != NULL) {
- debugs(80, DBG_IMPORTANT, "Duplicate service_info definition");
- return;
- }
-
- service_info = (struct wccp2_service_info_t *) &wccp2_i_see_you.data[offset];
+ Must2(!service_info, "duplicate service_info definition");
+ SetField(service_info, itemHeader, itemHeader, itemSize,
+ "service_info definition truncated");
break;

case WCCP2_ROUTER_ID_INFO:
-
- if (router_identity_info != NULL) {
- debugs(80, DBG_IMPORTANT, "Duplicate router_identity_info definition");
- return;
- }
-
- router_identity_info = (struct router_identity_info_t *) &wccp2_i_see_you.data[offset];
+ Must2(!router_identity_info, "duplicate router_identity_info definition");
+ SetField(router_identity_info, itemHeader, itemHeader, itemSize,
+ "router_identity_info definition truncated");
break;

case WCCP2_RTR_VIEW_INFO:
-
- if (router_view_header != NULL) {
- debugs(80, DBG_IMPORTANT, "Duplicate router_view definition");
- return;
- }
-
- router_view_header = (struct router_view_t *) &wccp2_i_see_you.data[offset];
+ Must2(!router_view_header, "duplicate router_view definition");
+ SetField(router_view_header, itemHeader, itemHeader, itemSize,
+ "router_view definition truncated");
break;

- case WCCP2_CAPABILITY_INFO:
-
- if (router_capability_header != NULL) {
- debugs(80, DBG_IMPORTANT, "Duplicate router_capability definition");
- return;
- }
+ case WCCP2_CAPABILITY_INFO: {
+ Must2(!router_capability_header, "duplicate router_capability definition");
+ SetField(router_capability_header, itemHeader, itemHeader, itemSize,
+ "router_capability definition truncated");

- router_capability_header = (struct wccp2_capability_info_header_t *) &wccp2_i_see_you.data[offset];
+ CheckFieldDataLength(router_capability_header, ntohs(router_capability_header->capability_info_length),
+ itemHeader, itemSize, "capability info truncated");
+ router_capability_data_start = reinterpret_cast<char*>(router_capability_header) +
+ sizeof(*router_capability_header);
break;
+ }

/* Nothing to do for the types below */

@@ -1257,22 +1292,17 @@ wccp2HandleUdp(int sock, void *)
break;

default:
- debugs(80, DBG_IMPORTANT, "Unknown record type in WCCPv2 Packet (" << ntohs(header->type) << ").");
+ debugs(80, DBG_IMPORTANT, "Unknown record type in WCCPv2 Packet (" << ntohs(itemHeader->type) << ").");
}

- offset += sizeof(struct wccp2_item_header_t);
- offset += ntohs(header->length);
-
- if (offset > data_length) {
- debugs(80, DBG_IMPORTANT, "Error: WCCPv2 packet tried to tell us there is data beyond the end of the packet");
- return;
- }
+ offset += itemSize;
+ assert(offset <= data_length && "CheckFieldDataLength(itemHeader...) established that");
}

- if ((security_info == NULL) || (service_info == NULL) || (router_identity_info == NULL) || (router_view_header == NULL)) {
- debugs(80, DBG_IMPORTANT, "Incomplete WCCPv2 Packet");
- return;
- }
+ Must2(security_info, "packet missing security definition");
+ Must2(service_info, "packet missing service_info definition");
+ Must2(router_identity_info, "packet missing router_identity_info definition");
+ Must2(router_view_header, "packet missing router_view definition");

debugs(80, 5, "Complete packet received");

@@ -1308,10 +1338,7 @@ wccp2HandleUdp(int sock, void *)
break;
}

- if (router_list_ptr->next == NULL) {
- debugs(80, DBG_IMPORTANT, "WCCPv2 Packet received from unknown router");
- return;
- }
+ Must2(router_list_ptr->next, "packet received from unknown router");

/* Set the router id */
router_list_ptr->info->router_address = router_identity_info->router_id_element.router_address;
@@ -1331,11 +1358,20 @@ wccp2HandleUdp(int sock, void *)
}
} else {

- char *end = ((char *) router_capability_header) + sizeof(*router_capability_header) + ntohs(router_capability_header->capability_info_length) - sizeof(struct wccp2_capability_info_header_t);
-
- router_capability_element = (struct wccp2_capability_element_t *) (((char *) router_capability_header) + sizeof(*router_capability_header));
-
- while ((char *) router_capability_element <= end) {
+ const auto router_capability_data_length = ntohs(router_capability_header->capability_info_length);
+ assert(router_capability_data_start);
+ const auto router_capability_data_end = router_capability_data_start +
+ router_capability_data_length;
+ for (auto router_capability_data_current = router_capability_data_start;
+ router_capability_data_current < router_capability_data_end;) {
+
+ SetField(router_capability_element, router_capability_data_current,
+ router_capability_data_start, router_capability_data_length,
+ "capability element header truncated");
+ const auto elementSize = CheckFieldDataLength(
+ router_capability_element, ntohs(router_capability_element->capability_length),
+ router_capability_data_start, router_capability_data_length,
+ "capability element truncated");

switch (ntohs(router_capability_element->capability_type)) {

@@ -1377,7 +1413,7 @@ wccp2HandleUdp(int sock, void *)
debugs(80, DBG_IMPORTANT, "Unknown capability type in WCCPv2 Packet (" << ntohs(router_capability_element->capability_type) << ").");
}

- router_capability_element = (struct wccp2_capability_element_t *) (((char *) router_capability_element) + sizeof(struct wccp2_item_header_t) + ntohs(router_capability_element->capability_length));
+ router_capability_data_current += elementSize;
}
}

@@ -1396,23 +1432,34 @@ wccp2HandleUdp(int sock, void *)
num_caches = 0;

/* Check to see if we're the master cache and update the cache list */
- found = 0;
+ bool found = false;
service_list_ptr->lowest_ip = 1;
cache_list_ptr = &router_list_ptr->cache_list_head;

/* to find the list of caches, we start at the end of the router view header */

ptr = (char *) (router_view_header) + sizeof(struct router_view_t);
+ const auto router_view_size = sizeof(struct router_view_t) +
+ ntohs(router_view_header->header.length);

/* Then we read the number of routers */
- memcpy(&tmp, ptr, sizeof(tmp));
+ const uint32_t *routerCountRaw = nullptr;
+ SetField(routerCountRaw, ptr, router_view_header, router_view_size,
+ "malformed packet (truncated router view info w/o number of routers)");

/* skip the number plus all the ip's */
-
- ptr += sizeof(tmp) + (ntohl(tmp) * sizeof(struct in_addr));
+ ptr += sizeof(*routerCountRaw);
+ const auto ipCount = ntohl(*routerCountRaw);
+ const auto ipsSize = ipCount * sizeof(struct in_addr); // we check for unsigned overflow below
+ Must2(ipsSize / sizeof(struct in_addr) != ipCount, "huge IP address count");
+ CheckSectionLength(ptr, ipsSize, router_view_header, router_view_size, "invalid IP address count");
+ ptr += ipsSize;

/* Then read the number of caches */
- memcpy(&tmp, ptr, sizeof(tmp));
+ const uint32_t *cacheCountRaw = nullptr;
+ SetField(cacheCountRaw, ptr, router_view_header, router_view_size,
+ "malformed packet (truncated router view info w/o cache count)");
+ memcpy(&tmp, cacheCountRaw, sizeof(tmp)); // TODO: Replace tmp with cacheCount
ptr += sizeof(tmp);

if (ntohl(tmp) != 0) {
@@ -1426,7 +1473,8 @@ wccp2HandleUdp(int sock, void *)

case WCCP2_ASSIGNMENT_METHOD_HASH:

- cache_identity = (struct wccp2_cache_identity_info_t *) ptr;
+ SetField(cache_identity, ptr, router_view_header, router_view_size,
+ "malformed packet (truncated router view info cache w/o assignment hash)");

ptr += sizeof(struct wccp2_cache_identity_info_t);

@@ -1437,13 +1485,15 @@ wccp2HandleUdp(int sock, void *)

case WCCP2_ASSIGNMENT_METHOD_MASK:

- cache_mask_info = (struct cache_mask_info_t *) ptr;
+ SetField(cache_mask_info, ptr, router_view_header, router_view_size,
+ "malformed packet (truncated router view info cache w/o assignment mask)");

/* The mask assignment has an undocumented variable length entry here */

if (ntohl(cache_mask_info->num1) == 3) {

- cache_mask_identity = (struct wccp2_cache_mask_identity_info_t *) ptr;
+ SetField(cache_mask_identity, ptr, router_view_header, router_view_size,
+ "malformed packet (truncated router view info cache w/o assignment mask identity)");

ptr += sizeof(struct wccp2_cache_mask_identity_info_t);

@@ -1474,10 +1524,7 @@ wccp2HandleUdp(int sock, void *)
debugs (80, 5, "checking cache list: (" << std::hex << cache_address.s_addr << ":" << router_list_ptr->local_ip.s_addr << ")");

/* Check to see if it's the master, or us */
-
- if (cache_address.s_addr == router_list_ptr->local_ip.s_addr) {
- found = 1;
- }
+ found = found || (cache_address.s_addr == router_list_ptr->local_ip.s_addr);

if (cache_address.s_addr < router_list_ptr->local_ip.s_addr) {
service_list_ptr->lowest_ip = 0;
@@ -1494,7 +1541,7 @@ wccp2HandleUdp(int sock, void *)
cache_list_ptr->next = NULL;

service_list_ptr->lowest_ip = 1;
- found = 1;
+ found = true;
num_caches = 1;
}

@@ -1502,7 +1549,7 @@ wccp2HandleUdp(int sock, void *)

router_list_ptr->num_caches = htonl(num_caches);

- if ((found == 1) && (service_list_ptr->lowest_ip == 1)) {
+ if (found && (service_list_ptr->lowest_ip == 1)) {
if (ntohl(router_view_header->change_number) != router_list_ptr->member_change) {
debugs(80, 4, "Change detected - queueing up new assignment");
router_list_ptr->member_change = ntohl(router_view_header->change_number);
@@ -1515,6 +1562,10 @@ wccp2HandleUdp(int sock, void *)
eventDelete(wccp2AssignBuckets, NULL);
debugs(80, 5, "I am not the lowest ip cache - not assigning buckets");
}
+
+ } catch (...) {
+ debugs(80, DBG_IMPORTANT, "ERROR: Ignoring WCCPv2 message: " << CurrentException);
+ }
}

static void

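Structurally, the rewrite above funnels every malformed-packet case through C++ exceptions: the bounds helpers (CheckSectionLength, CheckFieldDataLength, SetField) and the Must2 invariants throw, and a single catch clause at the end of wccp2HandleUdp() logs and drops the packet. A minimal sketch of that shape, with a hypothetical Header type (not the full WCCP parser):

    // Sketch: parse-and-discard-on-error, mirroring the try/catch above.
    try {
        Must2(len >= sizeof(Header), "incomplete header");
        const auto *hdr = reinterpret_cast<const Header*>(buf);
        Must2(ntohs(hdr->length) <= len - sizeof(Header),
              "claimed length exceeds received data");
        // ... walk the items, bounds-checking each field before use ...
    } catch (...) {
        debugs(80, DBG_IMPORTANT, "ERROR: Ignoring WCCPv2 message: " << CurrentException);
    }
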
SOURCES/squid-4.15-CVE-2021-46784.patch (new file, 129 lines)
@@ -0,0 +1,129 @@
From 780c4ea1b4c9d2fb41f6962aa6ed73ae57f74b2b Mon Sep 17 00:00:00 2001
From: Joshua Rogers <MegaManSec@users.noreply.github.com>
Date: Mon, 18 Apr 2022 13:42:36 +0000
Subject: [PATCH] Improve handling of Gopher responses (#1022)

---
src/gopher.cc | 45 ++++++++++++++++++++-------------------------
1 file changed, 20 insertions(+), 25 deletions(-)

diff --git a/src/gopher.cc b/src/gopher.cc
index 169b0e18299..6187da18bcd 100644
--- a/src/gopher.cc
+++ b/src/gopher.cc
@@ -371,7 +371,6 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
char *lpos = NULL;
char *tline = NULL;
LOCAL_ARRAY(char, line, TEMP_BUF_SIZE);
- LOCAL_ARRAY(char, tmpbuf, TEMP_BUF_SIZE);
char *name = NULL;
char *selector = NULL;
char *host = NULL;
@@ -381,7 +380,6 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
char gtype;
StoreEntry *entry = NULL;

- memset(tmpbuf, '\0', TEMP_BUF_SIZE);
memset(line, '\0', TEMP_BUF_SIZE);

entry = gopherState->entry;
@@ -416,7 +414,7 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
return;
}

- String outbuf;
+ SBuf outbuf;

if (!gopherState->HTML_header_added) {
if (gopherState->conversion == GopherStateData::HTML_CSO_RESULT)
@@ -583,34 +581,34 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
break;
}

- memset(tmpbuf, '\0', TEMP_BUF_SIZE);
-
if ((gtype == GOPHER_TELNET) || (gtype == GOPHER_3270)) {
if (strlen(escaped_selector) != 0)
- snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s@%s%s%s/\">%s</A>\n",
- icon_url, escaped_selector, rfc1738_escape_part(host),
- *port ? ":" : "", port, html_quote(name));
+ outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s@%s%s%s/\">%s</A>\n",
+ icon_url, escaped_selector, rfc1738_escape_part(host),
+ *port ? ":" : "", port, html_quote(name));
else
- snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s%s%s/\">%s</A>\n",
- icon_url, rfc1738_escape_part(host), *port ? ":" : "",
- port, html_quote(name));
+ outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s%s%s/\">%s</A>\n",
+ icon_url, rfc1738_escape_part(host), *port ? ":" : "",
+ port, html_quote(name));

} else if (gtype == GOPHER_INFO) {
- snprintf(tmpbuf, TEMP_BUF_SIZE, "\t%s\n", html_quote(name));
+ outbuf.appendf("\t%s\n", html_quote(name));
} else {
if (strncmp(selector, "GET /", 5) == 0) {
/* WWW link */
- snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"http://%s/%s\">%s</A>\n",
- icon_url, host, rfc1738_escape_unescaped(selector + 5), html_quote(name));
+ outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"http://%s/%s\">%s</A>\n",
+ icon_url, host, rfc1738_escape_unescaped(selector + 5), html_quote(name));
+ } else if (gtype == GOPHER_WWW) {
+ outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"gopher://%s/%c%s\">%s</A>\n",
+ icon_url, rfc1738_escape_unescaped(selector), html_quote(name));
} else {
/* Standard link */
- snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"gopher://%s/%c%s\">%s</A>\n",
- icon_url, host, gtype, escaped_selector, html_quote(name));
+ outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"gopher://%s/%c%s\">%s</A>\n",
+ icon_url, host, gtype, escaped_selector, html_quote(name));
}
}

safe_free(escaped_selector);
- outbuf.append(tmpbuf);
} else {
memset(line, '\0', TEMP_BUF_SIZE);
continue;
@@ -643,13 +641,12 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
break;

if (gopherState->cso_recno != recno) {
- snprintf(tmpbuf, TEMP_BUF_SIZE, "</PRE><HR noshade size=\"1px\"><H2>Record# %d<br><i>%s</i></H2>\n<PRE>", recno, html_quote(result));
+ outbuf.appendf("</PRE><HR noshade size=\"1px\"><H2>Record# %d<br><i>%s</i></H2>\n<PRE>", recno, html_quote(result));
gopherState->cso_recno = recno;
} else {
- snprintf(tmpbuf, TEMP_BUF_SIZE, "%s\n", html_quote(result));
+ outbuf.appendf("%s\n", html_quote(result));
}

- outbuf.append(tmpbuf);
break;
} else {
int code;
@@ -677,8 +674,7 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)

case 502: { /* Too Many Matches */
/* Print the message the server returns */
- snprintf(tmpbuf, TEMP_BUF_SIZE, "</PRE><HR noshade size=\"1px\"><H2>%s</H2>\n<PRE>", html_quote(result));
- outbuf.append(tmpbuf);
+ outbuf.appendf("</PRE><HR noshade size=\"1px\"><H2>%s</H2>\n<PRE>", html_quote(result));
break;
}

@@ -694,13 +690,12 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)

} /* while loop */

- if (outbuf.size() > 0) {
- entry->append(outbuf.rawBuf(), outbuf.size());
+ if (outbuf.length() > 0) {
+ entry->append(outbuf.rawContent(), outbuf.length());
/* now let start sending stuff to client */
entry->flush();
}

- outbuf.clean();
return;
}

SOURCES/squid-4.15-CVE-2022-41318.patch (new file, 38 lines)
@@ -0,0 +1,38 @@
commit 4031c6c2b004190fdffbc19dab7cd0305a2025b7 (refs/remotes/origin/v4, refs/remotes/github/v4, refs/heads/v4)
Author: Amos Jeffries <yadij@users.noreply.github.com>
Date: 2022-08-09 23:34:54 +0000

Bug 3193 pt2: NTLM decoder truncating strings (#1114)

The initial bug fix overlooked large 'offset' causing integer
wrap to extract a too-short length string.

Improve debugs and checks sequence to clarify cases and ensure
that all are handled correctly.

diff --git a/lib/ntlmauth/ntlmauth.cc b/lib/ntlmauth/ntlmauth.cc
index 5d9637290..f00fd51f8 100644
--- a/lib/ntlmauth/ntlmauth.cc
+++ b/lib/ntlmauth/ntlmauth.cc
@@ -107,10 +107,19 @@ ntlm_fetch_string(const ntlmhdr *packet, const int32_t packet_size, const strhdr
int32_t o = le32toh(str->offset);
// debug("ntlm_fetch_string(plength=%d,l=%d,o=%d)\n",packet_size,l,o);

- if (l < 0 || l > NTLM_MAX_FIELD_LENGTH || o + l > packet_size || o == 0) {
- debug("ntlm_fetch_string: insane data (pkt-sz: %d, fetch len: %d, offset: %d)\n", packet_size,l,o);
+ if (l < 0 || l > NTLM_MAX_FIELD_LENGTH) {
+ debug("ntlm_fetch_string: insane string length (pkt-sz: %d, fetch len: %d, offset: %d)\n", packet_size,l,o);
return rv;
}
+ else if (o <= 0 || o > packet_size) {
+ debug("ntlm_fetch_string: insane string offset (pkt-sz: %d, fetch len: %d, offset: %d)\n", packet_size,l,o);
+ return rv;
+ }
+ else if (l > packet_size - o) {
+ debug("ntlm_fetch_string: truncated string data (pkt-sz: %d, fetch len: %d, offset: %d)\n", packet_size,l,o);
+ return rv;
+ }
+
rv.str = (char *)packet + o;
rv.l = 0;
if ((flags & NTLM_NEGOTIATE_ASCII) == 0) {

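The substantive change above is splitting one combined test into three ordered ones. With attacker-controlled 32-bit l and o, the old "o + l > packet_size" could wrap around and pass; validating o against packet_size first makes the subtraction in "l > packet_size - o" safe. A standalone sketch of the overflow-safe idiom (hypothetical helper, not the Squid function):

    // true iff the [o, o+l) field lies inside a packet of packet_size bytes
    static bool fieldFits(int32_t l, int32_t o, int32_t packet_size)
    {
        if (l < 0 || l > NTLM_MAX_FIELD_LENGTH)
            return false;            // insane length
        if (o <= 0 || o > packet_size)
            return false;            // insane offset
        return l <= packet_size - o; // subtraction cannot wrap here
    }
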
SOURCES/squid-4.15-CVE-2023-46724.patch (new file, 24 lines)
@@ -0,0 +1,24 @@
diff --git a/src/anyp/Uri.cc b/src/anyp/Uri.cc
index 20b9bf1..81ebb18 100644
--- a/src/anyp/Uri.cc
+++ b/src/anyp/Uri.cc
@@ -173,6 +173,10 @@ urlInitialize(void)
assert(0 == matchDomainName("*.foo.com", ".foo.com", mdnHonorWildcards));
assert(0 != matchDomainName("*.foo.com", "foo.com", mdnHonorWildcards));

+ assert(0 != matchDomainName("foo.com", ""));
+ assert(0 != matchDomainName("foo.com", "", mdnHonorWildcards));
+ assert(0 != matchDomainName("foo.com", "", mdnRejectSubsubDomains));
+
/* more cases? */
}

@@ -756,6 +760,8 @@ matchDomainName(const char *h, const char *d, MatchDomainNameFlags flags)
return -1;

dl = strlen(d);
+ if (dl == 0)
+ return 1;

/*
* Start at the ends of the two strings and work towards the

SOURCES/squid-4.15-CVE-2023-46728.patch (new file, 1673 lines)
File diff suppressed because it is too large.

SOURCES/squid-4.15-CVE-2023-46846.patch (new file, 1281 lines)
File diff suppressed because it is too large.

SOURCES/squid-4.15-CVE-2023-46847.patch (new file, 23 lines)
@@ -0,0 +1,23 @@
diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc
index 6a9736f..0a883fa 100644
--- a/src/auth/digest/Config.cc
+++ b/src/auth/digest/Config.cc
@@ -847,11 +847,15 @@ Auth::Digest::Config::decode(char const *proxy_auth, const char *aRequestRealm)
break;

case DIGEST_NC:
- if (value.size() != 8) {
+ if (value.size() == 8) {
+ // for historical reasons, the nc value MUST be exactly 8 bytes
+ static_assert(sizeof(digest_request->nc) == 8 + 1, "bad nc buffer size");
+ xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
+ debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
+ } else {
debugs(29, 9, "Invalid nc '" << value << "' in '" << temp << "'");
+ digest_request->nc[0] = 0;
}
- xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
- debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
break;

case DIGEST_CNONCE:

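The fix above flips the logic so the nonce count is copied only after its length has been validated, with a static_assert pinning the destination buffer size. The fixed-width-field idiom in isolation (hypothetical names; Squid's xstrncpy copies at most n-1 bytes and NUL-terminates):

    char nc[8 + 1];                       // exactly 8 chars plus NUL
    static_assert(sizeof(nc) == 8 + 1, "bad nc buffer size");
    if (value.size() == 8) {
        xstrncpy(nc, value.rawBuf(), value.size() + 1); // fits by the check above
    } else {
        nc[0] = 0;                        // reject anything not exactly 8 bytes
    }
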
SOURCES/squid-4.15-CVE-2023-49285.patch (new file, 30 lines)
@@ -0,0 +1,30 @@
commit 77b3fb4df0f126784d5fd4967c28ed40eb8d521b
Author: Alex Rousskov <rousskov@measurement-factory.com>
Date: Wed Oct 25 19:41:45 2023 +0000

RFC 1123: Fix date parsing (#1538)

The bug was discovered and detailed by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/datetime-overflow.html
where it was filed as "1-Byte Buffer OverRead in RFC 1123 date/time
Handling".

diff --git a/lib/rfc1123.c b/lib/rfc1123.c
index e5bf9a4d7..cb484cc00 100644
--- a/lib/rfc1123.c
+++ b/lib/rfc1123.c
@@ -50,7 +50,13 @@ make_month(const char *s)
char month[3];

month[0] = xtoupper(*s);
+ if (!month[0])
+ return -1; // protects *(s + 1) below
+
month[1] = xtolower(*(s + 1));
+ if (!month[1])
+ return -1; // protects *(s + 2) below
+
month[2] = xtolower(*(s + 2));

for (i = 0; i < 12; i++)

SOURCES/squid-4.15-CVE-2023-49286.patch (new file, 62 lines)
@@ -0,0 +1,62 @@
diff --git a/src/ipc.cc b/src/ipc.cc
index 42e11e6..a68e623 100644
--- a/src/ipc.cc
+++ b/src/ipc.cc
@@ -19,6 +19,11 @@
#include "SquidConfig.h"
#include "SquidIpc.h"
#include "tools.h"
+#include <cstdlib>
+
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif

static const char *hello_string = "hi there\n";
#ifndef HELLO_BUF_SZ
@@ -365,6 +370,22 @@ ipcCreate(int type, const char *prog, const char *const args[], const char *name
}

PutEnvironment();
+
+ // A dup(2) wrapper that reports and exits the process on errors. The
+ // exiting logic is only suitable for this child process context.
+ const auto dupOrExit = [prog,name](const int oldFd) {
+ const auto newFd = dup(oldFd);
+ if (newFd < 0) {
+ const auto savedErrno = errno;
+ debugs(54, DBG_CRITICAL, "ERROR: Helper process initialization failure: " << name <<
+ Debug::Extra << "helper (CHILD) PID: " << getpid() <<
+ Debug::Extra << "helper program name: " << prog <<
+ Debug::Extra << "dup(2) system call error for FD " << oldFd << ": " << xstrerr(savedErrno));
+ _exit(EXIT_FAILURE);
+ }
+ return newFd;
+ };
+
/*
* This double-dup stuff avoids problems when one of
* crfd, cwfd, or debug_log are in the rage 0-2.
@@ -372,17 +393,16 @@ ipcCreate(int type, const char *prog, const char *const args[], const char *name

do {
/* First make sure 0-2 is occupied by something. Gets cleaned up later */
- x = dup(crfd);
- assert(x > -1);
- } while (x < 3 && x > -1);
+ x = dupOrExit(crfd);
+ } while (x < 3);

close(x);

- t1 = dup(crfd);
+ t1 = dupOrExit(crfd);

- t2 = dup(cwfd);
+ t2 = dupOrExit(cwfd);

- t3 = dup(fileno(debug_log));
+ t3 = dupOrExit(fileno(debug_log));

assert(t1 > 2 && t2 > 2 && t3 > 2);

50
SOURCES/squid-4.15-CVE-2023-50269.patch
Normal file
50
SOURCES/squid-4.15-CVE-2023-50269.patch
Normal file
@ -0,0 +1,50 @@
diff --git a/src/ClientRequestContext.h b/src/ClientRequestContext.h
index fe2edf6..47aa935 100644
--- a/src/ClientRequestContext.h
+++ b/src/ClientRequestContext.h
@@ -81,6 +81,10 @@ public:
#endif
ErrorState *error; ///< saved error page for centralized/delayed processing
bool readNextRequest; ///< whether Squid should read after error handling
+
+#if FOLLOW_X_FORWARDED_FOR
+ size_t currentXffHopNumber = 0; ///< number of X-Forwarded-For header values processed so far
+#endif
};

#endif /* SQUID_CLIENTREQUESTCONTEXT_H */
diff --git a/src/client_side_request.cc b/src/client_side_request.cc
index 1c6ff62..b758f6f 100644
--- a/src/client_side_request.cc
+++ b/src/client_side_request.cc
@@ -78,6 +78,11 @@
static const char *const crlf = "\r\n";

#if FOLLOW_X_FORWARDED_FOR
+
+#if !defined(SQUID_X_FORWARDED_FOR_HOP_MAX)
+#define SQUID_X_FORWARDED_FOR_HOP_MAX 64
+#endif
+
static void clientFollowXForwardedForCheck(allow_t answer, void *data);
#endif /* FOLLOW_X_FORWARDED_FOR */

@@ -485,8 +490,16 @@ clientFollowXForwardedForCheck(allow_t answer, void *data)
/* override the default src_addr tested if we have to go deeper than one level into XFF */
Filled(calloutContext->acl_checklist)->src_addr = request->indirect_client_addr;
}
- calloutContext->acl_checklist->nonBlockingCheck(clientFollowXForwardedForCheck, data);
- return;
+ if (++calloutContext->currentXffHopNumber < SQUID_X_FORWARDED_FOR_HOP_MAX) {
+ calloutContext->acl_checklist->nonBlockingCheck(clientFollowXForwardedForCheck, data);
+ return;
+ }
+ const auto headerName = Http::HeaderLookupTable.lookup(Http::HdrType::X_FORWARDED_FOR).name;
+ debugs(28, DBG_CRITICAL, "ERROR: Ignoring trailing " << headerName << " addresses" <<
+ Debug::Extra << "addresses allowed by follow_x_forwarded_for: " << calloutContext->currentXffHopNumber <<
+ Debug::Extra << "last/accepted address: " << request->indirect_client_addr <<
+ Debug::Extra << "ignored trailing addresses: " << request->x_forwarded_for_iterator);
+ // fall through to resume clientAccessCheck() processing
}
}
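A rough sketch of the hop-cap idea outside Squid's ACL callback machinery; the container-based walk and all names below are illustrative:

#include <cstddef>
#include <string>
#include <vector>

// Walk an X-Forwarded-For chain right to left, but give up after a fixed
// number of hops so a crafted header cannot drive unbounded processing.
static const size_t kXffHopMax = 64; // mirrors SQUID_X_FORWARDED_FOR_HOP_MAX

static std::string lastAcceptedAddress(const std::vector<std::string> &xffChain)
{
    std::string accepted = "direct-client"; // stand-in for the TCP client address
    size_t hop = 0;
    for (auto it = xffChain.rbegin(); it != xffChain.rend(); ++it) {
        if (++hop >= kXffHopMax)
            break; // ignore the remaining (possibly attacker-supplied) hops
        accepted = *it;
    }
    return accepted;
}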
4352
SOURCES/squid-4.15-CVE-2023-5824.patch
Normal file
4352
SOURCES/squid-4.15-CVE-2023-5824.patch
Normal file
File diff suppressed because it is too large
193
SOURCES/squid-4.15-CVE-2024-25111.patch
Normal file
193
SOURCES/squid-4.15-CVE-2024-25111.patch
Normal file
@ -0,0 +1,193 @@
diff --git a/src/http.cc b/src/http.cc
index b006300..023e411 100644
--- a/src/http.cc
+++ b/src/http.cc
@@ -52,6 +52,7 @@
#include "rfc1738.h"
#include "SquidConfig.h"
#include "SquidTime.h"
+#include "SquidMath.h"
#include "StatCounters.h"
#include "Store.h"
#include "StrList.h"
@@ -1150,18 +1151,26 @@ HttpStateData::readReply(const CommIoCbParams &io)
* Plus, it breaks our lame *HalfClosed() detection
*/

- Must(maybeMakeSpaceAvailable(true));
- CommIoCbParams rd(this); // will be expanded with ReadNow results
- rd.conn = io.conn;
- rd.size = entry->bytesWanted(Range<size_t>(0, inBuf.spaceSize()));
+ size_t moreDataPermission = 0;
+ if ((!canBufferMoreReplyBytes(&moreDataPermission) || !moreDataPermission)) {
+ abortTransaction("ready to read required data, but the read buffer is full and cannot be drained");
+ return;
+ }
+
+ const auto readSizeMax = maybeMakeSpaceAvailable(moreDataPermission);
+ // TODO: Move this logic inside maybeMakeSpaceAvailable():
+ const auto readSizeWanted = readSizeMax ? entry->bytesWanted(Range<size_t>(0, readSizeMax)) : 0;

- if (rd.size <= 0) {
+ if (readSizeWanted <= 0) {
assert(entry->mem_obj);
AsyncCall::Pointer nilCall;
entry->mem_obj->delayRead(DeferredRead(readDelayed, this, CommRead(io.conn, NULL, 0, nilCall)));
return;
}

+ CommIoCbParams rd(this); // will be expanded with ReadNow results
+ rd.conn = io.conn;
+ rd.size = readSizeWanted;
switch (Comm::ReadNow(rd, inBuf)) {
case Comm::INPROGRESS:
if (inBuf.isEmpty())
@@ -1520,8 +1529,11 @@ HttpStateData::maybeReadVirginBody()
if (!Comm::IsConnOpen(serverConnection) || fd_table[serverConnection->fd].closing())
return;

- if (!maybeMakeSpaceAvailable(false))
+ size_t moreDataPermission = 0;
+ if ((!canBufferMoreReplyBytes(&moreDataPermission)) || !moreDataPermission) {
+ abortTransaction("more response bytes required, but the read buffer is full and cannot be drained");
return;
+ }

// XXX: get rid of the do_next_read flag
// check for the proper reasons preventing read(2)
@@ -1539,40 +1551,79 @@ HttpStateData::maybeReadVirginBody()
Comm::Read(serverConnection, call);
}

+/// Desired inBuf capacity based on various capacity preferences/limits:
+/// * a smaller buffer may not hold enough for look-ahead header/body parsers;
+/// * a smaller buffer may result in inefficient tiny network reads;
+/// * a bigger buffer may waste memory;
+/// * a bigger buffer may exceed SBuf storage capabilities (SBuf::maxSize);
+size_t
+HttpStateData::calcReadBufferCapacityLimit() const
+{
+ if (!flags.headers_parsed)
+ return Config.maxReplyHeaderSize;
+
+ // XXX: Our inBuf is not used to maintain the read-ahead gap, and using
+ // Config.readAheadGap like this creates huge read buffers for large
+ // read_ahead_gap values. TODO: Switch to using tcp_recv_bufsize as the
+ // primary read buffer capacity factor.
+ //
+ // TODO: Cannot reuse throwing NaturalCast() here. Consider removing
+ // .value() dereference in NaturalCast() or add/use NaturalCastOrMax().
+ const auto configurationPreferences = NaturalSum<size_t>(Config.readAheadGap).second ? NaturalSum<size_t>(Config.readAheadGap).first : SBuf::maxSize;
+
+ // TODO: Honor TeChunkedParser look-ahead and trailer parsing requirements
+ // (when explicit configurationPreferences are set too low).
+
+ return std::min<size_t>(configurationPreferences, SBuf::maxSize);
+}
+
+/// The maximum number of virgin reply bytes we may buffer before we violate
+/// the currently configured response buffering limits.
+/// \retval std::nullopt means that no more virgin response bytes can be read
+/// \retval 0 means that more virgin response bytes may be read later
+/// \retval >0 is the number of bytes that can be read now (subject to other constraints)
bool
-HttpStateData::maybeMakeSpaceAvailable(bool doGrow)
+HttpStateData::canBufferMoreReplyBytes(size_t *maxReadSize) const
{
- // how much we are allowed to buffer
- const int limitBuffer = (flags.headers_parsed ? Config.readAheadGap : Config.maxReplyHeaderSize);
-
- if (limitBuffer < 0 || inBuf.length() >= (SBuf::size_type)limitBuffer) {
- // when buffer is at or over limit already
- debugs(11, 7, "will not read up to " << limitBuffer << ". buffer has (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
- debugs(11, DBG_DATA, "buffer has {" << inBuf << "}");
- // Process next response from buffer
- processReply();
- return false;
+#if USE_ADAPTATION
+ // If we do not check this now, we may say the final "no" prematurely below
+ // because inBuf.length() will decrease as adaptation drains buffered bytes.
+ if (responseBodyBuffer) {
+ debugs(11, 3, "yes, but waiting for adaptation to drain read buffer");
+ *maxReadSize = 0; // yes, we may be able to buffer more (but later)
+ return true;
+ }
+#endif
+
+ const auto maxCapacity = calcReadBufferCapacityLimit();
+ if (inBuf.length() >= maxCapacity) {
+ debugs(11, 3, "no, due to a full buffer: " << inBuf.length() << '/' << inBuf.spaceSize() << "; limit: " << maxCapacity);
+ return false; // no, configuration prohibits buffering more
}

+ *maxReadSize = (maxCapacity - inBuf.length()); // positive
+ debugs(11, 7, "yes, may read up to " << *maxReadSize << " into " << inBuf.length() << '/' << inBuf.spaceSize());
+ return true; // yes, can read up to this many bytes (subject to other constraints)
+}
+
+/// prepare read buffer for reading
+/// \return the maximum number of bytes the caller should attempt to read
+/// \retval 0 means that the caller should delay reading
+size_t
+HttpStateData::maybeMakeSpaceAvailable(const size_t maxReadSize)
+{
// how much we want to read
- const size_t read_size = calcBufferSpaceToReserve(inBuf.spaceSize(), (limitBuffer - inBuf.length()));
+ const size_t read_size = calcBufferSpaceToReserve(inBuf.spaceSize(), maxReadSize);

- if (!read_size) {
+ if (read_size < 2) {
debugs(11, 7, "will not read up to " << read_size << " into buffer (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
- return false;
+ return 0;
}

- // just report whether we could grow or not, do not actually do it
- if (doGrow)
- return (read_size >= 2);
-
// we may need to grow the buffer
inBuf.reserveSpace(read_size);
- debugs(11, 8, (!flags.do_next_read ? "will not" : "may") <<
- " read up to " << read_size << " bytes info buf(" << inBuf.length() << "/" << inBuf.spaceSize() <<
- ") from " << serverConnection);
-
- return (inBuf.spaceSize() >= 2); // only read if there is 1+ bytes of space available
+ debugs(11, 7, "may read up to " << read_size << " bytes info buffer (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
+ return read_size;
}

/// called after writing the very last request byte (body, last-chunk, etc)
diff --git a/src/http.h b/src/http.h
index 8965b77..007d2e6 100644
--- a/src/http.h
+++ b/src/http.h
@@ -15,6 +15,8 @@
#include "http/StateFlags.h"
#include "sbuf/SBuf.h"

+#include <optional>
+
class FwdState;
class HttpHeader;

@@ -107,16 +109,9 @@ private:

void abortTransaction(const char *reason) { abortAll(reason); } // abnormal termination

- /**
- * determine if read buffer can have space made available
- * for a read.
- *
- * \param grow whether to actually expand the buffer
- *
- * \return whether the buffer can be grown to provide space
- * regardless of whether the grow actually happened.
- */
- bool maybeMakeSpaceAvailable(bool grow);
+ size_t calcReadBufferCapacityLimit() const;
+ bool canBufferMoreReplyBytes(size_t *maxReadSize) const;
+ size_t maybeMakeSpaceAvailable(size_t maxReadSize);

// consuming request body
virtual void handleMoreRequestBodyAvailable();
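A compact sketch of the two-step protocol this patch introduces (first ask whether more reply bytes may be buffered, then size the read), with all limits collapsed into one illustrative constant; not Squid's actual types:

#include <cstddef>

// Stand-in for HttpStateData's buffering question: "no" means the caller
// should abort the transaction instead of reading; a positive answer caps
// the next read(2) size.
struct ReadBufferSketch {
    size_t length = 0;                // bytes currently buffered
    size_t capacityLimit = 64 * 1024; // stand-in for calcReadBufferCapacityLimit()

    bool canBufferMore(size_t *maxReadSize) const {
        if (length >= capacityLimit)
            return false; // configuration prohibits buffering more
        *maxReadSize = capacityLimit - length; // positive
        return true;
    }
};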
105
SOURCES/squid-4.15-CVE-2024-25617.patch
Normal file
105
SOURCES/squid-4.15-CVE-2024-25617.patch
Normal file
@ -0,0 +1,105 @@
diff --git a/src/SquidString.h b/src/SquidString.h
index a791885..b9aef38 100644
--- a/src/SquidString.h
+++ b/src/SquidString.h
@@ -114,7 +114,16 @@ private:

size_type len_; /* current length */

- static const size_type SizeMax_ = 65535; ///< 64K limit protects some fixed-size buffers
+ /// An earlier 64KB limit was meant to protect some fixed-size buffers, but
+ /// (a) we do not know where those buffers are (or whether they still exist)
+ /// (b) too many String users unknowingly exceeded that limit and asserted.
+ /// We are now using a larger limit to reduce the number of (b) cases,
+ /// especially cases where "compact" lists of items grow 50% in size when we
+ /// convert them to canonical form. The new limit is selected to withstand
+ /// concatenation and ~50% expansion of two HTTP headers limited by default
+ /// request_header_max_size and reply_header_max_size settings.
+ static const size_type SizeMax_ = 3*64*1024 - 1;
+
/// returns true after increasing the first argument by extra if the sum does not exceed SizeMax_
static bool SafeAdd(size_type &base, size_type extra) { if (extra <= SizeMax_ && base <= SizeMax_ - extra) { base += extra; return true; } return false; }

diff --git a/src/cache_cf.cc b/src/cache_cf.cc
index a9c1b7e..46f07bb 100644
--- a/src/cache_cf.cc
+++ b/src/cache_cf.cc
@@ -935,6 +935,18 @@ configDoConfigure(void)
(uint32_t)Config.maxRequestBufferSize, (uint32_t)Config.maxRequestHeaderSize);
}

+ // Warn about the dangers of exceeding String limits when manipulating HTTP
+ // headers. Technically, we do not concatenate _requests_, so we could relax
+ // their check, but we keep the two checks the same for simplicity sake.
+ const auto safeRawHeaderValueSizeMax = (String::SizeMaxXXX()+1)/3;
+ // TODO: static_assert(safeRawHeaderValueSizeMax >= 64*1024); // no WARNINGs for default settings
+ if (Config.maxRequestHeaderSize > safeRawHeaderValueSizeMax)
+ debugs(3, DBG_CRITICAL, "WARNING: Increasing request_header_max_size beyond " << safeRawHeaderValueSizeMax <<
+ " bytes makes Squid more vulnerable to denial-of-service attacks; configured value: " << Config.maxRequestHeaderSize << " bytes");
+ if (Config.maxReplyHeaderSize > safeRawHeaderValueSizeMax)
+ debugs(3, DBG_CRITICAL, "WARNING: Increasing reply_header_max_size beyond " << safeRawHeaderValueSizeMax <<
+ " bytes makes Squid more vulnerable to denial-of-service attacks; configured value: " << Config.maxReplyHeaderSize << " bytes");
+
/*
* Disable client side request pipelining if client_persistent_connections OFF.
* Waste of resources queueing any pipelined requests when the first will close the connection.
diff --git a/src/cf.data.pre b/src/cf.data.pre
index bc2ddcd..d55b870 100644
--- a/src/cf.data.pre
+++ b/src/cf.data.pre
@@ -6196,11 +6196,14 @@ TYPE: b_size_t
DEFAULT: 64 KB
LOC: Config.maxRequestHeaderSize
DOC_START
- This specifies the maximum size for HTTP headers in a request.
- Request headers are usually relatively small (about 512 bytes).
- Placing a limit on the request header size will catch certain
- bugs (for example with persistent connections) and possibly
- buffer-overflow or denial-of-service attacks.
+ This directives limits the header size of a received HTTP request
+ (including request-line). Increasing this limit beyond its 64 KB default
+ exposes certain old Squid code to various denial-of-service attacks. This
+ limit also applies to received FTP commands.
+
+ This limit has no direct affect on Squid memory consumption.
+
+ Squid does not check this limit when sending requests.
DOC_END

NAME: reply_header_max_size
@@ -6209,11 +6212,14 @@ TYPE: b_size_t
DEFAULT: 64 KB
LOC: Config.maxReplyHeaderSize
DOC_START
- This specifies the maximum size for HTTP headers in a reply.
- Reply headers are usually relatively small (about 512 bytes).
- Placing a limit on the reply header size will catch certain
- bugs (for example with persistent connections) and possibly
- buffer-overflow or denial-of-service attacks.
+ This directives limits the header size of a received HTTP response
+ (including status-line). Increasing this limit beyond its 64 KB default
+ exposes certain old Squid code to various denial-of-service attacks. This
+ limit also applies to FTP command responses.
+
+ Squid also checks this limit when loading hit responses from disk cache.
+
+ Squid does not check this limit when sending responses.
DOC_END

NAME: request_body_max_size
diff --git a/src/http.cc b/src/http.cc
index 877172d..b006300 100644
--- a/src/http.cc
+++ b/src/http.cc
@@ -1820,8 +1820,9 @@ HttpStateData::httpBuildRequestHeader(HttpRequest * request,

String strFwd = hdr_in->getList(Http::HdrType::X_FORWARDED_FOR);

- // if we cannot double strFwd size, then it grew past 50% of the limit
- if (!strFwd.canGrowBy(strFwd.size())) {
+ // Detect unreasonably long header values. And paranoidly check String
+ // limits: a String ought to accommodate two reasonable-length values.
+ if (strFwd.size() > 32*1024 || !strFwd.canGrowBy(strFwd.size())) {
// There is probably a forwarding loop with Via detection disabled.
// If we do nothing, String will assert on overflow soon.
// TODO: Terminate all transactions with huge XFF?
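The SafeAdd() check above generalizes to any saturating size limit; a standalone sketch using the patch's 3*64*1024-1 value, with illustrative names:

#include <cstddef>

static const size_t kSizeMax = 3 * 64 * 1024 - 1; // the patch's new String limit

// Grow base by extra only when the sum provably stays within kSizeMax;
// the two comparisons also rule out unsigned wrap-around.
static bool safeAdd(size_t &base, const size_t extra)
{
    if (extra <= kSizeMax && base <= kSizeMax - extra) {
        base += extra;
        return true;
    }
    return false;
}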
32
SOURCES/squid-4.15-ftp-filename-extraction.patch
Normal file
32
SOURCES/squid-4.15-ftp-filename-extraction.patch
Normal file
@ -0,0 +1,32 @@
diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc
index da9867f..e992638 100644
--- a/src/clients/FtpGateway.cc
+++ b/src/clients/FtpGateway.cc
@@ -1084,16 +1084,17 @@ Ftp::Gateway::checkAuth(const HttpHeader * req_hdr)
void
Ftp::Gateway::checkUrlpath()
{
- static SBuf str_type_eq("type=");
- auto t = request->url.path().rfind(';');
-
- if (t != SBuf::npos) {
- auto filenameEnd = t-1;
- if (request->url.path().substr(++t).cmp(str_type_eq, str_type_eq.length()) == 0) {
- t += str_type_eq.length();
- typecode = (char)xtoupper(request->url.path()[t]);
- request->url.path(request->url.path().substr(0,filenameEnd));
- }
+ // If typecode was specified, extract it and leave just the filename in
+ // url.path. Tolerate trailing garbage or missing typecode value. Roughly:
+ // [filename] ;type=[typecode char] [trailing garbage]
+ static const SBuf middle(";type=");
+ const auto typeSpecStart = request->url.path().find(middle);
+ if (typeSpecStart != SBuf::npos) {
+ const auto fullPath = request->url.path();
+ const auto typecodePos = typeSpecStart + middle.length();
+ typecode = (typecodePos < fullPath.length()) ?
+ static_cast<char>(xtoupper(fullPath[typecodePos])) : '\0';
+ request->url.path(fullPath.substr(0, typeSpecStart));
}

int l = request->url.path().length();
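A std::string re-creation of the patched extraction logic, assuming the same ";type=" grammar; this is a sketch, not Squid's SBuf code:

#include <cctype>
#include <string>

// Find ";type=", take the next character (if any) as the typecode, and
// keep only the leading filename; trailing garbage is tolerated.
static char extractTypecode(std::string &path)
{
    static const std::string middle = ";type=";
    const auto typeSpecStart = path.find(middle);
    if (typeSpecStart == std::string::npos)
        return '\0';
    const auto typecodePos = typeSpecStart + middle.size();
    const char typecode = (typecodePos < path.size()) ?
        (char)toupper((unsigned char)path[typecodePos]) : '\0';
    path.erase(typeSpecStart); // drop ";type=..." and anything after it
    return typecode;
}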
163
SOURCES/squid-4.15-halfclosed.patch
Normal file
163
SOURCES/squid-4.15-halfclosed.patch
Normal file
@ -0,0 +1,163 @@
diff --git a/src/client_side.cc b/src/client_side.cc
index f57f3f7..ab393e4 100644
--- a/src/client_side.cc
+++ b/src/client_side.cc
@@ -906,7 +906,7 @@ ConnStateData::kick()
* We are done with the response, and we are either still receiving request
* body (early response!) or have already stopped receiving anything.
*
- * If we are still receiving, then clientParseRequest() below will fail.
+ * If we are still receiving, then parseRequests() below will fail.
* (XXX: but then we will call readNextRequest() which may succeed and
* execute a smuggled request as we are not done with the current request).
*
@@ -926,28 +926,12 @@ ConnStateData::kick()
* Attempt to parse a request from the request buffer.
* If we've been fed a pipelined request it may already
* be in our read buffer.
- *
- \par
- * This needs to fall through - if we're unlucky and parse the _last_ request
- * from our read buffer we may never re-register for another client read.
*/

- if (clientParseRequests()) {
- debugs(33, 3, clientConnection << ": parsed next request from buffer");
- }
+ parseRequests();

- /** \par
- * Either we need to kick-start another read or, if we have
- * a half-closed connection, kill it after the last request.
- * This saves waiting for half-closed connections to finished being
- * half-closed _AND_ then, sometimes, spending "Timeout" time in
- * the keepalive "Waiting for next request" state.
- */
- if (commIsHalfClosed(clientConnection->fd) && pipeline.empty()) {
- debugs(33, 3, "half-closed client with no pending requests, closing");
- clientConnection->close();
+ if (!isOpen())
return;
- }

/** \par
* At this point we either have a parsed request (which we've
@@ -2058,16 +2042,11 @@ ConnStateData::receivedFirstByte()
commSetConnTimeout(clientConnection, Config.Timeout.request, timeoutCall);
}

-/**
- * Attempt to parse one or more requests from the input buffer.
- * Returns true after completing parsing of at least one request [header]. That
- * includes cases where parsing ended with an error (e.g., a huge request).
- */
-bool
-ConnStateData::clientParseRequests()
+/// Attempt to parse one or more requests from the input buffer.
+/// May close the connection.
+void
+ConnStateData::parseRequests()
{
- bool parsed_req = false;
-
debugs(33, 5, HERE << clientConnection << ": attempting to parse");

// Loop while we have read bytes that are not needed for producing the body
@@ -2116,8 +2095,6 @@ ConnStateData::clientParseRequests()

processParsedRequest(context);

- parsed_req = true; // XXX: do we really need to parse everything right NOW ?
-
if (context->mayUseConnection()) {
debugs(33, 3, HERE << "Not parsing new requests, as this request may need the connection");
break;
@@ -2130,8 +2107,19 @@ ConnStateData::clientParseRequests()
}
}

- /* XXX where to 'finish' the parsing pass? */
- return parsed_req;
+ debugs(33, 7, "buffered leftovers: " << inBuf.length());
+
+ if (isOpen() && commIsHalfClosed(clientConnection->fd)) {
+ if (pipeline.empty()) {
+ // we processed what we could parse, and no more data is coming
+ debugs(33, 5, "closing half-closed without parsed requests: " << clientConnection);
+ clientConnection->close();
+ } else {
+ // we parsed what we could, and no more data is coming
+ debugs(33, 5, "monitoring half-closed while processing parsed requests: " << clientConnection);
+ flags.readMore = false; // may already be false
+ }
+ }
}

void
@@ -2148,23 +2136,7 @@ ConnStateData::afterClientRead()
if (pipeline.empty())
fd_note(clientConnection->fd, "Reading next request");

- if (!clientParseRequests()) {
- if (!isOpen())
- return;
- /*
- * If the client here is half closed and we failed
- * to parse a request, close the connection.
- * The above check with connFinishedWithConn() only
- * succeeds _if_ the buffer is empty which it won't
- * be if we have an incomplete request.
- * XXX: This duplicates ConnStateData::kick
- */
- if (pipeline.empty() && commIsHalfClosed(clientConnection->fd)) {
- debugs(33, 5, clientConnection << ": half-closed connection, no completed request parsed, connection closing.");
- clientConnection->close();
- return;
- }
- }
+ parseRequests();

if (!isOpen())
return;

@@ -3945,7 +3917,7 @@ ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic)
startPinnedConnectionMonitoring();

if (pipeline.empty())
- kick(); // in case clientParseRequests() was blocked by a busy pic.connection
+ kick(); // in case parseRequests() was blocked by a busy pic.connection
}

/// Forward future client requests using the given server connection.
diff --git a/src/client_side.h b/src/client_side.h
index 9fe8463..dfb4d8e 100644
--- a/src/client_side.h
+++ b/src/client_side.h
@@ -85,7 +85,6 @@ public:
virtual void doneWithControlMsg();

/// Traffic parsing
- bool clientParseRequests();
void readNextRequest();

/// try to make progress on a transaction or read more I/O
@@ -373,6 +372,7 @@ private:
virtual bool connFinishedWithConn(int size);
virtual void checkLogging();

+ void parseRequests();
void clientAfterReadingRequests();
bool concurrentRequestQueueFilled() const;

diff --git a/src/tests/stub_client_side.cc b/src/tests/stub_client_side.cc
index d7efb0f..655ed83 100644
--- a/src/tests/stub_client_side.cc
+++ b/src/tests/stub_client_side.cc
@@ -14,7 +14,7 @@
#include "tests/STUB.h"

#include "client_side.h"
-bool ConnStateData::clientParseRequests() STUB_RETVAL(false)
+void ConnStateData::parseRequests() STUB
void ConnStateData::readNextRequest() STUB
bool ConnStateData::isOpen() const STUB_RETVAL(false)
void ConnStateData::kick() STUB
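The post-parse decision this patch centralizes in parseRequests() reduces to a small state table; a sketch with illustrative flags, not Squid's ConnStateData:

// After a parsing pass: close idle half-closed connections, otherwise
// keep processing parsed requests but stop scheduling further reads.
struct ConnSketch {
    bool halfClosed = false;
    bool pipelineEmpty = true;
    bool open = true;
    bool readMore = true;

    void afterParsing() {
        if (open && halfClosed) {
            if (pipelineEmpty)
                open = false;     // close: nothing parsed, no more data coming
            else
                readMore = false; // finish parsed requests, but read no more
        }
    }
};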
367
SOURCES/squid-4.15-ignore-wsp-after-chunk-size.patch
Normal file
367
SOURCES/squid-4.15-ignore-wsp-after-chunk-size.patch
Normal file
@ -0,0 +1,367 @@
From 8d0ee420a4d91ac7fd97316338f1e28b4b060cbf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= <luhliari@redhat.com>
Date: Thu, 10 Oct 2024 19:26:27 +0200
Subject: [PATCH 1/6] Ignore whitespace chars after chunk-size

Previously (before #1498 change), squid was accepting TE-chunked replies
with whitespaces after chunk-size and missing chunk-ext data. After

It turned out that replies with such whitespace chars are pretty
common and other webservers which can act as forward proxies (e.g.
nginx, httpd...) are accepting them.

This change will allow to proxy chunked responses from origin server,
which had whitespaces inbetween chunk-size and CRLF.
---
src/http/one/TeChunkedParser.cc | 1 +
1 file changed, 1 insertion(+)

diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index 9cce10fdc91..04753395e16 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,6 +125,7 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
// Code becomes much simpler when incremental parsing functions throw on
// bad or insufficient input, like in the code below. TODO: Expand up.
try {
+ tok.skipAll(CharacterSet::WSP); // Some servers send SP/TAB after chunk-size
parseChunkExtensions(tok); // a possibly empty chunk-ext list
tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
buf_ = tok.remaining();

From 9c8d35f899035fa06021ab3fe6919f892c2f0c6b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= <luhliari@redhat.com>
Date: Fri, 11 Oct 2024 02:06:31 +0200
Subject: [PATCH 2/6] Added new argument to Http::One::ParseBws()

Depending on new wsp_only argument in ParseBws() it will be decided
which set of whitespaces characters will be parsed. If wsp_only is set
to true, only SP and HTAB chars will be parsed.

Also optimized number of ParseBws calls.
---
src/http/one/Parser.cc | 4 ++--
src/http/one/Parser.h | 3 ++-
src/http/one/TeChunkedParser.cc | 13 +++++++++----
src/http/one/TeChunkedParser.h | 2 +-
4 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
index b1908316a0b..01d7e3bc0e8 100644
--- a/src/http/one/Parser.cc
+++ b/src/http/one/Parser.cc
@@ -273,9 +273,9 @@ Http::One::ErrorLevel()

// BWS = *( SP / HTAB ) ; WhitespaceCharacters() may relax this RFC 7230 rule
void
-Http::One::ParseBws(Parser::Tokenizer &tok)
+Http::One::ParseBws(Parser::Tokenizer &tok, const bool wsp_only)
{
- const auto count = tok.skipAll(Parser::WhitespaceCharacters());
+ const auto count = tok.skipAll(wsp_only ? CharacterSet::WSP : Parser::WhitespaceCharacters());

if (tok.atEnd())
throw InsufficientInput(); // even if count is positive
diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
index d9a0ac8c273..08200371cd6 100644
--- a/src/http/one/Parser.h
+++ b/src/http/one/Parser.h
@@ -163,8 +163,9 @@ class Parser : public RefCountable
};

/// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
+/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimeter chars
/// \throws InsufficientInput when the end of BWS cannot be confirmed
-void ParseBws(Parser::Tokenizer &);
+void ParseBws(Parser::Tokenizer &, const bool wsp_only = false);

/// the right debugs() level for logging HTTP violation messages
int ErrorLevel();
diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index 04753395e16..41e1e5ddaea 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,8 +125,11 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
// Code becomes much simpler when incremental parsing functions throw on
// bad or insufficient input, like in the code below. TODO: Expand up.
try {
- tok.skipAll(CharacterSet::WSP); // Some servers send SP/TAB after chunk-size
- parseChunkExtensions(tok); // a possibly empty chunk-ext list
+ // A possibly empty chunk-ext list. If no chunk-ext has been found,
+ // try to skip trailing BWS, because some servers send "chunk-size BWS CRLF".
+ if (!parseChunkExtensions(tok))
+ ParseBws(tok, true);
+
tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
buf_ = tok.remaining();
parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
@@ -140,20 +143,22 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)

/// Parses the chunk-ext list (RFC 9112 section 7.1.1:
/// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
-void
+bool
Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
{
+ bool foundChunkExt = false;
do {
auto tok = callerTok;

ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size

if (!tok.skip(';'))
- return; // reached the end of extensions (if any)
+ return foundChunkExt; // reached the end of extensions (if any)

parseOneChunkExtension(tok);
buf_ = tok.remaining(); // got one extension
callerTok = tok;
+ foundChunkExt = true;
} while (true);
}

diff --git a/src/http/one/TeChunkedParser.h b/src/http/one/TeChunkedParser.h
index 02eacd1bb89..8c5d4bb4cba 100644
--- a/src/http/one/TeChunkedParser.h
+++ b/src/http/one/TeChunkedParser.h
@@ -71,7 +71,7 @@ class TeChunkedParser : public Http1::Parser
private:
bool parseChunkSize(Tokenizer &tok);
bool parseChunkMetadataSuffix(Tokenizer &);
- void parseChunkExtensions(Tokenizer &);
+ bool parseChunkExtensions(Tokenizer &);
void parseOneChunkExtension(Tokenizer &);
bool parseChunkBody(Tokenizer &tok);
bool parseChunkEnd(Tokenizer &tok);

From 81e67f97f9c386bdd0bb4a5e182395c46adb70ad Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lubo=C5=A1=20Uhliarik?= <luhliari@redhat.com>
Date: Fri, 11 Oct 2024 02:44:33 +0200
Subject: [PATCH 3/6] Fix typo in Parser.h

---
src/http/one/Parser.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
index 08200371cd6..3ef4c5f7752 100644
--- a/src/http/one/Parser.h
+++ b/src/http/one/Parser.h
@@ -163,7 +163,7 @@ class Parser : public RefCountable
};

/// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
-/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimeter chars
+/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimiter chars
/// \throws InsufficientInput when the end of BWS cannot be confirmed
void ParseBws(Parser::Tokenizer &, const bool wsp_only = false);


From a0d4fe1794e605f8299a5c118c758a807453f016 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 10 Oct 2024 22:39:42 -0400
Subject: [PATCH 4/6] Bug 5449 is a regression of Bug 4492!

Both bugs deal with "chunk-size SP+ CRLF" use cases. Bug 4492 had _two_
spaces after chunk-size, which answers one of the PR review questions:
Should we skip just one space? No, we should not.

The lines moved around in many commits, but I believe this regression
was introduced in commit 951013d0 because that commit stopped consuming
partially parsed chunk-ext sequences. That consumption was wrong, but it
had a positive side effect -- fixing Bug 4492...
---
src/http/one/TeChunkedParser.cc | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index 41e1e5ddaea..aa4a840fdcf 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,10 +125,10 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
// Code becomes much simpler when incremental parsing functions throw on
// bad or insufficient input, like in the code below. TODO: Expand up.
try {
- // A possibly empty chunk-ext list. If no chunk-ext has been found,
- // try to skip trailing BWS, because some servers send "chunk-size BWS CRLF".
- if (!parseChunkExtensions(tok))
- ParseBws(tok, true);
+ // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
+ ParseBws(tok, true);
+
+ parseChunkExtensions(tok);

tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
buf_ = tok.remaining();
@@ -150,7 +150,7 @@ Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
do {
auto tok = callerTok;

- ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
+ ParseBws(tok);

if (!tok.skip(';'))
return foundChunkExt; // reached the end of extensions (if any)

From f837f5ff61301a17008f16ce1fb793c2abf19786 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 10 Oct 2024 23:06:42 -0400
Subject: [PATCH 5/6] fixup: Fewer conditionals/ifs and more explicit spelling

... to draw code reader attention when something unusual is going on.
---
src/http/one/Parser.cc | 22 ++++++++++++++++++----
src/http/one/Parser.h | 10 ++++++++--
src/http/one/TeChunkedParser.cc | 14 ++++++--------
src/http/one/TeChunkedParser.h | 2 +-
4 files changed, 33 insertions(+), 15 deletions(-)

diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
index 01d7e3bc0e8..d3937e5e96b 100644
--- a/src/http/one/Parser.cc
+++ b/src/http/one/Parser.cc
@@ -271,11 +271,12 @@ Http::One::ErrorLevel()
return Config.onoff.relaxed_header_parser < 0 ? DBG_IMPORTANT : 5;
}

-// BWS = *( SP / HTAB ) ; WhitespaceCharacters() may relax this RFC 7230 rule
-void
-Http::One::ParseBws(Parser::Tokenizer &tok, const bool wsp_only)
+/// common part of ParseBws() and ParseStrctBws()
+namespace Http::One {
+static void
+ParseBws_(Parser::Tokenizer &tok, const CharacterSet &bwsChars)
{
- const auto count = tok.skipAll(wsp_only ? CharacterSet::WSP : Parser::WhitespaceCharacters());
+ const auto count = tok.skipAll(bwsChars);

if (tok.atEnd())
throw InsufficientInput(); // even if count is positive
@@ -290,4 +291,17 @@ Http::One::ParseBws(Parser::Tokenizer &tok, const bool wsp_only)

// success: no more BWS characters expected
}
+} // namespace Http::One
+
+void
+Http::One::ParseBws(Parser::Tokenizer &tok)
+{
+ ParseBws_(tok, CharacterSet::WSP);
+}
+
+void
+Http::One::ParseStrictBws(Parser::Tokenizer &tok)
+{
+ ParseBws_(tok, Parser::WhitespaceCharacters());
+}

diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
index 3ef4c5f7752..49e399de546 100644
--- a/src/http/one/Parser.h
+++ b/src/http/one/Parser.h
@@ -163,9 +163,15 @@ class Parser : public RefCountable
};

/// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
-/// \param wsp_only force skipping of whitespaces only, don't consider skipping relaxed delimiter chars
/// \throws InsufficientInput when the end of BWS cannot be confirmed
-void ParseBws(Parser::Tokenizer &, const bool wsp_only = false);
+/// \sa WhitespaceCharacters() for the definition of BWS characters
+/// \sa ParseStrictBws() that avoids WhitespaceCharacters() uncertainties
+void ParseBws(Parser::Tokenizer &);
+
+/// Like ParseBws() but only skips CharacterSet::WSP characters. This variation
+/// must be used if the next element may start with CR or any other character
+/// from RelaxedDelimiterCharacters().
+void ParseStrictBws(Parser::Tokenizer &);

/// the right debugs() level for logging HTTP violation messages
int ErrorLevel();
diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index aa4a840fdcf..859471b8c77 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -125,11 +125,11 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
// Code becomes much simpler when incremental parsing functions throw on
// bad or insufficient input, like in the code below. TODO: Expand up.
try {
- // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
- ParseBws(tok, true);
-
- parseChunkExtensions(tok);
+ // Bug 4492: IBM_HTTP_Server sends SP after chunk-size.
+ // No ParseBws() here because it may consume CR required further below.
+ ParseStrictBws(tok);

+ parseChunkExtensions(tok); // a possibly empty chunk-ext list
tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
buf_ = tok.remaining();
parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
@@ -143,22 +143,20 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)

/// Parses the chunk-ext list (RFC 9112 section 7.1.1:
/// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
-bool
+void
Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
{
- bool foundChunkExt = false;
do {
auto tok = callerTok;

ParseBws(tok);

if (!tok.skip(';'))
- return foundChunkExt; // reached the end of extensions (if any)
+ return; // reached the end of extensions (if any)

parseOneChunkExtension(tok);
buf_ = tok.remaining(); // got one extension
callerTok = tok;
- foundChunkExt = true;
} while (true);
}

diff --git a/src/http/one/TeChunkedParser.h b/src/http/one/TeChunkedParser.h
index 8c5d4bb4cba..02eacd1bb89 100644
--- a/src/http/one/TeChunkedParser.h
+++ b/src/http/one/TeChunkedParser.h
@@ -71,7 +71,7 @@ class TeChunkedParser : public Http1::Parser
private:
bool parseChunkSize(Tokenizer &tok);
bool parseChunkMetadataSuffix(Tokenizer &);
- bool parseChunkExtensions(Tokenizer &);
+ void parseChunkExtensions(Tokenizer &);
void parseOneChunkExtension(Tokenizer &);
bool parseChunkBody(Tokenizer &tok);
bool parseChunkEnd(Tokenizer &tok);

From f79936a234e722adb2dd08f31cf6019d81ee712c Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 10 Oct 2024 23:31:08 -0400
Subject: [PATCH 6/6] fixup: Deadly typo

---
src/http/one/Parser.cc | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
index d3937e5e96b..7403a9163a2 100644
--- a/src/http/one/Parser.cc
+++ b/src/http/one/Parser.cc
@@ -296,12 +296,12 @@ ParseBws_(Parser::Tokenizer &tok, const CharacterSet &bwsChars)
void
Http::One::ParseBws(Parser::Tokenizer &tok)
{
- ParseBws_(tok, CharacterSet::WSP);
+ ParseBws_(tok, Parser::WhitespaceCharacters());
}

void
Http::One::ParseStrictBws(Parser::Tokenizer &tok)
{
- ParseBws_(tok, Parser::WhitespaceCharacters());
+ ParseBws_(tok, CharacterSet::WSP);
}
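A buffer-and-index sketch of the final parsing rule this series settles on, outside Squid's Tokenizer; names are illustrative:

#include <cstddef>
#include <stdexcept>
#include <string>

// After chunk-size, skip only SP/HTAB (strict BWS) so the CR of the
// required CRLF is never consumed by whitespace skipping.
static size_t skipStrictBws(const std::string &buf, size_t pos)
{
    while (pos < buf.size() && (buf[pos] == ' ' || buf[pos] == '\t'))
        ++pos;
    if (pos >= buf.size())
        throw std::runtime_error("insufficient input"); // need more bytes
    return pos; // next char should begin chunk-ext, or be the CR of CRLF
}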
156
SOURCES/squid-4.15-ip-bind-address-no-port.patch
Normal file
156
SOURCES/squid-4.15-ip-bind-address-no-port.patch
Normal file
@ -0,0 +1,156 @@
commit c08948c8b831a2ba73c676b48aa11ba1b58cc542
Author: Tomas Korbar <tkorbar@redhat.com>
Date: Thu Dec 8 11:03:08 2022 +0100

Backport adding IP_BIND_ADDRESS_NO_PORT flag to outgoing connections

diff --git a/src/comm.cc b/src/comm.cc
index 0d5f34d..6811b54 100644
--- a/src/comm.cc
+++ b/src/comm.cc
@@ -58,6 +58,7 @@
*/

static IOCB commHalfClosedReader;
+static int comm_openex(int sock_type, int proto, Ip::Address &, int flags, const char *note);
static void comm_init_opened(const Comm::ConnectionPointer &conn, const char *note, struct addrinfo *AI);
static int comm_apply_flags(int new_socket, Ip::Address &addr, int flags, struct addrinfo *AI);

@@ -75,6 +76,7 @@ static EVH commHalfClosedCheck;
static void commPlanHalfClosedCheck();

static Comm::Flag commBind(int s, struct addrinfo &);
+static void commSetBindAddressNoPort(int);
static void commSetReuseAddr(int);
static void commSetNoLinger(int);
#ifdef TCP_NODELAY
@@ -201,6 +203,22 @@ comm_local_port(int fd)
return F->local_addr.port();
}

+/// sets the IP_BIND_ADDRESS_NO_PORT socket option to optimize ephemeral port
+/// reuse by outgoing TCP connections that must bind(2) to a source IP address
+static void
+commSetBindAddressNoPort(const int fd)
+{
+#if defined(IP_BIND_ADDRESS_NO_PORT)
+ int flag = 1;
+ if (setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, reinterpret_cast<char*>(&flag), sizeof(flag)) < 0) {
+ const auto savedErrno = errno;
+ debugs(50, DBG_IMPORTANT, "ERROR: setsockopt(IP_BIND_ADDRESS_NO_PORT) failure: " << xstrerr(savedErrno));
+ }
+#else
+ (void)fd;
+#endif
+}
+
static Comm::Flag
commBind(int s, struct addrinfo &inaddr)
{
@@ -227,6 +245,10 @@ comm_open(int sock_type,
int flags,
const char *note)
{
+ // assume zero-port callers do not need to know the assigned port right away
+ if (sock_type == SOCK_STREAM && addr.port() == 0 && ((flags & COMM_DOBIND) || !addr.isAnyAddr()))
+ flags |= COMM_DOBIND_PORT_LATER;
+
return comm_openex(sock_type, proto, addr, flags, note);
}

@@ -328,7 +350,7 @@ comm_set_transparent(int fd)
* Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
* is OR of flags specified in defines.h:COMM_*
*/
-int
+static int
comm_openex(int sock_type,
int proto,
Ip::Address &addr,
@@ -476,6 +498,9 @@ comm_apply_flags(int new_socket,
if ( addr.isNoAddr() )
debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr << "!!");

+ if ((flags & COMM_DOBIND_PORT_LATER))
+ commSetBindAddressNoPort(new_socket);
+
if (commBind(new_socket, *AI) != Comm::OK) {
comm_close(new_socket);
return -1;
diff --git a/src/comm.h b/src/comm.h
index c963e1c..9ff201d 100644
--- a/src/comm.h
+++ b/src/comm.h
@@ -43,7 +43,6 @@ void comm_import_opened(const Comm::ConnectionPointer &, const char *note, struc

/**
* Open a port specially bound for listening or sending through a specific port.
- * This is a wrapper providing IPv4/IPv6 failover around comm_openex().
* Please use for all listening sockets and bind() outbound sockets.
*
* It will open a socket bound for:
@@ -59,7 +58,6 @@ void comm_import_opened(const Comm::ConnectionPointer &, const char *note, struc
int comm_open_listener(int sock_type, int proto, Ip::Address &addr, int flags, const char *note);
void comm_open_listener(int sock_type, int proto, Comm::ConnectionPointer &conn, const char *note);

-int comm_openex(int, int, Ip::Address &, int, const char *);
unsigned short comm_local_port(int fd);

int comm_udp_sendto(int sock, const Ip::Address &to, const void *buf, int buflen);
diff --git a/src/comm/ConnOpener.cc b/src/comm/ConnOpener.cc
index 25a30e4..2082214 100644
--- a/src/comm/ConnOpener.cc
+++ b/src/comm/ConnOpener.cc
@@ -263,7 +263,7 @@ Comm::ConnOpener::createFd()
if (callback_ == NULL || callback_->canceled())
return false;

- temporaryFd_ = comm_openex(SOCK_STREAM, IPPROTO_TCP, conn_->local, conn_->flags, host_);
+ temporaryFd_ = comm_open(SOCK_STREAM, IPPROTO_TCP, conn_->local, conn_->flags, host_);
if (temporaryFd_ < 0) {
sendAnswer(Comm::ERR_CONNECT, 0, "Comm::ConnOpener::createFd");
return false;
diff --git a/src/comm/Connection.h b/src/comm/Connection.h
index 4f2f23a..1e32c22 100644
--- a/src/comm/Connection.h
+++ b/src/comm/Connection.h
@@ -47,6 +47,8 @@ namespace Comm
#define COMM_DOBIND 0x08 // requires a bind()
#define COMM_TRANSPARENT 0x10 // arrived via TPROXY
#define COMM_INTERCEPTION 0x20 // arrived via NAT
+/// Internal Comm optimization: Keep the source port unassigned until connect(2)
+#define COMM_DOBIND_PORT_LATER 0x100

/**
* Store data about the physical and logical attributes of a connection.
diff --git a/src/ipc.cc b/src/ipc.cc
index e1d48fc..e92a27f 100644
--- a/src/ipc.cc
+++ b/src/ipc.cc
@@ -95,12 +95,12 @@ ipcCreate(int type, const char *prog, const char *const args[], const char *name
} else void(0)

if (type == IPC_TCP_SOCKET) {
- crfd = cwfd = comm_open(SOCK_STREAM,
+ crfd = cwfd = comm_open_listener(SOCK_STREAM,
0,
local_addr,
COMM_NOCLOEXEC,
name);
- prfd = pwfd = comm_open(SOCK_STREAM,
+ prfd = pwfd = comm_open_listener(SOCK_STREAM,
0, /* protocol */
local_addr,
0, /* blocking */
diff --git a/src/tests/stub_comm.cc b/src/tests/stub_comm.cc
index 58f85e4..5381ab2 100644
--- a/src/tests/stub_comm.cc
+++ b/src/tests/stub_comm.cc
@@ -46,7 +46,6 @@ int comm_open_uds(int sock_type, int proto, struct sockaddr_un* addr, int flags)
void comm_import_opened(const Comm::ConnectionPointer &, const char *note, struct addrinfo *AI) STUB
int comm_open_listener(int sock_type, int proto, Ip::Address &addr, int flags, const char *note) STUB_RETVAL(-1)
void comm_open_listener(int sock_type, int proto, Comm::ConnectionPointer &conn, const char *note) STUB
-int comm_openex(int, int, Ip::Address &, int, tos_t tos, nfmark_t nfmark, const char *) STUB_RETVAL(-1)
unsigned short comm_local_port(int fd) STUB_RETVAL(0)
int comm_udp_sendto(int sock, const Ip::Address &to, const void *buf, int buflen) STUB_RETVAL(-1)
void commCallCloseHandlers(int fd) STUB
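A standalone sketch of the backported socket option, assuming Linux headers; stderr reporting stands in for Squid's debugs():

#include <cerrno>
#include <cstdio>
#include <cstring>
#include <netinet/in.h>
#include <sys/socket.h>

// Ask the kernel to defer ephemeral source-port allocation until
// connect(2), which eases port reuse for bound outgoing sockets.
static void setBindAddressNoPort(const int fd)
{
#if defined(IP_BIND_ADDRESS_NO_PORT)
    int flag = 1;
    if (setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, &flag, sizeof(flag)) < 0)
        fprintf(stderr, "setsockopt(IP_BIND_ADDRESS_NO_PORT): %s\n", strerror(errno));
#else
    (void)fd; // option unavailable on this platform; bind() assigns a port
#endif
}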
25
SOURCES/squid-4.15.tar.xz.asc
Normal file
25
SOURCES/squid-4.15.tar.xz.asc
Normal file
@ -0,0 +1,25 @@
File: squid-4.15.tar.xz
Date: Mon 10 May 2021 10:50:22 UTC
Size: 2454176
MD5 : a593de9dc888dfeca4f1f7db2cd7d3b9
SHA1: 60bda34ba39657e2d870c8c1d2acece8a69c3075
Key : CD6DBF8EF3B17D3E <squid3@treenet.co.nz>
B068 84ED B779 C89B 044E 64E3 CD6D BF8E F3B1 7D3E
keyring = http://www.squid-cache.org/pgp.asc
keyserver = pool.sks-keyservers.net
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEsGiE7bd5yJsETmTjzW2/jvOxfT4FAmCZD/UACgkQzW2/jvOx
fT6zZg/+N8JMIYpmVJ7jm4lF0Ub2kEHGTOrc+tnlA3LGnlMQuTm61+BYk58g0SKW
96NbJ0cycW215Q34L+Y0tWuxEbIU01vIc3AA7rQd0LKy+fQU0OtBuhk5Vf4bKilW
uHEVIQZs9HmY6bqC+kgtCf49tVZvR8FZYNuilg/68+i/pQdwaDDmVb+j2oF7w+y2
dgkTFWtM5NTL6bqUVC0E7lLFPjzMefKfxkkpWFdV/VrAhU25jN24kpnjcfotQhdW
LDFy5okduz3ljso9pBYJfLeMXM1FZPpceC91zj32x3tcUyrD3yIoXob58rEKvfe4
RDXN4SuClsNe4UQ4oNoGIES9XtaYlOzPR1PlbqPUrdp1cDnhgLJ+1fkAixlMqCml
wuI1VIKSEY+nvRzQzFHnXJK9otV8QwMF76AHaytO9y+X6JuZmu/CcV1pq61qY9qv
t1/8z99wWSxpu17zthZgq64J225GF/hkBedaFlYoS5k5YUMDLPlRSCC0yPmb8JBF
Cns5i/aq2PmOx2ZhQ2RQIF416J3HK8Galw8ytFOjnEcn4ux9yzKNjL38p4+PJJA0
7GCMAqYYNjok3LSkGbiR7cPgbHnkqRfYbPFLMj4FtruoFlZ9L5MIU3oFvqA3ZR6l
Az6LaKLsAYPUmukAOPUSIrqpKXZHc7hdBWkT+7RYA4qaoU+9oIo=
=1Re1
-----END PGP SIGNATURE-----
16
SOURCES/squid.logrotate
Normal file
16
SOURCES/squid.logrotate
Normal file
@ -0,0 +1,16 @@
/var/log/squid/*.log {
weekly
rotate 5
compress
notifempty
missingok
nocreate
sharedscripts
postrotate
# Asks squid to reopen its logs. (logfile_rotate 0 is set in squid.conf)
# errors redirected to make it silent if squid is not running
/usr/sbin/squid -k rotate 2>/dev/null
# Wait a little to allow Squid to catch up before the logs is compressed
sleep 1
endscript
}
7
SOURCES/squid.nm
Executable file
7
SOURCES/squid.nm
Executable file
@ -0,0 +1,7 @@
#!/bin/sh

case "$2" in
up|down|vpn-up|vpn-down)
/bin/systemctl -q reload squid.service || :
;;
esac
3
SOURCES/squid.pam
Normal file
3
SOURCES/squid.pam
Normal file
@ -0,0 +1,3 @@
#%PAM-1.0
auth include password-auth
account include password-auth
18
SOURCES/squid.service
Normal file
18
SOURCES/squid.service
Normal file
@ -0,0 +1,18 @@
[Unit]
Description=Squid caching proxy
Documentation=man:squid(8)
After=network.target network-online.target nss-lookup.target

[Service]
Type=notify
LimitNOFILE=16384
PIDFile=/run/squid.pid
EnvironmentFile=/etc/sysconfig/squid
ExecStartPre=/usr/libexec/squid/cache_swap.sh
ExecStart=/usr/sbin/squid --foreground $SQUID_OPTS -f ${SQUID_CONF}
ExecReload=/usr/bin/kill -HUP $MAINPID
KillMode=mixed
NotifyAccess=all

[Install]
WantedBy=multi-user.target
9
SOURCES/squid.sysconfig
Normal file
9
SOURCES/squid.sysconfig
Normal file
@ -0,0 +1,9 @@
# default squid options
SQUID_OPTS=""

# Time to wait for Squid to shut down when asked. Should not be necessary
# most of the time.
SQUID_SHUTDOWN_TIMEOUT=100

# default squid conf file
SQUID_CONF="/etc/squid/squid.conf"
1798
SPECS/squid.spec
Normal file
1798
SPECS/squid.spec
Normal file
File diff suppressed because it is too large