Import rpm: 602ea65a25fa2db67807d3a6e8f23364256ef875
This commit is contained in:
commit
506662cb1c
1
.gitignore
vendored
Normal file
1
.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
SOURCES/squid-4.15.tar.xz
|
16
cache_swap.sh
Normal file
16
cache_swap.sh
Normal file
@ -0,0 +1,16 @@
|
||||
#!/bin/bash
|
||||
if [ -f /etc/sysconfig/squid ]; then
|
||||
. /etc/sysconfig/squid
|
||||
fi
|
||||
|
||||
SQUID_CONF=${SQUID_CONF:-"/etc/squid/squid.conf"}
|
||||
|
||||
CACHE_SWAP=`sed -e 's/#.*//g' $SQUID_CONF | \
|
||||
grep cache_dir | awk '{ print $3 }'`
|
||||
|
||||
for adir in $CACHE_SWAP; do
|
||||
if [ ! -d $adir/00 ]; then
|
||||
echo -n "init_cache_dir $adir... "
|
||||
squid -N -z -F -f $SQUID_CONF >> /var/log/squid/squid.out 2>&1
|
||||
fi
|
||||
done
|
9
gating.yaml
Normal file
9
gating.yaml
Normal file
@ -0,0 +1,9 @@
|
||||
--- !Policy
|
||||
product_versions:
|
||||
- rhel-9
|
||||
decision_context: osci_compose_gate
|
||||
rules:
|
||||
- !PassingTestCaseRule {test_case_name: baseos-ci.brew-build.tier1.functional}
|
||||
- !PassingTestCaseRule {test_case_name: baseos-ci.brew-build.tier2.functional}
|
||||
- !PassingTestCaseRule {test_case_name: baseos-ci.brew-build.tier3.functional}
|
||||
- !PassingTestCaseRule {test_case_name: baseos-ci.brew-build.acceptance-tier.functional}
|
3
perl-requires-squid.sh
Executable file
3
perl-requires-squid.sh
Executable file
@ -0,0 +1,3 @@
|
||||
#!/bin/sh
|
||||
|
||||
/usr/lib/rpm/perl.req $* | grep -v "Authen::Smb"
|
1
sources
Normal file
1
sources
Normal file
@ -0,0 +1 @@
|
||||
SHA1 (squid-4.15.tar.xz) = 60bda34ba39657e2d870c8c1d2acece8a69c3075
|
10
squid-3.0.STABLE1-perlpath.patch
Normal file
10
squid-3.0.STABLE1-perlpath.patch
Normal file
@ -0,0 +1,10 @@
|
||||
diff --git a/contrib/url-normalizer.pl b/contrib/url-normalizer.pl
|
||||
index 4cb0480..4b89910 100755
|
||||
--- a/contrib/url-normalizer.pl
|
||||
+++ b/contrib/url-normalizer.pl
|
||||
@@ -1,4 +1,4 @@
|
||||
-#!/usr/local/bin/perl -Tw
|
||||
+#!/usr/bin/perl -Tw
|
||||
#
|
||||
# * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
|
||||
# *
|
32
squid-3.1.0.9-location.patch
Normal file
32
squid-3.1.0.9-location.patch
Normal file
@ -0,0 +1,32 @@
|
||||
diff -up squid-3.1.0.9/QUICKSTART.location squid-3.1.0.9/QUICKSTART
|
||||
--- squid-3.1.0.9/QUICKSTART.location 2009-06-26 12:35:27.000000000 +0200
|
||||
+++ squid-3.1.0.9/QUICKSTART 2009-07-17 14:03:10.000000000 +0200
|
||||
@@ -10,10 +10,9 @@ After you retrieved, compiled and instal
|
||||
INSTALL in the same directory), you have to configure the squid.conf
|
||||
file. This is the list of the values you *need* to change, because no
|
||||
sensible defaults could be defined. Do not touch the other variables
|
||||
-for now. We assume you have installed Squid in the default location:
|
||||
-/usr/local/squid
|
||||
+for now.
|
||||
|
||||
-Uncomment and edit the following lines in /usr/local/squid/etc/squid.conf:
|
||||
+Uncomment and edit the following lines in /etc/squid/squid.conf:
|
||||
|
||||
==============================================================================
|
||||
|
||||
@@ -82,12 +81,12 @@ After editing squid.conf to your liking,
|
||||
line TWICE:
|
||||
|
||||
To create any disk cache_dir configured:
|
||||
- % /usr/local/squid/sbin/squid -z
|
||||
+ % /usr/sbin/squid -z
|
||||
|
||||
To start squid:
|
||||
- % /usr/local/squid/sbin/squid
|
||||
+ % /usr/sbin/squid
|
||||
|
||||
-Check in the cache.log (/usr/local/squid/var/logs/cache.log) that
|
||||
+Check in the cache.log (/var/log/squid/cache.log) that
|
||||
everything is all right.
|
||||
|
||||
Once Squid created all its files (it can take several minutes on some
|
95
squid-3.5.9-include-guards.patch
Normal file
95
squid-3.5.9-include-guards.patch
Normal file
@ -0,0 +1,95 @@
|
||||
------------------------------------------------------------
|
||||
revno: 14311
|
||||
revision-id: squid3@treenet.co.nz-20150924130537-lqwzd1z99a3l9gt4
|
||||
parent: squid3@treenet.co.nz-20150924032241-6cx3g6hwz9xfoybr
|
||||
------------------------------------------------------------
|
||||
revno: 14311
|
||||
revision-id: squid3@treenet.co.nz-20150924130537-lqwzd1z99a3l9gt4
|
||||
parent: squid3@treenet.co.nz-20150924032241-6cx3g6hwz9xfoybr
|
||||
fixes bug: http://bugs.squid-cache.org/show_bug.cgi?id=4323
|
||||
author: Francesco Chemolli <kinkie@squid-cache.org>
|
||||
committer: Amos Jeffries <squid3@treenet.co.nz>
|
||||
branch nick: trunk
|
||||
timestamp: Thu 2015-09-24 06:05:37 -0700
|
||||
message:
|
||||
Bug 4323: Netfilter broken cross-includes with Linux 4.2
|
||||
------------------------------------------------------------
|
||||
# Bazaar merge directive format 2 (Bazaar 0.90)
|
||||
# revision_id: squid3@treenet.co.nz-20150924130537-lqwzd1z99a3l9gt4
|
||||
# target_branch: http://bzr.squid-cache.org/bzr/squid3/trunk/
|
||||
# testament_sha1: c67cfca81040f3845d7c4caf2f40518511f14d0b
|
||||
# timestamp: 2015-09-24 13:06:33 +0000
|
||||
# source_branch: http://bzr.squid-cache.org/bzr/squid3/trunk
|
||||
# base_revision_id: squid3@treenet.co.nz-20150924032241-\
|
||||
# 6cx3g6hwz9xfoybr
|
||||
#
|
||||
# Begin patch
|
||||
=== modified file 'compat/os/linux.h'
|
||||
--- compat/os/linux.h 2015-01-13 07:25:36 +0000
|
||||
+++ compat/os/linux.h 2015-09-24 13:05:37 +0000
|
||||
@@ -30,6 +30,21 @@
|
||||
#endif
|
||||
|
||||
/*
|
||||
+ * Netfilter header madness. (see Bug 4323)
|
||||
+ *
|
||||
+ * Netfilter have a history of defining their own versions of network protocol
|
||||
+ * primitives without sufficient protection against the POSIX defines which are
|
||||
+ * aways present in Linux.
|
||||
+ *
|
||||
+ * netinet/in.h must be included before any other sys header in order to properly
|
||||
+ * activate include guards in <linux/libc-compat.h> the kernel maintainers added
|
||||
+ * to workaround it.
|
||||
+ */
|
||||
+#if HAVE_NETINET_IN_H
|
||||
+#include <netinet/in.h>
|
||||
+#endif
|
||||
+
|
||||
+/*
|
||||
* sys/capability.h is only needed in Linux apparently.
|
||||
*
|
||||
* HACK: LIBCAP_BROKEN Ugly glue to get around linux header madness colliding with glibc
|
||||
fixes bug: http://bugs.squid-cache.org/show_bug.cgi?id=4323
|
||||
author: Francesco Chemolli <kinkie@squid-cache.org>
|
||||
committer: Amos Jeffries <squid3@treenet.co.nz>
|
||||
branch nick: trunk
|
||||
timestamp: Thu 2015-09-24 06:05:37 -0700
|
||||
message:
|
||||
Bug 4323: Netfilter broken cross-includes with Linux 4.2
|
||||
------------------------------------------------------------
|
||||
# Bazaar merge directive format 2 (Bazaar 0.90)
|
||||
# revision_id: squid3@treenet.co.nz-20150924130537-lqwzd1z99a3l9gt4
|
||||
# target_branch: http://bzr.squid-cache.org/bzr/squid3/trunk/
|
||||
# testament_sha1: c67cfca81040f3845d7c4caf2f40518511f14d0b
|
||||
# timestamp: 2015-09-24 13:06:33 +0000
|
||||
# source_branch: http://bzr.squid-cache.org/bzr/squid3/trunk
|
||||
# base_revision_id: squid3@treenet.co.nz-20150924032241-\
|
||||
# 6cx3g6hwz9xfoybr
|
||||
#
|
||||
# Begin patch
|
||||
=== modified file 'compat/os/linux.h'
|
||||
--- compat/os/linux.h 2015-01-13 07:25:36 +0000
|
||||
+++ compat/os/linux.h 2015-09-24 13:05:37 +0000
|
||||
@@ -30,6 +30,21 @@
|
||||
#endif
|
||||
|
||||
/*
|
||||
+ * Netfilter header madness. (see Bug 4323)
|
||||
+ *
|
||||
+ * Netfilter have a history of defining their own versions of network protocol
|
||||
+ * primitives without sufficient protection against the POSIX defines which are
|
||||
+ * aways present in Linux.
|
||||
+ *
|
||||
+ * netinet/in.h must be included before any other sys header in order to properly
|
||||
+ * activate include guards in <linux/libc-compat.h> the kernel maintainers added
|
||||
+ * to workaround it.
|
||||
+ */
|
||||
+#if HAVE_NETINET_IN_H
|
||||
+#include <netinet/in.h>
|
||||
+#endif
|
||||
+
|
||||
+/*
|
||||
* sys/capability.h is only needed in Linux apparently.
|
||||
*
|
||||
* HACK: LIBCAP_BROKEN Ugly glue to get around linux header madness colliding with glibc
|
||||
|
26
squid-4.0.11-config.patch
Normal file
26
squid-4.0.11-config.patch
Normal file
@ -0,0 +1,26 @@
|
||||
diff -up squid-4.0.11/src/cf.data.pre.config squid-4.0.11/src/cf.data.pre
|
||||
--- squid-4.0.11/src/cf.data.pre.config 2016-06-09 22:32:57.000000000 +0200
|
||||
+++ squid-4.0.11/src/cf.data.pre 2016-07-11 21:08:35.090976840 +0200
|
||||
@@ -4658,7 +4658,7 @@ DOC_END
|
||||
|
||||
NAME: logfile_rotate
|
||||
TYPE: int
|
||||
-DEFAULT: 10
|
||||
+DEFAULT: 0
|
||||
LOC: Config.Log.rotateNumber
|
||||
DOC_START
|
||||
Specifies the default number of logfile rotations to make when you
|
||||
@@ -6444,11 +6444,11 @@ COMMENT_END
|
||||
|
||||
NAME: cache_mgr
|
||||
TYPE: string
|
||||
-DEFAULT: webmaster
|
||||
+DEFAULT: root
|
||||
LOC: Config.adminEmail
|
||||
DOC_START
|
||||
Email-address of local cache manager who will receive
|
||||
- mail if the cache dies. The default is "webmaster".
|
||||
+ mail if the cache dies. The default is "root".
|
||||
DOC_END
|
||||
|
||||
NAME: mail_from
|
127
squid-4.11-active-ftp.patch
Normal file
127
squid-4.11-active-ftp.patch
Normal file
@ -0,0 +1,127 @@
|
||||
diff --git a/src/clients/FtpClient.cc b/src/clients/FtpClient.cc
|
||||
index b665bcf..d287e55 100644
|
||||
--- a/src/clients/FtpClient.cc
|
||||
+++ b/src/clients/FtpClient.cc
|
||||
@@ -778,7 +778,8 @@ Ftp::Client::connectDataChannel()
|
||||
bool
|
||||
Ftp::Client::openListenSocket()
|
||||
{
|
||||
- return false;
|
||||
+ debugs(9, 3, HERE);
|
||||
+ return false;
|
||||
}
|
||||
|
||||
/// creates a data channel Comm close callback
|
||||
diff --git a/src/clients/FtpClient.h b/src/clients/FtpClient.h
|
||||
index a76a5a0..218d696 100644
|
||||
--- a/src/clients/FtpClient.h
|
||||
+++ b/src/clients/FtpClient.h
|
||||
@@ -118,7 +118,7 @@ public:
|
||||
bool sendPort();
|
||||
bool sendPassive();
|
||||
void connectDataChannel();
|
||||
- bool openListenSocket();
|
||||
+ virtual bool openListenSocket();
|
||||
void switchTimeoutToDataChannel();
|
||||
|
||||
CtrlChannel ctrl; ///< FTP control channel state
|
||||
diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc
|
||||
index 411bce9..31d3e36 100644
|
||||
--- a/src/clients/FtpGateway.cc
|
||||
+++ b/src/clients/FtpGateway.cc
|
||||
@@ -87,6 +87,13 @@ struct GatewayFlags {
|
||||
class Gateway;
|
||||
typedef void (StateMethod)(Ftp::Gateway *);
|
||||
|
||||
+} // namespace FTP
|
||||
+
|
||||
+static void ftpOpenListenSocket(Ftp::Gateway * ftpState, int fallback);
|
||||
+
|
||||
+namespace Ftp
|
||||
+{
|
||||
+
|
||||
/// FTP Gateway: An FTP client that takes an HTTP request with an ftp:// URI,
|
||||
/// converts it into one or more FTP commands, and then
|
||||
/// converts one or more FTP responses into the final HTTP response.
|
||||
@@ -137,7 +144,11 @@ public:
|
||||
|
||||
/// create a data channel acceptor and start listening.
|
||||
void listenForDataChannel(const Comm::ConnectionPointer &conn);
|
||||
-
|
||||
+ virtual bool openListenSocket() {
|
||||
+ debugs(9, 3, HERE);
|
||||
+ ftpOpenListenSocket(this, 0);
|
||||
+ return Comm::IsConnOpen(data.conn);
|
||||
+ }
|
||||
int checkAuth(const HttpHeader * req_hdr);
|
||||
void checkUrlpath();
|
||||
void buildTitleUrl();
|
||||
@@ -1787,6 +1798,7 @@ ftpOpenListenSocket(Ftp::Gateway * ftpState, int fallback)
|
||||
}
|
||||
|
||||
ftpState->listenForDataChannel(temp);
|
||||
+ ftpState->data.listenConn = temp;
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -1822,13 +1834,19 @@ ftpSendPORT(Ftp::Gateway * ftpState)
|
||||
// pull out the internal IP address bytes to send in PORT command...
|
||||
// source them from the listen_conn->local
|
||||
|
||||
+ struct sockaddr_in addr;
|
||||
+ socklen_t addrlen = sizeof(addr);
|
||||
+ getsockname(ftpState->data.listenConn->fd, (struct sockaddr *) &addr, &addrlen);
|
||||
+ unsigned char port_high = ntohs(addr.sin_port) >> 8;
|
||||
+ unsigned char port_low = ntohs(addr.sin_port) & 0xff;
|
||||
+
|
||||
struct addrinfo *AI = NULL;
|
||||
ftpState->data.listenConn->local.getAddrInfo(AI, AF_INET);
|
||||
unsigned char *addrptr = (unsigned char *) &((struct sockaddr_in*)AI->ai_addr)->sin_addr;
|
||||
- unsigned char *portptr = (unsigned char *) &((struct sockaddr_in*)AI->ai_addr)->sin_port;
|
||||
+ // unsigned char *portptr = (unsigned char *) &((struct sockaddr_in*)AI->ai_addr)->sin_port;
|
||||
snprintf(cbuf, CTRL_BUFLEN, "PORT %d,%d,%d,%d,%d,%d\r\n",
|
||||
addrptr[0], addrptr[1], addrptr[2], addrptr[3],
|
||||
- portptr[0], portptr[1]);
|
||||
+ port_high, port_low);
|
||||
ftpState->writeCommand(cbuf);
|
||||
ftpState->state = Ftp::Client::SENT_PORT;
|
||||
|
||||
@@ -1881,14 +1899,27 @@ ftpSendEPRT(Ftp::Gateway * ftpState)
|
||||
return;
|
||||
}
|
||||
|
||||
+
|
||||
+ unsigned int port;
|
||||
+ struct sockaddr_storage addr;
|
||||
+ socklen_t addrlen = sizeof(addr);
|
||||
+ getsockname(ftpState->data.listenConn->fd, (struct sockaddr *) &addr, &addrlen);
|
||||
+ if (addr.ss_family == AF_INET) {
|
||||
+ struct sockaddr_in *addr4 = (struct sockaddr_in*) &addr;
|
||||
+ port = ntohs( addr4->sin_port );
|
||||
+ } else {
|
||||
+ struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) &addr;
|
||||
+ port = ntohs( addr6->sin6_port );
|
||||
+ }
|
||||
+
|
||||
char buf[MAX_IPSTRLEN];
|
||||
|
||||
/* RFC 2428 defines EPRT as IPv6 equivalent to IPv4 PORT command. */
|
||||
/* Which can be used by EITHER protocol. */
|
||||
- snprintf(cbuf, CTRL_BUFLEN, "EPRT |%d|%s|%d|\r\n",
|
||||
+ snprintf(cbuf, CTRL_BUFLEN, "EPRT |%d|%s|%u|\r\n",
|
||||
( ftpState->data.listenConn->local.isIPv6() ? 2 : 1 ),
|
||||
ftpState->data.listenConn->local.toStr(buf,MAX_IPSTRLEN),
|
||||
- ftpState->data.listenConn->local.port() );
|
||||
+ port);
|
||||
|
||||
ftpState->writeCommand(cbuf);
|
||||
ftpState->state = Ftp::Client::SENT_EPRT;
|
||||
@@ -1907,7 +1938,7 @@ ftpReadEPRT(Ftp::Gateway * ftpState)
|
||||
ftpSendPORT(ftpState);
|
||||
return;
|
||||
}
|
||||
-
|
||||
+ ftpState->ctrl.message = NULL;
|
||||
ftpRestOrList(ftpState);
|
||||
}
|
||||
|
27
squid-4.11-config.patch
Normal file
27
squid-4.11-config.patch
Normal file
@ -0,0 +1,27 @@
|
||||
diff --git a/src/cf.data.pre b/src/cf.data.pre
|
||||
index 26ef576..30d5509 100644
|
||||
--- a/src/cf.data.pre
|
||||
+++ b/src/cf.data.pre
|
||||
@@ -5006,7 +5006,7 @@ DOC_END
|
||||
|
||||
NAME: logfile_rotate
|
||||
TYPE: int
|
||||
-DEFAULT: 10
|
||||
+DEFAULT: 0
|
||||
LOC: Config.Log.rotateNumber
|
||||
DOC_START
|
||||
Specifies the default number of logfile rotations to make when you
|
||||
@@ -6857,11 +6857,11 @@ COMMENT_END
|
||||
|
||||
NAME: cache_mgr
|
||||
TYPE: string
|
||||
-DEFAULT: webmaster
|
||||
+DEFAULT: root
|
||||
LOC: Config.adminEmail
|
||||
DOC_START
|
||||
Email-address of local cache manager who will receive
|
||||
- mail if the cache dies. The default is "webmaster".
|
||||
+ mail if the cache dies. The default is "root".
|
||||
DOC_END
|
||||
|
||||
NAME: mail_from
|
143
squid-4.11-convert-ipv4.patch
Normal file
143
squid-4.11-convert-ipv4.patch
Normal file
@ -0,0 +1,143 @@
|
||||
From 771908d313ee9c255adfb5e4fdba4d6797c18409 Mon Sep 17 00:00:00 2001
|
||||
From: Amos Jeffries <yadij@users.noreply.github.com>
|
||||
Date: Thu, 7 Mar 2019 13:50:38 +0000
|
||||
Subject: [PATCH] Bug 4928: Cannot convert non-IPv4 to IPv4 (#379)
|
||||
|
||||
... when reaching client_ip_max_connections
|
||||
|
||||
The client_ip_max_connections limit is checked before the TCP dst-IP is located for the newly received TCP connection. This leaves Squid unable to fetch the NFMARK or similar
|
||||
details later on (they do not exist for [::]).
|
||||
|
||||
Move client_ip_max_connections test later in the TCP accept process to ensure dst-IP is known when the error is produced.
|
||||
---
|
||||
src/comm/TcpAcceptor.cc | 82 ++++++++++++++++++++---------------------
|
||||
1 file changed, 39 insertions(+), 43 deletions(-)
|
||||
|
||||
diff --git a/src/comm/TcpAcceptor.cc b/src/comm/TcpAcceptor.cc
|
||||
index d4b576d..936aa30 100644
|
||||
--- a/src/comm/TcpAcceptor.cc
|
||||
+++ b/src/comm/TcpAcceptor.cc
|
||||
@@ -282,7 +282,16 @@ Comm::TcpAcceptor::acceptOne()
|
||||
ConnectionPointer newConnDetails = new Connection();
|
||||
const Comm::Flag flag = oldAccept(newConnDetails);
|
||||
|
||||
- if (flag == Comm::COMM_ERROR) {
|
||||
+ /* Check for errors */
|
||||
+ if (!newConnDetails->isOpen()) {
|
||||
+
|
||||
+ if (flag == Comm::NOMESSAGE) {
|
||||
+ /* register interest again */
|
||||
+ debugs(5, 5, HERE << "try later: " << conn << " handler Subscription: " << theCallSub);
|
||||
+ SetSelect(conn->fd, COMM_SELECT_READ, doAccept, this, 0);
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
// A non-recoverable error; notify the caller */
|
||||
debugs(5, 5, HERE << "non-recoverable error:" << status() << " handler Subscription: " << theCallSub);
|
||||
if (intendedForUserConnections())
|
||||
@@ -292,16 +301,12 @@ Comm::TcpAcceptor::acceptOne()
|
||||
return;
|
||||
}
|
||||
|
||||
- if (flag == Comm::NOMESSAGE) {
|
||||
- /* register interest again */
|
||||
- debugs(5, 5, "try later: " << conn << " handler Subscription: " << theCallSub);
|
||||
- } else {
|
||||
- debugs(5, 5, "Listener: " << conn <<
|
||||
- " accepted new connection " << newConnDetails <<
|
||||
- " handler Subscription: " << theCallSub);
|
||||
- notify(flag, newConnDetails);
|
||||
- }
|
||||
+ newConnDetails->nfmark = Ip::Qos::getNfmarkFromConnection(newConnDetails, Ip::Qos::dirAccepted);
|
||||
|
||||
+ debugs(5, 5, HERE << "Listener: " << conn <<
|
||||
+ " accepted new connection " << newConnDetails <<
|
||||
+ " handler Subscription: " << theCallSub);
|
||||
+ notify(flag, newConnDetails);
|
||||
SetSelect(conn->fd, COMM_SELECT_READ, doAccept, this, 0);
|
||||
}
|
||||
|
||||
@@ -341,8 +346,8 @@ Comm::TcpAcceptor::notify(const Comm::Flag flag, const Comm::ConnectionPointer &
|
||||
*
|
||||
* \retval Comm::OK success. details parameter filled.
|
||||
* \retval Comm::NOMESSAGE attempted accept() but nothing useful came in.
|
||||
- * Or this client has too many connections already.
|
||||
* \retval Comm::COMM_ERROR an outright failure occurred.
|
||||
+ * Or this client has too many connections already.
|
||||
*/
|
||||
Comm::Flag
|
||||
Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)
|
||||
@@ -383,6 +388,15 @@ Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)
|
||||
|
||||
details->remote = *gai;
|
||||
|
||||
+ if ( Config.client_ip_max_connections >= 0) {
|
||||
+ if (clientdbEstablished(details->remote, 0) > Config.client_ip_max_connections) {
|
||||
+ debugs(50, DBG_IMPORTANT, "WARNING: " << details->remote << " attempting more than " << Config.client_ip_max_connections << " connections.");
|
||||
+ Ip::Address::FreeAddr(gai);
|
||||
+ PROF_stop(comm_accept);
|
||||
+ return Comm::COMM_ERROR;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
// lookup the local-end details of this new connection
|
||||
Ip::Address::InitAddr(gai);
|
||||
details->local.setEmpty();
|
||||
@@ -396,6 +410,23 @@ Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)
|
||||
details->local = *gai;
|
||||
Ip::Address::FreeAddr(gai);
|
||||
|
||||
+ /* fdstat update */
|
||||
+ fdd_table[sock].close_file = NULL;
|
||||
+ fdd_table[sock].close_line = 0;
|
||||
+
|
||||
+ fde *F = &fd_table[sock];
|
||||
+ details->remote.toStr(F->ipaddr,MAX_IPSTRLEN);
|
||||
+ F->remote_port = details->remote.port();
|
||||
+ F->local_addr = details->local;
|
||||
+ F->sock_family = details->local.isIPv6()?AF_INET6:AF_INET;
|
||||
+
|
||||
+ // set socket flags
|
||||
+ commSetCloseOnExec(sock);
|
||||
+ commSetNonBlocking(sock);
|
||||
+
|
||||
+ /* IFF the socket is (tproxy) transparent, pass the flag down to allow spoofing */
|
||||
+ F->flags.transparent = fd_table[conn->fd].flags.transparent; // XXX: can we remove this line yet?
|
||||
+
|
||||
// Perform NAT or TPROXY operations to retrieve the real client/dest IP addresses
|
||||
if (conn->flags&(COMM_TRANSPARENT|COMM_INTERCEPTION) && !Ip::Interceptor.Lookup(details, conn)) {
|
||||
debugs(50, DBG_IMPORTANT, "ERROR: NAT/TPROXY lookup failed to locate original IPs on " << details);
|
||||
@@ -414,33 +445,6 @@ Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)
|
||||
}
|
||||
#endif
|
||||
|
||||
- details->nfmark = Ip::Qos::getNfmarkFromConnection(details, Ip::Qos::dirAccepted);
|
||||
-
|
||||
- if (Config.client_ip_max_connections >= 0) {
|
||||
- if (clientdbEstablished(details->remote, 0) > Config.client_ip_max_connections) {
|
||||
- debugs(50, DBG_IMPORTANT, "WARNING: " << details->remote << " attempting more than " << Config.client_ip_max_connections << " connections.");
|
||||
- PROF_stop(comm_accept);
|
||||
- return Comm::NOMESSAGE;
|
||||
- }
|
||||
- }
|
||||
-
|
||||
- /* fdstat update */
|
||||
- fdd_table[sock].close_file = NULL;
|
||||
- fdd_table[sock].close_line = 0;
|
||||
-
|
||||
- fde *F = &fd_table[sock];
|
||||
- details->remote.toStr(F->ipaddr,MAX_IPSTRLEN);
|
||||
- F->remote_port = details->remote.port();
|
||||
- F->local_addr = details->local;
|
||||
- F->sock_family = details->local.isIPv6()?AF_INET6:AF_INET;
|
||||
-
|
||||
- // set socket flags
|
||||
- commSetCloseOnExec(sock);
|
||||
- commSetNonBlocking(sock);
|
||||
-
|
||||
- /* IFF the socket is (tproxy) transparent, pass the flag down to allow spoofing */
|
||||
- F->flags.transparent = fd_table[conn->fd].flags.transparent; // XXX: can we remove this line yet?
|
||||
-
|
||||
PROF_stop(comm_accept);
|
||||
return Comm::OK;
|
||||
}
|
41
squid-4.11-include-guards.patch
Normal file
41
squid-4.11-include-guards.patch
Normal file
@ -0,0 +1,41 @@
|
||||
diff --git a/compat/os/linux.h b/compat/os/linux.h
|
||||
index 0ff05c6..d51389b 100644
|
||||
--- a/compat/os/linux.h
|
||||
+++ b/compat/os/linux.h
|
||||
@@ -44,6 +44,36 @@
|
||||
#include <netinet/in.h>
|
||||
#endif
|
||||
|
||||
+/*
|
||||
+ * Netfilter header madness. (see Bug 4323)
|
||||
+ *
|
||||
+ * Netfilter have a history of defining their own versions of network protocol
|
||||
+ * primitives without sufficient protection against the POSIX defines which are
|
||||
+ * aways present in Linux.
|
||||
+ *
|
||||
+ * netinet/in.h must be included before any other sys header in order to properly
|
||||
+ * activate include guards in <linux/libc-compat.h> the kernel maintainers added
|
||||
+ * to workaround it.
|
||||
+ */
|
||||
+#if HAVE_NETINET_IN_H
|
||||
+#include <netinet/in.h>
|
||||
+#endif
|
||||
+
|
||||
+/*
|
||||
+ * Netfilter header madness. (see Bug 4323)
|
||||
+ *
|
||||
+ * Netfilter have a history of defining their own versions of network protocol
|
||||
+ * primitives without sufficient protection against the POSIX defines which are
|
||||
+ * aways present in Linux.
|
||||
+ *
|
||||
+ * netinet/in.h must be included before any other sys header in order to properly
|
||||
+ * activate include guards in <linux/libc-compat.h> the kernel maintainers added
|
||||
+ * to workaround it.
|
||||
+ */
|
||||
+#if HAVE_NETINET_IN_H
|
||||
+#include <netinet/in.h>
|
||||
+#endif
|
||||
+
|
||||
/*
|
||||
* sys/capability.h is only needed in Linux apparently.
|
||||
*
|
178
squid-4.11-large-acl.patch
Normal file
178
squid-4.11-large-acl.patch
Normal file
@ -0,0 +1,178 @@
|
||||
diff --git a/src/acl/RegexData.cc b/src/acl/RegexData.cc
|
||||
index 01a4c12..b5c1679 100644
|
||||
--- a/src/acl/RegexData.cc
|
||||
+++ b/src/acl/RegexData.cc
|
||||
@@ -22,6 +22,7 @@
|
||||
#include "ConfigParser.h"
|
||||
#include "Debug.h"
|
||||
#include "sbuf/List.h"
|
||||
+#include "sbuf/Algorithms.h"
|
||||
|
||||
ACLRegexData::~ACLRegexData()
|
||||
{
|
||||
@@ -129,6 +130,18 @@ compileRE(std::list<RegexPattern> &curlist, const char * RE, int flags)
|
||||
return true;
|
||||
}
|
||||
|
||||
+static bool
|
||||
+compileRE(std::list<RegexPattern> &curlist, const SBufList &RE, int flags)
|
||||
+{
|
||||
+ if (RE.empty())
|
||||
+ return curlist.empty(); // XXX: old code did this. It looks wrong.
|
||||
+ SBuf regexp;
|
||||
+ static const SBuf openparen("("), closeparen(")"), separator(")|(");
|
||||
+ JoinContainerIntoSBuf(regexp, RE.begin(), RE.end(), separator, openparen,
|
||||
+ closeparen);
|
||||
+ return compileRE(curlist, regexp.c_str(), flags);
|
||||
+}
|
||||
+
|
||||
/** Compose and compile one large RE from a set of (small) REs.
|
||||
* The ultimate goal is to have only one RE per ACL so that match() is
|
||||
* called only once per ACL.
|
||||
@@ -137,16 +150,11 @@ static int
|
||||
compileOptimisedREs(std::list<RegexPattern> &curlist, const SBufList &sl)
|
||||
{
|
||||
std::list<RegexPattern> newlist;
|
||||
- int numREs = 0;
|
||||
+ SBufList accumulatedRE;
|
||||
+ int numREs = 0, reSize = 0;
|
||||
int flags = REG_EXTENDED | REG_NOSUB;
|
||||
- int largeREindex = 0;
|
||||
- char largeRE[BUFSIZ];
|
||||
- *largeRE = 0;
|
||||
|
||||
for (const SBuf & configurationLineWord : sl) {
|
||||
- int RElen;
|
||||
- RElen = configurationLineWord.length();
|
||||
-
|
||||
static const SBuf minus_i("-i");
|
||||
static const SBuf plus_i("+i");
|
||||
if (configurationLineWord == minus_i) {
|
||||
@@ -155,10 +163,11 @@ compileOptimisedREs(std::list<RegexPattern> &curlist, const SBufList &sl)
|
||||
debugs(28, 2, "optimisation of -i ... -i" );
|
||||
} else {
|
||||
debugs(28, 2, "-i" );
|
||||
- if (!compileRE(newlist, largeRE, flags))
|
||||
+ if (!compileRE(newlist, accumulatedRE, flags))
|
||||
return 0;
|
||||
flags |= REG_ICASE;
|
||||
- largeRE[largeREindex=0] = '\0';
|
||||
+ accumulatedRE.clear();
|
||||
+ reSize = 0;
|
||||
}
|
||||
} else if (configurationLineWord == plus_i) {
|
||||
if ((flags & REG_ICASE) == 0) {
|
||||
@@ -166,37 +175,34 @@ compileOptimisedREs(std::list<RegexPattern> &curlist, const SBufList &sl)
|
||||
debugs(28, 2, "optimisation of +i ... +i");
|
||||
} else {
|
||||
debugs(28, 2, "+i");
|
||||
- if (!compileRE(newlist, largeRE, flags))
|
||||
+ if (!compileRE(newlist, accumulatedRE, flags))
|
||||
return 0;
|
||||
flags &= ~REG_ICASE;
|
||||
- largeRE[largeREindex=0] = '\0';
|
||||
+ accumulatedRE.clear();
|
||||
+ reSize = 0;
|
||||
}
|
||||
- } else if (RElen + largeREindex + 3 < BUFSIZ-1) {
|
||||
+ } else if (reSize < 1024) {
|
||||
debugs(28, 2, "adding RE '" << configurationLineWord << "'");
|
||||
- if (largeREindex > 0) {
|
||||
- largeRE[largeREindex] = '|';
|
||||
- ++largeREindex;
|
||||
- }
|
||||
- largeRE[largeREindex] = '(';
|
||||
- ++largeREindex;
|
||||
- configurationLineWord.copy(largeRE+largeREindex, BUFSIZ-largeREindex);
|
||||
- largeREindex += configurationLineWord.length();
|
||||
- largeRE[largeREindex] = ')';
|
||||
- ++largeREindex;
|
||||
- largeRE[largeREindex] = '\0';
|
||||
+ accumulatedRE.push_back(configurationLineWord);
|
||||
++numREs;
|
||||
+ reSize += configurationLineWord.length();
|
||||
} else {
|
||||
debugs(28, 2, "buffer full, generating new optimised RE..." );
|
||||
- if (!compileRE(newlist, largeRE, flags))
|
||||
+ accumulatedRE.push_back(configurationLineWord);
|
||||
+ if (!compileRE(newlist, accumulatedRE, flags))
|
||||
return 0;
|
||||
- largeRE[largeREindex=0] = '\0';
|
||||
+ accumulatedRE.clear();
|
||||
+ reSize = 0;
|
||||
continue; /* do the loop again to add the RE to largeRE */
|
||||
}
|
||||
}
|
||||
|
||||
- if (!compileRE(newlist, largeRE, flags))
|
||||
+ if (!compileRE(newlist, accumulatedRE, flags))
|
||||
return 0;
|
||||
|
||||
+ accumulatedRE.clear();
|
||||
+ reSize = 0;
|
||||
+
|
||||
/* all was successful, so put the new list at the tail */
|
||||
curlist.splice(curlist.end(), newlist);
|
||||
|
||||
diff --git a/src/sbuf/Algorithms.h b/src/sbuf/Algorithms.h
|
||||
index 21ee889..338e9c0 100644
|
||||
--- a/src/sbuf/Algorithms.h
|
||||
+++ b/src/sbuf/Algorithms.h
|
||||
@@ -81,6 +81,57 @@ SBufContainerJoin(const Container &items, const SBuf& separator)
|
||||
return rv;
|
||||
}
|
||||
|
||||
+/** Join container of SBufs and append to supplied target
|
||||
+ *
|
||||
+ * append to the target SBuf all elements in the [begin,end) range from
|
||||
+ * an iterable container, prefixed by prefix, separated by separator and
|
||||
+ * followed by suffix. Prefix and suffix are added also in case of empty
|
||||
+ * iterable
|
||||
+ *
|
||||
+ * \return the modified dest
|
||||
+ */
|
||||
+template <class ContainerIterator>
|
||||
+SBuf&
|
||||
+JoinContainerIntoSBuf(SBuf &dest, const ContainerIterator &begin,
|
||||
+ const ContainerIterator &end, const SBuf& separator,
|
||||
+ const SBuf& prefix = SBuf(), const SBuf& suffix = SBuf())
|
||||
+{
|
||||
+ if (begin == end) {
|
||||
+ dest.append(prefix).append(suffix);
|
||||
+ return dest;
|
||||
+ }
|
||||
+
|
||||
+ // optimization: pre-calculate needed storage
|
||||
+ const SBuf::size_type totalContainerSize =
|
||||
+ std::accumulate(begin, end, 0, SBufAddLength(separator)) +
|
||||
+ dest.length() + prefix.length() + suffix.length();
|
||||
+ SBufReservationRequirements req;
|
||||
+ req.minSpace = totalContainerSize;
|
||||
+ dest.reserve(req);
|
||||
+
|
||||
+ auto i = begin;
|
||||
+ dest.append(prefix);
|
||||
+ dest.append(*i);
|
||||
+ ++i;
|
||||
+ for (; i != end; ++i)
|
||||
+ dest.append(separator).append(*i);
|
||||
+ dest.append(suffix);
|
||||
+ return dest;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+/// convenience wrapper of JoinContainerIntoSBuf with no caller-supplied SBuf
|
||||
+template <class ContainerIterator>
|
||||
+SBuf
|
||||
+JoinContainerToSBuf(const ContainerIterator &begin,
|
||||
+ const ContainerIterator &end, const SBuf& separator,
|
||||
+ const SBuf& prefix = SBuf(), const SBuf& suffix = SBuf())
|
||||
+{
|
||||
+ SBuf rv;
|
||||
+ return JoinContainerIntoSBuf(rv, begin, end, separator, prefix, suffix);
|
||||
+}
|
||||
+
|
||||
+
|
||||
namespace std {
|
||||
/// default hash functor to support std::unordered_map<SBuf,*>
|
||||
template <>
|
33
squid-4.11-location.patch
Normal file
33
squid-4.11-location.patch
Normal file
@ -0,0 +1,33 @@
|
||||
diff --git a/QUICKSTART b/QUICKSTART
|
||||
index e5299b4..a243437 100644
|
||||
--- a/QUICKSTART
|
||||
+++ b/QUICKSTART
|
||||
@@ -10,10 +10,9 @@ After you retrieved, compiled and installed the Squid software (see
|
||||
INSTALL in the same directory), you have to configure the squid.conf
|
||||
file. This is the list of the values you *need* to change, because no
|
||||
sensible defaults could be defined. Do not touch the other variables
|
||||
-for now. We assume you have installed Squid in the default location:
|
||||
-/usr/local/squid
|
||||
+for now.
|
||||
|
||||
-Uncomment and edit the following lines in /usr/local/squid/etc/squid.conf:
|
||||
+Uncomment and edit the following lines in /etc/squid/squid.conf:
|
||||
|
||||
==============================================================================
|
||||
|
||||
@@ -80,12 +79,12 @@ After editing squid.conf to your liking, run Squid from the command
|
||||
line TWICE:
|
||||
|
||||
To create any disk cache_dir configured:
|
||||
- % /usr/local/squid/sbin/squid -z
|
||||
+ % /usr/sbin/squid -z
|
||||
|
||||
To start squid:
|
||||
- % /usr/local/squid/sbin/squid
|
||||
+ % /usr/sbin/squid
|
||||
|
||||
-Check in the cache.log (/usr/local/squid/var/logs/cache.log) that
|
||||
+Check in the cache.log (/var/log/squid/cache.log) that
|
||||
everything is all right.
|
||||
|
||||
Once Squid created all its files (it can take several minutes on some
|
10
squid-4.11-perlpath.patch
Normal file
10
squid-4.11-perlpath.patch
Normal file
@ -0,0 +1,10 @@
|
||||
diff --git a/contrib/url-normalizer.pl b/contrib/url-normalizer.pl
|
||||
index 90ac6a4..8dbed90 100755
|
||||
--- a/contrib/url-normalizer.pl
|
||||
+++ b/contrib/url-normalizer.pl
|
||||
@@ -1,4 +1,4 @@
|
||||
-#!/usr/local/bin/perl -Tw
|
||||
+#!/usr/bin/perl -Tw
|
||||
#
|
||||
# * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
|
||||
# *
|
424
squid-4.15-CVE-2021-28116.patch
Normal file
424
squid-4.15-CVE-2021-28116.patch
Normal file
@ -0,0 +1,424 @@
|
||||
commit b003a0da7865caa25b5d1e70c79329b32409b02a (HEAD -> refs/heads/v4, refs/remotes/origin/v4)
|
||||
Author: Amos Jeffries <yadij@users.noreply.github.com>
|
||||
Date: 2021-09-24 21:53:11 +0000
|
||||
|
||||
WCCP: Validate packets better (#899)
|
||||
|
||||
Update WCCP to support exception based error handling for
|
||||
parsing and processing we are moving Squid to for protocol
|
||||
handling.
|
||||
|
||||
Update the main WCCPv2 parsing checks to throw meaningful
|
||||
exceptions when detected.
|
||||
|
||||
diff --git a/src/wccp2.cc b/src/wccp2.cc
|
||||
index ee592449c..6ef469e91 100644
|
||||
--- a/src/wccp2.cc
|
||||
+++ b/src/wccp2.cc
|
||||
@@ -1108,6 +1108,59 @@ wccp2ConnectionClose(void)
|
||||
* Functions for handling the requests.
|
||||
*/
|
||||
|
||||
+/// Checks that the given area section ends inside the given (whole) area.
|
||||
+/// \param error the message to throw when the section does not fit
|
||||
+static void
|
||||
+CheckSectionLength(const void *sectionStart, const size_t sectionLength, const void *wholeStart, const size_t wholeSize, const char *error)
|
||||
+{
|
||||
+ assert(sectionStart);
|
||||
+ assert(wholeStart);
|
||||
+
|
||||
+ const auto wholeEnd = static_cast<const char*>(wholeStart) + wholeSize;
|
||||
+ assert(sectionStart >= wholeStart && "we never go backwards");
|
||||
+ assert(sectionStart <= wholeEnd && "we never go beyond our whole (but zero-sized fields are OK)");
|
||||
+ static_assert(sizeof(wccp2_i_see_you_t) <= PTRDIFF_MAX, "paranoid: no UB when subtracting in-whole pointers");
|
||||
+ // subtraction safe due to the three assertions above
|
||||
+ const auto remainderDiff = wholeEnd - static_cast<const char*>(sectionStart);
|
||||
+
|
||||
+ // casting safe due to the assertions above (and size_t definition)
|
||||
+ assert(remainderDiff >= 0);
|
||||
+ const auto remainderSize = static_cast<size_t>(remainderDiff);
|
||||
+
|
||||
+ if (sectionLength <= remainderSize)
|
||||
+ return;
|
||||
+
|
||||
+ throw TextException(error, Here());
|
||||
+}
|
||||
+
|
||||
+/// Checks that the area contains at least dataLength bytes after the header.
|
||||
+/// The size of the field header itself is not included in dataLength.
|
||||
+/// \returns the total field size -- the field header and field data combined
|
||||
+template<class FieldHeader>
|
||||
+static size_t
|
||||
+CheckFieldDataLength(const FieldHeader *header, const size_t dataLength, const void *areaStart, const size_t areaSize, const char *error)
|
||||
+{
|
||||
+ assert(header);
|
||||
+ const auto dataStart = reinterpret_cast<const char*>(header) + sizeof(header);
|
||||
+ CheckSectionLength(dataStart, dataLength, areaStart, areaSize, error);
|
||||
+ return sizeof(header) + dataLength; // no overflow after CheckSectionLength()
|
||||
+}
|
||||
+
|
||||
+/// Positions the given field at a given start within a given packet area.
|
||||
+/// The Field type determines the correct field size (used for bounds checking).
|
||||
+/// \param field the field pointer the function should set
|
||||
+/// \param areaStart the start of a packet (sub)structure containing the field
|
||||
+/// \param areaSize the size of the packet (sub)structure starting at areaStart
|
||||
+/// \param fieldStart the start of a field within the given area
|
||||
+/// \param error the message to throw when the field does not fit the area
|
||||
+template<class Field>
|
||||
+static void
|
||||
+SetField(Field *&field, const void *fieldStart, const void *areaStart, const size_t areaSize, const char *error)
|
||||
+{
|
||||
+ CheckSectionLength(fieldStart, sizeof(Field), areaStart, areaSize, error);
|
||||
+ field = static_cast<Field*>(const_cast<void*>(fieldStart));
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Accept the UDP packet
|
||||
*/
|
||||
@@ -1124,8 +1177,6 @@ wccp2HandleUdp(int sock, void *)
|
||||
|
||||
/* These structs form the parts of the packet */
|
||||
|
||||
- struct wccp2_item_header_t *header = NULL;
|
||||
-
|
||||
struct wccp2_security_none_t *security_info = NULL;
|
||||
|
||||
struct wccp2_service_info_t *service_info = NULL;
|
||||
@@ -1141,14 +1192,13 @@ wccp2HandleUdp(int sock, void *)
|
||||
struct wccp2_cache_identity_info_t *cache_identity = NULL;
|
||||
|
||||
struct wccp2_capability_info_header_t *router_capability_header = NULL;
|
||||
+ char *router_capability_data_start = nullptr;
|
||||
|
||||
struct wccp2_capability_element_t *router_capability_element;
|
||||
|
||||
struct sockaddr_in from;
|
||||
|
||||
struct in_addr cache_address;
|
||||
- int len, found;
|
||||
- short int data_length, offset;
|
||||
uint32_t tmp;
|
||||
char *ptr;
|
||||
int num_caches;
|
||||
@@ -1161,20 +1211,18 @@ wccp2HandleUdp(int sock, void *)
|
||||
Ip::Address from_tmp;
|
||||
from_tmp.setIPv4();
|
||||
|
||||
- len = comm_udp_recvfrom(sock,
|
||||
- &wccp2_i_see_you,
|
||||
- WCCP_RESPONSE_SIZE,
|
||||
- 0,
|
||||
- from_tmp);
|
||||
+ const auto lenOrError = comm_udp_recvfrom(sock, &wccp2_i_see_you, WCCP_RESPONSE_SIZE, 0, from_tmp);
|
||||
|
||||
- if (len < 0)
|
||||
+ if (lenOrError < 0)
|
||||
return;
|
||||
+ const auto len = static_cast<size_t>(lenOrError);
|
||||
|
||||
- if (ntohs(wccp2_i_see_you.version) != WCCP2_VERSION)
|
||||
- return;
|
||||
-
|
||||
- if (ntohl(wccp2_i_see_you.type) != WCCP2_I_SEE_YOU)
|
||||
- return;
|
||||
+ try {
|
||||
+ // TODO: Remove wccp2_i_see_you.data and use a buffer to read messages.
|
||||
+ const auto message_header_size = sizeof(wccp2_i_see_you) - sizeof(wccp2_i_see_you.data);
|
||||
+ Must2(len >= message_header_size, "incomplete WCCP message header");
|
||||
+ Must2(ntohs(wccp2_i_see_you.version) == WCCP2_VERSION, "WCCP version unsupported");
|
||||
+ Must2(ntohl(wccp2_i_see_you.type) == WCCP2_I_SEE_YOU, "WCCP packet type unsupported");
|
||||
|
||||
/* FIXME INET6 : drop conversion boundary */
|
||||
from_tmp.getSockAddr(from);
|
||||
@@ -1182,73 +1230,60 @@ wccp2HandleUdp(int sock, void *)
|
||||
debugs(80, 3, "Incoming WCCPv2 I_SEE_YOU length " << ntohs(wccp2_i_see_you.length) << ".");
|
||||
|
||||
/* Record the total data length */
|
||||
- data_length = ntohs(wccp2_i_see_you.length);
|
||||
+ const auto data_length = ntohs(wccp2_i_see_you.length);
|
||||
+ Must2(data_length <= len - message_header_size,
|
||||
+ "malformed packet claiming it's bigger than received data");
|
||||
|
||||
- offset = 0;
|
||||
-
|
||||
- if (data_length > len) {
|
||||
- debugs(80, DBG_IMPORTANT, "ERROR: Malformed WCCPv2 packet claiming it's bigger than received data");
|
||||
- return;
|
||||
- }
|
||||
+ size_t offset = 0;
|
||||
|
||||
/* Go through the data structure */
|
||||
- while (data_length > offset) {
|
||||
+ while (offset + sizeof(struct wccp2_item_header_t) <= data_length) {
|
||||
|
||||
char *data = wccp2_i_see_you.data;
|
||||
|
||||
- header = (struct wccp2_item_header_t *) &data[offset];
|
||||
+ const auto itemHeader = reinterpret_cast<const wccp2_item_header_t*>(&data[offset]);
|
||||
+ const auto itemSize = CheckFieldDataLength(itemHeader, ntohs(itemHeader->length),
|
||||
+ data, data_length, "truncated record");
|
||||
+ // XXX: Check "The specified length must be a multiple of 4 octets"
|
||||
+ // requirement to avoid unaligned memory reads after the first item.
|
||||
|
||||
- switch (ntohs(header->type)) {
|
||||
+ switch (ntohs(itemHeader->type)) {
|
||||
|
||||
case WCCP2_SECURITY_INFO:
|
||||
-
|
||||
- if (security_info != NULL) {
|
||||
- debugs(80, DBG_IMPORTANT, "Duplicate security definition");
|
||||
- return;
|
||||
- }
|
||||
-
|
||||
- security_info = (struct wccp2_security_none_t *) &wccp2_i_see_you.data[offset];
|
||||
+ Must2(!security_info, "duplicate security definition");
|
||||
+ SetField(security_info, itemHeader, itemHeader, itemSize,
|
||||
+ "security definition truncated");
|
||||
break;
|
||||
|
||||
case WCCP2_SERVICE_INFO:
|
||||
-
|
||||
- if (service_info != NULL) {
|
||||
- debugs(80, DBG_IMPORTANT, "Duplicate service_info definition");
|
||||
- return;
|
||||
- }
|
||||
-
|
||||
- service_info = (struct wccp2_service_info_t *) &wccp2_i_see_you.data[offset];
|
||||
+ Must2(!service_info, "duplicate service_info definition");
|
||||
+ SetField(service_info, itemHeader, itemHeader, itemSize,
|
||||
+ "service_info definition truncated");
|
||||
break;
|
||||
|
||||
case WCCP2_ROUTER_ID_INFO:
|
||||
-
|
||||
- if (router_identity_info != NULL) {
|
||||
- debugs(80, DBG_IMPORTANT, "Duplicate router_identity_info definition");
|
||||
- return;
|
||||
- }
|
||||
-
|
||||
- router_identity_info = (struct router_identity_info_t *) &wccp2_i_see_you.data[offset];
|
||||
+ Must2(!router_identity_info, "duplicate router_identity_info definition");
|
||||
+ SetField(router_identity_info, itemHeader, itemHeader, itemSize,
|
||||
+ "router_identity_info definition truncated");
|
||||
break;
|
||||
|
||||
case WCCP2_RTR_VIEW_INFO:
|
||||
-
|
||||
- if (router_view_header != NULL) {
|
||||
- debugs(80, DBG_IMPORTANT, "Duplicate router_view definition");
|
||||
- return;
|
||||
- }
|
||||
-
|
||||
- router_view_header = (struct router_view_t *) &wccp2_i_see_you.data[offset];
|
||||
+ Must2(!router_view_header, "duplicate router_view definition");
|
||||
+ SetField(router_view_header, itemHeader, itemHeader, itemSize,
|
||||
+ "router_view definition truncated");
|
||||
break;
|
||||
|
||||
- case WCCP2_CAPABILITY_INFO:
|
||||
-
|
||||
- if (router_capability_header != NULL) {
|
||||
- debugs(80, DBG_IMPORTANT, "Duplicate router_capability definition");
|
||||
- return;
|
||||
- }
|
||||
+ case WCCP2_CAPABILITY_INFO: {
|
||||
+ Must2(!router_capability_header, "duplicate router_capability definition");
|
||||
+ SetField(router_capability_header, itemHeader, itemHeader, itemSize,
|
||||
+ "router_capability definition truncated");
|
||||
|
||||
- router_capability_header = (struct wccp2_capability_info_header_t *) &wccp2_i_see_you.data[offset];
|
||||
+ CheckFieldDataLength(router_capability_header, ntohs(router_capability_header->capability_info_length),
|
||||
+ itemHeader, itemSize, "capability info truncated");
|
||||
+ router_capability_data_start = reinterpret_cast<char*>(router_capability_header) +
|
||||
+ sizeof(*router_capability_header);
|
||||
break;
|
||||
+ }
|
||||
|
||||
/* Nothing to do for the types below */
|
||||
|
||||
@@ -1257,22 +1292,17 @@ wccp2HandleUdp(int sock, void *)
|
||||
break;
|
||||
|
||||
default:
|
||||
- debugs(80, DBG_IMPORTANT, "Unknown record type in WCCPv2 Packet (" << ntohs(header->type) << ").");
|
||||
+ debugs(80, DBG_IMPORTANT, "Unknown record type in WCCPv2 Packet (" << ntohs(itemHeader->type) << ").");
|
||||
}
|
||||
|
||||
- offset += sizeof(struct wccp2_item_header_t);
|
||||
- offset += ntohs(header->length);
|
||||
-
|
||||
- if (offset > data_length) {
|
||||
- debugs(80, DBG_IMPORTANT, "Error: WCCPv2 packet tried to tell us there is data beyond the end of the packet");
|
||||
- return;
|
||||
- }
|
||||
+ offset += itemSize;
|
||||
+ assert(offset <= data_length && "CheckFieldDataLength(itemHeader...) established that");
|
||||
}
|
||||
|
||||
- if ((security_info == NULL) || (service_info == NULL) || (router_identity_info == NULL) || (router_view_header == NULL)) {
|
||||
- debugs(80, DBG_IMPORTANT, "Incomplete WCCPv2 Packet");
|
||||
- return;
|
||||
- }
|
||||
+ Must2(security_info, "packet missing security definition");
|
||||
+ Must2(service_info, "packet missing service_info definition");
|
||||
+ Must2(router_identity_info, "packet missing router_identity_info definition");
|
||||
+ Must2(router_view_header, "packet missing router_view definition");
|
||||
|
||||
debugs(80, 5, "Complete packet received");
|
||||
|
||||
@@ -1308,10 +1338,7 @@ wccp2HandleUdp(int sock, void *)
|
||||
break;
|
||||
}
|
||||
|
||||
- if (router_list_ptr->next == NULL) {
|
||||
- debugs(80, DBG_IMPORTANT, "WCCPv2 Packet received from unknown router");
|
||||
- return;
|
||||
- }
|
||||
+ Must2(router_list_ptr->next, "packet received from unknown router");
|
||||
|
||||
/* Set the router id */
|
||||
router_list_ptr->info->router_address = router_identity_info->router_id_element.router_address;
|
||||
@@ -1331,11 +1358,20 @@ wccp2HandleUdp(int sock, void *)
|
||||
}
|
||||
} else {
|
||||
|
||||
- char *end = ((char *) router_capability_header) + sizeof(*router_capability_header) + ntohs(router_capability_header->capability_info_length) - sizeof(struct wccp2_capability_info_header_t);
|
||||
-
|
||||
- router_capability_element = (struct wccp2_capability_element_t *) (((char *) router_capability_header) + sizeof(*router_capability_header));
|
||||
-
|
||||
- while ((char *) router_capability_element <= end) {
|
||||
+ const auto router_capability_data_length = ntohs(router_capability_header->capability_info_length);
|
||||
+ assert(router_capability_data_start);
|
||||
+ const auto router_capability_data_end = router_capability_data_start +
|
||||
+ router_capability_data_length;
|
||||
+ for (auto router_capability_data_current = router_capability_data_start;
|
||||
+ router_capability_data_current < router_capability_data_end;) {
|
||||
+
|
||||
+ SetField(router_capability_element, router_capability_data_current,
|
||||
+ router_capability_data_start, router_capability_data_length,
|
||||
+ "capability element header truncated");
|
||||
+ const auto elementSize = CheckFieldDataLength(
|
||||
+ router_capability_element, ntohs(router_capability_element->capability_length),
|
||||
+ router_capability_data_start, router_capability_data_length,
|
||||
+ "capability element truncated");
|
||||
|
||||
switch (ntohs(router_capability_element->capability_type)) {
|
||||
|
||||
@@ -1377,7 +1413,7 @@ wccp2HandleUdp(int sock, void *)
|
||||
debugs(80, DBG_IMPORTANT, "Unknown capability type in WCCPv2 Packet (" << ntohs(router_capability_element->capability_type) << ").");
|
||||
}
|
||||
|
||||
- router_capability_element = (struct wccp2_capability_element_t *) (((char *) router_capability_element) + sizeof(struct wccp2_item_header_t) + ntohs(router_capability_element->capability_length));
|
||||
+ router_capability_data_current += elementSize;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1396,23 +1432,34 @@ wccp2HandleUdp(int sock, void *)
|
||||
num_caches = 0;
|
||||
|
||||
/* Check to see if we're the master cache and update the cache list */
|
||||
- found = 0;
|
||||
+ bool found = false;
|
||||
service_list_ptr->lowest_ip = 1;
|
||||
cache_list_ptr = &router_list_ptr->cache_list_head;
|
||||
|
||||
/* to find the list of caches, we start at the end of the router view header */
|
||||
|
||||
ptr = (char *) (router_view_header) + sizeof(struct router_view_t);
|
||||
+ const auto router_view_size = sizeof(struct router_view_t) +
|
||||
+ ntohs(router_view_header->header.length);
|
||||
|
||||
/* Then we read the number of routers */
|
||||
- memcpy(&tmp, ptr, sizeof(tmp));
|
||||
+ const uint32_t *routerCountRaw = nullptr;
|
||||
+ SetField(routerCountRaw, ptr, router_view_header, router_view_size,
|
||||
+ "malformed packet (truncated router view info w/o number of routers)");
|
||||
|
||||
/* skip the number plus all the ip's */
|
||||
-
|
||||
- ptr += sizeof(tmp) + (ntohl(tmp) * sizeof(struct in_addr));
|
||||
+ ptr += sizeof(*routerCountRaw);
|
||||
+ const auto ipCount = ntohl(*routerCountRaw);
|
||||
+ const auto ipsSize = ipCount * sizeof(struct in_addr); // we check for unsigned overflow below
|
||||
+ Must2(ipsSize / sizeof(struct in_addr) != ipCount, "huge IP address count");
|
||||
+ CheckSectionLength(ptr, ipsSize, router_view_header, router_view_size, "invalid IP address count");
|
||||
+ ptr += ipsSize;
|
||||
|
||||
/* Then read the number of caches */
|
||||
- memcpy(&tmp, ptr, sizeof(tmp));
|
||||
+ const uint32_t *cacheCountRaw = nullptr;
|
||||
+ SetField(cacheCountRaw, ptr, router_view_header, router_view_size,
|
||||
+ "malformed packet (truncated router view info w/o cache count)");
|
||||
+ memcpy(&tmp, cacheCountRaw, sizeof(tmp)); // TODO: Replace tmp with cacheCount
|
||||
ptr += sizeof(tmp);
|
||||
|
||||
if (ntohl(tmp) != 0) {
|
||||
@@ -1426,7 +1473,8 @@ wccp2HandleUdp(int sock, void *)
|
||||
|
||||
case WCCP2_ASSIGNMENT_METHOD_HASH:
|
||||
|
||||
- cache_identity = (struct wccp2_cache_identity_info_t *) ptr;
|
||||
+ SetField(cache_identity, ptr, router_view_header, router_view_size,
|
||||
+ "malformed packet (truncated router view info cache w/o assignment hash)");
|
||||
|
||||
ptr += sizeof(struct wccp2_cache_identity_info_t);
|
||||
|
||||
@@ -1437,13 +1485,15 @@ wccp2HandleUdp(int sock, void *)
|
||||
|
||||
case WCCP2_ASSIGNMENT_METHOD_MASK:
|
||||
|
||||
- cache_mask_info = (struct cache_mask_info_t *) ptr;
|
||||
+ SetField(cache_mask_info, ptr, router_view_header, router_view_size,
|
||||
+ "malformed packet (truncated router view info cache w/o assignment mask)");
|
||||
|
||||
/* The mask assignment has an undocumented variable length entry here */
|
||||
|
||||
if (ntohl(cache_mask_info->num1) == 3) {
|
||||
|
||||
- cache_mask_identity = (struct wccp2_cache_mask_identity_info_t *) ptr;
|
||||
+ SetField(cache_mask_identity, ptr, router_view_header, router_view_size,
|
||||
+ "malformed packet (truncated router view info cache w/o assignment mask identity)");
|
||||
|
||||
ptr += sizeof(struct wccp2_cache_mask_identity_info_t);
|
||||
|
||||
@@ -1474,10 +1524,7 @@ wccp2HandleUdp(int sock, void *)
|
||||
debugs (80, 5, "checking cache list: (" << std::hex << cache_address.s_addr << ":" << router_list_ptr->local_ip.s_addr << ")");
|
||||
|
||||
/* Check to see if it's the master, or us */
|
||||
-
|
||||
- if (cache_address.s_addr == router_list_ptr->local_ip.s_addr) {
|
||||
- found = 1;
|
||||
- }
|
||||
+ found = found || (cache_address.s_addr == router_list_ptr->local_ip.s_addr);
|
||||
|
||||
if (cache_address.s_addr < router_list_ptr->local_ip.s_addr) {
|
||||
service_list_ptr->lowest_ip = 0;
|
||||
@@ -1494,7 +1541,7 @@ wccp2HandleUdp(int sock, void *)
|
||||
cache_list_ptr->next = NULL;
|
||||
|
||||
service_list_ptr->lowest_ip = 1;
|
||||
- found = 1;
|
||||
+ found = true;
|
||||
num_caches = 1;
|
||||
}
|
||||
|
||||
@@ -1502,7 +1549,7 @@ wccp2HandleUdp(int sock, void *)
|
||||
|
||||
router_list_ptr->num_caches = htonl(num_caches);
|
||||
|
||||
- if ((found == 1) && (service_list_ptr->lowest_ip == 1)) {
|
||||
+ if (found && (service_list_ptr->lowest_ip == 1)) {
|
||||
if (ntohl(router_view_header->change_number) != router_list_ptr->member_change) {
|
||||
debugs(80, 4, "Change detected - queueing up new assignment");
|
||||
router_list_ptr->member_change = ntohl(router_view_header->change_number);
|
||||
@@ -1515,6 +1562,10 @@ wccp2HandleUdp(int sock, void *)
|
||||
eventDelete(wccp2AssignBuckets, NULL);
|
||||
debugs(80, 5, "I am not the lowest ip cache - not assigning buckets");
|
||||
}
|
||||
+
|
||||
+ } catch (...) {
|
||||
+ debugs(80, DBG_IMPORTANT, "ERROR: Ignoring WCCPv2 message: " << CurrentException);
|
||||
+ }
|
||||
}
|
||||
|
||||
static void
|
32
squid-4.15-ftp-filename-extraction.patch
Normal file
32
squid-4.15-ftp-filename-extraction.patch
Normal file
@ -0,0 +1,32 @@
|
||||
diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc
|
||||
index da9867f..e992638 100644
|
||||
--- a/src/clients/FtpGateway.cc
|
||||
+++ b/src/clients/FtpGateway.cc
|
||||
@@ -1084,16 +1084,17 @@ Ftp::Gateway::checkAuth(const HttpHeader * req_hdr)
|
||||
void
|
||||
Ftp::Gateway::checkUrlpath()
|
||||
{
|
||||
- static SBuf str_type_eq("type=");
|
||||
- auto t = request->url.path().rfind(';');
|
||||
-
|
||||
- if (t != SBuf::npos) {
|
||||
- auto filenameEnd = t-1;
|
||||
- if (request->url.path().substr(++t).cmp(str_type_eq, str_type_eq.length()) == 0) {
|
||||
- t += str_type_eq.length();
|
||||
- typecode = (char)xtoupper(request->url.path()[t]);
|
||||
- request->url.path(request->url.path().substr(0,filenameEnd));
|
||||
- }
|
||||
+ // If typecode was specified, extract it and leave just the filename in
|
||||
+ // url.path. Tolerate trailing garbage or missing typecode value. Roughly:
|
||||
+ // [filename] ;type=[typecode char] [trailing garbage]
|
||||
+ static const SBuf middle(";type=");
|
||||
+ const auto typeSpecStart = request->url.path().find(middle);
|
||||
+ if (typeSpecStart != SBuf::npos) {
|
||||
+ const auto fullPath = request->url.path();
|
||||
+ const auto typecodePos = typeSpecStart + middle.length();
|
||||
+ typecode = (typecodePos < fullPath.length()) ?
|
||||
+ static_cast<char>(xtoupper(fullPath[typecodePos])) : '\0';
|
||||
+ request->url.path(fullPath.substr(0, typeSpecStart));
|
||||
}
|
||||
|
||||
int l = request->url.path().length();
|
25
squid-4.15.tar.xz.asc
Normal file
25
squid-4.15.tar.xz.asc
Normal file
@ -0,0 +1,25 @@
|
||||
File: squid-4.15.tar.xz
|
||||
Date: Mon 10 May 2021 10:50:22 UTC
|
||||
Size: 2454176
|
||||
MD5 : a593de9dc888dfeca4f1f7db2cd7d3b9
|
||||
SHA1: 60bda34ba39657e2d870c8c1d2acece8a69c3075
|
||||
Key : CD6DBF8EF3B17D3E <squid3@treenet.co.nz>
|
||||
B068 84ED B779 C89B 044E 64E3 CD6D BF8E F3B1 7D3E
|
||||
keyring = http://www.squid-cache.org/pgp.asc
|
||||
keyserver = pool.sks-keyservers.net
|
||||
-----BEGIN PGP SIGNATURE-----
|
||||
|
||||
iQIzBAABCgAdFiEEsGiE7bd5yJsETmTjzW2/jvOxfT4FAmCZD/UACgkQzW2/jvOx
|
||||
fT6zZg/+N8JMIYpmVJ7jm4lF0Ub2kEHGTOrc+tnlA3LGnlMQuTm61+BYk58g0SKW
|
||||
96NbJ0cycW215Q34L+Y0tWuxEbIU01vIc3AA7rQd0LKy+fQU0OtBuhk5Vf4bKilW
|
||||
uHEVIQZs9HmY6bqC+kgtCf49tVZvR8FZYNuilg/68+i/pQdwaDDmVb+j2oF7w+y2
|
||||
dgkTFWtM5NTL6bqUVC0E7lLFPjzMefKfxkkpWFdV/VrAhU25jN24kpnjcfotQhdW
|
||||
LDFy5okduz3ljso9pBYJfLeMXM1FZPpceC91zj32x3tcUyrD3yIoXob58rEKvfe4
|
||||
RDXN4SuClsNe4UQ4oNoGIES9XtaYlOzPR1PlbqPUrdp1cDnhgLJ+1fkAixlMqCml
|
||||
wuI1VIKSEY+nvRzQzFHnXJK9otV8QwMF76AHaytO9y+X6JuZmu/CcV1pq61qY9qv
|
||||
t1/8z99wWSxpu17zthZgq64J225GF/hkBedaFlYoS5k5YUMDLPlRSCC0yPmb8JBF
|
||||
Cns5i/aq2PmOx2ZhQ2RQIF416J3HK8Galw8ytFOjnEcn4ux9yzKNjL38p4+PJJA0
|
||||
7GCMAqYYNjok3LSkGbiR7cPgbHnkqRfYbPFLMj4FtruoFlZ9L5MIU3oFvqA3ZR6l
|
||||
Az6LaKLsAYPUmukAOPUSIrqpKXZHc7hdBWkT+7RYA4qaoU+9oIo=
|
||||
=1Re1
|
||||
-----END PGP SIGNATURE-----
|
68
squid-5.0.5-symlink-lang-err.patch
Normal file
68
squid-5.0.5-symlink-lang-err.patch
Normal file
@ -0,0 +1,68 @@
|
||||
From fc01451000eaa5592cd5afbd6aee14e53f7dd2c3 Mon Sep 17 00:00:00 2001
|
||||
From: Amos Jeffries <amosjeffries@squid-cache.org>
|
||||
Date: Sun, 18 Oct 2020 20:23:10 +1300
|
||||
Subject: [PATCH] Update translations integration
|
||||
|
||||
* Add credits for es-mx translation moderator
|
||||
* Use es-mx for default of all Spanish (Central America) texts
|
||||
* Update translation related .am files
|
||||
---
|
||||
doc/manuals/language.am | 2 +-
|
||||
errors/TRANSLATORS | 1 +
|
||||
errors/aliases | 3 ++-
|
||||
errors/language.am | 3 ++-
|
||||
errors/template.am | 2 +-
|
||||
5 files changed, 7 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/doc/manuals/language.am b/doc/manuals/language.am
|
||||
index 7670c88380c..f03c4cf71b4 100644
|
||||
--- a/doc/manuals/language.am
|
||||
+++ b/doc/manuals/language.am
|
||||
@@ -18,4 +18,4 @@ TRANSLATE_LANGUAGES = \
|
||||
oc.lang \
|
||||
pt.lang \
|
||||
ro.lang \
|
||||
- ru.lang
|
||||
+ ru.lang
|
||||
diff --git a/errors/aliases b/errors/aliases
|
||||
index 36f17f4b80f..cf0116f297d 100644
|
||||
--- a/errors/aliases
|
||||
+++ b/errors/aliases
|
||||
@@ -14,7 +14,8 @@ da da-dk
|
||||
de de-at de-ch de-de de-li de-lu
|
||||
el el-gr
|
||||
en en-au en-bz en-ca en-cn en-gb en-ie en-in en-jm en-nz en-ph en-sg en-tt en-uk en-us en-za en-zw
|
||||
-es es-ar es-bo es-cl es-co es-cr es-do es-ec es-es es-gt es-hn es-mx es-ni es-pa es-pe es-pr es-py es-sv es-us es-uy es-ve es-xl
|
||||
+es es-ar es-bo es-cl es-cu es-co es-do es-ec es-es es-pe es-pr es-py es-us es-uy es-ve es-xl spq
|
||||
+es-mx es-bz es-cr es-gt es-hn es-ni es-pa es-sv
|
||||
et et-ee
|
||||
fa fa-fa fa-ir
|
||||
fi fi-fi
|
||||
diff --git a/errors/language.am b/errors/language.am
|
||||
index 12b1b2b3b43..029e8c1eb2f 100644
|
||||
--- a/errors/language.am
|
||||
+++ b/errors/language.am
|
||||
@@ -17,6 +17,7 @@ TRANSLATE_LANGUAGES = \
|
||||
de.lang \
|
||||
el.lang \
|
||||
en.lang \
|
||||
+ es-mx.lang \
|
||||
es.lang \
|
||||
et.lang \
|
||||
fa.lang \
|
||||
@@ -51,4 +52,4 @@ TRANSLATE_LANGUAGES = \
|
||||
uz.lang \
|
||||
vi.lang \
|
||||
zh-hans.lang \
|
||||
- zh-hant.lang
|
||||
+ zh-hant.lang
|
||||
diff --git a/errors/template.am b/errors/template.am
|
||||
index 6c12781e6f4..715c65aa22b 100644
|
||||
--- a/errors/template.am
|
||||
+++ b/errors/template.am
|
||||
@@ -48,4 +48,4 @@ ERROR_TEMPLATES = \
|
||||
templates/ERR_UNSUP_REQ \
|
||||
templates/ERR_URN_RESOLVE \
|
||||
templates/ERR_WRITE_ERROR \
|
||||
- templates/ERR_ZERO_SIZE_OBJECT
|
||||
+ templates/ERR_ZERO_SIZE_OBJECT
|
127
squid-5.0.6-active-ftp.patch
Normal file
127
squid-5.0.6-active-ftp.patch
Normal file
@ -0,0 +1,127 @@
diff --git a/src/clients/FtpClient.cc b/src/clients/FtpClient.cc
index 747ed35..f2b7126 100644
--- a/src/clients/FtpClient.cc
+++ b/src/clients/FtpClient.cc
@@ -795,7 +795,8 @@ Ftp::Client::connectDataChannel()
bool
Ftp::Client::openListenSocket()
{
- return false;
+ debugs(9, 3, HERE);
+ return false;
}

/// creates a data channel Comm close callback
diff --git a/src/clients/FtpClient.h b/src/clients/FtpClient.h
index eb5ea1b..e92c007 100644
--- a/src/clients/FtpClient.h
+++ b/src/clients/FtpClient.h
@@ -137,7 +137,7 @@ public:
bool sendPort();
bool sendPassive();
void connectDataChannel();
- bool openListenSocket();
+ virtual bool openListenSocket();
void switchTimeoutToDataChannel();

CtrlChannel ctrl; ///< FTP control channel state
diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc
index 05db817..2989cd2 100644
--- a/src/clients/FtpGateway.cc
+++ b/src/clients/FtpGateway.cc
@@ -86,6 +86,13 @@ struct GatewayFlags {
class Gateway;
typedef void (StateMethod)(Ftp::Gateway *);

+} // namespace FTP
+
+static void ftpOpenListenSocket(Ftp::Gateway * ftpState, int fallback);
+
+namespace Ftp
+{
+
/// FTP Gateway: An FTP client that takes an HTTP request with an ftp:// URI,
/// converts it into one or more FTP commands, and then
/// converts one or more FTP responses into the final HTTP response.
@@ -136,7 +143,11 @@ public:

/// create a data channel acceptor and start listening.
void listenForDataChannel(const Comm::ConnectionPointer &conn);
-
+ virtual bool openListenSocket() {
+ debugs(9, 3, HERE);
+ ftpOpenListenSocket(this, 0);
+ return Comm::IsConnOpen(data.conn);
+ }
int checkAuth(const HttpHeader * req_hdr);
void checkUrlpath();
void buildTitleUrl();
@@ -1786,6 +1797,7 @@ ftpOpenListenSocket(Ftp::Gateway * ftpState, int fallback)
}

ftpState->listenForDataChannel(temp);
+ ftpState->data.listenConn = temp;
}

static void
@@ -1821,13 +1833,19 @@ ftpSendPORT(Ftp::Gateway * ftpState)
// pull out the internal IP address bytes to send in PORT command...
// source them from the listen_conn->local

+ struct sockaddr_in addr;
+ socklen_t addrlen = sizeof(addr);
+ getsockname(ftpState->data.listenConn->fd, (struct sockaddr *) &addr, &addrlen);
+ unsigned char port_high = ntohs(addr.sin_port) >> 8;
+ unsigned char port_low = ntohs(addr.sin_port) & 0xff;
+
struct addrinfo *AI = NULL;
ftpState->data.listenConn->local.getAddrInfo(AI, AF_INET);
unsigned char *addrptr = (unsigned char *) &((struct sockaddr_in*)AI->ai_addr)->sin_addr;
- unsigned char *portptr = (unsigned char *) &((struct sockaddr_in*)AI->ai_addr)->sin_port;
+ // unsigned char *portptr = (unsigned char *) &((struct sockaddr_in*)AI->ai_addr)->sin_port;
snprintf(cbuf, CTRL_BUFLEN, "PORT %d,%d,%d,%d,%d,%d\r\n",
addrptr[0], addrptr[1], addrptr[2], addrptr[3],
- portptr[0], portptr[1]);
+ port_high, port_low);
ftpState->writeCommand(cbuf);
ftpState->state = Ftp::Client::SENT_PORT;

@@ -1880,14 +1898,27 @@ ftpSendEPRT(Ftp::Gateway * ftpState)
return;
}

+
+ unsigned int port;
+ struct sockaddr_storage addr;
+ socklen_t addrlen = sizeof(addr);
+ getsockname(ftpState->data.listenConn->fd, (struct sockaddr *) &addr, &addrlen);
+ if (addr.ss_family == AF_INET) {
+ struct sockaddr_in *addr4 = (struct sockaddr_in*) &addr;
+ port = ntohs( addr4->sin_port );
+ } else {
+ struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) &addr;
+ port = ntohs( addr6->sin6_port );
+ }
+
char buf[MAX_IPSTRLEN];

/* RFC 2428 defines EPRT as IPv6 equivalent to IPv4 PORT command. */
/* Which can be used by EITHER protocol. */
- snprintf(cbuf, CTRL_BUFLEN, "EPRT |%d|%s|%d|\r\n",
+ snprintf(cbuf, CTRL_BUFLEN, "EPRT |%d|%s|%u|\r\n",
( ftpState->data.listenConn->local.isIPv6() ? 2 : 1 ),
ftpState->data.listenConn->local.toStr(buf,MAX_IPSTRLEN),
- ftpState->data.listenConn->local.port() );
+ port);

ftpState->writeCommand(cbuf);
ftpState->state = Ftp::Client::SENT_EPRT;
@@ -1906,7 +1937,7 @@ ftpReadEPRT(Ftp::Gateway * ftpState)
ftpSendPORT(ftpState);
return;
}
-
+ ftpState->ctrl.message = NULL;
ftpRestOrList(ftpState);
}
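Note on the active-FTP change above: instead of reading the data-channel port back from data.listenConn->local (which can still report port 0 after the kernel picked an ephemeral port), the patched ftpSendPORT()/ftpSendEPRT() ask the socket itself with getsockname() before composing the PORT/EPRT commands. The sketch below shows that lookup in isolation; it is a minimal standalone illustration using plain POSIX sockets and invented names, not Squid's classes, and it omits error handling.

    // Sketch: recover the ephemeral port the kernel assigned to a listener,
    // the same getsockname() step the patch adds before building PORT/EPRT.
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        sockaddr_in bindAddr{};
        bindAddr.sin_family = AF_INET;
        bindAddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
        bindAddr.sin_port = 0;                       // 0: let the kernel choose
        bind(fd, reinterpret_cast<sockaddr*>(&bindAddr), sizeof(bindAddr));
        listen(fd, 1);

        sockaddr_in actual{};
        socklen_t len = sizeof(actual);
        getsockname(fd, reinterpret_cast<sockaddr*>(&actual), &len);

        const unsigned short port = ntohs(actual.sin_port);
        // PORT wants the port split into high and low bytes, as in the patch;
        // h1..h4 stand in for the IP address bytes.
        std::printf("PORT h1,h2,h3,h4,%u,%u\n",
                    static_cast<unsigned>(port >> 8),
                    static_cast<unsigned>(port & 0xff));
        close(fd);
        return 0;
    }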
185
squid-5.0.6-openssl3.patch
Normal file
185
squid-5.0.6-openssl3.patch
Normal file
@ -0,0 +1,185 @@
diff --git a/src/ssl/support.cc b/src/ssl/support.cc
index 3ad135d..73912ce 100644
--- a/src/ssl/support.cc
+++ b/src/ssl/support.cc
@@ -557,7 +557,11 @@ Ssl::VerifyCallbackParameters::At(Security::Connection &sconn)
}

// "dup" function for SSL_get_ex_new_index("cert_err_check")
-#if SQUID_USE_CONST_CRYPTO_EX_DATA_DUP
+#if OPENSSL_VERSION_MAJOR >= 3
+static int
+ssl_dupAclChecklist(CRYPTO_EX_DATA *, const CRYPTO_EX_DATA *, void **,
+ int, long, void *)
+#elif SQUID_USE_CONST_CRYPTO_EX_DATA_DUP
static int
ssl_dupAclChecklist(CRYPTO_EX_DATA *, const CRYPTO_EX_DATA *, void *,
int, long, void *)
diff --git a/src/security/PeerOptions.cc b/src/security/PeerOptions.cc
index cf1d4ba..4346ba5 100644
--- a/src/security/PeerOptions.cc
+++ b/src/security/PeerOptions.cc
@@ -297,130 +297,130 @@ static struct ssl_option {

} ssl_options[] = {

-#if SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG
+#ifdef SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG
{
"NETSCAPE_REUSE_CIPHER_CHANGE_BUG", SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG
},
#endif
-#if SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG
+#ifdef SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG
{
"SSLREF2_REUSE_CERT_TYPE_BUG", SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG
},
#endif
-#if SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER
+#ifdef SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER
{
"MICROSOFT_BIG_SSLV3_BUFFER", SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER
},
#endif
-#if SSL_OP_SSLEAY_080_CLIENT_DH_BUG
+#ifdef SSL_OP_SSLEAY_080_CLIENT_DH_BUG
{
"SSLEAY_080_CLIENT_DH_BUG", SSL_OP_SSLEAY_080_CLIENT_DH_BUG
},
#endif
-#if SSL_OP_TLS_D5_BUG
+#ifdef SSL_OP_TLS_D5_BUG
{
"TLS_D5_BUG", SSL_OP_TLS_D5_BUG
},
#endif
-#if SSL_OP_TLS_BLOCK_PADDING_BUG
+#ifdef SSL_OP_TLS_BLOCK_PADDING_BUG
{
"TLS_BLOCK_PADDING_BUG", SSL_OP_TLS_BLOCK_PADDING_BUG
},
#endif
-#if SSL_OP_TLS_ROLLBACK_BUG
+#ifdef SSL_OP_TLS_ROLLBACK_BUG
{
"TLS_ROLLBACK_BUG", SSL_OP_TLS_ROLLBACK_BUG
},
#endif
-#if SSL_OP_ALL
+#ifdef SSL_OP_ALL
{
"ALL", (long)SSL_OP_ALL
},
#endif
-#if SSL_OP_SINGLE_DH_USE
+#ifdef SSL_OP_SINGLE_DH_USE
{
"SINGLE_DH_USE", SSL_OP_SINGLE_DH_USE
},
#endif
-#if SSL_OP_EPHEMERAL_RSA
+#ifdef SSL_OP_EPHEMERAL_RSA
{
"EPHEMERAL_RSA", SSL_OP_EPHEMERAL_RSA
},
#endif
-#if SSL_OP_PKCS1_CHECK_1
+#ifdef SSL_OP_PKCS1_CHECK_1
{
"PKCS1_CHECK_1", SSL_OP_PKCS1_CHECK_1
},
#endif
-#if SSL_OP_PKCS1_CHECK_2
+#ifdef SSL_OP_PKCS1_CHECK_2
{
"PKCS1_CHECK_2", SSL_OP_PKCS1_CHECK_2
},
#endif
-#if SSL_OP_NETSCAPE_CA_DN_BUG
+#ifdef SSL_OP_NETSCAPE_CA_DN_BUG
{
"NETSCAPE_CA_DN_BUG", SSL_OP_NETSCAPE_CA_DN_BUG
},
#endif
-#if SSL_OP_NON_EXPORT_FIRST
+#ifdef SSL_OP_NON_EXPORT_FIRST
{
"NON_EXPORT_FIRST", SSL_OP_NON_EXPORT_FIRST
},
#endif
-#if SSL_OP_CIPHER_SERVER_PREFERENCE
+#ifdef SSL_OP_CIPHER_SERVER_PREFERENCE
{
"CIPHER_SERVER_PREFERENCE", SSL_OP_CIPHER_SERVER_PREFERENCE
},
#endif
-#if SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG
+#ifdef SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG
{
"NETSCAPE_DEMO_CIPHER_CHANGE_BUG", SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG
},
#endif
-#if SSL_OP_NO_SSLv3
+#ifdef SSL_OP_NO_SSLv3
{
"NO_SSLv3", SSL_OP_NO_SSLv3
},
#endif
-#if SSL_OP_NO_TLSv1
+#ifdef SSL_OP_NO_TLSv1
{
"NO_TLSv1", SSL_OP_NO_TLSv1
},
#else
{ "NO_TLSv1", 0 },
#endif
-#if SSL_OP_NO_TLSv1_1
+#ifdef SSL_OP_NO_TLSv1_1
{
"NO_TLSv1_1", SSL_OP_NO_TLSv1_1
},
#else
{ "NO_TLSv1_1", 0 },
#endif
-#if SSL_OP_NO_TLSv1_2
+#ifdef SSL_OP_NO_TLSv1_2
{
"NO_TLSv1_2", SSL_OP_NO_TLSv1_2
},
#else
{ "NO_TLSv1_2", 0 },
#endif
-#if SSL_OP_NO_TLSv1_3
+#ifdef SSL_OP_NO_TLSv1_3
{
"NO_TLSv1_3", SSL_OP_NO_TLSv1_3
},
#else
{ "NO_TLSv1_3", 0 },
#endif
-#if SSL_OP_NO_COMPRESSION
+#ifdef SSL_OP_NO_COMPRESSION
{
"No_Compression", SSL_OP_NO_COMPRESSION
},
#endif
-#if SSL_OP_NO_TICKET
+#ifdef SSL_OP_NO_TICKET
{
"NO_TICKET", SSL_OP_NO_TICKET
},
#endif
-#if SSL_OP_SINGLE_ECDH_USE
+#ifdef SSL_OP_SINGLE_ECDH_USE
{
"SINGLE_ECDH_USE", SSL_OP_SINGLE_ECDH_USE
},
@@ -512,7 +512,7 @@ Security::PeerOptions::parseOptions()

}

-#if SSL_OP_NO_SSLv2
+#ifdef SSL_OP_NO_SSLv2
// compliance with RFC 6176: Prohibiting Secure Sockets Layer (SSL) Version 2.0
op = op | SSL_OP_NO_SSLv2;
#endif
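Note on the OpenSSL 3 change above: the bulk of it swaps value-based `#if SSL_OP_x` guards for existence-based `#ifdef SSL_OP_x`. In OpenSSL 3 many SSL_OP_* names remain defined, but either as 0 (retired bug workarounds) or as expressions built from uint64_t casts that the C preprocessor cannot evaluate, so the old `#if` form either drops the table entry or breaks preprocessing; `#ifdef` only asks whether the macro exists. A small hedged illustration follows; FAKE_OPT is an invented macro, not an OpenSSL name.

    // Sketch: why "#ifdef MACRO" and "#if MACRO" behave differently for macros
    // that are defined but expand to 0 or to non-preprocessor-evaluable casts.
    #include <stdint.h>
    #include <cstdio>

    #define FAKE_OPT ((uint64_t)1 << 5)   // defined, but not a plain literal

    int main() {
    #ifdef FAKE_OPT                        // passes: only tests "is it defined?"
        std::printf("FAKE_OPT = %llu\n", (unsigned long long)FAKE_OPT);
    #endif
        // "#if FAKE_OPT" would instead make the preprocessor evaluate the
        // expansion; it treats "uint64_t" as an ordinary identifier, so the
        // test is rejected or gives the wrong answer, which is why the patch
        // prefers #ifdef for the SSL_OP_* option table.
        return 0;
    }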
24
squid-5.1-test-store-cppsuite.patch
Normal file
24
squid-5.1-test-store-cppsuite.patch
Normal file
@ -0,0 +1,24 @@
diff --git a/src/tests/testStoreHashIndex.cc b/src/tests/testStoreHashIndex.cc
index 0564380..fcd60b9 100644
--- a/src/tests/testStoreHashIndex.cc
+++ b/src/tests/testStoreHashIndex.cc
@@ -102,6 +102,8 @@ void commonInit()
if (inited)
return;

+ inited = true;
+
Mem::Init();

Config.Store.avgObjectSize = 1024;
@@ -109,6 +111,10 @@ void commonInit()
Config.Store.objectsPerBucket = 20;

Config.Store.maxObjectSize = 2048;
+
+ Config.memShared.defaultTo(false);
+
+ Config.store_dir_select_algorithm = xstrdup("round-robin");
}

/* TODO make this a cbdata class */
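Note on the test-suite change above: commonInit() now marks itself as done up front, so repeated calls become no-ops, and it pins the store defaults the tests rely on (shared memory off, round-robin cache_dir selection). A minimal sketch of that one-shot guard pattern, with invented names rather than Squid's test code:

    // Sketch: one-shot initializer guard; the flag must be set inside the
    // function, otherwise every caller repeats the setup.
    #include <cstdio>

    static bool inited = false;

    static void commonInitSketch() {
        if (inited)
            return;
        inited = true;                    // record that setup has run
        std::printf("one-time setup runs here\n");
    }

    int main() {
        commonInitSketch();
        commonInitSketch();               // second call is now a no-op
        return 0;
    }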
120
squid-5.5-CVE-2021-46784.patch
Normal file
120
squid-5.5-CVE-2021-46784.patch
Normal file
@ -0,0 +1,120 @@
diff --git a/src/gopher.cc b/src/gopher.cc
index 576a3f7..2645b6b 100644
--- a/src/gopher.cc
+++ b/src/gopher.cc
@@ -364,7 +364,6 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
char *lpos = NULL;
char *tline = NULL;
LOCAL_ARRAY(char, line, TEMP_BUF_SIZE);
- LOCAL_ARRAY(char, tmpbuf, TEMP_BUF_SIZE);
char *name = NULL;
char *selector = NULL;
char *host = NULL;
@@ -374,7 +373,6 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
char gtype;
StoreEntry *entry = NULL;

- memset(tmpbuf, '\0', TEMP_BUF_SIZE);
memset(line, '\0', TEMP_BUF_SIZE);

entry = gopherState->entry;
@@ -409,7 +407,7 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
return;
}

- String outbuf;
+ SBuf outbuf;

if (!gopherState->HTML_header_added) {
if (gopherState->conversion == GopherStateData::HTML_CSO_RESULT)
@@ -577,34 +575,34 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
break;
}

- memset(tmpbuf, '\0', TEMP_BUF_SIZE);
-
if ((gtype == GOPHER_TELNET) || (gtype == GOPHER_3270)) {
if (strlen(escaped_selector) != 0)
- snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s@%s%s%s/\">%s</A>\n",
- icon_url, escaped_selector, rfc1738_escape_part(host),
- *port ? ":" : "", port, html_quote(name));
+ outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s@%s%s%s/\">%s</A>\n",
+ icon_url, escaped_selector, rfc1738_escape_part(host),
+ *port ? ":" : "", port, html_quote(name));
else
- snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s%s%s/\">%s</A>\n",
- icon_url, rfc1738_escape_part(host), *port ? ":" : "",
- port, html_quote(name));
+ outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"telnet://%s%s%s/\">%s</A>\n",
+ icon_url, rfc1738_escape_part(host), *port ? ":" : "",
+ port, html_quote(name));

} else if (gtype == GOPHER_INFO) {
- snprintf(tmpbuf, TEMP_BUF_SIZE, "\t%s\n", html_quote(name));
+ outbuf.appendf("\t%s\n", html_quote(name));
} else {
if (strncmp(selector, "GET /", 5) == 0) {
/* WWW link */
- snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"http://%s/%s\">%s</A>\n",
- icon_url, host, rfc1738_escape_unescaped(selector + 5), html_quote(name));
+ outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"http://%s/%s\">%s</A>\n",
+ icon_url, host, rfc1738_escape_unescaped(selector + 5), html_quote(name));
+ } else if (gtype == GOPHER_WWW) {
+ outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"gopher://%s/%c%s\">%s</A>\n",
+ icon_url, rfc1738_escape_unescaped(selector), html_quote(name));
} else {
/* Standard link */
- snprintf(tmpbuf, TEMP_BUF_SIZE, "<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"gopher://%s/%c%s\">%s</A>\n",
- icon_url, host, gtype, escaped_selector, html_quote(name));
+ outbuf.appendf("<IMG border=\"0\" SRC=\"%s\"> <A HREF=\"gopher://%s/%c%s\">%s</A>\n",
+ icon_url, host, gtype, escaped_selector, html_quote(name));
}
}

safe_free(escaped_selector);
- outbuf.append(tmpbuf);
} else {
memset(line, '\0', TEMP_BUF_SIZE);
continue;
@@ -637,13 -635,12 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
break;

if (gopherState->cso_recno != recno) {
- snprintf(tmpbuf, TEMP_BUF_SIZE, "</PRE><HR noshade size=\"1px\"><H2>Record# %d<br><i>%s</i></H2>\n<PRE>", recno, html_quote(result));
+ outbuf.appendf("</PRE><HR noshade size=\"1px\"><H2>Record# %d<br><i>%s</i></H2>\n<PRE>", recno, html_quote(result));
gopherState->cso_recno = recno;
} else {
- snprintf(tmpbuf, TEMP_BUF_SIZE, "%s\n", html_quote(result));
+ outbuf.appendf("%s\n", html_quote(result));
}

- outbuf.append(tmpbuf);
break;
} else {
int code;
@@ -671,8 +668,7 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)

case 502: { /* Too Many Matches */
/* Print the message the server returns */
- snprintf(tmpbuf, TEMP_BUF_SIZE, "</PRE><HR noshade size=\"1px\"><H2>%s</H2>\n<PRE>", html_quote(result));
- outbuf.append(tmpbuf);
+ outbuf.appendf("</PRE><HR noshade size=\"1px\"><H2>%s</H2>\n<PRE>", html_quote(result));
break;
}

@@ -688,13 +684,12 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)

} /* while loop */

- if (outbuf.size() > 0) {
- entry->append(outbuf.rawBuf(), outbuf.size());
+ if (outbuf.length() > 0) {
+ entry->append(outbuf.rawContent(), outbuf.length());
/* now let start sending stuff to client */
entry->flush();
}

- outbuf.clean();
return;
}
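Note on the CVE-2021-46784 change above: the gopher-to-HTML converter stops staging each snippet in a fixed TEMP_BUF_SIZE tmpbuf with snprintf() and instead formats straight into a growing SBuf via appendf(), removing the intermediate copy that the advisory concerns. The sketch below shows the same shape with std::string standing in for Squid's SBuf; appendf() here is an invented helper written for illustration, not Squid's API.

    // Sketch: format into a growing buffer instead of a fixed stack buffer.
    #include <cstdarg>
    #include <cstddef>
    #include <cstdio>
    #include <string>

    static void appendf(std::string &out, const char *fmt, ...) {
        va_list ap;
        va_start(ap, fmt);
        va_list apCopy;
        va_copy(apCopy, ap);              // keep a copy for the retry path
        char stack[256];
        const int need = std::vsnprintf(stack, sizeof(stack), fmt, ap);
        if (need >= 0 && static_cast<std::size_t>(need) < sizeof(stack)) {
            out.append(stack, static_cast<std::size_t>(need));
        } else if (need >= 0) {
            // too big for the stack buffer: format again into exact storage,
            // so nothing is truncated regardless of length
            std::string big(static_cast<std::size_t>(need) + 1, '\0');
            std::vsnprintf(&big[0], big.size(), fmt, apCopy);
            out.append(big.data(), static_cast<std::size_t>(need));
        }
        va_end(apCopy);
        va_end(ap);
    }

    int main() {
        std::string outbuf;
        appendf(outbuf, "<H2>Record# %d<br><i>%s</i></H2>\n", 1, "example");
        std::printf("%s", outbuf.c_str());
        return 0;
    }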
16
squid.logrotate
Normal file
16
squid.logrotate
Normal file
@ -0,0 +1,16 @@
/var/log/squid/*.log {
    weekly
    rotate 5
    compress
    notifempty
    missingok
    nocreate
    sharedscripts
    postrotate
      # Asks squid to reopen its logs. (logfile_rotate 0 is set in squid.conf)
      # errors redirected to make it silent if squid is not running
      /usr/sbin/squid -k rotate 2>/dev/null
      # Wait a little to allow Squid to catch up before the logs is compressed
      sleep 1
    endscript
}
7
squid.nm
Executable file
7
squid.nm
Executable file
@ -0,0 +1,7 @@
#!/bin/sh

case "$2" in
    up|down|vpn-up|vpn-down)
        /bin/systemctl -q reload squid.service || :
        ;;
esac
3
squid.pam
Normal file
3
squid.pam
Normal file
@ -0,0 +1,3 @@
#%PAM-1.0
auth include password-auth
account include password-auth
18
squid.service
Normal file
18
squid.service
Normal file
@ -0,0 +1,18 @@
[Unit]
Description=Squid caching proxy
Documentation=man:squid(8)
After=network.target network-online.target nss-lookup.target

[Service]
Type=notify
LimitNOFILE=16384
PIDFile=/run/squid.pid
EnvironmentFile=/etc/sysconfig/squid
ExecStartPre=/usr/libexec/squid/cache_swap.sh
ExecStart=/usr/sbin/squid --foreground $SQUID_OPTS -f ${SQUID_CONF}
ExecReload=/usr/bin/kill -HUP $MAINPID
KillMode=mixed
NotifyAccess=all

[Install]
WantedBy=multi-user.target
1704
squid.spec
Normal file
1704
squid.spec
Normal file
File diff suppressed because it is too large
9
squid.sysconfig
Normal file
9
squid.sysconfig
Normal file
@ -0,0 +1,9 @@
# default squid options
SQUID_OPTS=""

# Time to wait for Squid to shut down when asked. Should not be necessary
# most of the time.
SQUID_SHUTDOWN_TIMEOUT=100

# default squid conf file
SQUID_CONF="/etc/squid/squid.conf"