import squid-4.11-3.module+el8.3.0+7851+7808b5f9

parent 9152c92e75
commit 11b0ae8e0d

.gitignore (vendored)
@@ -1 +1 @@
-SOURCES/squid-4.4.tar.xz
+SOURCES/squid-4.11.tar.xz

@@ -1 +1 @@
-0ab6b133f65866d825bf72cbbe8cef209768b2fa SOURCES/squid-4.4.tar.xz
+053277bf5497163ffc9261b9807abda5959bb6fc SOURCES/squid-4.11.tar.xz
@@ -1,95 +0,0 @@
------------------------------------------------------------
revno: 14311
revision-id: squid3@treenet.co.nz-20150924130537-lqwzd1z99a3l9gt4
parent: squid3@treenet.co.nz-20150924032241-6cx3g6hwz9xfoybr
------------------------------------------------------------
revno: 14311
revision-id: squid3@treenet.co.nz-20150924130537-lqwzd1z99a3l9gt4
parent: squid3@treenet.co.nz-20150924032241-6cx3g6hwz9xfoybr
fixes bug: http://bugs.squid-cache.org/show_bug.cgi?id=4323
author: Francesco Chemolli <kinkie@squid-cache.org>
committer: Amos Jeffries <squid3@treenet.co.nz>
branch nick: trunk
timestamp: Thu 2015-09-24 06:05:37 -0700
message:
  Bug 4323: Netfilter broken cross-includes with Linux 4.2
------------------------------------------------------------
# Bazaar merge directive format 2 (Bazaar 0.90)
# revision_id: squid3@treenet.co.nz-20150924130537-lqwzd1z99a3l9gt4
# target_branch: http://bzr.squid-cache.org/bzr/squid3/trunk/
# testament_sha1: c67cfca81040f3845d7c4caf2f40518511f14d0b
# timestamp: 2015-09-24 13:06:33 +0000
# source_branch: http://bzr.squid-cache.org/bzr/squid3/trunk
# base_revision_id: squid3@treenet.co.nz-20150924032241-\
# 6cx3g6hwz9xfoybr
#
# Begin patch
=== modified file 'compat/os/linux.h'
--- compat/os/linux.h 2015-01-13 07:25:36 +0000
+++ compat/os/linux.h 2015-09-24 13:05:37 +0000
@@ -30,6 +30,21 @@
 #endif

 /*
+ * Netfilter header madness. (see Bug 4323)
+ *
+ * Netfilter have a history of defining their own versions of network protocol
+ * primitives without sufficient protection against the POSIX defines which are
+ * aways present in Linux.
+ *
+ * netinet/in.h must be included before any other sys header in order to properly
+ * activate include guards in <linux/libc-compat.h> the kernel maintainers added
+ * to workaround it.
+ */
+#if HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
+/*
  * sys/capability.h is only needed in Linux apparently.
  *
  * HACK: LIBCAP_BROKEN Ugly glue to get around linux header madness colliding with glibc
fixes bug: http://bugs.squid-cache.org/show_bug.cgi?id=4323
author: Francesco Chemolli <kinkie@squid-cache.org>
committer: Amos Jeffries <squid3@treenet.co.nz>
branch nick: trunk
timestamp: Thu 2015-09-24 06:05:37 -0700
message:
  Bug 4323: Netfilter broken cross-includes with Linux 4.2
------------------------------------------------------------
# Bazaar merge directive format 2 (Bazaar 0.90)
# revision_id: squid3@treenet.co.nz-20150924130537-lqwzd1z99a3l9gt4
# target_branch: http://bzr.squid-cache.org/bzr/squid3/trunk/
# testament_sha1: c67cfca81040f3845d7c4caf2f40518511f14d0b
# timestamp: 2015-09-24 13:06:33 +0000
# source_branch: http://bzr.squid-cache.org/bzr/squid3/trunk
# base_revision_id: squid3@treenet.co.nz-20150924032241-\
# 6cx3g6hwz9xfoybr
#
# Begin patch
=== modified file 'compat/os/linux.h'
--- compat/os/linux.h 2015-01-13 07:25:36 +0000
+++ compat/os/linux.h 2015-09-24 13:05:37 +0000
@@ -30,6 +30,21 @@
 #endif

 /*
+ * Netfilter header madness. (see Bug 4323)
+ *
+ * Netfilter have a history of defining their own versions of network protocol
+ * primitives without sufficient protection against the POSIX defines which are
+ * aways present in Linux.
+ *
+ * netinet/in.h must be included before any other sys header in order to properly
+ * activate include guards in <linux/libc-compat.h> the kernel maintainers added
+ * to workaround it.
+ */
+#if HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
+/*
  * sys/capability.h is only needed in Linux apparently.
  *
  * HACK: LIBCAP_BROKEN Ugly glue to get around linux header madness colliding with glibc
SOURCES/squid-4.11-CVE-2020-14058.patch (new file, 295 lines)
@@ -0,0 +1,295 @@
commit 93f5fda134a2a010b84ffedbe833d670e63ba4be
Author: Christos Tsantilas <christos@chtsanti.net>
Date: 2020-05-15 04:54:54 +0000

    Fix sending of unknown validation errors to cert. validator (#633)

    Squid may be compiled with an OpenSSL release introducing X509
    validation errors that Squid does not have the names for. Send their
    integer codes.

    Also sync Squid certificate verification errors with OpenSSL v1.1.1g.

    This is a Measurement Factory project.

diff --git a/src/format/Format.cc b/src/format/Format.cc
index 8c5574b..4b4ad42 100644
--- a/src/format/Format.cc
+++ b/src/format/Format.cc
@@ -322,15 +322,6 @@ log_quoted_string(const char *str, char *out)
     *p = '\0';
 }

-#if USE_OPENSSL
-static char *
-sslErrorName(Security::ErrorCode err, char *buf, size_t size)
-{
-    snprintf(buf, size, "SSL_ERR=%d", err);
-    return buf;
-}
-#endif
-
 /// XXX: Misnamed. TODO: Split <h (and this function) to distinguish received
 /// headers from sent headers rather than failing to distinguish requests from responses.
 /// \retval HttpReply sent to the HTTP client (access.log and default context).
@@ -959,9 +950,7 @@ Format::Format::assemble(MemBuf &mb, const AccessLogEntry::Pointer &al, int logS
         case LFT_SQUID_ERROR_DETAIL:
 #if USE_OPENSSL
             if (al->request && al->request->errType == ERR_SECURE_CONNECT_FAIL) {
-                out = Ssl::GetErrorName(al->request->errDetail);
-                if (!out)
-                    out = sslErrorName(al->request->errDetail, tmp, sizeof(tmp));
+                out = Ssl::GetErrorName(al->request->errDetail, true);
             } else
 #endif
                 if (al->request && al->request->errDetail != ERR_DETAIL_NONE) {
@@ -1263,10 +1252,7 @@ Format::Format::assemble(MemBuf &mb, const AccessLogEntry::Pointer &al, int logS
             for (const Security::CertErrors *sslError = srvBump->sslErrors(); sslError; sslError = sslError->next) {
                 if (!sb.isEmpty())
                     sb.append(separator);
-                if (const char *errorName = Ssl::GetErrorName(sslError->element.code))
-                    sb.append(errorName);
-                else
-                    sb.append(sslErrorName(sslError->element.code, tmp, sizeof(tmp)));
+                sb.append(Ssl::GetErrorName(sslError->element.code, true));
                 if (sslError->element.depth >= 0)
                     sb.appendf("@depth=%d", sslError->element.depth);
             }
diff --git a/src/ssl/ErrorDetail.cc b/src/ssl/ErrorDetail.cc
index ddd61fd..00eb0e2 100644
--- a/src/ssl/ErrorDetail.cc
+++ b/src/ssl/ErrorDetail.cc
@@ -233,6 +233,9 @@ static SslErrorEntry TheSslErrorArray[] = {
         "X509_V_ERR_SUBTREE_MINMAX"
     },
 #endif
+    { X509_V_ERR_APPLICATION_VERIFICATION, //50
+        "X509_V_ERR_APPLICATION_VERIFICATION"
+    },
 #if defined(X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE)
     {
         X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE, //51
@@ -257,9 +260,132 @@ static SslErrorEntry TheSslErrorArray[] = {
         "X509_V_ERR_CRL_PATH_VALIDATION_ERROR"
     },
 #endif
-    { X509_V_ERR_APPLICATION_VERIFICATION,
-        "X509_V_ERR_APPLICATION_VERIFICATION"
+#if defined(X509_V_ERR_PATH_LOOP)
+    {
+        X509_V_ERR_PATH_LOOP, //55
+        "X509_V_ERR_PATH_LOOP"
+    },
+#endif
+#if defined(X509_V_ERR_SUITE_B_INVALID_VERSION)
+    {
+        X509_V_ERR_SUITE_B_INVALID_VERSION, //56
+        "X509_V_ERR_SUITE_B_INVALID_VERSION"
+    },
+#endif
+#if defined(X509_V_ERR_SUITE_B_INVALID_ALGORITHM)
+    {
+        X509_V_ERR_SUITE_B_INVALID_ALGORITHM, //57
+        "X509_V_ERR_SUITE_B_INVALID_ALGORITHM"
+    },
+#endif
+#if defined(X509_V_ERR_SUITE_B_INVALID_CURVE)
+    {
+        X509_V_ERR_SUITE_B_INVALID_CURVE, //58
+        "X509_V_ERR_SUITE_B_INVALID_CURVE"
+    },
+#endif
+#if defined(X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM)
+    {
+        X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM, //59
+        "X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM"
+    },
+#endif
+#if defined(X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED)
+    {
+        X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED, //60
+        "X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED"
+    },
+#endif
+#if defined(X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256)
+    {
+        X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256, //61
+        "X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256"
+    },
+#endif
+#if defined(X509_V_ERR_HOSTNAME_MISMATCH)
+    {
+        X509_V_ERR_HOSTNAME_MISMATCH, //62
+        "X509_V_ERR_HOSTNAME_MISMATCH"
+    },
+#endif
+#if defined(X509_V_ERR_EMAIL_MISMATCH)
+    {
+        X509_V_ERR_EMAIL_MISMATCH, //63
+        "X509_V_ERR_EMAIL_MISMATCH"
+    },
+#endif
+#if defined(X509_V_ERR_IP_ADDRESS_MISMATCH)
+    {
+        X509_V_ERR_IP_ADDRESS_MISMATCH, //64
+        "X509_V_ERR_IP_ADDRESS_MISMATCH"
+    },
+#endif
+#if defined(X509_V_ERR_DANE_NO_MATCH)
+    {
+        X509_V_ERR_DANE_NO_MATCH, //65
+        "X509_V_ERR_DANE_NO_MATCH"
     },
+#endif
+#if defined(X509_V_ERR_EE_KEY_TOO_SMALL)
+    {
+        X509_V_ERR_EE_KEY_TOO_SMALL, //66
+        "X509_V_ERR_EE_KEY_TOO_SMALL"
+    },
+#endif
+#if defined(X509_V_ERR_CA_KEY_TOO_SMALL)
+    {
+        X509_V_ERR_CA_KEY_TOO_SMALL, //67
+        "X509_V_ERR_CA_KEY_TOO_SMALL"
+    },
+#endif
+#if defined(X509_V_ERR_CA_MD_TOO_WEAK)
+    {
+        X509_V_ERR_CA_MD_TOO_WEAK, //68
+        "X509_V_ERR_CA_MD_TOO_WEAK"
+    },
+#endif
+#if defined(X509_V_ERR_INVALID_CALL)
+    {
+        X509_V_ERR_INVALID_CALL, //69
+        "X509_V_ERR_INVALID_CALL"
+    },
+#endif
+#if defined(X509_V_ERR_STORE_LOOKUP)
+    {
+        X509_V_ERR_STORE_LOOKUP, //70
+        "X509_V_ERR_STORE_LOOKUP"
+    },
+#endif
+#if defined(X509_V_ERR_NO_VALID_SCTS)
+    {
+        X509_V_ERR_NO_VALID_SCTS, //71
+        "X509_V_ERR_NO_VALID_SCTS"
+    },
+#endif
+#if defined(X509_V_ERR_PROXY_SUBJECT_NAME_VIOLATION)
+    {
+        X509_V_ERR_PROXY_SUBJECT_NAME_VIOLATION, //72
+        "X509_V_ERR_PROXY_SUBJECT_NAME_VIOLATION"
+    },
+#endif
+#if defined(X509_V_ERR_OCSP_VERIFY_NEEDED)
+    {
+        X509_V_ERR_OCSP_VERIFY_NEEDED, //73
+        "X509_V_ERR_OCSP_VERIFY_NEEDED"
+    },
+#endif
+#if defined(X509_V_ERR_OCSP_VERIFY_FAILED)
+    {
+        X509_V_ERR_OCSP_VERIFY_FAILED, //74
+        "X509_V_ERR_OCSP_VERIFY_FAILED"
+    },
+#endif
+#if defined(X509_V_ERR_OCSP_CERT_UNKNOWN)
+    {
+        X509_V_ERR_OCSP_CERT_UNKNOWN, //75
+        "X509_V_ERR_OCSP_CERT_UNKNOWN"
+    },
+#endif
     { SSL_ERROR_NONE, "SSL_ERROR_NONE"},
     {SSL_ERROR_NONE, NULL}
 };
@@ -286,6 +412,27 @@ static const char *OptionalSslErrors[] = {
     "X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX",
     "X509_V_ERR_UNSUPPORTED_NAME_SYNTAX",
     "X509_V_ERR_CRL_PATH_VALIDATION_ERROR",
+    "X509_V_ERR_PATH_LOOP",
+    "X509_V_ERR_SUITE_B_INVALID_VERSION",
+    "X509_V_ERR_SUITE_B_INVALID_ALGORITHM",
+    "X509_V_ERR_SUITE_B_INVALID_CURVE",
+    "X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM",
+    "X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED",
+    "X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256",
+    "X509_V_ERR_HOSTNAME_MISMATCH",
+    "X509_V_ERR_EMAIL_MISMATCH",
+    "X509_V_ERR_IP_ADDRESS_MISMATCH",
+    "X509_V_ERR_DANE_NO_MATCH",
+    "X509_V_ERR_EE_KEY_TOO_SMALL",
+    "X509_V_ERR_CA_KEY_TOO_SMALL",
+    "X509_V_ERR_CA_MD_TOO_WEAK",
+    "X509_V_ERR_INVALID_CALL",
+    "X509_V_ERR_STORE_LOOKUP",
+    "X509_V_ERR_NO_VALID_SCTS",
+    "X509_V_ERR_PROXY_SUBJECT_NAME_VIOLATION",
+    "X509_V_ERR_OCSP_VERIFY_NEEDED",
+    "X509_V_ERR_OCSP_VERIFY_FAILED",
+    "X509_V_ERR_OCSP_CERT_UNKNOWN",
     NULL
 };

@@ -390,7 +537,7 @@ Ssl::ParseErrorString(const char *name, Security::Errors &errors)
     return false; // not reached
 }

-const char *Ssl::GetErrorName(Security::ErrorCode value)
+const char *Ssl::GetErrorName(Security::ErrorCode value, const bool prefixRawCode)
 {
     if (TheSslErrors.empty())
         loadSslErrorMap();
@@ -399,7 +546,9 @@ const char *Ssl::GetErrorName(Security::ErrorCode value)
     if (it != TheSslErrors.end())
         return it->second->name;

-    return NULL;
+    static char tmpBuffer[128];
+    snprintf(tmpBuffer, sizeof(tmpBuffer), "%s%d", prefixRawCode ? "SSL_ERR=" : "", (int)value);
+    return tmpBuffer;
 }

 bool
@@ -529,21 +678,14 @@ const char *Ssl::ErrorDetail::notafter() const
  */
 const char *Ssl::ErrorDetail::err_code() const
 {
-    static char tmpBuffer[64];
     // We can use the GetErrorName but using the detailEntry is faster,
     // so try it first.
-    const char *err = detailEntry.name.termedBuf();
+    if (const char *err = detailEntry.name.termedBuf())
+        return err;

     // error details not loaded yet or not defined in error_details.txt,
     // try the GetErrorName...
-    if (!err)
-        err = GetErrorName(error_no);
-
-    if (!err) {
-        snprintf(tmpBuffer, 64, "%d", (int)error_no);
-        err = tmpBuffer;
-    }
-    return err;
+    return GetErrorName(error_no);
 }

 /**
diff --git a/src/ssl/ErrorDetail.h b/src/ssl/ErrorDetail.h
index 48dc405..0eec0a9 100644
--- a/src/ssl/ErrorDetail.h
+++ b/src/ssl/ErrorDetail.h
@@ -26,8 +26,9 @@ bool ParseErrorString(const char *name, Security::Errors &);
 /// The Security::ErrorCode code of the error described by "name".
 Security::ErrorCode GetErrorCode(const char *name);

-/// The string representation of the TLS error "value"
-const char *GetErrorName(Security::ErrorCode value);
+/// \return string representation of a known TLS error (or a raw error code)
+/// \param prefixRawCode whether to prefix raw codes with "SSL_ERR="
+const char *GetErrorName(Security::ErrorCode value, const bool prefixRawCode = false);

 /// A short description of the TLS error "value"
 const char *GetErrorDescr(Security::ErrorCode value);
SOURCES/squid-4.11-CVE-2020-15049.patch (new file, 105 lines)
@@ -0,0 +1,105 @@
commit ea12a34d338b962707d5078d6d1fc7c6eb119a22
Author: Alex Rousskov <rousskov@measurement-factory.com>
Date: 2020-05-13 14:05:00 +0000

    Validate Content-Length value prefix (#629)

    The new code detects all invalid Content-Length prefixes but the old
    code was already rejecting most invalid prefixes using strtoll(). The
    newly covered (and now rejected) invalid characters are

    * explicit "+" sign;
    * explicit "-" sign in "-0" values;
    * isspace(3) characters that are not (relaxed) OWS characters.

    In most deployment environments, the last set is probably empty because
    the relaxed OWS set has all the POSIX/C isspace(3) characters but the
    new line, and the new line is unlikely to sneak in past other checks.

    Thank you, Amit Klein <amit.klein@safebreach.com>, for elevating the
    importance of this 2016 TODO (added in commit a1b9ec2).

diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index 36957f2..c10a221 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -25,6 +25,7 @@ Thank you!
     Alex Wu <alex_wu2012@hotmail.com>
     Alin Nastac <mrness@gentoo.org>
     Alter <alter@alter.org.ua>
+    Amit Klein <amit.klein@safebreach.com>
     Amos Jeffries
     Amos Jeffries <amosjeffries@squid-cache.org>
     Amos Jeffries <squid3@treenet.co.nz>
diff --git a/src/http/ContentLengthInterpreter.cc b/src/http/ContentLengthInterpreter.cc
index 3fdf7de..a3741eb 100644
--- a/src/http/ContentLengthInterpreter.cc
+++ b/src/http/ContentLengthInterpreter.cc
@@ -28,6 +28,24 @@ Http::ContentLengthInterpreter::ContentLengthInterpreter(const int aDebugLevel):
 {
 }

+/// checks whether all characters before the Content-Length number are allowed
+/// \returns the start of the digit sequence (or nil on errors)
+const char *
+Http::ContentLengthInterpreter::findDigits(const char *prefix, const char * const valueEnd) const
+{
+    // skip leading OWS in RFC 7230's `OWS field-value OWS`
+    const CharacterSet &whitespace = Http::One::Parser::WhitespaceCharacters();
+    while (prefix < valueEnd) {
+        const auto ch = *prefix;
+        if (CharacterSet::DIGIT[ch])
+            return prefix; // common case: a pre-trimmed field value
+        if (!whitespace[ch])
+            return nullptr; // (trimmed) length does not start with a digit
+        ++prefix;
+    }
+    return nullptr; // empty or whitespace-only value
+}
+
 /// checks whether all characters after the Content-Length are allowed
 bool
 Http::ContentLengthInterpreter::goodSuffix(const char *suffix, const char * const end) const
@@ -52,10 +70,19 @@ Http::ContentLengthInterpreter::checkValue(const char *rawValue, const int value
 {
     Must(!sawBad);

+    const auto valueEnd = rawValue + valueSize;
+
+    const auto digits = findDigits(rawValue, valueEnd);
+    if (!digits) {
+        debugs(55, debugLevel, "WARNING: Leading garbage or empty value in" << Raw("Content-Length", rawValue, valueSize));
+        sawBad = true;
+        return false;
+    }
+
     int64_t latestValue = -1;
     char *suffix = nullptr;
-    // TODO: Handle malformed values with leading signs (e.g., "-0" or "+1").
-    if (!httpHeaderParseOffset(rawValue, &latestValue, &suffix)) {
+
+    if (!httpHeaderParseOffset(digits, &latestValue, &suffix)) {
         debugs(55, DBG_IMPORTANT, "WARNING: Malformed" << Raw("Content-Length", rawValue, valueSize));
         sawBad = true;
         return false;
@@ -68,7 +95,7 @@ Http::ContentLengthInterpreter::checkValue(const char *rawValue, const int value
     }

     // check for garbage after the number
-    if (!goodSuffix(suffix, rawValue + valueSize)) {
+    if (!goodSuffix(suffix, valueEnd)) {
         debugs(55, debugLevel, "WARNING: Trailing garbage in" << Raw("Content-Length", rawValue, valueSize));
         sawBad = true;
         return false;
diff --git a/src/http/ContentLengthInterpreter.h b/src/http/ContentLengthInterpreter.h
index ce36e22..f22de91 100644
--- a/src/http/ContentLengthInterpreter.h
+++ b/src/http/ContentLengthInterpreter.h
@@ -46,6 +46,7 @@ public:
     bool sawGood;

 protected:
+    const char *findDigits(const char *prefix, const char *valueEnd) const;
     bool goodSuffix(const char *suffix, const char * const end) const;
     bool checkValue(const char *start, const int size);
     bool checkList(const String &list);
@@ -1,8 +1,8 @@
 diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc
-index 284a057..cd9d71f 100644
+index 67543a4..19efc6d 100644
 --- a/src/HttpHeader.cc
 +++ b/src/HttpHeader.cc
-@@ -446,18 +446,6 @@ HttpHeader::parse(const char *header_start, size_t hdrLen)
+@@ -445,18 +445,6 @@ HttpHeader::parse(const char *header_start, size_t hdrLen)
      return 0;
  }

@@ -21,8 +21,8 @@ index 284a057..cd9d71f 100644
      addEntry(e);
  }

-@@ -1418,6 +1406,20 @@ HttpHeaderEntry::parse(const char *field_start, const char *field_end)
-         return NULL;
+@@ -1451,6 +1439,20 @@ HttpHeaderEntry::parse(const char *field_start, const char *field_end, const htt
+         }
      }

      /* RFC 7230 section 3.2:
@@ -42,15 +42,3 @@ index 284a057..cd9d71f 100644
      /* now we know we can parse it */

      debugs(55, 9, "parsing HttpHeaderEntry: near '" << getStringPrefix(field_start, field_end-field_start) << "'");
-diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc
-index adeea9c..85c1c00 100644
---- a/src/HttpHeader.cc
-+++ b/src/HttpHeader.cc
-@@ -13,6 +13,7 @@
- #include "base64.h"
- #include "globals.h"
- #include "http/ContentLengthInterpreter.h"
-+#include "base/CharacterSet.h"
- #include "HttpHdrCc.h"
- #include "HttpHdrContRange.h"
- #include "HttpHdrScTarget.h" // also includes HttpHdrSc.h
@@ -1,5 +1,5 @@
 diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc
-index cd9d71f..adeea9c 100644
+index dc6e0ff..67543a4 100644
 --- a/src/HttpHeader.cc
 +++ b/src/HttpHeader.cc
 @@ -174,6 +174,7 @@ HttpHeader::operator =(const HttpHeader &other)
@@ -18,7 +18,7 @@ index cd9d71f..adeea9c 100644
      PROF_stop(HttpHeaderClean);
  }

-@@ -455,11 +457,23 @@ HttpHeader::parse(const char *header_start, size_t hdrLen)
+@@ -464,11 +466,23 @@ HttpHeader::parse(const char *header_start, size_t hdrLen)
              Raw("header", header_start, hdrLen));
      }

@@ -44,7 +44,7 @@ index cd9d71f..adeea9c 100644
      // ensure our callers do not accidentally see bad Content-Length values
      delById(Http::HdrType::CONTENT_LENGTH);
 diff --git a/src/HttpHeader.h b/src/HttpHeader.h
-index 3b262be..2a73af4 100644
+index e3553a4..64f294a 100644
 --- a/src/HttpHeader.h
 +++ b/src/HttpHeader.h
 @@ -140,7 +140,13 @@ public:
@@ -87,10 +87,10 @@ index 3b262be..2a73af4 100644

 #endif /* SQUID_HTTPHEADER_H */
 diff --git a/src/client_side.cc b/src/client_side.cc
-index d61e278..429ce7f 100644
+index 5f5a79e..000a00b 100644
 --- a/src/client_side.cc
 +++ b/src/client_side.cc
-@@ -1552,9 +1552,7 @@ void
+@@ -1600,9 +1600,7 @@ void
  clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp, Http::Stream *context)
  {
      ClientHttpRequest *http = context->http;
@@ -100,7 +100,7 @@ index d61e278..429ce7f 100644
      bool expectBody = false;

      // We already have the request parsed and checked, so we
-@@ -1611,13 +1609,7 @@ clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp,
+@@ -1659,13 +1657,7 @@ clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp,
          request->http_ver.minor = http_ver.minor;
      }

@@ -115,7 +115,7 @@ index d61e278..429ce7f 100644

      mustReplyToOptions = (request->method == Http::METHOD_OPTIONS) &&
                           (request->header.getInt64(Http::HdrType::MAX_FORWARDS) == 0);
-@@ -1634,6 +1626,7 @@ clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp,
+@@ -1682,6 +1674,7 @@ clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp,
          return;
      }

@@ -124,7 +124,7 @@ index d61e278..429ce7f 100644
          clientStreamNode *node = context->getClientReplyContext();
          clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
 diff --git a/src/http.cc b/src/http.cc
-index 1ed98ca..68594aa 100644
+index 9654c4a..6f4d3b2 100644
 --- a/src/http.cc
 +++ b/src/http.cc
 @@ -1292,6 +1292,9 @@ HttpStateData::continueAfterParsingHeader()
SOURCES/squid-4.11-CVE-2020-24606.patch (new file, 34 lines)
@@ -0,0 +1,34 @@
commit b789e719affbb0a6ff9c22095f6ca8db6a5f4926
Author: Eduard Bagdasaryan <eduard.bagdasaryan@measurement-factory.com>
Date: 2020-07-27 15:28:31 +0000

    Fix livelocking in peerDigestHandleReply (#698)

    peerDigestHandleReply() was missing a premature EOF check. The existing
    peerDigestFetchedEnough() cannot detect EOF because it does not have
    access to receivedData.length used to indicate the EOF condition. We did
    not adjust peerDigestFetchedEnough() because it is abused to check both
    post-I/O state and the state after each digest processing step. The
    latter invocations lack access to receivedData.length and should not
    really bother with EOF anyway.

diff --git a/src/peer_digest.cc b/src/peer_digest.cc
index d48340f97..265f16183 100644
--- a/src/peer_digest.cc
+++ b/src/peer_digest.cc
@@ -483,6 +483,15 @@ peerDigestHandleReply(void *data, StoreIOBuffer receivedData)

     } while (cbdataReferenceValid(fetch) && prevstate != fetch->state && fetch->bufofs > 0);

+    // Check for EOF here, thus giving the parser one extra run. We could avoid this overhead by
+    // checking at the beginning of this function. However, in this case, we would have to require
+    // that the parser does not regard EOF as a special condition (it is true now but may change
+    // in the future).
+    if (!receivedData.length) { // EOF
+        peerDigestFetchAbort(fetch, fetch->buf, "premature end of digest reply");
+        return;
+    }
+
     /* Update the copy offset */
     fetch->offset += receivedData.length;
@@ -1,5 +1,5 @@
 diff --git a/src/clients/FtpClient.cc b/src/clients/FtpClient.cc
-index 777210c..4c80511 100644
+index b665bcf..d287e55 100644
 --- a/src/clients/FtpClient.cc
 +++ b/src/clients/FtpClient.cc
 @@ -778,7 +778,8 @@ Ftp::Client::connectDataChannel()
@@ -13,7 +13,7 @@ index 777210c..4c80511 100644

  /// creates a data channel Comm close callback
 diff --git a/src/clients/FtpClient.h b/src/clients/FtpClient.h
-index 465fdb7..75dbd3b 100644
+index a76a5a0..218d696 100644
 --- a/src/clients/FtpClient.h
 +++ b/src/clients/FtpClient.h
 @@ -118,7 +118,7 @@ public:
@@ -26,7 +26,7 @@ index 465fdb7..75dbd3b 100644

      CtrlChannel ctrl;       ///< FTP control channel state
 diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc
-index a13cdda..b958b14 100644
+index 411bce9..31d3e36 100644
 --- a/src/clients/FtpGateway.cc
 +++ b/src/clients/FtpGateway.cc
 @@ -87,6 +87,13 @@ struct GatewayFlags {
@@ -56,7 +56,7 @@ index a13cdda..b958b14 100644
      int checkAuth(const HttpHeader * req_hdr);
      void checkUrlpath();
      void buildTitleUrl();
-@@ -1792,6 +1803,7 @@ ftpOpenListenSocket(Ftp::Gateway * ftpState, int fallback)
+@@ -1787,6 +1798,7 @@ ftpOpenListenSocket(Ftp::Gateway * ftpState, int fallback)
      }

      ftpState->listenForDataChannel(temp);
@@ -64,7 +64,7 @@ index a13cdda..b958b14 100644
  }

  static void
-@@ -1827,13 +1839,19 @@ ftpSendPORT(Ftp::Gateway * ftpState)
+@@ -1822,13 +1834,19 @@ ftpSendPORT(Ftp::Gateway * ftpState)
      // pull out the internal IP address bytes to send in PORT command...
      // source them from the listen_conn->local

@@ -86,7 +86,7 @@ index a13cdda..b958b14 100644
      ftpState->writeCommand(cbuf);
      ftpState->state = Ftp::Client::SENT_PORT;

-@@ -1886,14 +1904,27 @@ ftpSendEPRT(Ftp::Gateway * ftpState)
+@@ -1881,14 +1899,27 @@ ftpSendEPRT(Ftp::Gateway * ftpState)
          return;
      }

@@ -116,7 +116,7 @@ index a13cdda..b958b14 100644

      ftpState->writeCommand(cbuf);
      ftpState->state = Ftp::Client::SENT_EPRT;
-@@ -1912,7 +1943,7 @@ ftpReadEPRT(Ftp::Gateway * ftpState)
+@@ -1907,7 +1938,7 @@ ftpReadEPRT(Ftp::Gateway * ftpState)
          ftpSendPORT(ftpState);
          return;
      }
@@ -1,7 +1,8 @@
-diff -up squid-4.0.11/src/cf.data.pre.config squid-4.0.11/src/cf.data.pre
---- squid-4.0.11/src/cf.data.pre.config	2016-06-09 22:32:57.000000000 +0200
-+++ squid-4.0.11/src/cf.data.pre	2016-07-11 21:08:35.090976840 +0200
-@@ -4658,7 +4658,7 @@ DOC_END
+diff --git a/src/cf.data.pre b/src/cf.data.pre
+index 26ef576..30d5509 100644
+--- a/src/cf.data.pre
++++ b/src/cf.data.pre
+@@ -5006,7 +5006,7 @@ DOC_END

  NAME: logfile_rotate
  TYPE: int
@@ -10,7 +11,7 @@ diff -up squid-4.0.11/src/cf.data.pre.config squid-4.0.11/src/cf.data.pre
  LOC: Config.Log.rotateNumber
  DOC_START
  	Specifies the default number of logfile rotations to make when you
-@@ -6444,11 +6444,11 @@ COMMENT_END
+@@ -6857,11 +6857,11 @@ COMMENT_END

  NAME: cache_mgr
  TYPE: string
SOURCES/squid-4.11-convert-ipv4.patch (new file, 145 lines)
@@ -0,0 +1,145 @@
From 771908d313ee9c255adfb5e4fdba4d6797c18409 Mon Sep 17 00:00:00 2001
From: Amos Jeffries <yadij@users.noreply.github.com>
Date: Thu, 7 Mar 2019 13:50:38 +0000
Subject: [PATCH] Bug 4928: Cannot convert non-IPv4 to IPv4 (#379)

... when reaching client_ip_max_connections

The client_ip_max_connections limit is checked before the TCP dst-IP is located for the newly received TCP connection. This leaves Squid unable to fetch the NFMARK or similar
details later on (they do not exist for [::]).

Move client_ip_max_connections test later in the TCP accept process to ensure dst-IP is known when the error is produced.
---
 src/comm/TcpAcceptor.cc | 82 ++++++++++++++++++++---------------------
 1 file changed, 39 insertions(+), 43 deletions(-)

diff --git a/src/comm/TcpAcceptor.cc b/src/comm/TcpAcceptor.cc
index cae92a7b1e..2109913008 100644
--- a/src/comm/TcpAcceptor.cc
+++ b/src/comm/TcpAcceptor.cc
@@ -282,16 +282,7 @@ Comm::TcpAcceptor::acceptOne()
     ConnectionPointer newConnDetails = new Connection();
     const Comm::Flag flag = oldAccept(newConnDetails);

-    /* Check for errors */
-    if (!newConnDetails->isOpen()) {
-
-        if (flag == Comm::NOMESSAGE) {
-            /* register interest again */
-            debugs(5, 5, HERE << "try later: " << conn << " handler Subscription: " << theCallSub);
-            SetSelect(conn->fd, COMM_SELECT_READ, doAccept, this, 0);
-            return;
-        }
-
+    if (flag == Comm::COMM_ERROR) {
         // A non-recoverable error; notify the caller */
         debugs(5, 5, HERE << "non-recoverable error:" << status() << " handler Subscription: " << theCallSub);
         if (intendedForUserConnections())
@@ -301,12 +292,16 @@ Comm::TcpAcceptor::acceptOne()
         return;
     }

-    newConnDetails->nfmark = Ip::Qos::getNfmarkFromConnection(newConnDetails, Ip::Qos::dirAccepted);
+    if (flag == Comm::NOMESSAGE) {
+        /* register interest again */
+        debugs(5, 5, "try later: " << conn << " handler Subscription: " << theCallSub);
+    } else {
+        debugs(5, 5, "Listener: " << conn <<
+               " accepted new connection " << newConnDetails <<
+               " handler Subscription: " << theCallSub);
+        notify(flag, newConnDetails);
+    }

-    debugs(5, 5, HERE << "Listener: " << conn <<
-           " accepted new connection " << newConnDetails <<
-           " handler Subscription: " << theCallSub);
-    notify(flag, newConnDetails);
     SetSelect(conn->fd, COMM_SELECT_READ, doAccept, this, 0);
 }

@@ -346,8 +341,8 @@ Comm::TcpAcceptor::notify(const Comm::Flag flag, const Comm::ConnectionPointer &
  *
  * \retval Comm::OK          success. details parameter filled.
  * \retval Comm::NOMESSAGE   attempted accept() but nothing useful came in.
- * \retval Comm::COMM_ERROR  an outright failure occurred.
  *                           Or this client has too many connections already.
+ * \retval Comm::COMM_ERROR  an outright failure occurred.
  */
 Comm::Flag
 Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)
@@ -382,15 +377,6 @@ Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)
     details->fd = sock;
     details->remote = *gai;

-    if ( Config.client_ip_max_connections >= 0) {
-        if (clientdbEstablished(details->remote, 0) > Config.client_ip_max_connections) {
-            debugs(50, DBG_IMPORTANT, "WARNING: " << details->remote << " attempting more than " << Config.client_ip_max_connections << " connections.");
-            Ip::Address::FreeAddr(gai);
-            PROF_stop(comm_accept);
-            return Comm::COMM_ERROR;
-        }
-    }
-
     // lookup the local-end details of this new connection
     Ip::Address::InitAddr(gai);
     details->local.setEmpty();
@@ -404,6 +390,34 @@ Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)
     details->local = *gai;
     Ip::Address::FreeAddr(gai);

+    // Perform NAT or TPROXY operations to retrieve the real client/dest IP addresses
+    if (conn->flags&(COMM_TRANSPARENT|COMM_INTERCEPTION) && !Ip::Interceptor.Lookup(details, conn)) {
+        debugs(50, DBG_IMPORTANT, "ERROR: NAT/TPROXY lookup failed to locate original IPs on " << details);
+        // Failed.
+        PROF_stop(comm_accept);
+        return Comm::COMM_ERROR;
+    }
+
+#if USE_SQUID_EUI
+    if (Eui::TheConfig.euiLookup) {
+        if (details->remote.isIPv4()) {
+            details->remoteEui48.lookup(details->remote);
+        } else if (details->remote.isIPv6()) {
+            details->remoteEui64.lookup(details->remote);
+        }
+    }
+#endif
+
+    details->nfmark = Ip::Qos::getNfmarkFromConnection(details, Ip::Qos::dirAccepted);
+
+    if (Config.client_ip_max_connections >= 0) {
+        if (clientdbEstablished(details->remote, 0) > Config.client_ip_max_connections) {
+            debugs(50, DBG_IMPORTANT, "WARNING: " << details->remote << " attempting more than " << Config.client_ip_max_connections << " connections.");
+            PROF_stop(comm_accept);
+            return Comm::NOMESSAGE;
+        }
+    }
+
     /* fdstat update */
     // XXX : these are not all HTTP requests. use a note about type and ip:port details->
     // so we end up with a uniform "(HTTP|FTP-data|HTTPS|...) remote-ip:remote-port"
@@ -425,24 +439,6 @@ Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)
     /* IFF the socket is (tproxy) transparent, pass the flag down to allow spoofing */
     F->flags.transparent = fd_table[conn->fd].flags.transparent; // XXX: can we remove this line yet?

-    // Perform NAT or TPROXY operations to retrieve the real client/dest IP addresses
-    if (conn->flags&(COMM_TRANSPARENT|COMM_INTERCEPTION) && !Ip::Interceptor.Lookup(details, conn)) {
-        debugs(50, DBG_IMPORTANT, "ERROR: NAT/TPROXY lookup failed to locate original IPs on " << details);
-        // Failed.
-        PROF_stop(comm_accept);
-        return Comm::COMM_ERROR;
-    }
-
-#if USE_SQUID_EUI
-    if (Eui::TheConfig.euiLookup) {
-        if (details->remote.isIPv4()) {
-            details->remoteEui48.lookup(details->remote);
-        } else if (details->remote.isIPv6()) {
-            details->remoteEui64.lookup(details->remote);
-        }
-    }
-#endif
-
     PROF_stop(comm_accept);
     return Comm::OK;
 }
SOURCES/squid-4.11-include-guards.patch (new file, 41 lines)
@@ -0,0 +1,41 @@
diff --git a/compat/os/linux.h b/compat/os/linux.h
index 0ff05c6..d51389b 100644
--- a/compat/os/linux.h
+++ b/compat/os/linux.h
@@ -44,6 +44,36 @@
 #include <netinet/in.h>
 #endif

+/*
+ * Netfilter header madness. (see Bug 4323)
+ *
+ * Netfilter have a history of defining their own versions of network protocol
+ * primitives without sufficient protection against the POSIX defines which are
+ * aways present in Linux.
+ *
+ * netinet/in.h must be included before any other sys header in order to properly
+ * activate include guards in <linux/libc-compat.h> the kernel maintainers added
+ * to workaround it.
+ */
+#if HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
+/*
+ * Netfilter header madness. (see Bug 4323)
+ *
+ * Netfilter have a history of defining their own versions of network protocol
+ * primitives without sufficient protection against the POSIX defines which are
+ * aways present in Linux.
+ *
+ * netinet/in.h must be included before any other sys header in order to properly
+ * activate include guards in <linux/libc-compat.h> the kernel maintainers added
+ * to workaround it.
+ */
+#if HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
 /*
  * sys/capability.h is only needed in Linux apparently.
  *
@@ -1,7 +1,8 @@
-diff -up squid-3.1.0.9/QUICKSTART.location squid-3.1.0.9/QUICKSTART
---- squid-3.1.0.9/QUICKSTART.location	2009-06-26 12:35:27.000000000 +0200
-+++ squid-3.1.0.9/QUICKSTART	2009-07-17 14:03:10.000000000 +0200
-@@ -10,10 +10,9 @@ After you retrieved, compiled and instal
+diff --git a/QUICKSTART b/QUICKSTART
+index e5299b4..a243437 100644
+--- a/QUICKSTART
++++ b/QUICKSTART
+@@ -10,10 +10,9 @@ After you retrieved, compiled and installed the Squid software (see
  INSTALL in the same directory), you have to configure the squid.conf
  file. This is the list of the values you *need* to change, because no
  sensible defaults could be defined. Do not touch the other variables
@@ -14,7 +15,7 @@ diff -up squid-3.1.0.9/QUICKSTART.location squid-3.1.0.9/QUICKSTART

  ==============================================================================

-@@ -82,12 +81,12 @@ After editing squid.conf to your liking,
+@@ -80,12 +79,12 @@ After editing squid.conf to your liking, run Squid from the command
  line TWICE:

  To create any disk cache_dir configured:
@@ -6,5 +6,5 @@ index 90ac6a4..8dbed90 100755
 -#!/usr/local/bin/perl -Tw
 +#!/usr/bin/perl -Tw
  #
- # * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
+ # * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
  # *
SOURCES/squid-4.11-systemd.patch (new file, 39 lines)
@@ -0,0 +1,39 @@
diff --git a/configure b/configure
index 17b2ebf..9530f6b 100755
--- a/configure
+++ b/configure
@@ -33915,6 +33915,7 @@ done
 fi
 if test "x$SYSTEMD_LIBS" != "x" ; then
     CXXFLAGS="$SYSTEMD_CFLAGS $CXXFLAGS"
+    LDFLAGS="$SYSTEMD_LIBS $LDFLAGS"

 $as_echo "#define USE_SYSTEMD 1" >>confdefs.h

diff --git a/src/Debug.h b/src/Debug.h
index 6eecd01..ddd9e38 100644
--- a/src/Debug.h
+++ b/src/Debug.h
@@ -99,6 +99,10 @@ public:

     /// configures the active debugging context to write syslog ALERT
     static void ForceAlert();
+
+    /// prefixes each grouped debugs() line after the first one in the group
+    static std::ostream& Extra(std::ostream &os) { return os << "\n    "; }
+
 private:
     static Context *Current; ///< deepest active context; nil outside debugs()
 };
diff --git a/configure.ac b/configure.ac
index d3c5da8..806302c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2162,6 +2162,7 @@ if test "x$with_systemd" != "xno" -a "x$squid_host_os" = "xlinux"; then
 fi
 if test "x$SYSTEMD_LIBS" != "x" ; then
     CXXFLAGS="$SYSTEMD_CFLAGS $CXXFLAGS"
+    LDFLAGS="$SYSTEMD_LIBS $LDFLAGS"
     AC_DEFINE(USE_SYSTEMD,1,[systemd support is available])
 else
     with_systemd=no
SOURCES/squid-4.11.tar.xz.asc (new file, 25 lines)
@@ -0,0 +1,25 @@
File: squid-4.11.tar.xz
Date: Sun Apr 19 12:56:37 UTC 2020
Size: 2447700
MD5 : 10f34e852153a9996aa4614670e2bda1
SHA1: 053277bf5497163ffc9261b9807abda5959bb6fc
Key : CD6DBF8EF3B17D3E <squid3@treenet.co.nz>
      B068 84ED B779 C89B 044E  64E3 CD6D BF8E F3B1 7D3E
      keyring = http://www.squid-cache.org/pgp.asc
      keyserver = pool.sks-keyservers.net
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEsGiE7bd5yJsETmTjzW2/jvOxfT4FAl6cSpEACgkQzW2/jvOx
fT6YbA/6A+IbIbNBJUW45oj23Io9Tw/CzAcTeLHR+McKwV77qMbR+L+kQ+fUdM5F
rHAmd8bVVlyHc4WanVfWItEmzBzHA/ifTNvVpefSGGEbDb80RF66k7ACiZUokg1b
kkPwc/SjDhe2wvketIaBiVVd7pylrlCdVvazcF8gE9MWDOIlJND5mnHXidXvwkbJ
T2//8JZVEmcmN9pdFGNAUVckFm+AnwWXcRM1SQPYDGSVUtjVlqido8snLTA1mZwl
rIpjppujMV54OOWlj+Gqa3MZkpNzIaMCAfphzUFlsQY+/sRUYAOv1wmxw2WclxlK
WlWM+fw8OsYNDMwkOScKZZWceoAkq6UsUHzCAdJIdLqV/R6mZ9nfuZ6BHIr0+2dP
bDf9MU4KXbwEuXiRD/KPziUxxOZwSPivbm3wy9DqTTZfO9V+Iq6FVHX+ahxJ0XbM
JWRYA3GW+DRLjorfsWxU5r4UJsrnBfhItPUAfGPjPjEGZ/pn8r9G6MGenNGPLMKy
wP1rMlOhrZPwerzokzAvKx8G0WWkfN+IPv2JK3rDot6RiJIOuvnZZd4RIuVNTGbh
liO7M24JlWX3WD2wHBzxQag46+plb3VvrrVChwIQnZ2Qzpf50w0Bife/wtNBGpK0
k/Xi/nocO796YS8GZBnmhS1lEGEwp/YpJBFWmIjTWMUMEOcswVA=
=PKl0
-----END PGP SIGNATURE-----
@@ -1,296 +0,0 @@
commit fdd4123629320aa1ee4c3481bb392437c90d188d
Author: Amos Jeffries <yadij@users.noreply.github.com>
Date: 2019-05-20 11:23:13 +0000

    ESI: convert parse exceptions into 500 status response (#411)

    Produce a valid HTTP 500 status reply and continue operations when
    ESI parser throws an exception. This will prevent incomplete ESI
    responses reaching clients on server errors. Such responses might
    have been cacheable and thus corrupted, albeit corrupted consistently
    and at source by the reverse-proxy delivering them.

    ESI: throw on large stack recursions (#408)

    This reduces the impact on concurrent clients to only those
    accessing the malformed resource.

    Depending on what type of recursion is being performed the
    resource may appear to the client with missing segments, or
    not at all.

diff --git a/src/esi/Context.h b/src/esi/Context.h
index f3281a1..1b08cfb 100644
--- a/src/esi/Context.h
+++ b/src/esi/Context.h
@@ -12,6 +12,7 @@
 #include "clientStream.h"
 #include "err_type.h"
 #include "esi/Element.h"
+#include "esi/Esi.h"
 #include "esi/Parser.h"
 #include "http/forward.h"
 #include "http/StatusCode.h"
@@ -113,7 +114,7 @@ public:
     {

     public:
-        ESIElement::Pointer stack[10]; /* a stack of esi elements that are open */
+        ESIElement::Pointer stack[ESI_STACK_DEPTH_LIMIT]; /* a stack of esi elements that are open */
         int stackdepth; /* self explanatory */
         ESIParser::Pointer theParser;
         ESIElement::Pointer top();
diff --git a/src/esi/Esi.cc b/src/esi/Esi.cc
index cc662c4..e41d593 100644
--- a/src/esi/Esi.cc
+++ b/src/esi/Esi.cc
@@ -29,6 +29,7 @@
 #include "esi/Expression.h"
 #include "esi/Segment.h"
 #include "esi/VarState.h"
+#include "FadingCounter.h"
 #include "fatal.h"
 #include "http/Stream.h"
 #include "HttpHdrSc.h"
@@ -930,13 +931,18 @@ void
 ESIContext::addStackElement (ESIElement::Pointer element)
 {
     /* Put on the stack to allow skipping of 'invalid' markup */
-    assert (parserState.stackdepth <11);
+
+    // throw an error if the stack location would be invalid
+    if (parserState.stackdepth >= ESI_STACK_DEPTH_LIMIT)
+        throw Esi::Error("ESI Too many nested elements");
+    if (parserState.stackdepth < 0)
+        throw Esi::Error("ESI elements stack error, probable error in ESI template");
+
     assert (!failed());
     debugs(86, 5, "ESIContext::addStackElement: About to add ESI Node " << element.getRaw());

     if (!parserState.top()->addElement(element)) {
-        debugs(86, DBG_IMPORTANT, "ESIContext::addStackElement: failed to add esi node, probable error in ESI template");
-        flags.error = 1;
+        throw Esi::Error("ESIContext::addStackElement failed, probable error in ESI template");
     } else {
         /* added ok, push onto the stack */
         parserState.stack[parserState.stackdepth] = element;
@@ -1188,13 +1194,10 @@ ESIContext::addLiteral (const char *s, int len)
     assert (len);
     debugs(86, 5, "literal length is " << len);
     /* give a literal to the current element */
-    assert (parserState.stackdepth <11);
     ESIElement::Pointer element (new esiLiteral (this, s, len));

-    if (!parserState.top()->addElement(element)) {
-        debugs(86, DBG_IMPORTANT, "ESIContext::addLiteral: failed to add esi node, probable error in ESI template");
-        flags.error = 1;
-    }
+    if (!parserState.top()->addElement(element))
+        throw Esi::Error("ESIContext::addLiteral failed, probable error in ESI template");
 }

 void
@@ -1256,8 +1259,24 @@ ESIContext::parse()

     PROF_start(esiParsing);

-    while (buffered.getRaw() && !flags.error)
-        parseOneBuffer();
+    try {
+        while (buffered.getRaw() && !flags.error)
+            parseOneBuffer();
+
+    } catch (Esi::ErrorDetail &errMsg) { // FIXME: non-const for c_str()
+        // level-2: these are protocol/syntax errors from upstream
+        debugs(86, 2, "WARNING: ESI syntax error: " << errMsg);
+        setError();
+        setErrorMessage(errMsg.c_str());
+
+    } catch (...) {
+        // DBG_IMPORTANT because these are local issues the admin needs to fix
+        static FadingCounter logEntries; // TODO: set horizon less than infinity
+        if (logEntries.count(1) < 100)
+            debugs(86, DBG_IMPORTANT, "ERROR: ESI parser: " << CurrentException);
+        setError();
+        setErrorMessage("ESI parser error");
+    }

     PROF_stop(esiParsing);

diff --git a/src/esi/Esi.h b/src/esi/Esi.h
index 180b2c4..6fd5aac 100644
--- a/src/esi/Esi.h
+++ b/src/esi/Esi.h
@@ -10,6 +10,11 @@
 #define SQUID_ESI_H

 #include "clientStream.h"
+#include "sbuf/SBuf.h"
+
+#if !defined(ESI_STACK_DEPTH_LIMIT)
+#define ESI_STACK_DEPTH_LIMIT 20
+#endif

 /* ESI.c */
 extern CSR esiStreamRead;
@@ -18,5 +23,14 @@ extern CSD esiStreamDetach;
 extern CSS esiStreamStatus;
 int esiEnableProcessing (HttpReply *);

+namespace Esi
+{
+
+typedef SBuf ErrorDetail;
+/// prepare an Esi::ErrorDetail for throw on ESI parser internal errors
+inline Esi::ErrorDetail Error(const char *msg) { return ErrorDetail(msg); }
+
+} // namespace Esi
+
 #endif /* SQUID_ESI_H */

diff --git a/src/esi/Expression.cc b/src/esi/Expression.cc
index 2b5b762..8519b03 100644
--- a/src/esi/Expression.cc
+++ b/src/esi/Expression.cc
@@ -10,6 +10,7 @@

 #include "squid.h"
 #include "Debug.h"
+#include "esi/Esi.h"
 #include "esi/Expression.h"
 #include "profiler/Profiler.h"

@@ -97,6 +98,17 @@ stackpop(stackmember * s, int *depth)
     cleanmember(&s[*depth]);
 }

+static void
+stackpush(stackmember *stack, stackmember &item, int *depth)
+{
+    if (*depth < 0)
+        throw Esi::Error("ESIExpression stack has negative size");
+    if (*depth >= ESI_STACK_DEPTH_LIMIT)
+        throw Esi::Error("ESIExpression stack is full, cannot push");
+
+    stack[(*depth)++] = item;
+}
+
 static evaluate evalnegate;
 static evaluate evalliteral;
 static evaluate evalor;
@@ -208,6 +220,11 @@ evalnegate(stackmember * stack, int *depth, int whereAmI, stackmember * candidat
         /* invalid stack */
         return 1;

+    if (whereAmI < 0)
+        throw Esi::Error("negate expression location too small");
+    if (*depth >= ESI_STACK_DEPTH_LIMIT)
+        throw Esi::Error("negate expression too complex");
+
     if (stack[whereAmI + 1].valuetype != ESI_EXPR_EXPR)
         /* invalid operand */
         return 1;
@@ -280,7 +297,7 @@ evalor(stackmember * stack, int *depth, int whereAmI, stackmember * candidate)

     srv.precedence = 1;

-    stack[(*depth)++] = srv;
+    stackpush(stack, srv, depth);

     /* we're out of way, try adding now */
     if (!addmember(stack, depth, candidate))
@@ -327,7 +344,7 @@ evaland(stackmember * stack, int *depth, int whereAmI, stackmember * candidate)

     srv.precedence = 1;

-    stack[(*depth)++] = srv;
+    stackpush(stack, srv, depth);

     /* we're out of way, try adding now */
     if (!addmember(stack, depth, candidate))
@@ -373,7 +390,7 @@ evallesseq(stackmember * stack, int *depth, int whereAmI, stackmember * candidat

     srv.precedence = 1;

-    stack[(*depth)++] = srv;
+    stackpush(stack, srv, depth);

     /* we're out of way, try adding now */
     if (!addmember(stack, depth, candidate))
@@ -421,7 +438,7 @@ evallessthan(stackmember * stack, int *depth, int whereAmI, stackmember * candid

     srv.precedence = 1;

-    stack[(*depth)++] = srv;
+    stackpush(stack, srv, depth);

     /* we're out of way, try adding now */
     if (!addmember(stack, depth, candidate))
@@ -469,7 +486,7 @@ evalmoreeq(stackmember * stack, int *depth, int whereAmI, stackmember * candidat

     srv.precedence = 1;

-    stack[(*depth)++] = srv;
+    stackpush(stack, srv, depth);

     /* we're out of way, try adding now */
     if (!addmember(stack, depth, candidate))
@@ -517,7 +534,7 @@ evalmorethan(stackmember * stack, int *depth, int whereAmI, stackmember * candid

     srv.precedence = 1;

-    stack[(*depth)++] = srv;
+    stackpush(stack, srv, depth);

     /* we're out of way, try adding now */
     if (!addmember(stack, depth, candidate))
@@ -566,7 +583,7 @@ evalequals(stackmember * stack, int *depth, int whereAmI,

     srv.precedence = 1;

-    stack[(*depth)++] = srv;
+    stackpush(stack, srv, depth);

     /* we're out of way, try adding now */
     if (!addmember(stack, depth, candidate))
@@ -613,7 +630,7 @@ evalnotequals(stackmember * stack, int *depth, int whereAmI, stackmember * candi

     srv.precedence = 1;

-    stack[(*depth)++] = srv;
+    stackpush(stack, srv, depth);

     /* we're out of way, try adding now */
     if (!addmember(stack, depth, candidate))
@@ -953,6 +970,9 @@ addmember(stackmember * stack, int *stackdepth, stackmember * candidate)
         /* !(!(a==b))) is why thats safe */
         /* strictly less than until we unwind */

+        if (*stackdepth >= ESI_STACK_DEPTH_LIMIT)
+            throw Esi::Error("ESI expression too complex to add member");
+
         if (candidate->precedence < stack[*stackdepth - 1].precedence ||
                 candidate->precedence < stack[*stackdepth - 2].precedence) {
             /* must be an operator */
@@ -968,10 +988,10 @@ addmember(stackmember * stack, int *stackdepth, stackmember * candidate)
                 return 0;
             }
         } else {
-            stack[(*stackdepth)++] = *candidate;
+            stackpush(stack, *candidate, stackdepth);
         }
     } else if (candidate->valuetype != ESI_EXPR_INVALID)
-        stack[(*stackdepth)++] = *candidate;
+        stackpush(stack, *candidate, stackdepth);

     return 1;
 }
@@ -979,7 +999,7 @@ addmember(stackmember * stack, int *stackdepth, stackmember * candidate)
 int
 ESIExpression::Evaluate(char const *s)
 {
-    stackmember stack[20];
+    stackmember stack[ESI_STACK_DEPTH_LIMIT];
     int stackdepth = 0;
     char const *end;
     PROF_start(esiExpressionEval);
@@ -1,30 +0,0 @@
commit 409956536647b3a05ee1e367424a24ae6b8f13fd
Author: Amos Jeffries <yadij@users.noreply.github.com>
Date: 2019-06-08 21:09:23 +0000

    Fix Digest auth parameter parsing (#415)

    Only remove quoting if the domain=, uri= or qop= parameter
    value is surrounded by double-quotes.

diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc
index a8a07cd..b547bf8 100644
--- a/src/auth/digest/Config.cc
+++ b/src/auth/digest/Config.cc
@@ -787,14 +787,14 @@ Auth::Digest::Config::decode(char const *proxy_auth, const char *aRequestRealm)
         if (keyName == SBuf("domain",6) || keyName == SBuf("uri",3)) {
             // domain is Special. Not a quoted-string, must not be de-quoted. But is wrapped in '"'
             // BUG 3077: uri= can also be sent to us in a mangled (invalid!) form like domain
-            if (*p == '"' && *(p + vlen -1) == '"') {
+            if (vlen > 1 && *p == '"' && *(p + vlen -1) == '"') {
                 value.limitInit(p+1, vlen-2);
             }
         } else if (keyName == SBuf("qop",3)) {
             // qop is more special.
             // On request this must not be quoted-string de-quoted. But is several values wrapped in '"'
             // On response this is a single un-quoted token.
-            if (*p == '"' && *(p + vlen -1) == '"') {
+            if (vlen > 1 && *p == '"' && *(p + vlen -1) == '"') {
                 value.limitInit(p+1, vlen-2);
             } else {
                 value.limitInit(p, vlen);
@ -1,139 +0,0 @@
commit 7f73e9c5d17664b882ed32590e6af310c247f320
Author: Amos Jeffries <yadij@users.noreply.github.com>
Date: 2019-06-19 05:58:36 +0000

Update HttpHeader::getAuth to SBuf (#416)

Replace the fixed-size buffer for decoding base64 tokens with an
SBuf to avoid decoder issues on large inputs.

Update callers to SBuf API operations for more efficient memory
management.

diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc
index 1e2b650..284a057 100644
--- a/src/HttpHeader.cc
+++ b/src/HttpHeader.cc
@@ -1268,43 +1268,46 @@ HttpHeader::getContRange() const
return cr;
}

-const char *
-HttpHeader::getAuth(Http::HdrType id, const char *auth_scheme) const
+SBuf
+HttpHeader::getAuthToken(Http::HdrType id, const char *auth_scheme) const
{
const char *field;
int l;
assert(auth_scheme);
field = getStr(id);

+ static const SBuf nil;
if (!field) /* no authorization field */
- return NULL;
+ return nil;

l = strlen(auth_scheme);

if (!l || strncasecmp(field, auth_scheme, l)) /* wrong scheme */
- return NULL;
+ return nil;

field += l;

if (!xisspace(*field)) /* wrong scheme */
- return NULL;
+ return nil;

/* skip white space */
for (; field && xisspace(*field); ++field);

if (!*field) /* no authorization cookie */
- return NULL;
+ return nil;

- static char decodedAuthToken[8192];
+ const auto fieldLen = strlen(field);
+ SBuf result;
+ char *decodedAuthToken = result.rawAppendStart(BASE64_DECODE_LENGTH(fieldLen));
struct base64_decode_ctx ctx;
base64_decode_init(&ctx);
size_t decodedLen = 0;
- if (!base64_decode_update(&ctx, &decodedLen, reinterpret_cast<uint8_t*>(decodedAuthToken), strlen(field), field) ||
+ if (!base64_decode_update(&ctx, &decodedLen, reinterpret_cast<uint8_t*>(decodedAuthToken), fieldLen, field) ||
!base64_decode_final(&ctx)) {
- return NULL;
+ return nil;
}
- decodedAuthToken[decodedLen] = '\0';
- return decodedAuthToken;
+ result.rawAppendFinish(decodedAuthToken, decodedLen);
+ return result;
}

ETag
diff --git a/src/HttpHeader.h b/src/HttpHeader.h
index a26b127..3b262be 100644
--- a/src/HttpHeader.h
+++ b/src/HttpHeader.h
@@ -134,7 +134,7 @@ public:
HttpHdrRange *getRange() const;
HttpHdrSc *getSc() const;
HttpHdrContRange *getContRange() const;
- const char *getAuth(Http::HdrType id, const char *auth_scheme) const;
+ SBuf getAuthToken(Http::HdrType id, const char *auth_scheme) const;
ETag getETag(Http::HdrType id) const;
TimeOrTag getTimeOrTag(Http::HdrType id) const;
int hasListMember(Http::HdrType id, const char *member, const char separator) const;
diff --git a/src/cache_manager.cc b/src/cache_manager.cc
index da22f7a..2fae767 100644
--- a/src/cache_manager.cc
+++ b/src/cache_manager.cc
@@ -27,6 +27,7 @@
#include "mgr/FunAction.h"
#include "mgr/QueryParams.h"
#include "protos.h"
+#include "sbuf/StringConvert.h"
#include "SquidConfig.h"
#include "SquidTime.h"
#include "Store.h"
@@ -243,20 +244,20 @@ CacheManager::ParseHeaders(const HttpRequest * request, Mgr::ActionParams &params)
// TODO: use the authentication system decode to retrieve these details properly.

/* base 64 _decoded_ user:passwd pair */
- const char *basic_cookie = request->header.getAuth(Http::HdrType::AUTHORIZATION, "Basic");
+ const auto basic_cookie(request->header.getAuthToken(Http::HdrType::AUTHORIZATION, "Basic"));

- if (!basic_cookie)
+ if (basic_cookie.isEmpty())
return;

- const char *passwd_del;
- if (!(passwd_del = strchr(basic_cookie, ':'))) {
+ const auto colonPos = basic_cookie.find(':');
+ if (colonPos == SBuf::npos) {
debugs(16, DBG_IMPORTANT, "CacheManager::ParseHeaders: unknown basic_cookie format '" << basic_cookie << "'");
return;
}

/* found user:password pair, reset old values */
- params.userName.limitInit(basic_cookie, passwd_del - basic_cookie);
- params.password = passwd_del + 1;
+ params.userName = SBufToString(basic_cookie.substr(0, colonPos));
+ params.password = SBufToString(basic_cookie.substr(colonPos+1));

/* warning: this prints decoded password which maybe not be what you want to do @?@ @?@ */
debugs(16, 9, "CacheManager::ParseHeaders: got user: '" <<
diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc
index b958b14..7ca5d24 100644
--- a/src/clients/FtpGateway.cc
+++ b/src/clients/FtpGateway.cc
@@ -1050,7 +1050,7 @@ Ftp::Gateway::checkAuth(const HttpHeader * req_hdr)

#if HAVE_AUTH_MODULE_BASIC
/* Check HTTP Authorization: headers (better than defaults, but less than URL) */
- const SBuf auth(req_hdr->getAuth(Http::HdrType::AUTHORIZATION, "Basic"));
+ const auto auth(req_hdr->getAuthToken(Http::HdrType::AUTHORIZATION, "Basic"));
if (!auth.isEmpty()) {
flags.authenticated = 1;
loginParser(auth, false);
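
The essential change above is that the decode buffer is sized from the encoded input (BASE64_DECODE_LENGTH(fieldLen)) and then trimmed to the bytes actually produced via rawAppendFinish(), instead of writing into a static 8192-byte array that large tokens can exceed. A minimal stand-alone sketch of that sizing pattern (illustrative only; std::string and the placeholder decodeInto() stand in for Squid's SBuf and the base64_decode_update()/base64_decode_final() calls above):

#include <cassert>
#include <cstddef>
#include <string>

// Worst-case decoded size for a base64 input of encodedLen bytes: every
// 4 input characters decode to at most 3 output bytes. This mirrors the
// role BASE64_DECODE_LENGTH() plays in the patch above.
static std::size_t base64DecodeBound(std::size_t encodedLen)
{
    return 3 * ((encodedLen + 3) / 4);
}

// Placeholder "decoder" so the sketch stays self-contained: it emits 3
// bytes for every 4 input characters, like a real base64 decoder would,
// but performs no actual decoding.
static std::size_t decodeInto(const std::string &in, char *out)
{
    std::size_t n = 0;
    for (std::size_t i = 0; i < in.size(); ++i)
        if (i % 4 != 3)
            out[n++] = in[i];
    return n;
}

// The pattern the patch adopts: size the output from the input length,
// decode into it, then trim to the bytes actually produced -- instead of
// decoding into a fixed-size array.
static std::string decodeToken(const std::string &field)
{
    std::string result;
    result.resize(base64DecodeBound(field.size()));
    const std::size_t decodedLen = decodeInto(field, &result[0]);
    result.resize(decodedLen);
    return result;
}

int main()
{
    const std::string token(100000, 'A');   // far larger than 8192
    const std::string decoded = decodeToken(token);
    assert(decoded.size() == base64DecodeBound(token.size()));
    return 0;
}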
@ -1,64 +0,0 @@
diff --git a/tools/cachemgr.cc b/tools/cachemgr.cc
index 0c745c2..8a67eba 100644
--- a/tools/cachemgr.cc
+++ b/tools/cachemgr.cc
@@ -355,7 +355,7 @@ auth_html(const char *host, int port, const char *user_name)

printf("<TR><TH ALIGN=\"left\">Manager name:</TH><TD><INPUT NAME=\"user_name\" ");

- printf("size=\"30\" VALUE=\"%s\"></TD></TR>\n", user_name);
+ printf("size=\"30\" VALUE=\"%s\"></TD></TR>\n", rfc1738_escape(user_name));

printf("<TR><TH ALIGN=\"left\">Password:</TH><TD><INPUT TYPE=\"password\" NAME=\"passwd\" ");

@@ -419,7 +419,7 @@ menu_url(cachemgr_request * req, const char *action)
script_name,
req->hostname,
req->port,
- safe_str(req->user_name),
+ rfc1738_escape(safe_str(req->user_name)),
action,
safe_str(req->pub_auth));
return url;
@@ -1074,8 +1074,8 @@ make_pub_auth(cachemgr_request * req)
const int bufLen = snprintf(buf, sizeof(buf), "%s|%d|%s|%s",
req->hostname,
(int) now,
- req->user_name ? req->user_name : "",
- req->passwd);
+ rfc1738_escape(safe_str(req->user_name)),
+ rfc1738_escape(req->passwd));
debug("cmgr: pre-encoded for pub: %s\n", buf);

const int encodedLen = base64_encode_len(bufLen);
@@ -1094,8 +1094,6 @@ decode_pub_auth(cachemgr_request * req)
char *buf;
const char *host_name;
const char *time_str;
- const char *user_name;
- const char *passwd;

debug("cmgr: decoding pub: '%s'\n", safe_str(req->pub_auth));
safe_free(req->passwd);
@@ -1131,17 +1129,21 @@ decode_pub_auth(cachemgr_request * req)

debug("cmgr: decoded time: '%s' (now: %d)\n", time_str, (int) now);

+ char *user_name;
if ((user_name = strtok(NULL, "|")) == NULL) {
xfree(buf);
return;
}
+ rfc1738_unescape(user_name);

debug("cmgr: decoded uname: '%s'\n", user_name);

+ char *passwd;
if ((passwd = strtok(NULL, "|")) == NULL) {
xfree(buf);
return;
}
+ rfc1738_unescape(passwd);

debug("cmgr: decoded passwd: '%s'\n", passwd);
@ -1,50 +0,0 @@
diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc
index 45e46c0..433335a 100644
--- a/src/auth/digest/Config.cc
+++ b/src/auth/digest/Config.cc
@@ -94,9 +94,6 @@ static void authenticateDigestNonceDelete(digest_nonce_h * nonce);
static void authenticateDigestNonceSetup(void);
static void authDigestNonceEncode(digest_nonce_h * nonce);
static void authDigestNonceLink(digest_nonce_h * nonce);
-#if NOT_USED
-static int authDigestNonceLinks(digest_nonce_h * nonce);
-#endif
static void authDigestNonceUserUnlink(digest_nonce_h * nonce);

static void
@@ -287,21 +284,10 @@ authDigestNonceLink(digest_nonce_h * nonce)
{
assert(nonce != NULL);
++nonce->references;
+ assert(nonce->references != 0); // no overflows
debugs(29, 9, "nonce '" << nonce << "' now at '" << nonce->references << "'.");
}

-#if NOT_USED
-static int
-authDigestNonceLinks(digest_nonce_h * nonce)
-{
- if (!nonce)
- return -1;
-
- return nonce->references;
-}
-
-#endif
-
void
authDigestNonceUnlink(digest_nonce_h * nonce)
{
diff --git a/src/auth/digest/Config.h b/src/auth/digest/Config.h
index b79ff30..2840865 100644
--- a/src/auth/digest/Config.h
+++ b/src/auth/digest/Config.h
@@ -44,7 +44,7 @@ struct _digest_nonce_h : public hash_link {
/* number of uses we've seen of this nonce */
unsigned long nc;
/* reference count */
- short references;
+ uint64_t references;
/* the auth_user this nonce has been tied to */
Auth::Digest::User *user;
/* has this nonce been invalidated ? */
@ -1,12 +0,0 @@
diff --git a/src/cache_cf.cc b/src/cache_cf.cc
index 9165ef99c..32a3df322 100644
--- a/src/cache_cf.cc
+++ b/src/cache_cf.cc
@@ -2081,6 +2081,7 @@ parse_peer(CachePeer ** head)

CachePeer *p = new CachePeer;
p->host = xstrdup(host_str);
+ Tolower(p->host);
p->name = xstrdup(host_str);
p->type = parseNeighborType(token);
@ -1,26 +0,0 @@
diff --git a/src/http/url_rewriters/LFS/url_lfs_rewrite.8 b/src/http/url_rewriters/LFS/url_lfs_rewrite.8
index 3053180..1d295fb 100644
--- a/src/http/url_rewriters/LFS/url_lfs_rewrite.8
+++ b/src/http/url_rewriters/LFS/url_lfs_rewrite.8
@@ -135,7 +135,7 @@
.if n .ad l
.nh
.SH "NAME"
-url_lfs_rewrite
+\& url_lfs_rewrite \- a URL-rewriter based on local file existence
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
diff --git a/src/http/url_rewriters/LFS/url_lfs_rewrite.pl.in b/src/http/url_rewriters/LFS/url_lfs_rewrite.pl.in
index a7168e0..da7055c 100755
--- a/src/http/url_rewriters/LFS/url_lfs_rewrite.pl.in
+++ b/src/http/url_rewriters/LFS/url_lfs_rewrite.pl.in
@@ -8,7 +8,7 @@ use Pod::Usage;

=head1 NAME

-B<url_lfs_rewrite>
+B<url_lfs_rewrite> - a URL-rewriter based on local file existence

=head1 SYNOPSIS
@ -1,25 +0,0 @@
File: squid-4.4.tar.xz
Date: Sat Oct 27 21:20:24 UTC 2018
Size: 2436468
MD5 : 892504ca9700e1f139a53f84098613bd
SHA1: 0ab6b133f65866d825bf72cbbe8cef209768b2fa
Key : CD6DBF8EF3B17D3E <squid3@treenet.co.nz>
B068 84ED B779 C89B 044E 64E3 CD6D BF8E F3B1 7D3E
keyring = http://www.squid-cache.org/pgp.asc
keyserver = pool.sks-keyservers.net
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEsGiE7bd5yJsETmTjzW2/jvOxfT4FAlvU1qAACgkQzW2/jvOx
fT5Y3Q//R3/ZtDHal9H9c4VUB1fEzkk22JfgXTzRRUdzNkN+XxDkVGmM9R0E0Opo
9E/lsE9PcLX1EBtBXbPfwLESzfMe4QJgqq1B4FocpJcdtfCQX6ADU4Qdfc+oo8Z1
J/xCf8XrU3yUgXn3pMnQ9DT+IuPYe+Jte7Awm148mC15GMC49NBAYAd793XZ+L2t
fVPCbVYA40AU3xVJkxlblh7O0E8UEQ7zQMxcXM2jJJ4jJOjqecOIoJt6lyPD59q3
UjD0EmcjTj54BpaU8r++kAc2TkLyBvFV1vWQuQRNG5IAMEOF3H8OfujCXl3lX9fD
Tvi9763f9LxdImLJttkzgTt20XAudlUmKOdpj6t1uF+7EmNJg/ChowyLsLzlLLST
1mGNdcUdP9VhX2aoTXN/ctn8BTQ/cNIx2VY8kKWsXB+ymFcCJRBW1cBAr3R+UzuX
KVlsDzlxP6Dp8EFvKN3sIbM/QtpstKgbTkxro7d9XBkeldsasd5uI2Yt5PSMIs+y
VtscqCnwDjxAIW6FNqB96J4hcOYECdWHDL3s46wEDnQaiR0IdBAN5QHn1imzM5e1
eHuwZimqBW6vE4rPnVpPIr1Gml5OlLl3te2jsbUVmBiOwDVlQLZJQGzI5UTazvnN
eR3QeTW+ggSAdVc6GEApELARfKPRxywLQTOlAhEPn0xayy4ByME=
=1eSQ
-----END PGP SIGNATURE-----
@ -4,14 +4,15 @@ Documentation=man:squid(8)
After=network.target network-online.target nss-lookup.target

[Service]
Type=forking
Type=notify
LimitNOFILE=16384
PIDFile=/run/squid.pid
EnvironmentFile=/etc/sysconfig/squid
ExecStartPre=/usr/libexec/squid/cache_swap.sh
ExecStart=/usr/sbin/squid $SQUID_OPTS -f $SQUID_CONF
ExecReload=/usr/sbin/squid $SQUID_OPTS -k reconfigure -f $SQUID_CONF
ExecStop=/usr/sbin/squid -k shutdown -f $SQUID_CONF
TimeoutSec=0
ExecStart=/usr/sbin/squid --foreground $SQUID_OPTS -f ${SQUID_CONF}
ExecReload=/usr/bin/kill -HUP $MAINPID
KillMode=mixed
NotifyAccess=all

[Install]
WantedBy=multi-user.target
129
SPECS/squid.spec
@ -1,8 +1,8 @@
%define __perl_requires %{SOURCE98}

Name: squid
Version: 4.4
Release: 8%{?dist}.2
Version: 4.11
Release: 3%{?dist}
Summary: The Squid proxy caching server
Epoch: 7
# See CREDITS for breakdown of non GPLv2+ code
@ -26,33 +26,29 @@ Source98: perl-requires-squid.sh
# Local patches
# Applying upstream patches first makes it less likely that local patches
# will break upstream ones.
Patch201: squid-4.0.11-config.patch
Patch202: squid-3.1.0.9-location.patch
Patch203: squid-3.0.STABLE1-perlpath.patch
Patch204: squid-3.5.9-include-guards.patch
Patch205: squid-4.0.21-large-acl.patch
Patch201: squid-4.11-config.patch
Patch202: squid-4.11-location.patch
Patch203: squid-4.11-perlpath.patch
Patch204: squid-4.11-include-guards.patch
Patch205: squid-4.11-large-acl.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=980511
Patch206: squid-4.4.0-active-ftp.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1612524
Patch207: squid-4.4.0-man-pages.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1691741
Patch208: squid-4.4.0-lower-cachepeer.patch
Patch206: squid-4.11-active-ftp.patch
# https://github.com/squid-cache/squid/commit/c26cd1cb6a60ff196ef13c00e82576d3bfeb2e30
Patch207: squid-4.11-systemd.patch
Patch208: squid-4.11-convert-ipv4.patch

# Security fixes
# https://bugzilla.redhat.com/show_bug.cgi?id=1729436
Patch500: squid-4.4.0-CVE-2019-13345.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1738485
Patch501: squid-4.4.0-CVE-2019-12527.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1828368
Patch502: squid-4.4.0-CVE-2019-12519.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1828367
Patch503: squid-4.4.0-CVE-2020-11945.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1829402
Patch504: squid-4.4.0-CVE-2019-12525.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1871700
Patch505: squid-4.4.0-CVE-2020-15810.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1852554
Patch500: squid-4.11-CVE-2020-14058.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1852550
Patch501: squid-4.11-CVE-2020-15049.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1871705
Patch502: squid-4.11-CVE-2020-24606.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1871702
Patch506: squid-4.4.0-CVE-2020-15811.patch
Patch503: squid-4.11-CVE-2020-15811.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1871700
Patch504: squid-4.11-CVE-2020-15810.patch


Requires: bash >= 2.0
Requires(pre): shadow-utils
@ -82,6 +78,8 @@ BuildRequires: perl-generators
# For test suite
BuildRequires: pkgconfig(cppunit)
BuildRequires: autoconf
# systemd notify
BuildRequires: systemd-devel

%description
Squid is a high-performance proxy caching server for Web clients,
@ -106,19 +104,18 @@ lookup program (dnsserver), a program for retrieving FTP data
%patch201 -p1 -b .config
%patch202 -p1 -b .location
%patch203 -p1 -b .perlpath
%patch204 -p0 -b .include-guards
%patch204 -p1 -b .include-guards
%patch205 -p1 -b .large_acl
%patch206 -p1 -b .active-ftp
%patch207 -p1 -b .man-pages
%patch208 -p1 -b .lower-cachepeer
%patch207 -p1 -b .systemd
%patch208 -p1 -R -b .convert-ipv4

%patch500 -p1 -b .CVE-2019-13345
%patch501 -p1 -b .CVE-2019-12527
%patch502 -p1 -b .CVE-2019-12519
%patch503 -p1 -b .CVE-2020-11945
%patch504 -p1 -b .CVE-2019-12525
%patch505 -p1 -b .CVE-2020-15810
%patch506 -p1 -b .CVE-2020-15811
# Security patches
%patch500 -p1 -b .cve-2020-14058
%patch501 -p1 -b .cve-2020-15049
%patch502 -p1 -b .cve-2020-24606
%patch503 -p1 -b .CVE-2020-15811
%patch504 -p1 -b .CVE-2020-15810

# https://bugzilla.redhat.com/show_bug.cgi?id=1679526
# Patch in the vendor documentation and used different location for documentation
@ -335,19 +332,59 @@ fi


%changelog
* Wed Aug 26 2020 Lubos Uhliarik <luhliari@redhat.com> - 7:4.4-8.2
- Resolves: #1872345 - CVE-2020-15811 squid:4/squid: HTTP Request Splitting
could result in cache poisoning
- Resolves: #1872330 - CVE-2020-15810 squid:4/squid: HTTP Request Smuggling
could result in cache poisoning
* Wed Aug 26 2020 Lubos Uhliarik <luhliari@redhat.com> - 7:4.11-3
- Resolves: #1871705 - CVE-2020-24606 squid: Improper Input Validation could
result in a DoS
- Resolves: #1871702 - CVE-2020-15811 squid: HTTP Request Splitting could result
in cache poisoning
- Resolves: #1871700 - CVE-2020-15810 squid: HTTP Request Smuggling could result
in cache poisoning

* Wed Apr 29 2020 Lubos Uhliarik <luhliari@redhat.com> - 7:4.4-8.1
- Resolves: #1828368 - CVE-2019-12519 squid: improper check for new member in
ESIExpression::Evaluate allows for stack buffer overflow
- Resolves: #1828367 - CVE-2020-11945 squid: improper access restriction upon
Digest Authentication nonce replay could lead to remote code execution
- Resolves: #1829402 - CVE-2019-12525 squid:4/squid: parsing of header
* Thu Jul 02 2020 Lubos Uhliarik <luhliari@redhat.com> - 7:4.11-2
- Resolves: #1853130 - CVE-2020-15049 squid:4/squid: request smuggling and
poisoning attack against the HTTP cache
- Resolves: #1853136 - CVE-2020-14058 squid:4/squid: DoS in TLS handshake

* Thu May 07 2020 Lubos Uhliarik <luhliari@redhat.com> - 7:4.11-1
- new version 4.11
- libsystemd integration
- Resolves: #1829467 - squid:4 rebase
- Resolves: #1828378 - CVE-2019-12521 squid:4/squid: off-by-one error in
addStackElement allows for a heap buffer overflow and a crash
- Resolves: #1828377 - CVE-2019-12520 squid:4/squid: improper input validation
in request allows for proxy manipulation
- Resolves: #1828375 - CVE-2019-12524 squid:4/squid: improper access restriction
in url_regex may lead to security bypass
- Resolves: #1820664 - CVE-2019-18860 squid: mishandles HTML in the host
parameter to cachemgr.cgi which could result in squid behaving in unsecure way
- Resolves: #1802514 - CVE-2020-8449 squid:4/squid: Improper input validation
issues in HTTP Request processing
- Resolves: #1802513 - CVE-2020-8450 squid:4/squid: Buffer overflow in a Squid
acting as reverse-proxy
- Resolves: #1802512 - CVE-2019-12528 squid:4/squid: Information Disclosure
issue in FTP Gateway
- Resolves: #1771288 - CVE-2019-18678 squid:4/squid: HTTP Request Splitting
issue in HTTP message processing
- Resolves: #1771283 - CVE-2019-18679 squid:4/squid: Information Disclosure
issue in HTTP Digest Authentication
- Resolves: #1771280 - CVE-2019-18677 squid:4/squid: Cross-Site Request Forgery
issue in HTTP Request processing
- Resolves: #1771275 - CVE-2019-12523 squid:4/squid: Improper input validation
in URI processor
- Resolves: #1771272 - CVE-2019-18676 squid:4/squid: Buffer overflow in URI
processor
- Resolves: #1771264 - CVE-2019-12526 squid:4/squid: Heap overflow issue in URN
processing
- Resolves: #1738581 - CVE-2019-12529 squid: OOB read in Proxy-Authorization
header causes DoS

* Tue Apr 28 2020 Lubos Uhliarik <luhliari@redhat.com> - 7:4.4-9
- Resolves: #1738583 - CVE-2019-12525 squid:4/squid: parsing of header
Proxy-Authentication leads to memory corruption
- Resolves: #1828369 - CVE-2020-11945 squid: improper access restriction upon
Digest Authentication nonce replay could lead to remote code execution
- Resolves: #1828370 - CVE-2019-12519 squid: improper check for new member in
ESIExpression::Evaluate allows for stack buffer overflow

* Fri Aug 23 2019 Lubos Uhliarik <luhliari@redhat.com> - 7:4.4-8
- Resolves: # 1738485 - CVE-2019-12527 squid:4/squid: heap-based buffer overflow