import squid-4.11-4.module+el8.4.0+9038+276d2d9f

This commit is contained in:
CentOS Sources 2021-03-30 15:08:02 -04:00 committed by Stepan Oksanichenko
parent 14299772a1
commit 8d8cb7fe83
8 changed files with 830 additions and 1 deletion

View File

@ -0,0 +1,295 @@
commit 93f5fda134a2a010b84ffedbe833d670e63ba4be
Author: Christos Tsantilas <christos@chtsanti.net>
Date: 2020-05-15 04:54:54 +0000
Fix sending of unknown validation errors to cert. validator (#633)
Squid may be compiled with an OpenSSL release introducing X509
validation errors that Squid does not have the names for. Send their
integer codes.
Also sync Squid certificate verification errors with OpenSSL v1.1.1g.
This is a Measurement Factory project.
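For reference, a minimal standalone sketch of the fallback behaviour this patch gives Ssl::GetErrorName(): a code known to the build resolves to its X509_V_ERR_* name, while an unknown code is rendered as its raw integer, prefixed with "SSL_ERR=" when the new prefixRawCode flag is set. The KnownErrors map and errorName() helper below are illustrative stand-ins only; Squid's real lookup uses the TheSslErrorArray table in src/ssl/ErrorDetail.cc shown in the diff.

// Illustrative sketch only -- not Squid's implementation.
#include <cstdio>
#include <map>

static const std::map<int, const char *> KnownErrors = {
    { 62, "X509_V_ERR_HOSTNAME_MISMATCH" },    // numeric values as in OpenSSL 1.1.1
    { 64, "X509_V_ERR_IP_ADDRESS_MISMATCH" },
};

// Mirrors the new GetErrorName(value, prefixRawCode) contract from the diff below.
static const char *
errorName(int value, bool prefixRawCode = false)
{
    const auto it = KnownErrors.find(value);
    if (it != KnownErrors.end())
        return it->second;                     // known: symbolic name
    static char tmpBuffer[128];                // unknown: format the raw code
    snprintf(tmpBuffer, sizeof(tmpBuffer), "%s%d", prefixRawCode ? "SSL_ERR=" : "", value);
    return tmpBuffer;
}

int main()
{
    printf("%s\n", errorName(62));             // X509_V_ERR_HOSTNAME_MISMATCH
    printf("%s\n", errorName(94, true));       // SSL_ERR=94 (code unknown to this sketch)
}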
diff --git a/src/format/Format.cc b/src/format/Format.cc
index 8c5574b..4b4ad42 100644
--- a/src/format/Format.cc
+++ b/src/format/Format.cc
@@ -322,15 +322,6 @@ log_quoted_string(const char *str, char *out)
*p = '\0';
}
-#if USE_OPENSSL
-static char *
-sslErrorName(Security::ErrorCode err, char *buf, size_t size)
-{
- snprintf(buf, size, "SSL_ERR=%d", err);
- return buf;
-}
-#endif
-
/// XXX: Misnamed. TODO: Split <h (and this function) to distinguish received
/// headers from sent headers rather than failing to distinguish requests from responses.
/// \retval HttpReply sent to the HTTP client (access.log and default context).
@@ -959,9 +950,7 @@ Format::Format::assemble(MemBuf &mb, const AccessLogEntry::Pointer &al, int logS
case LFT_SQUID_ERROR_DETAIL:
#if USE_OPENSSL
if (al->request && al->request->errType == ERR_SECURE_CONNECT_FAIL) {
- out = Ssl::GetErrorName(al->request->errDetail);
- if (!out)
- out = sslErrorName(al->request->errDetail, tmp, sizeof(tmp));
+ out = Ssl::GetErrorName(al->request->errDetail, true);
} else
#endif
if (al->request && al->request->errDetail != ERR_DETAIL_NONE) {
@@ -1263,10 +1252,7 @@ Format::Format::assemble(MemBuf &mb, const AccessLogEntry::Pointer &al, int logS
for (const Security::CertErrors *sslError = srvBump->sslErrors(); sslError; sslError = sslError->next) {
if (!sb.isEmpty())
sb.append(separator);
- if (const char *errorName = Ssl::GetErrorName(sslError->element.code))
- sb.append(errorName);
- else
- sb.append(sslErrorName(sslError->element.code, tmp, sizeof(tmp)));
+ sb.append(Ssl::GetErrorName(sslError->element.code, true));
if (sslError->element.depth >= 0)
sb.appendf("@depth=%d", sslError->element.depth);
}
diff --git a/src/ssl/ErrorDetail.cc b/src/ssl/ErrorDetail.cc
index ddd61fd..00eb0e2 100644
--- a/src/ssl/ErrorDetail.cc
+++ b/src/ssl/ErrorDetail.cc
@@ -233,6 +233,9 @@ static SslErrorEntry TheSslErrorArray[] = {
"X509_V_ERR_SUBTREE_MINMAX"
},
#endif
+ { X509_V_ERR_APPLICATION_VERIFICATION, //50
+ "X509_V_ERR_APPLICATION_VERIFICATION"
+ },
#if defined(X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE)
{
X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE, //51
@@ -257,9 +260,132 @@ static SslErrorEntry TheSslErrorArray[] = {
"X509_V_ERR_CRL_PATH_VALIDATION_ERROR"
},
#endif
- { X509_V_ERR_APPLICATION_VERIFICATION,
- "X509_V_ERR_APPLICATION_VERIFICATION"
+#if defined(X509_V_ERR_PATH_LOOP)
+ {
+ X509_V_ERR_PATH_LOOP, //55
+ "X509_V_ERR_PATH_LOOP"
+ },
+#endif
+#if defined(X509_V_ERR_SUITE_B_INVALID_VERSION)
+ {
+ X509_V_ERR_SUITE_B_INVALID_VERSION, //56
+ "X509_V_ERR_SUITE_B_INVALID_VERSION"
+ },
+#endif
+#if defined(X509_V_ERR_SUITE_B_INVALID_ALGORITHM)
+ {
+ X509_V_ERR_SUITE_B_INVALID_ALGORITHM, //57
+ "X509_V_ERR_SUITE_B_INVALID_ALGORITHM"
+ },
+#endif
+#if defined(X509_V_ERR_SUITE_B_INVALID_CURVE)
+ {
+ X509_V_ERR_SUITE_B_INVALID_CURVE, //58
+ "X509_V_ERR_SUITE_B_INVALID_CURVE"
+ },
+#endif
+#if defined(X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM)
+ {
+ X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM, //59
+ "X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM"
+ },
+#endif
+#if defined(X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED)
+ {
+ X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED, //60
+ "X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED"
+ },
+#endif
+#if defined(X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256)
+ {
+ X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256, //61
+ "X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256"
+ },
+#endif
+#if defined(X509_V_ERR_HOSTNAME_MISMATCH)
+ {
+ X509_V_ERR_HOSTNAME_MISMATCH, //62
+ "X509_V_ERR_HOSTNAME_MISMATCH"
+ },
+#endif
+#if defined(X509_V_ERR_EMAIL_MISMATCH)
+ {
+ X509_V_ERR_EMAIL_MISMATCH, //63
+ "X509_V_ERR_EMAIL_MISMATCH"
+ },
+#endif
+#if defined(X509_V_ERR_IP_ADDRESS_MISMATCH)
+ {
+ X509_V_ERR_IP_ADDRESS_MISMATCH, //64
+ "X509_V_ERR_IP_ADDRESS_MISMATCH"
+ },
+#endif
+#if defined(X509_V_ERR_DANE_NO_MATCH)
+ {
+ X509_V_ERR_DANE_NO_MATCH, //65
+ "X509_V_ERR_DANE_NO_MATCH"
},
+#endif
+#if defined(X509_V_ERR_EE_KEY_TOO_SMALL)
+ {
+ X509_V_ERR_EE_KEY_TOO_SMALL, //66
+ "X509_V_ERR_EE_KEY_TOO_SMALL"
+ },
+#endif
+#if defined(X509_V_ERR_CA_KEY_TOO_SMALL)
+ {
+ X509_V_ERR_CA_KEY_TOO_SMALL, //67
+ "X509_V_ERR_CA_KEY_TOO_SMALL"
+ },
+#endif
+#if defined(X509_V_ERR_CA_MD_TOO_WEAK)
+ {
+ X509_V_ERR_CA_MD_TOO_WEAK, //68
+ "X509_V_ERR_CA_MD_TOO_WEAK"
+ },
+#endif
+#if defined(X509_V_ERR_INVALID_CALL)
+ {
+ X509_V_ERR_INVALID_CALL, //69
+ "X509_V_ERR_INVALID_CALL"
+ },
+#endif
+#if defined(X509_V_ERR_STORE_LOOKUP)
+ {
+ X509_V_ERR_STORE_LOOKUP, //70
+ "X509_V_ERR_STORE_LOOKUP"
+ },
+#endif
+#if defined(X509_V_ERR_NO_VALID_SCTS)
+ {
+ X509_V_ERR_NO_VALID_SCTS, //71
+ "X509_V_ERR_NO_VALID_SCTS"
+ },
+#endif
+#if defined(X509_V_ERR_PROXY_SUBJECT_NAME_VIOLATION)
+ {
+ X509_V_ERR_PROXY_SUBJECT_NAME_VIOLATION, //72
+ "X509_V_ERR_PROXY_SUBJECT_NAME_VIOLATION"
+ },
+#endif
+#if defined(X509_V_ERR_OCSP_VERIFY_NEEDED)
+ {
+ X509_V_ERR_OCSP_VERIFY_NEEDED, //73
+ "X509_V_ERR_OCSP_VERIFY_NEEDED"
+ },
+#endif
+#if defined(X509_V_ERR_OCSP_VERIFY_FAILED)
+ {
+ X509_V_ERR_OCSP_VERIFY_FAILED, //74
+ "X509_V_ERR_OCSP_VERIFY_FAILED"
+ },
+#endif
+#if defined(X509_V_ERR_OCSP_CERT_UNKNOWN)
+ {
+ X509_V_ERR_OCSP_CERT_UNKNOWN, //75
+ "X509_V_ERR_OCSP_CERT_UNKNOWN"
+ },
+#endif
{ SSL_ERROR_NONE, "SSL_ERROR_NONE"},
{SSL_ERROR_NONE, NULL}
};
@@ -286,6 +412,27 @@ static const char *OptionalSslErrors[] = {
"X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX",
"X509_V_ERR_UNSUPPORTED_NAME_SYNTAX",
"X509_V_ERR_CRL_PATH_VALIDATION_ERROR",
+ "X509_V_ERR_PATH_LOOP",
+ "X509_V_ERR_SUITE_B_INVALID_VERSION",
+ "X509_V_ERR_SUITE_B_INVALID_ALGORITHM",
+ "X509_V_ERR_SUITE_B_INVALID_CURVE",
+ "X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM",
+ "X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED",
+ "X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256",
+ "X509_V_ERR_HOSTNAME_MISMATCH",
+ "X509_V_ERR_EMAIL_MISMATCH",
+ "X509_V_ERR_IP_ADDRESS_MISMATCH",
+ "X509_V_ERR_DANE_NO_MATCH",
+ "X509_V_ERR_EE_KEY_TOO_SMALL",
+ "X509_V_ERR_CA_KEY_TOO_SMALL",
+ "X509_V_ERR_CA_MD_TOO_WEAK",
+ "X509_V_ERR_INVALID_CALL",
+ "X509_V_ERR_STORE_LOOKUP",
+ "X509_V_ERR_NO_VALID_SCTS",
+ "X509_V_ERR_PROXY_SUBJECT_NAME_VIOLATION",
+ "X509_V_ERR_OCSP_VERIFY_NEEDED",
+ "X509_V_ERR_OCSP_VERIFY_FAILED",
+ "X509_V_ERR_OCSP_CERT_UNKNOWN",
NULL
};
@@ -390,7 +537,7 @@ Ssl::ParseErrorString(const char *name, Security::Errors &errors)
return false; // not reached
}
-const char *Ssl::GetErrorName(Security::ErrorCode value)
+const char *Ssl::GetErrorName(Security::ErrorCode value, const bool prefixRawCode)
{
if (TheSslErrors.empty())
loadSslErrorMap();
@@ -399,7 +546,9 @@ const char *Ssl::GetErrorName(Security::ErrorCode value)
if (it != TheSslErrors.end())
return it->second->name;
- return NULL;
+ static char tmpBuffer[128];
+ snprintf(tmpBuffer, sizeof(tmpBuffer), "%s%d", prefixRawCode ? "SSL_ERR=" : "", (int)value);
+ return tmpBuffer;
}
bool
@@ -529,21 +678,14 @@ const char *Ssl::ErrorDetail::notafter() const
*/
const char *Ssl::ErrorDetail::err_code() const
{
- static char tmpBuffer[64];
// We can use the GetErrorName but using the detailEntry is faster,
// so try it first.
- const char *err = detailEntry.name.termedBuf();
+ if (const char *err = detailEntry.name.termedBuf())
+ return err;
// error details not loaded yet or not defined in error_details.txt,
// try the GetErrorName...
- if (!err)
- err = GetErrorName(error_no);
-
- if (!err) {
- snprintf(tmpBuffer, 64, "%d", (int)error_no);
- err = tmpBuffer;
- }
- return err;
+ return GetErrorName(error_no);
}
/**
diff --git a/src/ssl/ErrorDetail.h b/src/ssl/ErrorDetail.h
index 48dc405..0eec0a9 100644
--- a/src/ssl/ErrorDetail.h
+++ b/src/ssl/ErrorDetail.h
@@ -26,8 +26,9 @@ bool ParseErrorString(const char *name, Security::Errors &);
/// The Security::ErrorCode code of the error described by "name".
Security::ErrorCode GetErrorCode(const char *name);
-/// The string representation of the TLS error "value"
-const char *GetErrorName(Security::ErrorCode value);
+/// \return string representation of a known TLS error (or a raw error code)
+/// \param prefixRawCode whether to prefix raw codes with "SSL_ERR="
+const char *GetErrorName(Security::ErrorCode value, const bool prefixRawCode = false);
/// A short description of the TLS error "value"
const char *GetErrorDescr(Security::ErrorCode value);

View File

@ -0,0 +1,105 @@
commit ea12a34d338b962707d5078d6d1fc7c6eb119a22
Author: Alex Rousskov <rousskov@measurement-factory.com>
Date: 2020-05-13 14:05:00 +0000
Validate Content-Length value prefix (#629)
The new code detects all invalid Content-Length prefixes but the old
code was already rejecting most invalid prefixes using strtoll(). The
newly covered (and now rejected) invalid characters are
* explicit "+" sign;
* explicit "-" sign in "-0" values;
* isspace(3) characters that are not (relaxed) OWS characters.
In most deployment environments, the last set is probably empty because
the relaxed OWS set has all the POSIX/C isspace(3) characters but the
new line, and the new line is unlikely to sneak in past other checks.
Thank you, Amit Klein <amit.klein@safebreach.com>, for elevating the
importance of this 2016 TODO (added in commit a1b9ec2).
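As a rough, self-contained illustration of the prefix rule above (not Squid's code -- the real check is Http::ContentLengthInterpreter::findDigits() in the diff below, built on Squid's CharacterSet tables), the sketch accepts a value whose first non-OWS character is a digit and rejects explicit signs, stray isspace(3) characters, and empty values. The isRelaxedOWS() helper is an assumption that follows the commit message: every isspace(3) character except the new line.

// Illustrative sketch only -- not Squid's implementation.
#include <cassert>
#include <cstring>

// Assumed relaxed OWS per the commit message: all isspace(3) characters but '\n'.
static bool isRelaxedOWS(const char ch)
{
    return ch == ' ' || ch == '\t' || ch == '\v' || ch == '\f' || ch == '\r';
}

// Returns the start of the digit sequence, or nullptr for the newly rejected
// prefixes: explicit "+"/"-" signs, non-OWS whitespace, or an empty value.
static const char *findDigits(const char *p, const char * const end)
{
    while (p < end) {
        if (*p >= '0' && *p <= '9')
            return p;
        if (!isRelaxedOWS(*p))
            return nullptr;
        ++p;
    }
    return nullptr;
}

int main()
{
    const char *ok = " \t123";    // leading OWS then digits: accepted
    const char *plus = "+123";    // explicit sign: now rejected
    const char *minusZero = "-0"; // explicit sign: now rejected
    assert(findDigits(ok, ok + strlen(ok)) != nullptr);
    assert(findDigits(plus, plus + strlen(plus)) == nullptr);
    assert(findDigits(minusZero, minusZero + strlen(minusZero)) == nullptr);
}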
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index 36957f2..c10a221 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -25,6 +25,7 @@ Thank you!
Alex Wu <alex_wu2012@hotmail.com>
Alin Nastac <mrness@gentoo.org>
Alter <alter@alter.org.ua>
+ Amit Klein <amit.klein@safebreach.com>
Amos Jeffries
Amos Jeffries <amosjeffries@squid-cache.org>
Amos Jeffries <squid3@treenet.co.nz>
diff --git a/src/http/ContentLengthInterpreter.cc b/src/http/ContentLengthInterpreter.cc
index 3fdf7de..a3741eb 100644
--- a/src/http/ContentLengthInterpreter.cc
+++ b/src/http/ContentLengthInterpreter.cc
@@ -28,6 +28,24 @@ Http::ContentLengthInterpreter::ContentLengthInterpreter(const int aDebugLevel):
{
}
+/// checks whether all characters before the Content-Length number are allowed
+/// \returns the start of the digit sequence (or nil on errors)
+const char *
+Http::ContentLengthInterpreter::findDigits(const char *prefix, const char * const valueEnd) const
+{
+ // skip leading OWS in RFC 7230's `OWS field-value OWS`
+ const CharacterSet &whitespace = Http::One::Parser::WhitespaceCharacters();
+ while (prefix < valueEnd) {
+ const auto ch = *prefix;
+ if (CharacterSet::DIGIT[ch])
+ return prefix; // common case: a pre-trimmed field value
+ if (!whitespace[ch])
+ return nullptr; // (trimmed) length does not start with a digit
+ ++prefix;
+ }
+ return nullptr; // empty or whitespace-only value
+}
+
/// checks whether all characters after the Content-Length are allowed
bool
Http::ContentLengthInterpreter::goodSuffix(const char *suffix, const char * const end) const
@@ -52,10 +70,19 @@ Http::ContentLengthInterpreter::checkValue(const char *rawValue, const int value
{
Must(!sawBad);
+ const auto valueEnd = rawValue + valueSize;
+
+ const auto digits = findDigits(rawValue, valueEnd);
+ if (!digits) {
+ debugs(55, debugLevel, "WARNING: Leading garbage or empty value in" << Raw("Content-Length", rawValue, valueSize));
+ sawBad = true;
+ return false;
+ }
+
int64_t latestValue = -1;
char *suffix = nullptr;
- // TODO: Handle malformed values with leading signs (e.g., "-0" or "+1").
- if (!httpHeaderParseOffset(rawValue, &latestValue, &suffix)) {
+
+ if (!httpHeaderParseOffset(digits, &latestValue, &suffix)) {
debugs(55, DBG_IMPORTANT, "WARNING: Malformed" << Raw("Content-Length", rawValue, valueSize));
sawBad = true;
return false;
@@ -68,7 +95,7 @@ Http::ContentLengthInterpreter::checkValue(const char *rawValue, const int value
}
// check for garbage after the number
- if (!goodSuffix(suffix, rawValue + valueSize)) {
+ if (!goodSuffix(suffix, valueEnd)) {
debugs(55, debugLevel, "WARNING: Trailing garbage in" << Raw("Content-Length", rawValue, valueSize));
sawBad = true;
return false;
diff --git a/src/http/ContentLengthInterpreter.h b/src/http/ContentLengthInterpreter.h
index ce36e22..f22de91 100644
--- a/src/http/ContentLengthInterpreter.h
+++ b/src/http/ContentLengthInterpreter.h
@@ -46,6 +46,7 @@ public:
bool sawGood;
protected:
+ const char *findDigits(const char *prefix, const char *valueEnd) const;
bool goodSuffix(const char *suffix, const char * const end) const;
bool checkValue(const char *start, const int size);
bool checkList(const String &list);

View File

@ -0,0 +1,44 @@
diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc
index 67543a4..19efc6d 100644
--- a/src/HttpHeader.cc
+++ b/src/HttpHeader.cc
@@ -445,18 +445,6 @@ HttpHeader::parse(const char *header_start, size_t hdrLen)
return 0;
}
- if (e->id == Http::HdrType::OTHER && stringHasWhitespace(e->name.termedBuf())) {
- debugs(55, warnOnError, "WARNING: found whitespace in HTTP header name {" <<
- getStringPrefix(field_start, field_end-field_start) << "}");
-
- if (!Config.onoff.relaxed_header_parser) {
- delete e;
- PROF_stop(HttpHeaderParse);
- clean();
- return 0;
- }
- }
-
addEntry(e);
}
@@ -1451,6 +1439,20 @@ HttpHeaderEntry::parse(const char *field_start, const char *field_end, const htt
}
}
+ /* RFC 7230 section 3.2:
+ *
+ * header-field = field-name ":" OWS field-value OWS
+ * field-name = token
+ * token = 1*TCHAR
+ */
+ for (const char *pos = field_start; pos < (field_start+name_len); ++pos) {
+ if (!CharacterSet::TCHAR[*pos]) {
+ debugs(55, 2, "found header with invalid characters in " <<
+ Raw("field-name", field_start, min(name_len,100)) << "...");
+ return nullptr;
+ }
+ }
+
/* now we know we can parse it */
debugs(55, 9, "parsing HttpHeaderEntry: near '" << getStringPrefix(field_start, field_end-field_start) << "'");

View File

@ -0,0 +1,139 @@
diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc
index dc6e0ff..67543a4 100644
--- a/src/HttpHeader.cc
+++ b/src/HttpHeader.cc
@@ -174,6 +174,7 @@ HttpHeader::operator =(const HttpHeader &other)
update(&other); // will update the mask as well
len = other.len;
conflictingContentLength_ = other.conflictingContentLength_;
+ teUnsupported_ = other.teUnsupported_;
}
return *this;
}
@@ -222,6 +223,7 @@ HttpHeader::clean()
httpHeaderMaskInit(&mask, 0);
len = 0;
conflictingContentLength_ = false;
+ teUnsupported_ = false;
PROF_stop(HttpHeaderClean);
}
@@ -464,11 +466,23 @@ HttpHeader::parse(const char *header_start, size_t hdrLen)
Raw("header", header_start, hdrLen));
}
- if (chunked()) {
+ String rawTe;
+ if (getByIdIfPresent(Http::HdrType::TRANSFER_ENCODING, &rawTe)) {
// RFC 2616 section 4.4: ignore Content-Length with Transfer-Encoding
// RFC 7230 section 3.3.3 #3: Transfer-Encoding overwrites Content-Length
delById(Http::HdrType::CONTENT_LENGTH);
// and clen state becomes irrelevant
+
+ if (rawTe == "chunked") {
+ ; // leave header present for chunked() method
+ } else if (rawTe == "identity") { // deprecated. no coding
+ delById(Http::HdrType::TRANSFER_ENCODING);
+ } else {
+ // This also rejects multiple encodings until we support them properly.
+ debugs(55, warnOnError, "WARNING: unsupported Transfer-Encoding used by client: " << rawTe);
+ teUnsupported_ = true;
+ }
+
} else if (clen.sawBad) {
// ensure our callers do not accidentally see bad Content-Length values
delById(Http::HdrType::CONTENT_LENGTH);
diff --git a/src/HttpHeader.h b/src/HttpHeader.h
index e3553a4..64f294a 100644
--- a/src/HttpHeader.h
+++ b/src/HttpHeader.h
@@ -140,7 +140,13 @@ public:
int hasListMember(Http::HdrType id, const char *member, const char separator) const;
int hasByNameListMember(const char *name, const char *member, const char separator) const;
void removeHopByHopEntries();
- inline bool chunked() const; ///< whether message uses chunked Transfer-Encoding
+
+ /// whether the message uses chunked Transfer-Encoding
+ /// optimized implementation relies on us rejecting/removing other codings
+ bool chunked() const { return has(Http::HdrType::TRANSFER_ENCODING); }
+
+ /// whether message used an unsupported and/or invalid Transfer-Encoding
+ bool unsupportedTe() const { return teUnsupported_; }
/* protected, do not use these, use interface functions instead */
std::vector<HttpHeaderEntry *> entries; /**< parsed fields in raw format */
@@ -158,6 +164,9 @@ protected:
private:
HttpHeaderEntry *findLastEntry(Http::HdrType id) const;
bool conflictingContentLength_; ///< found different Content-Length fields
+ /// unsupported encoding, unnecessary syntax characters, and/or
+ /// invalid field-value found in Transfer-Encoding header
+ bool teUnsupported_ = false;
};
int httpHeaderParseQuotedString(const char *start, const int len, String *val);
@@ -167,13 +176,6 @@ SBuf httpHeaderQuoteString(const char *raw);
void httpHeaderCalcMask(HttpHeaderMask * mask, Http::HdrType http_hdr_type_enums[], size_t count);
-inline bool
-HttpHeader::chunked() const
-{
- return has(Http::HdrType::TRANSFER_ENCODING) &&
- hasListMember(Http::HdrType::TRANSFER_ENCODING, "chunked", ',');
-}
-
void httpHeaderInitModule(void);
#endif /* SQUID_HTTPHEADER_H */
diff --git a/src/client_side.cc b/src/client_side.cc
index 5f5a79e..000a00b 100644
--- a/src/client_side.cc
+++ b/src/client_side.cc
@@ -1600,9 +1600,7 @@ void
clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp, Http::Stream *context)
{
ClientHttpRequest *http = context->http;
- bool chunked = false;
bool mustReplyToOptions = false;
- bool unsupportedTe = false;
bool expectBody = false;
// We already have the request parsed and checked, so we
@@ -1659,13 +1657,7 @@ clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp,
request->http_ver.minor = http_ver.minor;
}
- if (request->header.chunked()) {
- chunked = true;
- } else if (request->header.has(Http::HdrType::TRANSFER_ENCODING)) {
- const String te = request->header.getList(Http::HdrType::TRANSFER_ENCODING);
- // HTTP/1.1 requires chunking to be the last encoding if there is one
- unsupportedTe = te.size() && te != "identity";
- } // else implied identity coding
+ const auto unsupportedTe = request->header.unsupportedTe();
mustReplyToOptions = (request->method == Http::METHOD_OPTIONS) &&
(request->header.getInt64(Http::HdrType::MAX_FORWARDS) == 0);
@@ -1682,6 +1674,7 @@ clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp,
return;
}
+ const auto chunked = request->header.chunked();
if (!chunked && !clientIsContentLengthValid(request.getRaw())) {
clientStreamNode *node = context->getClientReplyContext();
clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
diff --git a/src/http.cc b/src/http.cc
index 9654c4a..6f4d3b2 100644
--- a/src/http.cc
+++ b/src/http.cc
@@ -1292,6 +1292,9 @@ HttpStateData::continueAfterParsingHeader()
} else if (vrep->header.conflictingContentLength()) {
fwd->dontRetry(true);
error = ERR_INVALID_RESP;
+ } else if (vrep->header.unsupportedTe()) {
+ fwd->dontRetry(true);
+ error = ERR_INVALID_RESP;
} else {
return true; // done parsing, got reply, and no error
}

View File

@ -0,0 +1,34 @@
commit b789e719affbb0a6ff9c22095f6ca8db6a5f4926
Author: Eduard Bagdasaryan <eduard.bagdasaryan@measurement-factory.com>
Date: 2020-07-27 15:28:31 +0000
Fix livelocking in peerDigestHandleReply (#698)
peerDigestHandleReply() was missing a premature EOF check. The existing
peerDigestFetchedEnough() cannot detect EOF because it does not have
access to receivedData.length used to indicate the EOF condition. We did
not adjust peerDigestFetchedEnough() because it is abused to check both
post-I/O state and the state after each digest processing step. The
latter invocations lack access to receivedData.length and should not
really bother with EOF anyway.
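In outline, the control flow after this patch looks roughly like the sketch below; DigestFetch, parseAvailableData() and abortFetch() are placeholders for the real peer_digest.cc machinery, so treat this as a sketch of the ordering only: the parser still gets one run on the final zero-length read, and only afterwards is that read treated as a premature EOF.

// Sketch of the ordering only -- placeholder types and helpers, not Squid's code.
#include <cstddef>

struct Buffer { const char *data; size_t length; }; // length == 0 signals EOF
struct DigestFetch { /* parser and offset state elided */ };

static void parseAvailableData(DigestFetch &, const Buffer &) {}            // placeholder
static void abortFetch(DigestFetch &, const char *reason) { (void)reason; } // placeholder

static void handleReply(DigestFetch &fetch, const Buffer &receivedData)
{
    // give the parser one run even when this read is empty ...
    parseAvailableData(fetch, receivedData);

    // ... and only then treat a zero-length read as premature EOF,
    // mirroring the check the patch adds after its do/while loop
    if (receivedData.length == 0) {
        abortFetch(fetch, "premature end of digest reply");
        return;
    }

    // otherwise update offsets and schedule the next read (elided)
}

int main()
{
    DigestFetch fetch;
    handleReply(fetch, Buffer{nullptr, 0}); // an empty read takes the abort path
}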
diff --git a/src/peer_digest.cc b/src/peer_digest.cc
index d48340f97..265f16183 100644
--- a/src/peer_digest.cc
+++ b/src/peer_digest.cc
@@ -483,6 +483,15 @@ peerDigestHandleReply(void *data, StoreIOBuffer receivedData)
} while (cbdataReferenceValid(fetch) && prevstate != fetch->state && fetch->bufofs > 0);
+ // Check for EOF here, thus giving the parser one extra run. We could avoid this overhead by
+ // checking at the beginning of this function. However, in this case, we would have to require
+ // that the parser does not regard EOF as a special condition (it is true now but may change
+ // in the future).
+ if (!receivedData.length) { // EOF
+ peerDigestFetchAbort(fetch, fetch->buf, "premature end of digest reply");
+ return;
+ }
+
/* Update the copy offset */
fetch->offset += receivedData.length;

View File

@ -0,0 +1,27 @@
diff --git a/tools/cachemgr.cc b/tools/cachemgr.cc
index 36d8e38..50bb9b6 100644
--- a/tools/cachemgr.cc
+++ b/tools/cachemgr.cc
@@ -1092,14 +1092,20 @@ make_pub_auth(cachemgr_request * req)
if (!req->passwd || !strlen(req->passwd))
return;
+ auto *rfc1738_username = xstrdup(rfc1738_escape(safe_str(req->user_name)));
+ auto *rfc1738_passwd = xstrdup(rfc1738_escape(req->passwd));
+
/* host | time | user | passwd */
const int bufLen = snprintf(buf, sizeof(buf), "%s|%d|%s|%s",
req->hostname,
(int) now,
- rfc1738_escape(safe_str(req->user_name)),
- rfc1738_escape(req->passwd));
+ rfc1738_username,
+ rfc1738_passwd);
debug("cmgr: pre-encoded for pub: %s\n", buf);
+ safe_free(rfc1738_username);
+ safe_free(rfc1738_passwd);
+
const int encodedLen = base64_encode_len(bufLen);
req->pub_auth = (char *) xmalloc(encodedLen);
struct base64_encode_ctx ctx;

View File

@ -0,0 +1,145 @@
From 771908d313ee9c255adfb5e4fdba4d6797c18409 Mon Sep 17 00:00:00 2001
From: Amos Jeffries <yadij@users.noreply.github.com>
Date: Thu, 7 Mar 2019 13:50:38 +0000
Subject: [PATCH] Bug 4928: Cannot convert non-IPv4 to IPv4 (#379)
... when reaching client_ip_max_connections
The client_ip_max_connections limit is checked before the TCP dst-IP is located for the newly received TCP connection. This leaves Squid unable to fetch the NFMARK or similar
details later on (they do not exist for [::]).
Move client_ip_max_connections test later in the TCP accept process to ensure dst-IP is known when the error is produced.
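A compressed sketch of the reordered accept path, using assumed placeholder helpers (natLookup(), fetchNfmark(), maxPerClientIp) rather than the real Comm::TcpAcceptor::oldAccept() shown in the diff: the NAT/TPROXY lookup and NFMARK retrieval now run before the per-client-IP limit is enforced, and exceeding the limit yields a soft Comm::NOMESSAGE ("try later") instead of a hard Comm::COMM_ERROR.

// Sketch with placeholder helpers -- not Squid's implementation.
#include <cstdio>

enum Flag { OK, NOMESSAGE, COMM_ERROR };

struct Conn { const char *remote = "203.0.113.9"; int established = 11; };

static bool natLookup(Conn &) { return true; }  // stands in for the NAT/TPROXY lookup
static void fetchNfmark(Conn &) {}              // stands in for the Ip::Qos NFMARK fetch
static const int maxPerClientIp = 10;           // stands in for client_ip_max_connections

static Flag acceptOne(Conn &c)
{
    // the real destination IP (and therefore the NFMARK) is resolved first ...
    if (!natLookup(c))
        return COMM_ERROR;
    fetchNfmark(c);

    // ... and only then is the per-client-IP limit checked, now as a
    // recoverable "try later" rather than a non-recoverable error
    if (maxPerClientIp >= 0 && c.established > maxPerClientIp)
        return NOMESSAGE;

    return OK;
}

int main()
{
    Conn c;
    printf("accept result: %d\n", acceptOne(c)); // prints 1 (NOMESSAGE) for this over-limit client
}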
---
src/comm/TcpAcceptor.cc | 82 ++++++++++++++++++++---------------------
1 file changed, 39 insertions(+), 43 deletions(-)
diff --git a/src/comm/TcpAcceptor.cc b/src/comm/TcpAcceptor.cc
index cae92a7b1e..2109913008 100644
--- a/src/comm/TcpAcceptor.cc
+++ b/src/comm/TcpAcceptor.cc
@@ -282,16 +282,7 @@ Comm::TcpAcceptor::acceptOne()
ConnectionPointer newConnDetails = new Connection();
const Comm::Flag flag = oldAccept(newConnDetails);
- /* Check for errors */
- if (!newConnDetails->isOpen()) {
-
- if (flag == Comm::NOMESSAGE) {
- /* register interest again */
- debugs(5, 5, HERE << "try later: " << conn << " handler Subscription: " << theCallSub);
- SetSelect(conn->fd, COMM_SELECT_READ, doAccept, this, 0);
- return;
- }
-
+ if (flag == Comm::COMM_ERROR) {
// A non-recoverable error; notify the caller */
debugs(5, 5, HERE << "non-recoverable error:" << status() << " handler Subscription: " << theCallSub);
if (intendedForUserConnections())
@@ -301,12 +292,16 @@ Comm::TcpAcceptor::acceptOne()
return;
}
- newConnDetails->nfmark = Ip::Qos::getNfmarkFromConnection(newConnDetails, Ip::Qos::dirAccepted);
+ if (flag == Comm::NOMESSAGE) {
+ /* register interest again */
+ debugs(5, 5, "try later: " << conn << " handler Subscription: " << theCallSub);
+ } else {
+ debugs(5, 5, "Listener: " << conn <<
+ " accepted new connection " << newConnDetails <<
+ " handler Subscription: " << theCallSub);
+ notify(flag, newConnDetails);
+ }
- debugs(5, 5, HERE << "Listener: " << conn <<
- " accepted new connection " << newConnDetails <<
- " handler Subscription: " << theCallSub);
- notify(flag, newConnDetails);
SetSelect(conn->fd, COMM_SELECT_READ, doAccept, this, 0);
}
@@ -346,8 +341,8 @@ Comm::TcpAcceptor::notify(const Comm::Flag flag, const Comm::ConnectionPointer &
*
* \retval Comm::OK success. details parameter filled.
* \retval Comm::NOMESSAGE attempted accept() but nothing useful came in.
- * \retval Comm::COMM_ERROR an outright failure occurred.
* Or this client has too many connections already.
+ * \retval Comm::COMM_ERROR an outright failure occurred.
*/
Comm::Flag
Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)
@@ -382,15 +377,6 @@ Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)
details->fd = sock;
details->remote = *gai;
- if ( Config.client_ip_max_connections >= 0) {
- if (clientdbEstablished(details->remote, 0) > Config.client_ip_max_connections) {
- debugs(50, DBG_IMPORTANT, "WARNING: " << details->remote << " attempting more than " << Config.client_ip_max_connections << " connections.");
- Ip::Address::FreeAddr(gai);
- PROF_stop(comm_accept);
- return Comm::COMM_ERROR;
- }
- }
-
// lookup the local-end details of this new connection
Ip::Address::InitAddr(gai);
details->local.setEmpty();
@@ -404,6 +390,34 @@ Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)
details->local = *gai;
Ip::Address::FreeAddr(gai);
+ // Perform NAT or TPROXY operations to retrieve the real client/dest IP addresses
+ if (conn->flags&(COMM_TRANSPARENT|COMM_INTERCEPTION) && !Ip::Interceptor.Lookup(details, conn)) {
+ debugs(50, DBG_IMPORTANT, "ERROR: NAT/TPROXY lookup failed to locate original IPs on " << details);
+ // Failed.
+ PROF_stop(comm_accept);
+ return Comm::COMM_ERROR;
+ }
+
+#if USE_SQUID_EUI
+ if (Eui::TheConfig.euiLookup) {
+ if (details->remote.isIPv4()) {
+ details->remoteEui48.lookup(details->remote);
+ } else if (details->remote.isIPv6()) {
+ details->remoteEui64.lookup(details->remote);
+ }
+ }
+#endif
+
+ details->nfmark = Ip::Qos::getNfmarkFromConnection(details, Ip::Qos::dirAccepted);
+
+ if (Config.client_ip_max_connections >= 0) {
+ if (clientdbEstablished(details->remote, 0) > Config.client_ip_max_connections) {
+ debugs(50, DBG_IMPORTANT, "WARNING: " << details->remote << " attempting more than " << Config.client_ip_max_connections << " connections.");
+ PROF_stop(comm_accept);
+ return Comm::NOMESSAGE;
+ }
+ }
+
/* fdstat update */
// XXX : these are not all HTTP requests. use a note about type and ip:port details->
// so we end up with a uniform "(HTTP|FTP-data|HTTPS|...) remote-ip:remote-port"
@@ -425,24 +439,6 @@ Comm::TcpAcceptor::oldAccept(Comm::ConnectionPointer &details)
/* IFF the socket is (tproxy) transparent, pass the flag down to allow spoofing */
F->flags.transparent = fd_table[conn->fd].flags.transparent; // XXX: can we remove this line yet?
- // Perform NAT or TPROXY operations to retrieve the real client/dest IP addresses
- if (conn->flags&(COMM_TRANSPARENT|COMM_INTERCEPTION) && !Ip::Interceptor.Lookup(details, conn)) {
- debugs(50, DBG_IMPORTANT, "ERROR: NAT/TPROXY lookup failed to locate original IPs on " << details);
- // Failed.
- PROF_stop(comm_accept);
- return Comm::COMM_ERROR;
- }
-
-#if USE_SQUID_EUI
- if (Eui::TheConfig.euiLookup) {
- if (details->remote.isIPv4()) {
- details->remoteEui48.lookup(details->remote);
- } else if (details->remote.isIPv6()) {
- details->remoteEui64.lookup(details->remote);
- }
- }
-#endif
-
PROF_stop(comm_accept);
return Comm::OK;
}

View File

@ -2,7 +2,7 @@
Name: squid
Version: 4.11
Release: 1%{?dist}
Release: 4%{?dist}
Summary: The Squid proxy caching server
Epoch: 7
# See CREDITS for breakdown of non GPLv2+ code
@ -35,8 +35,22 @@ Patch205: squid-4.11-large-acl.patch
Patch206: squid-4.11-active-ftp.patch
# https://github.com/squid-cache/squid/commit/c26cd1cb6a60ff196ef13c00e82576d3bfeb2e30
Patch207: squid-4.11-systemd.patch
Patch208: squid-4.11-convert-ipv4.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1890606
Patch209: squid-4.11-cache-mgr-auth-strdup.patch
# Security fixes
# https://bugzilla.redhat.com/show_bug.cgi?id=1852554
Patch500: squid-4.11-CVE-2020-14058.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1852550
Patch501: squid-4.11-CVE-2020-15049.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1871705
Patch502: squid-4.11-CVE-2020-24606.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1871702
Patch503: squid-4.11-CVE-2020-15811.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1871700
Patch504: squid-4.11-CVE-2020-15810.patch
Requires: bash >= 2.0
Requires(pre): shadow-utils
@ -96,6 +110,15 @@ lookup program (dnsserver), a program for retrieving FTP data
%patch205 -p1 -b .large_acl
%patch206 -p1 -b .active-ftp
%patch207 -p1 -b .systemd
%patch208 -p1 -R -b .convert-ipv4
%patch209 -p1 -b .cachemgr-strdup
# Security patches
%patch500 -p1 -b .cve-2020-14058
%patch501 -p1 -b .cve-2020-15049
%patch502 -p1 -b .cve-2020-24606
%patch503 -p1 -b .CVE-2020-15811
%patch504 -p1 -b .CVE-2020-15810
# https://bugzilla.redhat.com/show_bug.cgi?id=1679526
# Patch in the vendor documentation and used different location for documentation
@ -312,6 +335,23 @@ fi
%changelog
* Mon Oct 26 2020 Lubos Uhliarik <luhliari@redhat.com> - 7:4.11-4
- Resolves: #1890606 - Fix for CVE 2019-13345 breaks authentication in
cachemgr.cgi
* Wed Aug 26 2020 Lubos Uhliarik <luhliari@redhat.com> - 7:4.11-3
- Resolves: #1871705 - CVE-2020-24606 squid: Improper Input Validation could
result in a DoS
- Resolves: #1871702 - CVE-2020-15811 squid: HTTP Request Splitting could result
in cache poisoning
- Resolves: #1871700 - CVE-2020-15810 squid: HTTP Request Smuggling could result
in cache poisoning
* Thu Jul 02 2020 Lubos Uhliarik <luhliari@redhat.com> - 7:4.11-2
- Resolves: #1853130 - CVE-2020-15049 squid:4/squid: request smuggling and
poisoning attack against the HTTP cache
- Resolves: #1853136 - CVE-2020-14058 squid:4/squid: DoS in TLS handshake
* Thu May 07 2020 Lubos Uhliarik <luhliari@redhat.com> - 7:4.11-1
- new version 4.11
- libsystemd integration