import squid-4.11-3.module+el8.3.0+7851+7808b5f9

CentOS Sources 2020-10-15 14:11:53 +00:00 committed by Andrew Lukoshko
parent 34994580ae
commit d702a1cc42
4 changed files with 236 additions and 1 deletion


@@ -0,0 +1,44 @@
diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc
index 67543a4..19efc6d 100644
--- a/src/HttpHeader.cc
+++ b/src/HttpHeader.cc
@@ -445,18 +445,6 @@ HttpHeader::parse(const char *header_start, size_t hdrLen)
return 0;
}
- if (e->id == Http::HdrType::OTHER && stringHasWhitespace(e->name.termedBuf())) {
- debugs(55, warnOnError, "WARNING: found whitespace in HTTP header name {" <<
- getStringPrefix(field_start, field_end-field_start) << "}");
-
- if (!Config.onoff.relaxed_header_parser) {
- delete e;
- PROF_stop(HttpHeaderParse);
- clean();
- return 0;
- }
- }
-
addEntry(e);
}
@@ -1451,6 +1439,20 @@ HttpHeaderEntry::parse(const char *field_start, const char *field_end, const htt
}
}
+ /* RFC 7230 section 3.2:
+ *
+ * header-field = field-name ":" OWS field-value OWS
+ * field-name = token
+ * token = 1*TCHAR
+ */
+ for (const char *pos = field_start; pos < (field_start+name_len); ++pos) {
+ if (!CharacterSet::TCHAR[*pos]) {
+ debugs(55, 2, "found header with invalid characters in " <<
+ Raw("field-name", field_start, min(name_len,100)) << "...");
+ return nullptr;
+ }
+ }
+
/* now we know we can parse it */
debugs(55, 9, "parsing HttpHeaderEntry: near '" << getStringPrefix(field_start, field_end-field_start) << "'");
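
For context, the loop added above enforces the RFC 7230 field-name rule quoted in the patch comment. Below is a minimal standalone sketch of the same check; isTchar() and validFieldName() are hypothetical stand-ins for Squid's CharacterSet::TCHAR lookup, not Squid code:

// Sketch: reject any field-name byte outside RFC 7230 tchar.
// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
//         "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
#include <cstddef>
#include <cstring>
#include <iostream>

static bool isTchar(unsigned char c)
{
    static const char extra[] = "!#$%&'*+-.^_`|~";
    return c != '\0' &&
           ((c >= '0' && c <= '9') ||
            (c >= 'A' && c <= 'Z') ||
            (c >= 'a' && c <= 'z') ||
            std::strchr(extra, c) != nullptr);
}

static bool validFieldName(const char *name, size_t len)
{
    for (size_t i = 0; i < len; ++i)
        if (!isTchar(static_cast<unsigned char>(name[i])))
            return false; // mirrors the patched loop returning nullptr
    return true;
}

int main()
{
    std::cout << validFieldName("Content-Length", 14) << "\n";  // 1: a valid token
    std::cout << validFieldName("Content-Length ", 15) << "\n"; // 0: trailing space rejected
}

The second call fails on the trailing space, which is exactly the whitespace-in-name case the deleted relaxed-parser branch used to tolerate; after this patch it is rejected unconditionally.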


@@ -0,0 +1,139 @@
diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc
index dc6e0ff..67543a4 100644
--- a/src/HttpHeader.cc
+++ b/src/HttpHeader.cc
@@ -174,6 +174,7 @@ HttpHeader::operator =(const HttpHeader &other)
update(&other); // will update the mask as well
len = other.len;
conflictingContentLength_ = other.conflictingContentLength_;
+ teUnsupported_ = other.teUnsupported_;
}
return *this;
}
@@ -222,6 +223,7 @@ HttpHeader::clean()
httpHeaderMaskInit(&mask, 0);
len = 0;
conflictingContentLength_ = false;
+ teUnsupported_ = false;
PROF_stop(HttpHeaderClean);
}
@@ -464,11 +466,23 @@ HttpHeader::parse(const char *header_start, size_t hdrLen)
Raw("header", header_start, hdrLen));
}
- if (chunked()) {
+ String rawTe;
+ if (getByIdIfPresent(Http::HdrType::TRANSFER_ENCODING, &rawTe)) {
// RFC 2616 section 4.4: ignore Content-Length with Transfer-Encoding
// RFC 7230 section 3.3.3 #3: Transfer-Encoding overwrites Content-Length
delById(Http::HdrType::CONTENT_LENGTH);
// and clen state becomes irrelevant
+
+ if (rawTe == "chunked") {
+ ; // leave header present for chunked() method
+ } else if (rawTe == "identity") { // deprecated. no coding
+ delById(Http::HdrType::TRANSFER_ENCODING);
+ } else {
+ // This also rejects multiple encodings until we support them properly.
+ debugs(55, warnOnError, "WARNING: unsupported Transfer-Encoding used by client: " << rawTe);
+ teUnsupported_ = true;
+ }
+
} else if (clen.sawBad) {
// ensure our callers do not accidentally see bad Content-Length values
delById(Http::HdrType::CONTENT_LENGTH);
diff --git a/src/HttpHeader.h b/src/HttpHeader.h
index e3553a4..64f294a 100644
--- a/src/HttpHeader.h
+++ b/src/HttpHeader.h
@@ -140,7 +140,13 @@ public:
int hasListMember(Http::HdrType id, const char *member, const char separator) const;
int hasByNameListMember(const char *name, const char *member, const char separator) const;
void removeHopByHopEntries();
- inline bool chunked() const; ///< whether message uses chunked Transfer-Encoding
+
+ /// whether the message uses chunked Transfer-Encoding
+ /// optimized implementation relies on us rejecting/removing other codings
+ bool chunked() const { return has(Http::HdrType::TRANSFER_ENCODING); }
+
+ /// whether message used an unsupported and/or invalid Transfer-Encoding
+ bool unsupportedTe() const { return teUnsupported_; }
/* protected, do not use these, use interface functions instead */
std::vector<HttpHeaderEntry *> entries; /**< parsed fields in raw format */
@@ -158,6 +164,9 @@ protected:
private:
HttpHeaderEntry *findLastEntry(Http::HdrType id) const;
bool conflictingContentLength_; ///< found different Content-Length fields
+ /// unsupported encoding, unnecessary syntax characters, and/or
+ /// invalid field-value found in Transfer-Encoding header
+ bool teUnsupported_ = false;
};
int httpHeaderParseQuotedString(const char *start, const int len, String *val);
@@ -167,13 +176,6 @@ SBuf httpHeaderQuoteString(const char *raw);
void httpHeaderCalcMask(HttpHeaderMask * mask, Http::HdrType http_hdr_type_enums[], size_t count);
-inline bool
-HttpHeader::chunked() const
-{
- return has(Http::HdrType::TRANSFER_ENCODING) &&
- hasListMember(Http::HdrType::TRANSFER_ENCODING, "chunked", ',');
-}
-
void httpHeaderInitModule(void);
#endif /* SQUID_HTTPHEADER_H */
diff --git a/src/client_side.cc b/src/client_side.cc
index 5f5a79e..000a00b 100644
--- a/src/client_side.cc
+++ b/src/client_side.cc
@@ -1600,9 +1600,7 @@ void
clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp, Http::Stream *context)
{
ClientHttpRequest *http = context->http;
- bool chunked = false;
bool mustReplyToOptions = false;
- bool unsupportedTe = false;
bool expectBody = false;
// We already have the request parsed and checked, so we
@@ -1659,13 +1657,7 @@ clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp,
request->http_ver.minor = http_ver.minor;
}
- if (request->header.chunked()) {
- chunked = true;
- } else if (request->header.has(Http::HdrType::TRANSFER_ENCODING)) {
- const String te = request->header.getList(Http::HdrType::TRANSFER_ENCODING);
- // HTTP/1.1 requires chunking to be the last encoding if there is one
- unsupportedTe = te.size() && te != "identity";
- } // else implied identity coding
+ const auto unsupportedTe = request->header.unsupportedTe();
mustReplyToOptions = (request->method == Http::METHOD_OPTIONS) &&
(request->header.getInt64(Http::HdrType::MAX_FORWARDS) == 0);
@@ -1682,6 +1674,7 @@ clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp,
return;
}
+ const auto chunked = request->header.chunked();
if (!chunked && !clientIsContentLengthValid(request.getRaw())) {
clientStreamNode *node = context->getClientReplyContext();
clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
diff --git a/src/http.cc b/src/http.cc
index 9654c4a..6f4d3b2 100644
--- a/src/http.cc
+++ b/src/http.cc
@@ -1292,6 +1292,9 @@ HttpStateData::continueAfterParsingHeader()
} else if (vrep->header.conflictingContentLength()) {
fwd->dontRetry(true);
error = ERR_INVALID_RESP;
+ } else if (vrep->header.unsupportedTe()) {
+ fwd->dontRetry(true);
+ error = ERR_INVALID_RESP;
} else {
return true; // done parsing, got reply, and no error
}
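
Taken together, this patch moves all Transfer-Encoding triage into HttpHeader::parse(): Content-Length is dropped whenever any Transfer-Encoding is present (RFC 7230 section 3.3.3), and the header survives parsing only when its value is exactly "chunked", which is why chunked() can be reduced to has(Http::HdrType::TRANSFER_ENCODING). A rough standalone sketch of that triage, using hypothetical names (TeState, classifyTe) rather than Squid's actual header mutations:

// Sketch of the Transfer-Encoding triage centralized in HttpHeader::parse().
#include <iostream>
#include <string>

enum class TeState { Chunked, NoCoding, Unsupported };

static TeState classifyTe(const std::string &rawTe)
{
    if (rawTe == "chunked")
        return TeState::Chunked;  // keep the header; chunked() stays true
    if (rawTe == "identity")
        return TeState::NoCoding; // deprecated "no coding" alias: drop the header
    // anything else, including multi-coding lists such as "gzip, chunked",
    // is treated as unsupported until multiple encodings are handled properly
    return TeState::Unsupported;  // real code sets teUnsupported_
}

int main()
{
    std::cout << static_cast<int>(classifyTe("chunked")) << "\n";       // 0
    std::cout << static_cast<int>(classifyTe("identity")) << "\n";      // 1
    std::cout << static_cast<int>(classifyTe("gzip, chunked")) << "\n"; // 2
}

Both directions then consume the single teUnsupported_ flag: clientProcessRequest() rejects the request (the hunks above replace its ad-hoc Transfer-Encoding checks), and continueAfterParsingHeader() maps it to ERR_INVALID_RESP for replies.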


@@ -0,0 +1,34 @@
commit b789e719affbb0a6ff9c22095f6ca8db6a5f4926
Author: Eduard Bagdasaryan <eduard.bagdasaryan@measurement-factory.com>
Date: 2020-07-27 15:28:31 +0000
Fix livelocking in peerDigestHandleReply (#698)
peerDigestHandleReply() was missing a premature EOF check. The existing
peerDigestFetchedEnough() cannot detect EOF because it does not have
access to receivedData.length used to indicate the EOF condition. We did
not adjust peerDigestFetchedEnough() because it is abused to check both
post-I/O state and the state after each digest processing step. The
latter invocations lack access to receivedData.length and should not
really bother with EOF anyway.
diff --git a/src/peer_digest.cc b/src/peer_digest.cc
index d48340f97..265f16183 100644
--- a/src/peer_digest.cc
+++ b/src/peer_digest.cc
@@ -483,6 +483,15 @@ peerDigestHandleReply(void *data, StoreIOBuffer receivedData)
} while (cbdataReferenceValid(fetch) && prevstate != fetch->state && fetch->bufofs > 0);
+ // Check for EOF here, thus giving the parser one extra run. We could avoid this overhead by
+ // checking at the beginning of this function. However, in this case, we would have to require
+ // that the parser does not regard EOF as a special condition (it is true now but may change
+ // in the future).
+ if (!receivedData.length) { // EOF
+ peerDigestFetchAbort(fetch, fetch->buf, "premature end of digest reply");
+ return;
+ }
+
/* Update the copy offset */
fetch->offset += receivedData.length;
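
The fix relies on the store-delivery convention that a zero-length receivedData buffer signals EOF. A self-contained, hypothetical analogue of the flow (handleReply() and Buffer stand in for peerDigestHandleReply() and StoreIOBuffer; none of these names are Squid's):

// Sketch: treat a zero-length delivery as EOF, after one final parse pass.
#include <cstddef>
#include <iostream>

struct Buffer { const char *data; size_t length; };

static void handleReply(Buffer received, size_t &offset)
{
    // ... parse whatever is already buffered (one extra run even on EOF) ...

    if (received.length == 0) { // EOF: upstream delivered no more bytes
        std::cout << "premature end of digest reply\n"; // abort the fetch
        return;
    }
    offset += received.length;  // otherwise account for the new bytes
    // ... and schedule the next read ...
}

int main()
{
    size_t offset = 0;
    handleReply({"digest-bytes", 12}, offset); // normal delivery
    handleReply({nullptr, 0}, offset);         // EOF triggers the abort path
}

Checking after the parse loop rather than on entry gives the parser one final run over buffered data, as the added comment explains, at the cost of a single extra pass.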


@@ -2,7 +2,7 @@
 Name: squid
 Version: 4.11
-Release: 2%{?dist}
+Release: 3%{?dist}
 Summary: The Squid proxy caching server
 Epoch: 7
 # See CREDITS for breakdown of non GPLv2+ code
@@ -42,6 +42,13 @@ Patch208: squid-4.11-convert-ipv4.patch
 Patch500: squid-4.11-CVE-2020-14058.patch
 # https://bugzilla.redhat.com/show_bug.cgi?id=1852550
 Patch501: squid-4.11-CVE-2020-15049.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1871705
+Patch502: squid-4.11-CVE-2020-24606.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1871702
+Patch503: squid-4.11-CVE-2020-15811.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=1871700
+Patch504: squid-4.11-CVE-2020-15810.patch
 Requires: bash >= 2.0
 Requires(pre): shadow-utils
@@ -106,6 +113,9 @@ lookup program (dnsserver), a program for retrieving FTP data
 # Security patches
 %patch500 -p1 -b .cve-2020-14058
 %patch501 -p1 -b .cve-2020-15049
+%patch502 -p1 -b .cve-2020-24606
+%patch503 -p1 -b .CVE-2020-15811
+%patch504 -p1 -b .CVE-2020-15810
 # https://bugzilla.redhat.com/show_bug.cgi?id=1679526
 # Patch in the vendor documentation and used different location for documentation
@@ -322,6 +332,14 @@ fi
 %changelog
+* Wed Aug 26 2020 Lubos Uhliarik <luhliari@redhat.com> - 7:4.11-3
+- Resolves: #1871705 - CVE-2020-24606 squid: Improper Input Validation could
+  result in a DoS
+- Resolves: #1871702 - CVE-2020-15811 squid: HTTP Request Splitting could result
+  in cache poisoning
+- Resolves: #1871700 - CVE-2020-15810 squid: HTTP Request Smuggling could result
+  in cache poisoning
+
 * Thu Jul 02 2020 Lubos Uhliarik <luhliari@redhat.com> - 7:4.11-2
 - Resolves: #1853130 - CVE-2020-15049 squid:4/squid: request smuggling and
   poisoning attack against the HTTP cache