import CS squid-5.5-10.el9

eabdullin 2024-03-28 11:51:24 +00:00
parent db1800b931
commit 4741fa67de
10 changed files with 6106 additions and 1 deletion


@@ -0,0 +1,24 @@
diff --git a/src/anyp/Uri.cc b/src/anyp/Uri.cc
index 20b9bf1..81ebb18 100644
--- a/src/anyp/Uri.cc
+++ b/src/anyp/Uri.cc
@@ -173,6 +173,10 @@ urlInitialize(void)
assert(0 == matchDomainName("*.foo.com", ".foo.com", mdnHonorWildcards));
assert(0 != matchDomainName("*.foo.com", "foo.com", mdnHonorWildcards));
+ assert(0 != matchDomainName("foo.com", ""));
+ assert(0 != matchDomainName("foo.com", "", mdnHonorWildcards));
+ assert(0 != matchDomainName("foo.com", "", mdnRejectSubsubDomains));
+
/* more cases? */
}
@@ -756,6 +760,8 @@ matchDomainName(const char *h, const char *d, MatchDomainNameFlags flags)
return -1;
dl = strlen(d);
+ if (dl == 0)
+ return 1;
/*
* Start at the ends of the two strings and work towards the
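
For illustration, a minimal standalone sketch (not the real matchDomainName(), whose full signature and flags live in src/anyp/Uri.cc) of the convention the added dl == 0 guard follows: a non-zero result means "no match", so an empty domain string now fails fast instead of being walked from its end.

#include <cassert>
#include <cstring>
#include <strings.h>

// Simplified stand-in for matchDomainName(): 0 means "match", non-zero "no match".
static int matchDomainSimple(const char *h, const char *d)
{
    assert(h && d);
    const size_t dl = strlen(d);
    if (dl == 0)
        return 1;                // mirrors the added guard: empty domain never matches
    const size_t hl = strlen(h);
    if (hl < dl)
        return -1;
    return strcasecmp(h + (hl - dl), d);   // suffix comparison only; flags omitted
}

int main()
{
    assert(0 == matchDomainSimple("foo.com", "foo.com"));
    assert(0 != matchDomainSimple("foo.com", "bar.com"));
    assert(0 != matchDomainSimple("foo.com", ""));     // the new test case above
    return 0;
}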

File diff suppressed because it is too large


@@ -0,0 +1,178 @@
From 05f6af2f4c85cc99323cfff6149c3d74af661b6d Mon Sep 17 00:00:00 2001
From: Amos Jeffries <yadij@users.noreply.github.com>
Date: Fri, 13 Oct 2023 08:44:16 +0000
Subject: [PATCH] RFC 9112: Improve HTTP chunked encoding compliance (#1498)
---
src/http/one/Parser.cc | 8 +-------
src/http/one/Parser.h | 4 +---
src/http/one/TeChunkedParser.cc | 23 ++++++++++++++++++-----
src/parser/Tokenizer.cc | 12 ++++++++++++
src/parser/Tokenizer.h | 7 +++++++
5 files changed, 39 insertions(+), 15 deletions(-)
diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
index c78ddd7f0..291ae39f0 100644
--- a/src/http/one/Parser.cc
+++ b/src/http/one/Parser.cc
@@ -65,16 +65,10 @@ Http::One::Parser::DelimiterCharacters()
void
Http::One::Parser::skipLineTerminator(Tokenizer &tok) const
{
- if (tok.skip(Http1::CrLf()))
- return;
-
if (Config.onoff.relaxed_header_parser && tok.skipOne(CharacterSet::LF))
return;
- if (tok.atEnd() || (tok.remaining().length() == 1 && tok.remaining().at(0) == '\r'))
- throw InsufficientInput();
-
- throw TexcHere("garbage instead of CRLF line terminator");
+ tok.skipRequired("line-terminating CRLF", Http1::CrLf());
}
/// all characters except the LF line terminator
diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
index f83c01a9a..aab895583 100644
--- a/src/http/one/Parser.h
+++ b/src/http/one/Parser.h
@@ -124,9 +124,7 @@ protected:
* detect and skip the CRLF or (if tolerant) LF line terminator
* consume from the tokenizer.
*
- * \throws exception on bad or InsuffientInput.
- * \retval true only if line terminator found.
- * \retval false incomplete or missing line terminator, need more data.
+ * \throws exception on bad or InsufficientInput
*/
void skipLineTerminator(Tokenizer &) const;
diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index 1434100b6..8bdb65abb 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -91,6 +91,11 @@ Http::One::TeChunkedParser::parseChunkSize(Tokenizer &tok)
{
Must(theChunkSize <= 0); // Should(), really
+ static const SBuf bannedHexPrefixLower("0x");
+ static const SBuf bannedHexPrefixUpper("0X");
+ if (tok.skip(bannedHexPrefixLower) || tok.skip(bannedHexPrefixUpper))
+ throw TextException("chunk starts with 0x", Here());
+
int64_t size = -1;
if (tok.int64(size, 16, false) && !tok.atEnd()) {
if (size < 0)
@@ -121,7 +126,7 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
// bad or insufficient input, like in the code below. TODO: Expand up.
try {
parseChunkExtensions(tok); // a possibly empty chunk-ext list
- skipLineTerminator(tok);
+ tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
buf_ = tok.remaining();
parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
return true;
@@ -132,12 +137,14 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
// other exceptions bubble up to kill message parsing
}
-/// Parses the chunk-ext list (RFC 7230 section 4.1.1 and its Errata #4667):
+/// Parses the chunk-ext list (RFC 9112 section 7.1.1 and its Errata #4667):
/// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
void
-Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &tok)
+Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
{
do {
+ auto tok = callerTok;
+
ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
if (!tok.skip(';'))
@@ -145,6 +152,7 @@ Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &tok)
parseOneChunkExtension(tok);
buf_ = tok.remaining(); // got one extension
+ callerTok = tok;
} while (true);
}
@@ -158,11 +166,14 @@ Http::One::ChunkExtensionValueParser::Ignore(Tokenizer &tok, const SBuf &extName
/// Parses a single chunk-ext list element:
/// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
void
-Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &tok)
+Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &callerTok)
{
+ auto tok = callerTok;
+
ParseBws(tok); // Bug 4492: ICAP servers send SP before chunk-ext-name
const auto extName = tok.prefix("chunk-ext-name", CharacterSet::TCHAR);
+ callerTok = tok; // in case we determine that this is a valueless chunk-ext
ParseBws(tok);
@@ -176,6 +187,8 @@ Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &tok)
customExtensionValueParser->parse(tok, extName);
else
ChunkExtensionValueParser::Ignore(tok, extName);
+
+ callerTok = tok;
}
bool
@@ -209,7 +222,7 @@ Http::One::TeChunkedParser::parseChunkEnd(Tokenizer &tok)
Must(theLeftBodySize == 0); // Should(), really
try {
- skipLineTerminator(tok);
+ tok.skipRequired("chunk CRLF", Http1::CrLf());
buf_ = tok.remaining(); // parse checkpoint
theChunkSize = 0; // done with the current chunk
parsingStage_ = Http1::HTTP_PARSE_CHUNK_SZ;
diff --git a/src/parser/Tokenizer.cc b/src/parser/Tokenizer.cc
index edaffd8d3..15df793b8 100644
--- a/src/parser/Tokenizer.cc
+++ b/src/parser/Tokenizer.cc
@@ -147,6 +147,18 @@ Parser::Tokenizer::skipAll(const CharacterSet &tokenChars)
return success(prefixLen);
}
+void
+Parser::Tokenizer::skipRequired(const char *description, const SBuf &tokenToSkip)
+{
+ if (skip(tokenToSkip) || tokenToSkip.isEmpty())
+ return;
+
+ if (tokenToSkip.startsWith(buf_))
+ throw InsufficientInput();
+
+ throw TextException(ToSBuf("cannot skip ", description), Here());
+}
+
bool
Parser::Tokenizer::skipOne(const CharacterSet &chars)
{
diff --git a/src/parser/Tokenizer.h b/src/parser/Tokenizer.h
index 7bae1ccbb..3cfa7dd6c 100644
--- a/src/parser/Tokenizer.h
+++ b/src/parser/Tokenizer.h
@@ -115,6 +115,13 @@ public:
*/
SBuf::size_type skipAll(const CharacterSet &discardables);
+ /** skips a given character sequence (string);
+ * does nothing if the sequence is empty
+ *
+ * \throws exception on mismatching prefix or InsufficientInput
+ */
+ void skipRequired(const char *description, const SBuf &tokenToSkip);
+
/** Removes a single trailing character from the set.
*
* \return whether a character was removed
--
2.25.1
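
As background, the parseChunkExtensions()/parseOneChunkExtension() changes above follow a "parse a private copy, commit on success" pattern: the caller's tokenizer only advances once a complete chunk-ext element has been consumed, so an exception thrown mid-element cannot leave buf_ pointing inside a half-parsed extension. A minimal sketch of that pattern, using a hypothetical MiniTok in place of Squid's Parser::Tokenizer:

#include <cctype>
#include <iostream>
#include <stdexcept>
#include <string>

// Hypothetical MiniTok used only for this sketch; not the Squid Tokenizer API.
struct MiniTok {
    std::string buf;                                   // unparsed input
    bool skip(char c) {
        if (!buf.empty() && buf[0] == c) { buf.erase(0, 1); return true; }
        return false;
    }
    std::string name() {                               // crude chunk-ext-name parser
        size_t n = 0;
        while (n < buf.size() && std::isalnum(static_cast<unsigned char>(buf[n])))
            ++n;
        if (n == 0)
            throw std::runtime_error("need more data"); // plays the InsufficientInput role
        auto result = buf.substr(0, n);
        buf.erase(0, n);
        return result;
    }
};

// Parse *(";" chunk-ext-name), advancing callerTok only after whole elements.
static void parseExtensions(MiniTok &callerTok)
{
    for (;;) {
        auto tok = callerTok;      // work on a private copy
        if (!tok.skip(';'))
            return;                // no more extensions; callerTok untouched
        tok.name();                // may throw before the element is complete
        callerTok = tok;           // commit: got one whole extension
    }
}

int main()
{
    MiniTok tok{";foo;"};          // second extension is still incomplete
    try { parseExtensions(tok); } catch (const std::exception &) {}
    std::cout << "unparsed: '" << tok.buf << "'\n";   // prints ";": parsing can resume there
    return 0;
}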


@@ -0,0 +1,43 @@
From 052cf082b0faaef4eaaa4e94119d7a1437aac4a3 Mon Sep 17 00:00:00 2001
From: squidadm <squidadm@users.noreply.github.com>
Date: Wed, 18 Oct 2023 04:50:56 +1300
Subject: [PATCH] Fix stack buffer overflow when parsing Digest Authorization
(#1517)
The bug was discovered and detailed by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/digest-overflow.html
where it was filed as "Stack Buffer Overflow in Digest Authentication".
---------
Co-authored-by: Alex Bason <nonsleepr@gmail.com>
Co-authored-by: Amos Jeffries <yadij@users.noreply.github.com>
---
src/auth/digest/Config.cc | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc
index d42831a55..be9f3c433 100644
--- a/src/auth/digest/Config.cc
+++ b/src/auth/digest/Config.cc
@@ -844,11 +844,15 @@ Auth::Digest::Config::decode(char const *proxy_auth, const HttpRequest *request,
break;
case DIGEST_NC:
- if (value.size() != 8) {
+ if (value.size() == 8) {
+ // for historical reasons, the nc value MUST be exactly 8 bytes
+ static_assert(sizeof(digest_request->nc) == 8 + 1, "bad nc buffer size");
+ xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
+ debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
+ } else {
debugs(29, 9, "Invalid nc '" << value << "' in '" << temp << "'");
+ digest_request->nc[0] = 0;
}
- xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
- debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
break;
case DIGEST_CNONCE:
--
2.25.1
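
For context, a minimal standalone sketch (simplified names, not the actual Auth::Digest code) of the corrected logic above: the nonce-count is copied only when it is exactly 8 characters, because the destination is a fixed 9-byte buffer, and anything else is rejected instead of overflowing it.

#include <cstdio>
#include <cstring>

// Simplified stand-in for the digest request object; field name assumed.
struct DigestSketch { char nc[8 + 1]; };

// Accept the nc (nonce count) value only when it is exactly 8 characters long,
// mirroring the patched branch; otherwise leave nc empty.
static void setNonceCount(DigestSketch &req, const char *value)
{
    const size_t len = std::strlen(value);
    if (len == 8) {
        static_assert(sizeof(req.nc) == 8 + 1, "bad nc buffer size");
        std::memcpy(req.nc, value, len + 1);          // includes the terminating NUL
        std::printf("Found noncecount '%s'\n", req.nc);
    } else {
        std::printf("Invalid nc '%s'\n", value);
        req.nc[0] = '\0';
    }
}

int main()
{
    DigestSketch req;
    setNonceCount(req, "00000001");                   // accepted: exactly 8 chars
    setNonceCount(req, "000000000000000001");         // rejected instead of overflowing
    return 0;
}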


@@ -0,0 +1,46 @@
From c67bf049871a49e9871efe50b230a7f37b7039f6 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 25 May 2023 02:10:28 +0000
Subject: [PATCH] Fix userinfo percent-encoding (#1367)
%X expects an unsigned int, and that is what we were giving it. However,
to get to the correct unsigned int value from a (signed) char, one has
to cast to an unsigned char (or equivalent) first.
Broken since inception in commit 7b75100.
Also adjusted similar (commented out) ext_edirectory_userip_acl code.
---
src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc | 2 +-
src/anyp/Uri.cc | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc b/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc
index dbc20ae54..9028d1562 100644
--- a/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc
+++ b/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc
@@ -1612,7 +1612,7 @@ MainSafe(int argc, char **argv)
/* BINARY DEBUGGING *
local_printfx("while() -> bufa[%" PRIuSIZE "]: %s", k, bufa);
for (i = 0; i < k; ++i)
- local_printfx("%02X", bufa[i]);
+ local_printfx("%02X", static_cast<unsigned int>(static_cast<unsigned char>(bufa[i])));
local_printfx("\n");
* BINARY DEBUGGING */
/* Check for CRLF */
diff --git a/src/anyp/Uri.cc b/src/anyp/Uri.cc
index a6a5d5d9e..3d19188e9 100644
--- a/src/anyp/Uri.cc
+++ b/src/anyp/Uri.cc
@@ -70,7 +70,7 @@ AnyP::Uri::Encode(const SBuf &buf, const CharacterSet &ignore)
while (!tk.atEnd()) {
// TODO: Add Tokenizer::parseOne(void).
const auto ch = tk.remaining()[0];
- output.appendf("%%%02X", static_cast<unsigned int>(ch)); // TODO: Optimize using a table
+ output.appendf("%%%02X", static_cast<unsigned int>(static_cast<unsigned char>(ch))); // TODO: Optimize using a table
(void)tk.skip(ch);
if (tk.prefix(goodSection, ignore))
--
2.25.1
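
As a side note, a minimal standalone program (not part of the patch) showing why the intermediate unsigned char cast matters when formatting a possibly signed char with %X:

#include <cstdio>

int main()
{
    const char ch = '\xE9';   // a high-bit byte; plain char is signed on most ABIs

    // Sign-extended: on a typical platform this prints FFFFFFE9, so a single
    // input byte would be rendered as %FFFFFFE9 rather than %E9.
    std::printf("%X\n", static_cast<unsigned int>(ch));

    // Cast through unsigned char first: the byte keeps its 0..255 value (E9).
    std::printf("%02X\n", static_cast<unsigned int>(static_cast<unsigned char>(ch)));
    return 0;
}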


@@ -0,0 +1,30 @@
commit 77b3fb4df0f126784d5fd4967c28ed40eb8d521b
Author: Alex Rousskov <rousskov@measurement-factory.com>
Date: Wed Oct 25 19:41:45 2023 +0000
RFC 1123: Fix date parsing (#1538)
The bug was discovered and detailed by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/datetime-overflow.html
where it was filed as "1-Byte Buffer OverRead in RFC 1123 date/time
Handling".
diff --git a/lib/rfc1123.c b/lib/rfc1123.c
index e5bf9a4d7..cb484cc00 100644
--- a/lib/rfc1123.c
+++ b/lib/rfc1123.c
@@ -50,7 +50,13 @@ make_month(const char *s)
char month[3];
month[0] = xtoupper(*s);
+ if (!month[0])
+ return -1; // protects *(s + 1) below
+
month[1] = xtolower(*(s + 1));
+ if (!month[1])
+ return -1; // protects *(s + 2) below
+
month[2] = xtolower(*(s + 2));
for (i = 0; i < 12; i++)
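
For illustration, a standalone sketch (C++ rather than the original C, simplified month table) of the pattern the two added guards establish: each byte is checked before the next one is read, so a date whose month field is shorter than three characters can no longer cause a read past the string's terminating NUL.

#include <cctype>
#include <cstdio>

// Returns the 0-based month index for a 3-letter abbreviation, or -1.
static int monthIndex(const char *s)
{
    static const char *names[12] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun",
                                     "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };
    char month[3];
    month[0] = std::toupper(static_cast<unsigned char>(s[0]));
    if (!month[0])
        return -1;              // protects the read of s[1]
    month[1] = std::tolower(static_cast<unsigned char>(s[1]));
    if (!month[1])
        return -1;              // protects the read of s[2]
    month[2] = std::tolower(static_cast<unsigned char>(s[2]));
    for (int i = 0; i < 12; ++i)
        if (month[0] == names[i][0] && month[1] == names[i][1] && month[2] == names[i][2])
            return i;
    return -1;
}

int main()
{
    std::printf("%d %d %d\n", monthIndex("Oct"), monthIndex("O"), monthIndex(""));  // 9 -1 -1
    return 0;
}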


@@ -0,0 +1,62 @@
diff --git a/src/ipc.cc b/src/ipc.cc
index 42e11e6..a68e623 100644
--- a/src/ipc.cc
+++ b/src/ipc.cc
@@ -19,6 +19,11 @@
#include "SquidConfig.h"
#include "SquidIpc.h"
#include "tools.h"
+#include <cstdlib>
+
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
static const char *hello_string = "hi there\n";
#ifndef HELLO_BUF_SZ
@@ -365,6 +370,22 @@ ipcCreate(int type, const char *prog, const char *const args[], const char *name
}
PutEnvironment();
+
+ // A dup(2) wrapper that reports and exits the process on errors. The
+ // exiting logic is only suitable for this child process context.
+ const auto dupOrExit = [prog,name](const int oldFd) {
+ const auto newFd = dup(oldFd);
+ if (newFd < 0) {
+ const auto savedErrno = errno;
+ debugs(54, DBG_CRITICAL, "ERROR: Helper process initialization failure: " << name <<
+ Debug::Extra << "helper (CHILD) PID: " << getpid() <<
+ Debug::Extra << "helper program name: " << prog <<
+ Debug::Extra << "dup(2) system call error for FD " << oldFd << ": " << xstrerr(savedErrno));
+ _exit(EXIT_FAILURE);
+ }
+ return newFd;
+ };
+
/*
* This double-dup stuff avoids problems when one of
* crfd, cwfd, or debug_log are in the range 0-2.
@@ -372,17 +393,16 @@ ipcCreate(int type, const char *prog, const char *const args[], const char *name
do {
/* First make sure 0-2 is occupied by something. Gets cleaned up later */
- x = dup(crfd);
- assert(x > -1);
- } while (x < 3 && x > -1);
+ x = dupOrExit(crfd);
+ } while (x < 3);
close(x);
- t1 = dup(crfd);
+ t1 = dupOrExit(crfd);
- t2 = dup(cwfd);
+ t2 = dupOrExit(cwfd);
- t3 = dup(fileno(debug_log));
+ t3 = dupOrExit(fileno(debug_log));
assert(t1 > 2 && t2 > 2 && t3 > 2);
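
As background, a minimal standalone sketch (hypothetical dupOrDie(), simpler diagnostics than the patch's dupOrExit lambda) of the same idea: in a freshly forked helper child, a failed dup(2) is reported with its errno and ends the child via _exit(), rather than tripping an assert().

#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <unistd.h>

// Duplicate oldFd or terminate this (child) process with a diagnostic.
static int dupOrDie(const int oldFd)
{
    const int newFd = dup(oldFd);
    if (newFd < 0) {
        const int savedErrno = errno;
        std::fprintf(stderr, "ERROR: dup(%d) failed: %s\n",
                     oldFd, std::strerror(savedErrno));
        _exit(EXIT_FAILURE);   // _exit(): skip atexit handlers inherited from the parent
    }
    return newFd;
}

int main()
{
    const int copy = dupOrDie(STDIN_FILENO);
    std::printf("stdin duplicated to FD %d\n", copy);
    close(copy);
    return 0;
}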


@@ -0,0 +1,50 @@
diff --git a/src/ClientRequestContext.h b/src/ClientRequestContext.h
index 55a7a43..94a8700 100644
--- a/src/ClientRequestContext.h
+++ b/src/ClientRequestContext.h
@@ -80,6 +80,10 @@ public:
#endif
ErrorState *error; ///< saved error page for centralized/delayed processing
bool readNextRequest; ///< whether Squid should read after error handling
+
+#if FOLLOW_X_FORWARDED_FOR
+ size_t currentXffHopNumber = 0; ///< number of X-Forwarded-For header values processed so far
+#endif
};
#endif /* SQUID_CLIENTREQUESTCONTEXT_H */
diff --git a/src/client_side_request.cc b/src/client_side_request.cc
index f44849e..c7c09d4 100644
--- a/src/client_side_request.cc
+++ b/src/client_side_request.cc
@@ -80,6 +80,11 @@
static const char *const crlf = "\r\n";
#if FOLLOW_X_FORWARDED_FOR
+
+#if !defined(SQUID_X_FORWARDED_FOR_HOP_MAX)
+#define SQUID_X_FORWARDED_FOR_HOP_MAX 64
+#endif
+
static void clientFollowXForwardedForCheck(Acl::Answer answer, void *data);
#endif /* FOLLOW_X_FORWARDED_FOR */
@@ -485,8 +490,16 @@ clientFollowXForwardedForCheck(Acl::Answer answer, void *data)
/* override the default src_addr tested if we have to go deeper than one level into XFF */
Filled(calloutContext->acl_checklist)->src_addr = request->indirect_client_addr;
}
- calloutContext->acl_checklist->nonBlockingCheck(clientFollowXForwardedForCheck, data);
- return;
+ if (++calloutContext->currentXffHopNumber < SQUID_X_FORWARDED_FOR_HOP_MAX) {
+ calloutContext->acl_checklist->nonBlockingCheck(clientFollowXForwardedForCheck, data);
+ return;
+ }
+ const auto headerName = Http::HeaderLookupTable.lookup(Http::HdrType::X_FORWARDED_FOR).name;
+ debugs(28, DBG_CRITICAL, "ERROR: Ignoring trailing " << headerName << " addresses" <<
+ Debug::Extra << "addresses allowed by follow_x_forwarded_for: " << calloutContext->currentXffHopNumber <<
+ Debug::Extra << "last/accepted address: " << request->indirect_client_addr <<
+ Debug::Extra << "ignored trailing addresses: " << request->x_forwarded_for_iterator);
+ // fall through to resume clientAccessCheck() processing
}
}
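
For illustration, a standalone sketch (hypothetical HopMax constant and right-to-left splitting, not Squid's ACL-driven walk) of the idea behind SQUID_X_FORWARDED_FOR_HOP_MAX: stop examining further X-Forwarded-For entries once a fixed number of hops has been processed.

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

static const std::size_t HopMax = 64;   // assumed cap for this sketch

// Walk an X-Forwarded-For value right to left (nearest hop first) and stop
// after HopMax entries, ignoring whatever precedes them.
static std::vector<std::string> examinedHops(std::string xff)
{
    std::vector<std::string> hops;
    while (!xff.empty() && hops.size() < HopMax) {
        const auto comma = xff.find_last_of(',');
        auto hop = (comma == std::string::npos) ? xff : xff.substr(comma + 1);
        const auto first = hop.find_first_not_of(' ');   // trim surrounding spaces
        const auto last = hop.find_last_not_of(' ');
        if (first != std::string::npos)
            hops.push_back(hop.substr(first, last - first + 1));
        if (comma == std::string::npos)
            break;
        xff.erase(comma);
    }
    if (!xff.empty() && hops.size() >= HopMax)
        std::cerr << "ignoring remaining X-Forwarded-For addresses: " << xff << "\n";
    return hops;
}

int main()
{
    for (const auto &hop : examinedHops("203.0.113.7, 198.51.100.2, 192.0.2.1"))
        std::cout << hop << "\n";   // 192.0.2.1, then 198.51.100.2, then 203.0.113.7
    return 0;
}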

File diff suppressed because it is too large


@@ -2,7 +2,7 @@
Name: squid
Version: 5.5
Release: 6%{?dist}
Release: 10%{?dist}
Summary: The Squid proxy caching server
Epoch: 7
# See CREDITS for breakdown of non GPLv2+ code
@@ -52,6 +52,25 @@ Patch209: squid-5.5-halfclosed.patch
Patch501: squid-5.5-CVE-2021-46784.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2129771
Patch502: squid-5.5-CVE-2022-41318.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2245910
Patch503: squid-5.5-CVE-2023-46846.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2245916
Patch504: squid-5.5-CVE-2023-46847.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2245919
Patch505: squid-5.5-CVE-2023-46848.patch
# https://issues.redhat.com/browse/RHEL-14802
Patch506: squid-5.5-CVE-2023-5824.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2248521
Patch507: squid-5.5-CVE-2023-46728.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2247567
Patch508: squid-5.5-CVE-2023-46724.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2252926
Patch509: squid-5.5-CVE-2023-49285.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2252923
Patch510: squid-5.5-CVE-2023-49286.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2254663
Patch511: squid-5.5-CVE-2023-50269.patch
# cache_swap.sh
Requires: bash gawk
@@ -129,6 +148,15 @@ lookup program (dnsserver), a program for retrieving FTP data
%patch501 -p1 -b .CVE-2021-46784
%patch502 -p1 -b .CVE-2022-41318
%patch503 -p1 -b .CVE-2023-46846
%patch504 -p1 -b .CVE-2023-46847
%patch505 -p1 -b .CVE-2023-46848
%patch506 -p1 -b .CVE-2023-5824
%patch507 -p1 -b .CVE-2023-46728
%patch508 -p1 -b .CVE-2023-46724
%patch509 -p1 -b .CVE-2023-49285
%patch510 -p1 -b .CVE-2023-49286
%patch511 -p1 -b .CVE-2023-50269
# https://bugzilla.redhat.com/show_bug.cgi?id=1679526
# Patch in the vendor documentation and used different location for documentation
@@ -355,6 +383,31 @@ fi
%changelog
* Fri Feb 02 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-10
- Resolves: RHEL-19556 - squid: denial of service in HTTP request
parsing (CVE-2023-50269)
* Thu Feb 01 2024 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-9
- Resolves: RHEL-18354 - squid: Buffer over-read in the HTTP Message processing
feature (CVE-2023-49285)
- Resolves: RHEL-18345 - squid: Incorrect Check of Function Return Value In
Helper Process management (CVE-2023-49286)
- Resolves: RHEL-18146 - squid crashes in assertion when a parent peer exists
- Resolves: RHEL-18231 - squid: Denial of Service in SSL Certificate validation
(CVE-2023-46724)
- Resolves: RHEL-15912 - squid: NULL pointer dereference in the gopher protocol
code (CVE-2023-46728)
* Tue Dec 05 2023 Tomas Korbar <tkorbar@redhat.com> - 7:5.5-8
- Resolves: RHEL-14802 - squid: multiple issues in HTTP response caching
* Sun Nov 12 2023 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-7
- Resolves: RHEL-14820 - squid: squid: denial of Service in FTP
- Resolves: RHEL-14809 - squid: squid: Denial of Service in HTTP Digest
Authentication
- Resolves: RHEL-14781 - squid: squid: Request/Response smuggling in HTTP/1.1
and ICAP
* Wed Aug 16 2023 Luboš Uhliarik <luhliari@redhat.com> - 7:5.5-6
- Resolves: #2231827 - Crash with half_closed_client on